lsquic_send_ctl.c revision 4051ae3a
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <openssl/rand.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_packet_common.h"
#include "lsquic_alarmset.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_bw_sampler.h"
#include "lsquic_minmax.h"
#include "lsquic_bbr.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_varint.h"
#include "lsquic_hq.h"
#include "lsquic_hash.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_cong_ctl.h"
#include "lsquic_enc_sess.h"
#include "lsquic_malo.h"
#include "lsquic_attq.h"
#include "lsquic_http1x_if.h"
#include "lsqpack.h"
#include "lsquic_frab_list.h"
#include "lsquic_qdec_hdl.h"
#include "lsquic_crand.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID lsquic_conn_log_cid(ctl->sc_conn_pub->lconn)
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3

#define CGP(ctl) ((struct cong_ctl *) &(ctl)->sc_cong_u)

#define packet_out_total_sz(p) \
                lsquic_packet_out_total_sz(ctl->sc_conn_pub->lconn, p)
#define packet_out_sent_sz(p) \
                lsquic_packet_out_sent_sz(ctl->sc_conn_pub->lconn, p)

enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};

#ifdef NDEBUG
#define MAX_BPQ_COUNT 10
#else
static unsigned MAX_BPQ_COUNT = 10;
void
lsquic_send_ctl_set_max_bpq_count (unsigned count) { MAX_BPQ_COUNT = count; }
#endif


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (struct lsquic_send_ctl *, enum packnum_space,
                                                        enum expire_filter);

static void
set_retx_alarm (struct lsquic_send_ctl *, enum packnum_space, lsquic_time_t);

static void
send_ctl_detect_losses (struct lsquic_send_ctl *, enum packnum_space,
                                                        lsquic_time_t time);

static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl);

static unsigned
send_ctl_all_bytes_out (const struct lsquic_send_ctl *ctl);

static void
send_ctl_reschedule_poison (struct lsquic_send_ctl *ctl);

static int
send_ctl_can_send_pre_hsk (struct lsquic_send_ctl *ctl);

static int
send_ctl_can_send (struct lsquic_send_ctl *ctl);

#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_BITS_1;   /* This is 2 bytes in both GQUIC and IQUIC */
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets[PNS_APP], po_next)
        if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON))
                && (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM))))
            return 1;

    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const struct lsquic_send_ctl *ctl,
                                                        enum packnum_space pns)
{
    lsquic_packet_out_t *packet_out;

    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets[pns], po_next)
        if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON))
                && (packet_out->po_frame_types & ctl->sc_retx_frames))
            return packet_out;

    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const struct lsquic_send_ctl *ctl,
                                                    enum packnum_space pns)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets[pns],
                                            lsquic_packets_tailq, po_next)
        if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON))
                && (packet_out->po_frame_types & ctl->sc_retx_frames))
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    enum packnum_space pns;

    for (pns = ctl->sc_flags & SC_IETF ? PNS_INIT : PNS_APP; pns < N_PNS; ++pns)
        TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets[pns], po_next)
            if (packet_out->po_flags & PO_HELLO)
                return 1;
    return 0;
}

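/* The checks below are ordered by priority: handshake retransmission wins
 * while the handshake is still in progress, an armed loss-detection timer
 * selects loss mode, and up to two tail loss probes (TLPs) are sent before
 * falling back to RTO.
 */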
static enum retx_mode
get_retx_mode (const lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                                    && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}


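/* A worked example of the formula below: with srtt of 300,000 usec and
 * rttvar of 200,000 usec, the delay is 300,000 + 4 * 200,000 = 1,100,000
 * usec.  Smaller results are clamped to MIN_RTO_DELAY; with no RTT sample
 * yet (srtt == 0), DEFAULT_RETX_DELAY is used and the minimum does not
 * apply.
 */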
static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}


static void
retx_alarm_rings (enum alarm_id al_id, void *ctx, lsquic_time_t expiry,
                                                            lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum packnum_space pns;
    enum retx_mode rm;

    pns = al_id - AL_RETX_INIT;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX_INIT + pns));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, pns, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, pns, now);
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, pns, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ctl->sc_last_rto_time = now;
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, pns, EXFI_ALL);
        ctl->sc_ci->cci_timeout(CGP(ctl));
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl, pns);
    if (packet_out)
        set_retx_alarm(ctl, pns, now);
    lsquic_send_ctl_sanity_check(ctl);
}


static lsquic_packno_t
first_packno (const struct lsquic_send_ctl *ctl)
{
    if (ctl->sc_flags & SC_IETF)
        return 0;
    else
        return 1;
}


/*
 * [draft-ietf-quic-transport-12], Section 4.4.1:
 *
 * "   The first Initial packet that is sent by a client contains a packet
 * "   number of 0.  All subsequent packets contain a packet number that is
 * "   incremented by at least one, see (Section 4.8).
 */
static void
send_ctl_pick_initial_packno (struct lsquic_send_ctl *ctl)
{
    ctl->sc_cur_packno = first_packno(ctl) - 1;
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, enum send_ctl_flags flags)
{
    unsigned i, algo;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets[PNS_INIT]);
    TAILQ_INIT(&ctl->sc_unacked_packets[PNS_HSK]);
    TAILQ_INIT(&ctl->sc_unacked_packets[PNS_APP]);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_conn_pub = conn_pub;
    assert(!(flags & ~(SC_IETF|SC_NSTP|SC_ECN)));
    ctl->sc_flags = flags;
    send_ctl_pick_initial_packno(ctl);
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    if (flags & SC_ECN)
        ctl->sc_ecn = ECN_ECT0;
    else
        ctl->sc_ecn = ECN_NOT_ECT;
    if (flags & SC_IETF)
        ctl->sc_retx_frames = IQUIC_FRAME_RETX_MASK;
    else
        ctl->sc_retx_frames = GQUIC_FRAME_RETRANSMITTABLE_MASK;
    lsquic_alarmset_init_alarm(alset, AL_RETX_INIT, retx_alarm_rings, ctl);
    lsquic_alarmset_init_alarm(alset, AL_RETX_HSK, retx_alarm_rings, ctl);
    lsquic_alarmset_init_alarm(alset, AL_RETX_APP, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist, ctl->sc_flags & SC_IETF);
    if (0 == enpub->enp_settings.es_cc_algo)
        algo = LSQUIC_DF_CC_ALGO;
    else
        algo = enpub->enp_settings.es_cc_algo;
    if (algo == 2)
        ctl->sc_ci = &lsquic_cong_bbr_if;
    else
        ctl->sc_ci = &lsquic_cong_cubic_if;
    ctl->sc_ci->cci_init(CGP(ctl), conn_pub, ctl->sc_retx_frames);
    if (ctl->sc_flags & SC_PACE)
        lsquic_pacer_init(&ctl->sc_pacer, conn_pub->lconn,
        /* TODO: conn_pub has a pointer to enpub: drop third argument */
                                    enpub->enp_settings.es_clock_granularity);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
    ctl->sc_max_packno_bits = PACKNO_BITS_2; /* Safe value before verneg */
    ctl->sc_cached_bpt.stream_id = UINT64_MAX;
#if LSQUIC_EXTRA_CHECKS
    ctl->sc_flags |= SC_SANITY_CHECK;
    LSQ_DEBUG("sanity checks enabled");
#endif
    ctl->sc_gap = UINT64_MAX - 1 /* Can't have +1 == 0 */;
    if ((ctl->sc_conn_pub->lconn->cn_flags & (LSCONN_IETF|LSCONN_SERVER))
                                                == (LSCONN_IETF|LSCONN_SERVER))
        ctl->sc_can_send = send_ctl_can_send_pre_hsk;
    else
        ctl->sc_can_send = send_ctl_can_send;
}


static int
send_ctl_ecn_on (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_ecn != ECN_NOT_ECT;
}


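/* The base delay from get_retx_delay() is doubled for each consecutive
 * RTO, capped at MAX_RTO_BACKOFFS doublings.  For example, a 1,000,000-usec
 * base with three consecutive RTOs yields 1,000,000 << 3 = 8,000,000 usec.
 */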
static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}


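/* Example: with more than one packet in flight and srtt of 30,000 usec,
 * the TLP delay is max(10,000, 2 * 30,000) = 60,000 usec.  With a single
 * packet in flight, the delay is max(1.5 * srtt + MIN_RTO_DELAY, 2 * srtt),
 * which the one-second MIN_RTO_DELAY term dominates at typical RTTs.
 */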
static lsquic_time_t
calculate_tlp_delay (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (ctl->sc_n_in_flight_all > 1)
    {
        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }
    else
    {
        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }

    return delay;
}


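/* Example for the handshake branch below: with srtt of 100,000 usec and
 * sc_n_hsk == 1, the delay is max(1.5 * 100,000, 10,000) << 1 = 300,000
 * usec.  With no RTT sample, the 150,000-usec default is shifted instead.
 */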
static void
set_retx_alarm (struct lsquic_send_ctl *ctl, enum packnum_space pns,
                                                            lsquic_time_t now)
{
    enum retx_mode rm;
    lsquic_time_t delay;

    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets[pns]));

    rm = get_retx_mode(ctl);
    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
    /* [draft-iyengar-quic-loss-recovery-01]:
     *
     *  if (handshake packets are outstanding):
     *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
     *      handshake_count++;
     */
        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
        if (delay)
        {
            delay += delay / 2;
            if (10000 > delay)
                delay = 10000;
        }
        else
            delay = 150000;
        delay <<= ctl->sc_n_hsk;
        ++ctl->sc_n_hsk;
        break;
    case RETX_MODE_LOSS:
        delay = ctl->sc_loss_to;
        break;
    case RETX_MODE_TLP:
        delay = calculate_tlp_delay(ctl);
        break;
    default:
        assert(rm == RETX_MODE_RTO);
        /* XXX the comment below as well as the name of the function
         * that follows seem obsolete.
         */
        /* Base RTO on the first unacked packet, following reference
         * implementation.
         */
        delay = calculate_packet_rto(ctl);
        break;
    }

    if (delay > MAX_RTO_DELAY)
        delay = MAX_RTO_DELAY;

    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
    lsquic_alarmset_set(ctl->sc_alset, AL_RETX_INIT + pns, now + delay);
}


static int
send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
{
    return ctl->sc_largest_acked_packno
        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
}


#define SC_PACK_SIZE(ctl_) (+(ctl_)->sc_conn_pub->path->np_pack_size)

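/* The transfer time is how long it takes to send one full-size packet at
 * the current pacing rate.  For example (the packet size is illustrative;
 * the actual value comes from the network path), a 1,370-byte packet at a
 * pacing rate of 10,000,000 bytes per second gives
 * 1,370 * 1,000,000 / 10,000,000 = 137 usec.
 */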
static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    lsquic_time_t tx_time;
    uint64_t pacing_rate;
    int in_recovery;

    in_recovery = send_ctl_in_recovery(ctl);
    pacing_rate = ctl->sc_ci->cci_pacing_rate(CGP(ctl), in_recovery);
    tx_time = (uint64_t) SC_PACK_SIZE(ctl) * 1000000 / pacing_rate;
    return tx_time;
}


static void
send_ctl_unacked_append (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    enum packnum_space pns;

    pns = lsquic_packet_out_pns(packet_out);
    assert(0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON)));
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets[pns], packet_out, po_next);
    packet_out->po_flags |= PO_UNACKED;
    ctl->sc_bytes_unacked_all += packet_out_sent_sz(packet_out);
    ctl->sc_n_in_flight_all  += 1;
    if (packet_out->po_frame_types & ctl->sc_retx_frames)
    {
        ctl->sc_bytes_unacked_retx += packet_out_total_sz(packet_out);
        ++ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_unacked_remove (struct lsquic_send_ctl *ctl,
                     struct lsquic_packet_out *packet_out, unsigned packet_sz)
{
    enum packnum_space pns;

    pns = lsquic_packet_out_pns(packet_out);
    TAILQ_REMOVE(&ctl->sc_unacked_packets[pns], packet_out, po_next);
    packet_out->po_flags &= ~PO_UNACKED;
    assert(ctl->sc_bytes_unacked_all >= packet_sz);
    ctl->sc_bytes_unacked_all -= packet_sz;
    ctl->sc_n_in_flight_all  -= 1;
    if (packet_out->po_frame_types & ctl->sc_retx_frames)
    {
        ctl->sc_bytes_unacked_retx -= packet_sz;
        --ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl,
                      struct lsquic_packet_out *packet_out)
{
    packet_out->po_flags |= PO_SCHED;
    ++ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled += packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


static void
send_ctl_sched_append (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_prepend (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_remove (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    packet_out->po_flags &= ~PO_SCHED;
    assert(ctl->sc_n_scheduled);
    --ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled -= packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


/* Poisoned packets are used to detect optimistic ACK attacks.  We only
 * use a single poisoned packet at a time.
 */
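/* The mechanism: a packet number (sc_gap) is skipped when sending -- see
 * send_ctl_next_packno() -- and a placeholder "poisoned" packet with that
 * number is kept on the unacked list.  Because that number was never sent,
 * a peer that acknowledges it must be acking a packet it never received,
 * and lsquic_send_ctl_got_ack() treats such an ACK as an error.  For
 * example, with sc_gap == 7, the wire carries packets 5, 6, 8, ... while
 * the unacked list holds 5, 6, [poison 7], 8, ...
 */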
static int
send_ctl_add_poison (struct lsquic_send_ctl *ctl)
{
    struct lsquic_packet_out *poison;

    poison = lsquic_malo_get(ctl->sc_conn_pub->packet_out_malo);
    if (!poison)
        return -1;

    memset(poison, 0, sizeof(*poison));
    poison->po_flags      = PO_UNACKED|PO_POISON;
    poison->po_packno     = ctl->sc_gap;
    poison->po_loss_chain = poison; /* Won't be used, but just in case */
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets[PNS_APP], poison, po_next);
    LSQ_DEBUG("insert poisoned packet %"PRIu64, poison->po_packno);
    ctl->sc_flags |= SC_POISON;
    return 0;
}


static void
send_ctl_reschedule_poison (struct lsquic_send_ctl *ctl)
{
    struct lsquic_packet_out *poison;
    enum lsq_log_level log_level;
    lsquic_time_t approx_now;

    TAILQ_FOREACH(poison, &ctl->sc_unacked_packets[PNS_APP], po_next)
        if (poison->po_flags & PO_POISON)
        {
            LSQ_DEBUG("remove poisoned packet %"PRIu64, poison->po_packno);
            TAILQ_REMOVE(&ctl->sc_unacked_packets[PNS_APP], poison, po_next);
            lsquic_malo_put(poison);
            lsquic_send_ctl_begin_optack_detection(ctl);
            ctl->sc_flags &= ~SC_POISON;
            return;
        }

    approx_now = ctl->sc_last_sent_time;
    if (0 == ctl->sc_enpub->enp_last_warning[WT_NO_POISON]
                || ctl->sc_enpub->enp_last_warning[WT_NO_POISON]
                                            + WARNING_INTERVAL < approx_now)
    {
        ctl->sc_enpub->enp_last_warning[WT_NO_POISON] = approx_now;
        log_level = LSQ_LOG_WARN;
    }
    else
        log_level = LSQ_LOG_DEBUG;
    LSQ_LOG(log_level, "odd: poisoned packet %"PRIu64" not found during "
        "reschedule, flag: %d", ctl->sc_gap, !!(ctl->sc_flags & SC_POISON));
}


int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
                             struct lsquic_packet_out *packet_out)
{
    enum packnum_space pns;
    char frames[lsquic_frame_types_str_sz];

    assert(!(packet_out->po_flags & PO_ENCRYPTED));
    ctl->sc_last_sent_time = packet_out->po_sent;
    pns = lsquic_packet_out_pns(packet_out);
    if (packet_out->po_packno == ctl->sc_gap + 1)
    {
        assert(!(ctl->sc_flags & SC_POISON));
        lsquic_senhist_add(&ctl->sc_senhist, ctl->sc_gap);
        if (0 != send_ctl_add_poison(ctl))
            return -1;
    }
    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
        packet_out->po_packno, lsquic_frame_types_to_str(frames,
            sizeof(frames), packet_out->po_frame_types));
    lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno);
    if (ctl->sc_ci->cci_sent)
        ctl->sc_ci->cci_sent(CGP(ctl), packet_out, ctl->sc_bytes_unacked_all,
                                            ctl->sc_flags & SC_APP_LIMITED);
    send_ctl_unacked_append(ctl, packet_out);
    if (packet_out->po_frame_types & ctl->sc_retx_frames)
    {
        if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX_INIT + pns))
            set_retx_alarm(ctl, pns, packet_out->po_sent);
        if (ctl->sc_n_in_flight_retx == 1)
            ctl->sc_flags |= SC_WAS_QUIET;
    }
    /* TODO: Do we really want to use those for RTT info? Revisit this. */
    /* Hold on to packets that are not retransmittable because we need them
     * to sample RTT information.  They are released when ACK is received.
     */
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_total_sent;
#endif
    lsquic_send_ctl_sanity_check(ctl);
    return 0;
}


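/* An RTT sample is taken only when the largest acked packet number is new
 * (larger than any packet number previously sampled) and the peer-reported
 * ACK delay (lack_delta) is smaller than the measured RTT, which guards
 * against nonsensical delay values.
 */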
static void
take_rtt_sample (lsquic_send_ctl_t *ctl,
                 lsquic_time_t now, lsquic_time_t lack_delta)
{
    const lsquic_packno_t packno = ctl->sc_largest_acked_packno;
    const lsquic_time_t sent = ctl->sc_largest_acked_sent_time;
    const lsquic_time_t measured_rtt = now - sent;
    if (packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
    {
        ctl->sc_max_rtt_packno = packno;
        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt,
                                                                lack_delta);
        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
            "new srtt: %"PRIu64, packno, measured_rtt, lack_delta,
            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
    }
}


static void
send_ctl_return_enc_data (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    ctl->sc_enpub->enp_pmi->pmi_return(ctl->sc_enpub->enp_pmi_ctx,
        packet_out->po_path->np_peer_ctx,
        packet_out->po_enc_data, lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


static void
send_ctl_destroy_packet (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON)))
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub,
                                            packet_out->po_path->np_peer_ctx);
    else
        lsquic_malo_put(packet_out);
}


static void
send_ctl_maybe_renumber_sched_to_right (struct lsquic_send_ctl *ctl,
                                        const struct lsquic_packet_out *cur)
{
    struct lsquic_packet_out *packet_out;

    /* If current packet has PO_REPACKNO set, it means that all those to the
     * right of it have this flag set as well.
     */
    if (0 == (cur->po_flags & PO_REPACKNO))
    {
        ctl->sc_cur_packno = cur->po_packno - 1;
        for (packet_out = TAILQ_NEXT(cur, po_next);
                packet_out && 0 == (packet_out->po_flags & PO_REPACKNO);
                    packet_out = TAILQ_NEXT(packet_out, po_next))
        {
            packet_out->po_flags |= PO_REPACKNO;
        }
    }
}


/* The third argument is used to advance the `next' pointer when modifying
 * the unacked queue.  This is necessary because the unacked queue may
 * contain several elements of the same chain.  This is not true of the
 * lost and scheduled packet queues, as the loss records are only present
 * on the unacked queue.
 */
static void
send_ctl_destroy_chain (struct lsquic_send_ctl *ctl,
                        struct lsquic_packet_out *const packet_out,
                        struct lsquic_packet_out **next)
{
    struct lsquic_packet_out *chain_cur, *chain_next;
    unsigned packet_sz, count;
    enum packnum_space pns = lsquic_packet_out_pns(packet_out);

    count = 0;
    for (chain_cur = packet_out->po_loss_chain; chain_cur != packet_out;
                                                    chain_cur = chain_next)
    {
        chain_next = chain_cur->po_loss_chain;
        switch (chain_cur->po_flags & (PO_SCHED|PO_UNACKED|PO_LOST))
        {
        case PO_SCHED:
            send_ctl_maybe_renumber_sched_to_right(ctl, chain_cur);
            send_ctl_sched_remove(ctl, chain_cur);
            break;
        case PO_UNACKED:
            if (chain_cur->po_flags & PO_LOSS_REC)
                TAILQ_REMOVE(&ctl->sc_unacked_packets[pns], chain_cur, po_next);
            else
            {
                packet_sz = packet_out_sent_sz(chain_cur);
                send_ctl_unacked_remove(ctl, chain_cur, packet_sz);
            }
            break;
        case PO_LOST:
            TAILQ_REMOVE(&ctl->sc_lost_packets, chain_cur, po_next);
            break;
        case 0:
            /* This is also weird, but let it pass */
            break;
        default:
            assert(0);
            break;
        }
        if (next && *next == chain_cur)
            *next = TAILQ_NEXT(*next, po_next);
        if (0 == (chain_cur->po_flags & PO_LOSS_REC))
            lsquic_packet_out_ack_streams(chain_cur);
        send_ctl_destroy_packet(ctl, chain_cur);
        ++count;
    }
    packet_out->po_loss_chain = packet_out;

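    /* The "%.*s" below prints the plural "s" only when count != 1: the
     * precision argument selects zero or one character of the string "s".
     */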
    if (count)
        LSQ_DEBUG("destroyed %u packet%.*s in chain of packet %"PRIu64,
            count, count != 1, "s", packet_out->po_packno);
}


static void
send_ctl_record_loss (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    struct lsquic_packet_out *loss_record;

    loss_record = lsquic_malo_get(ctl->sc_conn_pub->packet_out_malo);
    if (loss_record)
    {
        memset(loss_record, 0, sizeof(*loss_record));
        loss_record->po_flags = PO_UNACKED|PO_LOSS_REC|PO_SENT_SZ;
        loss_record->po_flags |=
            ((packet_out->po_flags >> POPNS_SHIFT) & 3) << POPNS_SHIFT;
        /* Copy values used in ACK processing: */
        loss_record->po_packno = packet_out->po_packno;
        loss_record->po_sent = packet_out->po_sent;
        loss_record->po_sent_sz = packet_out_sent_sz(packet_out);
        loss_record->po_frame_types = packet_out->po_frame_types;
        /* Insert the loss record into the chain: */
        loss_record->po_loss_chain = packet_out->po_loss_chain;
        packet_out->po_loss_chain = loss_record;
        /* Place the loss record next to the lost packet we are about to
         * remove from the list:
         */
        TAILQ_INSERT_BEFORE(packet_out, loss_record, po_next);
    }
    else
        LSQ_INFO("cannot allocate memory for loss record");
}


/* Returns true if packet was rescheduled, false otherwise.  In the latter
 * case, you should not dereference packet_out after the function returns.
 */
static int
send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
            lsquic_packet_out_t *packet_out, struct lsquic_packet_out **next)
{
    unsigned packet_sz;

    assert(ctl->sc_n_in_flight_all);
    packet_sz = packet_out_sent_sz(packet_out);

    ++ctl->sc_loss_count;

    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        ctl->sc_flags |= SC_LOST_ACK_INIT << lsquic_packet_out_pns(packet_out);
        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
    }

    if (ctl->sc_ci->cci_lost)
        ctl->sc_ci->cci_lost(CGP(ctl), packet_out, packet_sz);

    /* This is a client-only check, server check happens in mini conn */
    if (send_ctl_ecn_on(ctl)
            && 0 == ctl->sc_ecn_total_acked[PNS_INIT]
                && HETY_INITIAL == packet_out->po_header_type
                    && 3 == packet_out->po_packno)
    {
        LSQ_DEBUG("possible ECN black hole during handshake, disable ECN");
        ctl->sc_ecn = ECN_NOT_ECT;
    }

    if (packet_out->po_frame_types & ctl->sc_retx_frames)
    {
        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        send_ctl_record_loss(ctl, packet_out);
        send_ctl_unacked_remove(ctl, packet_out, packet_sz);
        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
        packet_out->po_flags |= PO_LOST;
        return 1;
    }
    else
    {
        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        send_ctl_unacked_remove(ctl, packet_out, packet_sz);
        send_ctl_destroy_chain(ctl, packet_out, next);
        send_ctl_destroy_packet(ctl, packet_out);
        return 0;
    }
}


static lsquic_packno_t
largest_retx_packet_number (const struct lsquic_send_ctl *ctl,
                                                    enum packnum_space pns)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets[pns],
                                                lsquic_packets_tailq, po_next)
    {
        if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON))
                && (packet_out->po_frame_types & ctl->sc_retx_frames))
            return packet_out->po_packno;
    }
    return 0;
}


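/* Example of the FACK check below: with N_NACKS_BEFORE_RETX == 3, packet 5
 * is declared lost once the largest acked packet number exceeds 8, that is,
 * once the peer's ACKs have moved more than three packets past it.
 */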
static void
send_ctl_detect_losses (struct lsquic_send_ctl *ctl, enum packnum_space pns,
                                                            lsquic_time_t time)
{
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t largest_retx_packno, largest_lost_packno;

    largest_retx_packno = largest_retx_packet_number(ctl, pns);
    largest_lost_packno = 0;
    ctl->sc_loss_to = 0;

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets[pns]);
            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_flags & (PO_LOSS_REC|PO_POISON))
            continue;

        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
                                                ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out, &next);
            continue;
        }

        if (largest_retx_packno
            && (packet_out->po_frame_types & ctl->sc_retx_frames)
            && largest_retx_packno <= ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            ctl->sc_loss_to =
                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
                                    ctl->sc_loss_to, packet_out->po_packno);
            (void) send_ctl_handle_lost_packet(ctl, packet_out, &next);
            continue;
        }

        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
        {
            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
                                                    packet_out->po_packno);
            if (packet_out->po_frame_types & ctl->sc_retx_frames)
                largest_lost_packno = packet_out->po_packno;
            else { /* don't count it as a loss */; }
            (void) send_ctl_handle_lost_packet(ctl, packet_out, &next);
            continue;
        }
    }

    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
    {
        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
        ctl->sc_ci->cci_loss(CGP(ctl));
        if (ctl->sc_flags & SC_PACE)
            lsquic_pacer_loss_event(&ctl->sc_pacer);
        ctl->sc_largest_sent_at_cutback =
                                lsquic_senhist_largest(&ctl->sc_senhist);
    }
    else if (largest_lost_packno)
        /* Lost packets whose numbers are smaller than the largest packet
         * number sent at the time of the last loss event indicate the same
         * loss event.  This follows NewReno logic, see RFC 6582.
         */
        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
}


int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time, lsquic_time_t now)
{
    const struct lsquic_packno_range *range =
                                    &acki->ranges[ acki->n_ranges - 1 ];
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t smallest_unacked;
    lsquic_packno_t ack2ed[2];
    unsigned packet_sz;
    int app_limited;
    signed char do_rtt, skip_checks;
    enum packnum_space pns;
    unsigned ecn_total_acked, ecn_ce_cnt, one_rtt_cnt;

    pns = acki->pns;
    packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets[pns]);
#if __GNUC__
    __builtin_prefetch(packet_out);
#endif

#if __GNUC__
#   define UNLIKELY(cond) __builtin_expect(cond, 0)
#else
#   define UNLIKELY(cond) cond
#endif

#if __GNUC__
    if (UNLIKELY(LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)))
#endif
        LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                            largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    if (UNLIKELY(largest_acked(acki)
                                > lsquic_senhist_largest(&ctl->sc_senhist)))
    {
        LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
            "was never sent", acki->ranges[0].low, acki->ranges[0].high);
        return -1;
    }

    if (ctl->sc_ci->cci_begin_ack)
        ctl->sc_ci->cci_begin_ack(CGP(ctl), ack_recv_time,
                                                    ctl->sc_bytes_unacked_all);

    ecn_total_acked = 0;
    ecn_ce_cnt = 0;
    one_rtt_cnt = 0;

    if (UNLIKELY(ctl->sc_flags & SC_WAS_QUIET))
    {
        ctl->sc_flags &= ~SC_WAS_QUIET;
        LSQ_DEBUG("ACK comes after a period of quiescence");
        ctl->sc_ci->cci_was_quiet(CGP(ctl), now, ctl->sc_bytes_unacked_all);
    }

    if (UNLIKELY(!packet_out))
        goto no_unacked_packets;

    smallest_unacked = packet_out->po_packno;
    LSQ_DEBUG("Smallest unacked: %"PRIu64, smallest_unacked);

    ack2ed[1] = 0;

    if (packet_out->po_packno > largest_acked(acki))
        goto detect_losses;

    if (largest_acked(acki) > ctl->sc_cur_rt_end)
    {
        ++ctl->sc_rt_count;
        ctl->sc_cur_rt_end = lsquic_senhist_largest(&ctl->sc_senhist);
    }

    do_rtt = 0, skip_checks = 0;
    app_limited = -1;
    do
    {
        next = TAILQ_NEXT(packet_out, po_next);
#if __GNUC__
        __builtin_prefetch(next);
#endif
        if (skip_checks)
            goto after_checks;
        /* This is faster than binary search in the normal case when the number
         * of ranges is not much larger than the number of unacked packets.
         */
        while (UNLIKELY(range->high < packet_out->po_packno))
            --range;
        if (range->low <= packet_out->po_packno)
        {
            skip_checks = range == acki->ranges;
            if (app_limited < 0)
                app_limited = send_ctl_retx_bytes_out(ctl) + 3 * SC_PACK_SIZE(ctl) /* This
                    is the "maximum burst" parameter */
                    < ctl->sc_ci->cci_get_cwnd(CGP(ctl));
  after_checks:
            ctl->sc_largest_acked_packno    = packet_out->po_packno;
            ctl->sc_largest_acked_sent_time = packet_out->po_sent;
            ecn_total_acked += lsquic_packet_out_ecn(packet_out) != ECN_NOT_ECT;
            ecn_ce_cnt += lsquic_packet_out_ecn(packet_out) == ECN_CE;
            one_rtt_cnt += lsquic_packet_out_enc_level(packet_out) == ENC_LEV_FORW;
            if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON)))
            {
                packet_sz = packet_out_sent_sz(packet_out);
                send_ctl_unacked_remove(ctl, packet_out, packet_sz);
                lsquic_packet_out_ack_streams(packet_out);
                LSQ_DEBUG("acking via regular record %"PRIu64,
                                                        packet_out->po_packno);
            }
            else if (packet_out->po_flags & PO_LOSS_REC)
            {
                packet_sz = packet_out->po_sent_sz;
                TAILQ_REMOVE(&ctl->sc_unacked_packets[pns], packet_out,
                                                                    po_next);
                LSQ_DEBUG("acking via loss record %"PRIu64,
                                                        packet_out->po_packno);
#if LSQUIC_CONN_STATS
                ++ctl->sc_conn_pub->conn_stats->out.acked_via_loss;
#endif
            }
            else
            {
                LSQ_WARN("poisoned packet %"PRIu64" acked",
                                                        packet_out->po_packno);
                return -1;
            }
            ack2ed[!!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))]
                = packet_out->po_ack2ed;
            do_rtt |= packet_out->po_packno == largest_acked(acki);
            ctl->sc_ci->cci_ack(CGP(ctl), packet_out, packet_sz, now,
                                                             app_limited);
            send_ctl_destroy_chain(ctl, packet_out, &next);
            send_ctl_destroy_packet(ctl, packet_out);
        }
        packet_out = next;
    }
    while (packet_out && packet_out->po_packno <= largest_acked(acki));

    if (do_rtt)
    {
        take_rtt_sample(ctl, ack_recv_time, acki->lack_delta);
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

  detect_losses:
    send_ctl_detect_losses(ctl, pns, ack_recv_time);
    if (send_ctl_first_unacked_retx_packet(ctl, pns))
        set_retx_alarm(ctl, pns, now);
    else
    {
        LSQ_DEBUG("No retransmittable packets: clear alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX_INIT + pns);
    }
    lsquic_send_ctl_sanity_check(ctl);

    if ((ctl->sc_flags & SC_NSTP) && ack2ed[1] > ctl->sc_largest_ack2ed[pns])
        ctl->sc_largest_ack2ed[pns] = ack2ed[1];

    if (ctl->sc_n_in_flight_retx == 0)
        ctl->sc_flags |= SC_WAS_QUIET;

    if (one_rtt_cnt)
        ctl->sc_flags |= SC_1RTT_ACKED;

    if (send_ctl_ecn_on(ctl))
    {
        const uint64_t sum = acki->ecn_counts[ECN_ECT0]
                           + acki->ecn_counts[ECN_ECT1]
                           + acki->ecn_counts[ECN_CE];
        ctl->sc_ecn_total_acked[pns] += ecn_total_acked;
        ctl->sc_ecn_ce_cnt[pns] += ecn_ce_cnt;
        if (sum >= ctl->sc_ecn_total_acked[pns])
        {
            if (sum > ctl->sc_ecn_total_acked[pns])
                ctl->sc_ecn_total_acked[pns] = sum;
            if (acki->ecn_counts[ECN_CE] > ctl->sc_ecn_ce_cnt[pns])
            {
                ctl->sc_ecn_ce_cnt[pns] = acki->ecn_counts[ECN_CE];
                LSQ_WARN("TODO: handle ECN CE event");  /* XXX TODO */
            }
        }
        else
        {
            LSQ_INFO("ECN total ACKed (%"PRIu64") is greater than the sum "
                "of ECN counters (%"PRIu64"): disable ECN",
                ctl->sc_ecn_total_acked[pns], sum);
            ctl->sc_ecn = ECN_NOT_ECT;
        }
    }

  update_n_stop_waiting:
    if (!(ctl->sc_flags & (SC_NSTP|SC_IETF)))
    {
        if (smallest_unacked > smallest_acked(acki))
            /* Peer is acking packets that have been acked already.  Schedule
             * ACK and STOP_WAITING frame to chop the range if we get two of
             * these in a row.
             */
            ++ctl->sc_n_stop_waiting;
        else
            ctl->sc_n_stop_waiting = 0;
    }
    lsquic_send_ctl_sanity_check(ctl);
    if (ctl->sc_ci->cci_end_ack)
        ctl->sc_ci->cci_end_ack(CGP(ctl), ctl->sc_bytes_unacked_all);
    if (ctl->sc_gap < smallest_acked(acki))
        send_ctl_reschedule_poison(ctl);
    return 0;

  no_unacked_packets:
    smallest_unacked = lsquic_senhist_largest(&ctl->sc_senhist) + 1;
    ctl->sc_flags |= SC_WAS_QUIET;
    goto update_n_stop_waiting;
}


lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    enum packnum_space pns;

    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    for (pns = ctl->sc_flags & SC_IETF ? PNS_INIT : PNS_APP; pns < N_PNS; ++pns)
        if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets[pns])))
            /* We're OK with using a loss record */
            return packet_out->po_packno;

    return lsquic_senhist_largest(&ctl->sc_senhist) + first_packno(ctl);
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *lost_packet;

  get_next_lost:
    lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            if (0 == (lost_packet->po_flags & PO_MINI))
            {
                lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
                if (lost_packet->po_regen_sz >= lost_packet->po_data_sz)
                {
                    LSQ_DEBUG("Dropping packet %"PRIu64" from lost queue",
                        lost_packet->po_packno);
                    TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
                    lost_packet->po_flags &= ~PO_LOST;
                    send_ctl_destroy_chain(ctl, lost_packet, NULL);
                    send_ctl_destroy_packet(ctl, lost_packet);
                    goto get_next_lost;
                }
            }
            else
            {
                /* Mini connection only ever sends data on stream 1.  There
                 * is nothing to elide: always resend it.
                 */
                ;
            }
        }

        if (!lsquic_send_ctl_can_send(ctl))
            return NULL;

        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
        lost_packet->po_flags &= ~PO_LOST;
        lost_packet->po_flags |= PO_RETX;
    }

    return lost_packet;
}


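/* The poisoned packet number (sc_gap) is never assigned to a real packet:
 * if incrementing the counter lands on it, that number is skipped.  For
 * example, with sc_gap == 7, packet numbers run ..., 5, 6, 8, 9, ...
 */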
static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    lsquic_packno_t packno;

    packno = ++ctl->sc_cur_packno;
    if (packno == ctl->sc_gap)
        packno = ++ctl->sc_cur_packno;

    return packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out, *next;
    enum packnum_space pns;
    unsigned n;

    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    assert(0 == ctl->sc_bytes_scheduled);
    for (pns = PNS_INIT; pns < N_PNS; ++pns)
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets[pns])))
        {
            TAILQ_REMOVE(&ctl->sc_unacked_packets[pns], packet_out, po_next);
            packet_out->po_flags &= ~PO_UNACKED;
#ifndef NDEBUG
            if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON)))
            {
                ctl->sc_bytes_unacked_all -= packet_out_sent_sz(packet_out);
                --ctl->sc_n_in_flight_all;
            }
#endif
            send_ctl_destroy_packet(ctl, packet_out);
        }
    assert(0 == ctl->sc_n_in_flight_all);
    assert(0 == ctl->sc_bytes_unacked_all);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        packet_out->po_flags &= ~PO_LOST;
        send_ctl_destroy_packet(ctl, packet_out);
    }
    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            send_ctl_destroy_packet(ctl, packet_out);
        }
    }
    if (ctl->sc_flags & SC_PACE)
        lsquic_pacer_cleanup(&ctl->sc_pacer);
    ctl->sc_ci->cci_cleanup(CGP(ctl));
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
    free(ctl->sc_token);
}


static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_retx
         ;
}


static unsigned
send_ctl_all_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_all
         ;
}


int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
{
    return (ctl->sc_flags & SC_PACE)
        && !lsquic_pacer_can_schedule(&ctl->sc_pacer,
                                               ctl->sc_n_in_flight_all);
}


static int
send_ctl_can_send (struct lsquic_send_ctl *ctl)
{
    const unsigned n_out = send_ctl_all_bytes_out(ctl);
    LSQ_DEBUG("%s: n_out: %u (unacked_all: %u); cwnd: %"PRIu64, __func__,
        n_out, ctl->sc_bytes_unacked_all,
        ctl->sc_ci->cci_get_cwnd(CGP(ctl)));
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= ctl->sc_ci->cci_get_cwnd(CGP(ctl)))
            return 0;
        if (lsquic_pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn,
                    lsquic_pacer_next_sched(&ctl->sc_pacer),
                    AEW_PACER);
        }
        return 0;
    }
    else
        return n_out < ctl->sc_ci->cci_get_cwnd(CGP(ctl));
}


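/* Example of the pre-handshake anti-amplification check below: with 1,200
 * bytes received from a not-yet-validated client address, sending is
 * blocked once bytes out (sent plus scheduled) reach
 * 2 * 1,200 + 1,200 / 2 = 3,000 bytes.
 */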
static int
send_ctl_can_send_pre_hsk (struct lsquic_send_ctl *ctl)
{
    unsigned bytes_in, bytes_out;

    bytes_in = ctl->sc_conn_pub->bytes_in;
    bytes_out = ctl->sc_conn_pub->bytes_out + ctl->sc_bytes_scheduled;
    if (bytes_out >= bytes_in * 2 + bytes_in / 2 /* This should work out
                                                to around 3 on average */)
    {
        LSQ_DEBUG("%s: amplification block: %u bytes in, %u bytes out",
                                            __func__, bytes_in, bytes_out);
        return 0;
    }
    else
        return send_ctl_can_send(ctl);
}


#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_can_send (struct lsquic_send_ctl *ctl)
{
    return ctl->sc_can_send(ctl);
}


/* Like lsquic_send_ctl_can_send(), but without side effects */
static int
send_ctl_could_send (const struct lsquic_send_ctl *ctl)
{
    uint64_t cwnd;
    unsigned n_out;

    if ((ctl->sc_flags & SC_PACE) && lsquic_pacer_delayed(&ctl->sc_pacer))
        return 0;

    cwnd = ctl->sc_ci->cci_get_cwnd(CGP(ctl));
    n_out = send_ctl_all_bytes_out(ctl);
    return n_out < cwnd;
}


void
lsquic_send_ctl_maybe_app_limited (struct lsquic_send_ctl *ctl,
                                            const struct network_path *path)
{
    const struct lsquic_packet_out *packet_out;

    packet_out = lsquic_send_ctl_last_scheduled(ctl, PNS_APP, path, 0);
    if ((packet_out && lsquic_packet_out_avail(packet_out) > 10)
                                                || send_ctl_could_send(ctl))
    {
        LSQ_DEBUG("app-limited");
        ctl->sc_flags |= SC_APP_LIMITED;
    }
}


static void
send_ctl_expire (struct lsquic_send_ctl *ctl, enum packnum_space pns,
                                                    enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets[pns]);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON)))
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out,
                                                                        &next);
        }
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets[pns]); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out,
                                                                        &next);
        }
        break;
    default:
        assert(filter == EXFI_LAST);
        packet_out = send_ctl_last_unacked_retx_packet(ctl, pns);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out, NULL);
        else
            n_resubmitted = 0;
        break;
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                    filter_type2str[filter], n_resubmitted);
}


void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    enum packnum_space pns;

    for (pns = ctl->sc_flags & SC_IETF ? PNS_INIT : PNS_APP; pns < N_PNS; ++pns)
    {
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX_INIT + pns);
        send_ctl_expire(ctl, pns, EXFI_ALL);
    }
    lsquic_send_ctl_sanity_check(ctl);
}


#ifndef NDEBUG
void
lsquic_send_ctl_do_sanity_check (const struct lsquic_send_ctl *ctl)
{
    const struct lsquic_packet_out *packet_out;
    lsquic_packno_t prev_packno;
    int prev_packno_set;
    unsigned count, bytes;
    enum packnum_space pns;

#if _MSC_VER
    prev_packno = 0;
#endif
    count = 0, bytes = 0;
    for (pns = PNS_INIT; pns <= PNS_APP; ++pns)
    {
        prev_packno_set = 0;
        TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets[pns], po_next)
        {
            if (prev_packno_set)
                assert(packet_out->po_packno > prev_packno);
            else
            {
                prev_packno = packet_out->po_packno;
                prev_packno_set = 1;
            }
            if (0 == (packet_out->po_flags & (PO_LOSS_REC|PO_POISON)))
            {
                bytes += packet_out_sent_sz(packet_out);
                ++count;
            }
        }
    }
    assert(count == ctl->sc_n_in_flight_all);
    assert(bytes == ctl->sc_bytes_unacked_all);

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
    {
        assert(packet_out->po_flags & PO_SCHED);
        bytes += packet_out_total_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_scheduled);
    assert(bytes == ctl->sc_bytes_scheduled);
}
#endif


void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    if (ctl->sc_flags & SC_PACE)
    {
        unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled;
        lsquic_pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    send_ctl_sched_append(ctl, packet_out);
}


/* This wrapper resets the consecutive-RTO counter if too much time has
 * passed since the last RTO.
 */
1603static unsigned
1604send_ctl_get_n_consec_rtos (struct lsquic_send_ctl *ctl)
1605{
1606    lsquic_time_t timeout;
1607
1608    if (ctl->sc_n_consec_rtos)
1609    {
1610        timeout = calculate_packet_rto(ctl);
1611        if (ctl->sc_last_rto_time + timeout < ctl->sc_last_sent_time)
1612        {
1613            ctl->sc_n_consec_rtos = 0;
1614            LSQ_DEBUG("reset RTO counter after %"PRIu64" usec",
1615                ctl->sc_last_sent_time - ctl->sc_last_rto_time);
1616        }
1617    }
1618
1619    return ctl->sc_n_consec_rtos;
1620}
1621
1622
1623/* This mimics the logic in lsquic_send_ctl_next_packet_to_send(): we want
1624 * to check whether the first scheduled packet cannot be sent.
1625 */
1626int
1627lsquic_send_ctl_sched_is_blocked (struct lsquic_send_ctl *ctl)
1628{
1629    const lsquic_packet_out_t *packet_out
1630                            = TAILQ_FIRST(&ctl->sc_scheduled_packets);
1631    return send_ctl_get_n_consec_rtos(ctl)
1632        && 0 == ctl->sc_next_limit
1633        && packet_out
1634        && !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK));
1635}
1636
1637
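/* Pad the Initial packet with zeroes so that, together with the scheduled
 * packets that will be coalesced with it, the datagram reaches `limit'
 * bytes.  The only caller below uses this to bring client Initial
 * datagrams up to the 1200-byte minimum required by IETF QUIC.
 */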
1638static void
1639send_ctl_maybe_zero_pad (struct lsquic_send_ctl *ctl,
1640                        struct lsquic_packet_out *initial_packet, size_t limit)
1641{
1642    struct lsquic_packet_out *packet_out;
1643    size_t cum_size, size;
1644
1645    cum_size = packet_out_total_sz(initial_packet);
1646    if (cum_size >= limit)
1647    {
1648        LSQ_DEBUG("packet size %zu larger than %zu-byte limit: not "
1649            "zero-padding", cum_size, limit);
1650        return;
1651    }
1652
1653    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
1654    {
1655        size = packet_out_total_sz(packet_out);
1656        if (cum_size + size > limit)
1657            break;
1658        cum_size += size;
1659    }
1660
1661    LSQ_DEBUG("cum_size: %zu; limit: %zu", cum_size, limit);
1662    assert(cum_size <= limit);
1663    size = limit - cum_size;
1664    if (size > lsquic_packet_out_avail(initial_packet))
1665        size = lsquic_packet_out_avail(initial_packet);
1666    if (size)
1667    {
1668        memset(initial_packet->po_data + initial_packet->po_data_sz, 0, size);
1669        initial_packet->po_data_sz += size;
1670        initial_packet->po_frame_types |= QUIC_FTBIT_PADDING;
1671    }
1672    LSQ_DEBUG("Added %zu bytes of PADDING to packet %"PRIu64, size,
1673                                                initial_packet->po_packno);
1674}
1675
1676
1677/* Predict whether lsquic_send_ctl_next_packet_to_send() will return a
1678 * packet by mimicking its logic.  Returns true if packet will be returned,
1679 * false otherwise.
1680 */
1681int
1682lsquic_send_ctl_next_packet_to_send_predict (struct lsquic_send_ctl *ctl)
1683{
1684    const struct lsquic_packet_out *packet_out;
1685    unsigned n_rtos;
1686
1687    n_rtos = ~0u;
1688    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
1689    {
1690        if (!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
1691            && 0 == ctl->sc_next_limit
1692            && 0 != (n_rtos == ~0u ? /* Initialize once */
1693                    (n_rtos = send_ctl_get_n_consec_rtos(ctl)) : n_rtos))
1694        {
1695            LSQ_DEBUG("send prediction: no, n_rtos: %u", n_rtos);
1696            return 0;
1697        }
1698        if ((packet_out->po_flags & PO_REPACKNO)
1699                    && packet_out->po_regen_sz == packet_out->po_data_sz)
1700        {
1701            LSQ_DEBUG("send prediction: packet %"PRIu64" would be dropped, "
1702                "continue", packet_out->po_packno);
1703            continue;
1704        }
1705        LSQ_DEBUG("send prediction: yes, packet %"PRIu64", flags %u, frames 0x%X",
1706            packet_out->po_packno, (unsigned) packet_out->po_flags,
1707            (unsigned) packet_out->po_frame_types);
1708        return 1;
1709    }
1710
1711    LSQ_DEBUG("send prediction: no, no matching scheduled packets");
1712    return 0;
1713}
1714
1715
1716lsquic_packet_out_t *
1717lsquic_send_ctl_next_packet_to_send (struct lsquic_send_ctl *ctl, size_t size)
1718{
1719    lsquic_packet_out_t *packet_out;
1720    int dec_limit;
1721
1722  get_packet:
1723    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
1724    if (!packet_out)
1725        return NULL;
1726
1727    /* Note: keep logic in this function and in
1728     * lsquic_send_ctl_next_packet_to_send_predict() in synch.
1729     */
1730    if (!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
1731                                        && send_ctl_get_n_consec_rtos(ctl))
1732    {
1733        if (ctl->sc_next_limit)
1734            dec_limit = 1;
1735        else
1736            return NULL;
1737    }
1738    else
1739        dec_limit = 0;
1740
1741    if (packet_out->po_flags & PO_REPACKNO)
1742    {
1743        if (packet_out->po_regen_sz < packet_out->po_data_sz)
1744        {
1745            update_for_resending(ctl, packet_out);
1746            packet_out->po_flags &= ~PO_REPACKNO;
1747        }
1748        else
1749        {
1750            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
1751                packet_out->po_packno);
1752            send_ctl_sched_remove(ctl, packet_out);
1753            send_ctl_destroy_chain(ctl, packet_out, NULL);
1754            send_ctl_destroy_packet(ctl, packet_out);
1755            goto get_packet;
1756        }
1757    }
1758
1759    if (UNLIKELY(size))
1760    {
1761        if (packet_out_total_sz(packet_out) + size > SC_PACK_SIZE(ctl))
1762            return NULL;
1763        LSQ_DEBUG("packet %"PRIu64" (%zu bytes) will be tacked on to "
1764            "previous packet(s) (%zu bytes) (coalescing)",
1765            packet_out->po_packno, packet_out_total_sz(packet_out), size);
1766    }
1767    send_ctl_sched_remove(ctl, packet_out);
1768
1769    if (dec_limit)
1770    {
1771        --ctl->sc_next_limit;
1772        packet_out->po_flags |= PO_LIMITED;
1773    }
1774    else
1775        packet_out->po_flags &= ~PO_LIMITED;
1776
1777    if (UNLIKELY(packet_out->po_header_type == HETY_INITIAL)
1778                    && !(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_SERVER)
1779                    && size < 1200)
1780    {
1781        send_ctl_maybe_zero_pad(ctl, packet_out, 1200 - size);
1782    }
1783
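    /* Maintain the QL bits (an assumption: this appears to follow the QUIC
     * loss-bits extension draft): the loss bit is set on one outgoing
     * packet for each recently detected loss, while the square bit toggles
     * every 64 packets.  The extra increment of sc_square_count accounts
     * for the skipped packet number sc_gap, which is never sent.
     */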
1784    if (ctl->sc_flags & SC_QL_BITS)
1785    {
1786        packet_out->po_lflags |= POL_LOG_QL_BITS;
1787        if (ctl->sc_loss_count)
1788        {
1789            --ctl->sc_loss_count;
1790            packet_out->po_lflags |= POL_LOSS_BIT;
1791        }
1792        else
1793            packet_out->po_lflags &= ~POL_LOSS_BIT;
1794        if (packet_out->po_header_type == HETY_NOT_SET)
1795        {
1796            if (ctl->sc_gap + 1 == packet_out->po_packno)
1797                ++ctl->sc_square_count;
1798            if (ctl->sc_square_count++ & 64)
1799                packet_out->po_lflags |= POL_SQUARE_BIT;
1800            else
1801                packet_out->po_lflags &= ~POL_SQUARE_BIT;
1802        }
1803    }
1804
1805    return packet_out;
1806}
1807
1808
1809void
1810lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
1811                                            lsquic_packet_out_t *packet_out)
1812{
1813    send_ctl_sched_prepend(ctl, packet_out);
1814    if (packet_out->po_flags & PO_LIMITED)
1815        ++ctl->sc_next_limit;
1816    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
1817#if LSQUIC_SEND_STATS
1818    ++ctl->sc_stats.n_delayed;
1819#endif
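    /* Undo the QL-bit accounting performed when this packet was handed out
     * by lsquic_send_ctl_next_packet_to_send().
     */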
1820    if (packet_out->po_lflags & POL_LOSS_BIT)
1821        ++ctl->sc_loss_count;
1822    if ((ctl->sc_flags & SC_QL_BITS)
1823                            && packet_out->po_header_type == HETY_NOT_SET)
1824        ctl->sc_square_count -= 1 + (ctl->sc_gap + 1 == packet_out->po_packno);
1825}
1826
1827
1828int
1829lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
1830{
1831    const lsquic_packet_out_t *packet_out;
1832    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
1833        if (packet_out->po_frame_types &
1834                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
1835            return 1;
1836    return 0;
1837}
1838
1839
1840int
1841lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
1842{
1843    const lsquic_packet_out_t *packet_out;
1844    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
1845        if (packet_out->po_frame_types & ctl->sc_retx_frames)
1846            return 1;
1847    return 0;
1848}
1849
1850
1851static int
1852send_ctl_set_packet_out_token (const struct lsquic_send_ctl *ctl,
1853                                        struct lsquic_packet_out *packet_out)
1854{
1855    unsigned char *token;
1856
1857    token = malloc(ctl->sc_token_sz);
1858    if (!token)
1859    {
1860        LSQ_WARN("malloc failed: cannot set initial token");
1861        return -1;
1862    }
1863
1864    memcpy(token, ctl->sc_token, ctl->sc_token_sz);
1865    packet_out->po_token = token;
1866    packet_out->po_token_len = ctl->sc_token_sz;
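    /* Note: the token appears to share the nonce's storage and cleanup
     * path -- lsquic_send_ctl_retry() below frees po_nonce and clears
     * PO_NONCE before setting a new token -- hence the PO_NONCE flag.
     */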
1867    packet_out->po_flags |= PO_NONCE;
1868    LSQ_DEBUG("set initial token on packet");
1869    return 0;
1870}
1871
1872
1873static lsquic_packet_out_t *
1874send_ctl_allocate_packet (struct lsquic_send_ctl *ctl, enum packno_bits bits,
1875                            unsigned need_at_least, enum packnum_space pns,
1876                            const struct network_path *path)
1877{
1878    lsquic_packet_out_t *packet_out;
1879
1880    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
1881                    ctl->sc_conn_pub->packet_out_malo,
1882                    !(ctl->sc_flags & SC_TCID0), ctl->sc_conn_pub->lconn, bits,
1883                    ctl->sc_ver_neg->vn_tag, NULL, path);
1884    if (!packet_out)
1885        return NULL;
1886
1887    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen; that is why the check is performed at
         * this level rather than lower, before the packet is actually
         * allocated.
         */
1891        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
1892            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
1893            lsquic_packet_out_avail(packet_out), SC_PACK_SIZE(ctl));
1894        send_ctl_destroy_packet(ctl, packet_out);
1895        return NULL;
1896    }
1897
1898    if (UNLIKELY(pns != PNS_APP))
1899    {
1900        if (pns == PNS_INIT)
1901        {
1902            packet_out->po_header_type = HETY_INITIAL;
1903            if (ctl->sc_token)
1904                (void) send_ctl_set_packet_out_token(ctl, packet_out);
1905        }
1906        else
1907            packet_out->po_header_type = HETY_HANDSHAKE;
1908    }
1909
1910    lsquic_packet_out_set_pns(packet_out, pns);
1911    packet_out->po_lflags |= ctl->sc_ecn << POECN_SHIFT;
1912    packet_out->po_loss_chain = packet_out;
1913    return packet_out;
1914}
1915
1916
1917lsquic_packet_out_t *
1918lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least,
1919                        enum packnum_space pns, const struct network_path *path)
1920{
1921    lsquic_packet_out_t *packet_out;
1922    enum packno_bits bits;
1923
1924    bits = lsquic_send_ctl_packno_bits(ctl);
1925    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least, pns, path);
1926    if (!packet_out)
1927        return NULL;
1928
1929    packet_out->po_packno = send_ctl_next_packno(ctl);
1930    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
1931    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
1932    return packet_out;
1933}
1934
1935
1936struct lsquic_packet_out *
1937lsquic_send_ctl_last_scheduled (struct lsquic_send_ctl *ctl,
1938                    enum packnum_space pns, const struct network_path *path,
1939                    int regen_match)
1940{
1941    struct lsquic_packet_out *packet_out;
1942
1943    if (0 == regen_match)
1944    {
1945        TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_scheduled_packets,
1946                                                lsquic_packets_tailq, po_next)
1947            if (pns == lsquic_packet_out_pns(packet_out)
1948                                                && path == packet_out->po_path)
1949                return packet_out;
1950    }
1951    else
1952    {
1953        TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_scheduled_packets,
1954                                                lsquic_packets_tailq, po_next)
1955            if (pns == lsquic_packet_out_pns(packet_out)
1956                    && packet_out->po_regen_sz == packet_out->po_data_sz
1957                                                && path == packet_out->po_path)
1958                return packet_out;
1959    }
1960
1961    return NULL;
1962}
1963
1964
1965/* Do not use for STREAM frames
1966 */
1967lsquic_packet_out_t *
1968lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
1969                enum packnum_space pns, unsigned need_at_least,
1970                const struct network_path *path, int regen_match, int *is_err)
1971{
1972    lsquic_packet_out_t *packet_out;
1973
1974    assert(need_at_least > 0);
1975
1976    packet_out = lsquic_send_ctl_last_scheduled(ctl, pns, path, regen_match);
1977    if (packet_out
1978        && !(packet_out->po_flags & (PO_MINI|PO_STREAM_END|PO_RETX))
1979        && lsquic_packet_out_avail(packet_out) >= need_at_least)
1980    {
1981        return packet_out;
1982    }
1983
1984    if (!lsquic_send_ctl_can_send(ctl))
1985    {
1986        if (is_err)
1987            *is_err = 0;
1988        return NULL;
1989    }
1990
1991    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least, pns, path);
1992    if (packet_out)
1993    {
1994        lsquic_packet_out_set_pns(packet_out, pns);
1995        lsquic_send_ctl_scheduled_one(ctl, packet_out);
1996    }
1997    else if (is_err)
1998        *is_err = 1;
1999    return packet_out;
2000}
2001
2002
2003struct lsquic_packet_out *
2004lsquic_send_ctl_get_packet_for_crypto (struct lsquic_send_ctl *ctl,
2005                          unsigned need_at_least, enum packnum_space pns,
2006                          const struct network_path *path)
2007{
2008    struct lsquic_packet_out *packet_out;
2009
2010    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
2011    assert(need_at_least > 0);
2012
2013    packet_out = lsquic_send_ctl_last_scheduled(ctl, pns, path, 0);
2014    if (packet_out
2015        && !(packet_out->po_flags & (PO_STREAM_END|PO_RETX))
2016        && lsquic_packet_out_avail(packet_out) >= need_at_least)
2017    {
2018        return packet_out;
2019    }
2020
2021    if (!lsquic_send_ctl_can_send(ctl))
2022        return NULL;
2023
2024    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least, pns, path);
2025    if (!packet_out)
2026        return NULL;
2027
2028    lsquic_send_ctl_scheduled_one(ctl, packet_out);
2029    return packet_out;
2030}
2031
2032
2033static void
2034update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{
    lsquic_packno_t oldno, packno;
2038
2039    /* When the packet is resent, it uses the same number of bytes to encode
2040     * the packet number as the original packet.  This follows the reference
2041     * implementation.
2042     */
2043    oldno = packet_out->po_packno;
2044    packno = send_ctl_next_packno(ctl);
2045
2046    packet_out->po_flags &= ~PO_SENT_SZ;
2047    packet_out->po_frame_types &= ~GQUIC_FRAME_REGEN_MASK;
2048    assert(packet_out->po_frame_types);
2049    packet_out->po_packno = packno;
2050    lsquic_packet_out_set_ecn(packet_out, ctl->sc_ecn);
2051
2052    if (ctl->sc_ver_neg->vn_tag)
2053    {
        assert(packet_out->po_flags & PO_VERSION);  /* The flag can only disappear, never appear */
2055        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
2056    }
2057
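    /* Regenerated frames (such as ACKs) are not retransmitted verbatim:
     * chop them off and, if the packet is still on the scheduled queue,
     * subtract their size from the scheduled byte count.
     */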
2058    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
2059    if (packet_out->po_regen_sz)
2060    {
2061        if (packet_out->po_flags & PO_SCHED)
2062            ctl->sc_bytes_scheduled -= packet_out->po_regen_sz;
2063        lsquic_packet_out_chop_regen(packet_out);
2064    }
2065    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
2066                                                            oldno, packno);
2067    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
2068        "resending as packet %"PRIu64, oldno, packno);
2069}
2070
2071
2072unsigned
2073lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
2074{
2075    lsquic_packet_out_t *packet_out;
2076    unsigned n = 0;
2077
2078    while ((packet_out = send_ctl_next_lost(ctl)))
2079    {
2080        assert(packet_out->po_regen_sz < packet_out->po_data_sz);
2081        ++n;
2082#if LSQUIC_CONN_STATS
2083        ++ctl->sc_conn_pub->conn_stats->out.retx_packets;
2084#endif
2085        update_for_resending(ctl, packet_out);
2086        lsquic_send_ctl_scheduled_one(ctl, packet_out);
2087    }
2088
2089    if (n)
2090        LSQ_DEBUG("rescheduled %u packets", n);
2091
2092    return n;
2093}
2094
2095
2096void
2097lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
2098{
2099    if (tcid0)
2100    {
2101        LSQ_INFO("set TCID flag");
2102        ctl->sc_flags |=  SC_TCID0;
2103    }
2104    else
2105    {
2106        LSQ_INFO("unset TCID flag");
2107        ctl->sc_flags &= ~SC_TCID0;
2108    }
2109}
2110
2111
/* The controller elides STREAM frames of stream `stream_id' from scheduled
 * and buffered packets.  If a packet becomes empty as a result, it is
 * dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and the lost packets' frames for the reset
 * stream will be elided in due time.
 */
2120void
2121lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl,
2122                                                lsquic_stream_id_t stream_id)
2123{
2124    struct lsquic_packet_out *packet_out, *next;
2125    unsigned n, adj;
2126    int dropped;
2127
2128    dropped = 0;
2129#ifdef WIN32
2130    next = NULL;
2131#endif
2132    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
2133                                                            packet_out = next)
2134    {
2135        next = TAILQ_NEXT(packet_out, po_next);
2136
2137        if ((packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
2138                                    && 0 == (packet_out->po_flags & PO_MINI))
2139        {
2140            adj = lsquic_packet_out_elide_reset_stream_frames(packet_out,
2141                                                              stream_id);
2142            ctl->sc_bytes_scheduled -= adj;
2143            if (0 == packet_out->po_frame_types)
2144            {
2145                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
2146                    "stream %"PRIu64, packet_out->po_packno, stream_id);
2147                send_ctl_sched_remove(ctl, packet_out);
2148                send_ctl_destroy_chain(ctl, packet_out, NULL);
2149                send_ctl_destroy_packet(ctl, packet_out);
2150                ++dropped;
2151            }
2152        }
2153    }
2154
2155    if (dropped)
2156        lsquic_send_ctl_reset_packnos(ctl);
2157
2158    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
2159                                sizeof(ctl->sc_buffered_packets[0]); ++n)
2160    {
2161        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
2162                                                packet_out; packet_out = next)
2163        {
2164            next = TAILQ_NEXT(packet_out, po_next);
2165            if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
2166            {
2167                lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
2168                if (0 == packet_out->po_frame_types)
2169                {
2170                    LSQ_DEBUG("cancel buffered packet in queue #%u after eliding "
2171                        "frames for stream %"PRIu64, n, stream_id);
2172                    TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
2173                                 packet_out, po_next);
2174                    --ctl->sc_buffered_packets[n].bpq_count;
2175                    send_ctl_destroy_packet(ctl, packet_out);
2176                    LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u",
2177                              n, ctl->sc_buffered_packets[n].bpq_count);
2178                }
2179            }
2180        }
2181    }
2182}
2183
2184
/* Check whether any packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched(), i.e. whether there are any delayed
 * data packets.
 */
2189#ifndef NDEBUG
2190#if __GNUC__
2191__attribute__((weak))
2192#endif
2193#endif
2194int
2195lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
2196{
2197    const struct lsquic_packet_out *packet_out;
2198    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
2199        if (packet_out->po_regen_sz < packet_out->po_data_sz)
2200            return 1;
2201    return 0;
2202}
2203
2204
2205#ifndef NDEBUG
2206static void
2207send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
2208                                const struct lsquic_packets_tailq *tailq)
2209{
2210    const lsquic_packet_out_t *packet_out;
2211    unsigned n_packets;
2212    char *buf;
2213    size_t bufsz;
2214    int off;
2215
2216    n_packets = 0;
2217    TAILQ_FOREACH(packet_out, tailq, po_next)
2218        ++n_packets;
2219
2220    if (n_packets == 0)
2221    {
2222        LSQ_DEBUG("%s: [<empty set>]", prefix);
2223        return;
2224    }
2225
2226    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
2227    buf = malloc(bufsz);
2228    if (!buf)
2229    {
2230        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
2231        return;
2232    }
2233
2234    off = 0;
2235    TAILQ_FOREACH(packet_out, tailq, po_next)
2236    {
2237        if (off)
2238            buf[off++] = ' ';
2239        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
2240    }
2241
2242    LSQ_DEBUG("%s: [%s]", prefix, buf);
2243    free(buf);
2244}
2245
#define LOG_PACKET_Q(queue, prefix) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, prefix, queue);                          \
} while (0)
2250#else
2251#define LOG_PACKET_Q(p, q)
2252#endif
2253
2254
2255int
2256lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
2257{
2258    struct lsquic_packet_out *packet_out, *next;
2259    int dropped;
2260#ifndef NDEBUG
2261    int pre_squeeze_logged = 0;
2262#endif
2263
2264    dropped = 0;
2265    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
2266                                                            packet_out = next)
2267    {
2268        next = TAILQ_NEXT(packet_out, po_next);
2269        if (packet_out->po_regen_sz < packet_out->po_data_sz
2270                || packet_out->po_frame_types == QUIC_FTBIT_PATH_CHALLENGE)
2271        {
2272            if (packet_out->po_flags & PO_ENCRYPTED)
2273                send_ctl_return_enc_data(ctl, packet_out);
2274        }
2275        else
2276        {
2277#ifndef NDEBUG
2278            /* Log the whole list before we squeeze for the first time */
2279            if (!pre_squeeze_logged++)
2280                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
2281                                        "scheduled packets before squeezing");
2282#endif
2283            send_ctl_sched_remove(ctl, packet_out);
2284            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
2285                packet_out->po_packno);
2286            send_ctl_destroy_chain(ctl, packet_out, NULL);
2287            send_ctl_destroy_packet(ctl, packet_out);
2288            ++dropped;
2289        }
2290    }
2291
2292    if (dropped)
2293        lsquic_send_ctl_reset_packnos(ctl);
2294
2295#ifndef NDEBUG
2296    if (pre_squeeze_logged)
2297        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
2298                                        "scheduled packets after squeezing");
2299    else if (ctl->sc_n_scheduled > 0)
2300        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
2301#endif
2302
2303    return ctl->sc_n_scheduled > 0;
2304}
2305
2306
2307void
2308lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
2309{
2310    struct lsquic_packet_out *packet_out;
2311
2312    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
2313    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
2314        packet_out->po_flags |= PO_REPACKNO;
2315}
2316
2317
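/* Move the last `n_acks' scheduled packets, which are expected to carry
 * ACK frames (see the assert below), to the front of the scheduled queue
 * so that they are sent first.  Taking packets from the tail one by one
 * and prepending each preserves their relative order.
 */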
2318void
2319lsquic_send_ctl_ack_to_front (struct lsquic_send_ctl *ctl, unsigned n_acks)
2320{
2321    struct lsquic_packet_out *ack_packet;
2322
2323    assert(n_acks > 0);
2324    assert(ctl->sc_n_scheduled > n_acks);   /* Otherwise, why is this called? */
2325    for ( ; n_acks > 0; --n_acks)
2326    {
2327        ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
2328        assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
2329        TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
2330        TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
2331    }
2332}
2333
2334
2335void
2336lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
2337{
2338    struct lsquic_packet_out *packet_out, *next;
2339    unsigned n;
2340
2341    n = 0;
2342    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
2343                                                            packet_out = next)
2344    {
2345        next = TAILQ_NEXT(packet_out, po_next);
2346        if (0 == (packet_out->po_flags & PO_HELLO))
2347        {
2348            send_ctl_sched_remove(ctl, packet_out);
2349            send_ctl_destroy_chain(ctl, packet_out, NULL);
2350            send_ctl_destroy_packet(ctl, packet_out);
2351            ++n;
2352        }
2353    }
2354
2355    ctl->sc_senhist.sh_flags |= SH_GAP_OK;
2356
2357    LSQ_DEBUG("dropped %u scheduled packet%s (%u left)", n, n != 1 ? "s" : "",
2358        ctl->sc_n_scheduled);
2359}
2360
2361
2362#ifdef NDEBUG
2363static
2364#elif __GNUC__
2365__attribute__((weak))
2366#endif
2367enum buf_packet_type
2368lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
2369                                            const lsquic_stream_t *stream)
2370{
2371    const lsquic_stream_t *other_stream;
2372    struct lsquic_hash_elem *el;
2373    struct lsquic_hash *all_streams;
2374
2375    all_streams = ctl->sc_conn_pub->all_streams;
2376    for (el = lsquic_hash_first(all_streams); el;
2377                                     el = lsquic_hash_next(all_streams))
2378    {
2379        other_stream = lsquic_hashelem_getdata(el);
2380        if (other_stream != stream
2381              && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
2382                && !lsquic_stream_is_critical(other_stream)
2383                  && other_stream->sm_priority < stream->sm_priority)
2384            return BPT_OTHER_PRIO;
2385    }
2386    return BPT_HIGHEST_PRIO;
2387}
2388
2389
2390static enum buf_packet_type
2391send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
2392                                        const struct lsquic_stream *stream)
2393{
2394    if (ctl->sc_cached_bpt.stream_id != stream->id)
2395    {
2396        ctl->sc_cached_bpt.stream_id = stream->id;
2397        ctl->sc_cached_bpt.packet_type =
2398                                lsquic_send_ctl_determine_bpt(ctl, stream);
2399    }
2400    return ctl->sc_cached_bpt.packet_type;
2401}
2402
2403
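/* Upper bound on the number of packets allowed on a buffered queue.  For
 * the highest-priority queue, allow as many packets as would still fit
 * into the congestion window beyond what is already scheduled or in
 * flight, but never fewer than MAX_BPQ_COUNT.
 */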
2404static unsigned
2405send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
2406                                        enum buf_packet_type packet_type)
2407{
2408    unsigned long cwnd;
2409    unsigned count;
2410
2411    switch (packet_type)
2412    {
2413    case BPT_OTHER_PRIO:
2414        return MAX_BPQ_COUNT;
2415    case BPT_HIGHEST_PRIO:
2416    default: /* clang does not complain about absence of `default'... */
2417        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx;
2418        cwnd = ctl->sc_ci->cci_get_cwnd(CGP(ctl));
2419        if (count < cwnd / SC_PACK_SIZE(ctl))
2420        {
2421            count = cwnd / SC_PACK_SIZE(ctl) - count;
2422            if (count > MAX_BPQ_COUNT)
2423                return count;
2424        }
2425        return MAX_BPQ_COUNT;
2426    }
2427}
2428
2429
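/* Copy the regenerated frames (the ACK) from the head of `src' into the
 * empty packet `dst', transfer the corresponding frame-type bits, and
 * chop the regenerated bytes off `src'.
 */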
2430static void
2431send_ctl_move_ack (struct lsquic_send_ctl *ctl, struct lsquic_packet_out *dst,
2432                    struct lsquic_packet_out *src)
2433{
2434    assert(dst->po_data_sz == 0);
2435
2436    if (lsquic_packet_out_avail(dst) >= src->po_regen_sz)
2437    {
2438        memcpy(dst->po_data, src->po_data, src->po_regen_sz);
2439        dst->po_data_sz = src->po_regen_sz;
2440        dst->po_regen_sz = src->po_regen_sz;
2441        dst->po_frame_types |= (GQUIC_FRAME_REGEN_MASK & src->po_frame_types);
2442        src->po_frame_types &= ~GQUIC_FRAME_REGEN_MASK;
2443        lsquic_packet_out_chop_regen(src);
2444    }
2445}
2446
2447
2448static lsquic_packet_out_t *
2449send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
2450            enum buf_packet_type packet_type, unsigned need_at_least,
2451            const struct network_path *path, const struct lsquic_stream *stream)
2452{
2453    struct buf_packet_q *const packet_q =
2454                                    &ctl->sc_buffered_packets[packet_type];
2455    struct lsquic_conn *const lconn = ctl->sc_conn_pub->lconn;
2456    lsquic_packet_out_t *packet_out;
2457    enum packno_bits bits;
2458    enum { AA_STEAL, AA_GENERATE, AA_NONE, } ack_action;
2459
2460    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
2461    if (packet_out
2462        && !(packet_out->po_flags & PO_STREAM_END)
2463        && lsquic_packet_out_avail(packet_out) >= need_at_least)
2464    {
2465        return packet_out;
2466    }
2467
2468    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
2469        return NULL;
2470
2471    if (packet_q->bpq_count == 0)
2472    {
2473        /* If ACK was written to the low-priority queue first, steal it */
2474        if (packet_q == &ctl->sc_buffered_packets[BPT_HIGHEST_PRIO]
2475            && !TAILQ_EMPTY(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
2476            && (TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
2477                                        ->po_frame_types & QUIC_FTBIT_ACK))
2478        {
2479            LSQ_DEBUG("steal ACK frame from low-priority buffered queue");
2480            ack_action = AA_STEAL;
2481            bits = ctl->sc_max_packno_bits;
2482        }
2483        /* If ACK can be generated, write it to the first buffered packet. */
2484        else if (lconn->cn_if->ci_can_write_ack(lconn))
2485        {
2486            LSQ_DEBUG("generate ACK frame for first buffered packet in "
2487                                                    "queue #%u", packet_type);
2488            ack_action = AA_GENERATE;
            /* The packet number length is set to the largest possible
             * size to guarantee that the buffered packet carrying the
             * ACK will not need to be split.
             */
2492            bits = ctl->sc_max_packno_bits;
2493        }
2494        else
2495            goto no_ack_action;
2496    }
2497    else
2498    {
2499  no_ack_action:
2500        ack_action = AA_NONE;
2501        bits = lsquic_send_ctl_guess_packno_bits(ctl);
2502    }
2503
2504    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least, PNS_APP,
2505                                                                        path);
2506    if (!packet_out)
2507        return NULL;
2508
2509    switch (ack_action)
2510    {
2511    case AA_STEAL:
2512        send_ctl_move_ack(ctl, packet_out,
2513            TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets));
2514        break;
2515    case AA_GENERATE:
2516        lconn->cn_if->ci_write_ack(lconn, packet_out);
2517        break;
2518    case AA_NONE:
2519        break;
2520    }
2521
2522    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
2523    ++packet_q->bpq_count;
2524    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
2525              packet_type, packet_q->bpq_count);
2526    return packet_out;
2527}
2528
2529
2530static void
2531send_ctl_maybe_flush_decoder (struct lsquic_send_ctl *ctl,
2532                                        const struct lsquic_stream *caller)
2533{
2534    struct lsquic_stream *decoder;
2535
2536    if ((ctl->sc_flags & SC_IETF) && ctl->sc_conn_pub->u.ietf.qdh)
2537    {
2538        decoder = ctl->sc_conn_pub->u.ietf.qdh->qdh_dec_sm_out;
2539        if (decoder && decoder != caller
2540                                && lsquic_stream_has_data_to_flush(decoder))
2541        {
2542            LSQ_DEBUG("flushing decoder stream");
2543            lsquic_stream_flush(decoder);
2544        }
2545    }
2546}
2547
2548
2549lsquic_packet_out_t *
2550lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
2551                unsigned need_at_least, const struct network_path *path,
2552                const struct lsquic_stream *stream)
2553{
2554    enum buf_packet_type packet_type;
2555
2556    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
2557        return lsquic_send_ctl_get_writeable_packet(ctl, PNS_APP,
2558                                                need_at_least, path, 0, NULL);
2559    else
2560    {
2561        if (!lsquic_send_ctl_has_buffered(ctl))
2562            send_ctl_maybe_flush_decoder(ctl, stream);
2563        packet_type = send_ctl_lookup_bpt(ctl, stream);
2564        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
2565                                            path, stream);
2566    }
2567}
2568
2569
2570#ifdef NDEBUG
2571static
2572#elif __GNUC__
2573__attribute__((weak))
2574#endif
2575enum packno_bits
2576lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
2577{
2578    lsquic_packno_t smallest_unacked;
2579    enum packno_bits bits;
2580    unsigned n_in_flight;
2581    unsigned long cwnd;
2582    const struct parse_funcs *pf;
2583
2584    pf = ctl->sc_conn_pub->lconn->cn_pf;
2585
2586    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
2587    cwnd = ctl->sc_ci->cci_get_cwnd(CGP(ctl));
2588    n_in_flight = cwnd / SC_PACK_SIZE(ctl);
2589    bits = pf->pf_calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
2590                                                            n_in_flight);
2591    if (bits <= ctl->sc_max_packno_bits)
2592        return bits;
2593    else
2594        return ctl->sc_max_packno_bits;
2595}
2596
2597
2598enum packno_bits
2599lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
{
2602    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
2603        return lsquic_send_ctl_calc_packno_bits(ctl);
2604    else
2605        return lsquic_send_ctl_guess_packno_bits(ctl);
2606}
2607
2608
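/* Split the first buffered packet in two, moving `excess_bytes' into a
 * new packet inserted right after it.  This is needed when repacking the
 * packet with a larger packet number length would overflow it.
 */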
2609static int
2610split_buffered_packet (lsquic_send_ctl_t *ctl,
2611        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
2612        enum packno_bits bits, unsigned excess_bytes)
2613{
2614    struct buf_packet_q *const packet_q =
2615                                    &ctl->sc_buffered_packets[packet_type];
2616    lsquic_packet_out_t *new_packet_out;
2617
2618    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);
2619
2620    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0,
2621                        lsquic_packet_out_pns(packet_out), packet_out->po_path);
2622    if (!new_packet_out)
2623        return -1;
2624
2625    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
2626                  new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
2627    {
2628        lsquic_packet_out_set_packno_bits(packet_out, bits);
2629        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
2630                           po_next);
2631        ++packet_q->bpq_count;
2632        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
2633                  packet_type, packet_q->bpq_count);
2634        return 0;
2635    }
2636    else
2637    {
2638        send_ctl_destroy_packet(ctl, new_packet_out);
2639        return -1;
2640    }
2641}
2642
2643
2644int
2645lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
2646                                            enum buf_packet_type packet_type)
2647{
2648    struct buf_packet_q *const packet_q =
2649                                    &ctl->sc_buffered_packets[packet_type];
2650    const struct parse_funcs *const pf = ctl->sc_conn_pub->lconn->cn_pf;
2651    lsquic_packet_out_t *packet_out;
2652    unsigned used, excess;
2653
2654    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
2655    const enum packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
2656    const unsigned need = pf->pf_packno_bits2len(bits);
2657
2658    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
2659                                            lsquic_send_ctl_can_send(ctl))
2660    {
2661        if ((packet_out->po_frame_types & QUIC_FTBIT_ACK)
2662                            && packet_out->po_ack2ed < ctl->sc_largest_acked)
2663        {
            /* Chrome watches for a decrease in the value of the Largest
             * Observed field of the ACK frame and marks it as an error:
             * this is why we have to send out ACKs in the order they were
             * generated.
             */
2669            LSQ_DEBUG("Remove out-of-order ACK from buffered packet");
2670            lsquic_packet_out_chop_regen(packet_out);
2671            if (packet_out->po_data_sz == 0)
2672            {
2673                LSQ_DEBUG("Dropping now-empty buffered packet");
2674                TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
2675                --packet_q->bpq_count;
2676                send_ctl_destroy_packet(ctl, packet_out);
2677                continue;
2678            }
2679        }
2680        if (bits != lsquic_packet_out_packno_bits(packet_out))
2681        {
2682            used = pf->pf_packno_bits2len(
2683                                lsquic_packet_out_packno_bits(packet_out));
2684            if (need > used
2685                && need - used > lsquic_packet_out_avail(packet_out))
2686            {
2687                excess = need - used - lsquic_packet_out_avail(packet_out);
2688                if (0 != split_buffered_packet(ctl, packet_type,
2689                                               packet_out, bits, excess))
2690                {
2691                    return -1;
2692                }
2693            }
2694        }
2695        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
2696        --packet_q->bpq_count;
2697        packet_out->po_packno = send_ctl_next_packno(ctl);
2698        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u.  "
2699            "It becomes packet %"PRIu64, packet_type, packet_q->bpq_count,
2700            packet_out->po_packno);
2701        lsquic_send_ctl_scheduled_one(ctl, packet_out);
2702    }
2703
2704    return 0;
2705}
2706
2707
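/* Try to turn on the FIN bit in an existing unsent STREAM frame belonging
 * to `stream': first look through the appropriate buffered queue, then
 * through scheduled packets that have not been sent yet.  Returns 0 on
 * success, -1 if no suitable frame is found.
 */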
2708int
2709lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
2710                             const struct lsquic_stream *stream)
2711{
2712    enum buf_packet_type packet_type;
2713    struct buf_packet_q *packet_q;
2714    lsquic_packet_out_t *packet_out;
2715    const struct parse_funcs *pf;
2716
2717    pf = ctl->sc_conn_pub->lconn->cn_pf;
2718    packet_type = send_ctl_lookup_bpt(ctl, stream);
2719    packet_q = &ctl->sc_buffered_packets[packet_type];
2720
2721    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
2722                          lsquic_packets_tailq, po_next)
2723        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
2724            return 0;
2725
2726    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
2727        if (0 == packet_out->po_sent
2728            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
2729        {
2730            return 0;
2731        }
2732
2733    return -1;
2734}
2735
2736
2737size_t
2738lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
2739{
2740    const lsquic_packet_out_t *packet_out;
2741    unsigned n;
2742    size_t size;
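    /* The tailq heads are copied by value.  This is safe for forward
     * iteration, which reads only tqh_first and then follows the
     * elements' own next pointers.
     */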
2743    const struct lsquic_packets_tailq queues[] = {
2744        ctl->sc_scheduled_packets,
2745        ctl->sc_unacked_packets[PNS_INIT],
2746        ctl->sc_unacked_packets[PNS_HSK],
2747        ctl->sc_unacked_packets[PNS_APP],
2748        ctl->sc_lost_packets,
2749        ctl->sc_buffered_packets[0].bpq_packets,
2750        ctl->sc_buffered_packets[1].bpq_packets,
2751    };
2752
2753    size = sizeof(*ctl);
2754
2755    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
2756        TAILQ_FOREACH(packet_out, &queues[n], po_next)
2757            size += lsquic_packet_out_mem_used(packet_out);
2758
2759    return size;
2760}
2761
2762
2763void
2764lsquic_send_ctl_verneg_done (struct lsquic_send_ctl *ctl)
2765{
2766    ctl->sc_max_packno_bits = PACKNO_BITS_3;
2767    LSQ_DEBUG("version negotiation done (%s): max packno bits: %u",
2768        lsquic_ver2str[ ctl->sc_conn_pub->lconn->cn_version ],
2769        ctl->sc_max_packno_bits);
2770}
2771
2772
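/* Truncate the packet at the end of its last stream record, dropping
 * trailing PADDING bytes.  Used by lsquic_send_ctl_retry() below on lost
 * Initial packets before they are possibly split in two.
 */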
2773static void
2774strip_trailing_padding (struct lsquic_packet_out *packet_out)
2775{
2776    struct packet_out_srec_iter posi;
2777    const struct stream_rec *srec;
2778    unsigned off;
2779
2780    off = 0;
2781    for (srec = lsquic_posi_first(&posi, packet_out); srec;
2782                                                srec = lsquic_posi_next(&posi))
2783        off = srec->sr_off + srec->sr_len;
2784
2785    assert(off);
2786
2787    packet_out->po_data_sz = off;
2788    packet_out->po_frame_types &= ~QUIC_FTBIT_PADDING;
2789}
2790
2791
2792int
2793lsquic_send_ctl_retry (struct lsquic_send_ctl *ctl,
2794                                const unsigned char *token, size_t token_sz)
2795{
2796    struct lsquic_packet_out *packet_out, *next, *new_packet_out;
2797    struct lsquic_conn *const lconn = ctl->sc_conn_pub->lconn;
2798    size_t sz;
2799
2800    if (token_sz >= 1ull << (sizeof(packet_out->po_token_len) * 8))
2801    {
2802        LSQ_WARN("token size %zu is too long", token_sz);
2803        return -1;
2804    }
2805
2806    ++ctl->sc_retry_count;
2807    if (ctl->sc_retry_count > 3)
2808    {
2809        LSQ_INFO("failing connection after %u retries", ctl->sc_retry_count);
2810        return -1;
2811    }
2812
2813    send_ctl_expire(ctl, PNS_INIT, EXFI_ALL);
2814
2815    if (0 != lsquic_send_ctl_set_token(ctl, token, token_sz))
2816        return -1;
2817
2818    for (packet_out = TAILQ_FIRST(&ctl->sc_lost_packets); packet_out; packet_out = next)
2819    {
2820        next = TAILQ_NEXT(packet_out, po_next);
2821        if (HETY_INITIAL != packet_out->po_header_type)
2822            continue;
2823
2824        if (packet_out->po_nonce)
2825        {
2826            free(packet_out->po_nonce);
2827            packet_out->po_nonce = NULL;
2828            packet_out->po_flags &= ~PO_NONCE;
2829        }
2830
2831        if (0 != send_ctl_set_packet_out_token(ctl, packet_out))
2832        {
2833            LSQ_INFO("cannot set out token on packet");
2834            return -1;
2835        }
2836
2837        if (packet_out->po_frame_types & QUIC_FTBIT_PADDING)
2838            strip_trailing_padding(packet_out);
2839
2840        sz = lconn->cn_pf->pf_packout_size(lconn, packet_out);
2841        if (sz > 1200)
2842        {
2843            const enum packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
2844            new_packet_out = send_ctl_allocate_packet(ctl, bits, 0, PNS_INIT,
2845                                                        packet_out->po_path);
2846            if (!new_packet_out)
2847                return -1;
2848            if (0 != send_ctl_set_packet_out_token(ctl, new_packet_out))
2849            {
2850                send_ctl_destroy_packet(ctl, new_packet_out);
2851                LSQ_INFO("cannot set out token on packet");
2852                return -1;
2853            }
2854            if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm,
2855                            packet_out, new_packet_out,
2856                            ctl->sc_conn_pub->lconn->cn_pf, sz - 1200))
2857            {
2858                LSQ_DEBUG("split lost packet %"PRIu64" into two",
2859                                                        packet_out->po_packno);
2860                lsquic_packet_out_set_packno_bits(packet_out, bits);
2861                TAILQ_INSERT_AFTER(&ctl->sc_lost_packets, packet_out,
2862                                    new_packet_out, po_next);
2863                new_packet_out->po_flags |= PO_LOST;
2864                packet_out->po_flags &= ~PO_SENT_SZ;
2865            }
2866            else
2867            {
2868                LSQ_DEBUG("could not split lost packet into two");
2869                send_ctl_destroy_packet(ctl, new_packet_out);
2870                return -1;
2871            }
2872        }
2873    }
2874
2875    return 0;
2876}
2877
2878
2879int
2880lsquic_send_ctl_set_token (struct lsquic_send_ctl *ctl,
2881                const unsigned char *token, size_t token_sz)
2882{
2883    unsigned char *copy;
2884
    if (token_sz >= 1ull <<
                (sizeof(((struct lsquic_packet_out *)0)->po_token_len) * 8))
2887    {
2888        errno = EINVAL;
2889        return -1;
2890    }
2891
2892    copy = malloc(token_sz);
2893    if (!copy)
2894        return -1;
2895    memcpy(copy, token, token_sz);
2896    free(ctl->sc_token);
2897    ctl->sc_token = copy;
2898    ctl->sc_token_sz = token_sz;
2899    LSQ_DEBUG("set token");
2900    return 0;
2901}
2902
2903
2904void
2905lsquic_send_ctl_empty_pns (struct lsquic_send_ctl *ctl, enum packnum_space pns)
2906{
2907    lsquic_packet_out_t *packet_out, *next;
2908    unsigned count, packet_sz;
2909    struct lsquic_packets_tailq *const *q;
2910    struct lsquic_packets_tailq *const queues[] = {
2911        &ctl->sc_lost_packets,
2912        &ctl->sc_buffered_packets[0].bpq_packets,
2913        &ctl->sc_buffered_packets[1].bpq_packets,
2914    };
2915
    /* Don't bother with chain destruction, as all chain members always
     * belong to the same packet number space.
     */
2919
2920    count = 0;
2921    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
2922                                                            packet_out = next)
2923    {
2924        next = TAILQ_NEXT(packet_out, po_next);
2925        if (pns == lsquic_packet_out_pns(packet_out))
2926        {
2927            send_ctl_maybe_renumber_sched_to_right(ctl, packet_out);
2928            send_ctl_sched_remove(ctl, packet_out);
2929            send_ctl_destroy_packet(ctl, packet_out);
2930            ++count;
2931        }
2932    }
2933
2934    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets[pns]); packet_out;
2935                                                            packet_out = next)
2936    {
2937        next = TAILQ_NEXT(packet_out, po_next);
2938        if (packet_out->po_flags & (PO_LOSS_REC|PO_POISON))
2939            TAILQ_REMOVE(&ctl->sc_unacked_packets[pns], packet_out, po_next);
2940        else
2941        {
2942            packet_sz = packet_out_sent_sz(packet_out);
2943            send_ctl_unacked_remove(ctl, packet_out, packet_sz);
2944            lsquic_packet_out_ack_streams(packet_out);
2945        }
2946        send_ctl_destroy_packet(ctl, packet_out);
2947        ++count;
2948    }
2949
2950    for (q = queues; q < queues + sizeof(queues) / sizeof(queues[0]); ++q)
2951        for (packet_out = TAILQ_FIRST(*q); packet_out; packet_out = next)
2952            {
2953                next = TAILQ_NEXT(packet_out, po_next);
2954                if (pns == lsquic_packet_out_pns(packet_out))
2955                {
2956                    TAILQ_REMOVE(*q, packet_out, po_next);
2957                    send_ctl_destroy_packet(ctl, packet_out);
2958                    ++count;
2959                }
2960            }
2961
2962    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX_INIT + pns);
2963
2964    LSQ_DEBUG("emptied %s, destroyed %u packet%.*s", lsquic_pns2str[pns],
2965        count, count != 1, "s");
2966}
2967
2968
2969void
2970lsquic_send_ctl_repath (struct lsquic_send_ctl *ctl, struct network_path *old,
2971                                                    struct network_path *new)
2972{
2973    struct lsquic_packet_out *packet_out;
2974    unsigned count;
2975    struct lsquic_packets_tailq *const *q;
2976    struct lsquic_packets_tailq *const queues[] = {
2977        &ctl->sc_scheduled_packets,
2978        &ctl->sc_unacked_packets[PNS_INIT],
2979        &ctl->sc_unacked_packets[PNS_HSK],
2980        &ctl->sc_unacked_packets[PNS_APP],
2981        &ctl->sc_lost_packets,
2982        &ctl->sc_buffered_packets[0].bpq_packets,
2983        &ctl->sc_buffered_packets[1].bpq_packets,
2984    };
2985
2986    assert(ctl->sc_flags & SC_IETF);
2987
2988    count = 0;
2989    for (q = queues; q < queues + sizeof(queues) / sizeof(queues[0]); ++q)
2990        TAILQ_FOREACH(packet_out, *q, po_next)
2991            if (packet_out->po_path == old)
2992            {
2993                ++count;
2994                packet_out->po_path = new;
2995                if (packet_out->po_flags & PO_ENCRYPTED)
2996                    send_ctl_return_enc_data(ctl, packet_out);
2997            }
2998
2999    LSQ_DEBUG("repathed %u packet%.*s", count, count != 1, "s");
3000
3001    memset(&ctl->sc_conn_pub->rtt_stats, 0,
3002                                    sizeof(ctl->sc_conn_pub->rtt_stats));
3003    ctl->sc_ci->cci_reinit(CGP(ctl));
3004}
3005
3006
3007void
3008lsquic_send_ctl_return_enc_data (struct lsquic_send_ctl *ctl)
3009{
3010    struct lsquic_packet_out *packet_out;
3011
3012    assert(!(ctl->sc_flags & SC_IETF));
3013
3014    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
3015        if (packet_out->po_flags & PO_ENCRYPTED)
3016            send_ctl_return_enc_data(ctl, packet_out);
3017}
3018
3019
/* When the client updates the DCID based on the first packet returned by
 * the server, we must update the number of bytes scheduled if the DCID
 * length changed, because this length is used to calculate packet size.
 */
3024void
3025lsquic_send_ctl_cidlen_change (struct lsquic_send_ctl *ctl,
3026                                unsigned orig_cid_len, unsigned new_cid_len)
3027{
3028    unsigned diff;
3029
3030    assert(!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_SERVER));
3031    if (ctl->sc_n_scheduled)
3032    {
3033        ctl->sc_flags |= SC_CIDLEN;
3034        ctl->sc_cidlen = (signed char) new_cid_len - (signed char) orig_cid_len;
3035        if (new_cid_len > orig_cid_len)
3036        {
3037            diff = new_cid_len - orig_cid_len;
3038            diff *= ctl->sc_n_scheduled;
3039            ctl->sc_bytes_scheduled += diff;
3040            LSQ_DEBUG("increased bytes scheduled by %u bytes to %u",
3041                diff, ctl->sc_bytes_scheduled);
3042        }
3043        else if (new_cid_len < orig_cid_len)
3044        {
3045            diff = orig_cid_len - new_cid_len;
3046            diff *= ctl->sc_n_scheduled;
3047            ctl->sc_bytes_scheduled -= diff;
3048            LSQ_DEBUG("decreased bytes scheduled by %u bytes to %u",
3049                diff, ctl->sc_bytes_scheduled);
3050        }
3051        else
3052            LSQ_DEBUG("DCID length did not change");
3053    }
3054    else
3055        LSQ_DEBUG("no scheduled packets at the time of DCID change");
3056}
3057
3058
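/* Pick a random packet number, sc_gap, that this connection will skip in
 * order to detect optimistic-ACK attacks: an ACK covering the skipped
 * number acknowledges a packet that was never sent.  (Presumably a
 * poisoned placeholder packet occupies the gap; see
 * send_ctl_reschedule_poison().)
 */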
3059void
3060lsquic_send_ctl_begin_optack_detection (struct lsquic_send_ctl *ctl)
3061{
3062    uint8_t rand;
3063
3064    rand = lsquic_crand_get_byte(ctl->sc_enpub->enp_crand);
3065    ctl->sc_gap = ctl->sc_cur_packno + 1 + rand;
3066}
3067
3068
3069void
3070lsquic_send_ctl_path_validated (struct lsquic_send_ctl *ctl)
3071{
3072    LSQ_DEBUG("path validated: switch to regular can_send");
3073    ctl->sc_can_send = send_ctl_can_send;
3074}
3075