/* Copyright (c) 2017 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_alarmset.h"
#include "lsquic_packet_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_hash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3


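/* Retransmission modes, in decreasing order of precedence (see
 * get_retx_mode()): handshake retransmission is used while the handshake
 * is outstanding; loss retransmission when the early retransmit timer
 * (sc_loss_to) is armed; at most two tail loss probes are sent before
 * the sender falls back to regular RTO.
 */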
enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter);

static void
set_retx_alarm (lsquic_send_ctl_t *ctl);

static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time);


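/* In release builds these functions are static; in debug builds they are
 * (on GCC-compatible compilers) given weak linkage, presumably so that
 * test code can substitute its own versions.
 */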
#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_LEN_2;
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_flags & PO_HELLO)
            return 1;
    return 0;
}


static enum retx_mode
get_retx_mode (lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                                    && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}


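/* Standard TCP-style retransmission delay: srtt + 4 * rttvar, floored at
 * MIN_RTO_DELAY.  Before the first RTT sample is taken, srtt is zero and
 * DEFAULT_RETX_DELAY is used instead.
 */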
static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}


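/* Alarm callback.  Depending on the current retransmission mode, this
 * either resends handshake packets, runs loss detection, sends a tail
 * loss probe, or expires all outstanding packets (RTO).  Loss detection
 * registers cubic loss events itself; the RTO case calls
 * lsquic_cubic_timeout() instead.
 */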
static void
retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum retx_mode rm;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, lsquic_time_now());
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, EXFI_ALL);
        lsquic_cubic_timeout(&ctl->sc_cubic);
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl);
    if (packet_out)
        set_retx_alarm(ctl);
    lsquic_send_ctl_sanity_check(ctl);
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, unsigned short pack_size)
{
    unsigned i;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_pack_size = pack_size;
    ctl->sc_conn_pub = conn_pub;
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist);
    lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID);
    if (ctl->sc_flags & SC_PACE)
        pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID, 100000);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
}


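/* Exponential RTO backoff: the base delay from get_retx_delay() doubles
 * with each consecutive RTO, capped at 2^MAX_RTO_BACKOFFS.  For example,
 * with the minimum base delay of one second, the third consecutive RTO
 * would fire after 1 << 2 = 4 seconds.
 */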
static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}


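/* Tail loss probe delay: with more than one packet in flight, the probe
 * fires after max(10 ms, 2 * srtt); with a single packet in flight, after
 * max(1.5 * srtt + MIN_RTO_DELAY, 2 * srtt).
 */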
static lsquic_time_t
calculate_tlp_delay (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (ctl->sc_n_in_flight > 1)
    {
        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }
    else
    {
        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }

    return delay;
}


static void
set_retx_alarm (lsquic_send_ctl_t *ctl)
{
    enum retx_mode rm;
    lsquic_time_t delay, now;

    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets));

    now = lsquic_time_now();

    rm = get_retx_mode(ctl);
    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
    /* [draft-iyengar-quic-loss-recovery-01]:
     *
     *  if (handshake packets are outstanding):
     *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
     *      handshake_count++;
     */
        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
        delay += delay / 2;
        if (10000 > delay)
            delay = 10000;
        delay <<= ctl->sc_n_hsk;
        ++ctl->sc_n_hsk;
        break;
    case RETX_MODE_LOSS:
        delay = ctl->sc_loss_to;
        break;
    case RETX_MODE_TLP:
        delay = calculate_tlp_delay(ctl);
        break;
    case RETX_MODE_RTO:
        /* Base RTO on the first unacked packet, following reference
         * implementation.
         */
        delay = calculate_packet_rto(ctl);
        break;
    }

    if (delay > MAX_RTO_DELAY)
        delay = MAX_RTO_DELAY;

    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
    lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay);
}


static int
send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
{
    return ctl->sc_largest_acked_packno
        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
}


static int
send_ctl_in_slow_start (lsquic_send_ctl_t *ctl)
{
    return lsquic_cubic_in_slow_start(&ctl->sc_cubic);
}


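/* Callback used by the pacer: estimate how long it takes to transfer one
 * packet.  Bandwidth is estimated as cwnd packets per srtt; the pacing
 * rate is twice that in slow start, equal to it in recovery, and 1.25
 * times it in congestion avoidance.  For example, with cwnd of 100
 * packets of 1,400 bytes and srtt of 100,000 usec, bandwidth is 1.4 MB/s
 * and the steady-state inter-packet transfer time is 800 usec.
 */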
static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    uint64_t bandwidth, pacing_rate;
    lsquic_time_t srtt, tx_time;
    unsigned cwnd;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (srtt == 0)
        srtt = 50000;
    cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    bandwidth = (uint64_t) cwnd * (uint64_t) ctl->sc_pack_size * 1000000 / srtt;
    if (send_ctl_in_slow_start(ctl))
        pacing_rate = bandwidth * 2;
    else if (send_ctl_in_recovery(ctl))
        pacing_rate = bandwidth;
    else
        pacing_rate = bandwidth + bandwidth / 4;

    tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate;
    LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %u; bandwidth: "
        "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl),
        send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time);
    return tx_time;
}


int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
                             struct lsquic_packet_out *packet_out)
{
    char frames[lsquic_frame_types_str_sz];
    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
        packet_out->po_packno, lsquic_frame_types_to_str(frames,
            sizeof(frames), packet_out->po_frame_types));
    if (0 == lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno))
    {
        TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next);
        if ((packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) &&
                    !lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
            set_retx_alarm(ctl);
        /* Hold on to packets that are not retransmittable because we need them
         * to sample RTT information.  They are released when ACK is received.
         */
        ++ctl->sc_n_in_flight;
#if LSQUIC_SEND_STATS
        ++ctl->sc_stats.n_total_sent;
#endif
        return 0;
    }
    else
        return -1;
}


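/* Binary search of the ACK ranges for `packno'.  The ranges are ordered
 * from the highest packet number to the lowest, hence the inverted
 * comparisons below.
 */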
static int
in_acked_range (const ack_info_t *acki, lsquic_packno_t packno)
{
    int i, low, high;

    low = 0, high = (int) acki->n_ranges - 1;
    do
    {
        i = low + (high - low) / 2;
        if (acki->ranges[i].low <= packno && acki->ranges[i].high >= packno)
            return 1;
        else if (acki->ranges[i].high < packno)
            high = i - 1;
        else
            low = i + 1;
    }
    while (low <= high);

    return 0;
}


static void
take_rtt_sample (lsquic_send_ctl_t *ctl, const lsquic_packet_out_t *packet_out,
                 lsquic_time_t now, lsquic_time_t lack_delta)
{
    assert(packet_out->po_sent);
    lsquic_time_t measured_rtt = now - packet_out->po_sent;
    if (packet_out->po_packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
    {
        ctl->sc_max_rtt_packno = packet_out->po_packno;
        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt, lack_delta);
        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
            "new srtt: %"PRIu64, packet_out->po_packno, measured_rtt, lack_delta,
            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
    }
}


/* Returns true if packet was rescheduled, false otherwise.  In the latter
 * case, you should not dereference packet_out after the function returns.
 */
static int
send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    assert(ctl->sc_n_in_flight);
    --ctl->sc_n_in_flight;
    TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
    if (packet_out->po_flags & PO_ENCRYPTED) {
        ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
                                                packet_out->po_enc_data);
        packet_out->po_flags &= ~PO_ENCRYPTED;
        packet_out->po_enc_data = NULL;
    }
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        ctl->sc_flags |= SC_LOST_ACK;
        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
    }
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
        return 1;
    }
    else
    {
        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        return 0;
    }
}


static lsquic_packno_t
largest_retx_packet_number (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                                lsquic_packets_tailq, po_next)
    {
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out->po_packno;
    }
    return 0;
}


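/* Three loss-detection mechanisms are used: FACK (a packet trailing the
 * largest acked by more than N_NACKS_BEFORE_RETX is lost), early
 * retransmit (a retransmittable packet is lost once the largest
 * retransmittable packet has been acked; sc_loss_to is armed at a quarter
 * of srtt), and loss by sent time (the packet was sent more than srtt
 * before the largest-acked packet was sent).
 */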
static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time)
{
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t largest_retx_packno, largest_lost_packno;

    largest_retx_packno = largest_retx_packet_number(ctl);
    largest_lost_packno = 0;
    assert(largest_retx_packno);    /* Otherwise, why detect losses? */
    ctl->sc_loss_to = 0;

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
                                                ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if ((packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) &&
                        largest_retx_packno <= ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            ctl->sc_loss_to =
                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
                                    ctl->sc_loss_to, packet_out->po_packno);
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
        {
            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }
    }

    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
    {
        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
        lsquic_cubic_loss(&ctl->sc_cubic);
        ctl->sc_largest_sent_at_cutback =
                                lsquic_senhist_largest(&ctl->sc_senhist);
    }
    else if (largest_lost_packno)
        /* Lost packets whose numbers are smaller than the largest packet
         * number sent at the time of the last loss event indicate the same
         * loss event.  This follows NewReno logic, see RFC 6582.
         */
        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
}


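/* Process an incoming ACK frame: validate every range against the send
 * history, take an RTT sample from the largest acked packet, credit cubic
 * for each newly acked packet, and remove acked packets from the unacked
 * queue.  Packets whose ACK frames were themselves acked are set aside
 * and processed at the end, since only the last of them matters.
 */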
int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time)
{
    struct lsquic_packets_tailq acked_acks =
                                    TAILQ_HEAD_INITIALIZER(acked_acks);
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t now = lsquic_time_now();
    lsquic_packno_t high;
    int rtt_updated = 0;
    int app_limited;
    unsigned n;

    LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                        largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    for (n = 0; n < acki->n_ranges; ++n)
        if (!lsquic_senhist_sent_range(&ctl->sc_senhist, acki->ranges[n].low,
                                                      acki->ranges[n].high))
        {
            LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
                "was never sent", acki->ranges[n].low, acki->ranges[n].high);
            return -1;
        }

    /* Peer is acking packets that have been acked already.  Schedule ACK
     * and STOP_WAITING frame to chop the range if we get two of these in
     * a row.
     */
    if (lsquic_send_ctl_smallest_unacked(ctl) > smallest_acked(acki))
        ++ctl->sc_n_stop_waiting;
    else
        ctl->sc_n_stop_waiting = 0;

    app_limited = ctl->sc_n_in_flight + 3 /* This is the "maximum
               burst" parameter */ < lsquic_cubic_get_cwnd(&ctl->sc_cubic);

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out && packet_out->po_packno <= largest_acked(acki);
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (!in_acked_range(acki, packet_out->po_packno))
            continue;
        ctl->sc_largest_acked_packno    = packet_out->po_packno;
        ctl->sc_largest_acked_sent_time = packet_out->po_sent;
        if (packet_out->po_packno == largest_acked(acki))
        {
            take_rtt_sample(ctl, packet_out, ack_recv_time, acki->lack_delta);
            ++rtt_updated;
        }
        lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent,
                                                            app_limited);
        LSQ_DEBUG("Got ACK for packet %"PRIu64", remove from unacked queue",
            packet_out->po_packno);
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        lsquic_packet_out_ack_streams(packet_out);
        if ((ctl->sc_flags & SC_NSTP) &&
                    (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
            TAILQ_INSERT_TAIL(&acked_acks, packet_out, po_next);
        else
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        assert(ctl->sc_n_in_flight);
        --ctl->sc_n_in_flight;
    }

    if (rtt_updated)
    {
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

    if (send_ctl_first_unacked_retx_packet(ctl))
    {
        send_ctl_detect_losses(ctl, ack_recv_time);
        if (send_ctl_first_unacked_retx_packet(ctl))
            set_retx_alarm(ctl);
        else
        {
            LSQ_DEBUG("All retransmittable packets lost: clear alarm");
            lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
        }
    }
    else
    {
        LSQ_DEBUG("No unacked retransmittable packets: clear retx alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    }
    lsquic_send_ctl_sanity_check(ctl);

    /* Processing of packets that contain acked ACK frames is deferred because
     * we only need to process one of them: the last one, which we know to
     * contain the largest value.
     */
    packet_out = TAILQ_LAST(&acked_acks, lsquic_packets_tailq);
    if (packet_out)
    {
        high = ctl->sc_conn_pub->lconn->cn_pf->pf_parse_ack_high(
                                packet_out->po_data, packet_out->po_data_sz);
        if (high > ctl->sc_largest_ack2ed)
            ctl->sc_largest_ack2ed = high;
        do
        {
            next = TAILQ_PREV(packet_out, lsquic_packets_tailq, po_next);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
        while ((packet_out = next));
    }

    return 0;
}


lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

#ifndef NDEBUG
    if ((ctl->sc_senhist.sh_flags & SH_REORDER) &&
                            !TAILQ_EMPTY(&ctl->sc_unacked_packets))
    {
        lsquic_packno_t smallest_unacked = UINT64_MAX;
        TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
            if (packet_out->po_packno < smallest_unacked)
                smallest_unacked = packet_out->po_packno;
        assert(smallest_unacked < UINT64_MAX);
        return smallest_unacked;
    }
    else
#endif
    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
        return packet_out->po_packno;
    else
        return lsquic_senhist_largest(&ctl->sc_senhist) + 1;
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
        }
        return lost_packet;
    }
    else
        return NULL;
}


static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    return ++ctl->sc_cur_packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        --ctl->sc_n_scheduled;
    }
    assert(0 == ctl->sc_n_scheduled);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        --ctl->sc_n_in_flight;
    }
    assert(0 == ctl->sc_n_in_flight);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
    }
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
}


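/* Whether another packet may be scheduled.  Without pacing, this is a
 * simple congestion window check.  With pacing, the pacer must also
 * agree; if it does not, the connection is placed on the engine's
 * advisory tick queue, presumably so that it is ticked again when the
 * pacer's next scheduling time arrives.
 */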
#ifndef NDEBUG
__attribute__((weak))
#endif
int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
{
    const unsigned n_out = ctl->sc_n_scheduled + ctl->sc_n_in_flight;
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic))
            return 0;
        if (pacer_can_schedule(&ctl->sc_pacer, n_out))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer));
        }
        return 0;
    }
    else
        return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
}


static void
send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
            n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        }
        break;
    case EXFI_LAST:
        packet_out = send_ctl_last_unacked_retx_packet(ctl);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out);
        else
            n_resubmitted = 0;
        break;
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                    filter_type2str[filter], n_resubmitted);
}


void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    send_ctl_expire(ctl, EXFI_ALL);
    lsquic_send_ctl_sanity_check(ctl);
}


#ifndef NDEBUG
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    unsigned count;

    assert(!send_ctl_first_unacked_retx_packet(ctl) ||
                    lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));
    if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
    {
        assert(send_ctl_first_unacked_retx_packet(ctl));
        assert(lsquic_time_now() < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY);
    }

    count = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        ++count;
    assert(count == ctl->sc_n_in_flight);
}


#endif


void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    ++ctl->sc_n_scheduled;
}


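/* Pop the next scheduled packet.  After consecutive RTOs, only
 * sc_next_limit non-ACK packets may be sent (the limit is set to 2 by the
 * RTO handler; sc_n_consec_rtos is cleared when an ACK updates the RTT).
 * Packets marked PO_REPACKNO are assigned a fresh packet number just
 * before sending.
 */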
lsquic_packet_out_t *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;

    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    if (!packet_out)
        return NULL;

    if (ctl->sc_n_consec_rtos &&
                    !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
    {
        if (ctl->sc_next_limit)
            --ctl->sc_next_limit;
        else
            return NULL;
    }

    if (packet_out->po_flags & PO_REPACKNO)
    {
        update_for_resending(ctl, packet_out);
        packet_out->po_flags &= ~PO_REPACKNO;
    }

    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    --ctl->sc_n_scheduled;
    return packet_out;
}


void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    ++ctl->sc_n_scheduled;
    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_delayed;
#endif
}


int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits,
                                                        unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;

    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
                    ctl->sc_conn_pub->packet_out_malo,
                    !(ctl->sc_flags & SC_TCID0), ctl->sc_pack_size, bits,
                    ctl->sc_ver_neg->vn_tag, NULL);
    if (!packet_out)
        return NULL;

    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen, this is why this check is performed at
         * this level and not lower, before the packet is actually allocated.
         */
        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
            lsquic_packet_out_avail(packet_out), ctl->sc_pack_size);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        return NULL;
    }

    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    bits = lsquic_send_ctl_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    packet_out->po_packno = send_ctl_next_packno(ctl);
    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
    return packet_out;
}


/* Do not use for STREAM frames
 */
lsquic_packet_out_t *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
                                      unsigned need_at_least, int *is_err)
{
    lsquic_packet_out_t *packet_out;
    unsigned n_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
    {
        *is_err = 0;
        return NULL;
    }

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (packet_out)
    {
        if (ctl->sc_flags & SC_PACE)
        {
            n_out = ctl->sc_n_in_flight + ctl->sc_n_scheduled;
            pacer_packet_scheduled(&ctl->sc_pacer, n_out,
                send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
        }
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }
    else
        *is_err = 1;
    return packet_out;
}


static lsquic_packet_out_t *
send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                      unsigned need_at_least, const lsquic_stream_t *stream)
{
    lsquic_packet_out_t *packet_out;
    unsigned n_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && lsquic_packet_out_avail(packet_out) >= need_at_least
        && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM))
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
        return NULL;

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (!packet_out)
        return NULL;

    if (ctl->sc_flags & SC_PACE)
    {
        n_out = ctl->sc_n_in_flight + ctl->sc_n_scheduled;
        pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    lsquic_send_ctl_scheduled_one(ctl, packet_out);
    return packet_out;
}


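/* Prepare a packet for resending: assign it a fresh packet number, restore
 * the version tag if version negotiation is still underway, and chop off
 * the regenerated-frames region (e.g., an ACK at the start of the packet),
 * since those frames will be generated anew.
 */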
static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{
    lsquic_packno_t oldno, packno;

    /* When the packet is resent, it uses the same number of bytes to encode
     * the packet number as the original packet.  This follows the reference
     * implementation.
     */
    oldno = packet_out->po_packno;
    packno = send_ctl_next_packno(ctl);

    packet_out->po_frame_types &= ~QFRAME_REGEN_MASK;
    assert(packet_out->po_frame_types);
    packet_out->po_packno = packno;

    if (ctl->sc_ver_neg->vn_tag)
    {
        assert(packet_out->po_flags & PO_VERSION);  /* It can only disappear */
        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
    }

    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
    /* TODO: in Q038 and later, we can simply replace the ACK with NUL bytes
     * representing PADDING frame instead of doing memmove and adjusting
     * offsets.
     */
    if (packet_out->po_regen_sz)
        lsquic_packet_out_chop_regen(packet_out);
    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
}


/* A droppable hello packet is a packet that contains a part of the hello
 * message after the handshake has been completed.
 */
static int
droppable_hello_packet (const lsquic_send_ctl_t *ctl,
                                    const lsquic_packet_out_t *packet_out)
{
    return 0    /* TODO: the server must be able to resend HELLO packets,
                 * so for now, do not discard any HELLO packets.
                 */
        && (packet_out->po_flags & PO_HELLO)
        && (ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE);
}


unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    unsigned n = 0;

    while (lsquic_send_ctl_can_send(ctl) &&
                                (packet_out = send_ctl_next_lost(ctl)))
    {
        if ((packet_out->po_regen_sz < packet_out->po_data_sz)
                            && !droppable_hello_packet(ctl, packet_out))
        {
            ++n;
            update_for_resending(ctl, packet_out);
            lsquic_send_ctl_scheduled_one(ctl, packet_out);
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from unacked queue",
                packet_out->po_packno);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
    }

    if (n)
        LSQ_DEBUG("rescheduled %u packets", n);

    return n;
}


void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
{
    if (tcid0)
    {
        LSQ_INFO("set TCID flag");
        ctl->sc_flags |=  SC_TCID0;
    }
    else
    {
        LSQ_INFO("unset TCID flag");
        ctl->sc_flags &= ~SC_TCID0;
    }
}


/* The controller elides STREAM frames of stream `stream_id' from scheduled
 * and buffered packets.  If a packet becomes empty as a result, it is
 * dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and lost packets' reset stream frames will be
 * elided in due time.
 */
void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned n;

    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
                lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
                assert(ctl->sc_n_scheduled);
                --ctl->sc_n_scheduled;
            }
        }
    }

    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            /* Grab the next element before a possible removal below: */
            next = TAILQ_NEXT(packet_out, po_next);
            if (!(packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM)))
                continue;
            lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
                             packet_out, po_next);
                --ctl->sc_buffered_packets[n].bpq_count;
                lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
                LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u",
                          n, ctl->sc_buffered_packets[n].bpq_count);
            }
        }
    }
}


/* Check whether any packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched().  These are the delayed data packets.
 */
#ifndef NDEBUG
__attribute__((weak))
#endif
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
            return 1;
    return 0;
}


#ifndef NDEBUG
static void
send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
                                const struct lsquic_packets_tailq *tailq)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n_packets;
    char *buf;
    size_t bufsz;
    int off;

    n_packets = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
        ++n_packets;

    if (n_packets == 0)
    {
        LSQ_DEBUG("%s: [<empty set>]", prefix);
        return;
    }

    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
    buf = malloc(bufsz);
    if (!buf)
    {
        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
        return;
    }

    off = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
    {
        if (off)
            buf[off++] = ' ';
        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
    }

    LSQ_DEBUG("%s: [%s]", prefix, buf);
    free(buf);
}


#define LOG_PACKET_Q(queue, prefix) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, prefix, queue);                          \
} while (0)
#else
#define LOG_PACKET_Q(queue, prefix)
#endif


int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out, *next;
#ifndef NDEBUG
    int pre_squeeze_logged = 0;
#endif

    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_regen_sz < packet_out->po_data_sz
                            && !droppable_hello_packet(ctl, packet_out))
        {
            if (packet_out->po_flags & PO_ENCRYPTED)
            {
                ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
                                                    packet_out->po_enc_data);
                packet_out->po_enc_data = NULL;
                packet_out->po_flags &= ~PO_ENCRYPTED;
            }
        }
        else
        {
#ifndef NDEBUG
            /* Log the whole list before we squeeze for the first time */
            if (!pre_squeeze_logged++)
                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "unacked packets before squeezing");
#endif
            TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
            assert(ctl->sc_n_scheduled);
            --ctl->sc_n_scheduled;
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
    }

#ifndef NDEBUG
    if (pre_squeeze_logged)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "unacked packets after squeezing");
    else if (ctl->sc_n_scheduled > 0)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
#endif

    return ctl->sc_n_scheduled > 0;
}


void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out;

    assert(ctl->sc_n_scheduled > 0);    /* Otherwise, why is this called? */
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        packet_out->po_flags |= PO_REPACKNO;
}


void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *ack_packet;

    assert(ctl->sc_n_scheduled > 1);    /* Otherwise, why is this called? */
    ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
}


void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    const unsigned n = ctl->sc_n_scheduled;
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        --ctl->sc_n_scheduled;
    }
    assert(0 == ctl->sc_n_scheduled);
    LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 1 ? "s" : "");
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum buf_packet_type
lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
                                            const lsquic_stream_t *stream)
{
    const lsquic_stream_t *other_stream;
    struct lsquic_hash_elem *el;
    struct lsquic_hash *all_streams;

    all_streams = ctl->sc_conn_pub->all_streams;
    for (el = lsquic_hash_first(all_streams); el;
                                     el = lsquic_hash_next(all_streams))
    {
        other_stream = lsquic_hashelem_getdata(el);
        if (other_stream != stream
              && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
                && !lsquic_stream_is_critical(other_stream)
                  && other_stream->sm_priority < stream->sm_priority)
            return BPT_OTHER_PRIO;
    }
    return BPT_HIGHEST_PRIO;
}


static enum buf_packet_type
send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
                                        const struct lsquic_stream *stream)
{
    if (ctl->sc_cached_bpt.stream_id != stream->id)
    {
        ctl->sc_cached_bpt.stream_id = stream->id;
        ctl->sc_cached_bpt.packet_type =
                                lsquic_send_ctl_determine_bpt(ctl, stream);
    }
    return ctl->sc_cached_bpt.packet_type;
}


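/* How many packets to allow in a buffered queue.  For the highest-priority
 * queue, this is the remaining congestion-window space when that exceeds
 * MAX_BPQ_COUNT; in all other cases, it is MAX_BPQ_COUNT.
 */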
static unsigned
send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
                                        enum buf_packet_type packet_type)
{
    unsigned count;

    switch (packet_type)
    {
    case BPT_OTHER_PRIO:
        return MAX_BPQ_COUNT;
    case BPT_HIGHEST_PRIO:
    default: /* clang does not complain about absence of `default'... */
        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight;
        if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic))
        {
            count = lsquic_cubic_get_cwnd(&ctl->sc_cubic) - count;
            if (count > MAX_BPQ_COUNT)
                return count;
        }
        return MAX_BPQ_COUNT;
    }
}


static lsquic_packet_out_t *
send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
                enum buf_packet_type packet_type, unsigned need_at_least,
                                        const struct lsquic_stream *stream)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
    if (packet_out
        && lsquic_packet_out_avail(packet_out) >= need_at_least
        && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM))
    {
        return packet_out;
    }

    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
        return NULL;

    bits = lsquic_send_ctl_guess_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
    ++packet_q->bpq_count;
    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
              packet_type, packet_q->bpq_count);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                unsigned need_at_least, const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;

    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return send_ctl_get_packet_for_stream(ctl, need_at_least, stream);
    else
    {
        packet_type = send_ctl_lookup_bpt(ctl, stream);
        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
                                            stream);
    }
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
{
    lsquic_packno_t smallest_unacked;
    unsigned n_in_flight;

    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
    n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    return calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
                                                            n_in_flight);
}


enum lsquic_packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
{
    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_calc_packno_bits(ctl);
    else
        return lsquic_send_ctl_guess_packno_bits(ctl);
}


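/* Buffered packets are allocated using a guessed packet number length
 * (see lsquic_send_ctl_guess_packno_bits()).  By the time such a packet
 * is scheduled, the actual header may need to be larger, and its payload
 * may no longer fit; the excess bytes are then moved into a new packet
 * inserted right after it.
 */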
static int
split_buffered_packet (lsquic_send_ctl_t *ctl,
        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
        enum lsquic_packno_bits bits, unsigned excess_bytes)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *new_packet_out;

    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);

    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0);
    if (!new_packet_out)
        return -1;

    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
                  new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
    {
        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
                           po_next);
        ++packet_q->bpq_count;
        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        return 0;
    }
    else
    {
        lsquic_packet_out_destroy(new_packet_out, ctl->sc_enpub);
        return -1;
    }
}


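/* Move buffered packets to the scheduled queue, as long as the congestion
 * window allows.  If the real packet number requires a longer header than
 * the guessed one, the difference may have to be split off: for example,
 * if the buffered packet was created with a two-byte packet number, the
 * real number needs four bytes, and only one byte of payload space is
 * free, one excess byte is moved into a new packet.
 */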
int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
                                            enum buf_packet_type packet_type)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    unsigned used, excess;

    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
    const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
    const unsigned need = packno_bits2len(bits);

    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
                                            lsquic_send_ctl_can_send(ctl))
    {
        if (bits != lsquic_packet_out_packno_bits(packet_out))
        {
            used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out));
            if (need > used
                && need - used > lsquic_packet_out_avail(packet_out))
            {
                excess = need - used - lsquic_packet_out_avail(packet_out);
                if (0 != split_buffered_packet(ctl, packet_type,
                                               packet_out, bits, excess))
                {
                    return -1;
                }
            }
        }
        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
        --packet_q->bpq_count;
        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        packet_out->po_packno = send_ctl_next_packno(ctl);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    return 0;
}


int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
                             const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;
    struct buf_packet_q *packet_q;
    lsquic_packet_out_t *packet_out;
    const struct parse_funcs *pf;

    pf = ctl->sc_conn_pub->lconn->cn_pf;
    packet_type = send_ctl_lookup_bpt(ctl, stream);
    packet_q = &ctl->sc_buffered_packets[packet_type];

    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
                          lsquic_packets_tailq, po_next)
        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
            return 0;

    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (0 == packet_out->po_sent
            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
        {
            return 0;
        }

    return -1;
}


size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n;
    size_t size;
    const struct lsquic_packets_tailq queues[] = {
        ctl->sc_scheduled_packets,
        ctl->sc_unacked_packets,
        ctl->sc_lost_packets,
        ctl->sc_buffered_packets[0].bpq_packets,
        ctl->sc_buffered_packets[1].bpq_packets,
    };

    size = sizeof(*ctl);

    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
        TAILQ_FOREACH(packet_out, &queues[n], po_next)
            size += lsquic_packet_out_mem_used(packet_out);

    return size;
}