/* lsquic_send_ctl.c revision 16a9b66a */
/* Copyright (c) 2017 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_alarmset.h"
#include "lsquic_packet_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_hash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3


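/* A retransmission mode determines how the RETX alarm is calculated and
 * what action is taken when it rings.  The modes are tried in the order
 * they are listed here: handshake retransmission while handshake packets
 * are outstanding, then loss-based retransmission, then up to two tail
 * loss probes, and finally RTO.  See get_retx_mode().
 */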
enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter);

static void
set_retx_alarm (lsquic_send_ctl_t *ctl);

static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time);

static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl);


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_LEN_2;
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_flags & PO_HELLO)
            return 1;
    return 0;
}


static enum retx_mode
get_retx_mode (lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                                    && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}


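/* Standard TCP-style retransmission delay: srtt + 4 * rttvar, clamped from
 * below by MIN_RTO_DELAY.  Before the first RTT sample arrives,
 * DEFAULT_RETX_DELAY (half a second) is used.
 */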
static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}


static void
retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum retx_mode rm;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, lsquic_time_now());
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, EXFI_ALL);
        lsquic_cubic_timeout(&ctl->sc_cubic);
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl);
    if (packet_out)
        set_retx_alarm(ctl);
    lsquic_send_ctl_sanity_check(ctl);
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, unsigned short pack_size)
{
    unsigned i;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_pack_size = pack_size;
    ctl->sc_conn_pub = conn_pub;
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist);
    lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID);
    if (ctl->sc_flags & SC_PACE)
        pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID, 100000);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
}


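/* Exponential backoff: the base retransmission delay is doubled for each
 * consecutive RTO, with the number of doublings capped at MAX_RTO_BACKOFFS.
 */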
static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}


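/* Tail loss probe delay.  With more than one packet in flight, the probe
 * fires after max(10 ms, 2 * srtt).  With a single packet outstanding, an
 * extra cushion is used: srtt + srtt / 2 + MIN_RTO_DELAY, but no less than
 * 2 * srtt.
 */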
static lsquic_time_t
calculate_tlp_delay (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (ctl->sc_n_in_flight_all > 1)
    {
        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }
    else
    {
        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }

    return delay;
}


static void
set_retx_alarm (lsquic_send_ctl_t *ctl)
{
    enum retx_mode rm;
    lsquic_time_t delay, now;

    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets));

    now = lsquic_time_now();

    rm = get_retx_mode(ctl);
    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
    /* [draft-iyengar-quic-loss-recovery-01]:
     *
     *  if (handshake packets are outstanding):
     *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
     *      handshake_count++;
     */
        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
        if (delay)
        {
            delay += delay / 2;
            if (10000 > delay)
                delay = 10000;
        }
        else
            delay = 150000;
        delay <<= ctl->sc_n_hsk;
        ++ctl->sc_n_hsk;
        break;
    case RETX_MODE_LOSS:
        delay = ctl->sc_loss_to;
        break;
    case RETX_MODE_TLP:
        delay = calculate_tlp_delay(ctl);
        break;
    case RETX_MODE_RTO:
        /* Base RTO on the first unacked packet, following reference
         * implementation.
         */
        delay = calculate_packet_rto(ctl);
        break;
    }

    if (delay > MAX_RTO_DELAY)
        delay = MAX_RTO_DELAY;

    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
    lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay);
}


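/* The connection is in recovery if the largest ACKed packet number does
 * not exceed the largest packet number that had been sent when the
 * congestion window was last cut back.
 */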
static int
send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
{
    return ctl->sc_largest_acked_packno
        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
}


static int
send_ctl_in_slow_start (lsquic_send_ctl_t *ctl)
{
    return lsquic_cubic_in_slow_start(&ctl->sc_cubic);
}


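/* Callback used by the pacer.  Bandwidth is estimated as one congestion
 * window per smoothed RTT; the pacing rate is twice that in slow start,
 * equal to it in recovery, and 1.25 times it otherwise.  The return value
 * is the time needed to transfer one full-sized packet at that rate.
 */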
static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    uint64_t bandwidth, pacing_rate;
    lsquic_time_t srtt, tx_time;
    unsigned long cwnd;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (srtt == 0)
        srtt = 50000;
    cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    bandwidth = cwnd * 1000000 / srtt;
    if (send_ctl_in_slow_start(ctl))
        pacing_rate = bandwidth * 2;
    else if (send_ctl_in_recovery(ctl))
        pacing_rate = bandwidth;
    else
        pacing_rate = bandwidth + bandwidth / 4;

    tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate;
    LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %lu; bandwidth: "
        "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl),
        send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time);
    return tx_time;
}


static void
send_ctl_unacked_append (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next);
    ctl->sc_bytes_unacked_all += lsquic_packet_out_total_sz(packet_out);
    ctl->sc_n_in_flight_all  += 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx += lsquic_packet_out_total_sz(packet_out);
        ++ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_unacked_remove (struct lsquic_send_ctl *ctl,
                     struct lsquic_packet_out *packet_out, unsigned packet_sz)
{
    TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
    assert(ctl->sc_bytes_unacked_all >= packet_sz);
    ctl->sc_bytes_unacked_all -= packet_sz;
    ctl->sc_n_in_flight_all  -= 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx -= packet_sz;
        --ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl,
                      struct lsquic_packet_out *packet_out)
{
    packet_out->po_flags |= PO_SCHED;
    ++ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled += lsquic_packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


static void
send_ctl_sched_append (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_prepend (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_remove (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    packet_out->po_flags &= ~PO_SCHED;
    assert(ctl->sc_n_scheduled);
    --ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled -= lsquic_packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


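/* Account for a sent packet: add its number to the sent history and the
 * packet itself to the unacked queue.  If the packet is retransmittable,
 * make sure the RETX alarm is set.  `account' is nonzero when the packet
 * was counted in sc_bytes_out by lsquic_send_ctl_next_packet_to_send();
 * those bytes are released here, as the unacked counters take over.
 */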
470lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
471                             struct lsquic_packet_out *packet_out, int account)
472{
473    char frames[lsquic_frame_types_str_sz];
474    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
475        packet_out->po_packno, lsquic_frame_types_to_str(frames,
476            sizeof(frames), packet_out->po_frame_types));
477    if (account)
478        ctl->sc_bytes_out -= lsquic_packet_out_total_sz(packet_out);
479    lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno);
480    send_ctl_unacked_append(ctl, packet_out);
481    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
482    {
483        if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
484            set_retx_alarm(ctl);
485        if (ctl->sc_n_in_flight_retx == 1)
486            ctl->sc_flags |= SC_WAS_QUIET;
487    }
488    /* TODO: Do we really want to use those for RTT info? Revisit this. */
489    /* Hold on to packets that are not retransmittable because we need them
490     * to sample RTT information.  They are released when ACK is received.
491     */
492#if LSQUIC_SEND_STATS
493    ++ctl->sc_stats.n_total_sent;
494#endif
495    lsquic_send_ctl_sanity_check(ctl);
496    return 0;
497}
498
499
500static void
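/* Take an RTT sample based on the largest newly ACKed packet.  The sample
 * is used only if this packet is newer than the one the last sample came
 * from and the peer's reported ACK delay (lack_delta) is smaller than the
 * measured RTT; the delay is passed to the RTT estimator along with the
 * measurement.
 */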
501take_rtt_sample (lsquic_send_ctl_t *ctl,
502                 lsquic_time_t now, lsquic_time_t lack_delta)
503{
504    const lsquic_packno_t packno = ctl->sc_largest_acked_packno;
505    const lsquic_time_t sent = ctl->sc_largest_acked_sent_time;
506    const lsquic_time_t measured_rtt = now - sent;
507    if (packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
508    {
509        ctl->sc_max_rtt_packno = packno;
510        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt, lack_delta);
511        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
512            "new srtt: %"PRIu64, packno, measured_rtt, lack_delta,
513            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
514    }
515}
516
517
518static void
519send_ctl_release_enc_data (struct lsquic_send_ctl *ctl,
520                                        struct lsquic_packet_out *packet_out)
521{
522    ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
523                                            packet_out->po_enc_data);
524    packet_out->po_flags &= ~PO_ENCRYPTED;
525    packet_out->po_enc_data = NULL;
526}
527
528
529/* Returns true if packet was rescheduled, false otherwise.  In the latter
530 * case, you should not dereference packet_out after the function returns.
531 */
532static int
533send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
534                                            lsquic_packet_out_t *packet_out)
535{
536    unsigned packet_sz;
537
538    assert(ctl->sc_n_in_flight_all);
539    packet_sz = lsquic_packet_out_sent_sz(packet_out);
540    send_ctl_unacked_remove(ctl, packet_out, packet_sz);
541    if (packet_out->po_flags & PO_ENCRYPTED)
542        send_ctl_release_enc_data(ctl, packet_out);
543    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
544    {
545        ctl->sc_flags |= SC_LOST_ACK;
546        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
547    }
548    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
549    {
550        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
551                                                    packet_out->po_packno);
552        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
553        return 1;
554    }
555    else
556    {
557        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
558                                                    packet_out->po_packno);
559        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
560        return 0;
561    }
562}
563
564
565static lsquic_packno_t
566largest_retx_packet_number (const lsquic_send_ctl_t *ctl)
567{
568    const lsquic_packet_out_t *packet_out;
569    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
570                                                lsquic_packets_tailq, po_next)
571    {
572        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
573            return packet_out->po_packno;
574    }
575    return 0;
576}
577
578
579static void
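/* Loss detection applies three checks to each unacked packet up to the
 * largest ACKed packet: (1) FACK-style loss, when the packet trails the
 * largest ACKed packet number by more than N_NACKS_BEFORE_RETX; (2) early
 * retransmit, when the newest outstanding retransmittable packet is no
 * newer than the largest ACKed packet, which also arms a loss timer of
 * srtt / 4; and (3) loss by sent time, when the packet was sent more than
 * srtt before the largest ACKed packet.  A detected loss triggers a cubic
 * loss event unless it belongs to the same loss episode as a previous one
 * (NewReno logic, RFC 6582).
 */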
580send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time)
581{
582    lsquic_packet_out_t *packet_out, *next;
583    lsquic_packno_t largest_retx_packno, largest_lost_packno;
584
585    largest_retx_packno = largest_retx_packet_number(ctl);
586    largest_lost_packno = 0;
587    ctl->sc_loss_to = 0;
588
589    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
590            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
591                packet_out = next)
592    {
593        next = TAILQ_NEXT(packet_out, po_next);
594
595        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
596                                                ctl->sc_largest_acked_packno)
597        {
598            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
599                                                    packet_out->po_packno);
600            largest_lost_packno = packet_out->po_packno;
601            (void) send_ctl_handle_lost_packet(ctl, packet_out);
602            continue;
603        }
604
605        if (largest_retx_packno
606            && (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
607            && largest_retx_packno <= ctl->sc_largest_acked_packno)
608        {
609            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
610                                                    packet_out->po_packno);
611            largest_lost_packno = packet_out->po_packno;
612            ctl->sc_loss_to =
613                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
614            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
615                                    ctl->sc_loss_to, packet_out->po_packno);
616            (void) send_ctl_handle_lost_packet(ctl, packet_out);
617            continue;
618        }
619
620        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
621                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
622        {
623            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
624                                                    packet_out->po_packno);
625            if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
626                largest_lost_packno = packet_out->po_packno;
627            else { /* don't count it as a loss */; }
628            (void) send_ctl_handle_lost_packet(ctl, packet_out);
629            continue;
630        }
631    }
632
633    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
634    {
635        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
636            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
637        lsquic_cubic_loss(&ctl->sc_cubic);
638        if (ctl->sc_flags & SC_PACE)
639            pacer_loss_event(&ctl->sc_pacer);
640        ctl->sc_largest_sent_at_cutback =
641                                lsquic_senhist_largest(&ctl->sc_senhist);
642    }
643    else if (largest_lost_packno)
644        /* Lost packets whose numbers are smaller than the largest packet
645         * number sent at the time of the last loss event indicate the same
646         * loss event.  This follows NewReno logic, see RFC 6582.
647         */
648        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
649            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
650}
651
652
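/* Process an incoming ACK frame.  The ACK is first validated against the
 * sent history.  The unacked queue and the ACK ranges are then walked in
 * lockstep, removing newly ACKed packets and feeding each one to the
 * congestion controller.  RTT is sampled from the largest ACKed packet,
 * losses are detected, and the retransmission alarm is reset or cleared.
 */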
int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time)
{
    struct lsquic_packets_tailq acked_acks =
                                    TAILQ_HEAD_INITIALIZER(acked_acks);
    const struct lsquic_packno_range *range =
                                    &acki->ranges[ acki->n_ranges - 1 ];
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t now = 0;
    lsquic_packno_t smallest_unacked;
    lsquic_packno_t ack2ed[2];
    unsigned packet_sz;
    int app_limited;
    signed char do_rtt, skip_checks;

    packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
#if __GNUC__
    __builtin_prefetch(packet_out);
#endif

#if __GNUC__
#   define UNLIKELY(cond) __builtin_expect(cond, 0)
#else
#   define UNLIKELY(cond) cond
#endif

#if __GNUC__
    if (UNLIKELY(LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)))
#endif
        LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                            largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    if (UNLIKELY(largest_acked(acki)
                                > lsquic_senhist_largest(&ctl->sc_senhist)))
    {
        LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
            "was never sent", acki->ranges[0].low, acki->ranges[0].high);
        return -1;
    }

    if (UNLIKELY(ctl->sc_flags & SC_WAS_QUIET))
    {
        ctl->sc_flags &= ~SC_WAS_QUIET;
        LSQ_DEBUG("ACK comes after a period of quiescence");
        if (!now)
            now = lsquic_time_now();
        lsquic_cubic_was_quiet(&ctl->sc_cubic, now);
    }

    if (UNLIKELY(!packet_out))
        goto no_unacked_packets;

    smallest_unacked = packet_out->po_packno;
    ack2ed[1] = 0;

    if (packet_out->po_packno > largest_acked(acki))
        goto detect_losses;

    do_rtt = 0, skip_checks = 0;
    app_limited = -1;
    do
    {
        next = TAILQ_NEXT(packet_out, po_next);
#if __GNUC__
        __builtin_prefetch(next);
#endif
        if (skip_checks)
            goto after_checks;
        /* This is faster than binary search in the normal case when the number
         * of ranges is not much larger than the number of unacked packets.
         */
        while (UNLIKELY(range->high < packet_out->po_packno))
            --range;
        if (range->low <= packet_out->po_packno)
        {
            skip_checks = range == acki->ranges;
            if (app_limited < 0)
                /* The "3 * packet size" addend is the "maximum burst"
                 * parameter.
                 */
                app_limited = send_ctl_retx_bytes_out(ctl)
                                + 3 * ctl->sc_pack_size
                                < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
            if (!now)
                now = lsquic_time_now();
  after_checks:
            packet_sz = lsquic_packet_out_sent_sz(packet_out);
            ctl->sc_largest_acked_packno    = packet_out->po_packno;
            ctl->sc_largest_acked_sent_time = packet_out->po_sent;
            send_ctl_unacked_remove(ctl, packet_out, packet_sz);
            ack2ed[!!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))]
                = packet_out->po_ack2ed;
            do_rtt |= packet_out->po_packno == largest_acked(acki);
            lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent,
                             app_limited, packet_sz);
            lsquic_packet_out_ack_streams(packet_out);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
        packet_out = next;
    }
    while (packet_out && packet_out->po_packno <= largest_acked(acki));

    if (do_rtt)
    {
        take_rtt_sample(ctl, ack_recv_time, acki->lack_delta);
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

  detect_losses:
    send_ctl_detect_losses(ctl, ack_recv_time);
    if (send_ctl_first_unacked_retx_packet(ctl))
        set_retx_alarm(ctl);
    else
    {
        LSQ_DEBUG("No retransmittable packets: clear alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    }
    lsquic_send_ctl_sanity_check(ctl);

    if ((ctl->sc_flags & SC_NSTP) && ack2ed[1] > ctl->sc_largest_ack2ed)
        ctl->sc_largest_ack2ed = ack2ed[1];

    if (ctl->sc_n_in_flight_retx == 0)
        ctl->sc_flags |= SC_WAS_QUIET;

  update_n_stop_waiting:
    if (smallest_unacked > smallest_acked(acki))
        /* Peer is acking packets that have been acked already.  Schedule ACK
         * and STOP_WAITING frame to chop the range if we get two of these in
         * a row.
         */
        ++ctl->sc_n_stop_waiting;
    else
        ctl->sc_n_stop_waiting = 0;
    lsquic_send_ctl_sanity_check(ctl);
    return 0;

  no_unacked_packets:
    smallest_unacked = lsquic_senhist_largest(&ctl->sc_senhist) + 1;
    ctl->sc_flags |= SC_WAS_QUIET;
    goto update_n_stop_waiting;
}


lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
        return packet_out->po_packno;
    else
        return lsquic_senhist_largest(&ctl->sc_senhist) + 1;
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
        }
        return lost_packet;
    }
    else
        return NULL;
}


static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    return ++ctl->sc_cur_packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
    }
    assert(0 == ctl->sc_n_scheduled);
    assert(0 == ctl->sc_bytes_scheduled);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        ctl->sc_bytes_unacked_all -= lsquic_packet_out_total_sz(packet_out);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        --ctl->sc_n_in_flight_all;
    }
    assert(0 == ctl->sc_n_in_flight_all);
    assert(0 == ctl->sc_bytes_unacked_all);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
    }
    pacer_cleanup(&ctl->sc_pacer);
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
}


static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_retx
         + ctl->sc_bytes_out;
}


int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
{
    return (ctl->sc_flags & SC_PACE)
        && !pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all);
}


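/* Sending is allowed while the number of retransmittable bytes out --
 * scheduled, unacked, and currently being sent -- is below the congestion
 * window.  When pacing, the pacer must also agree; if it does not, the
 * connection is placed on the engine's advisory tick time queue so that it
 * is ticked again when the pacer permits scheduling.
 */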
#ifndef NDEBUG
__attribute__((weak))
#endif
int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
{
    const unsigned n_out = send_ctl_retx_bytes_out(ctl);
    LSQ_DEBUG("%s: n_out: %u (unacked_retx: %u, out: %u); cwnd: %lu", __func__,
        n_out, ctl->sc_bytes_unacked_retx, ctl->sc_bytes_out,
        lsquic_cubic_get_cwnd(&ctl->sc_cubic));
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic))
            return 0;
        if (pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer));
        }
        return 0;
    }
    else
        return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
}


static void
send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
            n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        }
        break;
    case EXFI_LAST:
        packet_out = send_ctl_last_unacked_retx_packet(ctl);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out);
        else
            n_resubmitted = 0;
        break;
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                    filter_type2str[filter], n_resubmitted);
}


void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    send_ctl_expire(ctl, EXFI_ALL);
    lsquic_send_ctl_sanity_check(ctl);
}


#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    unsigned count, bytes;

    assert(!send_ctl_first_unacked_retx_packet(ctl) ||
                    lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));
    if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
    {
        assert(send_ctl_first_unacked_retx_packet(ctl));
        assert(lsquic_time_now() < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY);
    }

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
    {
        bytes += lsquic_packet_out_sent_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_in_flight_all);
    assert(bytes == ctl->sc_bytes_unacked_all);

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
    {
        assert(packet_out->po_flags & PO_SCHED);
        bytes += lsquic_packet_out_total_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_scheduled);
    assert(bytes == ctl->sc_bytes_scheduled);
}


#endif


void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    if (ctl->sc_flags & SC_PACE)
    {
        unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled;
        pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    send_ctl_sched_append(ctl, packet_out);
}


lsquic_packet_out_t *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;

    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    if (!packet_out)
        return NULL;

    if (ctl->sc_n_consec_rtos &&
                    !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
    {
        if (ctl->sc_next_limit)
            --ctl->sc_next_limit;
        else
            return NULL;
    }

    if (packet_out->po_flags & PO_REPACKNO)
    {
        update_for_resending(ctl, packet_out);
        packet_out->po_flags &= ~PO_REPACKNO;
    }

    send_ctl_sched_remove(ctl, packet_out);
    ctl->sc_bytes_out += lsquic_packet_out_total_sz(packet_out);
    return packet_out;
}


void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    send_ctl_sched_prepend(ctl, packet_out);
    ctl->sc_bytes_out -= lsquic_packet_out_total_sz(packet_out);
    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_delayed;
#endif
}


int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits,
                                                        unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;

    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
                    ctl->sc_conn_pub->packet_out_malo,
                    !(ctl->sc_flags & SC_TCID0), ctl->sc_pack_size, bits,
                    ctl->sc_ver_neg->vn_tag, NULL);
    if (!packet_out)
        return NULL;

    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen, this is why this check is performed at
         * this level and not lower, before the packet is actually allocated.
         */
        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
            lsquic_packet_out_avail(packet_out), ctl->sc_pack_size);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        return NULL;
    }

    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    bits = lsquic_send_ctl_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    packet_out->po_packno = send_ctl_next_packno(ctl);
    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
    return packet_out;
}


/* Do not use for STREAM frames
 */
lsquic_packet_out_t *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
                                      unsigned need_at_least, int *is_err)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
    {
        *is_err = 0;
        return NULL;
    }

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (packet_out)
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    else
        *is_err = 1;
    return packet_out;
}


static lsquic_packet_out_t *
send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                      unsigned need_at_least, const lsquic_stream_t *stream)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least
        && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM))
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
        return NULL;

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (!packet_out)
        return NULL;

    lsquic_send_ctl_scheduled_one(ctl, packet_out);
    return packet_out;
}


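/* Prepare a packet for resending: assign the next packet number, drop
 * frames that are regenerated on every send (those in QFRAME_REGEN_MASK),
 * and, while version negotiation is still in progress, refresh the version
 * tag.
 */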
static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{
    lsquic_packno_t oldno, packno;

    /* When the packet is resent, it uses the same number of bytes to encode
     * the packet number as the original packet.  This follows the reference
     * implementation.
     */
    oldno = packet_out->po_packno;
    packno = send_ctl_next_packno(ctl);

    packet_out->po_flags &= ~PO_SENT_SZ;
    packet_out->po_frame_types &= ~QFRAME_REGEN_MASK;
    assert(packet_out->po_frame_types);
    packet_out->po_packno = packno;

    if (ctl->sc_ver_neg->vn_tag)
    {
        assert(packet_out->po_flags & PO_VERSION);  /* It can only disappear */
        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
    }

    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
    if (packet_out->po_regen_sz)
    {
        assert(!(packet_out->po_flags & PO_SCHED));
        lsquic_packet_out_chop_regen(packet_out);
    }
    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
}


unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    unsigned n = 0;

    while (lsquic_send_ctl_can_send(ctl) &&
                                (packet_out = send_ctl_next_lost(ctl)))
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            ++n;
            update_for_resending(ctl, packet_out);
            lsquic_send_ctl_scheduled_one(ctl, packet_out);
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from unacked queue",
                packet_out->po_packno);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
    }

    if (n)
        LSQ_DEBUG("rescheduled %u packets", n);

    return n;
}


void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
{
    if (tcid0)
    {
        LSQ_INFO("set TCID flag");
        ctl->sc_flags |=  SC_TCID0;
    }
    else
    {
        LSQ_INFO("unset TCID flag");
        ctl->sc_flags &= ~SC_TCID0;
    }
}


/* The controller elides the STREAM frames of stream `stream_id' from
 * scheduled and buffered packets.  If a packet becomes empty as a result,
 * it is dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and lost packets' reset stream frames will be
 * elided in due time.
 */
void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
{
    struct lsquic_packet_out *packet_out, *next;
    struct lsquic_packet_out *pre_dropped;
    unsigned n, adj;

    pre_dropped = NULL;
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            adj = lsquic_packet_out_elide_reset_stream_frames(packet_out,
                                                              stream_id);
            ctl->sc_bytes_scheduled -= adj;
            if (0 == packet_out->po_frame_types)
            {
                if (!pre_dropped)
                    pre_dropped = TAILQ_PREV(packet_out, lsquic_packets_tailq,
                                                                    po_next);
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                send_ctl_sched_remove(ctl, packet_out);
                lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
            }
        }
    }

    /* Need to assign new packet numbers to all packets following the first
     * dropped packet to eliminate packet number gap.
     */
    if (pre_dropped)
    {
        ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
        for (packet_out = TAILQ_NEXT(pre_dropped, po_next); packet_out;
                                packet_out = TAILQ_NEXT(packet_out, po_next))
        {
            packet_out->po_flags |= PO_REPACKNO;
            if (packet_out->po_flags & PO_ENCRYPTED)
                send_ctl_release_enc_data(ctl, packet_out);
        }
    }

    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (!(packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM)))
                continue;
            lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel buffered packet in queue #%u after eliding "
                    "frames for stream %"PRIu32, n, stream_id);
                TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
                             packet_out, po_next);
                --ctl->sc_buffered_packets[n].bpq_count;
                lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
                LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u",
                          n, ctl->sc_buffered_packets[n].bpq_count);
            }
        }
    }
}


/* Check whether any packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched() -- that is, whether there are any
 * delayed data packets.
 */
#ifndef NDEBUG
__attribute__((weak))
#endif
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
            return 1;
    return 0;
}


#ifndef NDEBUG
static void
send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
                                const struct lsquic_packets_tailq *tailq)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n_packets;
    char *buf;
    size_t bufsz;
    int off;

    n_packets = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
        ++n_packets;

    if (n_packets == 0)
    {
        LSQ_DEBUG("%s: [<empty set>]", prefix);
        return;
    }

    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
    buf = malloc(bufsz);
    if (!buf)
    {
        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
        return;
    }

    off = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
    {
        if (off)
            buf[off++] = ' ';
        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
    }

    LSQ_DEBUG("%s: [%s]", prefix, buf);
    free(buf);
}


#define LOG_PACKET_Q(queue, prefix) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, prefix, queue);                          \
} while (0)
#else
#define LOG_PACKET_Q(queue, prefix)
#endif


int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out, *next;
#ifndef NDEBUG
    int pre_squeeze_logged = 0;
#endif

    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            if (packet_out->po_flags & PO_ENCRYPTED)
                send_ctl_release_enc_data(ctl, packet_out);
        }
        else
        {
#ifndef NDEBUG
            /* Log the whole list before we squeeze for the first time */
            if (!pre_squeeze_logged++)
                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets before squeezing");
#endif
            send_ctl_sched_remove(ctl, packet_out);
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
    }

#ifndef NDEBUG
    if (pre_squeeze_logged)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets after squeezing");
    else if (ctl->sc_n_scheduled > 0)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
#endif

    return ctl->sc_n_scheduled > 0;
}


void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out;

    assert(ctl->sc_n_scheduled > 0);    /* Otherwise, why is this called? */
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        packet_out->po_flags |= PO_REPACKNO;
}


void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *ack_packet;

    assert(ctl->sc_n_scheduled > 1);    /* Otherwise, why is this called? */
    ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
}


void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    const unsigned n = ctl->sc_n_scheduled;
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
    }
    assert(0 == ctl->sc_n_scheduled);
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 1 ? "s" : "");
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum buf_packet_type
lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
                                            const lsquic_stream_t *stream)
{
    const lsquic_stream_t *other_stream;
    struct lsquic_hash_elem *el;
    struct lsquic_hash *all_streams;

    all_streams = ctl->sc_conn_pub->all_streams;
    for (el = lsquic_hash_first(all_streams); el;
                                     el = lsquic_hash_next(all_streams))
    {
        other_stream = lsquic_hashelem_getdata(el);
        if (other_stream != stream
              && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
                && !lsquic_stream_is_critical(other_stream)
                  && other_stream->sm_priority < stream->sm_priority)
            return BPT_OTHER_PRIO;
    }
    return BPT_HIGHEST_PRIO;
}


static enum buf_packet_type
send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
                                        const struct lsquic_stream *stream)
{
    if (ctl->sc_cached_bpt.stream_id != stream->id)
    {
        ctl->sc_cached_bpt.stream_id = stream->id;
        ctl->sc_cached_bpt.packet_type =
                                lsquic_send_ctl_determine_bpt(ctl, stream);
    }
    return ctl->sc_cached_bpt.packet_type;
}


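/* Upper limit on the number of packets a buffered queue may hold.  For the
 * highest-priority queue, the limit may exceed the fixed MAX_BPQ_COUNT when
 * the congestion window has room beyond the packets that are already
 * scheduled or in flight.
 */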
1569send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
1570                                        enum buf_packet_type packet_type)
1571{
1572    unsigned count;
1573
1574    switch (packet_type)
1575    {
1576    case BPT_OTHER_PRIO:
1577        return MAX_BPQ_COUNT;
1578    case BPT_HIGHEST_PRIO:
1579    default: /* clang does not complain about absence of `default'... */
1580        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx;
1581        if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size)
1582        {
1583            count -= lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
1584            if (count > MAX_BPQ_COUNT)
1585                return count;
1586        }
1587        return MAX_BPQ_COUNT;
1588    }
1589}
1590
1591
static lsquic_packet_out_t *
send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
                enum buf_packet_type packet_type, unsigned need_at_least,
                                        const struct lsquic_stream *stream)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least
        && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM))
    {
        return packet_out;
    }

    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
        return NULL;

    bits = lsquic_send_ctl_guess_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
    ++packet_q->bpq_count;
    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
              packet_type, packet_q->bpq_count);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                unsigned need_at_least, const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;

    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return send_ctl_get_packet_for_stream(ctl, need_at_least, stream);
    else
    {
        packet_type = send_ctl_lookup_bpt(ctl, stream);
        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
                                            stream);
    }
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
{
    lsquic_packno_t smallest_unacked;
    unsigned n_in_flight;

    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
    n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
    return calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
                                                            n_in_flight);
}


enum lsquic_packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
{
    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_calc_packno_bits(ctl);
    else
        return lsquic_send_ctl_guess_packno_bits(ctl);
}


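/* Split the first packet in a buffered queue in two, moving excess_bytes
 * worth of data into a new packet inserted into the queue right after the
 * original.  This is needed when a longer packet number encoding shrinks
 * the room available for the payload.
 */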
1674split_buffered_packet (lsquic_send_ctl_t *ctl,
1675        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
1676        enum lsquic_packno_bits bits, unsigned excess_bytes)
1677{
1678    struct buf_packet_q *const packet_q =
1679                                    &ctl->sc_buffered_packets[packet_type];
1680    lsquic_packet_out_t *new_packet_out;
1681
1682    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);
1683
1684    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0);
1685    if (!packet_out)
1686        return -1;
1687
1688    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
1689                  new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
1690    {
1691        lsquic_packet_out_set_packno_bits(packet_out, bits);
1692        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
1693                           po_next);
1694        ++packet_q->bpq_count;
1695        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
1696                  packet_type, packet_q->bpq_count);
1697        return 0;
1698    }
1699    else
1700    {
1701        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
1702        return -1;
1703    }
1704}
1705
1706
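/* Move buffered packets to the scheduled queue, assigning packet numbers
 * along the way.  If the packet number encoding selected now is longer
 * than the one a buffered packet was created with and the extra header
 * bytes do not fit, the packet is first split in two.
 */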
int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
                                            enum buf_packet_type packet_type)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    unsigned used, excess;

    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
    const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
    const unsigned need = packno_bits2len(bits);

    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
                                            lsquic_send_ctl_can_send(ctl))
    {
        if (bits != lsquic_packet_out_packno_bits(packet_out))
        {
            used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out));
            if (need > used
                && need - used > lsquic_packet_out_avail(packet_out))
            {
                excess = need - used - lsquic_packet_out_avail(packet_out);
                if (0 != split_buffered_packet(ctl, packet_type,
                                               packet_out, bits, excess))
                {
                    return -1;
                }
            }
        }
        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
        --packet_q->bpq_count;
        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        packet_out->po_packno = send_ctl_next_packno(ctl);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    return 0;
}


int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
                             const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;
    struct buf_packet_q *packet_q;
    lsquic_packet_out_t *packet_out;
    const struct parse_funcs *pf;

    pf = ctl->sc_conn_pub->lconn->cn_pf;
    packet_type = send_ctl_lookup_bpt(ctl, stream);
    packet_q = &ctl->sc_buffered_packets[packet_type];

    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
                          lsquic_packets_tailq, po_next)
        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
            return 0;

    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (0 == packet_out->po_sent
            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
        {
            return 0;
        }

    return -1;
}


size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n;
    size_t size;
    const struct lsquic_packets_tailq queues[] = {
        ctl->sc_scheduled_packets,
        ctl->sc_unacked_packets,
        ctl->sc_lost_packets,
        ctl->sc_buffered_packets[0].bpq_packets,
        ctl->sc_buffered_packets[1].bpq_packets,
    };

    size = sizeof(*ctl);

    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
        TAILQ_FOREACH(packet_out, &queues[n], po_next)
            size += lsquic_packet_out_mem_used(packet_out);

    return size;
}