lsquic_send_ctl.c revision 461e84d8
/* Copyright (c) 2017 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_alarmset.h"
#include "lsquic_packet_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_hash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3

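/* Retransmission modes, in order of escalation (see get_retx_mode()):
 * while the handshake is not done and handshake packets are outstanding,
 * the handshake alarm is used; then the early-retransmit loss timer, when
 * armed; then up to two Tail Loss Probes; and finally full Retransmission
 * Timeout.
 */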
enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter);

static void
set_retx_alarm (lsquic_send_ctl_t *ctl);

static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time);

static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl);


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_LEN_2;
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_flags & PO_HELLO)
            return 1;
    return 0;
}


static enum retx_mode
get_retx_mode (lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                                    && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}


static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}

static void
retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum retx_mode rm;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, lsquic_time_now());
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, EXFI_ALL);
        lsquic_cubic_timeout(&ctl->sc_cubic);
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl);
    if (packet_out)
        set_retx_alarm(ctl);
    lsquic_send_ctl_sanity_check(ctl);
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, unsigned short pack_size)
{
    unsigned i;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_pack_size = pack_size;
    ctl->sc_conn_pub = conn_pub;
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist);
    lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID);
    if (ctl->sc_flags & SC_PACE)
        pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID, 100000);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
}


static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}
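
/* Example: with a base delay of 1 second (the MIN_RTO_DELAY floor), three
 * consecutive RTOs give 1 s * 2^3 = 8 seconds.  The exponent is capped at
 * MAX_RTO_BACKOFFS, and set_retx_alarm() further clamps the result to
 * MAX_RTO_DELAY (60 seconds).
 */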


static lsquic_time_t
calculate_tlp_delay (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (ctl->sc_n_in_flight_all > 1)
    {
        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }
    else
    {
        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }

    return delay;
}
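
/* This follows the Tail Loss Probe scheme referenced by the QUIC loss
 * recovery draft (cited below in set_retx_alarm()): with more than one
 * packet in flight, the probe fires at max(2 * srtt, 10 ms); with a single
 * packet in flight, the delay is padded to max(2 * srtt, 1.5 * srtt +
 * MIN_RTO_DELAY), presumably to give a delayed ACK a chance to arrive
 * before the probe is sent.
 */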


static void
set_retx_alarm (lsquic_send_ctl_t *ctl)
{
    enum retx_mode rm;
    lsquic_time_t delay = 0, now;

    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets));

    now = lsquic_time_now();

    rm = get_retx_mode(ctl);
    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
    /* [draft-iyengar-quic-loss-recovery-01]:
     *
     *  if (handshake packets are outstanding):
     *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
     *      handshake_count++;
     */
        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
        if (delay)
        {
            delay += delay / 2;
            if (10000 > delay)
                delay = 10000;
        }
        else
            delay = 150000;
        delay <<= ctl->sc_n_hsk;
        ++ctl->sc_n_hsk;
        break;
    case RETX_MODE_LOSS:
        delay = ctl->sc_loss_to;
        break;
    case RETX_MODE_TLP:
        delay = calculate_tlp_delay(ctl);
        break;
    case RETX_MODE_RTO:
        /* Base RTO on the first unacked packet, following reference
         * implementation.
         */
        delay = calculate_packet_rto(ctl);
        break;
    }

    if (delay > MAX_RTO_DELAY)
        delay = MAX_RTO_DELAY;

    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
    lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay);
}


static int
send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
{
    return ctl->sc_largest_acked_packno
        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
}


static int
send_ctl_in_slow_start (lsquic_send_ctl_t *ctl)
{
    return lsquic_cubic_in_slow_start(&ctl->sc_cubic);
}


static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    uint64_t bandwidth, pacing_rate;
    lsquic_time_t srtt, tx_time;
    unsigned long cwnd;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (srtt == 0)
        srtt = 50000;
    cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    bandwidth = cwnd * 1000000 / srtt;
    if (send_ctl_in_slow_start(ctl))
        pacing_rate = bandwidth * 2;
    else if (send_ctl_in_recovery(ctl))
        pacing_rate = bandwidth;
    else
        pacing_rate = bandwidth + bandwidth / 4;

    tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate;
    LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %lu; bandwidth: "
        "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl),
        send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time);
    return tx_time;
}
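
/* Example: cwnd = 28,000 bytes and srtt = 100,000 usec give a bandwidth
 * estimate of 280,000 bytes/sec.  Outside of slow start and recovery the
 * pacing rate is 1.25 times that, or 350,000 bytes/sec, so a 1,400-byte
 * packet gets a transfer time of 1400 * 1000000 / 350000 = 4000 usec.
 */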


static void
send_ctl_unacked_append (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next);
    ctl->sc_bytes_unacked_all += lsquic_packet_out_total_sz(packet_out);
    ctl->sc_n_in_flight_all  += 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx += lsquic_packet_out_total_sz(packet_out);
        ++ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_unacked_remove (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    unsigned packet_sz;

    TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
    packet_sz = lsquic_packet_out_total_sz(packet_out);
    assert(ctl->sc_bytes_unacked_all >= packet_sz);
    ctl->sc_bytes_unacked_all -= packet_sz;
    ctl->sc_n_in_flight_all  -= 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx -= packet_sz;
        --ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl,
                      struct lsquic_packet_out *packet_out)
{
    packet_out->po_flags |= PO_SCHED;
    ++ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled += lsquic_packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


static void
send_ctl_sched_append (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_prepend (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_remove (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    packet_out->po_flags &= ~PO_SCHED;
    assert(ctl->sc_n_scheduled);
    --ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled -= lsquic_packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
                             struct lsquic_packet_out *packet_out, int account)
{
    char frames[lsquic_frame_types_str_sz];
    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
        packet_out->po_packno, lsquic_frame_types_to_str(frames,
            sizeof(frames), packet_out->po_frame_types));
    if (account)
        ctl->sc_bytes_out -= lsquic_packet_out_total_sz(packet_out);
    if (0 == lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno))
    {
        send_ctl_unacked_append(ctl, packet_out);
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
        {
            if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
                set_retx_alarm(ctl);
            if (ctl->sc_n_in_flight_retx == 1)
                ctl->sc_flags |= SC_WAS_QUIET;
        }
        /* TODO: Do we really want to use those for RTT info? Revisit this. */
        /* Hold on to packets that are not retransmittable because we need them
         * to sample RTT information.  They are released when ACK is received.
         */
#if LSQUIC_SEND_STATS
        ++ctl->sc_stats.n_total_sent;
#endif
        return 0;
    }
    else
        return -1;
}


static void
take_rtt_sample (lsquic_send_ctl_t *ctl, const lsquic_packet_out_t *packet_out,
                 lsquic_time_t now, lsquic_time_t lack_delta)
{
    assert(packet_out->po_sent);
    lsquic_time_t measured_rtt = now - packet_out->po_sent;
    if (packet_out->po_packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
    {
        ctl->sc_max_rtt_packno = packet_out->po_packno;
        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt, lack_delta);
        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
            "new srtt: %"PRIu64, packet_out->po_packno, measured_rtt, lack_delta,
            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
    }
}
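
/* An RTT sample is taken only when the acked packet's number exceeds that
 * of the largest packet already sampled (so a reordered, stale ACK cannot
 * feed the estimator) and the peer-reported ACK delay is smaller than the
 * measured interval, i.e. the reported delay is plausible.
 */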


/* Returns true if packet was rescheduled, false otherwise.  In the latter
 * case, you should not dereference packet_out after the function returns.
 */
static int
send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    assert(ctl->sc_n_in_flight_all);
    send_ctl_unacked_remove(ctl, packet_out);
    if (packet_out->po_flags & PO_ENCRYPTED) {
        ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
                                                packet_out->po_enc_data);
        packet_out->po_flags &= ~PO_ENCRYPTED;
        packet_out->po_enc_data = NULL;
    }
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        ctl->sc_flags |= SC_LOST_ACK;
        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
    }
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
        return 1;
    }
    else
    {
        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        return 0;
    }
}


static lsquic_packno_t
largest_retx_packet_number (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                                lsquic_packets_tailq, po_next)
    {
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out->po_packno;
    }
    return 0;
}


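/* Three loss-detection heuristics are applied to unacked packets at or
 * below the largest acked packet number: a FACK-style threshold (the
 * packet trails the largest acked by more than N_NACKS_BEFORE_RETX),
 * early retransmit (the largest retransmittable packet is at or below the
 * largest acked, so a timer of srtt/4 is armed via sc_loss_to), and a
 * time-based check (the packet was sent more than srtt before the
 * largest-acked packet).
 */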
static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time)
{
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t largest_retx_packno, largest_lost_packno;

    largest_retx_packno = largest_retx_packet_number(ctl);
    largest_lost_packno = 0;
    ctl->sc_loss_to = 0;

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
                                                ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (largest_retx_packno
            && (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            && largest_retx_packno <= ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            ctl->sc_loss_to =
                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
                                    ctl->sc_loss_to, packet_out->po_packno);
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
        {
            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
                                                    packet_out->po_packno);
            if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
                largest_lost_packno = packet_out->po_packno;
            else { /* don't count it as a loss */; }
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }
    }

    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
    {
        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
        lsquic_cubic_loss(&ctl->sc_cubic);
        if (ctl->sc_flags & SC_PACE)
            pacer_loss_event(&ctl->sc_pacer);
        ctl->sc_largest_sent_at_cutback =
                                lsquic_senhist_largest(&ctl->sc_senhist);
    }
    else if (largest_lost_packno)
        /* Lost packets whose numbers are smaller than the largest packet
         * number sent at the time of the last loss event indicate the same
         * loss event.  This follows NewReno logic, see RFC 6582.
         */
        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
}


int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time)
{
    struct lsquic_packets_tailq acked_acks =
                                    TAILQ_HEAD_INITIALIZER(acked_acks);
#if !LSQUIC_CAN_REORDER
    const struct lsquic_packno_range *range =
                                    &acki->ranges[ acki->n_ranges - 1 ];
#endif
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t now = lsquic_time_now();
    lsquic_packno_t high;
    int rtt_updated = 0;
    int app_limited;
    unsigned n;

    LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                        largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    for (n = 0; n < acki->n_ranges; ++n)
        if (!lsquic_senhist_sent_range(&ctl->sc_senhist, acki->ranges[n].low,
                                                      acki->ranges[n].high))
        {
            LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
                "was never sent", acki->ranges[n].low, acki->ranges[n].high);
            return -1;
        }

    if (ctl->sc_flags & SC_WAS_QUIET)
    {
        ctl->sc_flags &= ~SC_WAS_QUIET;
        LSQ_DEBUG("ACK comes after a period of quiescence");
        lsquic_cubic_was_quiet(&ctl->sc_cubic, now);
    }

    /* Peer is acking packets that have been acked already.  Schedule ACK
     * and STOP_WAITING frame to chop the range if we get two of these in
     * a row.
     */
    if (lsquic_send_ctl_smallest_unacked(ctl) > smallest_acked(acki))
        ++ctl->sc_n_stop_waiting;
    else
        ctl->sc_n_stop_waiting = 0;

    app_limited = send_ctl_retx_bytes_out(ctl) + 3 * ctl->sc_pack_size /* This
        is the "maximum burst" parameter */
        < lsquic_cubic_get_cwnd(&ctl->sc_cubic);

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out
#if !LSQUIC_CAN_REORDER
                       && packet_out->po_packno <= largest_acked(acki)
#endif
                                                                      ;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
#if LSQUIC_CAN_REORDER
        if (!in_acked_range(acki, packet_out->po_packno))
            continue;
#else
        /* This is faster than binary search in the normal case when the number
         * of ranges is not much larger than the number of unacked packets.
         */
        while (range->high < packet_out->po_packno)
            --range;
        if (range->low > packet_out->po_packno)
            continue;
#endif
        ctl->sc_largest_acked_packno    = packet_out->po_packno;
        ctl->sc_largest_acked_sent_time = packet_out->po_sent;
        if (packet_out->po_packno == largest_acked(acki))
        {
            take_rtt_sample(ctl, packet_out, ack_recv_time, acki->lack_delta);
            ++rtt_updated;
        }
        lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent,
                         app_limited, lsquic_packet_out_total_sz(packet_out));
        LSQ_DEBUG("Got ACK for packet %"PRIu64", remove from unacked queue",
            packet_out->po_packno);
        assert(ctl->sc_n_in_flight_all);
        send_ctl_unacked_remove(ctl, packet_out);
        lsquic_packet_out_ack_streams(packet_out);
#if __GNUC__
        __builtin_prefetch(next);
#endif
        if ((ctl->sc_flags & SC_NSTP) &&
                    (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
            TAILQ_INSERT_TAIL(&acked_acks, packet_out, po_next);
        else
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
    }

    if (rtt_updated)
    {
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

    send_ctl_detect_losses(ctl, ack_recv_time);
    if (send_ctl_first_unacked_retx_packet(ctl))
        set_retx_alarm(ctl);
    else
    {
        LSQ_DEBUG("No retransmittable packets: clear alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    }
    lsquic_send_ctl_sanity_check(ctl);

    /* Processing of packets that contain acked ACK frames is deferred because
     * we only need to process one of them: the last one, which we know to
     * contain the largest value.
     */
    packet_out = TAILQ_LAST(&acked_acks, lsquic_packets_tailq);
    if (packet_out)
    {
        high = ctl->sc_conn_pub->lconn->cn_pf->pf_parse_ack_high(
                                packet_out->po_data, packet_out->po_data_sz);
        if (high > ctl->sc_largest_ack2ed)
            ctl->sc_largest_ack2ed = high;
        do
        {
            next = TAILQ_PREV(packet_out, lsquic_packets_tailq, po_next);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
        while ((packet_out = next));
    }

    if (ctl->sc_n_in_flight_retx == 0)
        ctl->sc_flags |= SC_WAS_QUIET;

    return 0;
}


lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
        return packet_out->po_packno;
    else
        return lsquic_senhist_largest(&ctl->sc_senhist) + 1;
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
        }
        return lost_packet;
    }
    else
        return NULL;
}


static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    return ++ctl->sc_cur_packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
    }
    assert(0 == ctl->sc_n_scheduled);
    assert(0 == ctl->sc_bytes_scheduled);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        ctl->sc_bytes_unacked_all -= lsquic_packet_out_total_sz(packet_out);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        --ctl->sc_n_in_flight_all;
    }
    assert(0 == ctl->sc_n_in_flight_all);
    assert(0 == ctl->sc_bytes_unacked_all);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
    }
    pacer_cleanup(&ctl->sc_pacer);
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
}


static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_retx
         + ctl->sc_bytes_out;
}
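
/* These are the bytes counted against the congestion window: bytes in
 * scheduled packets, retransmittable bytes in flight, and bytes in packets
 * currently checked out for sending (sc_bytes_out is incremented in
 * lsquic_send_ctl_next_packet_to_send() and credited back when a packet is
 * reported sent or delayed).
 */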


int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
{
    return (ctl->sc_flags & SC_PACE)
        && !pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all);
}


#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
{
    const unsigned n_out = send_ctl_retx_bytes_out(ctl);
    LSQ_DEBUG("%s: n_out: %u (unacked_retx: %u, out: %u); cwnd: %lu", __func__,
        n_out, ctl->sc_bytes_unacked_retx, ctl->sc_bytes_out,
        lsquic_cubic_get_cwnd(&ctl->sc_cubic));
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic))
            return 0;
        if (pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer));
        }
        return 0;
    }
    else
        return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
}

static void
send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted = 0;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
            n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        }
        break;
    case EXFI_LAST:
        packet_out = send_ctl_last_unacked_retx_packet(ctl);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out);
        else
            n_resubmitted = 0;
        break;
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                    filter_type2str[filter], n_resubmitted);
}


void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    send_ctl_expire(ctl, EXFI_ALL);
    lsquic_send_ctl_sanity_check(ctl);
}


#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    unsigned count, sched_bytes;

    assert(!send_ctl_first_unacked_retx_packet(ctl) ||
                    lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));
    if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
    {
        assert(send_ctl_first_unacked_retx_packet(ctl));
        assert(lsquic_time_now() < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY);
    }

    count = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        ++count;
    assert(count == ctl->sc_n_in_flight_all);

    count = 0, sched_bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
    {
        assert(packet_out->po_flags & PO_SCHED);
        sched_bytes += lsquic_packet_out_total_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_scheduled);
    assert(sched_bytes == ctl->sc_bytes_scheduled);
}


#endif

void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    if (ctl->sc_flags & SC_PACE)
    {
        unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled;
        pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    send_ctl_sched_append(ctl, packet_out);
}


lsquic_packet_out_t *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;

    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    if (!packet_out)
        return NULL;

    if (ctl->sc_n_consec_rtos &&
                    !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
    {
        if (ctl->sc_next_limit)
            --ctl->sc_next_limit;
        else
            return NULL;
    }

    if (packet_out->po_flags & PO_REPACKNO)
    {
        update_for_resending(ctl, packet_out);
        packet_out->po_flags &= ~PO_REPACKNO;
    }

    send_ctl_sched_remove(ctl, packet_out);
    ctl->sc_bytes_out += lsquic_packet_out_total_sz(packet_out);
    return packet_out;
}


void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    send_ctl_sched_prepend(ctl, packet_out);
    ctl->sc_bytes_out -= lsquic_packet_out_total_sz(packet_out);
    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_delayed;
#endif
}


int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits,
                                                        unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;

    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
                    ctl->sc_conn_pub->packet_out_malo,
                    !(ctl->sc_flags & SC_TCID0), ctl->sc_pack_size, bits,
                    ctl->sc_ver_neg->vn_tag, NULL);
    if (!packet_out)
        return NULL;

    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen, this is why this check is performed at
         * this level and not lower, before the packet is actually allocated.
         */
        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
            lsquic_packet_out_avail(packet_out), ctl->sc_pack_size);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        return NULL;
    }

    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    bits = lsquic_send_ctl_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    packet_out->po_packno = send_ctl_next_packno(ctl);
    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
    return packet_out;
}


/* Do not use for STREAM frames
 */
lsquic_packet_out_t *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
                                      unsigned need_at_least, int *is_err)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
    {
        *is_err = 0;
        return NULL;
    }

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (packet_out)
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    else
        *is_err = 1;
    return packet_out;
}


static lsquic_packet_out_t *
send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                      unsigned need_at_least, const lsquic_stream_t *stream)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least
        && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM))
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
        return NULL;

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (!packet_out)
        return NULL;

    lsquic_send_ctl_scheduled_one(ctl, packet_out);
    return packet_out;
}


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{
    lsquic_packno_t oldno, packno;

    /* When the packet is resent, it uses the same number of bytes to encode
     * the packet number as the original packet.  This follows the reference
     * implementation.
     */
    oldno = packet_out->po_packno;
    packno = send_ctl_next_packno(ctl);

    packet_out->po_frame_types &= ~QFRAME_REGEN_MASK;
    assert(packet_out->po_frame_types);
    packet_out->po_packno = packno;

    if (ctl->sc_ver_neg->vn_tag)
    {
        assert(packet_out->po_flags & PO_VERSION);  /* It can only disappear */
        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
    }

    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
    if (packet_out->po_regen_sz)
    {
        assert(!(packet_out->po_flags & PO_SCHED));
        lsquic_packet_out_chop_regen(packet_out);
    }
    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
}
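
/* Frames in the "regen" prefix of the packet (ACK and similar frames that
 * are regenerated rather than retransmitted verbatim) are chopped off
 * before the packet is resent; they will be rebuilt with fresh data later.
 * If nothing but regenerated frames remained, po_frame_types would become
 * zero and the assertion above would fire: such a packet should be
 * dropped, not resent.
 */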


unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    unsigned n = 0;

    while (lsquic_send_ctl_can_send(ctl) &&
                                (packet_out = send_ctl_next_lost(ctl)))
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            ++n;
            update_for_resending(ctl, packet_out);
            lsquic_send_ctl_scheduled_one(ctl, packet_out);
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from unacked queue",
                packet_out->po_packno);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
    }

    if (n)
        LSQ_DEBUG("rescheduled %u packets", n);

    return n;
}


void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
{
    if (tcid0)
    {
        LSQ_INFO("set TCID flag");
        ctl->sc_flags |=  SC_TCID0;
    }
    else
    {
        LSQ_INFO("unset TCID flag");
        ctl->sc_flags &= ~SC_TCID0;
    }
}


/* The controller elides STREAM frames of stream `stream_id' from scheduled
 * and buffered packets.  If a packet becomes empty as a result, it is
 * dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and stream frames of reset streams in lost
 * packets are elided in due time.
 */
void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
{
    struct lsquic_packet_out *packet_out = NULL, *next = NULL;
    unsigned n, adj;

    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            adj = lsquic_packet_out_elide_reset_stream_frames(packet_out,
                                                              stream_id);
            ctl->sc_bytes_scheduled -= adj;
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                send_ctl_sched_remove(ctl, packet_out);
                lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
            }
        }
    }

    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (!(packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM)))
                continue;
            lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel buffered packet in queue #%u after eliding "
                    "frames for stream %"PRIu32, n, stream_id);
                TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
                             packet_out, po_next);
                --ctl->sc_buffered_packets[n].bpq_count;
                lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
                LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u",
                          n, ctl->sc_buffered_packets[n].bpq_count);
            }
        }
    }
}


/* Return true if any packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched().  These would be the delayed data
 * packets.
 */
#ifndef NDEBUG
#if  __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
            return 1;
    return 0;
}


#ifndef NDEBUG
static void
send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
                                const struct lsquic_packets_tailq *tailq)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n_packets;
    char *buf;
    size_t bufsz;
    int off;

    n_packets = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
        ++n_packets;

    if (n_packets == 0)
    {
        LSQ_DEBUG("%s: [<empty set>]", prefix);
        return;
    }

    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
    buf = malloc(bufsz);
    if (!buf)
    {
        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
        return;
    }

    off = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
    {
        if (off)
            buf[off++] = ' ';
        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
    }

    LSQ_DEBUG("%s: [%s]", prefix, buf);
    free(buf);
}

#define LOG_PACKET_Q(queue, prefix) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, prefix, queue);                          \
} while (0)
#else
#define LOG_PACKET_Q(queue, prefix)
#endif


int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out, *next;
#ifndef NDEBUG
    int pre_squeeze_logged = 0;
#endif

    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            if (packet_out->po_flags & PO_ENCRYPTED)
            {
                ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
                                                    packet_out->po_enc_data);
                packet_out->po_enc_data = NULL;
                packet_out->po_flags &= ~PO_ENCRYPTED;
            }
        }
        else
        {
#ifndef NDEBUG
            /* Log the whole list before we squeeze for the first time */
            if (!pre_squeeze_logged++)
                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets before squeezing");
#endif
            send_ctl_sched_remove(ctl, packet_out);
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
        }
    }

#ifndef NDEBUG
    if (pre_squeeze_logged)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets after squeezing");
    else if (ctl->sc_n_scheduled > 0)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
#endif

    return ctl->sc_n_scheduled > 0;
}


void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out;

    assert(ctl->sc_n_scheduled > 0);    /* Otherwise, why is this called? */
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        packet_out->po_flags |= PO_REPACKNO;
}


void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *ack_packet;

    assert(ctl->sc_n_scheduled > 1);    /* Otherwise, why is this called? */
    ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
}


void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    const unsigned n = ctl->sc_n_scheduled;
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        lsquic_packet_out_destroy(packet_out, ctl->sc_enpub);
    }
    assert(0 == ctl->sc_n_scheduled);
    LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 1 ? "s" : "");
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum buf_packet_type
lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
                                            const lsquic_stream_t *stream)
{
    const lsquic_stream_t *other_stream;
    struct lsquic_hash_elem *el;
    struct lsquic_hash *all_streams;

    all_streams = ctl->sc_conn_pub->all_streams;
    for (el = lsquic_hash_first(all_streams); el;
                                     el = lsquic_hash_next(all_streams))
    {
        other_stream = lsquic_hashelem_getdata(el);
        if (other_stream != stream
              && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
                && !lsquic_stream_is_critical(other_stream)
                  && other_stream->sm_priority < stream->sm_priority)
            return BPT_OTHER_PRIO;
    }
    return BPT_HIGHEST_PRIO;
}


static enum buf_packet_type
send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
                                        const struct lsquic_stream *stream)
{
    if (ctl->sc_cached_bpt.stream_id != stream->id)
    {
        ctl->sc_cached_bpt.stream_id = stream->id;
        ctl->sc_cached_bpt.packet_type =
                                lsquic_send_ctl_determine_bpt(ctl, stream);
    }
    return ctl->sc_cached_bpt.packet_type;
}


static unsigned
send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
                                        enum buf_packet_type packet_type)
{
    unsigned count;

    switch (packet_type)
    {
    case BPT_OTHER_PRIO:
        return MAX_BPQ_COUNT;
    case BPT_HIGHEST_PRIO:
    default: /* clang does not complain about absence of `default'... */
        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx;
        if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size)
        {
            count = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size
                                                                      - count;
            if (count > MAX_BPQ_COUNT)
                return count;
        }
        return MAX_BPQ_COUNT;
    }
}
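
/* Example: with a congestion window worth 40 full-size packets, 10 packets
 * scheduled, and 5 retransmittable packets in flight, the headroom is
 * 40 - 15 = 25 buffered packets; that value is used when it exceeds
 * MAX_BPQ_COUNT, otherwise the queue is capped at MAX_BPQ_COUNT.  (Note
 * that subtracting in the other direction, as a previous revision did,
 * would underflow the unsigned counter.)
 */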


static lsquic_packet_out_t *
send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
                enum buf_packet_type packet_type, unsigned need_at_least,
                                        const struct lsquic_stream *stream)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least
        && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM))
    {
        return packet_out;
    }

    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
        return NULL;

    bits = lsquic_send_ctl_guess_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
    ++packet_q->bpq_count;
    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
              packet_type, packet_q->bpq_count);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                unsigned need_at_least, const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;

    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return send_ctl_get_packet_for_stream(ctl, need_at_least, stream);
    else
    {
        packet_type = send_ctl_lookup_bpt(ctl, stream);
        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
                                            stream);
    }
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
{
    lsquic_packno_t smallest_unacked;
    unsigned n_in_flight;

    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
    n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
    return calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
                                                            n_in_flight);
}


enum lsquic_packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
{
    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_calc_packno_bits(ctl);
    else
        return lsquic_send_ctl_guess_packno_bits(ctl);
}


static int
split_buffered_packet (lsquic_send_ctl_t *ctl,
        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
        enum lsquic_packno_bits bits, unsigned excess_bytes)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *new_packet_out;

    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);

    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0);
    if (!new_packet_out)
        return -1;

    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
                  new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
    {
        lsquic_packet_out_set_packno_bits(packet_out, bits);
        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
                           po_next);
        ++packet_q->bpq_count;
        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        return 0;
    }
    else
    {
        /* On failure, free the new packet; the original is still on the
         * buffered queue and must not be destroyed here.
         */
        lsquic_packet_out_destroy(new_packet_out, ctl->sc_enpub);
        return -1;
    }
}
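
/* A buffered packet needs to be split when the packet number length chosen
 * at scheduling time exceeds the guess made when the packet was buffered
 * and the difference no longer fits into the packet's available space; the
 * excess bytes are moved into a fresh packet inserted right after the
 * original one.
 */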


int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
                                            enum buf_packet_type packet_type)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    unsigned used, excess;

    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
    const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
    const unsigned need = packno_bits2len(bits);

    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
                                            lsquic_send_ctl_can_send(ctl))
    {
        if (bits != lsquic_packet_out_packno_bits(packet_out))
        {
            used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out));
            if (need > used
                && need - used > lsquic_packet_out_avail(packet_out))
            {
                excess = need - used - lsquic_packet_out_avail(packet_out);
                if (0 != split_buffered_packet(ctl, packet_type,
                                               packet_out, bits, excess))
                {
                    return -1;
                }
            }
        }
        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
        --packet_q->bpq_count;
        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        packet_out->po_packno = send_ctl_next_packno(ctl);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    return 0;
}


int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
                             const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;
    struct buf_packet_q *packet_q;
    lsquic_packet_out_t *packet_out;
    const struct parse_funcs *pf;

    pf = ctl->sc_conn_pub->lconn->cn_pf;
    packet_type = send_ctl_lookup_bpt(ctl, stream);
    packet_q = &ctl->sc_buffered_packets[packet_type];

    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
                          lsquic_packets_tailq, po_next)
        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
            return 0;

    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (0 == packet_out->po_sent
            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
        {
            return 0;
        }

    return -1;
}


size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n;
    size_t size;
    const struct lsquic_packets_tailq queues[] = {
        ctl->sc_scheduled_packets,
        ctl->sc_unacked_packets,
        ctl->sc_lost_packets,
        ctl->sc_buffered_packets[0].bpq_packets,
        ctl->sc_buffered_packets[1].bpq_packets,
    };

    size = sizeof(*ctl);

    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
        TAILQ_FOREACH(packet_out, &queues[n], po_next)
            size += lsquic_packet_out_mem_used(packet_out);

    return size;
}
1773