/* lsquic_send_ctl.h -- revision f38b395a */
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
#ifndef LSQUIC_SEND_CTL_H
#define LSQUIC_SEND_CTL_H 1

#include <sys/queue.h>

#include "lsquic_types.h"

#ifndef LSQUIC_SEND_STATS
#   define LSQUIC_SEND_STATS 1
#endif

TAILQ_HEAD(lsquic_packets_tailq, lsquic_packet_out);

struct lsquic_packet_out;
struct ack_info;
struct lsquic_alarmset;
struct lsquic_engine_public;
struct lsquic_conn_public;
struct network_path;
struct ver_neg;
enum pns;
struct to_coal;

enum buf_packet_type { BPT_HIGHEST_PRIO, BPT_OTHER_PRIO, };

struct buf_packet_q
{
    struct lsquic_packets_tailq     bpq_packets;
    unsigned                        bpq_count;
};

enum send_ctl_flags {
    SC_TCID0        = (1 << 0),
    SC_NSTP         = (1 << 2),
    SC_PACE         = (1 << 3),
    SC_SCHED_TICK   = (1 << 4),
    SC_BUFFER_STREAM= (1 << 5),
    SC_WAS_QUIET    = (1 << 6),
    SC_IETF         = (1 << 7),
#define SCBIT_LOST_ACK_SHIFT 8
    SC_LOST_ACK_INIT=  1 << 8,
    SC_LOST_ACK_HSK = SC_LOST_ACK_INIT << PNS_HSK,
    SC_LOST_ACK_APP = SC_LOST_ACK_INIT << PNS_APP,
    SC_1RTT_ACKED   =  1 << 11,
    SC_APP_LIMITED  =  1 << 12,
    SC_ECN          =  1 << 13,
    SC_QL_BITS      =  1 << 14,
    SC_SANITY_CHECK =  1 << 15,
    SC_CIDLEN       =  1 << 16,     /* sc_cidlen is set */
    SC_POISON       =  1 << 17,     /* poisoned packet exists */
    SC_CLEANUP_BBR  =  1 << 18,
    SC_ACK_RECV_INIT=  1 << 19,
    SC_ACK_RECV_HSK =  SC_ACK_RECV_INIT << PNS_HSK,
    SC_ACK_RECV_APP =  SC_ACK_RECV_INIT << PNS_APP,
    SC_ROUGH_RTT    =  1 << 22,
};

typedef struct lsquic_send_ctl {
    /* The first section consists of struct members which are used in the
     * time-critical lsquic_send_ctl_got_ack() in the approximate order
     * of usage.
     */
    lsquic_senhist_t                sc_senhist;
    enum send_ctl_flags             sc_flags;
    enum ecn                        sc_ecn;
    unsigned                        sc_n_stop_waiting;
    struct lsquic_packets_tailq     sc_unacked_packets[N_PNS];
    lsquic_packno_t                 sc_largest_acked_packno;
    lsquic_time_t                   sc_largest_acked_sent_time;
    lsquic_time_t                   sc_last_sent_time;
    lsquic_time_t                   sc_last_rto_time;
    int                           (*sc_can_send)(struct lsquic_send_ctl *);
    unsigned                        sc_bytes_unacked_retx;
    unsigned                        sc_bytes_scheduled;
    struct adaptive_cc              sc_adaptive_cc;
    const struct cong_ctl_if       *sc_ci;
    void                           *sc_cong_ctl;
    struct lsquic_engine_public    *sc_enpub;
    unsigned                        sc_bytes_unacked_all;
    unsigned                        sc_n_in_flight_all;
    unsigned                        sc_n_in_flight_retx;
    unsigned                        sc_n_consec_rtos;
    unsigned                        sc_n_hsk;
    unsigned                        sc_n_tlp;
    enum quic_ft_bit                sc_retx_frames;
    struct lsquic_alarmset         *sc_alset;

    /* Second section: everything else. */
    struct lsquic_packets_tailq     sc_scheduled_packets,
                                    sc_0rtt_stash,
                                    sc_lost_packets;
    struct buf_packet_q             sc_buffered_packets[BPT_OTHER_PRIO + 1];
    const struct ver_neg           *sc_ver_neg;
    struct lsquic_conn_public      *sc_conn_pub;
    struct pacer                    sc_pacer;
    lsquic_packno_t                 sc_cur_packno;
    lsquic_packno_t                 sc_largest_sent_at_cutback;
    lsquic_packno_t                 sc_max_rtt_packno;
    /* sc_largest_ack2ed is the largest packet number sent by the peer that
     * we ACKed and whose ACK we know the peer has received.  It determines
     * the receive history cutoff point for the purposes of generating ACK
     * frames in the absence of STOP_WAITING frames.  Used when the NSTP
     * option is set.  (The name "ack2ed" is odd enough not to be confused
     * with anything else, yet not insanely long.)
     */
    lsquic_packno_t                 sc_largest_ack2ed[N_PNS];
    /* sc_largest_acked is the largest packet number in PNS_APP packet number
     * space sent by peer for which we generated (not necessarily sent) an ACK.
     * This information is used to drop stale ACK frames from packets in
     * buffered queues.
     */
    lsquic_packno_t                 sc_largest_acked;
    lsquic_time_t                   sc_loss_to;
    uint64_t                        sc_ecn_total_acked[N_PNS];
    uint64_t                        sc_ecn_ce_cnt[N_PNS];
    struct
    {
        lsquic_stream_id_t      stream_id;
        enum buf_packet_type    packet_type;
    }                               sc_cached_bpt;
    unsigned                        sc_next_limit;
    unsigned                        sc_n_scheduled;
    enum packno_bits                sc_max_packno_bits;
#if LSQUIC_SEND_STATS
    struct {
        unsigned            n_total_sent,
                            n_resent,
                            n_delayed;
    }                               sc_stats;
#endif
    unsigned char                  *sc_token;
    size_t                          sc_token_sz;
    unsigned                        sc_retry_count;
    unsigned                        sc_rt_count;    /* Count round trips */
    lsquic_packno_t                 sc_cur_rt_end;
    lsquic_packno_t                 sc_gap;
    unsigned                        sc_loss_count;  /* Used to set loss bit */
    unsigned                        sc_square_count;/* Used to set square bit */
    signed char                     sc_cidlen;      /* For debug purposes */
} lsquic_send_ctl_t;

void
lsquic_send_ctl_init (lsquic_send_ctl_t *, struct lsquic_alarmset *,
          struct lsquic_engine_public *, const struct ver_neg *,
          struct lsquic_conn_public *, enum send_ctl_flags);
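
/* Illustrative sketch only, not part of this header: a hypothetical caller
 * that embeds the controller in its connection object and wants pacing and
 * IETF QUIC semantics might initialize it roughly as follows.  The conn->
 * member names are assumptions made for the example.
 *
 *     lsquic_send_ctl_init(&conn->sctl, &conn->alset, conn->enpub,
 *                          &conn->ver_neg, &conn->conn_pub,
 *                          SC_PACE | SC_IETF);
 */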

int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *, struct lsquic_packet_out *);

int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *, const struct ack_info *,
                                                lsquic_time_t, lsquic_time_t);

lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl);

int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *);

int
lsquic_send_ctl_have_unacked_retx_data (const struct lsquic_send_ctl *);

void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *);

int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

struct lsquic_packet_out *
lsquic_send_ctl_next_packet_to_send (struct lsquic_send_ctl *,
                                                    const struct to_coal *);

int
lsquic_send_ctl_next_packet_to_send_predict (struct lsquic_send_ctl *);

void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl);

#define lsquic_send_ctl_n_in_flight(ctl) (+(ctl)->sc_n_in_flight)

#define lsquic_send_ctl_n_scheduled(ctl) (+(ctl)->sc_n_scheduled)

#define lsquic_send_ctl_largest_ack2ed(ctl, pns) \
                                            (+(ctl)->sc_largest_ack2ed[pns])

void
lsquic_send_ctl_do_sanity_check (const struct lsquic_send_ctl *ctl);

#ifndef NDEBUG
#define lsquic_send_ctl_sanity_check(ctl) do {                      \
    if ((ctl)->sc_flags & SC_SANITY_CHECK)                          \
        lsquic_send_ctl_do_sanity_check(ctl);                       \
} while (0)
#else
#define lsquic_send_ctl_sanity_check(ctl)
#endif

int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *);

int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *);

struct lsquic_packet_out *
lsquic_send_ctl_last_scheduled (struct lsquic_send_ctl *, enum packnum_space,
                                            const struct network_path *, int);

struct lsquic_packet_out *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *, unsigned,
                            enum packnum_space, const struct network_path *);

struct lsquic_packet_out *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *, enum packnum_space,
      unsigned need_at_least, const struct network_path *, int, int *is_err);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *,
                    unsigned need_at_least, const struct network_path *,
                    const struct lsquic_stream *);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_crypto (struct lsquic_send_ctl *ctl,
    unsigned need_at_least, enum packnum_space, const struct network_path *);

unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *);

#define lsquic_send_ctl_lost_ack(ctl) \
    (((ctl)->sc_flags & (SC_LOST_ACK_INIT|SC_LOST_ACK_HSK|SC_LOST_ACK_APP)) \
                                                        >> SCBIT_LOST_ACK_SHIFT)
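
/* Note (illustration, not part of the API): because the SC_LOST_ACK_* flags
 * occupy consecutive bits starting at SCBIT_LOST_ACK_SHIFT, the macro above
 * yields a small bitmask indexed by packet number space.  A hypothetical
 * caller could test it like this:
 *
 *     if (lsquic_send_ctl_lost_ack(ctl) & (1 << PNS_APP))
 *         ;   -- an ACK sent in the APP PNS was lost: generate a new one
 */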

#define lsquic_send_ctl_scheduled_ack(ctl, pns, acked) do {         \
    (ctl)->sc_flags &= ~(SC_LOST_ACK_INIT << pns);                  \
    if (PNS_APP == pns)                                             \
        (ctl)->sc_largest_acked = acked;                            \
} while (0)

void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *, int);

#define lsquic_send_ctl_turn_nstp_on(ctl) ((ctl)->sc_flags |= SC_NSTP)

void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *, lsquic_stream_id_t);

int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *);

#define lsquic_send_ctl_maybe_squeeze_sched(ctl) (                  \
    (ctl)->sc_n_scheduled && lsquic_send_ctl_squeeze_sched(ctl)     \
)

/* Same return value as for squeezing, but without actual squeezing. */
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *);

void
lsquic_send_ctl_ack_to_front (struct lsquic_send_ctl *, unsigned n_acks);

#define lsquic_send_ctl_n_stop_waiting(ctl) \
                                    (+(ctl)->sc_n_stop_waiting)

#define lsquic_send_ctl_n_stop_waiting_reset(ctl) do {      \
    (ctl)->sc_n_stop_waiting = 0;                           \
} while (0)

void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *);

#define lsquic_send_ctl_tick_in(ctl, now) do {              \
    if ((ctl)->sc_flags & SC_PACE)                          \
    {                                                       \
        (ctl)->sc_flags |= SC_SCHED_TICK;                   \
        lsquic_pacer_tick_in(&(ctl)->sc_pacer, now);        \
    }                                                       \
    (ctl)->sc_flags &= ~SC_APP_LIMITED;                     \
} while (0)

#define lsquic_send_ctl_tick_out(ctl) do {                  \
    if ((ctl)->sc_flags & SC_PACE)                          \
        lsquic_pacer_tick_out(&(ctl)->sc_pacer);            \
} while (0)

#define lsquic_send_ctl_next_pacer_time(ctl) (              \
    ((ctl)->sc_flags & SC_PACE)                             \
        && lsquic_pacer_delayed(&(ctl)->sc_pacer)           \
        ? lsquic_pacer_next_sched(&(ctl)->sc_pacer)         \
        : 0 )
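
/* Illustrative sketch (hypothetical caller, not part of the API): the tick
 * macros above are meant to bracket one send pass.  send_one() and `now` are
 * assumptions made for the example; NULL stands in for the coalescing hint
 * passed to lsquic_send_ctl_next_packet_to_send().
 *
 *     struct lsquic_packet_out *packet;
 *     lsquic_time_t next_time;
 *
 *     lsquic_send_ctl_tick_in(ctl, now);
 *     while (lsquic_send_ctl_can_send(ctl)
 *            && (packet = lsquic_send_ctl_next_packet_to_send(ctl, NULL)))
 *         send_one(packet);
 *     lsquic_send_ctl_tick_out(ctl);
 *     next_time = lsquic_send_ctl_next_pacer_time(ctl);  -- zero unless the
 *                                                           pacer is delaying
 */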

enum packno_bits
lsquic_send_ctl_packno_bits (struct lsquic_send_ctl *, enum packnum_space);

int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *, enum buf_packet_type);

#define lsquic_send_ctl_has_buffered(ctl) (                                 \
    TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets)  \
 || TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets  ))

#define lsquic_send_ctl_has_buffered_high(ctl) (                            \
    !TAILQ_EMPTY(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets))

#define lsquic_send_ctl_invalidate_bpt_cache(ctl) do {      \
    (ctl)->sc_cached_bpt.stream_id = UINT64_MAX;            \
} while (0)

#ifndef NDEBUG
enum packno_bits
lsquic_send_ctl_guess_packno_bits (struct lsquic_send_ctl *);

int
lsquic_send_ctl_schedule_stream_packets_immediately (struct lsquic_send_ctl *);

enum buf_packet_type
lsquic_send_ctl_determine_bpt (struct lsquic_send_ctl *,
                                            const struct lsquic_stream *);

enum packno_bits
lsquic_send_ctl_calc_packno_bits (struct lsquic_send_ctl *);

void
lsquic_send_ctl_set_max_bpq_count (unsigned);
#endif

size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *);

#define lsquic_send_ctl_set_buffer_stream_packets(ctl, b) do {  \
    (ctl)->sc_flags &= ~SC_BUFFER_STREAM;                       \
    (ctl)->sc_flags |= -!!(b) & SC_BUFFER_STREAM;               \
} while (0)
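
/* Note on the expression above (added explanatory comment): `-!!(b)` is zero
 * when b is zero and all-ones otherwise, so ANDing it with SC_BUFFER_STREAM
 * sets or clears the flag without a branch.  Worked through for b != 0:
 *
 *     !!(b) == 1,  -!!(b) == ~0,  (~0 & SC_BUFFER_STREAM) == SC_BUFFER_STREAM
 */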

int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *,
                             const struct lsquic_stream *);

int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *);

#define lsquic_send_ctl_incr_pack_sz(ctl, packet, delta) do {   \
    (packet)->po_data_sz += (delta);                            \
    if ((packet)->po_flags & PO_SCHED)                          \
        (ctl)->sc_bytes_scheduled += (delta);                   \
    lsquic_send_ctl_sanity_check(ctl);                          \
} while (0)

int
lsquic_send_ctl_sched_is_blocked (struct lsquic_send_ctl *);

void
lsquic_send_ctl_verneg_done (struct lsquic_send_ctl *);

int
lsquic_send_ctl_retry (struct lsquic_send_ctl *, const unsigned char *, size_t);

int
lsquic_send_ctl_set_token (struct lsquic_send_ctl *,
                const unsigned char *token, size_t token_sz);

void
lsquic_send_ctl_empty_pns (struct lsquic_send_ctl *, enum packnum_space);

void
lsquic_send_ctl_maybe_calc_rough_rtt (struct lsquic_send_ctl *,
                                                        enum packnum_space);

void
lsquic_send_ctl_repath (struct lsquic_send_ctl *ctl,
    const struct network_path *old, const struct network_path *new,
    int keep_path_properties);

void
lsquic_send_ctl_resize (struct lsquic_send_ctl *);

void
lsquic_send_ctl_return_enc_data (struct lsquic_send_ctl *);

#define lsquic_send_ctl_1rtt_acked(ctl) ((ctl)->sc_flags & SC_1RTT_ACKED)

void
lsquic_send_ctl_maybe_app_limited (struct lsquic_send_ctl *,
                                            const struct network_path *);

#define lsquic_send_ctl_do_ql_bits(ctl) do {                       \
    (ctl)->sc_flags |= SC_QL_BITS;                                 \
} while (0)

void
lsquic_send_ctl_cidlen_change (struct lsquic_send_ctl *,
                                unsigned orig_cid_len, unsigned new_cid_len);

void
lsquic_send_ctl_begin_optack_detection (struct lsquic_send_ctl *);

void
lsquic_send_ctl_path_validated (struct lsquic_send_ctl *);

/* Has immediately sendable packets */
#define lsquic_send_ctl_has_sendable(ctl_) \
    (lsquic_send_ctl_n_scheduled(ctl_) > 0 \
                && lsquic_send_ctl_next_packet_to_send_predict(ctl_))

#define lsquic_send_ctl_in_recovery(ctl_) ((ctl_)->sc_largest_acked_packno \
    && (ctl_)->sc_largest_acked_packno <= (ctl_)->sc_largest_sent_at_cutback)

#define send_ctl_in_recovery lsquic_send_ctl_in_recovery

int
lsquic_send_ctl_can_send_probe (const struct lsquic_send_ctl *,
                                            const struct network_path *);

#define lsquic_send_ctl_ecn_turned_on(ctl_) ((ctl_)->sc_ecn != ECN_NOT_ECT)

void
lsquic_send_ctl_disable_ecn (struct lsquic_send_ctl *);

struct send_ctl_state
{
    struct pacer        pacer;
    struct ack_state    ack_state;
    unsigned            buf_counts[BPT_OTHER_PRIO + 1];
};

void
lsquic_send_ctl_snapshot (struct lsquic_send_ctl *, struct send_ctl_state *);

void
lsquic_send_ctl_rollback (struct lsquic_send_ctl *, struct send_ctl_state *,
                                                const struct iovec *, size_t);
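
/* Illustrative sketch (hypothetical caller, not part of the API): snapshot
 * and rollback are meant to bracket a speculative write so that its effect
 * on the send controller can be undone.  try_speculative_write(), iov, and
 * iovcnt are assumptions made for the example; the iovec arguments
 * presumably describe the data written since the snapshot.
 *
 *     struct send_ctl_state state;
 *
 *     lsquic_send_ctl_snapshot(ctl, &state);
 *     if (0 != try_speculative_write(ctl))
 *         lsquic_send_ctl_rollback(ctl, &state, iov, iovcnt);
 */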

void
lsquic_send_ctl_0rtt_to_1rtt (struct lsquic_send_ctl *);

void
lsquic_send_ctl_stash_0rtt_packets (struct lsquic_send_ctl *);

#endif