lsquic_send_ctl.h revision c7d81ce1
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
#ifndef LSQUIC_SEND_CTL_H
#define LSQUIC_SEND_CTL_H 1

#include <sys/queue.h>

#include "lsquic_types.h"
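
/* Note: lsquic_senhist_t, struct lsquic_cubic, and struct pacer are used by
 * value in the struct below, so their headers (presumably lsquic_senhist.h,
 * lsquic_cubic.h, and lsquic_pacer.h) must be included before this one.
 */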

#ifndef LSQUIC_SEND_STATS
#   define LSQUIC_SEND_STATS 1
#endif

TAILQ_HEAD(lsquic_packets_tailq, lsquic_packet_out);

struct lsquic_packet_out;
struct ack_info;
struct lsquic_alarmset;
struct lsquic_engine_public;
struct lsquic_conn_public;
struct ver_neg;

enum buf_packet_type { BPT_HIGHEST_PRIO, BPT_OTHER_PRIO, };

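/* Buffered packets are kept on one of two queues, selected by priority (see
 * enum buf_packet_type above), until lsquic_send_ctl_schedule_buffered()
 * moves them onto the scheduled queue.  MAX_BPQ_COUNT presumably caps the
 * length of a single buffered queue, while bpq_count holds the queue's
 * current length.
 */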
#define MAX_BPQ_COUNT 10
struct buf_packet_q
{
    struct lsquic_packets_tailq     bpq_packets;
    unsigned                        bpq_count;
};

enum send_ctl_flags {
    SC_TCID0        = (1 << 0),     /* See lsquic_send_ctl_set_tcid0() */
    SC_LOST_ACK     = (1 << 1),     /* See lsquic_send_ctl_lost_ack() */
    SC_NSTP         = (1 << 2),     /* See lsquic_send_ctl_turn_nstp_on() */
    SC_PACE         = (1 << 3),     /* Pacing is on */
    SC_SCHED_TICK   = (1 << 4),     /* Set by lsquic_send_ctl_tick() */
    /* Set or cleared by lsquic_send_ctl_set_buffer_stream_packets(): */
    SC_BUFFER_STREAM= (1 << 5),
    SC_WAS_QUIET    = (1 << 6),
};

typedef struct lsquic_send_ctl {
    /* The first section consists of struct members which are used in the
     * time-critical lsquic_send_ctl_got_ack() in the approximate order
     * of usage.
     */
    lsquic_senhist_t                sc_senhist;
    enum send_ctl_flags             sc_flags;
    unsigned                        sc_n_stop_waiting;
    struct lsquic_packets_tailq     sc_unacked_packets;
    lsquic_packno_t                 sc_largest_acked_packno;
    lsquic_time_t                   sc_largest_acked_sent_time;
    unsigned                        sc_bytes_out;
    unsigned                        sc_bytes_unacked_retx;
    unsigned                        sc_bytes_scheduled;
    unsigned                        sc_pack_size;
    struct lsquic_cubic             sc_cubic;
    struct lsquic_engine_public    *sc_enpub;
    unsigned                        sc_bytes_unacked_all;
    unsigned                        sc_n_in_flight_all;
    unsigned                        sc_n_in_flight_retx;
    unsigned                        sc_n_consec_rtos;
    unsigned                        sc_n_hsk;
    unsigned                        sc_n_tlp;
    struct lsquic_alarmset         *sc_alset;

    /* Second section: everything else. */
    struct lsquic_packets_tailq     sc_scheduled_packets,
                                    sc_lost_packets;
    struct buf_packet_q             sc_buffered_packets[BPT_OTHER_PRIO + 1];
    const struct ver_neg           *sc_ver_neg;
    struct lsquic_conn_public      *sc_conn_pub;
    struct pacer                    sc_pacer;
    lsquic_packno_t                 sc_cur_packno;
    lsquic_packno_t                 sc_largest_sent_at_cutback;
    lsquic_packno_t                 sc_max_rtt_packno;
    /* sc_largest_ack2ed is the largest packet number sent by the peer that
     * we have ACKed and for which we know our ACK was received by the peer.
     * It is used to determine the receive history cutoff point for the
     * purposes of generating ACK frames in the absence of STOP_WAITING
     * frames, that is, when the NSTP option is set.  (The name "ack2ed" is
     * odd enough not to be confused with anything else, yet not insanely
     * long.)
     */
    lsquic_packno_t                 sc_largest_ack2ed;
    /* sc_largest_acked is the largest packet number in PNS_APP packet number
     * space sent by peer for which we generated (not necessarily sent) an ACK.
     * This information is used to drop stale ACK frames from packets in
     * buffered queues.
     */
    lsquic_packno_t                 sc_largest_acked;
    lsquic_time_t                   sc_loss_to;
    struct
    {
        uint32_t                stream_id;
        enum buf_packet_type    packet_type;
    }                               sc_cached_bpt;
    unsigned                        sc_next_limit;
    unsigned                        sc_n_scheduled;
    enum packno_bits                sc_max_packno_bits;
#if LSQUIC_SEND_STATS
    struct {
        unsigned            n_total_sent,
                            n_resent,
                            n_delayed;
    }                               sc_stats;
#endif
} lsquic_send_ctl_t;
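
/* A rough sketch of how a connection might drive the send controller using
 * the functions declared below.  This is illustrative only: alset, enpub,
 * ver_neg, conn_pub, packet_out, acki, and now stand in for state that the
 * connection keeps elsewhere, the constants are placeholders, and error
 * handling is omitted.
 *
 *      struct lsquic_send_ctl ctl;
 *
 *      lsquic_send_ctl_init(&ctl, alset, enpub, ver_neg, conn_pub, 1370);
 *
 *      ... write frames into packet_out and hand it to the controller
 *      ... with lsquic_send_ctl_scheduled_one(&ctl, packet_out)
 *
 *      while (lsquic_send_ctl_can_send(&ctl)
 *          && (packet_out = lsquic_send_ctl_next_packet_to_send(&ctl)))
 *      {
 *          ... send packet_out on the wire ...
 *          (void) lsquic_send_ctl_sent_packet(&ctl, packet_out, 1);
 *      }
 *
 *      ... when an ACK frame arrives, parsed into acki:
 *      (void) lsquic_send_ctl_got_ack(&ctl, acki, now);
 *
 *      lsquic_send_ctl_cleanup(&ctl);
 */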

void
lsquic_send_ctl_init (lsquic_send_ctl_t *, struct lsquic_alarmset *,
          struct lsquic_engine_public *, const struct ver_neg *,
          struct lsquic_conn_public *, unsigned short max_packet_size);

int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *, struct lsquic_packet_out *,
                             int);

int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *, const struct ack_info *,
                                                            lsquic_time_t);

lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl);

int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *);

void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *);

int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

struct lsquic_packet_out *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *);

void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl);

#define lsquic_send_ctl_n_in_flight(ctl) (+(ctl)->sc_n_in_flight_all)

#define lsquic_send_ctl_n_scheduled(ctl) (+(ctl)->sc_n_scheduled)

#define lsquic_send_ctl_largest_ack2ed(ctl) (+(ctl)->sc_largest_ack2ed)

#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl);
#else
#   define lsquic_send_ctl_sanity_check(ctl)
#endif

int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *);

int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *);

#define lsquic_send_ctl_last_scheduled(ctl) \
            TAILQ_LAST(&(ctl)->sc_scheduled_packets, lsquic_packets_tailq)

struct lsquic_packet_out *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *, unsigned);

struct lsquic_packet_out *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *,
                                      unsigned need_at_least, int *is_err);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *,
                        unsigned need_at_least, const struct lsquic_stream *);

unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *);

#define lsquic_send_ctl_lost_ack(ctl) ((ctl)->sc_flags & SC_LOST_ACK)

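/* Note that an ACK frame covering peer packets up to `packno' has been
 * scheduled: the SC_LOST_ACK flag is cleared and sc_largest_acked is
 * updated (see the struct member's comment above), which is what allows
 * stale ACK frames to be dropped from buffered packets.
 */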
#define lsquic_send_ctl_scheduled_ack(ctl, packno) do {             \
    (ctl)->sc_flags &= ~SC_LOST_ACK;                                \
    (ctl)->sc_largest_acked = packno;                               \
} while (0)

void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *, int);

#define lsquic_send_ctl_turn_nstp_on(ctl) ((ctl)->sc_flags |= SC_NSTP)

void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *, uint32_t);

int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *);

#define lsquic_send_ctl_maybe_squeeze_sched(ctl) (                  \
    (ctl)->sc_n_scheduled && lsquic_send_ctl_squeeze_sched(ctl)     \
)

/* Same return value as lsquic_send_ctl_squeeze_sched(), but without doing
 * the actual squeezing.
 */
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *);

void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *);

#define lsquic_send_ctl_n_stop_waiting(ctl) (+(ctl)->sc_n_stop_waiting)

#define lsquic_send_ctl_n_stop_waiting_reset(ctl) do {      \
    (ctl)->sc_n_stop_waiting = 0;                           \
} while (0)

void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *);

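/* If pacing is enabled, note that the send controller has been ticked and
 * hand the current time to the pacer.  lsquic_send_ctl_next_pacer_time()
 * below reports when the pacer will next allow sending; it evaluates to
 * zero when pacing is off or the pacer is not delaying packets.
 */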
#define lsquic_send_ctl_tick(ctl, now) do {                 \
    if ((ctl)->sc_flags & SC_PACE)                          \
    {                                                       \
        (ctl)->sc_flags |= SC_SCHED_TICK;                   \
        pacer_tick(&(ctl)->sc_pacer, now);                  \
    }                                                       \
} while (0)

#define lsquic_send_ctl_next_pacer_time(ctl) (              \
    ((ctl)->sc_flags & SC_PACE)                             \
        && pacer_delayed(&(ctl)->sc_pacer)                  \
        ? pacer_next_sched(&(ctl)->sc_pacer)                \
        : 0 )

enum packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *);

int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *, enum buf_packet_type);

#define lsquic_send_ctl_has_buffered(ctl) (                                 \
    TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets)  \
 || TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets))

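/* Drop the cached stream-ID-to-buffered-packet-type mapping (sc_cached_bpt)
 * so that the next lookup recomputes it.
 */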
#define lsquic_send_ctl_invalidate_bpt_cache(ctl) do {      \
    (ctl)->sc_cached_bpt.stream_id = 0;                     \
} while (0)

#ifndef NDEBUG
enum packno_bits
lsquic_send_ctl_guess_packno_bits (struct lsquic_send_ctl *);

int
lsquic_send_ctl_schedule_stream_packets_immediately (struct lsquic_send_ctl *);

enum buf_packet_type
lsquic_send_ctl_determine_bpt (struct lsquic_send_ctl *,
                                            const struct lsquic_stream *);

enum packno_bits
lsquic_send_ctl_calc_packno_bits (struct lsquic_send_ctl *);
#endif

size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *);

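/* Set or clear SC_BUFFER_STREAM according to `b' without branching:
 * -!!(b) evaluates to all ones when b is non-zero and to zero otherwise.
 */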
#define lsquic_send_ctl_set_buffer_stream_packets(ctl, b) do {  \
    (ctl)->sc_flags &= ~SC_BUFFER_STREAM;                       \
    (ctl)->sc_flags |= -!!(b) & SC_BUFFER_STREAM;               \
} while (0)

int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *,
                             const struct lsquic_stream *);

int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *);

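/* Account for `delta' more bytes in `packet': grow po_data_sz and, if the
 * packet is already scheduled (PO_SCHED is set), the scheduled-bytes
 * counter as well.  The sanity check is a no-op unless LSQUIC_EXTRA_CHECKS
 * is defined.
 */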
#define lsquic_send_ctl_incr_pack_sz(ctl, packet, delta) do {   \
    (packet)->po_data_sz += delta;                              \
    if ((packet)->po_flags & PO_SCHED)                          \
        (ctl)->sc_bytes_scheduled += delta;                     \
    lsquic_send_ctl_sanity_check(ctl);                          \
} while (0)

int
lsquic_send_ctl_sched_is_blocked (const struct lsquic_send_ctl *);

int
lsquic_send_ctl_buffered_and_same_prio_as_headers (struct lsquic_send_ctl *,
                                                const struct lsquic_stream *);

void
lsquic_send_ctl_verneg_done (struct lsquic_send_ctl *);

#endif