// h2/proto/streams/send.rs
1use super::{
2    store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId,
3    StreamIdOverflow, WindowSize,
4};
5use crate::codec::UserError;
6use crate::frame::{self, Reason};
7use crate::proto::{self, Error, Initiator};
8
9use bytes::Buf;
10use tokio::io::AsyncWrite;
11
12use std::cmp::Ordering;
13use std::io;
14use std::task::{Context, Poll, Waker};
15
/// Manages state transitions related to outbound frames.
#[derive(Debug)]
pub(super) struct Send {
    /// Stream identifier to use for next initialized stream.
    ///
    /// Becomes `Err(StreamIdOverflow)` once the local stream ID space is
    /// exhausted; callers surface this as `UserError::OverflowedStreamId`.
    next_stream_id: Result<StreamId, StreamIdOverflow>,

    /// Any streams with a higher ID are ignored.
    ///
    /// This starts as MAX, but is lowered when a GOAWAY is received.
    ///
    /// > After sending a GOAWAY frame, the sender can discard frames for
    /// > streams initiated by the receiver with identifiers higher than
    /// > the identified last stream.
    max_stream_id: StreamId,

    /// Initial window size of locally initiated streams
    init_window_sz: WindowSize,

    /// Prioritization layer
    prioritize: Prioritize,

    /// Whether server push is currently enabled.
    ///
    /// Starts `true`; updated from the remote peer's SETTINGS frame in
    /// `apply_remote_settings`. `send_push_promise` fails while this is false.
    is_push_enabled: bool,

    /// If extended connect protocol is enabled.
    is_extended_connect_protocol_enabled: bool,
}
42
/// A value to detect which public API has called `poll_reset`.
///
/// Passed through to `state.ensure_reason` so the state machine can decide
/// whether the current stream state is an error for that particular caller.
#[derive(Debug)]
pub(crate) enum PollReset {
    /// `poll_reset` was called from an API still awaiting headers.
    AwaitingHeaders,
    /// `poll_reset` was called from an API that is already streaming.
    Streaming,
}
49
impl Send {
    /// Create a new `Send`
    pub fn new(config: &Config) -> Self {
        Send {
            init_window_sz: config.remote_init_window_sz,
            max_stream_id: StreamId::MAX,
            next_stream_id: Ok(config.local_next_stream_id),
            prioritize: Prioritize::new(config),
            is_push_enabled: true,
            is_extended_connect_protocol_enabled: false,
        }
    }

    /// Returns the initial send window size
    pub fn init_window_sz(&self) -> WindowSize {
        self.init_window_sz
    }

    /// Claims the next locally-initiated stream ID and advances the counter.
    ///
    /// Returns `UserError::OverflowedStreamId` if the local stream ID space
    /// has been exhausted.
    pub fn open(&mut self) -> Result<StreamId, UserError> {
        let stream_id = self.ensure_next_stream_id()?;
        self.next_stream_id = stream_id.next_id();
        Ok(stream_id)
    }

    /// Reserves the next locally-initiated stream ID.
    ///
    /// Same ID-allocation logic as `open`; kept as a separate entry point for
    /// the reserve (push) path.
    pub fn reserve_local(&mut self) -> Result<StreamId, UserError> {
        let stream_id = self.ensure_next_stream_id()?;
        self.next_stream_id = stream_id.next_id();
        Ok(stream_id)
    }

    /// Rejects connection-specific header fields, which are illegal in HTTP/2.
    ///
    /// `connection`, `transfer-encoding`, `upgrade`, `keep-alive`, and
    /// `proxy-connection` must not appear, and `te` is only allowed with the
    /// exact value "trailers". Returns `UserError::MalformedHeaders` otherwise.
    fn check_headers(fields: &http::HeaderMap) -> Result<(), UserError> {
        // 8.1.2.2. Connection-Specific Header Fields
        if fields.contains_key(http::header::CONNECTION)
            || fields.contains_key(http::header::TRANSFER_ENCODING)
            || fields.contains_key(http::header::UPGRADE)
            || fields.contains_key("keep-alive")
            || fields.contains_key("proxy-connection")
        {
            tracing::debug!("illegal connection-specific headers found");
            return Err(UserError::MalformedHeaders);
        } else if let Some(te) = fields.get(http::header::TE) {
            if te != "trailers" {
                tracing::debug!("illegal connection-specific headers found");
                return Err(UserError::MalformedHeaders);
            }
        }
        Ok(())
    }

    /// Validate and queue a PUSH_PROMISE frame for sending.
    ///
    /// Fails with `PeerDisabledServerPush` when the remote peer has disabled
    /// push, or with `MalformedHeaders` when the promised request carries
    /// connection-specific header fields.
    pub fn send_push_promise<B>(
        &mut self,
        frame: frame::PushPromise,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        if !self.is_push_enabled {
            return Err(UserError::PeerDisabledServerPush);
        }

        tracing::trace!(
            "send_push_promise; frame={:?}; init_window={:?}",
            frame,
            self.init_window_sz
        );

        Self::check_headers(frame.fields())?;

        // Queue the frame for sending
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        Ok(())
    }

    /// Validate and queue a HEADERS frame, transitioning the stream's send
    /// state via `send_open` (closing the send side when END_STREAM is set).
    ///
    /// Locally-initiated streams that are not pending a push are placed on the
    /// open queue before the frame is queued.
    pub fn send_headers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        tracing::trace!(
            "send_headers; frame={:?}; init_window={:?}",
            frame,
            self.init_window_sz
        );

        Self::check_headers(frame.fields())?;

        let end_stream = frame.is_end_stream();

        // Update the state
        stream.state.send_open(end_stream)?;

        let mut pending_open = false;
        if counts.peer().is_local_init(frame.stream_id()) && !stream.is_pending_push {
            self.prioritize.queue_open(stream);
            pending_open = true;
        }

        // Queue the frame for sending
        //
        // This call expects that, since new streams are in the open queue, new
        // streams won't be pushed on pending_send.
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        // Need to notify the connection when pushing onto pending_open since
        // queue_frame only notifies for pending_send.
        if pending_open {
            if let Some(task) = task.take() {
                task.wake();
            }
        }

        Ok(())
    }

    /// Send interim informational headers (1xx responses) without changing stream state.
    /// This allows multiple interim informational responses to be sent before the final response.
    pub fn send_interim_informational_headers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        _counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        tracing::trace!(
            "send_interim_informational_headers; frame={:?}; stream_id={:?}",
            frame,
            frame.stream_id()
        );

        // Validate headers
        Self::check_headers(frame.fields())?;

        debug_assert!(frame.is_informational(),
            "Frame must be informational (1xx status code) at this point. Validation should happen at the public API boundary.");
        debug_assert!(!frame.is_end_stream(),
            "Informational frames must not have end_stream flag set. Validation should happen at the internal send informational header streams.");

        // Queue the frame for sending WITHOUT changing stream state
        // This is the key difference from send_headers - we don't call stream.state.send_open()
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        Ok(())
    }

    /// Send an explicit RST_STREAM frame
    ///
    /// The stream's state is transitioned to reset unconditionally (unless it
    /// is already reset); an explicit RST_STREAM frame is only queued when the
    /// stream is not both closed and fully flushed.
    pub fn send_reset<B>(
        &mut self,
        reason: Reason,
        initiator: Initiator,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) {
        let is_reset = stream.state.is_reset();
        let is_closed = stream.state.is_closed();
        let is_empty = stream.pending_send.is_empty();
        let stream_id = stream.id;

        tracing::trace!(
            "send_reset(..., reason={:?}, initiator={:?}, stream={:?}, ..., \
             is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \
             state={:?} \
             ",
            reason,
            initiator,
            stream_id,
            is_reset,
            is_closed,
            is_empty,
            stream.state
        );

        if is_reset {
            // Don't double reset
            tracing::trace!(
                " -> not sending RST_STREAM ({:?} is already reset)",
                stream_id
            );
            return;
        }

        // Transition the state to reset no matter what.
        stream.set_reset(reason, initiator);

        // If closed AND the send queue is flushed, then the stream cannot be
        // reset explicitly, either. Implicit resets can still be queued.
        if is_closed && is_empty {
            tracing::trace!(
                " -> not sending explicit RST_STREAM ({:?} was closed \
                 and send queue was flushed)",
                stream_id
            );
            return;
        }

        // If the stream hasn't been opened yet (its initial HEADERS are still
        // sitting in `pending_open`/`pending_send`), clearing the queue would
        // drop those HEADERS and let a RST_STREAM become the first frame on an
        // idle stream. HTTP/2 forbids that: §5.1 allows only HEADERS/PRIORITY
        // on idle streams and §6.4 says RST_STREAM on idle is a PROTOCOL_ERROR.
        // Keep the queued HEADERS so the stream opens, then send the reset
        // immediately after.
        if !stream.is_pending_open {
            // Otherwise, drop any buffered DATA/HEADERS and only send the
            // reset.
            //
            // Note that we don't call `self.recv_err` because we want to enqueue
            // the reset frame before transitioning the stream inside
            // `reclaim_all_capacity`.
            self.prioritize.clear_queue(buffer, stream);
        }

        let frame = frame::Reset::new(stream.id, reason);

        tracing::trace!("send_reset -- queueing; frame={:?}", frame);
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);
        self.prioritize.reclaim_all_capacity(stream, counts);
    }

    /// Schedule a reset to be sent for this stream rather than queueing an
    /// explicit RST_STREAM frame immediately.
    ///
    /// No-op if the stream is already closed; otherwise records the scheduled
    /// reason in the stream state, reclaims reserved capacity, and schedules
    /// the stream for sending.
    pub fn schedule_implicit_reset(
        &mut self,
        stream: &mut store::Ptr,
        reason: Reason,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) {
        if stream.state.is_closed() {
            // Stream is already closed, nothing more to do
            return;
        }

        stream.state.set_scheduled_reset(reason);

        self.prioritize.reclaim_reserved_capacity(stream, counts);
        self.prioritize.schedule_send(stream, task);
    }

    /// Send a DATA frame; all flow-control accounting and queueing is
    /// delegated to the prioritization layer.
    pub fn send_data<B>(
        &mut self,
        frame: frame::Data<B>,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError>
    where
        B: Buf,
    {
        self.prioritize
            .send_data(frame, buffer, stream, counts, task)
    }

    /// Queue a trailers HEADERS frame, closing the send side of the stream.
    ///
    /// Fails with `UnexpectedFrameType` if the send side is not currently
    /// streaming.
    pub fn send_trailers<B>(
        &mut self,
        frame: frame::Headers,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        // TODO: Should this logic be moved into state.rs?
        if !stream.state.is_send_streaming() {
            return Err(UserError::UnexpectedFrameType);
        }

        stream.state.send_close();

        tracing::trace!("send_trailers -- queuing; frame={:?}", frame);
        self.prioritize
            .queue_frame(frame.into(), buffer, stream, task);

        // Release any excess capacity
        self.prioritize.reserve_capacity(0, stream, counts);

        Ok(())
    }

    /// Drive the outbound frame queue, writing as many frames as possible to
    /// the codec. Delegates to the prioritization layer.
    pub fn poll_complete<T, B>(
        &mut self,
        cx: &mut Context,
        buffer: &mut Buffer<Frame<B>>,
        store: &mut Store,
        counts: &mut Counts,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
        B: Buf,
    {
        self.prioritize
            .poll_complete(cx, buffer, store, counts, dst)
    }

    /// Request capacity to send data
    pub fn reserve_capacity(
        &mut self,
        capacity: WindowSize,
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) {
        self.prioritize.reserve_capacity(capacity, stream, counts)
    }

    /// Poll for an increase in this stream's available send capacity.
    ///
    /// Returns `Ready(None)` once the send side is no longer streaming,
    /// `Pending` (registering the waker via `wait_send`) until a capacity
    /// increase has been flagged, and the current capacity otherwise. The
    /// `send_capacity_inc` flag is consumed (reset to false) on success.
    pub fn poll_capacity(
        &mut self,
        cx: &Context,
        stream: &mut store::Ptr,
    ) -> Poll<Option<Result<WindowSize, UserError>>> {
        if !stream.state.is_send_streaming() {
            return Poll::Ready(None);
        }

        if !stream.send_capacity_inc {
            stream.wait_send(cx);
            return Poll::Pending;
        }

        stream.send_capacity_inc = false;

        Poll::Ready(Some(Ok(self.capacity(stream))))
    }

    /// Current available stream send capacity
    pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize {
        stream.capacity(self.prioritize.max_buffer_size())
    }

    /// Poll for the reason this stream was reset.
    ///
    /// `mode` identifies which public API is polling and is passed through to
    /// `state.ensure_reason`. Registers the waker and returns `Pending` when
    /// no reset reason is available yet.
    pub fn poll_reset(
        &self,
        cx: &Context,
        stream: &mut Stream,
        mode: PollReset,
    ) -> Poll<Result<Reason, crate::Error>> {
        match stream.state.ensure_reason(mode)? {
            Some(reason) => Poll::Ready(Ok(reason)),
            None => {
                stream.wait_send(cx);
                Poll::Pending
            }
        }
    }

    /// Apply a connection-level WINDOW_UPDATE received from the peer.
    pub fn recv_connection_window_update(
        &mut self,
        frame: frame::WindowUpdate,
        store: &mut Store,
        counts: &mut Counts,
    ) -> Result<(), Reason> {
        self.prioritize
            .recv_connection_window_update(frame.size_increment(), store, counts)
    }

    /// Apply a stream-level window increment received from the peer.
    ///
    /// On failure the stream is reset with `FLOW_CONTROL_ERROR` (library
    /// initiated) and the error is propagated to the caller.
    pub fn recv_stream_window_update<B>(
        &mut self,
        sz: WindowSize,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), Reason> {
        if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) {
            tracing::debug!("recv_stream_window_update !!; err={:?}", e);

            self.send_reset(
                Reason::FLOW_CONTROL_ERROR,
                Initiator::Library,
                buffer,
                stream,
                counts,
                task,
            );

            return Err(e);
        }

        Ok(())
    }

    /// Handle a GOAWAY frame received from the peer, lowering `max_stream_id`
    /// to the peer's advertised last stream ID.
    ///
    /// A GOAWAY whose last-stream-id is *higher* than a previously received
    /// one is a protocol error.
    pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), Error> {
        if last_stream_id > self.max_stream_id {
            // The remote endpoint sent a `GOAWAY` frame indicating a stream
            // that we never sent, or that we have already terminated on account
            // of previous `GOAWAY` frame. In either case, that is illegal.
            // (When sending multiple `GOAWAY`s, "Endpoints MUST NOT increase
            // the value they send in the last stream identifier, since the
            // peers might already have retried unprocessed requests on another
            // connection.")
            proto_err!(conn:
                "recv_go_away: last_stream_id ({:?}) > max_stream_id ({:?})",
                last_stream_id, self.max_stream_id,
            );
            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
        }

        self.max_stream_id = last_stream_id;
        Ok(())
    }

    /// Handle an error on this stream by dropping all queued outbound frames
    /// and reclaiming the capacity they held.
    pub fn handle_error<B>(
        &mut self,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) {
        // Clear all pending outbound frames
        self.prioritize.clear_queue(buffer, stream);
        self.prioritize.reclaim_all_capacity(stream, counts);
    }

    /// Apply a SETTINGS frame received from the remote peer.
    ///
    /// Updates extended-CONNECT and push-enabled flags, and adjusts the send
    /// flow-control window of every stream when the peer changes
    /// SETTINGS_INITIAL_WINDOW_SIZE.
    pub fn apply_remote_settings<B>(
        &mut self,
        settings: &frame::Settings,
        buffer: &mut Buffer<Frame<B>>,
        store: &mut Store,
        counts: &mut Counts,
        task: &mut Option<Waker>,
    ) -> Result<(), Error> {
        if let Some(val) = settings.is_extended_connect_protocol_enabled() {
            self.is_extended_connect_protocol_enabled = val;
        }

        // Applies an update to the remote endpoint's initial window size.
        //
        // Per RFC 7540 §6.9.2:
        //
        // In addition to changing the flow-control window for streams that are
        // not yet active, a SETTINGS frame can alter the initial flow-control
        // window size for streams with active flow-control windows (that is,
        // streams in the "open" or "half-closed (remote)" state). When the
        // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust
        // the size of all stream flow-control windows that it maintains by the
        // difference between the new value and the old value.
        //
        // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available
        // space in a flow-control window to become negative. A sender MUST
        // track the negative flow-control window and MUST NOT send new
        // flow-controlled frames until it receives WINDOW_UPDATE frames that
        // cause the flow-control window to become positive.
        if let Some(val) = settings.initial_window_size() {
            let old_val = self.init_window_sz;
            self.init_window_sz = val;

            match val.cmp(&old_val) {
                Ordering::Less => {
                    // We must decrease the (remote) window on every open stream.
                    let dec = old_val - val;
                    tracing::trace!("decrementing all windows; dec={}", dec);

                    let mut total_reclaimed = 0;
                    store.try_for_each(|mut stream| {
                        let stream = &mut *stream;

                        if stream.state.is_send_closed() && stream.buffered_send_data == 0 {
                            tracing::trace!(
                                "skipping send-closed stream; id={:?}; flow={:?}",
                                stream.id,
                                stream.send_flow
                            );

                            return Ok(());
                        }

                        tracing::trace!(
                            "decrementing stream window; id={:?}; decr={}; flow={:?}",
                            stream.id,
                            dec,
                            stream.send_flow
                        );

                        // TODO: this decrement can underflow based on received frames!
                        stream
                            .send_flow
                            .dec_send_window(dec)
                            .map_err(proto::Error::library_go_away)?;

                        // It's possible that decreasing the window causes
                        // `window_size` (the stream-specific window) to fall below
                        // `available` (the portion of the connection-level window
                        // that we have allocated to the stream).
                        // In this case, we should take that excess allocation away
                        // and reassign it to other streams.
                        let window_size = stream.send_flow.window_size();
                        let available = stream.send_flow.available().as_size();
                        let reclaimed = if available > window_size {
                            // Drop down to `window_size`.
                            let reclaim = available - window_size;
                            stream
                                .send_flow
                                .claim_capacity(reclaim)
                                .map_err(proto::Error::library_go_away)?;
                            total_reclaimed += reclaim;
                            reclaim
                        } else {
                            0
                        };

                        tracing::trace!(
                            "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}",
                            stream.id,
                            dec,
                            reclaimed,
                            stream.send_flow
                        );

                        // TODO: Should this notify the producer when the capacity
                        // of a stream is reduced? Maybe it should if the capacity
                        // is reduced to zero, allowing the producer to stop work.

                        Ok::<_, proto::Error>(())
                    })?;

                    self.prioritize
                        .assign_connection_capacity(total_reclaimed, store, counts);
                }
                Ordering::Greater => {
                    let inc = val - old_val;

                    store.try_for_each(|mut stream| {
                        self.recv_stream_window_update(inc, buffer, &mut stream, counts, task)
                            .map_err(Error::library_go_away)
                    })?;
                }
                Ordering::Equal => (),
            }
        }

        if let Some(val) = settings.is_push_enabled() {
            self.is_push_enabled = val
        }

        Ok(())
    }

    /// Clear all pending capacity/send/open queues in the prioritization
    /// layer (e.g. during connection teardown).
    pub fn clear_queues(&mut self, store: &mut Store, counts: &mut Counts) {
        self.prioritize.clear_pending_capacity(store, counts);
        self.prioritize.clear_pending_send(store, counts);
        self.prioritize.clear_pending_open(store, counts);
    }

    /// Returns `PROTOCOL_ERROR` if `id` refers to a locally-initiated stream
    /// that has not been allocated yet (i.e. would still be idle).
    pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> {
        if let Ok(next) = self.next_stream_id {
            if id >= next {
                return Err(Reason::PROTOCOL_ERROR);
            }
        }
        // if next_stream_id is overflowed, that's ok.

        Ok(())
    }

    /// Returns the next locally-initiated stream ID without claiming it, or
    /// `UserError::OverflowedStreamId` if the ID space is exhausted.
    pub fn ensure_next_stream_id(&self) -> Result<StreamId, UserError> {
        self.next_stream_id
            .map_err(|_| UserError::OverflowedStreamId)
    }

    /// Returns `true` if a locally-initiated stream with this ID may already
    /// have been created (its ID is below the next to be allocated).
    ///
    /// When the ID space has overflowed, conservatively returns `true`.
    pub fn may_have_created_stream(&self, id: StreamId) -> bool {
        if let Ok(next_id) = self.next_stream_id {
            // Peer::is_local_init should have been called beforehand
            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),);
            id < next_id
        } else {
            true
        }
    }

    /// Advance `next_stream_id` past `id` if it does not already exceed it.
    /// No-op once the ID space has overflowed.
    pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) {
        if let Ok(next_id) = self.next_stream_id {
            // Peer::is_local_init should have been called beforehand
            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated());
            if id >= next_id {
                self.next_stream_id = id.next_id();
            }
        }
    }

    /// Whether the peer has enabled the extended CONNECT protocol
    /// (as signalled via its SETTINGS frame).
    pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
        self.is_extended_connect_protocol_enabled
    }
}