h2/proto/streams/recv.rs

use super::*;
use crate::codec::UserError;
use crate::frame::{PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE};
use crate::proto;

use http::{HeaderMap, Request, Response};

use std::cmp::Ordering;
use std::io;
use std::task::{Context, Poll, Waker};
use std::time::Instant;

#[derive(Debug)]
pub(super) struct Recv {
    /// Initial window size of remote initiated streams
    init_window_sz: WindowSize,

    /// Connection level flow control governing received data
    flow: FlowControl,

    /// Amount of connection window capacity currently used by outstanding streams.
    in_flight_data: WindowSize,

    /// The lowest stream ID that is still idle
    next_stream_id: Result<StreamId, StreamIdOverflow>,

    /// The stream ID of the last processed stream
    last_processed_id: StreamId,

    /// Any streams with a higher ID are ignored.
    ///
    /// This starts as MAX, but is lowered when a GOAWAY is received.
    ///
    /// > After sending a GOAWAY frame, the sender can discard frames for
    /// > streams initiated by the receiver with identifiers higher than
    /// > the identified last stream.
    max_stream_id: StreamId,

    /// Streams that have pending window updates
    pending_window_updates: store::Queue<stream::NextWindowUpdate>,

    /// New streams to be accepted
    pending_accept: store::Queue<stream::NextAccept>,

    /// Locally reset streams that should be reaped when they expire
    pending_reset_expired: store::Queue<stream::NextResetExpire>,

    /// How long locally reset streams should ignore received frames
    reset_duration: Duration,

    /// Holds frames that are waiting to be read
    buffer: Buffer<Event>,

    /// Refused StreamId; this represents a frame that must be sent out.
    refused: Option<StreamId>,

    /// If push promises are allowed to be received.
    is_push_enabled: bool,

    /// If extended connect protocol is enabled.
    is_extended_connect_protocol_enabled: bool,
}

#[derive(Debug)]
pub(super) enum Event {
    Headers(peer::PollMessage),
    Data(Bytes),
    Trailers(HeaderMap),
    InformationalHeaders(peer::PollMessage),
}

#[derive(Debug)]
pub(super) enum RecvHeaderBlockError<T> {
    Oversize(T),
    State(Error),
}

#[derive(Debug)]
pub(crate) enum Open {
    PushPromise,
    Headers,
}

impl Recv {
    pub fn new(peer: peer::Dyn, config: &Config) -> Self {
        let next_stream_id = if peer.is_server() { 1 } else { 2 };

        let mut flow = FlowControl::new();

        // connections always have the default window size, regardless of
        // settings
        flow.inc_window(DEFAULT_INITIAL_WINDOW_SIZE)
            .expect("invalid initial remote window size");
        flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE).unwrap();

        Recv {
            init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE,
            flow,
            in_flight_data: 0 as WindowSize,
            next_stream_id: Ok(next_stream_id.into()),
            pending_window_updates: store::Queue::new(),
            last_processed_id: StreamId::ZERO,
            max_stream_id: StreamId::MAX,
            pending_accept: store::Queue::new(),
            pending_reset_expired: store::Queue::new(),
            reset_duration: config.local_reset_duration,
            buffer: Buffer::new(),
            refused: None,
            is_push_enabled: config.local_push_enabled,
            is_extended_connect_protocol_enabled: config.extended_connect_protocol_enabled,
        }
    }
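
    // For illustration: the connection-level window is deliberately independent
    // of `SETTINGS_INITIAL_WINDOW_SIZE`. Even if the local settings advertise a
    // much larger per-stream window, a fresh connection still starts with the
    // 65,535-byte default here; the connection window only grows once
    // `set_target_connection_window` assigns extra capacity and a
    // connection-level WINDOW_UPDATE is sent. A rough sketch (values are only
    // an example):
    //
    //     // per-stream initial window from settings:      1 MiB
    //     // connection window right after `Recv::new`:    65,535 bytes
    //     // connection window after
    //     // `set_target_connection_window(1 MiB, ..)` and
    //     // the resulting WINDOW_UPDATE:                   1 MiB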

    /// Returns the initial receive window size
    pub fn init_window_sz(&self) -> WindowSize {
        self.init_window_sz
    }

    /// Returns the ID of the last processed stream
    pub fn last_processed_id(&self) -> StreamId {
        self.last_processed_id
    }

    /// Update state reflecting a new, remotely opened stream
    ///
    /// Returns the stream ID if the stream is accepted, or `None` if it is refused.
    pub fn open(
        &mut self,
        id: StreamId,
        mode: Open,
        counts: &mut Counts,
    ) -> Result<Option<StreamId>, Error> {
        assert!(self.refused.is_none());

        counts.peer().ensure_can_open(id, mode)?;

        let next_id = self.next_stream_id()?;
        if id < next_id {
            proto_err!(conn: "id ({:?}) < next_id ({:?})", id, next_id);
            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
        }

        self.next_stream_id = id.next_id();

        if !counts.can_inc_num_recv_streams() {
            self.refused = Some(id);
            return Ok(None);
        }

        Ok(Some(id))
    }
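
    // Roughly, remote stream IDs must only move forward. For a server whose
    // `next_stream_id` is currently 5 (an illustrative value):
    //
    //     open(3, ..)  // 3 < 5  -> connection error: GOAWAY with PROTOCOL_ERROR
    //     open(5, ..)  // ok; `next_stream_id` advances to 7
    //     open(9, ..)  // ok; IDs may skip ahead, `next_stream_id` becomes 11
    //
    // When the concurrency limit is reached the ID is still consumed, but the
    // stream is parked in `refused` so a REFUSED_STREAM reset can be sent later.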

    /// Transition the stream state based on receiving headers
    ///
    /// The caller ensures that the frame represents headers and not trailers.
    pub fn recv_headers(
        &mut self,
        frame: frame::Headers,
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) -> Result<(), RecvHeaderBlockError<Option<frame::Headers>>> {
        tracing::trace!("opening stream; init_window={}", self.init_window_sz);
        let is_initial = stream.state.recv_open(&frame)?;

        if is_initial {
            // TODO: be smarter about this logic
            if frame.stream_id() > self.last_processed_id {
                self.last_processed_id = frame.stream_id();
            }

            // Increment the number of concurrent streams
            counts.inc_num_recv_streams(stream);
        }

        if !stream.content_length.is_head() {
            use super::stream::ContentLength;
            use http::header;

            if let Some(content_length) = frame.fields().get(header::CONTENT_LENGTH) {
                let content_length = match frame::parse_u64(content_length.as_bytes()) {
                    Ok(v) => v,
                    Err(_) => {
                        proto_err!(stream: "could not parse content-length; stream={:?}", stream.id);
                        return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into());
                    }
                };

                stream.content_length = ContentLength::Remaining(content_length);
                // END_STREAM on headers frame with non-zero content-length is malformed.
                // https://datatracker.ietf.org/doc/html/rfc9113#section-8.1.1
                if frame.is_end_stream()
                    && content_length > 0
                    && frame
                        .pseudo()
                        .status
                        .map_or(true, |status| status != 204 && status != 304)
                {
                    proto_err!(stream: "recv_headers with END_STREAM: content-length is not zero; stream={:?};", stream.id);
                    return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into());
                }
            }
        }

        if frame.is_over_size() {
            // A frame is over size if the decoded header block was bigger than
            // SETTINGS_MAX_HEADER_LIST_SIZE.
            //
            // > A server that receives a larger header block than it is willing
            // > to handle can send an HTTP 431 (Request Header Fields Too
            // > Large) status code [RFC6585]. A client can discard responses
            // > that it cannot process.
            //
            // So, if peer is a server, we'll send a 431. In either case,
            // an error is recorded, which will send a REFUSED_STREAM,
            // since we don't want any of the data frames either.
            tracing::debug!(
                "stream error REQUEST_HEADER_FIELDS_TOO_LARGE -- \
                 recv_headers: frame is over size; stream={:?}",
                stream.id
            );
            return if counts.peer().is_server() && is_initial {
                let mut res = frame::Headers::new(
                    stream.id,
                    frame::Pseudo::response(::http::StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE),
                    HeaderMap::new(),
                );
                res.set_end_stream();
                Err(RecvHeaderBlockError::Oversize(Some(res)))
            } else {
                Err(RecvHeaderBlockError::Oversize(None))
            };
        }

        let stream_id = frame.stream_id();
        let (pseudo, fields) = frame.into_parts();

        if pseudo.protocol.is_some()
            && counts.peer().is_server()
            && !self.is_extended_connect_protocol_enabled
        {
            proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id);
            return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into());
        }

        if pseudo.status.is_some() && counts.peer().is_server() {
            proto_err!(stream: "cannot use :status header for requests; stream={:?}", stream.id);
            return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into());
        }

        if !pseudo.is_informational() {
            let message = counts
                .peer()
                .convert_poll_message(pseudo, fields, stream_id)?;

            // Push the frame onto the stream's recv buffer
            stream
                .pending_recv
                .push_back(&mut self.buffer, Event::Headers(message));
            stream.notify_recv();

            // Only servers can receive a headers frame that initiates the stream.
            // This is verified in `Streams` before calling this function.
            if counts.peer().is_server() {
                // Correctness: never push a stream to `pending_accept` without having the
                // corresponding headers frame pushed to `stream.pending_recv`.
                self.pending_accept.push(stream);
            }
        } else {
            // This is an informational response (1xx status code)
            // Convert to response and store it for polling
            let message = counts
                .peer()
                .convert_poll_message(pseudo, fields, stream_id)?;

            tracing::trace!("Received informational response: stream_id={:?}", stream_id);

            // Push the informational response onto the stream's recv buffer
            // with a special event type so it can be polled separately
            stream
                .pending_recv
                .push_back(&mut self.buffer, Event::InformationalHeaders(message));
            stream.notify_recv();
        }

        Ok(())
    }
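
    // For illustration, the RFC 9113 section 8.1.1 check above plays out like
    // this (example header blocks, not real traffic):
    //
    //     HEADERS  content-length: 0,  END_STREAM         -> accepted
    //     HEADERS  content-length: 10, END_STREAM         -> RST_STREAM(PROTOCOL_ERROR);
    //                                                        the promised body can never arrive
    //     HEADERS  :status: 304, content-length: 10,
    //              END_STREAM                              -> accepted (204/304 are exempt)
    //     HEADERS  content-length: 10  (no END_STREAM)     -> accepted; the remaining length
    //                                                        is tracked and checked by `recv_data`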

    /// Called by the server to get the request
    ///
    /// # Panics
    ///
    /// Panics if `stream.pending_recv` has no `Event::Headers` queued.
    ///
    pub fn take_request(&mut self, stream: &mut store::Ptr) -> Request<()> {
        use super::peer::PollMessage::*;

        match stream.pending_recv.pop_front(&mut self.buffer) {
            Some(Event::Headers(Server(request))) => request,
            _ => unreachable!("server stream queue must start with Headers"),
        }
    }

    /// Called by the client to get pushed response
    pub fn poll_pushed(
        &mut self,
        cx: &Context,
        stream: &mut store::Ptr,
    ) -> Poll<Option<Result<(Request<()>, store::Key), proto::Error>>> {
        use super::peer::PollMessage::*;

        let mut ppp = stream.pending_push_promises.take();
        let pushed = ppp.pop(stream.store_mut()).map(|mut pushed| {
            match pushed.pending_recv.pop_front(&mut self.buffer) {
                Some(Event::Headers(Server(headers))) => (headers, pushed.key()),
                // When frames are pushed into the queue, it is verified that
                // the first frame is a HEADERS frame.
                _ => panic!("Headers not set on pushed stream"),
            }
        });
        stream.pending_push_promises = ppp;
        if let Some(p) = pushed {
            Poll::Ready(Some(Ok(p)))
        } else {
            let is_open = stream.state.ensure_recv_open()?;

            if is_open {
                stream.push_task = Some(cx.waker().clone());
                Poll::Pending
            } else {
                Poll::Ready(None)
            }
        }
    }

    /// Called by the client to get the response
    pub fn poll_response(
        &mut self,
        cx: &Context,
        stream: &mut store::Ptr,
    ) -> Poll<Result<Response<()>, proto::Error>> {
        use super::peer::PollMessage::*;

        // Skip over any interim informational headers to find the main response
        loop {
            match stream.pending_recv.pop_front(&mut self.buffer) {
                Some(Event::Headers(Client(response))) => return Poll::Ready(Ok(response)),
                Some(Event::InformationalHeaders(_)) => {
                    tracing::trace!("Skipping informational response in poll_response - should be consumed via poll_informational; stream_id={:?}", stream.id);
                    continue;
                }
                Some(_) => panic!("poll_response called after response returned"),
                None => {
                    if !stream.state.ensure_recv_open()? {
                        proto_err!(stream: "poll_response: stream={:?} is not opened;",  stream.id);
                        return Poll::Ready(Err(Error::library_reset(
                            stream.id,
                            Reason::PROTOCOL_ERROR,
                        )));
                    }

                    stream.recv_task = Some(cx.waker().clone());
                    return Poll::Pending;
                }
            }
        }
    }

    /// Called by the client to get informational responses (1xx status codes)
    pub fn poll_informational(
        &mut self,
        cx: &Context,
        stream: &mut store::Ptr,
    ) -> Poll<Option<Result<Response<()>, proto::Error>>> {
        use super::peer::PollMessage::*;

        // Try to pop the front event and check if it's an informational response
        // If it's not, we put it back
        if let Some(event) = stream.pending_recv.pop_front(&mut self.buffer) {
            match event {
                Event::Headers(Client(response)) => {
                    // Final response
                    stream
                        .pending_recv
                        .push_front(&mut self.buffer, Event::Headers(Client(response)));
                    return Poll::Ready(None);
                }
                Event::InformationalHeaders(Client(response)) => {
                    // Found an informational response, return it
                    return Poll::Ready(Some(Ok(response)));
                }
                other => {
                    // Not an informational response, put it back at the front
                    stream.pending_recv.push_front(&mut self.buffer, other);
                }
            }
        }

        // No informational response available at the front
        if stream.state.ensure_recv_open()? {
            // Request to get notified once more frames arrive
            stream.recv_task = Some(cx.waker().clone());
            Poll::Pending
        } else {
            // No more frames will be received
            Poll::Ready(None)
        }
    }
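
    // Roughly, for a stream whose recv queue currently holds
    // `[InformationalHeaders(100), InformationalHeaders(103), Headers(200)]`
    // (an illustrative sequence), repeated polling behaves like:
    //
    //     poll_informational -> Ready(Some(Ok(100 response)))
    //     poll_informational -> Ready(Some(Ok(103 response)))
    //     poll_informational -> Ready(None)   // final response is now at the front
    //     poll_response      -> Ready(Ok(200 response))
    //
    // `poll_response` also tolerates callers that never drain the 1xx events:
    // it simply skips over any `InformationalHeaders` still sitting in the queue.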

    /// Transition the stream based on receiving trailers
    pub fn recv_trailers(
        &mut self,
        frame: frame::Headers,
        stream: &mut store::Ptr,
    ) -> Result<(), Error> {
        // Transition the state
        stream.state.recv_close()?;

        if stream.ensure_content_length_zero().is_err() {
            proto_err!(stream: "recv_trailers: content-length is not zero; stream={:?};",  stream.id);
            return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR));
        }

        let trailers = frame.into_fields();

        // Push the frame onto the stream's recv buffer
        stream
            .pending_recv
            .push_back(&mut self.buffer, Event::Trailers(trailers));
        stream.notify_recv();

        Ok(())
    }

    /// Releases capacity of the connection
    pub fn release_connection_capacity(&mut self, capacity: WindowSize, task: &mut Option<Waker>) {
        tracing::trace!(
            "release_connection_capacity; size={}, connection in_flight_data={}",
            capacity,
            self.in_flight_data,
        );

        // Decrement in-flight data
        self.in_flight_data -= capacity;

        // Assign capacity to connection
        // TODO: proper error handling
        let _res = self.flow.assign_capacity(capacity);
        debug_assert!(_res.is_ok());

        if self.flow.unclaimed_capacity().is_some() {
            if let Some(task) = task.take() {
                task.wake();
            }
        }
    }

    /// Releases capacity back to the connection & stream
    pub fn release_capacity(
        &mut self,
        capacity: WindowSize,
        stream: &mut store::Ptr,
        task: &mut Option<Waker>,
    ) -> Result<(), UserError> {
        tracing::trace!("release_capacity; size={}", capacity);

        if capacity > stream.in_flight_recv_data {
            return Err(UserError::ReleaseCapacityTooBig);
        }

        self.release_connection_capacity(capacity, task);

        // Decrement in-flight data
        stream.in_flight_recv_data -= capacity;

        // Assign capacity to stream
        // TODO: proper error handling
        let _res = stream.recv_flow.assign_capacity(capacity);
        debug_assert!(_res.is_ok());

        if stream.recv_flow.unclaimed_capacity().is_some() {
            // Queue the stream for sending the WINDOW_UPDATE frame.
            self.pending_window_updates.push(stream);

            if let Some(task) = task.take() {
                task.wake();
            }
        }

        Ok(())
    }
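
    // Roughly, the lifecycle a caller drives through here (sizes are only an
    // example):
    //
    //     // 16,384 bytes of DATA were received earlier; `recv_data` left them
    //     // counted in `stream.in_flight_recv_data` and `self.in_flight_data`.
    //     release_capacity(16_384, stream, task)?;
    //     // -> both in-flight counters drop by 16,384,
    //     // -> the stream and connection flow controllers regain that capacity,
    //     // -> the stream is queued so `send_stream_window_updates` can emit a
    //     //    WINDOW_UPDATE, and the connection task is woken.
    //
    // Releasing more than is currently in flight is a caller bug and is rejected
    // with `UserError::ReleaseCapacityTooBig` before any state is touched.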

    /// Release any unclaimed capacity for a closed stream.
    pub fn release_closed_capacity(&mut self, stream: &mut store::Ptr, task: &mut Option<Waker>) {
        debug_assert_eq!(stream.ref_count, 0);

        if stream.in_flight_recv_data == 0 {
            return;
        }

        tracing::trace!(
            "auto-release closed stream ({:?}) capacity: {:?}",
            stream.id,
            stream.in_flight_recv_data,
        );

        self.release_connection_capacity(stream.in_flight_recv_data, task);
        stream.in_flight_recv_data = 0;

        self.clear_recv_buffer(stream);
    }

    /// Set the "target" connection window size.
    ///
    /// By default, all new connections start with a 65,535-byte window. As
    /// streams use and release capacity, we will send WINDOW_UPDATEs for the
    /// connection to bring it back up to the initial "target".
    ///
    /// Setting a target means that we will try to tell the peer about
    /// WINDOW_UPDATEs so the peer knows it has about `target` window to use
    /// for the whole connection.
    ///
    /// The `task` is an optional parked task for the `Connection` that might
    /// be blocked on needing more window capacity.
    pub fn set_target_connection_window(
        &mut self,
        target: WindowSize,
        task: &mut Option<Waker>,
    ) -> Result<(), Reason> {
        tracing::trace!(
            "set_target_connection_window; target={}; available={}, reserved={}",
            target,
            self.flow.available(),
            self.in_flight_data,
        );

        // The current target connection window is our `available` plus any
        // in-flight data reserved by streams.
        //
        // Update the flow controller with the difference between the new
        // target and the current target.
        let current = self
            .flow
            .available()
            .add(self.in_flight_data)?
            .checked_size();
        if target > current {
            self.flow.assign_capacity(target - current)?;
        } else {
            self.flow.claim_capacity(current - target)?;
        }

        // If changing the target capacity means we gained a bunch of capacity,
        // enough that we went over the update threshold, then schedule sending
        // a connection WINDOW_UPDATE.
        if self.flow.unclaimed_capacity().is_some() {
            if let Some(task) = task.take() {
                task.wake();
            }
        }
        Ok(())
    }
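
    // A worked example (all numbers illustrative): suppose 40,000 bytes are
    // currently `available` and another 25,535 bytes are reserved as
    // `in_flight_data`, so `current` is 65,535. Raising the target to 1,048,576
    // assigns 1,048,576 - 65,535 = 983,041 bytes of new capacity; that surplus
    // shows up as unclaimed capacity, the connection task is woken, and
    // `send_connection_window_update` later turns it into a connection-level
    // WINDOW_UPDATE. Lowering the target instead claims the difference back, so
    // the window simply shrinks as the peer spends it.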

    pub(crate) fn apply_local_settings(
        &mut self,
        settings: &frame::Settings,
        store: &mut Store,
    ) -> Result<(), proto::Error> {
        if let Some(val) = settings.is_extended_connect_protocol_enabled() {
            self.is_extended_connect_protocol_enabled = val;
        }

        if let Some(target) = settings.initial_window_size() {
            let old_sz = self.init_window_sz;
            self.init_window_sz = target;

            tracing::trace!("update_initial_window_size; new={}; old={}", target, old_sz,);

            // Per RFC 7540 §6.9.2:
            //
            // In addition to changing the flow-control window for streams that are
            // not yet active, a SETTINGS frame can alter the initial flow-control
            // window size for streams with active flow-control windows (that is,
            // streams in the "open" or "half-closed (remote)" state). When the
            // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust
            // the size of all stream flow-control windows that it maintains by the
            // difference between the new value and the old value.
            //
            // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available
            // space in a flow-control window to become negative. A sender MUST
            // track the negative flow-control window and MUST NOT send new
            // flow-controlled frames until it receives WINDOW_UPDATE frames that
            // cause the flow-control window to become positive.

            match target.cmp(&old_sz) {
                Ordering::Less => {
                    // We must decrease the (local) window on every open stream.
                    let dec = old_sz - target;
                    tracing::trace!("decrementing all windows; dec={}", dec);

                    store.try_for_each(|mut stream| {
                        stream
                            .recv_flow
                            .dec_recv_window(dec)
                            .map_err(proto::Error::library_go_away)?;
                        Ok::<_, proto::Error>(())
                    })?;
                }
                Ordering::Greater => {
                    // We must increase the (local) window on every open stream.
                    let inc = target - old_sz;
                    tracing::trace!("incrementing all windows; inc={}", inc);
                    store.try_for_each(|mut stream| {
                        // XXX: Shouldn't the peer have already noticed our
                        // overflow and sent us a GOAWAY?
                        stream
                            .recv_flow
                            .inc_window(inc)
                            .map_err(proto::Error::library_go_away)?;
                        stream
                            .recv_flow
                            .assign_capacity(inc)
                            .map_err(proto::Error::library_go_away)?;
                        Ok::<_, proto::Error>(())
                    })?;
                }
                Ordering::Equal => (),
            }
        }

        Ok(())
    }
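
    // A worked example of the adjustment above (values illustrative): if the
    // local SETTINGS_INITIAL_WINDOW_SIZE drops from 65,535 to 16,384, every
    // open stream's receive window is decremented by 49,151; a stream that had
    // already advertised most of its window may go negative and simply has to
    // wait for in-flight data to be consumed and released. If the setting
    // instead rises from 65,535 to 131,072, every open stream gains 65,537
    // bytes of window, applied via `inc_window` plus `assign_capacity` so the
    // new room is immediately usable.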

    pub fn is_end_stream(&self, stream: &store::Ptr) -> bool {
        if !stream.state.is_recv_end_stream() {
            return false;
        }

        stream.pending_recv.is_empty()
    }

    pub fn recv_data(&mut self, frame: frame::Data, stream: &mut store::Ptr) -> Result<(), Error> {
        // could include padding
        let sz = frame.flow_controlled_len();

        // This should have been enforced at the codec::FramedRead layer, so
        // this is just a sanity check.
        assert!(sz <= MAX_WINDOW_SIZE as usize);

        let sz = sz as WindowSize;

        let is_ignoring_frame = stream.state.is_local_error();

        if !is_ignoring_frame && !stream.state.is_recv_streaming() {
            // TODO: There are cases where this can be a stream error of
            // STREAM_CLOSED instead...

            // Receiving a DATA frame when not expecting one is a protocol
            // error.
            proto_err!(conn: "unexpected DATA frame; stream={:?}", stream.id);
            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
        }

        tracing::trace!(
            "recv_data; size={}; connection={}; stream={}",
            sz,
            self.flow.window_size(),
            stream.recv_flow.window_size()
        );

        if is_ignoring_frame {
            tracing::trace!(
                "recv_data; frame ignored on locally reset {:?} for some time",
                stream.id,
            );
            return self.ignore_data(sz);
        }

        // Ensure that there is enough capacity on the connection before acting
        // on the stream.
        self.consume_connection_window(sz)?;

        if stream.recv_flow.window_size() < sz {
            // http://httpwg.org/specs/rfc7540.html#WINDOW_UPDATE
            // > A receiver MAY respond with a stream error (Section 5.4.2) or
            // > connection error (Section 5.4.1) of type FLOW_CONTROL_ERROR if
            // > it is unable to accept a frame.
            //
            // So, for violating the **stream** window, we can send either a
            // stream or connection error. We've opted to send a stream
            // error.
            return Err(Error::library_reset(stream.id, Reason::FLOW_CONTROL_ERROR));
        }

        // use payload len, padding doesn't count for content-length
        if stream.dec_content_length(frame.payload().len()).is_err() {
            proto_err!(stream:
                "recv_data: content-length overflow; stream={:?}; len={:?}",
                stream.id,
                frame.payload().len(),
            );
            return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR));
        }

        if frame.is_end_stream() {
            if stream.ensure_content_length_zero().is_err() {
                proto_err!(stream:
                    "recv_data: content-length underflow; stream={:?}; len={:?}",
                    stream.id,
                    frame.payload().len(),
                );
                return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR));
            }

            if stream.state.recv_close().is_err() {
                proto_err!(conn: "recv_data: failed to transition to closed state; stream={:?}", stream.id);
                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
            }
        }

        // Received a frame, but the stream's receive half has already been
        // released, so no one will read it; see issue #648.
        if !stream.is_recv {
            tracing::trace!(
                "recv_data; frame ignored on stream release {:?} for some time",
                stream.id,
            );
            self.release_connection_capacity(sz, &mut None);
            return Ok(());
        }

        // Update stream level flow control
        stream
            .recv_flow
            .send_data(sz)
            .map_err(proto::Error::library_go_away)?;

        // Track the data as in-flight
        stream.in_flight_recv_data += sz;

        // We auto-release the padded length, since the user cannot.
        if let Some(padded_len) = frame.padded_len() {
            tracing::trace!(
                "recv_data; auto-releasing padded length of {:?} for {:?}",
                padded_len,
                stream.id,
            );
            let _res = self.release_capacity(padded_len.into(), stream, &mut None);
            // cannot fail, we JUST added more in_flight data above.
            debug_assert!(_res.is_ok());
        }

        let event = Event::Data(frame.into_payload());

        // Push the frame onto the recv buffer
        stream.pending_recv.push_back(&mut self.buffer, event);
        stream.notify_recv();

        Ok(())
    }
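
    // Rough accounting for a single accepted DATA frame (sizes illustrative):
    // a frame whose flow-controlled length is 1,024 bytes but whose payload is
    // 1,000 bytes of application data
    //
    //     - shrinks both the connection and the stream receive window by 1,024,
    //     - adds 1,024 to `in_flight_data` / `stream.in_flight_recv_data`,
    //     - immediately auto-releases the padded remainder the user never sees,
    //     - decrements the tracked content-length by the 1,000-byte payload only,
    //     - and queues `Event::Data` so `poll_data` can hand the bytes out.
    //
    // The remaining 1,000 bytes of window stay reserved until the user (or
    // `release_closed_capacity`) gives them back.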

    pub fn ignore_data(&mut self, sz: WindowSize) -> Result<(), Error> {
        // Ensure that there is enough capacity on the connection...
        self.consume_connection_window(sz)?;

        // Since we are ignoring this frame,
        // we aren't returning the frame to the user. That means they
        // have no way to release the capacity back to the connection. So
        // we have to release it automatically.
        //
        // This call doesn't send a WINDOW_UPDATE immediately, just marks
        // the capacity as available to be reclaimed. When the available
        // capacity meets a threshold, a WINDOW_UPDATE is then sent.
        self.release_connection_capacity(sz, &mut None);
        Ok(())
    }

    pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), Error> {
        if self.flow.window_size() < sz {
            tracing::debug!(
                "connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});",
                self.flow.window_size(),
                sz,
            );
            return Err(Error::library_go_away(Reason::FLOW_CONTROL_ERROR));
        }

        // Update connection level flow control
        self.flow.send_data(sz).map_err(Error::library_go_away)?;

        // Track the data as in-flight
        self.in_flight_data += sz;
        Ok(())
    }

    pub fn recv_push_promise(
        &mut self,
        frame: frame::PushPromise,
        stream: &mut store::Ptr,
    ) -> Result<(), Error> {
        stream.state.reserve_remote()?;
        if frame.is_over_size() {
            // A frame is over size if the decoded header block was bigger than
            // SETTINGS_MAX_HEADER_LIST_SIZE.
            //
            // > A server that receives a larger header block than it is willing
            // > to handle can send an HTTP 431 (Request Header Fields Too
            // > Large) status code [RFC6585]. A client can discard responses
            // > that it cannot process.
            //
            // So, if peer is a server, we'll send a 431. In either case,
            // an error is recorded, which will send a PROTOCOL_ERROR,
            // since we don't want any of the data frames either.
            tracing::debug!(
                "stream error PROTOCOL_ERROR -- recv_push_promise: \
                 headers frame is over size; promised_id={:?};",
                frame.promised_id(),
            );
            return Err(Error::library_reset(
                frame.promised_id(),
                Reason::PROTOCOL_ERROR,
            ));
        }

        let promised_id = frame.promised_id();
        let (pseudo, fields) = frame.into_parts();
        let req = crate::server::Peer::convert_poll_message(pseudo, fields, promised_id)?;

        if let Err(e) = frame::PushPromise::validate_request(&req) {
            use PushPromiseHeaderError::*;
            match e {
                NotSafeAndCacheable => proto_err!(
                    stream:
                    "recv_push_promise: method {} is not safe and cacheable; promised_id={:?}",
                    req.method(),
                    promised_id,
                ),
                InvalidContentLength(e) => proto_err!(
                    stream:
                    "recv_push_promise; promised request has invalid content-length {:?}; promised_id={:?}",
                    e,
                    promised_id,
                ),
            }
            return Err(Error::library_reset(promised_id, Reason::PROTOCOL_ERROR));
        }

        use super::peer::PollMessage::*;
        stream
            .pending_recv
            .push_back(&mut self.buffer, Event::Headers(Server(req)));
        stream.notify_recv();
        stream.notify_push();
        Ok(())
    }

    /// Ensures that `id` is not in the `Idle` state.
    pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> {
        if let Ok(next) = self.next_stream_id {
            if id >= next {
                tracing::debug!(
                    "stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}",
                    id
                );
                return Err(Reason::PROTOCOL_ERROR);
            }
        }
        // if next_stream_id is overflowed, that's ok.

        Ok(())
    }
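
    // For illustration: with `next_stream_id` currently Ok(7), a frame naming
    // stream 9 refers to a stream that is still idle, so this returns
    // PROTOCOL_ERROR. Stream 5 (or 3, or 1) has either been opened or
    // implicitly closed by a later stream, so it passes. Once `next_stream_id`
    // has overflowed, every possible ID has been used and the check is skipped.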

    /// Handle remote sending an explicit RST_STREAM.
    pub fn recv_reset(
        &mut self,
        frame: frame::Reset,
        stream: &mut Stream,
        counts: &mut Counts,
    ) -> Result<(), Error> {
        // Resetting a stream that the user hasn't accepted is possible,
        // but should be done with care. These streams will continue
        // to take up memory in the accept queue, but will no longer be
        // counted as "concurrent" streams.
        //
        // So, we have a separate limit for these.
        //
        // See https://github.com/hyperium/hyper/issues/2877
        if stream.is_pending_accept {
            if counts.can_inc_num_remote_reset_streams() {
                counts.inc_num_remote_reset_streams();
            } else {
                tracing::warn!(
                    "recv_reset; remotely-reset pending-accept streams reached limit ({:?})",
                    counts.max_remote_reset_streams(),
                );
                return Err(Error::library_go_away_data(
                    Reason::ENHANCE_YOUR_CALM,
                    "too_many_resets",
                ));
            }
        }

        // Notify the stream
        stream.state.recv_reset(frame, stream.is_pending_send);

        stream.notify_send();
        stream.notify_recv();
        stream.notify_push();

        Ok(())
    }
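
    // Roughly, the separate limit above defends against a peer that opens
    // streams and immediately resets them faster than the server accepts them:
    // each RST_STREAM for a still-pending-accept stream counts against
    // `max_remote_reset_streams`, and once that budget is exhausted the whole
    // connection is torn down with an ENHANCE_YOUR_CALM GOAWAY carrying the
    // "too_many_resets" debug data, rather than letting the accept queue grow
    // without bound.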

    /// Handle a connection-level error
    pub fn handle_error(&mut self, err: &proto::Error, stream: &mut Stream) {
        // Receive an error
        stream.state.handle_error(err);

        // If a receiver is waiting, notify it
        stream.notify_send();
        stream.notify_recv();
        stream.notify_push();
    }

    pub fn go_away(&mut self, last_processed_id: StreamId) {
        assert!(self.max_stream_id >= last_processed_id);
        self.max_stream_id = last_processed_id;
    }

    pub fn recv_eof(&mut self, stream: &mut Stream) {
        stream.state.recv_eof();
        stream.notify_send();
        stream.notify_recv();
        stream.notify_push();
    }

    pub(super) fn clear_recv_buffer(&mut self, stream: &mut Stream) {
        while stream.pending_recv.pop_front(&mut self.buffer).is_some() {
            // drop it
        }
    }

    /// Get the max ID of streams we can receive.
    ///
    /// This gets lowered if we send a GOAWAY frame.
    pub fn max_stream_id(&self) -> StreamId {
        self.max_stream_id
    }

    pub fn next_stream_id(&self) -> Result<StreamId, Error> {
        if let Ok(id) = self.next_stream_id {
            Ok(id)
        } else {
            Err(Error::library_go_away(Reason::PROTOCOL_ERROR))
        }
    }

    pub fn may_have_created_stream(&self, id: StreamId) -> bool {
        if let Ok(next_id) = self.next_stream_id {
            // Peer::is_local_init should have been called beforehand
            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),);
            id < next_id
        } else {
            true
        }
    }

    pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) {
        if let Ok(next_id) = self.next_stream_id {
            // !Peer::is_local_init should have been called beforehand
            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated());
            if id >= next_id {
                self.next_stream_id = id.next_id();
            }
        }
    }

    /// Returns true if the remote peer can reserve a stream with the given ID.
    pub fn ensure_can_reserve(&self) -> Result<(), Error> {
        if !self.is_push_enabled {
            proto_err!(conn: "recv_push_promise: push is disabled");
            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
        }

        Ok(())
    }

    /// Add a locally reset stream to queue to be eventually reaped.
    pub fn enqueue_reset_expiration(&mut self, stream: &mut store::Ptr, counts: &mut Counts) {
        if !stream.state.is_local_error() || stream.is_pending_reset_expiration() {
            return;
        }

        if counts.can_inc_num_reset_streams() {
            counts.inc_num_reset_streams();
            tracing::trace!("enqueue_reset_expiration; added {:?}", stream.id);
            self.pending_reset_expired.push(stream);
        } else {
            tracing::trace!(
                "enqueue_reset_expiration; dropped {:?}, over max_concurrent_reset_streams",
                stream.id
            );
        }
    }

    /// Send any pending refusals.
    pub fn send_pending_refusal<T, B>(
        &mut self,
        cx: &mut Context,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
        B: Buf,
    {
        if let Some(stream_id) = self.refused {
            ready!(dst.poll_ready(cx))?;

            // Create the RST_STREAM frame
            let frame = frame::Reset::new(stream_id, Reason::REFUSED_STREAM);

            // Buffer the frame
            dst.buffer(frame.into()).expect("invalid RST_STREAM frame");
        }

        self.refused = None;

        Poll::Ready(Ok(()))
    }

    pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) {
        if !self.pending_reset_expired.is_empty() {
            let now = Instant::now();
            let reset_duration = self.reset_duration;
            while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| {
                let reset_at = stream.reset_at.expect("reset_at must be set if in queue");
                // rust-lang/rust#86470 tracks a bug in the standard library where `Instant`
                // subtraction can panic (because, on some platforms, `Instant` isn't actually
                // monotonic). We use a saturating operation to avoid this panic here.
                now.saturating_duration_since(reset_at) > reset_duration
            }) {
                counts.transition_after(stream, true);
            }
        }
    }

    pub fn clear_queues(
        &mut self,
        clear_pending_accept: bool,
        store: &mut Store,
        counts: &mut Counts,
    ) {
        self.clear_stream_window_update_queue(store, counts);
        self.clear_all_reset_streams(store, counts);

        if clear_pending_accept {
            self.clear_all_pending_accept(store, counts);
        }
    }

    fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) {
        while let Some(stream) = self.pending_window_updates.pop(store) {
            counts.transition(stream, |_, stream| {
                tracing::trace!("clear_stream_window_update_queue; stream={:?}", stream.id);
            })
        }
    }

    /// Called on EOF
    fn clear_all_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) {
        while let Some(stream) = self.pending_reset_expired.pop(store) {
            counts.transition_after(stream, true);
        }
    }

    fn clear_all_pending_accept(&mut self, store: &mut Store, counts: &mut Counts) {
        while let Some(stream) = self.pending_accept.pop(store) {
            counts.transition_after(stream, false);
        }
    }

    pub fn poll_complete<T, B>(
        &mut self,
        cx: &mut Context,
        store: &mut Store,
        counts: &mut Counts,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
        B: Buf,
    {
        // Send any pending connection level window updates
        ready!(self.send_connection_window_update(cx, dst))?;

        // Send any pending stream level window updates
        ready!(self.send_stream_window_updates(cx, store, counts, dst))?;

        Poll::Ready(Ok(()))
    }

    /// Send connection level window update
    fn send_connection_window_update<T, B>(
        &mut self,
        cx: &mut Context,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
        B: Buf,
    {
        if let Some(incr) = self.flow.unclaimed_capacity() {
            let frame = frame::WindowUpdate::new(StreamId::zero(), incr);

            // Ensure the codec has capacity
            ready!(dst.poll_ready(cx))?;

            // Buffer the WINDOW_UPDATE frame
            dst.buffer(frame.into())
                .expect("invalid WINDOW_UPDATE frame");

            // Update flow control
            self.flow
                .inc_window(incr)
                .expect("unexpected flow control state");
        }

        Poll::Ready(Ok(()))
    }

    /// Send stream level window update
    pub fn send_stream_window_updates<T, B>(
        &mut self,
        cx: &mut Context,
        store: &mut Store,
        counts: &mut Counts,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
        B: Buf,
    {
        loop {
            // Ensure the codec has capacity
            ready!(dst.poll_ready(cx))?;

            // Get the next stream
            let stream = match self.pending_window_updates.pop(store) {
                Some(stream) => stream,
                None => return Poll::Ready(Ok(())),
            };

            counts.transition(stream, |_, stream| {
                tracing::trace!("pending_window_updates -- pop; stream={:?}", stream.id);
                debug_assert!(!stream.is_pending_window_update);

                if !stream.state.is_recv_streaming() {
                    // No need to send window updates on the stream if the stream is
                    // no longer receiving data.
                    //
                    // TODO: is this correct? We could possibly send a window
                    // update on a ReservedRemote stream if we already know
                    // we want to stream the data faster...
                    return;
                }

                // TODO: de-dup
                if let Some(incr) = stream.recv_flow.unclaimed_capacity() {
                    // Create the WINDOW_UPDATE frame
                    let frame = frame::WindowUpdate::new(stream.id, incr);

                    // Buffer it
                    dst.buffer(frame.into())
                        .expect("invalid WINDOW_UPDATE frame");

                    // Update flow control
                    stream
                        .recv_flow
                        .inc_window(incr)
                        .expect("unexpected flow control state");
                }
            })
        }
    }
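
    // Putting the pieces together (sizes illustrative): a stream receives
    // 10,000 bytes of DATA, so its receive window drops from 65,535 to 55,535.
    // When the user releases those 10,000 bytes, `release_capacity` turns them
    // into unclaimed capacity and queues the stream here. On the next
    // `poll_complete` this loop pops the stream, buffers
    // `WINDOW_UPDATE(stream.id, 10_000)`, and `inc_window` restores the
    // advertised window to 65,535 so the peer can keep sending.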

    pub fn next_incoming(&mut self, store: &mut Store) -> Option<store::Key> {
        self.pending_accept.pop(store).map(|ptr| ptr.key())
    }

    pub fn poll_data(
        &mut self,
        cx: &Context,
        stream: &mut Stream,
    ) -> Poll<Option<Result<Bytes, proto::Error>>> {
        match stream.pending_recv.pop_front(&mut self.buffer) {
            Some(Event::Data(payload)) => Poll::Ready(Some(Ok(payload))),
            Some(event) => {
                // Frame is trailer
                stream.pending_recv.push_front(&mut self.buffer, event);

                // Notify the recv task. This is done just in case
                // `poll_trailers` was called.
                //
                // It is very likely that `notify_recv` will just be a no-op (as
                // the task will be None), so this isn't really much of a
                // performance concern. It also means we don't have to track
                // state to see if `poll_trailers` was called before `poll_data`
                // returned `None`.
                stream.notify_recv();

                // No more data frames
                Poll::Ready(None)
            }
            None => self.schedule_recv(cx, stream),
        }
    }

    pub fn poll_trailers(
        &mut self,
        cx: &Context,
        stream: &mut Stream,
    ) -> Poll<Option<Result<HeaderMap, proto::Error>>> {
        match stream.pending_recv.pop_front(&mut self.buffer) {
            Some(Event::Trailers(trailers)) => Poll::Ready(Some(Ok(trailers))),
            Some(event) => {
                // Frame is not trailers; not ready to poll trailers yet.
                stream.pending_recv.push_front(&mut self.buffer, event);

                Poll::Pending
            }
            None => self.schedule_recv(cx, stream),
        }
    }

    fn schedule_recv<T>(
        &mut self,
        cx: &Context,
        stream: &mut Stream,
    ) -> Poll<Option<Result<T, proto::Error>>> {
        if stream.state.ensure_recv_open()? {
            // Request to get notified once more frames arrive
            stream.recv_task = Some(cx.waker().clone());
            Poll::Pending
        } else {
            // No more frames will be received
            Poll::Ready(None)
        }
    }
}

// ===== impl Open =====

impl Open {
    pub fn is_push_promise(&self) -> bool {
        matches!(*self, Self::PushPromise)
    }
}

// ===== impl RecvHeaderBlockError =====

impl<T> From<Error> for RecvHeaderBlockError<T> {
    fn from(err: Error) -> Self {
        RecvHeaderBlockError::State(err)
    }
}