rustls/conn.rs
use alloc::boxed::Box;
use core::fmt::Debug;
use core::mem;
use core::ops::{Deref, DerefMut, Range};
#[cfg(feature = "std")]
use std::io;

use kernel::KernelConnection;

use crate::common_state::{CommonState, Context, DEFAULT_BUFFER_LIMIT, IoState, State};
use crate::enums::{AlertDescription, ContentType, ProtocolVersion};
use crate::error::{Error, PeerMisbehaved};
use crate::log::trace;
use crate::msgs::deframer::DeframerIter;
use crate::msgs::deframer::buffers::{BufferProgress, DeframerVecBuffer, Delocator, Locator};
use crate::msgs::deframer::handshake::HandshakeDeframer;
use crate::msgs::handshake::Random;
use crate::msgs::message::{InboundPlainMessage, Message, MessagePayload};
use crate::record_layer::Decrypted;
use crate::suites::ExtractedSecrets;
use crate::vecbuf::ChunkVecBuffer;

// pub so that it can be re-exported from the crate root
pub mod kernel;
pub(crate) mod unbuffered;

#[cfg(feature = "std")]
mod connection {
    use alloc::vec::Vec;
    use core::fmt::Debug;
    use core::ops::{Deref, DerefMut};
    use std::io::{self, BufRead, Read};

    use crate::ConnectionCommon;
    use crate::common_state::{CommonState, IoState};
    use crate::error::Error;
    use crate::msgs::message::OutboundChunks;
    use crate::suites::ExtractedSecrets;
    use crate::vecbuf::ChunkVecBuffer;

    /// A client or server connection.
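    ///
    /// The typical buffered event loop feeds TLS bytes in with [`Connection::read_tls`], lets
    /// [`Connection::process_new_packets`] decrypt and process them, and drains queued output
    /// with [`Connection::write_tls`]. A minimal sketch follows; `conn` and `sock` are
    /// hypothetical bindings (an established `Connection` and a connected `TcpStream`) assumed
    /// only for illustration.
    ///
    /// ```ignore
    /// use std::io::Read;
    ///
    /// fn service(conn: &mut rustls::Connection, sock: &mut std::net::TcpStream) -> std::io::Result<()> {
    ///     // Flush any TLS records rustls has queued (handshake data, alerts, encrypted writes).
    ///     while conn.wants_write() {
    ///         conn.write_tls(sock)?;
    ///     }
    ///
    ///     // Feed ciphertext from the socket into rustls, then decrypt and process it.
    ///     if conn.wants_read() {
    ///         conn.read_tls(sock)?;
    ///         conn.process_new_packets()
    ///             .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
    ///     }
    ///
    ///     // Any decrypted application data is now available through `conn.reader()`.
    ///     let mut buf = [0u8; 4096];
    ///     match conn.reader().read(&mut buf) {
    ///         Ok(n) => eprintln!("got {n} bytes of plaintext"),
    ///         Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {} // nothing yet
    ///         Err(err) => return Err(err),
    ///     }
    ///     Ok(())
    /// }
    /// ```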
    #[derive(Debug)]
    pub enum Connection {
        /// A client connection
        Client(crate::client::ClientConnection),
        /// A server connection
        Server(crate::server::ServerConnection),
    }

    impl Connection {
        /// Read TLS content from `rd`.
        ///
        /// See [`ConnectionCommon::read_tls()`] for more information.
        pub fn read_tls(&mut self, rd: &mut dyn Read) -> Result<usize, io::Error> {
            match self {
                Self::Client(conn) => conn.read_tls(rd),
                Self::Server(conn) => conn.read_tls(rd),
            }
        }

        /// Writes TLS messages to `wr`.
        ///
        /// See [`ConnectionCommon::write_tls()`] for more information.
        pub fn write_tls(&mut self, wr: &mut dyn io::Write) -> Result<usize, io::Error> {
            self.sendable_tls.write_to(wr)
        }

        /// Returns an object that allows reading plaintext.
        pub fn reader(&mut self) -> Reader<'_> {
            match self {
                Self::Client(conn) => conn.reader(),
                Self::Server(conn) => conn.reader(),
            }
        }

        /// Returns an object that allows writing plaintext.
        pub fn writer(&mut self) -> Writer<'_> {
            match self {
                Self::Client(conn) => Writer::new(&mut **conn),
                Self::Server(conn) => Writer::new(&mut **conn),
            }
        }

        /// Processes any new packets read by a previous call to [`Connection::read_tls`].
        ///
        /// See [`ConnectionCommon::process_new_packets()`] for more information.
        pub fn process_new_packets(&mut self) -> Result<IoState, Error> {
            match self {
                Self::Client(conn) => conn.process_new_packets(),
                Self::Server(conn) => conn.process_new_packets(),
            }
        }

        /// Derives key material from the agreed connection secrets.
        ///
        /// See [`ConnectionCommon::export_keying_material()`] for more information.
        pub fn export_keying_material<T: AsMut<[u8]>>(
            &self,
            output: T,
            label: &[u8],
            context: Option<&[u8]>,
        ) -> Result<T, Error> {
            match self {
                Self::Client(conn) => conn.export_keying_material(output, label, context),
                Self::Server(conn) => conn.export_keying_material(output, label, context),
            }
        }

        /// This function uses `io` to complete any outstanding IO for this connection.
        ///
        /// See [`ConnectionCommon::complete_io()`] for more information.
        pub fn complete_io<T>(&mut self, io: &mut T) -> Result<(usize, usize), io::Error>
        where
            Self: Sized,
            T: Read + io::Write,
        {
            match self {
                Self::Client(conn) => conn.complete_io(io),
                Self::Server(conn) => conn.complete_io(io),
            }
        }

        /// Extract secrets, so they can be used when configuring kTLS, for example.
        /// Should be used with care as it exposes secret key material.
        pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
            match self {
                Self::Client(client) => client.dangerous_extract_secrets(),
                Self::Server(server) => server.dangerous_extract_secrets(),
            }
        }

        /// Sets a limit on the internal buffers
        ///
        /// See [`ConnectionCommon::set_buffer_limit()`] for more information.
        pub fn set_buffer_limit(&mut self, limit: Option<usize>) {
            match self {
                Self::Client(client) => client.set_buffer_limit(limit),
                Self::Server(server) => server.set_buffer_limit(limit),
            }
        }

        /// Sends a TLS1.3 `key_update` message to refresh a connection's keys
        ///
        /// See [`ConnectionCommon::refresh_traffic_keys()`] for more information.
        pub fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
            match self {
                Self::Client(client) => client.refresh_traffic_keys(),
                Self::Server(server) => server.refresh_traffic_keys(),
            }
        }
    }

    impl Deref for Connection {
        type Target = CommonState;

        fn deref(&self) -> &Self::Target {
            match self {
                Self::Client(conn) => &conn.core.common_state,
                Self::Server(conn) => &conn.core.common_state,
            }
        }
    }

    impl DerefMut for Connection {
        fn deref_mut(&mut self) -> &mut Self::Target {
            match self {
                Self::Client(conn) => &mut conn.core.common_state,
                Self::Server(conn) => &mut conn.core.common_state,
            }
        }
    }

    /// A structure that implements [`std::io::Read`] for reading plaintext.
    pub struct Reader<'a> {
        pub(super) received_plaintext: &'a mut ChunkVecBuffer,
        pub(super) has_received_close_notify: bool,
        pub(super) has_seen_eof: bool,
    }

    impl<'a> Reader<'a> {
        /// Check the connection's state if no bytes are available for reading.
        fn check_no_bytes_state(&self) -> io::Result<()> {
            match (self.has_received_close_notify, self.has_seen_eof) {
                // cleanly closed; don't care about TCP EOF: express this as Ok(0)
                (true, _) => Ok(()),
                // unclean closure
                (false, true) => Err(io::Error::new(
                    io::ErrorKind::UnexpectedEof,
                    UNEXPECTED_EOF_MESSAGE,
                )),
                // connection still going, but needs more data: signal `WouldBlock` so that
                // the caller knows this
                (false, false) => Err(io::ErrorKind::WouldBlock.into()),
            }
        }

        /// Obtain a chunk of plaintext data received from the peer over this TLS connection.
        ///
        /// This method consumes `self` so that it can return a slice whose lifetime is bounded by
        /// the [`ConnectionCommon`] that created this `Reader`.
        pub fn into_first_chunk(self) -> io::Result<&'a [u8]> {
            match self.received_plaintext.chunk() {
                Some(chunk) => Ok(chunk),
                None => {
                    self.check_no_bytes_state()?;
                    Ok(&[])
                }
            }
        }
    }

    impl Read for Reader<'_> {
        /// Obtain plaintext data received from the peer over this TLS connection.
        ///
        /// If the peer closes the TLS session cleanly, this returns `Ok(0)` once all
        /// the pending data has been read. No further data can be received on that
        /// connection, so the underlying TCP connection should be half-closed too.
        ///
        /// If the peer closes the TLS session uncleanly (a TCP EOF without sending a
        /// `close_notify` alert) this function returns a `std::io::Error` of type
        /// `ErrorKind::UnexpectedEof` once any pending data has been read.
        ///
        /// Note that support for `close_notify` varies in peer TLS libraries: many do not
        /// support it and uncleanly close the TCP connection (this might be
        /// vulnerable to truncation attacks depending on the application protocol).
        /// This means applications using rustls must both handle EOF
        /// from this function, *and* unexpected EOF of the underlying TCP connection.
        ///
        /// If there are no bytes to read, this returns `Err(ErrorKind::WouldBlock.into())`.
        ///
        /// You may learn the number of bytes available at any time by inspecting
        /// the return of [`Connection::process_new_packets`].
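        ///
        /// A minimal sketch of handling those cases (assuming a hypothetical
        /// `conn: &mut rustls::Connection` whose incoming records have already been processed):
        ///
        /// ```ignore
        /// use std::io::Read;
        ///
        /// let mut buf = [0u8; 4096];
        /// match conn.reader().read(&mut buf) {
        ///     Ok(0) => { /* clean closure: the peer sent close_notify */ }
        ///     Ok(n) => { /* `buf[..n]` holds decrypted plaintext */ }
        ///     Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {
        ///         // No plaintext buffered right now: call read_tls()/process_new_packets() again.
        ///     }
        ///     Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => {
        ///         // TCP EOF without close_notify: possible truncation.
        ///     }
        ///     Err(err) => eprintln!("read error: {err}"),
        /// }
        /// ```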
        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
            let len = self.received_plaintext.read(buf)?;
            if len > 0 || buf.is_empty() {
                return Ok(len);
            }

            self.check_no_bytes_state()
                .map(|()| len)
        }

        /// Obtain plaintext data received from the peer over this TLS connection.
        ///
        /// If the peer closes the TLS session, this returns `Ok(())` without filling
        /// any more of the buffer once all the pending data has been read. No further
        /// data can be received on that connection, so the underlying TCP connection
        /// should be half-closed too.
        ///
        /// If the peer closes the TLS session uncleanly (a TCP EOF without sending a
        /// `close_notify` alert) this function returns a `std::io::Error` of type
        /// `ErrorKind::UnexpectedEof` once any pending data has been read.
        ///
        /// Note that support for `close_notify` varies in peer TLS libraries: many do not
        /// support it and uncleanly close the TCP connection (this might be
        /// vulnerable to truncation attacks depending on the application protocol).
        /// This means applications using rustls must both handle EOF
        /// from this function, *and* unexpected EOF of the underlying TCP connection.
        ///
        /// If there are no bytes to read, this returns `Err(ErrorKind::WouldBlock.into())`.
        ///
        /// You may learn the number of bytes available at any time by inspecting
        /// the return of [`Connection::process_new_packets`].
        #[cfg(read_buf)]
        fn read_buf(&mut self, mut cursor: core::io::BorrowedCursor<'_>) -> io::Result<()> {
            let before = cursor.written();
            self.received_plaintext
                .read_buf(cursor.reborrow())?;
            let len = cursor.written() - before;
            if len > 0 || cursor.capacity() == 0 {
                return Ok(());
            }

            self.check_no_bytes_state()
        }
    }

    impl BufRead for Reader<'_> {
        /// Obtain a chunk of plaintext data received from the peer over this TLS connection.
        /// This reads the same data as [`Reader::read()`], but returns a reference instead of
        /// copying the data.
        ///
        /// The caller should call [`Reader::consume()`] afterward to advance the buffer.
        ///
        /// See [`Reader::into_first_chunk()`] for a version of this function that returns a
        /// buffer with a longer lifetime.
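        ///
        /// A minimal sketch of zero-copy reading (assuming a hypothetical
        /// `conn: &mut rustls::Connection` with buffered plaintext, and an application-defined
        /// `process` function):
        ///
        /// ```ignore
        /// use std::io::BufRead;
        ///
        /// let mut reader = conn.reader();
        /// let chunk = reader.fill_buf()?;
        /// let used = process(chunk); // `process` returns how many bytes it consumed
        /// reader.consume(used);
        /// ```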
        fn fill_buf(&mut self) -> io::Result<&[u8]> {
            Reader {
                // reborrow
                received_plaintext: self.received_plaintext,
                ..*self
            }
            .into_first_chunk()
        }

        fn consume(&mut self, amt: usize) {
            self.received_plaintext
                .consume_first_chunk(amt)
        }
    }

    const UNEXPECTED_EOF_MESSAGE: &str = "peer closed connection without sending TLS close_notify: \
https://docs.rs/rustls/latest/rustls/manual/_03_howto/index.html#unexpected-eof";

    /// A structure that implements [`std::io::Write`] for writing plaintext.
    pub struct Writer<'a> {
        sink: &'a mut dyn PlaintextSink,
    }

    impl<'a> Writer<'a> {
        /// Create a new Writer.
        ///
        /// This is not an external interface. Get one of these objects
        /// from [`Connection::writer`].
        pub(crate) fn new(sink: &'a mut dyn PlaintextSink) -> Self {
            Writer { sink }
        }
    }

    impl io::Write for Writer<'_> {
        /// Send the plaintext `buf` to the peer, encrypting
        /// and authenticating it. Once this function succeeds
        /// you should call [`Connection::write_tls`] which will output the
        /// corresponding TLS records.
        ///
        /// This function buffers plaintext sent before the
        /// TLS handshake completes, and sends it as soon
        /// as it can. See [`ConnectionCommon::set_buffer_limit`] to control
        /// the size of this buffer.
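        ///
        /// A minimal sketch (assuming hypothetical bindings `conn: &mut rustls::Connection` and a
        /// connected `sock: &mut std::net::TcpStream`):
        ///
        /// ```ignore
        /// use std::io::Write;
        ///
        /// // Queue plaintext; rustls encrypts it (or holds it until the handshake completes).
        /// conn.writer().write_all(b"GET / HTTP/1.1\r\n\r\n")?;
        ///
        /// // Emit the resulting TLS records onto the socket.
        /// while conn.wants_write() {
        ///     conn.write_tls(sock)?;
        /// }
        /// ```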
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            self.sink.write(buf)
        }

        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
            self.sink.write_vectored(bufs)
        }

        fn flush(&mut self) -> io::Result<()> {
            self.sink.flush()
        }
    }

    /// Internal trait implemented by the [`ServerConnection`]/[`ClientConnection`]
    /// allowing them to be the subject of a [`Writer`].
    ///
    /// [`ServerConnection`]: crate::ServerConnection
    /// [`ClientConnection`]: crate::ClientConnection
    pub(crate) trait PlaintextSink {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize>;
        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize>;
        fn flush(&mut self) -> io::Result<()>;
    }

    impl<T> PlaintextSink for ConnectionCommon<T> {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            let len = self
                .core
                .common_state
                .buffer_plaintext(buf.into(), &mut self.sendable_plaintext);
            self.core.maybe_refresh_traffic_keys();
            Ok(len)
        }

        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
            let payload_owner: Vec<&[u8]>;
            let payload = match bufs.len() {
                0 => return Ok(0),
                1 => OutboundChunks::Single(bufs[0].deref()),
                _ => {
                    payload_owner = bufs
                        .iter()
                        .map(|io_slice| io_slice.deref())
                        .collect();

                    OutboundChunks::new(&payload_owner)
                }
            };
            let len = self
                .core
                .common_state
                .buffer_plaintext(payload, &mut self.sendable_plaintext);
            self.core.maybe_refresh_traffic_keys();
            Ok(len)
        }

        fn flush(&mut self) -> io::Result<()> {
            Ok(())
        }
    }
}

#[cfg(feature = "std")]
pub use connection::{Connection, Reader, Writer};

#[derive(Debug)]
pub(crate) struct ConnectionRandoms {
    pub(crate) client: [u8; 32],
    pub(crate) server: [u8; 32],
}

impl ConnectionRandoms {
    pub(crate) fn new(client: Random, server: Random) -> Self {
        Self {
            client: client.0,
            server: server.0,
        }
    }
}

/// Interface shared by client and server connections.
pub struct ConnectionCommon<Data> {
    pub(crate) core: ConnectionCore<Data>,
    deframer_buffer: DeframerVecBuffer,
    sendable_plaintext: ChunkVecBuffer,
}

impl<Data> ConnectionCommon<Data> {
    /// Processes any new packets read by a previous call to
    /// [`Connection::read_tls`].
    ///
    /// Errors from this function relate to TLS protocol errors, and
    /// are fatal to the connection. Future calls after an error will do
    /// no new work and will return the same error. After an error is
    /// received from [`process_new_packets`], you should not call [`read_tls`]
    /// any more (it will fill up buffers to no purpose). However, you
    /// may call the other methods on the connection, including `write`,
    /// `send_close_notify`, and `write_tls`. Most likely you will want to
    /// call `write_tls` to send any alerts queued by the error and then
    /// close the underlying connection.
    ///
    /// Success from this function comes with some sundry state data
    /// about the connection.
    ///
    /// [`read_tls`]: Connection::read_tls
    /// [`process_new_packets`]: Connection::process_new_packets
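    ///
    /// A minimal sketch of the error path described above (assuming hypothetical bindings
    /// `conn: &mut rustls::Connection` and `sock: &mut std::net::TcpStream`):
    ///
    /// ```ignore
    /// if let Err(err) = conn.process_new_packets() {
    ///     // Try to flush any fatal alert queued to describe `err`, then give up.
    ///     let _ = conn.write_tls(sock);
    ///     let _ = sock.shutdown(std::net::Shutdown::Both);
    ///     return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, err));
    /// }
    /// ```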
    #[inline]
    pub fn process_new_packets(&mut self) -> Result<IoState, Error> {
        self.core
            .process_new_packets(&mut self.deframer_buffer, &mut self.sendable_plaintext)
    }

    /// Derives key material from the agreed connection secrets.
    ///
    /// This function fills in `output` with `output.len()` bytes of key
    /// material derived from the master session secret using `label`
    /// and `context` for diversification. Ownership of the buffer is taken
    /// by the function and returned via the Ok result to ensure no key
    /// material leaks if the function fails.
    ///
    /// See RFC5705 for more details on what this does and is for.
    ///
    /// For TLS1.3 connections, this function does not use the
    /// "early" exporter at any point.
    ///
    /// This function fails if called prior to the handshake completing;
    /// check with [`CommonState::is_handshaking`] first.
    ///
    /// This function fails if `output.len()` is zero.
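    ///
    /// A minimal sketch (assuming a hypothetical `conn: &rustls::Connection` whose handshake has
    /// completed; the label and context shown are illustrative values only):
    ///
    /// ```ignore
    /// // Derive 32 bytes of keying material bound to this TLS session (RFC 5705 style exporter).
    /// let ekm: [u8; 32] = conn.export_keying_material(
    ///     [0u8; 32],
    ///     b"EXPERIMENTAL my application label",
    ///     Some(b"my context"),
    /// )?;
    /// ```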
    #[inline]
    pub fn export_keying_material<T: AsMut<[u8]>>(
        &self,
        output: T,
        label: &[u8],
        context: Option<&[u8]>,
    ) -> Result<T, Error> {
        self.core
            .export_keying_material(output, label, context)
    }

    /// Extract secrets, so they can be used when configuring kTLS, for example.
    /// Should be used with care as it exposes secret key material.
    pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
        self.core.dangerous_extract_secrets()
    }

    /// Sets a limit on the internal buffers used to buffer
    /// unsent plaintext (prior to completing the TLS handshake)
    /// and unsent TLS records. This limit acts only on application
    /// data written through [`Connection::writer`].
    ///
    /// By default the limit is 64KB. The limit can be set
    /// at any time, even if the current buffer use is higher.
    ///
    /// [`None`] means no limit applies, and will mean that written
    /// data is buffered without bound -- it is up to the application
    /// to appropriately schedule its plaintext and TLS writes to bound
    /// memory usage.
    ///
    /// For illustration: `Some(1)` means a limit of one byte applies:
    /// [`Connection::writer`] will accept only one byte, encrypt it and
    /// add a TLS header. Once this is sent via [`Connection::write_tls`],
    /// another byte may be sent.
    ///
    /// # Internal write-direction buffering
    /// rustls has two buffers whose sizes are bounded by this setting:
    ///
    /// ## Buffering of unsent plaintext data prior to handshake completion
    ///
    /// Calls to [`Connection::writer`] before or during the handshake
    /// are buffered (up to the limit specified here). Once the
    /// handshake completes this data is encrypted and the resulting
    /// TLS records are added to the outgoing buffer.
    ///
    /// ## Buffering of outgoing TLS records
    ///
    /// This buffer is used to store TLS records that rustls needs to
    /// send to the peer. It is used in these two circumstances:
    ///
    /// - by [`Connection::process_new_packets`] when a handshake or alert
    ///   TLS record needs to be sent.
    /// - by [`Connection::writer`] post-handshake: the plaintext is
    ///   encrypted and the resulting TLS record is buffered.
    ///
    /// This buffer is emptied by [`Connection::write_tls`].
    ///
    /// [`Connection::writer`]: crate::Connection::writer
    /// [`Connection::write_tls`]: crate::Connection::write_tls
    /// [`Connection::process_new_packets`]: crate::Connection::process_new_packets
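    ///
    /// A minimal sketch (assuming a hypothetical `conn: &mut rustls::Connection`):
    ///
    /// ```ignore
    /// // Allow up to 128 KiB in each of the two write-direction buffers described above.
    /// conn.set_buffer_limit(Some(128 * 1024));
    ///
    /// // Or remove the bound entirely; the application must then pace its own writes.
    /// conn.set_buffer_limit(None);
    /// ```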
    pub fn set_buffer_limit(&mut self, limit: Option<usize>) {
        self.sendable_plaintext.set_limit(limit);
        self.sendable_tls.set_limit(limit);
    }

    /// Sends a TLS1.3 `key_update` message to refresh a connection's keys.
    ///
    /// This call refreshes our encryption keys. Once the peer receives the message,
    /// it refreshes _its_ encryption and decryption keys and sends a response.
    /// Once we receive that response, we refresh our decryption keys to match.
    /// At the end of this process, keys in both directions have been refreshed.
    ///
    /// Note that this process does not happen synchronously: this call just
    /// arranges that the `key_update` message will be included in the next
    /// `write_tls` output.
    ///
    /// This fails with `Error::HandshakeNotComplete` if called before the initial
    /// handshake is complete, or if a version prior to TLS1.3 is negotiated.
    ///
    /// # Usage advice
    /// Note that other implementations (including rustls) may enforce limits on
    /// the number of `key_update` messages allowed on a given connection to prevent
    /// denial of service. Therefore, this should be called sparingly.
    ///
    /// rustls implicitly and automatically refreshes traffic keys when needed
    /// according to the selected cipher suite's cryptographic constraints. There
    /// is therefore no need to call this manually to avoid cryptographic keys
    /// "wearing out".
    ///
    /// The main reason to call this manually is to roll keys when it is known
    /// a connection will be idle for a long period.
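    ///
    /// A minimal sketch (assuming a hypothetical `conn: &mut rustls::Connection` on an
    /// established TLS 1.3 session and a connected `sock: &mut std::net::TcpStream`):
    ///
    /// ```ignore
    /// // Queue a key_update request before an expected idle period...
    /// conn.refresh_traffic_keys()?;
    ///
    /// // ...and make sure the message actually reaches the peer.
    /// while conn.wants_write() {
    ///     conn.write_tls(sock)?;
    /// }
    /// ```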
    pub fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
        self.core.refresh_traffic_keys()
    }
}

#[cfg(feature = "std")]
impl<Data> ConnectionCommon<Data> {
    /// Returns an object that allows reading plaintext.
    pub fn reader(&mut self) -> Reader<'_> {
        let common = &mut self.core.common_state;
        Reader {
            received_plaintext: &mut common.received_plaintext,
            // Are we done? i.e., have we processed all received messages, and received a
            // close_notify to indicate that no new messages will arrive?
            has_received_close_notify: common.has_received_close_notify,
            has_seen_eof: common.has_seen_eof,
        }
    }

    /// Returns an object that allows writing plaintext.
    pub fn writer(&mut self) -> Writer<'_> {
        Writer::new(self)
    }

    /// This function uses `io` to complete any outstanding IO for
    /// this connection.
    ///
    /// This is a convenience function which solely uses other parts
    /// of the public API.
    ///
    /// What this means depends on the connection state:
    ///
    /// - If the connection [`is_handshaking`], then IO is performed until
    ///   the handshake is complete.
    /// - Otherwise, if [`wants_write`] is true, [`write_tls`] is invoked
    ///   until it is all written.
    /// - Otherwise, if [`wants_read`] is true, [`read_tls`] is invoked
    ///   once.
    ///
    /// The return value is the number of bytes read from and written
    /// to `io`, respectively. Once both `read()` and `write()` yield `WouldBlock`,
    /// this function will propagate the error.
    ///
    /// Errors from TLS record handling (i.e., from [`process_new_packets`])
    /// are wrapped in an `io::ErrorKind::InvalidData`-kind error.
    ///
    /// [`is_handshaking`]: CommonState::is_handshaking
    /// [`wants_read`]: CommonState::wants_read
    /// [`wants_write`]: CommonState::wants_write
    /// [`write_tls`]: ConnectionCommon::write_tls
    /// [`read_tls`]: ConnectionCommon::read_tls
    /// [`process_new_packets`]: ConnectionCommon::process_new_packets
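    ///
    /// A minimal sketch of driving the handshake to completion over a blocking socket (assuming
    /// hypothetical bindings `conn: &mut rustls::Connection` and a connected
    /// `sock: &mut std::net::TcpStream`):
    ///
    /// ```ignore
    /// while conn.is_handshaking() {
    ///     let (bytes_read, bytes_written) = conn.complete_io(sock)?;
    ///     eprintln!("handshake progress: read {bytes_read}, wrote {bytes_written}");
    /// }
    /// ```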
    pub fn complete_io<T>(&mut self, io: &mut T) -> Result<(usize, usize), io::Error>
    where
        Self: Sized,
        T: io::Read + io::Write,
    {
        let mut eof = false;
        let mut wrlen = 0;
        let mut rdlen = 0;
        loop {
            let (mut blocked_write, mut blocked_read) = (None, None);
            let until_handshaked = self.is_handshaking();

            if !self.wants_write() && !self.wants_read() {
                // We will make no further progress.
                return Ok((rdlen, wrlen));
            }

            while self.wants_write() {
                match self.write_tls(io) {
                    Ok(0) => {
                        io.flush()?;
                        return Ok((rdlen, wrlen)); // EOF.
                    }
                    Ok(n) => wrlen += n,
                    Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                        blocked_write = Some(err);
                        break;
                    }
                    Err(err) => return Err(err),
                }
            }
            if wrlen > 0 {
                io.flush()?;
            }

            if !until_handshaked && wrlen > 0 {
                return Ok((rdlen, wrlen));
            }

            // If we want to write, but are WouldBlocked by the underlying IO, *and*
            // have no desire to read; that is everything.
            if let (Some(_), false) = (&blocked_write, self.wants_read()) {
                return match wrlen {
                    0 => Err(blocked_write.unwrap()),
                    _ => Ok((rdlen, wrlen)),
                };
            }

            while !eof && self.wants_read() {
                let read_size = match self.read_tls(io) {
                    Ok(0) => {
                        eof = true;
                        Some(0)
                    }
                    Ok(n) => {
                        rdlen += n;
                        Some(n)
                    }
                    Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                        blocked_read = Some(err);
                        break;
                    }
                    Err(err) if err.kind() == io::ErrorKind::Interrupted => None, // nothing to do
                    Err(err) => return Err(err),
                };
                if read_size.is_some() {
                    break;
                }
            }

            if let Err(e) = self.process_new_packets() {
                // In case we have an alert to send describing this error, try a last-gasp
                // write -- but don't predate the primary error.
                let _ignored = self.write_tls(io);
                let _ignored = io.flush();
                return Err(io::Error::new(io::ErrorKind::InvalidData, e));
            };

            // If we want to read, but are WouldBlocked by the underlying IO, *and*
            // have no desire to write; that is everything.
            if let (Some(_), false) = (&blocked_read, self.wants_write()) {
                return match rdlen {
                    0 => Err(blocked_read.unwrap()),
                    _ => Ok((rdlen, wrlen)),
                };
            }

            // if we're doing IO until handshaked, and we believe we've finished handshaking,
            // but process_new_packets() has queued TLS data to send, loop around again to write
            // the queued messages.
            if until_handshaked && !self.is_handshaking() && self.wants_write() {
                continue;
            }

            let blocked = blocked_write.zip(blocked_read);
            match (eof, until_handshaked, self.is_handshaking(), blocked) {
                (_, true, false, _) => return Ok((rdlen, wrlen)),
                (_, _, _, Some((e, _))) if rdlen == 0 && wrlen == 0 => return Err(e),
                (_, false, _, _) => return Ok((rdlen, wrlen)),
                (true, true, true, _) => return Err(io::Error::from(io::ErrorKind::UnexpectedEof)),
                _ => {}
            }
        }
    }

    /// Extract the first handshake message.
    ///
    /// This is a shortcut to the `process_new_packets()` -> `process_msg()` ->
    /// `process_handshake_messages()` path, specialized for the first handshake message.
    pub(crate) fn first_handshake_message(&mut self) -> Result<Option<Message<'static>>, Error> {
        let mut buffer_progress = self.core.hs_deframer.progress();

        let res = self
            .core
            .deframe(
                None,
                self.deframer_buffer.filled_mut(),
                &mut buffer_progress,
            )
            .map(|opt| opt.map(|pm| Message::try_from(pm).map(|m| m.into_owned())));

        match res? {
            Some(Ok(msg)) => {
                self.deframer_buffer
                    .discard(buffer_progress.take_discard());
                Ok(Some(msg))
            }
            Some(Err(err)) => Err(self.send_fatal_alert(AlertDescription::DecodeError, err)),
            None => Ok(None),
        }
    }

    pub(crate) fn replace_state(&mut self, new: Box<dyn State<Data>>) {
        self.core.state = Ok(new);
    }

    /// Read TLS content from `rd` into the internal buffer.
    ///
    /// Due to the internal buffering, `rd` can supply TLS messages in arbitrary-sized chunks (like
    /// a socket or pipe might).
    ///
    /// You should call [`process_new_packets()`] each time a call to this function succeeds in order
    /// to empty the incoming TLS data buffer.
    ///
    /// This function returns `Ok(0)` when the underlying `rd` does so. This typically happens when
    /// a socket is cleanly closed, or a file is at EOF. Errors may result from the IO done through
    /// `rd`; additionally, errors of `ErrorKind::Other` are emitted to signal backpressure, which
    /// can be relieved as follows:
    ///
    /// * To empty the incoming TLS data buffer, call [`process_new_packets()`] each time a call
    ///   to this function succeeds.
    /// * To empty the incoming plaintext data buffer, read from the [`reader()`] after the call
    ///   to [`process_new_packets()`].
    ///
    /// This function also returns `Ok(0)` once a `close_notify` alert has been successfully
    /// received. No additional data is ever read in this state.
    ///
    /// [`process_new_packets()`]: ConnectionCommon::process_new_packets
    /// [`reader()`]: ConnectionCommon::reader
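    ///
    /// A minimal sketch of one pass of the read loop (assuming hypothetical bindings
    /// `conn: &mut rustls::Connection` and a connected `sock: &mut std::net::TcpStream`):
    ///
    /// ```ignore
    /// use std::io::Read;
    ///
    /// // Pull ciphertext from the socket into rustls' internal buffer.
    /// let n = conn.read_tls(sock)?;
    /// if n == 0 {
    ///     // EOF on the socket, or a close_notify was already received.
    /// }
    ///
    /// // Decrypt and process whatever arrived.
    /// conn.process_new_packets()
    ///     .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
    ///
    /// // Any application data is now readable as plaintext.
    /// let mut buf = [0u8; 4096];
    /// match conn.reader().read(&mut buf) {
    ///     Ok(n) => { /* use `buf[..n]` */ }
    ///     Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { /* nothing yet */ }
    ///     Err(err) => eprintln!("read error: {err}"),
    /// }
    /// ```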
    pub fn read_tls(&mut self, rd: &mut dyn io::Read) -> Result<usize, io::Error> {
        if self.received_plaintext.is_full() {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "received plaintext buffer full",
            ));
        }

        if self.has_received_close_notify {
            return Ok(0);
        }

        let res = self
            .deframer_buffer
            .read(rd, self.core.hs_deframer.is_active());
        if let Ok(0) = res {
            self.has_seen_eof = true;
        }
        res
    }

    /// Writes TLS messages to `wr`.
    ///
    /// On success, this function returns `Ok(n)` where `n` is the number of bytes written to `wr`
    /// (after encoding and encryption).
    ///
    /// After this function returns, the connection buffer may not yet be fully flushed. The
    /// [`CommonState::wants_write`] function can be used to check if the output buffer is empty.
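    ///
    /// A minimal sketch of flushing everything rustls has queued (assuming hypothetical bindings
    /// `conn: &mut rustls::Connection` and a connected `sock: &mut std::net::TcpStream`):
    ///
    /// ```ignore
    /// use std::io::Write;
    ///
    /// while conn.wants_write() {
    ///     let n = conn.write_tls(sock)?;
    ///     eprintln!("wrote {n} TLS bytes");
    /// }
    /// sock.flush()?;
    /// ```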
    pub fn write_tls(&mut self, wr: &mut dyn io::Write) -> Result<usize, io::Error> {
        self.sendable_tls.write_to(wr)
    }
}

impl<'a, Data> From<&'a mut ConnectionCommon<Data>> for Context<'a, Data> {
    fn from(conn: &'a mut ConnectionCommon<Data>) -> Self {
        Self {
            common: &mut conn.core.common_state,
            data: &mut conn.core.data,
            sendable_plaintext: Some(&mut conn.sendable_plaintext),
        }
    }
}

impl<T> Deref for ConnectionCommon<T> {
    type Target = CommonState;

    fn deref(&self) -> &Self::Target {
        &self.core.common_state
    }
}

impl<T> DerefMut for ConnectionCommon<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.core.common_state
    }
}

impl<Data> From<ConnectionCore<Data>> for ConnectionCommon<Data> {
    fn from(core: ConnectionCore<Data>) -> Self {
        Self {
            core,
            deframer_buffer: DeframerVecBuffer::default(),
            sendable_plaintext: ChunkVecBuffer::new(Some(DEFAULT_BUFFER_LIMIT)),
        }
    }
}

/// Interface shared by unbuffered client and server connections.
pub struct UnbufferedConnectionCommon<Data> {
    pub(crate) core: ConnectionCore<Data>,
    wants_write: bool,
    emitted_peer_closed_state: bool,
}

impl<Data> From<ConnectionCore<Data>> for UnbufferedConnectionCommon<Data> {
    fn from(core: ConnectionCore<Data>) -> Self {
        Self {
            core,
            wants_write: false,
            emitted_peer_closed_state: false,
        }
    }
}

impl<Data> UnbufferedConnectionCommon<Data> {
    /// Extract secrets, so they can be used when configuring kTLS, for example.
    /// Should be used with care as it exposes secret key material.
    pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
        self.core.dangerous_extract_secrets()
    }
}

impl<T> Deref for UnbufferedConnectionCommon<T> {
    type Target = CommonState;

    fn deref(&self) -> &Self::Target {
        &self.core.common_state
    }
}

pub(crate) struct ConnectionCore<Data> {
    pub(crate) state: Result<Box<dyn State<Data>>, Error>,
    pub(crate) data: Data,
    pub(crate) common_state: CommonState,
    pub(crate) hs_deframer: HandshakeDeframer,

    /// We limit consecutive empty fragments to avoid a route for the peer to send
    /// us significant but fruitless traffic.
    seen_consecutive_empty_fragments: u8,
}

impl<Data> ConnectionCore<Data> {
    pub(crate) fn new(state: Box<dyn State<Data>>, data: Data, common_state: CommonState) -> Self {
        Self {
            state: Ok(state),
            data,
            common_state,
            hs_deframer: HandshakeDeframer::default(),
            seen_consecutive_empty_fragments: 0,
        }
    }

    pub(crate) fn process_new_packets(
        &mut self,
        deframer_buffer: &mut DeframerVecBuffer,
        sendable_plaintext: &mut ChunkVecBuffer,
    ) -> Result<IoState, Error> {
        let mut state = match mem::replace(&mut self.state, Err(Error::HandshakeNotComplete)) {
            Ok(state) => state,
            Err(e) => {
                self.state = Err(e.clone());
                return Err(e);
            }
        };

        let mut buffer_progress = self.hs_deframer.progress();

        loop {
            let res = self.deframe(
                Some(&*state),
                deframer_buffer.filled_mut(),
                &mut buffer_progress,
            );

            let opt_msg = match res {
                Ok(opt_msg) => opt_msg,
                Err(e) => {
                    self.state = Err(e.clone());
                    deframer_buffer.discard(buffer_progress.take_discard());
                    return Err(e);
                }
            };

            let Some(msg) = opt_msg else {
                break;
            };

            match self.process_msg(msg, state, Some(sendable_plaintext)) {
                Ok(new) => state = new,
                Err(e) => {
                    self.state = Err(e.clone());
                    deframer_buffer.discard(buffer_progress.take_discard());
                    return Err(e);
                }
            }

            if self
                .common_state
                .has_received_close_notify
            {
                // "Any data received after a closure alert has been received MUST be ignored."
                // -- <https://datatracker.ietf.org/doc/html/rfc8446#section-6.1>
                // This is data that has already been accepted in `read_tls`.
                buffer_progress.add_discard(deframer_buffer.filled().len());
                break;
            }

            deframer_buffer.discard(buffer_progress.take_discard());
        }

        deframer_buffer.discard(buffer_progress.take_discard());
        self.state = Ok(state);
        Ok(self.common_state.current_io_state())
    }

    /// Pull a message out of the deframer and send any messages that need to be sent as a result.
    fn deframe<'b>(
        &mut self,
        state: Option<&dyn State<Data>>,
        buffer: &'b mut [u8],
        buffer_progress: &mut BufferProgress,
    ) -> Result<Option<InboundPlainMessage<'b>>, Error> {
        // before processing any more of `buffer`, return any extant messages from `hs_deframer`
        if self.hs_deframer.has_message_ready() {
            Ok(self.take_handshake_message(buffer, buffer_progress))
        } else {
            self.process_more_input(state, buffer, buffer_progress)
        }
    }

    fn take_handshake_message<'b>(
        &mut self,
        buffer: &'b mut [u8],
        buffer_progress: &mut BufferProgress,
    ) -> Option<InboundPlainMessage<'b>> {
        self.hs_deframer
            .iter(buffer)
            .next()
            .map(|(message, discard)| {
                buffer_progress.add_discard(discard);
                message
            })
    }

    fn process_more_input<'b>(
        &mut self,
        state: Option<&dyn State<Data>>,
        buffer: &'b mut [u8],
        buffer_progress: &mut BufferProgress,
    ) -> Result<Option<InboundPlainMessage<'b>>, Error> {
        let version_is_tls13 = matches!(
            self.common_state.negotiated_version,
            Some(ProtocolVersion::TLSv1_3)
        );

        let locator = Locator::new(buffer);

        loop {
            let mut iter = DeframerIter::new(&mut buffer[buffer_progress.processed()..]);

            let (message, processed) = loop {
                let message = match iter.next().transpose() {
                    Ok(Some(message)) => message,
                    Ok(None) => return Ok(None),
                    Err(err) => return Err(self.handle_deframe_error(err, state)),
                };

                let allowed_plaintext = match message.typ {
                    // CCS messages are always plaintext.
                    ContentType::ChangeCipherSpec => true,
                    // Alerts are allowed to be plaintext if-and-only-if:
                    // * The negotiated protocol version is TLS 1.3. - In TLS 1.2 it is unambiguous when
                    //   keying changes based on the CCS message. Only TLS 1.3 requires these heuristics.
                    // * We have not yet decrypted any messages from the peer - if we have we don't
                    //   expect any plaintext.
                    // * The payload size is indicative of a plaintext alert message.
                    ContentType::Alert
                        if version_is_tls13
                            && !self
                                .common_state
                                .record_layer
                                .has_decrypted()
                            && message.payload.len() <= 2 =>
                    {
                        true
                    }
                    // In other circumstances, we expect all messages to be encrypted.
                    _ => false,
                };

                if allowed_plaintext && !self.hs_deframer.is_active() {
                    break (message.into_plain_message(), iter.bytes_consumed());
                }

                let message = match self
                    .common_state
                    .record_layer
                    .decrypt_incoming(message)
                {
                    // failed decryption during trial decryption is not allowed to be
                    // interleaved with partial handshake data.
                    Ok(None) if !self.hs_deframer.is_aligned() => {
                        return Err(
                            PeerMisbehaved::RejectedEarlyDataInterleavedWithHandshakeMessage.into(),
                        );
                    }

                    // failed decryption during trial decryption.
                    Ok(None) => continue,

                    Ok(Some(message)) => message,

                    Err(err) => return Err(self.handle_deframe_error(err, state)),
                };

                let Decrypted {
                    want_close_before_decrypt,
                    plaintext,
                } = message;

                if want_close_before_decrypt {
                    self.common_state.send_close_notify();
                }

                break (plaintext, iter.bytes_consumed());
            };

            if !self.hs_deframer.is_aligned() && message.typ != ContentType::Handshake {
                // "Handshake messages MUST NOT be interleaved with other record
                // types. That is, if a handshake message is split over two or more
                // records, there MUST NOT be any other records between them."
                // https://www.rfc-editor.org/rfc/rfc8446#section-5.1
                return Err(PeerMisbehaved::MessageInterleavedWithHandshakeMessage.into());
            }

            match message.payload.len() {
                0 => {
                    if self.seen_consecutive_empty_fragments
                        == ALLOWED_CONSECUTIVE_EMPTY_FRAGMENTS_MAX
                    {
                        return Err(PeerMisbehaved::TooManyEmptyFragments.into());
                    }
                    self.seen_consecutive_empty_fragments += 1;
                }
                _ => {
                    self.seen_consecutive_empty_fragments = 0;
                }
            };

            buffer_progress.add_processed(processed);

            // do an end-run around the borrow checker, converting `message` (containing
            // a borrowed slice) to an unborrowed one (containing a `Range` into the
            // same buffer). the reborrow happens inside the branch that returns the
            // message.
            //
            // is fixed by -Zpolonius
            // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md#problem-case-3-conditional-control-flow-across-functions
            let unborrowed = InboundUnborrowedMessage::unborrow(&locator, message);

            if unborrowed.typ != ContentType::Handshake {
                let message = unborrowed.reborrow(&Delocator::new(buffer));
                buffer_progress.add_discard(processed);
                return Ok(Some(message));
            }

            let message = unborrowed.reborrow(&Delocator::new(buffer));
            self.hs_deframer
                .input_message(message, &locator, buffer_progress.processed());
            self.hs_deframer.coalesce(buffer)?;

            self.common_state.aligned_handshake = self.hs_deframer.is_aligned();

            if self.hs_deframer.has_message_ready() {
                // trial decryption finishes with the first handshake message after it started.
                self.common_state
                    .record_layer
                    .finish_trial_decryption();

                return Ok(self.take_handshake_message(buffer, buffer_progress));
            }
        }
    }

    fn handle_deframe_error(&mut self, error: Error, state: Option<&dyn State<Data>>) -> Error {
        match error {
            error @ Error::InvalidMessage(_) => {
                if self.common_state.is_quic() {
                    self.common_state.quic.alert = Some(AlertDescription::DecodeError);
                    error
                } else {
                    self.common_state
                        .send_fatal_alert(AlertDescription::DecodeError, error)
                }
            }
            Error::PeerSentOversizedRecord => self
                .common_state
                .send_fatal_alert(AlertDescription::RecordOverflow, error),
            Error::DecryptError => {
                if let Some(state) = state {
                    state.handle_decrypt_error();
                }
                self.common_state
                    .send_fatal_alert(AlertDescription::BadRecordMac, error)
            }

            error => error,
        }
    }

    fn process_msg(
        &mut self,
        msg: InboundPlainMessage<'_>,
        state: Box<dyn State<Data>>,
        sendable_plaintext: Option<&mut ChunkVecBuffer>,
    ) -> Result<Box<dyn State<Data>>, Error> {
        // Drop CCS messages during handshake in TLS1.3
        if msg.typ == ContentType::ChangeCipherSpec
            && !self
                .common_state
                .may_receive_application_data
            && self.common_state.is_tls13()
        {
            if !msg.is_valid_ccs() {
                // "An implementation which receives any other change_cipher_spec value or
                // which receives a protected change_cipher_spec record MUST abort the
                // handshake with an "unexpected_message" alert."
                return Err(self.common_state.send_fatal_alert(
                    AlertDescription::UnexpectedMessage,
                    PeerMisbehaved::IllegalMiddleboxChangeCipherSpec,
                ));
            }

            self.common_state
                .received_tls13_change_cipher_spec()?;
            trace!("Dropping CCS");
            return Ok(state);
        }

        // Now we can fully parse the message payload.
        let msg = match Message::try_from(msg) {
            Ok(msg) => msg,
            Err(err) => {
                return Err(self
                    .common_state
                    .send_fatal_alert(AlertDescription::from(err), err));
            }
        };

        // For alerts, we have separate logic.
        if let MessagePayload::Alert(alert) = &msg.payload {
            self.common_state.process_alert(alert)?;
            return Ok(state);
        }

        self.common_state
            .process_main_protocol(msg, state, &mut self.data, sendable_plaintext)
    }

    pub(crate) fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
        Ok(self
            .dangerous_into_kernel_connection()?
            .0)
    }

    pub(crate) fn dangerous_into_kernel_connection(
        self,
    ) -> Result<(ExtractedSecrets, KernelConnection<Data>), Error> {
        if !self
            .common_state
            .enable_secret_extraction
        {
            return Err(Error::General("Secret extraction is disabled".into()));
        }

        if self.common_state.is_handshaking() {
            return Err(Error::HandshakeNotComplete);
        }

        if !self
            .common_state
            .sendable_tls
            .is_empty()
        {
            return Err(Error::General(
                "cannot convert into a KernelConnection while there are still buffered TLS records to send"
                    .into()
            ));
        }

        let state = self.state?;

        let record_layer = &self.common_state.record_layer;
        let secrets = state.extract_secrets()?;
        let secrets = ExtractedSecrets {
            tx: (record_layer.write_seq(), secrets.tx),
            rx: (record_layer.read_seq(), secrets.rx),
        };

        let state = state.into_external_state()?;
        let external = KernelConnection::new(state, self.common_state)?;

        Ok((secrets, external))
    }

    pub(crate) fn export_keying_material<T: AsMut<[u8]>>(
        &self,
        mut output: T,
        label: &[u8],
        context: Option<&[u8]>,
    ) -> Result<T, Error> {
        if output.as_mut().is_empty() {
            return Err(Error::General(
                "export_keying_material with zero-length output".into(),
            ));
        }

        match self.state.as_ref() {
            Ok(st) => st
                .export_keying_material(output.as_mut(), label, context)
                .map(|_| output),
            Err(e) => Err(e.clone()),
        }
    }

    /// Trigger a `refresh_traffic_keys` if required by `CommonState`.
    fn maybe_refresh_traffic_keys(&mut self) {
        if mem::take(
            &mut self
                .common_state
                .refresh_traffic_keys_pending,
        ) {
            let _ = self.refresh_traffic_keys();
        }
    }

    fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
        match &mut self.state {
            Ok(st) => st.send_key_update_request(&mut self.common_state),
            Err(e) => Err(e.clone()),
        }
    }
}

/// Data specific to the peer's side (client or server).
pub trait SideData: Debug {}

/// An InboundPlainMessage which does not borrow its payload, but
/// references a range that can later be borrowed.
struct InboundUnborrowedMessage {
    typ: ContentType,
    version: ProtocolVersion,
    bounds: Range<usize>,
}

impl InboundUnborrowedMessage {
    fn unborrow(locator: &Locator, msg: InboundPlainMessage<'_>) -> Self {
        Self {
            typ: msg.typ,
            version: msg.version,
            bounds: locator.locate(msg.payload),
        }
    }

    fn reborrow<'b>(self, delocator: &Delocator<'b>) -> InboundPlainMessage<'b> {
        InboundPlainMessage {
            typ: self.typ,
            version: self.version,
            payload: delocator.slice_from_range(&self.bounds),
        }
    }
}

/// cf. BoringSSL's `kMaxEmptyRecords`
/// <https://github.com/google/boringssl/blob/dec5989b793c56ad4dd32173bd2d8595ca78b398/ssl/tls_record.cc#L124-L128>
const ALLOWED_CONSECUTIVE_EMPTY_FRAGMENTS_MAX: u8 = 32;