2 Commits

2c6ad1d7b8 Update based on lints
Signed-off-by: Jacob Kiers <code@kiers.eu>
2025-01-09 20:38:05 +01:00
8fe1f7f57b Fix SNI header parsing
When a listener is configured to deal with TLS upstreams, we use the SNI
field of the TLS ClientHello message to decide where to send the traffic.

Until now, a buffer of 1024 bytes was used to temporarily store this
message. However, a TLS ClientHello message can be larger than that, up
to 16 KiB.

So now the first few bytes are read and parsed manually to find out how
long the message is, and then the entire ClientHello message is
retrieved.

Hopefully that fixes the issue that caused the ClientHello
determination to fail.

Closes #10

Signed-off-by: Jacob Kiers <code@kiers.eu>
2025-01-09 20:23:02 +01:00
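
The fix relies on the fixed layout of the start of a ClientHello: a 5-byte TLS
record header (content type 0x16 for handshake, two version bytes, and a
2-byte big-endian payload length), followed by a 4-byte handshake header whose
first byte is 0x01 for ClientHello. Peeking 9 bytes is therefore enough both
to confirm the record starts a ClientHello and to size the buffer. The diff
below calls a `client_hello_buffer_size` helper but does not show its body;
the following is only a minimal sketch of what such a helper could look like,
not the repository's actual implementation:

    // Sketch only: computes how many bytes the full ClientHello occupies,
    // based on the 9 peeked bytes. The real helper may differ.
    fn client_hello_buffer_size(header: &[u8; 9]) -> Result<usize, &'static str> {
        // Byte 0 is the record content type; 0x16 marks a handshake record.
        if header[0] != 0x16 {
            return Err("not a TLS handshake record");
        }
        // Byte 5 is the handshake message type; 0x01 marks a ClientHello.
        if header[5] != 0x01 {
            return Err("not a ClientHello");
        }
        // Bytes 3-4 hold the big-endian length of the record payload,
        // which the record layer caps at 2^14 = 16384 bytes.
        let record_len = u16::from_be_bytes([header[3], header[4]]) as usize;
        // Full message = 5-byte record header + payload.
        Ok(5 + record_len)
    }

Because `peek` does not consume bytes from the socket, the full ClientHello is
still available afterwards for the actual proxied connection.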
4 changed files with 15 additions and 165 deletions

CHANGELOG.md

@@ -7,15 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]

-## [0.1.10] - 2025-01-09
-
-### Fixed
-* The ClientHello TLS header is now read in full before it is parsed, solving
-  an error where there was not enough data to fully read it. In those cases
-  it was not possible to determine the upstream address and therefore the proxy
-  would fall back to the default action instead.
-### Changed
-* Updated some dependencies to prevent the build from breaking.
@@ -41,10 +32,10 @@ The ability to run `l4p` without arguments is now deprecated. Please use
 ## Previous versions

 [unreleased]: https://code.kiers.eu/jjkiers/layer4-proxy/compare/v0.1.9...HEAD
-[0.1.10]: https://code.kiers.eu/jjkiers/layer4-proxy/compare/v0.1.9...v0.1.10
 [0.1.9]: https://code.kiers.eu/jjkiers/layer4-proxy/compare/v0.1.8...v0.1.9

 Types of changes:

 * `Added` for new features.

Cargo.lock (generated)

@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 4
+version = 3

 [[package]]
 name = "addr2line"
@@ -516,7 +516,7 @@ dependencies = [
 [[package]]
 name = "l4p"
-version = "0.1.10"
+version = "0.1.9"
 dependencies = [
  "async-trait",
  "byte_string",

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "l4p"
-version = "0.1.11"
+version = "0.1.9"
 edition = "2021"
 authors = ["Jacob Kiers <code@kiers.eu>"]
 license = "Apache-2.0"

Rust source (SNI detection; file path not shown)

@@ -1,15 +1,12 @@
 use crate::servers::Proxy;
 use log::{debug, error, trace, warn};
 use std::error::Error;
-use std::io; // Import io for ErrorKind
 use std::sync::Arc;
-use std::time::Duration; // For potential delays
 use tls_parser::{
     parse_tls_extensions, parse_tls_raw_record, parse_tls_record_with_header, TlsMessage,
     TlsMessageHandshake,
 };
 use tokio::net::TcpStream;
-use tokio::time::timeout; // Use timeout for peek operations

 fn get_sni(buf: &[u8]) -> Vec<String> {
     let mut snis: Vec<String> = Vec::new();
@@ -60,9 +57,6 @@ fn get_sni(buf: &[u8]) -> Vec<String> {
     snis
 }

-// Timeout duration for waiting for TLS Hello data
-const TLS_PEEK_TIMEOUT: Duration = Duration::from_secs(5); // Adjust as needed
-
 pub(crate) async fn determine_upstream_name(
     inbound: &TcpStream,
     proxy: &Arc<Proxy>,
@@ -70,170 +64,35 @@ pub(crate) async fn determine_upstream_name(
     let default_upstream = proxy.default_action.clone();

     let mut header = [0u8; 9];
+    inbound.peek(&mut header).await?;

-    // --- Step 1: Peek the initial header (9 bytes) with timeout ---
-    match timeout(TLS_PEEK_TIMEOUT, async {
-        loop {
-            match inbound.peek(&mut header).await {
-                Ok(n) if n >= header.len() => return Ok::<usize, io::Error>(n), // Got enough bytes
-                Ok(0) => {
-                    // Connection closed cleanly before sending enough data
-                    trace!("Connection closed while peeking for TLS header");
-                    return Err(io::Error::new(
-                        io::ErrorKind::UnexpectedEof,
-                        "Connection closed while peeking for TLS header",
-                    )
-                    .into()); // Convert to Box<dyn Error>
-                }
-                Ok(_) => {
-                    // Not enough bytes yet, yield and loop again
-                    tokio::task::yield_now().await;
-                }
-                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
-                    // Should not happen with await, but yield defensively
-                    tokio::task::yield_now().await;
-                }
-                Err(e) => {
-                    // Other I/O error
-                    warn!("Error peeking for TLS header: {}", e);
-                    return Err(e.into()); // Convert to Box<dyn Error>
-                }
-            }
-        }
-    })
-    .await
-    {
-        Ok(Ok(_)) => { /* Header peeked successfully */ }
-        Ok(Err(e)) => {
-            // Inner loop returned an error (e.g., EOF, IO error)
-            trace!("Failed to peek header (inner error): {}", e);
-            return Ok(default_upstream); // Fallback on error/EOF
-        }
-        Err(_) => {
-            // Timeout occurred
-            error!("Timeout waiting for TLS header");
-            return Ok(default_upstream); // Fallback on timeout
-        }
-    }
-
-    // --- Step 2: Calculate required size ---
-    let required_bytes = match client_hello_buffer_size(&header) {
-        Ok(size) => size,
-        Err(e) => {
-            // Header was invalid or not a ClientHello
-            trace!("Could not determine required buffer size: {}", e);
-            return Ok(default_upstream);
-        }
-    };
+    let required_bytes = client_hello_buffer_size(&header)?;

-    // Basic sanity check on size
-    if required_bytes > 16384 + 9 {
-        // TLS max record size + header approx
-        error!(
-            "Calculated required TLS buffer size is too large: {}",
-            required_bytes
-        );
-        return Ok(default_upstream);
-    }
-
-    // --- Step 3: Peek the full ClientHello with timeout ---
     let mut hello_buf = vec![0; required_bytes];
-    match timeout(TLS_PEEK_TIMEOUT, async {
-        let mut total_peeked = 0;
-        loop {
-            // Peek into the portion of the buffer that hasn't been filled yet.
-            match inbound.peek(&mut hello_buf[total_peeked..]).await {
-                Ok(0) => {
-                    // Connection closed cleanly before sending full ClientHello
-                    trace!(
-                        "Connection closed while peeking for full ClientHello (peeked {}/{} bytes)",
-                        total_peeked,
-                        required_bytes
-                    );
-                    return Err::<usize, io::Error>(
-                        io::Error::new(
-                            io::ErrorKind::UnexpectedEof,
-                            "Connection closed while peeking for full ClientHello",
-                        )
-                        .into(),
-                    );
-                }
-                Ok(n) => {
-                    total_peeked += n;
-                    if total_peeked >= required_bytes {
-                        trace!("Successfully peeked {} bytes for ClientHello", total_peeked);
-                        return Ok(total_peeked); // Got enough
-                    } else {
-                        // Not enough bytes yet, yield and loop again
-                        trace!(
-                            "Peeked {}/{} bytes for ClientHello, waiting for more...",
-                            total_peeked,
-                            required_bytes
-                        );
-                        tokio::task::yield_now().await;
-                    }
-                }
-                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
-                    tokio::task::yield_now().await;
-                }
-                Err(e) => {
-                    warn!("Error peeking for full ClientHello: {}", e);
-                    return Err(e.into());
-                }
-            }
-        }
-    })
-    .await
-    {
-        Ok(Ok(_)) => { /* Full hello peeked successfully */ }
-        Ok(Err(e)) => {
-            error!("Could not peek full ClientHello (inner error): {}", e);
-            return Ok(default_upstream); // Fallback on error/EOF
-        }
-        Err(_) => {
-            error!(
-                "Timeout waiting for full ClientHello (needed {} bytes)",
-                required_bytes
-            );
-            return Ok(default_upstream); // Fallback on timeout
-        }
+    let read_bytes = inbound.peek(&mut hello_buf).await?;
+    if read_bytes < required_bytes.into() {
+        error!("Could not read enough bytes to determine SNI");
+        return Ok(default_upstream);
     }

-    // --- Step 4: Parse SNI ---
     let snis = get_sni(&hello_buf);

-    // --- Step 5: Determine upstream based on SNI ---
     if snis.is_empty() {
-        debug!("No SNI found in ClientHello, using default upstream.");
         return Ok(default_upstream);
     } else {
         match proxy.sni.clone() {
             Some(sni_map) => {
-                let mut upstream = default_upstream.clone(); // Clone here for default case
-                let mut found_match = false;
+                let mut upstream = default_upstream;
                 for sni in snis {
-                    // snis is already Vec<String>
-                    if let Some(target_upstream) = sni_map.get(&sni) {
-                        debug!(
-                            "Found matching SNI '{}', routing to upstream: {}",
-                            sni, target_upstream
-                        );
-                        upstream = target_upstream.clone();
-                        found_match = true;
+                    let m = sni_map.get(&sni);
+                    if m.is_some() {
+                        upstream = m.unwrap().clone();
                         break;
-                    } else {
-                        trace!("SNI '{}' not found in map.", sni);
                     }
                 }
-                if !found_match {
-                    debug!("SNI(s) found but none matched configuration, using default upstream.");
-                }
                 Ok(upstream)
             }
-            None => {
-                debug!("SNI found but no SNI map configured, using default upstream.");
-                Ok(default_upstream)
-            }
+            None => return Ok(default_upstream),
         }
     }
 }
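
The lookup that survives this diff takes the first SNI that appears in the
configured map and otherwise keeps the default action. For reference, the same
logic can be written without the `is_some()`/`unwrap()` pair; this is a
standalone sketch assuming plain `String` upstream names and a `HashMap`,
which may not match the repository's actual types:

    use std::collections::HashMap;

    // Returns the upstream for the first matching SNI, or the default.
    fn pick_upstream(
        snis: &[String],
        sni_map: &HashMap<String, String>,
        default: String,
    ) -> String {
        snis.iter()
            .find_map(|sni| sni_map.get(sni)) // first SNI present in the map
            .cloned()
            .unwrap_or(default) // no match: fall back to the default action
    }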