feat: add upnp support (#1449)

This commit is contained in:
Debugger Chen
2026-04-21 17:19:04 +08:00
committed by GitHub
parent f4319c4d4f
commit 5cd0a3e846
26 changed files with 3707 additions and 235 deletions
+4
View File
@@ -128,6 +128,10 @@ jobs:
- name: Setup tools for test
run: sudo apt install bridge-utils
- name: Setup upnpd for test
run: |
sudo apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y miniupnpd miniupnpd-iptables iptables
- name: Setup system for test
run: |
Generated
+406 -108
View File
File diff suppressed because it is too large Load Diff
+2
View File
@@ -252,6 +252,8 @@ shellexpand = "3.1.1"
# for fake tcp
flume = { version = "0.12", optional = true }
igd-next = { version = "0.17.0", features = ["aio_tokio"] }
natpmp = "0.5.0"
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies]
machine-uid = "0.5.3"
+3
View File
@@ -172,6 +172,9 @@ core_clap:
disable_sym_hole_punching:
en: "if true, disable udp nat hole punching for symmetric nat (NAT4), which is based on birthday attack and may be blocked by ISP."
zh-CN: "如果为true,则禁用基于生日攻击的对称NAT (NAT4) UDP 打洞功能,该打洞方式可能会被运营商封锁"
disable_upnp:
en: "disable runtime UPnP/NAT-PMP port mapping for eligible listeners; automatic port mapping is enabled by default"
zh-CN: "禁用符合条件监听器的运行时 UPnP/NAT-PMP 端口映射;自动端口映射默认开启"
relay_all_peer_rpc:
en: "relay all peer rpc packets, even if the peer is not in the relay network whitelist. this can help peers not in relay network whitelist to establish p2p connection."
zh-CN: "转发所有对等节点的RPC数据包,即使对等节点不在转发网络白名单中。这可以帮助白名单外网络中的对等节点建立P2P连接。"
+1
View File
@@ -69,6 +69,7 @@ pub fn gen_default_flags() -> Flags {
quic_listen_port: u32::MAX,
need_p2p: false,
instance_recv_bps_limit: u64::MAX,
disable_upnp: false,
}
}
+5
View File
@@ -53,6 +53,11 @@ pub enum GlobalCtxEvent {
ListenerAcceptFailed(url::Url, String), // (url, error message)
ConnectionAccepted(String, String), // (local url, remote url)
ConnectionError(String, String, String), // (local url, remote url, error message)
ListenerPortMappingEstablished {
local_listener: url::Url,
mapped_listener: url::Url,
backend: String,
},
Connecting(url::Url),
ConnectError(String, String, String), // (dst, ip version, error message)
+1
View File
@@ -31,6 +31,7 @@ pub mod stun;
pub mod stun_codec_ext;
pub mod token_bucket;
pub mod tracing_rolling_appender;
pub mod upnp;
pub fn get_logger_timer<F: time::formatting::Formattable>(
format: F,
+19 -2
View File
@@ -276,7 +276,6 @@ impl StunClient {
let stun_host = self.stun_server;
// repeat req in case of packet loss
let mut tids = vec![];
for _ in 0..self.req_repeat {
let tid = rand::random::<u32>();
// let tid = 1;
@@ -912,6 +911,10 @@ impl TcpNatTypeDetector {
pub trait StunInfoCollectorTrait: Send + Sync {
fn get_stun_info(&self) -> StunInfo;
async fn get_udp_port_mapping(&self, local_port: u16) -> Result<SocketAddr, Error>;
async fn get_udp_port_mapping_with_socket(
&self,
udp: Arc<UdpSocket>,
) -> Result<SocketAddr, Error>;
async fn get_tcp_port_mapping(&self, local_port: u16) -> Result<SocketAddr, Error>;
}
@@ -975,6 +978,14 @@ impl StunInfoCollectorTrait for StunInfoCollector {
}
async fn get_udp_port_mapping(&self, local_port: u16) -> Result<SocketAddr, Error> {
let udp = Arc::new(UdpSocket::bind(format!("0.0.0.0:{}", local_port)).await?);
self.get_udp_port_mapping_with_socket(udp).await
}
async fn get_udp_port_mapping_with_socket(
&self,
udp: Arc<UdpSocket>,
) -> Result<SocketAddr, Error> {
self.start_stun_routine();
let mut stun_servers = self
@@ -1000,7 +1011,6 @@ impl StunInfoCollectorTrait for StunInfoCollector {
return Err(Error::NotFound);
}
let udp = Arc::new(UdpSocket::bind(format!("0.0.0.0:{}", local_port)).await?);
let mut client_builder = StunClientBuilder::new(udp.clone());
for server in stun_servers.iter() {
@@ -1316,6 +1326,13 @@ impl StunInfoCollectorTrait for MockStunInfoCollector {
Ok(format!("127.0.0.1:{}", port).parse().unwrap())
}
async fn get_udp_port_mapping_with_socket(
&self,
udp: Arc<UdpSocket>,
) -> Result<std::net::SocketAddr, Error> {
self.get_udp_port_mapping(udp.local_addr()?.port()).await
}
async fn get_tcp_port_mapping(&self, mut port: u16) -> Result<std::net::SocketAddr, Error> {
if port == 0 {
port = 40144;
+767
View File
@@ -0,0 +1,767 @@
use std::{
fmt,
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
sync::Arc,
time::Duration,
};
use anyhow::{Context, anyhow, bail};
use igd_next::{
AddAnyPortError, PortMappingProtocol, SearchOptions,
aio::{
Gateway,
tokio::{Tokio, search_gateway},
},
};
use natpmp::{
Protocol as NatPmpProtocol, Response as NatPmpResponse, new_tokio_natpmp, new_tokio_natpmp_with,
};
use tokio::{net::UdpSocket, sync::oneshot};
use super::{
global_ctx::{ArcGlobalCtx, GlobalCtxEvent},
stun::StunInfoCollectorTrait as _,
};
use crate::tunnel::build_url_from_socket_addr;
// Overall deadline for the SSDP search when discovering an IGD gateway.
const UPNP_SEARCH_TIMEOUT: Duration = Duration::from_secs(1);
// Timeout for a single search attempt (igd-next `single_search_timeout`).
const UPNP_SEARCH_RESPONSE_TIMEOUT: Duration = Duration::from_millis(300);
// How long to wait for the NAT-PMP gateway to answer a mapping request.
const NAT_PMP_RESPONSE_TIMEOUT: Duration = Duration::from_secs(1);
// Lease lifetime requested from the gateway for every mapping request.
const UPNP_LEASE_DURATION_SECS: u32 = 300;
// Renew cadence for the background task; comfortably inside the 300s lease.
const UPNP_RENEW_INTERVAL: Duration = Duration::from_secs(240);
// Human-readable mapping description (shows up in the gateway's admin UI).
const UPNP_DESCRIPTION: &str = "EasyTier udp hole punch";
// Backend labels surfaced in logs and GlobalCtx events.
const PORT_MAPPING_BACKEND_NAT_PMP: &str = "nat-pmp";
const PORT_MAPPING_BACKEND_IGD: &str = "igd";
// IGD gateway handle driven by the tokio executor.
type TokioGateway = Gateway<Tokio>;
/// Which port-mapping protocol backs an active mapping.
enum PortMappingBackend {
    // NAT-PMP only needs the gateway's IPv4 address; a fresh client is
    // created per request (see `request_nat_pmp_mapping`).
    NatPmp { gateway: Ipv4Addr },
    // IGD/UPnP keeps the discovered gateway handle alive for later calls.
    Igd { gateway: TokioGateway },
}
impl PortMappingBackend {
fn name(&self) -> &'static str {
match self {
Self::NatPmp { .. } => PORT_MAPPING_BACKEND_NAT_PMP,
Self::Igd { .. } => PORT_MAPPING_BACKEND_IGD,
}
}
}
/// A UDP port mapping that has been successfully established on a gateway.
struct ActiveUdpPortMapping {
    backend: PortMappingBackend,
    // Original listener url (e.g. udp://0.0.0.0:11010), kept for logging.
    local_listener: url::Url,
    // Internal (LAN) address registered with the gateway.
    local_addr: SocketAddr,
    // External port the gateway allocated on its WAN side.
    gateway_external_port: u16,
}
impl ActiveUdpPortMapping {
    /// Locate the NAT-PMP gateway (taken from the system's default route)
    /// and compute the internal address to register for `local_listener`.
    async fn discover_nat_pmp_gateway(
        local_listener: &url::Url,
    ) -> anyhow::Result<(Ipv4Addr, SocketAddr)> {
        let client = new_tokio_natpmp().await.context("create nat-pmp client")?;
        let gateway = *client.gateway();
        // NAT-PMP gateways listen on the protocol's well-known port.
        let gateway_addr = SocketAddr::V4(SocketAddrV4::new(gateway, natpmp::NATPMP_PORT));
        let local_addr = resolve_internal_addr(gateway_addr, local_listener).await?;
        Ok((gateway, local_addr))
    }

    /// Create the mapping on a previously discovered NAT-PMP gateway.
    async fn establish_via_nat_pmp(
        local_listener: &url::Url,
        gateway: Ipv4Addr,
        local_addr: SocketAddr,
    ) -> anyhow::Result<Self> {
        let gateway_external_port =
            add_udp_mapping_port_nat_pmp(gateway, local_addr, local_listener)
                .await
                .with_context(|| {
                    format!("map udp socket for {local_listener} via nat-pmp gateway {gateway}")
                })?;
        Ok(Self {
            backend: PortMappingBackend::NatPmp { gateway },
            local_listener: local_listener.clone(),
            local_addr,
            gateway_external_port,
        })
    }

    /// Search for a UPnP/IGD gateway and compute the internal address to
    /// register for `local_listener`.
    async fn discover_igd_gateway(
        global_ctx: &ArcGlobalCtx,
        local_listener: &url::Url,
    ) -> anyhow::Result<(TokioGateway, SocketAddr)> {
        // SSDP search traffic must originate from the instance's netns.
        let _g = global_ctx.net_ns.guard();
        let gateway = search_gateway(SearchOptions {
            timeout: Some(UPNP_SEARCH_TIMEOUT),
            single_search_timeout: Some(UPNP_SEARCH_RESPONSE_TIMEOUT),
            ..Default::default()
        })
        .await
        .with_context(|| format!("search igd gateway for {local_listener}"))?;
        let local_addr = resolve_internal_addr(gateway.addr, local_listener).await?;
        Ok((gateway, local_addr))
    }

    /// Create the mapping on a previously discovered IGD gateway.
    async fn establish_via_igd(
        local_listener: &url::Url,
        gateway: TokioGateway,
        local_addr: SocketAddr,
    ) -> anyhow::Result<Self> {
        let gateway_external_port = add_udp_mapping_port_igd(&gateway, local_addr, local_listener)
            .await
            .with_context(|| {
                format!(
                    "map udp socket for {local_listener} via gateway {}",
                    gateway.addr
                )
            })?;
        Ok(Self {
            backend: PortMappingBackend::Igd { gateway },
            local_listener: local_listener.clone(),
            local_addr,
            gateway_external_port,
        })
    }

    /// Backend label ("nat-pmp" or "igd") for logging/events.
    fn backend_name(&self) -> &'static str {
        self.backend.name()
    }

    /// Re-request the mapping so the gateway extends its lease.
    async fn renew(&self) -> anyhow::Result<()> {
        match &self.backend {
            PortMappingBackend::NatPmp { gateway } => {
                renew_udp_mapping_nat_pmp(
                    *gateway,
                    self.local_addr,
                    self.gateway_external_port,
                    &self.local_listener,
                )
                .await
            }
            PortMappingBackend::Igd { gateway } => {
                renew_udp_mapping_igd(
                    gateway,
                    self.local_addr,
                    self.gateway_external_port,
                    &self.local_listener,
                )
                .await
            }
        }
    }

    /// Remove the mapping from the gateway (best-effort cleanup).
    async fn remove(&self) -> anyhow::Result<()> {
        match &self.backend {
            PortMappingBackend::NatPmp { gateway } => {
                remove_udp_mapping_nat_pmp(
                    *gateway,
                    self.local_addr,
                    self.gateway_external_port,
                    &self.local_listener,
                )
                .await
            }
            PortMappingBackend::Igd { gateway } => {
                remove_udp_mapping_igd(gateway, self.gateway_external_port, &self.local_listener)
                    .await
            }
        }
    }
}
/// Handle for an active UDP port mapping. Dropping the lease signals the
/// background task to stop renewing and to remove the mapping.
pub struct UdpPortMappingLease {
    // Backend label, one of the PORT_MAPPING_BACKEND_* constants.
    backend: &'static str,
    // External port allocated by the gateway.
    gateway_external_port: u16,
    // Sent (or dropped) on Drop to stop the renew task; None after firing.
    stop_tx: Option<oneshot::Sender<()>>,
}
impl UdpPortMappingLease {
    /// External port the gateway allocated for this mapping.
    pub fn gateway_external_port(&self) -> u16 {
        self.gateway_external_port
    }

    /// Name of the backend ("nat-pmp" or "igd") holding the lease.
    pub fn backend(&self) -> &'static str {
        self.backend
    }
}
impl fmt::Debug for UdpPortMappingLease {
    // stop_tx is intentionally omitted from the debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("UdpPortMappingLease");
        dbg.field("backend", &self.backend);
        dbg.field("gateway_external_port", &self.gateway_external_port);
        dbg.finish()
    }
}
impl Drop for UdpPortMappingLease {
    fn drop(&mut self) {
        // Tell the background task to stop renewing and remove the mapping.
        // The receiver also resolves if the send itself fails, so ignoring
        // the result is fine.
        let Some(stop_tx) = self.stop_tx.take() else {
            return;
        };
        let _ = stop_tx.send(());
    }
}
/// Resolve the public UDP address for `local_listener` using `socket`.
///
/// First tries to establish a gateway port mapping (IGD/UPnP or NAT-PMP) as
/// a best effort — a mapping failure only logs a warning. The public
/// address is then always resolved via STUN on the same socket, so the
/// returned address is what remote peers actually observe. When a mapping
/// was established, a `ListenerPortMappingEstablished` event is emitted and
/// the lease is returned so the caller keeps it (and its renew task) alive.
pub async fn resolve_udp_public_addr(
    global_ctx: ArcGlobalCtx,
    local_listener: &url::Url,
    socket: Arc<UdpSocket>,
) -> anyhow::Result<(SocketAddr, Option<UdpPortMappingLease>)> {
    // Port mapping is best-effort: on failure fall back to plain STUN.
    let port_mapping = match try_start_udp_port_mapping(&global_ctx, local_listener).await {
        Ok(mapping) => mapping,
        Err(err) => {
            tracing::warn!(
                ?err,
                %local_listener,
                "failed to establish udp port mapping, fallback to stun-only public addr resolution"
            );
            None
        }
    };
    // STUN through the exact listener socket so the observed mapping matches.
    let mapped_addr = global_ctx
        .get_stun_info_collector()
        .get_udp_port_mapping_with_socket(socket)
        .await
        .map_err(anyhow::Error::from)
        .with_context(|| format!("resolve udp public addr for {local_listener}"))?;
    if let Some(port_mapping) = port_mapping.as_ref() {
        let mapped_listener = build_url_from_socket_addr(&mapped_addr.to_string(), "udp");
        global_ctx.issue_event(GlobalCtxEvent::ListenerPortMappingEstablished {
            local_listener: local_listener.clone(),
            mapped_listener,
            backend: port_mapping.backend().to_string(),
        });
        tracing::info!(
            %local_listener,
            backend = port_mapping.backend(),
            gateway_external_port = port_mapping.gateway_external_port(),
            stun_mapped_addr = %mapped_addr,
            "udp public addr resolved after port mapping"
        );
    } else {
        tracing::debug!(
            %local_listener,
            stun_mapped_addr = %mapped_addr,
            "udp public addr resolved without port mapping"
        );
    }
    Ok((mapped_addr, port_mapping))
}
/// Kick off UDP port mapping for `local_listener` if it is eligible.
///
/// Returns `Ok(None)` when mapping is disabled via the `disable_upnp` flag
/// or the listener is not an eligible ipv4 udp url. On success, a
/// background task is spawned that periodically renews the mapping and
/// removes it once the returned lease is dropped.
async fn try_start_udp_port_mapping(
    global_ctx: &ArcGlobalCtx,
    local_listener: &url::Url,
) -> anyhow::Result<Option<UdpPortMappingLease>> {
    if global_ctx.get_flags().disable_upnp || !should_map_udp_listener(local_listener) {
        return Ok(None);
    }
    let mapping = discover_udp_port_mapping(global_ctx.clone(), local_listener.clone()).await?;
    tracing::info!(
        %local_listener,
        backend = mapping.backend_name(),
        local_addr = %mapping.local_addr,
        gateway_external_port = mapping.gateway_external_port,
        "udp port mapping established"
    );
    let backend = mapping.backend_name();
    let gateway_external_port = mapping.gateway_external_port;
    let runtime_global_ctx = global_ctx.clone();
    let runtime_local_listener = local_listener.clone();
    // Sending on (or dropping) stop_tx terminates the renew task.
    let (stop_tx, stop_rx) = oneshot::channel();
    if should_run_port_mapping_in_dedicated_thread(&runtime_global_ctx) {
        // With a named netns, the renew traffic must run on a thread holding
        // the netns guard, driven by its own single-threaded runtime.
        tokio::task::spawn_blocking(move || {
            let _g = runtime_global_ctx.net_ns.guard();
            match tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
            {
                Ok(runtime) => {
                    runtime.block_on(run_udp_port_mapping_task(
                        runtime_local_listener,
                        mapping,
                        stop_rx,
                    ));
                }
                Err(err) => {
                    // Mapping stays un-renewed; it will lapse when the lease
                    // duration expires on the gateway.
                    tracing::error!(
                        ?err,
                        %runtime_local_listener,
                        "failed to build runtime for udp port mapping renew task"
                    );
                }
            }
        });
    } else {
        tokio::spawn(run_udp_port_mapping_task(
            runtime_local_listener,
            mapping,
            stop_rx,
        ));
    }
    Ok(Some(UdpPortMappingLease {
        backend,
        gateway_external_port,
        stop_tx: Some(stop_tx),
    }))
}
async fn discover_udp_port_mapping(
global_ctx: ArcGlobalCtx,
local_listener: url::Url,
) -> anyhow::Result<ActiveUdpPortMapping> {
match discover_igd_gateway_in_netns(global_ctx.clone(), local_listener.clone()).await {
Ok((gateway, local_addr)) => match establish_igd_mapping_in_netns(
global_ctx.clone(),
local_listener.clone(),
gateway,
local_addr,
)
.await
{
Ok(mapping) => Ok(mapping),
Err(igd_err) => {
tracing::debug!(
?igd_err,
%local_listener,
"igd udp port mapping failed, retry with nat-pmp"
);
match discover_nat_pmp_gateway_in_netns(global_ctx.clone(), local_listener.clone())
.await
{
Ok((gateway, local_addr)) => establish_nat_pmp_mapping_in_netns(
global_ctx,
local_listener.clone(),
gateway,
local_addr,
)
.await
.map_err(|nat_pmp_err| {
anyhow!(
"udp port mapping failed for {local_listener}: igd error: {igd_err}; nat-pmp error: {nat_pmp_err}"
)
}),
Err(nat_pmp_err) => Err(anyhow!(
"udp port mapping failed for {local_listener}: igd error: {igd_err}; nat-pmp discovery error: {nat_pmp_err}"
)),
}
}
},
Err(igd_err) => {
tracing::debug!(
?igd_err,
%local_listener,
"igd gateway discovery failed, retry with nat-pmp"
);
match discover_nat_pmp_gateway_in_netns(global_ctx.clone(), local_listener.clone()).await
{
Ok((gateway, local_addr)) => establish_nat_pmp_mapping_in_netns(
global_ctx,
local_listener.clone(),
gateway,
local_addr,
)
.await
.map_err(|nat_pmp_err| {
anyhow!(
"udp port mapping failed for {local_listener}: igd discovery error: {igd_err}; nat-pmp error: {nat_pmp_err}"
)
}),
Err(nat_pmp_err) => Err(anyhow!(
"udp port mapping failed for {local_listener}: igd discovery error: {igd_err}; nat-pmp discovery error: {nat_pmp_err}"
)),
}
}
}
}
/// Discover an IGD gateway, honoring the configured network namespace.
///
/// Without a named netns this runs inline on the current runtime. With one,
/// the discovery moves to a blocking thread that takes the netns guard and
/// drives a fresh current-thread runtime, so the SSDP search traffic
/// originates from inside the namespace.
async fn discover_igd_gateway_in_netns(
    global_ctx: ArcGlobalCtx,
    local_listener: url::Url,
) -> anyhow::Result<(TokioGateway, SocketAddr)> {
    if !should_run_port_mapping_in_dedicated_thread(&global_ctx) {
        return ActiveUdpPortMapping::discover_igd_gateway(&global_ctx, &local_listener).await;
    }
    tokio::task::spawn_blocking(move || {
        let _g = global_ctx.net_ns.guard();
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .context("build runtime for igd gateway discovery")?
            .block_on(ActiveUdpPortMapping::discover_igd_gateway(
                &global_ctx,
                &local_listener,
            ))
    })
    .await
    .context("join igd gateway discovery task")?
}
/// Establish an IGD mapping, honoring the configured network namespace.
/// Same dedicated-thread dance as `discover_igd_gateway_in_netns`: the
/// gateway requests must be sent from inside the netns.
async fn establish_igd_mapping_in_netns(
    global_ctx: ArcGlobalCtx,
    local_listener: url::Url,
    gateway: TokioGateway,
    local_addr: SocketAddr,
) -> anyhow::Result<ActiveUdpPortMapping> {
    if !should_run_port_mapping_in_dedicated_thread(&global_ctx) {
        return ActiveUdpPortMapping::establish_via_igd(&local_listener, gateway, local_addr).await;
    }
    tokio::task::spawn_blocking(move || {
        let _g = global_ctx.net_ns.guard();
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .context("build runtime for igd mapping establishment")?
            .block_on(ActiveUdpPortMapping::establish_via_igd(
                &local_listener,
                gateway,
                local_addr,
            ))
    })
    .await
    .context("join igd mapping establishment task")?
}
/// Discover the NAT-PMP gateway, honoring the configured network namespace.
/// See `discover_igd_gateway_in_netns` for why a dedicated thread with its
/// own runtime is used when a named netns is configured.
async fn discover_nat_pmp_gateway_in_netns(
    global_ctx: ArcGlobalCtx,
    local_listener: url::Url,
) -> anyhow::Result<(Ipv4Addr, SocketAddr)> {
    if !should_run_port_mapping_in_dedicated_thread(&global_ctx) {
        return ActiveUdpPortMapping::discover_nat_pmp_gateway(&local_listener).await;
    }
    tokio::task::spawn_blocking(move || {
        let _g = global_ctx.net_ns.guard();
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .context("build runtime for nat-pmp gateway discovery")?
            .block_on(ActiveUdpPortMapping::discover_nat_pmp_gateway(
                &local_listener,
            ))
    })
    .await
    .context("join nat-pmp gateway discovery task")?
}
/// Establish a NAT-PMP mapping, honoring the configured network namespace.
/// See `discover_igd_gateway_in_netns` for the dedicated-thread rationale.
async fn establish_nat_pmp_mapping_in_netns(
    global_ctx: ArcGlobalCtx,
    local_listener: url::Url,
    gateway: Ipv4Addr,
    local_addr: SocketAddr,
) -> anyhow::Result<ActiveUdpPortMapping> {
    if !should_run_port_mapping_in_dedicated_thread(&global_ctx) {
        return ActiveUdpPortMapping::establish_via_nat_pmp(&local_listener, gateway, local_addr)
            .await;
    }
    tokio::task::spawn_blocking(move || {
        let _g = global_ctx.net_ns.guard();
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .context("build runtime for nat-pmp mapping establishment")?
            .block_on(ActiveUdpPortMapping::establish_via_nat_pmp(
                &local_listener,
                gateway,
                local_addr,
            ))
    })
    .await
    .context("join nat-pmp mapping establishment task")?
}
/// Background task owning an established mapping: renews it on a fixed
/// interval and removes it from the gateway once the lease goes away.
async fn run_udp_port_mapping_task(
    local_listener: url::Url,
    mapping: ActiveUdpPortMapping,
    mut stop_rx: oneshot::Receiver<()>,
) {
    loop {
        tokio::select! {
            // Renew well before the UPNP_LEASE_DURATION_SECS lease expires.
            _ = tokio::time::sleep(UPNP_RENEW_INTERVAL) => {
                // Renewal failure is non-fatal; try again next interval.
                if let Err(err) = mapping.renew().await {
                    tracing::warn!(
                        ?err,
                        %local_listener,
                        backend = mapping.backend_name(),
                        gateway_external_port = mapping.gateway_external_port,
                        "failed to renew udp port mapping"
                    );
                }
            }
            // Resolves when the lease sends the stop signal or is dropped.
            _ = &mut stop_rx => break,
        }
    }
    // Best-effort cleanup; the lease would lapse on its own anyway.
    if let Err(err) = mapping.remove().await {
        tracing::debug!(
            ?err,
            %local_listener,
            backend = mapping.backend_name(),
            gateway_external_port = mapping.gateway_external_port,
            "failed to remove udp port mapping"
        );
    }
}
/// True when gateway interactions must run on a dedicated OS thread: a
/// named network namespace is configured, and each worker thread takes its
/// own `net_ns.guard()` before talking to the gateway.
fn should_run_port_mapping_in_dedicated_thread(global_ctx: &ArcGlobalCtx) -> bool {
    matches!(global_ctx.net_ns.name(), Some(_))
}
/// Ask the IGD gateway for a UDP mapping to `local_addr`.
///
/// Tries `add_any_port` first (gateway picks a free external port); if that
/// fails with a request error, retries mapping the listener's own port.
/// Returns the external port the gateway allocated.
async fn add_udp_mapping_port_igd(
    gateway: &TokioGateway,
    local_addr: SocketAddr,
    local_listener: &url::Url,
) -> anyhow::Result<u16> {
    match gateway
        .add_any_port(
            PortMappingProtocol::UDP,
            local_addr,
            UPNP_LEASE_DURATION_SECS,
            UPNP_DESCRIPTION,
        )
        .await
    {
        Ok(external_port) => Ok(external_port),
        // Only RequestError triggers the same-port fallback; other failures
        // propagate unchanged via the final arm below.
        Err(AddAnyPortError::RequestError(err)) => {
            tracing::debug!(
                ?err,
                %local_listener,
                gateway = %gateway.addr,
                %local_addr,
                "igd any-port udp mapping failed, retry with same-port mapping"
            );
            gateway
                .add_port(
                    PortMappingProtocol::UDP,
                    local_addr.port(),
                    local_addr,
                    UPNP_LEASE_DURATION_SECS,
                    UPNP_DESCRIPTION,
                )
                .await
                .map(|_| local_addr.port())
                .map_err(|same_port_err| {
                    anyhow!(
                        "igd udp mapping failed for {local_listener}: any-port error: {err}; same-port error: {same_port_err}"
                    )
                })
        }
        Err(err) => Err(err.into()),
    }
}
/// Ask the NAT-PMP gateway for a UDP mapping to `local_addr`.
///
/// First requests with public port 0 (gateway chooses a free port); on
/// failure, retries requesting the listener's own port as the public port.
/// Returns the external port the gateway allocated.
async fn add_udp_mapping_port_nat_pmp(
    gateway: Ipv4Addr,
    local_addr: SocketAddr,
    local_listener: &url::Url,
) -> anyhow::Result<u16> {
    // public_port 0 lets the gateway pick any available external port.
    match request_nat_pmp_mapping(gateway, local_addr.port(), 0, UPNP_LEASE_DURATION_SECS).await {
        Ok(external_port) => Ok(external_port),
        Err(any_port_err) => {
            tracing::debug!(
                ?any_port_err,
                %local_listener,
                gateway = %gateway,
                %local_addr,
                "nat-pmp any-port udp mapping failed, retry with same-port mapping"
            );
            request_nat_pmp_mapping(
                gateway,
                local_addr.port(),
                local_addr.port(),
                UPNP_LEASE_DURATION_SECS,
            )
            .await
            .map_err(|same_port_err| {
                anyhow!(
                    "nat-pmp udp mapping failed for {local_listener}: any-port error: {any_port_err}; same-port error: {same_port_err}"
                )
            })
        }
    }
}
/// Send one NAT-PMP UDP mapping request to `gateway` and wait (bounded by
/// `NAT_PMP_RESPONSE_TIMEOUT`) for the response. A fresh client is created
/// per call. Returns the public port the gateway granted, which may differ
/// from the requested `public_port`.
async fn request_nat_pmp_mapping(
    gateway: Ipv4Addr,
    private_port: u16,
    public_port: u16,
    lifetime_secs: u32,
) -> anyhow::Result<u16> {
    let client = new_tokio_natpmp_with(gateway)
        .await
        .with_context(|| format!("create nat-pmp client for gateway {gateway}"))?;
    client
        .send_port_mapping_request(
            NatPmpProtocol::UDP,
            private_port,
            public_port,
            lifetime_secs,
        )
        .await
        .with_context(|| {
            format!(
                "send nat-pmp udp mapping request private_port={private_port} public_port={public_port} gateway={gateway}"
            )
        })?;
    // read_response_or_retry handles NAT-PMP's own retransmit schedule; the
    // outer timeout bounds the total wait.
    let response = tokio::time::timeout(NAT_PMP_RESPONSE_TIMEOUT, client.read_response_or_retry())
        .await
        .with_context(|| {
            format!(
                "wait nat-pmp udp mapping response private_port={private_port} gateway={gateway}"
            )
        })?
        .map_err(anyhow::Error::from)
        .with_context(|| {
            format!(
                "read nat-pmp udp mapping response private_port={private_port} gateway={gateway}"
            )
        })?;
    match response {
        // NOTE(review): a TCP response to a UDP request is also accepted
        // here — presumably tolerated as harmless; confirm this is intended.
        NatPmpResponse::UDP(mapping) | NatPmpResponse::TCP(mapping) => Ok(mapping.public_port()),
        NatPmpResponse::Gateway(_) => {
            bail!("unexpected nat-pmp gateway response for udp mapping request")
        }
    }
}
/// Refresh an existing NAT-PMP mapping so the gateway keeps it alive for
/// another `UPNP_LEASE_DURATION_SECS` seconds.
async fn renew_udp_mapping_nat_pmp(
    gateway: Ipv4Addr,
    local_addr: SocketAddr,
    external_port: u16,
    local_listener: &url::Url,
) -> anyhow::Result<()> {
    let private_port = local_addr.port();
    request_nat_pmp_mapping(gateway, private_port, external_port, UPNP_LEASE_DURATION_SECS)
        .await
        .with_context(|| format!("renew udp port mapping {local_listener}"))?;
    Ok(())
}
/// Drop a NAT-PMP mapping. NAT-PMP removes a mapping by re-requesting it
/// with a lifetime of zero.
async fn remove_udp_mapping_nat_pmp(
    gateway: Ipv4Addr,
    local_addr: SocketAddr,
    external_port: u16,
    local_listener: &url::Url,
) -> anyhow::Result<()> {
    let private_port = local_addr.port();
    request_nat_pmp_mapping(gateway, private_port, external_port, 0)
        .await
        .with_context(|| format!("remove udp port mapping {local_listener}"))?;
    Ok(())
}
/// Whether `local_listener` is eligible for gateway port mapping: a udp url
/// whose ipv4 host is unspecified (wildcard), private, or link-local.
/// Loopback, broadcast, public addresses, non-ipv4 hosts and other schemes
/// are never mapped.
fn should_map_udp_listener(local_listener: &url::Url) -> bool {
    if local_listener.scheme() != "udp" {
        return false;
    }
    match listener_ipv4_host(local_listener) {
        Some(host) if !host.is_loopback() && !host.is_broadcast() => {
            host.is_unspecified() || host.is_private() || host.is_link_local()
        }
        _ => false,
    }
}
/// Parse the listener url's host portion as an IPv4 address, if possible.
fn listener_ipv4_host(local_listener: &url::Url) -> Option<Ipv4Addr> {
    local_listener
        .host_str()
        .and_then(|host| host.parse::<Ipv4Addr>().ok())
}
async fn resolve_internal_addr(
gateway_addr: SocketAddr,
local_listener: &url::Url,
) -> anyhow::Result<SocketAddr> {
let port = local_listener
.port()
.ok_or_else(|| anyhow!("listener port is missing"))?;
let host =
listener_ipv4_host(local_listener).ok_or_else(|| anyhow!("listener must be ipv4"))?;
let ip = if host.is_unspecified() {
let udp = std::net::UdpSocket::bind("0.0.0.0:0")
.context("bind probe socket for gateway route")?;
udp.connect(gateway_addr)
.with_context(|| format!("connect probe socket to gateway {gateway_addr}"))?;
let SocketAddr::V4(local_addr) = udp.local_addr().context("get probe socket local addr")?
else {
bail!("gateway route selected a non-ipv4 local address");
};
*local_addr.ip()
} else {
host
};
Ok(SocketAddr::new(ip.into(), port))
}
/// Refresh an IGD mapping by re-adding the same external port with a fresh
/// lease duration.
async fn renew_udp_mapping_igd(
    gateway: &TokioGateway,
    local_addr: SocketAddr,
    external_port: u16,
    local_listener: &url::Url,
) -> anyhow::Result<()> {
    gateway
        .add_port(
            PortMappingProtocol::UDP,
            external_port,
            local_addr,
            UPNP_LEASE_DURATION_SECS,
            UPNP_DESCRIPTION,
        )
        .await
        .with_context(|| format!("renew udp port mapping {local_listener}"))
}
/// Delete the UDP mapping for `external_port` from the IGD gateway.
async fn remove_udp_mapping_igd(
    gateway: &TokioGateway,
    external_port: u16,
    local_listener: &url::Url,
) -> anyhow::Result<()> {
    gateway
        .remove_port(PortMappingProtocol::UDP, external_port)
        .await
        .with_context(|| format!("remove udp port mapping {local_listener}"))
}
#[cfg(test)]
mod tests {
    /// Eligibility check for gateway mapping: wildcard, private and
    /// link-local ipv4 udp listeners qualify; loopback, broadcast, public
    /// addresses and non-udp schemes do not. (Adds previously-missing
    /// coverage for the link-local true branch and broadcast false branch.)
    #[test]
    fn udp_mapping_requires_private_or_unspecified_ipv4_listener() {
        assert!(super::should_map_udp_listener(
            &"udp://0.0.0.0:11010".parse().unwrap()
        ));
        assert!(super::should_map_udp_listener(
            &"udp://192.168.1.10:11010".parse().unwrap()
        ));
        // link-local addresses are also eligible
        assert!(super::should_map_udp_listener(
            &"udp://169.254.10.10:11010".parse().unwrap()
        ));
        assert!(!super::should_map_udp_listener(
            &"udp://127.0.0.1:11010".parse().unwrap()
        ));
        // broadcast is explicitly rejected
        assert!(!super::should_map_udp_listener(
            &"udp://255.255.255.255:11010".parse().unwrap()
        ));
        assert!(!super::should_map_udp_listener(
            &"udp://8.8.8.8:11010".parse().unwrap()
        ));
        assert!(!super::should_map_udp_listener(
            &"tcp://0.0.0.0:11010".parse().unwrap()
        ));
    }
}
+116 -31
View File
@@ -2,7 +2,7 @@
use std::{
collections::HashSet,
net::{IpAddr, Ipv6Addr, SocketAddr},
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
str::FromStr,
sync::{
Arc,
@@ -27,7 +27,7 @@ use crate::{
proto::{
peer_rpc::{
DirectConnectorRpc, DirectConnectorRpcClientFactory, DirectConnectorRpcServer,
GetIpListRequest, GetIpListResponse, SendV6HolePunchPacketRequest,
GetIpListRequest, GetIpListResponse, SendUdpHolePunchPacketRequest,
},
rpc_types::controller::BaseController,
},
@@ -117,37 +117,25 @@ impl DirectConnectorManagerData {
}
}
async fn remote_send_v6_hole_punch_packet(
async fn remote_send_udp_hole_punch_packet(
&self,
dst_peer_id: PeerId,
local_socket: &UdpSocket,
connector_addr: SocketAddr,
remote_url: &url::Url,
) -> Result<(), Error> {
if !matches_scheme!(remote_url, TunnelScheme::Ip(IpScheme::Udp)) {
return Err(anyhow::anyhow!(
"udp hole punch packet only applies to udp listener: {}",
remote_url
)
.into());
}
let global_ctx = self.peer_manager.get_global_ctx();
let listener_port = remote_url.port().ok_or(anyhow::anyhow!(
"failed to parse port from remote url: {}",
remote_url
))?;
let connector_ip = global_ctx
.get_stun_info_collector()
.get_stun_info()
.public_ip
.iter()
.find(|x| x.contains(":"))
.ok_or(anyhow::anyhow!(
"failed to get public ipv6 address from stun info"
))?
.parse::<std::net::Ipv6Addr>()
.with_context(|| {
format!(
"failed to parse public ipv6 address from stun info: {:?}",
global_ctx.get_stun_info_collector().get_stun_info()
)
})?;
let connector_addr = SocketAddr::new(
std::net::IpAddr::V6(connector_ip),
local_socket.local_addr()?.port(),
);
let rpc_stub = self
.peer_manager
@@ -160,9 +148,9 @@ impl DirectConnectorManagerData {
);
rpc_stub
.send_v6_hole_punch_packet(
.send_udp_hole_punch_packet(
BaseController::default(),
SendV6HolePunchPacketRequest {
SendUdpHolePunchPacketRequest {
listener_port: listener_port as u32,
connector_addr: Some(connector_addr.into()),
},
@@ -170,7 +158,7 @@ impl DirectConnectorManagerData {
.await
.with_context(|| {
format!(
"do rpc, send v6 hole punch packet to peer {} at {}",
"do rpc, send udp hole punch packet to peer {} at {}",
dst_peer_id, remote_url
)
})?;
@@ -188,11 +176,34 @@ impl DirectConnectorManagerData {
.await
.with_context(|| format!("failed to bind local socket for {}", remote_url))?,
);
let connector_ip = self
.peer_manager
.get_global_ctx()
.get_stun_info_collector()
.get_stun_info()
.public_ip
.iter()
.find(|x| x.contains(':'))
.ok_or(anyhow::anyhow!(
"failed to get public ipv6 address from stun info"
))?
.parse::<Ipv6Addr>()
.with_context(|| {
format!(
"failed to parse public ipv6 address from stun info: {:?}",
self.peer_manager
.get_global_ctx()
.get_stun_info_collector()
.get_stun_info()
)
})?;
let connector_addr =
SocketAddr::new(IpAddr::V6(connector_ip), local_socket.local_addr()?.port());
// ask remote to send v6 hole punch packet
// and no matter what the result is, continue to connect
let _ = self
.remote_send_v6_hole_punch_packet(dst_peer_id, &local_socket, remote_url)
.remote_send_udp_hole_punch_packet(dst_peer_id, connector_addr, remote_url)
.await;
let udp_connector = UdpTunnelConnector::new(remote_url.clone());
@@ -207,14 +218,80 @@ impl DirectConnectorManagerData {
.await
}
async fn connect_to_public_ipv4(
&self,
dst_peer_id: PeerId,
remote_url: &url::Url,
) -> Result<(PeerId, PeerConnId), Error> {
let local_socket = {
let _g = self.global_ctx.net_ns.guard();
Arc::new(
UdpSocket::bind("0.0.0.0:0")
.await
.with_context(|| format!("failed to bind local socket for {}", remote_url))?,
)
};
let connector_addr = self
.peer_manager
.get_global_ctx()
.get_stun_info_collector()
.get_udp_port_mapping_with_socket(local_socket.clone())
.await
.with_context(|| format!("failed to get udp port mapping for {}", remote_url))?;
let _ = self
.remote_send_udp_hole_punch_packet(dst_peer_id, connector_addr, remote_url)
.await;
let udp_connector = UdpTunnelConnector::new(remote_url.clone());
let remote_addr = SocketAddr::from_url(remote_url.clone(), IpVersion::V4).await?;
let ret = udp_connector
.try_connect_with_socket(local_socket, remote_addr)
.await?;
self.peer_manager
.add_client_tunnel_with_peer_id_hint(ret, true, Some(dst_peer_id))
.await
}
async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> {
let connector = create_connector_by_url(&addr, &self.global_ctx, IpVersion::Both).await?;
let remote_url = connector.remote_url();
let (peer_id, conn_id) = if matches_scheme!(remote_url, TunnelScheme::Ip(IpScheme::Udp))
&& matches!(remote_url.host(), Some(Host::Ipv6(_)))
{
let (peer_id, conn_id) = if matches_scheme!(remote_url, TunnelScheme::Ip(IpScheme::Udp)) {
match remote_url.host() {
Some(Host::Ipv6(_)) => {
self.connect_to_public_ipv6(dst_peer_id, &remote_url)
.await?
}
Some(Host::Ipv4(ip)) if is_public_ipv4(ip) => {
match self.connect_to_public_ipv4(dst_peer_id, &remote_url).await {
Ok(ret) => ret,
Err(err) => {
tracing::debug!(
?err,
%remote_url,
"udp public ipv4 listener punch failed, falling back to direct connect"
);
timeout(
std::time::Duration::from_secs(3),
self.peer_manager.try_direct_connect_with_peer_id_hint(
connector,
Some(dst_peer_id),
),
)
.await??
}
}
}
_ => {
timeout(
std::time::Duration::from_secs(3),
self.peer_manager
.try_direct_connect_with_peer_id_hint(connector, Some(dst_peer_id)),
)
.await??
}
}
} else {
timeout(
std::time::Duration::from_secs(3),
@@ -577,6 +654,14 @@ impl DirectConnectorManagerData {
}
}
/// Whether `ip` is a publicly routable IPv4 unicast address worth
/// attempting a direct udp connection / hole punch against.
///
/// Excludes private (RFC 1918), loopback, link-local, broadcast,
/// unspecified and multicast addresses — none of these can be a remote
/// peer's public unicast listener.
fn is_public_ipv4(ip: Ipv4Addr) -> bool {
    !ip.is_private()
        && !ip.is_loopback()
        && !ip.is_link_local()
        && !ip.is_broadcast()
        && !ip.is_unspecified()
        // 224.0.0.0/4 was previously misclassified as public; multicast is
        // never a valid unicast listener address.
        && !ip.is_multicast()
}
impl std::fmt::Debug for DirectConnectorManagerData {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DirectConnectorManagerData")
+7
View File
@@ -621,6 +621,13 @@ mod tests {
Ok(format!("127.0.0.1:{}", port).parse().unwrap())
}
async fn get_udp_port_mapping_with_socket(
&self,
udp: std::sync::Arc<tokio::net::UdpSocket>,
) -> Result<SocketAddr, Error> {
self.get_udp_port_mapping(udp.local_addr()?.port()).await
}
async fn get_tcp_port_mapping(&self, mut port: u16) -> Result<SocketAddr, Error> {
if port == 0 {
port = 40144;
+217 -41
View File
@@ -13,8 +13,7 @@ use zerocopy::FromBytes as _;
use crate::{
common::{
PeerId, error::Error, global_ctx::ArcGlobalCtx, join_joinset_background, netns::NetNS,
stun::StunInfoCollectorTrait as _,
PeerId, error::Error, global_ctx::ArcGlobalCtx, join_joinset_background, netns::NetNS, upnp,
},
defer,
peers::peer_manager::PeerManager,
@@ -27,6 +26,7 @@ use crate::{
};
pub(crate) const HOLE_PUNCH_PACKET_BODY_LEN: u16 = 16;
const MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS: usize = 4;
fn generate_shuffled_port_vec() -> Vec<u16> {
let mut rng = rand::thread_rng();
@@ -352,6 +352,8 @@ pub(crate) struct UdpHolePunchListener {
tasks: JoinSet<()>,
running: Arc<AtomicCell<bool>>,
mapped_addr: SocketAddr,
has_port_mapping_lease: bool,
_port_mapping_lease: Option<upnp::UdpPortMappingLease>,
conn_counter: Arc<Box<dyn TunnelConnCounter>>,
listen_time: std::time::Instant,
@@ -360,11 +362,6 @@ pub(crate) struct UdpHolePunchListener {
}
impl UdpHolePunchListener {
async fn get_avail_port() -> Result<u16, Error> {
let socket = UdpSocket::bind("0.0.0.0:0").await?;
Ok(socket.local_addr()?.port())
}
#[instrument(err)]
pub async fn new(peer_mgr: Arc<PeerManager>) -> Result<Self, Error> {
Self::new_ext(peer_mgr, true, None).await
@@ -376,18 +373,24 @@ impl UdpHolePunchListener {
with_mapped_addr: bool,
port: Option<u16>,
) -> Result<Self, Error> {
let port = port.unwrap_or(Self::get_avail_port().await?);
let listen_url = format!("udp://0.0.0.0:{}", port);
let socket = {
let _g = peer_mgr.get_global_ctx().net_ns.guard();
Arc::new(UdpSocket::bind((Ipv4Addr::UNSPECIFIED, port.unwrap_or(0))).await?)
};
let local_port = socket.local_addr()?.port();
let listen_url: url::Url = format!("udp://0.0.0.0:{local_port}").parse().unwrap();
let mapped_addr = if with_mapped_addr {
let gctx = peer_mgr.get_global_ctx();
let stun_info_collect = gctx.get_stun_info_collector();
stun_info_collect.get_udp_port_mapping(port).await?
let (mapped_addr, port_mapping_lease) = if with_mapped_addr {
upnp::resolve_udp_public_addr(peer_mgr.get_global_ctx(), &listen_url, socket.clone())
.await?
} else {
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port))
(
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, local_port)),
None,
)
};
let mut listener = UdpTunnelListener::new(listen_url.parse().unwrap());
let mut listener = UdpTunnelListener::new_with_socket(listen_url, socket.clone());
{
let _g = peer_mgr.get_global_ctx().net_ns.guard();
@@ -437,6 +440,8 @@ impl UdpHolePunchListener {
socket,
running,
mapped_addr,
has_port_mapping_lease: port_mapping_lease.is_some(),
_port_mapping_lease: port_mapping_lease,
conn_counter,
listen_time: std::time::Instant::now(),
@@ -517,45 +522,87 @@ impl PunchHoleServerCommon {
pub(crate) async fn select_listener(
&self,
use_new_listener: bool,
prefer_port_mapping: bool,
) -> Option<(Arc<UdpSocket>, SocketAddr)> {
let all_listener_sockets = &self.listeners;
let mut use_last = false;
if all_listener_sockets.lock().await.len() < 16 || use_new_listener {
tracing::warn!("creating new udp hole punching listener");
all_listener_sockets.lock().await.push(
UdpHolePunchListener::new(self.peer_mgr.clone())
.await
.ok()?,
let (listener_count, has_reusable_listener, has_port_mapping_listener) = {
let locked = self.listeners.lock().await;
(
locked.len(),
locked.iter().any(can_reuse_public_listener),
locked.iter().any(can_reuse_port_mapping_listener),
)
};
let should_create = should_create_public_listener(
listener_count,
has_reusable_listener,
has_port_mapping_listener,
use_new_listener,
prefer_port_mapping,
);
use_last = true;
if should_create {
tracing::warn!(
max_listeners = MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS,
"creating udp hole punching listener"
);
match UdpHolePunchListener::new(self.peer_mgr.clone()).await {
Ok(listener) => self.listeners.lock().await.push(listener),
Err(err) => {
tracing::warn!(?err, "failed to create udp hole punching listener");
}
}
}
let mut locked = all_listener_sockets.lock().await;
let listener = if use_last {
Some(locked.last_mut()?)
let mut locked = self.listeners.lock().await;
let listener_count = locked.len();
let listener_idx = if prefer_port_mapping {
select_reusable_port_mapping_listener_idx(locked.as_slice())
.or_else(|| {
if should_create && locked.last().is_some_and(can_reuse_public_listener) {
Some(locked.len() - 1)
} else {
// use the listener that is active most recently
locked
.iter_mut()
.filter(|l| !l.mapped_addr.ip().is_unspecified())
.max_by_key(|listener| listener.last_active_time.load())
None
}
})
.or_else(|| select_reusable_public_listener_idx(locked.as_slice()))
} else if should_create {
locked.len().checked_sub(1)
} else {
select_reusable_public_listener_idx(locked.as_slice())
};
if listener.is_none() || listener.as_ref().unwrap().mapped_addr.ip().is_unspecified() {
let Some(listener_idx) = listener_idx else {
tracing::warn!(
?use_new_listener,
?prefer_port_mapping,
listener_count,
max_listeners = MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS,
"no available udp hole punching listener with mapped address"
);
if !use_new_listener {
return self.select_listener(true).await;
} else {
if should_retry_public_listener_selection(
use_new_listener,
listener_count,
prefer_port_mapping,
has_port_mapping_listener,
) {
drop(locked);
return self.select_listener(true, prefer_port_mapping).await;
}
return None;
};
let listener = &mut locked[listener_idx];
if !can_reuse_public_listener(listener) {
tracing::warn!(
?use_new_listener,
?prefer_port_mapping,
listener_count,
max_listeners = MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS,
"selected udp hole punching listener is not reusable"
);
return None;
}
}
let listener = listener.unwrap();
Some((listener.get_socket().await, listener.mapped_addr))
}
@@ -572,7 +619,73 @@ impl PunchHoleServerCommon {
}
}
#[tracing::instrument(err, ret(level=Level::DEBUG), skip(ports))]
/// A listener can serve new punch requests only while its task is still
/// running and its mapped address resolved to a concrete (non-0.0.0.0) IP.
fn can_reuse_public_listener(listener: &UdpHolePunchListener) -> bool {
    let is_running = listener.running.load();
    let has_concrete_mapped_ip = !listener.mapped_addr.ip().is_unspecified();
    is_running && has_concrete_mapped_ip
}
/// Like `can_reuse_public_listener`, but additionally requires that the
/// listener holds a UPnP/NAT-PMP port mapping lease.
fn can_reuse_port_mapping_listener(listener: &UdpHolePunchListener) -> bool {
    if !listener.has_port_mapping_lease {
        return false;
    }
    can_reuse_public_listener(listener)
}
/// Index of the reusable listener that was active most recently, if any.
fn select_reusable_public_listener_idx(listeners: &[UdpHolePunchListener]) -> Option<usize> {
    listeners
        .iter()
        .enumerate()
        .filter_map(|(idx, candidate)| {
            can_reuse_public_listener(candidate)
                .then(|| (idx, candidate.last_active_time.load()))
        })
        .max_by_key(|&(_, active_at)| active_at)
        .map(|(idx, _)| idx)
}
/// Index of the most recently active listener that both is reusable and
/// holds a port mapping lease, if any.
fn select_reusable_port_mapping_listener_idx(listeners: &[UdpHolePunchListener]) -> Option<usize> {
    listeners
        .iter()
        .enumerate()
        .filter_map(|(idx, candidate)| {
            can_reuse_port_mapping_listener(candidate)
                .then(|| (idx, candidate.last_active_time.load()))
        })
        .max_by_key(|&(_, active_at)| active_at)
        .map(|(idx, _)| idx)
}
/// Decide whether a new public udp hole punching listener should be created.
///
/// Creation is refused once the listener budget
/// (`MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS`) is reached. Below the budget, a
/// listener is created when there is none yet, when the caller explicitly
/// forces a new one, when a port-mapped listener is wanted but absent, or
/// when no existing listener is reusable.
fn should_create_public_listener(
    current_listener_count: usize,
    has_reusable_listener: bool,
    has_port_mapping_listener: bool,
    force_new_listener: bool,
    prefer_port_mapping: bool,
) -> bool {
    // Hard cap: never exceed the listener budget.
    if current_listener_count >= MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS {
        return false;
    }
    let port_mapping_wanted_but_missing = prefer_port_mapping && !has_port_mapping_listener;
    current_listener_count == 0
        || force_new_listener
        || port_mapping_wanted_but_missing
        || !has_reusable_listener
}
/// Decide whether listener selection should be retried with a forced new
/// listener after the first pass found nothing usable.
fn should_retry_public_listener_selection(
    force_new_listener: bool,
    current_listener_count: usize,
    prefer_port_mapping: bool,
    has_port_mapping_listener: bool,
) -> bool {
    // A port-mapped listener already exists; forcing a new listener cannot
    // improve the port-mapping preference, so do not retry.
    if prefer_port_mapping && has_port_mapping_listener {
        return false;
    }
    // Retry only on the first (non-forced) pass, and only while the cap
    // still leaves room for another listener.
    if force_new_listener {
        return false;
    }
    current_listener_count < MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS
}
#[tracing::instrument(err, ret(level=Level::DEBUG))]
pub(crate) async fn send_symmetric_hole_punch_packet(
ports: &[u16],
udp: Arc<UdpSocket>,
@@ -647,3 +760,66 @@ pub(crate) async fn try_connect_with_socket(
.await
.map_err(Error::from)
}
#[cfg(test)]
mod tests {
    use super::{
        MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS, should_create_public_listener,
        should_retry_public_listener_selection,
    };

    // Argument order reminders:
    //   should_create_public_listener(count, has_reusable, has_port_mapping,
    //                                 force_new, prefer_port_mapping)
    //   should_retry_public_listener_selection(force_new, count,
    //                                          prefer_port_mapping, has_port_mapping)

    #[test]
    fn listener_selection_prefers_reuse_before_cap() {
        // A reusable listener exists below the cap: reuse it, do not create.
        assert!(!should_create_public_listener(1, true, true, false, false));
        // At the cap, creation is always refused.
        let at_cap = MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS;
        assert!(!should_create_public_listener(
            at_cap, true, true, false, false
        ));
    }

    #[test]
    fn listener_selection_creates_when_empty_or_no_reusable_listener() {
        // No listener at all: must create the first one.
        assert!(should_create_public_listener(0, false, false, false, false));
        // Listeners exist but none is reusable: create a replacement.
        assert!(should_create_public_listener(1, false, false, false, false));
    }

    #[test]
    fn listener_selection_force_new_respects_cap() {
        // force_new wins below the cap even when reusable listeners exist...
        assert!(should_create_public_listener(1, true, true, true, false));
        // ...but never overrides the cap.
        let at_cap = MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS;
        assert!(!should_create_public_listener(
            at_cap, true, true, true, false
        ));
    }

    #[test]
    fn listener_selection_prefers_port_mapping_until_available() {
        // Port mapping preferred but not yet available: create a candidate.
        assert!(should_create_public_listener(1, true, false, false, true));
        // Once a port-mapped listener exists, stop creating.
        assert!(!should_create_public_listener(1, true, true, false, true));
    }

    #[test]
    fn listener_selection_retry_respects_cap() {
        // First (non-forced) pass below the cap: retry with a forced listener.
        assert!(should_retry_public_listener_selection(false, 1, false, false));
        // At the cap there is no room for another listener: give up.
        assert!(!should_retry_public_listener_selection(
            false,
            MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS,
            false,
            false
        ));
        // Already forced once: do not loop.
        assert!(!should_retry_public_listener_selection(true, 1, false, false));
        // Port-mapping preference already satisfied: retrying cannot help.
        assert!(!should_retry_public_listener_selection(false, 1, true, true));
    }
}
+15 -16
View File
@@ -7,7 +7,7 @@ use anyhow::Context;
use tokio::net::UdpSocket;
use crate::{
common::{PeerId, scoped_task::ScopedTask, stun::StunInfoCollectorTrait},
common::{PeerId, scoped_task::ScopedTask, upnp},
connector::udp_hole_punch::common::{
HOLE_PUNCH_PACKET_BODY_LEN, UdpSocketArray, try_connect_with_socket,
},
@@ -117,23 +117,19 @@ impl PunchConeHoleClient {
let _g = self.peer_mgr.get_global_ctx().net_ns.guard();
Arc::new(UdpSocket::bind("0.0.0.0:0").await?)
};
let local_addr = local_socket
.local_addr()
.with_context(|| "failed to get local port from udp array")?;
let local_port = local_addr.port();
drop(local_socket);
let local_mapped_addr = global_ctx
.get_stun_info_collector()
.get_udp_port_mapping(local_port)
.with_context(|| "failed to get local addr from udp punch socket")?;
let local_listener: url::Url = format!("udp://0.0.0.0:{}", local_addr.port())
.parse()
.unwrap();
let (local_mapped_addr, _local_port_mapping_lease) = upnp::resolve_udp_public_addr(
global_ctx.clone(),
&local_listener,
local_socket.clone(),
)
.await
.with_context(|| "failed to get udp port mapping")?;
let local_socket = {
let _g = self.peer_mgr.get_global_ctx().net_ns.guard();
Arc::new(UdpSocket::bind(local_addr).await?)
};
.with_context(|| "failed to resolve udp public addr for cone hole punch")?;
// client -> server: tell server the mapped port, server will return the mapped address of listening port.
let rpc_stub = self
@@ -149,7 +145,10 @@ impl PunchConeHoleClient {
let resp = rpc_stub
.select_punch_listener(
BaseController::default(),
SelectPunchListenerRequest { force_new: false },
SelectPunchListenerRequest {
force_new: false,
prefer_port_mapping: true,
},
)
.await;
+9 -1
View File
@@ -88,7 +88,7 @@ impl UdpHolePunchRpc for UdpHolePunchServer {
) -> rpc_types::error::Result<SelectPunchListenerResponse> {
let (_, addr) = self
.common
.select_listener(input.force_new)
.select_listener(input.force_new, input.prefer_port_mapping)
.await
.ok_or(anyhow::anyhow!("no listener available"))?;
@@ -584,6 +584,11 @@ impl UdpHolePunchConnector {
Ok(())
}
#[cfg(test)]
pub async fn run_immediately_for_test(&self) {
self.client.run_immediately().await;
}
}
#[cfg(test)]
@@ -614,6 +619,9 @@ pub mod tests {
udp_nat_type: NatType,
) -> Arc<PeerManager> {
let p_a = create_mock_peer_manager().await;
let mut flags = p_a.get_global_ctx().get_flags();
flags.disable_upnp = true;
p_a.get_global_ctx().set_flags(flags);
replace_stun_info_collector(p_a.clone(), udp_nat_type);
p_a
}
@@ -434,7 +434,10 @@ impl PunchSymToConeHoleClient {
let resp = rpc_stub
.select_punch_listener(
BaseController::default(),
SelectPunchListenerRequest { force_new: false },
SelectPunchListenerRequest {
force_new: false,
prefer_port_mapping: true,
},
)
.await;
+13 -1
View File
@@ -450,6 +450,15 @@ struct NetworkOptions {
)]
disable_sym_hole_punching: Option<bool>,
#[arg(
long,
env = "ET_DISABLE_UPNP",
help = t!("core_clap.disable_upnp").to_string(),
num_args = 0..=1,
default_missing_value = "true"
)]
disable_upnp: Option<bool>,
#[arg(
long,
env = "ET_RELAY_ALL_PEER_RPC",
@@ -1101,7 +1110,10 @@ impl NetworkOptions {
f.enable_relay_foreign_network_quic = self
.enable_relay_foreign_network_quic
.unwrap_or(f.enable_relay_foreign_network_quic);
f.disable_sym_hole_punching = self.disable_sym_hole_punching.unwrap_or(false);
f.disable_sym_hole_punching = self
.disable_sym_hole_punching
.unwrap_or(f.disable_sym_hole_punching);
f.disable_upnp = self.disable_upnp.unwrap_or(f.disable_upnp);
// Configure tld_dns_zone: use provided value if set
if let Some(tld_dns_zone) = &self.tld_dns_zone {
f.tld_dns_zone = tld_dns_zone.clone();
+15
View File
@@ -355,6 +355,21 @@ fn handle_event(
event!(info, category: "CONNECTION", local, remote, err, "[{}] connection error", instance_id);
}
GlobalCtxEvent::ListenerPortMappingEstablished {
local_listener,
mapped_listener,
backend,
} => {
event!(
info,
%local_listener,
%mapped_listener,
backend,
"[{}] listener port mapping established",
instance_id
);
}
GlobalCtxEvent::TunDeviceReady(dev) => {
event!(info, dev, "[{}] tun device ready", instance_id);
}
+6
View File
@@ -801,6 +801,10 @@ impl NetworkConfig {
flags.disable_udp_hole_punching = disable_udp_hole_punching;
}
if let Some(disable_upnp) = self.disable_upnp {
flags.disable_upnp = disable_upnp;
}
if let Some(disable_sym_hole_punching) = self.disable_sym_hole_punching {
flags.disable_sym_hole_punching = disable_sym_hole_punching;
}
@@ -963,6 +967,7 @@ impl NetworkConfig {
result.disable_encryption = Some(!flags.enable_encryption);
result.disable_tcp_hole_punching = Some(flags.disable_tcp_hole_punching);
result.disable_udp_hole_punching = Some(flags.disable_udp_hole_punching);
result.disable_upnp = Some(flags.disable_upnp);
result.disable_sym_hole_punching = Some(flags.disable_sym_hole_punching);
result.enable_magic_dns = Some(flags.accept_dns);
result.mtu = Some(flags.mtu as i32);
@@ -1230,6 +1235,7 @@ mod tests {
flags.enable_encryption = rng.gen_bool(0.8);
flags.disable_tcp_hole_punching = rng.gen_bool(0.2);
flags.disable_udp_hole_punching = rng.gen_bool(0.2);
flags.disable_upnp = rng.gen_bool(0.2);
flags.accept_dns = rng.gen_bool(0.6);
flags.mtu = rng.gen_range(1200..1500);
flags.private_mode = rng.gen_bool(0.3);
+10 -10
View File
@@ -5,7 +5,7 @@ use crate::{
proto::{
common::Void,
peer_rpc::{
DirectConnectorRpc, GetIpListRequest, GetIpListResponse, SendV6HolePunchPacketRequest,
DirectConnectorRpc, GetIpListRequest, GetIpListResponse, SendUdpHolePunchPacketRequest,
},
rpc_types::{self, controller::BaseController},
},
@@ -50,29 +50,29 @@ impl DirectConnectorRpc for DirectConnectorManagerRpcServer {
Ok(ret)
}
async fn send_v6_hole_punch_packet(
async fn send_udp_hole_punch_packet(
&self,
_: BaseController,
req: SendV6HolePunchPacketRequest,
req: SendUdpHolePunchPacketRequest,
) -> rpc_types::error::Result<Void> {
let listener_port = req.listener_port as u16;
let SocketAddr::V6(connector_addr) = req
let connector_addr: SocketAddr = req
.connector_addr
.ok_or(anyhow::anyhow!("connector_addr is required"))?
.into()
else {
return Err(anyhow::anyhow!("connector_addr is not a v6 address").into());
};
.into();
tracing::info!(
"Sending v6 hole punch packet to {} from listener port {}",
"Sending udp hole punch packet to {} from listener port {}",
connector_addr,
listener_port
);
// send 3 packets to the connector
for _ in 0..3 {
udp::send_v6_hole_punch_packet(listener_port, connector_addr).await?;
match connector_addr {
SocketAddr::V4(addr) => udp::send_v4_hole_punch_packet(listener_port, addr).await?,
SocketAddr::V6(addr) => udp::send_v6_hole_punch_packet(listener_port, addr).await?,
}
tokio::time::sleep(std::time::Duration::from_millis(30)).await;
}
Ok(Default::default())
+1
View File
@@ -89,6 +89,7 @@ message NetworkConfig {
optional bool lazy_p2p = 58;
optional bool need_p2p = 59;
optional uint64 instance_recv_bps_limit = 60;
optional bool disable_upnp = 61;
}
message PortForwardConfig {
+1
View File
@@ -74,6 +74,7 @@ message FlagsInConfig {
bool lazy_p2p = 37;
bool need_p2p = 38;
uint64 instance_recv_bps_limit = 39;
bool disable_upnp = 40;
}
message RpcDescriptor {
+3 -2
View File
@@ -142,18 +142,19 @@ message GetIpListResponse {
repeated common.Url listeners = 5;
}
message SendV6HolePunchPacketRequest {
message SendUdpHolePunchPacketRequest {
common.SocketAddr connector_addr = 1;
uint32 listener_port = 2;
}
service DirectConnectorRpc {
rpc GetIpList(GetIpListRequest) returns (GetIpListResponse);
rpc SendV6HolePunchPacket(SendV6HolePunchPacketRequest) returns (common.Void);
rpc SendUdpHolePunchPacket(SendUdpHolePunchPacketRequest) returns (common.Void);
}
message SelectPunchListenerRequest {
bool force_new = 1;
bool prefer_port_mapping = 2;
}
message SelectPunchListenerResponse {
+3
View File
@@ -6,6 +6,9 @@ mod ipv6_test;
#[cfg(target_os = "linux")]
mod credential_tests;
#[cfg(target_os = "linux")]
mod upnp_test;
use crate::common::PeerId;
use crate::peers::peer_manager::PeerManager;
File diff suppressed because it is too large Load Diff
+10 -1
View File
@@ -28,10 +28,19 @@ pub enum UdpPacketType {
Data = 3,
Fin = 4,
HolePunch = 5,
V6HolePunch = 6, // when receiving v6 hole punch packet, the packet contains a socket addr of other peer, we
V4HolePunch = 6, // when receiving v4 hole punch packet, the packet contains a socket addr of other peer, we
// will send a hole punch packet to that peer. we only accept this packet from loopback interface.
V6HolePunch = 7, // when receiving v6 hole punch packet, the packet contains a socket addr of other peer, we
// will send a hole punch packet to that peer. we only accept this packet from lookback interface.
}
/// Wire body of a `UdpPacketType::V4HolePunch` control packet: the IPv4
/// address and port the receiving listener should punch toward.
/// `#[repr(C, packed)]` pins the on-wire layout so it can be read/written
/// directly via zerocopy (`AsBytes`/`FromBytes`).
#[repr(C, packed)]
#[derive(AsBytes, FromBytes, FromZeroes, Clone, Debug, Default)]
pub struct V4HolePunchPacket {
    // Destination IPv4 address, as produced by `Ipv4Addr::octets()`.
    pub dst_ipv4: [u8; 4],
    // Destination UDP port; endianness fixed by `DefaultEndian`.
    pub dst_port: U16<DefaultEndian>,
}
#[repr(C, packed)]
#[derive(AsBytes, FromBytes, FromZeroes, Clone, Debug, Default)]
pub struct V6HolePunchPacket {
+99 -3
View File
@@ -1,6 +1,6 @@
use std::{
fmt::Debug,
net::{Ipv6Addr, SocketAddrV6},
net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
sync::{Arc, Weak},
};
@@ -12,7 +12,6 @@ use futures::{SinkExt, StreamExt, stream::FuturesUnordered};
use rand::{Rng, SeedableRng};
use zerocopy::{AsBytes, FromBytes};
use std::net::SocketAddr;
use tokio::{
net::UdpSocket,
sync::mpsc::{Receiver, Sender, UnboundedReceiver, UnboundedSender},
@@ -24,7 +23,7 @@ use super::{
FromUrl, IpVersion, Tunnel, TunnelConnCounter, TunnelError, TunnelInfo, TunnelListener,
TunnelUrl,
common::wait_for_connect_futures,
packet_def::{UDP_TUNNEL_HEADER_SIZE, UDPTunnelHeader, V6HolePunchPacket},
packet_def::{UDP_TUNNEL_HEADER_SIZE, UDPTunnelHeader, V4HolePunchPacket, V6HolePunchPacket},
ring::{RingSink, RingStream},
};
use crate::tunnel::common::bind;
@@ -114,6 +113,28 @@ pub fn new_v6_hole_punch_packet(dst: &SocketAddrV6) -> ZCPacket {
)
}
/// Build the loopback control packet that asks a udp listener to send a
/// hole punch packet toward the IPv4 peer `dst`.
pub fn new_v4_hole_punch_packet(dst: &SocketAddrV4) -> ZCPacket {
    let mut payload = V4HolePunchPacket::default();
    payload.dst_ipv4 = dst.ip().octets();
    payload.dst_port.set(dst.port());
    new_udp_packet(
        |header| {
            header.msg_type = UdpPacketType::V4HolePunch as u8;
            // The destination port doubles as the conn id for this
            // control packet.
            header.conn_id.set(dst.port() as u32);
            header
                .len
                .set(std::mem::size_of::<V4HolePunchPacket>() as u16);
        },
        Some(payload.as_bytes()),
    )
}
/// Parse the punch destination out of a v4 hole punch packet body.
/// Returns `None` when `buf` is too short to contain the packet.
fn extract_dst_addr_from_v4_hole_punch_packet(buf: &[u8]) -> Option<SocketAddrV4> {
    V4HolePunchPacket::ref_from_prefix(buf).map(|body| {
        // Copy the octets out of the packed struct before converting.
        let octets = body.dst_ipv4;
        SocketAddrV4::new(Ipv4Addr::from(octets), body.dst_port.get())
    })
}
fn extrace_dst_addr_from_hole_punch_packet(buf: &[u8]) -> Option<SocketAddrV6> {
let body = V6HolePunchPacket::ref_from_prefix(buf)?;
let ip = Ipv6Addr::from(body.dst_ipv6);
@@ -142,6 +163,21 @@ pub async fn send_v6_hole_punch_packet(
Ok(())
}
pub async fn send_v4_hole_punch_packet(
listener_port: u16,
dst_addr: SocketAddrV4,
) -> Result<(), TunnelError> {
let local_socket = UdpSocket::bind("127.0.0.1:0").await?;
let udp_packet = new_v4_hole_punch_packet(&dst_addr);
let remote_addr = format!("127.0.0.1:{}", listener_port)
.parse::<SocketAddr>()
.unwrap();
local_socket
.send_to(&udp_packet.into_bytes(), remote_addr)
.await?;
Ok(())
}
async fn respond_stun_packet(
socket: Arc<UdpSocket>,
addr: SocketAddr,
@@ -455,6 +491,27 @@ impl UdpTunnelListenerData {
tracing::error!(?e, "udp respond stun packet error");
}
});
} else if header.msg_type == UdpPacketType::V4HolePunch as u8 {
if !addr.ip().is_loopback() {
tracing::warn!(?addr, "v4 hole punch packet should be from loopback");
return;
}
if !addr.ip().is_ipv4() {
tracing::warn!(?addr, "v4 hole punch packet should be sent from ipv4");
return;
}
let Some(dst_addr) =
extract_dst_addr_from_v4_hole_punch_packet(zc_packet.udp_payload())
else {
tracing::warn!("invalid v4 hole punch packet");
return;
};
let socket = self.socket.as_ref().unwrap().clone();
let udp_packet = new_hole_punch_packet(1, 32);
if let Err(e) = socket.try_send_to(&udp_packet.into_bytes(), SocketAddr::V4(dst_addr)) {
tracing::error!(?e, "udp send hole punch packet error");
}
tracing::debug!(?dst_addr, "udp forward packet send hole punch packet");
} else if header.msg_type == UdpPacketType::V6HolePunch as u8 {
if !addr.ip().is_loopback() {
tracing::warn!(?addr, "v6 hole punch packet should be from loopback");
@@ -527,6 +584,12 @@ impl UdpTunnelListener {
}
}
/// Create a listener that reuses an already-bound udp socket; `listen()`
/// will skip binding when a socket is pre-installed this way.
pub fn new_with_socket(addr: url::Url, socket: Arc<UdpSocket>) -> Self {
    let mut this = Self::new(addr);
    this.socket = Some(socket);
    this
}
/// Handle to the bound socket; `None` until a socket is installed
/// (by `listen()` or `new_with_socket`).
pub fn get_socket(&self) -> Option<Arc<UdpSocket>> {
    self.socket.as_ref().map(Arc::clone)
}
@@ -535,6 +598,7 @@ impl UdpTunnelListener {
#[async_trait]
impl TunnelListener for UdpTunnelListener {
async fn listen(&mut self) -> Result<(), TunnelError> {
if self.socket.is_none() {
let addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let tunnel_url: TunnelUrl = self.addr.clone().into();
self.socket = Some(Arc::new(
@@ -544,6 +608,7 @@ impl TunnelListener for UdpTunnelListener {
.maybe_dev(tunnel_url.bind_dev())
.call()?,
));
}
self.data.socket = self.socket.clone();
self.addr
@@ -1147,4 +1212,35 @@ mod tests {
.expect("Timeout waiting for v6 hole punch packet")
.unwrap();
}
#[tokio::test]
async fn test_v4_hole_punch_packet() {
    // Listener that accepts the loopback-only V4HolePunch control packet.
    let mut lis = UdpTunnelListener::new("udp://0.0.0.0:0".parse().unwrap());
    lis.listen().await.unwrap();

    // Receiver standing in for the punch target; it waits for any datagram.
    let receiver = Arc::new(UdpSocket::bind("127.0.0.1:0").await.unwrap());
    let receiver_for_task = receiver.clone();
    let recv_task = tokio::spawn(async move {
        let mut buf = BytesMut::new();
        buf.resize(128, 0);
        receiver_for_task.recv_from(&mut buf).await.unwrap();
    });

    // Give the listener's accept loop a moment to start polling.
    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

    let receiver_addr = match receiver.local_addr().unwrap() {
        std::net::SocketAddr::V4(addr_v4) => addr_v4,
        _ => panic!("Expected an IPv4 address"),
    };
    send_v4_hole_punch_packet(lis.local_url().port().unwrap(), receiver_addr)
        .await
        .unwrap();

    tokio::time::timeout(tokio::time::Duration::from_secs(2), recv_task)
        .await
        .expect("Timeout waiting for v4 hole punch packet")
        .unwrap();
}
}