Update On Mon Mar 31 20:36:50 CEST 2025

This commit is contained in:
github-action[bot]
2025-03-31 20:36:50 +02:00
parent bc52aefe53
commit 65c359fa66
129 changed files with 4134 additions and 2745 deletions
+1
View File
@@ -958,3 +958,4 @@ Update On Thu Mar 27 19:36:04 CET 2025
Update On Fri Mar 28 19:38:10 CET 2025
Update On Sat Mar 29 19:34:26 CET 2025
Update On Sun Mar 30 20:34:10 CEST 2025
Update On Mon Mar 31 20:36:40 CEST 2025
+27 -1578
View File
File diff suppressed because it is too large Load Diff
+3 -2
View File
@@ -46,7 +46,7 @@ humansize = "2.1.3"
convert_case = "0.8.0"
anyhow = "1.0"
pretty_assertions = "1.4.0"
chrono = "0.4.31"
chrono = { version = "0.4", features = ["serde"] }
time = { version = "0.3", features = ["formatting", "parsing", "serde"] }
once_cell = "1.19.0"
async-trait = "0.1.77"
@@ -66,6 +66,8 @@ backon = { version = "1.0.1", features = ["tokio-sleep"] } # for
dashmap = "6"
indexmap = { version = "2.2.3", features = ["serde"] }
bimap = "0.6.3"
bumpalo = "3.17.0" # a bump allocator for heap allocation
rustc-hash = "2.1"
# Terminal Utilities
ansi-str = "0.9" # for ansi str stripped
@@ -212,7 +214,6 @@ specta = { version = "=2.0.0-rc.22", features = [
"indexmap",
"function",
] }
surrealdb = { version = "2.2.1", features = ["kv-mem"] }
[target."cfg(not(any(target_os = \"android\", target_os = \"ios\")))".dependencies]
tauri-plugin-global-shortcut = "2.2.0"
@@ -1,17 +1,23 @@
use std::{io::SeekFrom, ops::Range};
use std::{
collections::{BTreeMap, BTreeSet},
io::SeekFrom,
ops::Range,
};
use anyhow::Context;
use bumpalo::Bump;
use camino::Utf8PathBuf;
use chrono::{DateTime, Local};
use derive_builder::Builder;
use itertools::Itertools;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use specta::Type;
use surrealdb::{RecordId, Surreal, engine::local::Db};
use surrealdb::engine::local::Mem;
use tokio::io::{AsyncBufReadExt, AsyncSeekExt, BufReader};
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Type, Hash, Eq, PartialEq)]
#[derive(
Debug, Clone, Copy, Serialize, Deserialize, Type, Hash, Eq, PartialEq, Ord, PartialOrd,
)]
#[serde(rename_all = "UPPERCASE")]
#[allow(clippy::upper_case_acronyms)]
pub enum LoggingLevel {
@@ -61,130 +67,230 @@ pub struct Query {
timestamp: Option<Range<u64>>,
}
pub type LineNumber = u64;
pub type Timestamp = u64;
struct LogIndex {
connection: Surreal<Db>,
table_name: String,
/// a bump allocator for heap allocation
arena: Bump,
/// index by line number
line_index: BTreeMap<LineNumber, *mut LogEntry>,
/// index by timestamp
/// in our case, the timestamp is nanoseconds, so only one item per timestamp
timestamp_index: BTreeMap<Timestamp, LineNumber>,
/// index by level
level_index: FxHashMap<LoggingLevel, *mut Vec<LineNumber>>,
/// index by target
target_index: FxHashMap<String, *mut Vec<LineNumber>>,
last_line_number: Option<LineNumber>,
}
impl core::fmt::Debug for LogIndex {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let lines = self
.line_index
.values()
.map(|v| unsafe {
let v = &**v;
v.clone()
})
.collect_vec();
let levels = BTreeMap::from_iter(self.level_index.iter().map(|(k, v)| {
(k, unsafe {
let v = &**v;
v.clone()
})
}));
let targets = BTreeMap::from_iter(self.target_index.iter().map(|(k, v)| {
(k, unsafe {
let v = &**v;
v.clone()
})
}));
write!(
f,
"LogIndex {{
lines: {:?};
timestamp_index: {:?};
level_index: {:?};
target_index: {:?};
last_line_number: {:?}
}}",
lines, self.timestamp_index, levels, targets, self.last_line_number
)
}
}
impl LogIndex {
pub async fn try_new(table_name: String) -> anyhow::Result<Self> {
let connection = Surreal::new::<Mem>(())
.await
.context("failed to create connection")?;
connection
.use_ns(super::LOGGING_NS)
.use_db(super::LOGGING_NS)
.await
.context("failed to use namespace and database")?;
pub fn new() -> Self {
Self {
arena: Bump::new(),
line_index: BTreeMap::new(),
timestamp_index: BTreeMap::new(),
level_index: FxHashMap::default(),
target_index: FxHashMap::default(),
last_line_number: None,
}
}
let index = Self {
connection,
table_name,
#[inline]
/// add an entry to the index
pub fn add_entry(&mut self, entry: LogEntry) {
let line_number = entry.line_number;
let timestamp = entry.timestamp;
let level = entry.level;
let target = entry.target.clone();
let entry_ptr = self.arena.alloc(entry) as *mut LogEntry;
// update level index
{
let entry = self.level_index.entry(level);
entry
.and_modify(|v| {
// SAFETY: we are sure that the vec_ptr is valid
unsafe {
let v = &mut **v;
v.push(line_number);
}
})
.or_insert_with(|| {
let vec = self.arena.alloc(vec![line_number]);
vec as *mut Vec<u64>
});
}
// update timestamp index
{
let entry = self.timestamp_index.entry(timestamp);
entry
.and_modify(|v| {
tracing::warn!(
"duplicate timestamp: {}; previous: {}, new: {}",
timestamp,
v,
line_number
);
*v = line_number;
})
.or_insert(line_number);
}
// update target index
{
let entry = self.target_index.entry(target);
entry
.and_modify(|v| {
// SAFETY: we are sure that the vec_ptr is valid
unsafe {
let v = &mut **v;
v.push(line_number);
}
})
.or_insert_with(|| {
let vec = self.arena.alloc(vec![line_number]);
vec as *mut Vec<u64>
});
}
// update line index
{
self.line_index.insert(line_number, entry_ptr);
}
self.last_line_number = Some(line_number);
}
// TODO: optimize query performance
pub fn query(&self, query: Query) -> Option<Vec<LogEntry>> {
// query by timestamp
let mut matching_lines: Option<Vec<LineNumber>> = None;
if let Some(range) = query.timestamp {
let mut range = self.timestamp_index.range(range);
let (_, start) = range.next()?;
let end = match range.last() {
Some((_, end_line)) => *end_line,
None => *start,
};
matching_lines = Some(Vec::from_iter(*start..=end));
}
// query by level
if let Some(levels) = query.level {
let mut matched_lines = BTreeSet::new();
for level in levels {
if let Some(lines) = self.level_index.get(&level) {
// SAFETY: we have allocated the vec on the heap by bumpalo
unsafe {
let lines = &**lines;
matched_lines.extend(lines.iter());
}
}
}
matching_lines = match matching_lines {
Some(lines) => Some(
lines
.into_iter()
.filter(|line| matched_lines.contains(line))
.collect_vec(),
),
None => Some(matched_lines.into_iter().collect_vec()),
}
}
// query by target
if let Some(targets) = query.target {
let mut matched_lines = BTreeSet::new();
for target in targets {
if let Some(lines) = self.target_index.get(&target) {
// SAFETY: we have allocated the vec on the heap by bumpalo
unsafe {
let lines = &**lines;
matched_lines.extend(lines.iter());
}
}
}
matching_lines = match matching_lines {
Some(lines) => Some(
lines
.into_iter()
.filter(|line| matched_lines.contains(line))
.collect_vec(),
),
None => Some(matched_lines.into_iter().collect_vec()),
}
}
let matching_lines = match matching_lines {
Some(lines) if lines.is_empty() => return None,
None => {
let last_line = self.last_line_number.as_ref()?;
Vec::from_iter(0..=*last_line)
}
Some(lines) => lines,
};
index.create_table().await?;
#[cfg(test)]
dbg!(&matching_lines);
Ok(index)
}
let results = matching_lines
.into_iter()
.skip(query.offset)
.take(query.limit)
// SAFETY: we are sure that the line_index is valid, which is allocated by bumpalo,
// and the pool only be dropped when this index is dropped
.map(|line_number| unsafe {
let entry = &**self.line_index.get(&line_number).unwrap();
entry.clone()
})
.collect_vec();
async fn create_table(&self) -> anyhow::Result<()> {
let table_name = self.table_name.as_str();
let sql = format!(
r#"DEFINE TABLE {table_name} TYPE NORMAL SCHEMAFULL PERMISSIONS NONE;
-- ------------------------------
-- FIELDS
-- ------------------------------
DEFINE FIELD end_pos ON {table_name} TYPE int PERMISSIONS FULL;
DEFINE FIELD level ON {table_name} TYPE string PERMISSIONS FULL;
DEFINE FIELD line_number ON {table_name} TYPE int PERMISSIONS FULL;
DEFINE FIELD start_pos ON {table_name} TYPE int PERMISSIONS FULL;
DEFINE FIELD target ON {table_name} TYPE string PERMISSIONS FULL;
DEFINE FIELD timestamp ON {table_name} TYPE int PERMISSIONS FULL;
-- ------------------------------
-- INDEXES
-- ------------------------------
DEFINE INDEX line_numberIndex ON TABLE {table_name} FIELDS line_number UNIQUE;
DEFINE INDEX levelIndex ON TABLE {table_name} FIELDS level;
DEFINE INDEX timestampIndex ON TABLE {table_name} FIELDS timestamp;
DEFINE INDEX targetIndex ON TABLE {table_name} FIELDS target;
DEFINE INDEX timestampAndLevel ON {table_name} FIELDS timestamp, level;
"#,
);
self.connection
.query(sql)
.await
.context("failed to create table")?;
Ok(())
}
pub async fn add_entry(&self, entry: LogEntry) -> anyhow::Result<()> {
#[derive(Debug, Serialize, Deserialize)]
struct Record {
id: RecordId,
if results.is_empty() {
None
} else {
Some(results)
}
let _: Option<Record> = self
.connection
.create(&self.table_name)
.content(entry)
.await
.context("failed to add entry")?;
Ok(())
}
fn build_query(&self, query: Query) -> String {
let table_name = self.table_name.as_str();
let offset = query.offset;
let limit = query.limit;
let mut sql = format!("SELECT * FROM {table_name} WHERE line_number >= {offset}");
if let Some(level) = query.level {
let level = level
.iter()
.map(|l| format!("level = {}", serde_json::to_string(l).unwrap()))
.collect::<Vec<_>>()
.join(" OR ");
sql = format!("{sql} AND ({level})");
}
if let Some(target) = query.target {
let target = target
.iter()
.map(|t| format!("target = {}", serde_json::to_string(t).unwrap()))
.collect::<Vec<_>>()
.join(" OR ");
sql = format!("{sql} AND ({target})");
}
if let Some(timestamp) = query.timestamp {
let start = timestamp.start;
let end = timestamp.end;
let timestamp = format!("{}..{}", start, end);
sql = format!("{sql} AND timestamp IN {timestamp}");
}
format!("{sql} ORDER BY line_number ASC LIMIT {limit}")
}
fn explain_query(&self, sql: String) -> String {
format!("{sql} EXPLAIN FULL")
}
pub async fn query(&self, query: Query) -> anyhow::Result<Vec<LogEntry>> {
let sql = self.build_query(query);
#[cfg(debug_assertions)]
dbg!(&sql);
let mut res = self
.connection
.query(sql)
.await
.context("failed to query")?;
let results: Vec<LogEntry> = res.take(0).context("failed to take results")?;
Ok(results)
}
}
pub struct Indexer {
index: LogIndex,
path: Utf8PathBuf,
@@ -199,33 +305,22 @@ struct TracingJson {
}
impl Indexer {
pub async fn try_new(path: Utf8PathBuf) -> anyhow::Result<Self> {
let index = LogIndex::try_new(format!(
"{}_{}",
super::LOGGING_DB_PREFIX,
path.file_name()
.unwrap()
.replace(".", "_")
.replace(" ", "_")
.replace("-", "__")
))
.await
.context("failed to create index")?;
Ok(Self {
index,
pub fn new(path: Utf8PathBuf) -> Self {
Self {
index: LogIndex::new(),
path,
current: CurrentPos::default(),
})
}
}
async fn handle_line(
fn handle_line(
&mut self,
line: &str,
current: &mut CurrentPos,
bytes_read: usize,
) -> anyhow::Result<()> {
let tracing_json: TracingJson = serde_json::from_str(line)?;
let tracing_json: TracingJson =
serde_json::from_str(line).context("failed to parse log line")?;
let end_pos = current.end_pos + bytes_read;
let entry = LogEntry {
line_number: current.line,
@@ -235,7 +330,7 @@ impl Indexer {
start_pos: current.end_pos,
end_pos,
};
self.index.add_entry(entry).await?;
self.index.add_entry(entry);
current.line += 1;
current.end_pos = end_pos;
Ok(())
@@ -253,7 +348,7 @@ impl Indexer {
if bytes_read == 0 {
break;
}
self.handle_line(&line, &mut current, bytes_read).await?;
self.handle_line(&line, &mut current, bytes_read)?;
line.clear();
}
#[cfg(test)]
@@ -265,6 +360,10 @@ impl Indexer {
Ok(())
}
pub fn query(&self, query: Query) -> Option<Vec<LogEntry>> {
self.index.query(query)
}
pub async fn on_file_change(&mut self) -> anyhow::Result<()> {
let mut file = tokio::fs::File::open(&self.path).await?;
file.seek(SeekFrom::Start(self.current.end_pos as u64))
@@ -277,7 +376,7 @@ impl Indexer {
if bytes_read == 0 {
break;
}
self.handle_line(&line, &mut current, bytes_read).await?;
self.handle_line(&line, &mut current, bytes_read)?;
line.clear();
}
self.current = current;
@@ -291,14 +390,13 @@ mod tests {
use camino::Utf8PathBuf;
use std::io::Write;
use tempfile::NamedTempFile;
use tokio::fs;
#[tokio::test]
async fn test_log_index() {
let index = LogIndex::try_new("test".to_string()).await.unwrap();
#[test]
fn test_log_index() {
let mut index = LogIndex::new();
let query = QueryBuilder::default().build().unwrap();
let results = index.query(query).await.unwrap();
assert!(results.is_empty(), "results should be empty");
let results = index.query(query);
assert!(results.is_none(), "results should be empty");
let entry = LogEntry {
line_number: 1,
@@ -308,7 +406,7 @@ mod tests {
start_pos: 0,
end_pos: 0,
};
index.add_entry(entry).await.unwrap();
index.add_entry(entry);
let entry = LogEntry {
line_number: 2,
@@ -318,17 +416,17 @@ mod tests {
start_pos: 0,
end_pos: 0,
};
index.add_entry(entry).await.unwrap();
index.add_entry(entry);
let entry = LogEntry {
line_number: 3,
level: LoggingLevel::ERROR,
timestamp: 1740417699000,
timestamp: 1740417699001,
target: "test".to_string(),
start_pos: 0,
end_pos: 0,
};
index.add_entry(entry).await.unwrap();
index.add_entry(entry);
let entry = LogEntry {
line_number: 4,
@@ -338,11 +436,13 @@ mod tests {
start_pos: 0,
end_pos: 0,
};
index.add_entry(entry).await.unwrap();
index.add_entry(entry);
dbg!(&index);
// Test offset limit
let query = QueryBuilder::default().offset(1).limit(1).build().unwrap();
let results = index.query(query).await.unwrap();
let results = index.query(query).unwrap();
dbg!(&results);
assert_eq!(results.len(), 1, "results should have 1 entries");
assert_eq!(results[0].line_number, 1);
@@ -352,7 +452,7 @@ mod tests {
.level(vec![LoggingLevel::INFO])
.build()
.unwrap();
let results = index.query(query).await.unwrap();
let results = index.query(query).unwrap();
dbg!(&results);
assert_eq!(results.len(), 2, "results should have 2 entries");
assert_eq!(results[0].line_number, 1);
@@ -362,7 +462,7 @@ mod tests {
.level(vec![LoggingLevel::INFO, LoggingLevel::WARN])
.build()
.unwrap();
let results = index.query(query).await.unwrap();
let results = index.query(query).unwrap();
dbg!(&results);
assert_eq!(results.len(), 3, "results should have 3 entries");
assert_eq!(results[0].line_number, 1);
@@ -374,9 +474,9 @@ mod tests {
.target(vec!["test".to_string()])
.build()
.unwrap();
let results = index.query(query).await.unwrap();
let results = index.query(query).unwrap();
dbg!(&results);
assert_eq!(results.len(), 3, "results should have 2 entries");
assert_eq!(results.len(), 3, "results should have 3 entries");
assert_eq!(results[0].line_number, 1);
assert_eq!(results[1].line_number, 2);
assert_eq!(results[2].line_number, 3);
@@ -386,7 +486,7 @@ mod tests {
.timestamp(1740417699000..1740504078324)
.build()
.unwrap();
let results = index.query(query).await.unwrap();
let results = index.query(query).unwrap();
dbg!(&results);
assert_eq!(results.len(), 2, "results should have 2 entries");
assert_eq!(results[0].line_number, 2);
@@ -399,15 +499,13 @@ mod tests {
.timestamp(1740417699000..1740504078324)
.build()
.unwrap();
let results = index.query(query).await.unwrap();
let results = index.query(query).unwrap();
dbg!(&results);
assert_eq!(results.len(), 1, "results should have 1 entries");
assert_eq!(results[0].line_number, 2);
}
async fn create_test_log_file(
entries: Vec<&str>,
) -> anyhow::Result<(NamedTempFile, Utf8PathBuf)> {
fn create_test_log_file(entries: Vec<&str>) -> anyhow::Result<(NamedTempFile, Utf8PathBuf)> {
let mut file = NamedTempFile::new()?;
for entry in entries {
writeln!(file, "{}", entry)?;
@@ -420,10 +518,7 @@ mod tests {
Ok((file, utf8_path))
}
async fn append_to_log_file(
file: &mut NamedTempFile,
entries: Vec<&str>,
) -> anyhow::Result<()> {
fn append_to_log_file(file: &mut NamedTempFile, entries: Vec<&str>) -> anyhow::Result<()> {
for entry in entries {
writeln!(file, "{}", entry)?;
}
@@ -431,7 +526,7 @@ mod tests {
Ok(())
}
async fn get_sample_log_entries() -> Vec<&'static str> {
fn get_sample_log_entries() -> Vec<&'static str> {
vec![
r#"{"level":"INFO","target":"app::module1","timestamp":"2023-02-25T10:15:30+00:00"}"#,
r#"{"level":"WARN","target":"app::module2","timestamp":"2023-02-25T10:16:30+00:00"}"#,
@@ -440,35 +535,33 @@ mod tests {
]
}
async fn get_additional_log_entries() -> Vec<&'static str> {
fn get_additional_log_entries() -> Vec<&'static str> {
vec![
r#"{"level":"INFO","target":"app::module2","timestamp":"2023-02-25T10:19:30+00:00"}"#,
r#"{"level":"FATAL","target":"app::module1","timestamp":"2023-02-25T10:20:30+00:00"}"#,
]
}
#[tokio::test]
async fn test_indexer_creation() -> anyhow::Result<()> {
let entries = get_sample_log_entries().await;
let (_guard, path) = create_test_log_file(entries).await?;
#[test]
fn test_indexer_creation() {
let entries = get_sample_log_entries();
let (_guard, path) = create_test_log_file(entries).unwrap();
let indexer = Indexer::try_new(path).await?;
let indexer = Indexer::new(path);
assert!(indexer.current.line == 0, "Initial line count should be 0");
assert!(
indexer.current.end_pos == 0,
"Initial end position should be 0"
);
Ok(())
}
#[tokio::test]
async fn test_build_index() -> anyhow::Result<()> {
let entries = get_sample_log_entries().await;
let (_guard, path) = create_test_log_file(entries.clone()).await?;
let entries = get_sample_log_entries();
let (_guard, path) = create_test_log_file(entries.clone()).unwrap();
let mut indexer = Indexer::try_new(path.clone()).await?;
indexer.build_index().await?;
let mut indexer = Indexer::new(path);
indexer.build_index().await.unwrap();
// Verify that all entries were indexed
assert_eq!(
@@ -478,8 +571,8 @@ mod tests {
);
// Query the index to verify entries
let query = QueryBuilder::default().build()?;
let results = indexer.index.query(query).await?;
let query = QueryBuilder::default().build().unwrap();
let results = indexer.index.query(query).unwrap();
assert_eq!(
results.len(),
@@ -491,25 +584,25 @@ mod tests {
let info_query = QueryBuilder::default()
.level(vec![LoggingLevel::INFO])
.build()?;
let info_results = indexer.index.query(info_query).await?;
let info_results = indexer.index.query(info_query).unwrap();
assert_eq!(info_results.len(), 1, "Should have 1 INFO entry");
let warn_query = QueryBuilder::default()
.level(vec![LoggingLevel::WARN])
.build()?;
let warn_results = indexer.index.query(warn_query).await?;
let warn_results = indexer.index.query(warn_query).unwrap();
assert_eq!(warn_results.len(), 1, "Should have 1 WARN entry");
let error_query = QueryBuilder::default()
.level(vec![LoggingLevel::ERROR])
.build()?;
let error_results = indexer.index.query(error_query).await?;
let error_results = indexer.index.query(error_query).unwrap();
assert_eq!(error_results.len(), 1, "Should have 1 ERROR entry");
let debug_query = QueryBuilder::default()
.level(vec![LoggingLevel::DEBUG])
.build()?;
let debug_results = indexer.index.query(debug_query).await?;
let debug_results = indexer.index.query(debug_query).unwrap();
assert_eq!(debug_results.len(), 1, "Should have 1 DEBUG entry");
Ok(())
@@ -517,12 +610,12 @@ mod tests {
#[tokio::test]
async fn test_on_file_change() -> anyhow::Result<()> {
let initial_entries = get_sample_log_entries().await;
let (mut file, path) = create_test_log_file(initial_entries.clone()).await?;
let initial_entries = get_sample_log_entries();
let (mut file, path) = create_test_log_file(initial_entries.clone()).unwrap();
// Initialize and build the initial index
let mut indexer = Indexer::try_new(path.clone()).await?;
indexer.build_index().await?;
let mut indexer = Indexer::new(path);
indexer.build_index().await.unwrap();
// Verify initial indexing
assert_eq!(
@@ -532,8 +625,8 @@ mod tests {
);
// Add more entries to the file
let additional_entries = get_additional_log_entries().await;
append_to_log_file(&mut file, additional_entries.clone()).await?;
let additional_entries = get_additional_log_entries();
append_to_log_file(&mut file, additional_entries.clone()).unwrap();
// Process file changes
indexer.on_file_change().await?;
@@ -546,8 +639,8 @@ mod tests {
);
// Query all entries
let query = QueryBuilder::default().build()?;
let results = indexer.index.query(query).await?;
let query = QueryBuilder::default().build().unwrap();
let results = indexer.index.query(query).unwrap();
assert_eq!(
results.len(),
total_entries,
@@ -558,7 +651,7 @@ mod tests {
let fatal_query = QueryBuilder::default()
.level(vec![LoggingLevel::FATAL])
.build()?;
let fatal_results = indexer.index.query(fatal_query).await?;
let fatal_results = indexer.index.query(fatal_query).unwrap();
assert_eq!(
fatal_results.len(),
1,
@@ -570,17 +663,17 @@ mod tests {
#[tokio::test]
async fn test_indexer_with_target_filter() -> anyhow::Result<()> {
let entries = get_sample_log_entries().await;
let (_guard, path) = create_test_log_file(entries).await?;
let entries = get_sample_log_entries();
let (_guard, path) = create_test_log_file(entries).unwrap();
let mut indexer = Indexer::try_new(path).await?;
indexer.build_index().await?;
let mut indexer = Indexer::new(path);
indexer.build_index().await.unwrap();
// Query by target
let target_query = QueryBuilder::default()
.target(vec!["app::module1".to_string()])
.build()?;
let target_results = indexer.index.query(target_query).await?;
let target_results = indexer.index.query(target_query).unwrap();
assert_eq!(
target_results.len(),
@@ -602,15 +695,15 @@ mod tests {
#[tokio::test]
async fn test_indexer_complex_query() -> anyhow::Result<()> {
let entries = get_sample_log_entries().await;
let additional_entries = get_additional_log_entries().await;
let entries = get_sample_log_entries();
let additional_entries = get_additional_log_entries();
let mut all_entries = entries.clone();
all_entries.extend(additional_entries.clone());
let (_guard, path) = create_test_log_file(all_entries).await?;
let (_guard, path) = create_test_log_file(all_entries).unwrap();
let mut indexer = Indexer::try_new(path).await?;
indexer.build_index().await?;
let mut indexer = Indexer::new(path);
indexer.build_index().await.unwrap();
// Complex query with multiple filters
let complex_query = QueryBuilder::default()
@@ -618,7 +711,7 @@ mod tests {
.target(vec!["app::module2".to_string()])
.build()?;
let complex_results = indexer.index.query(complex_query).await?;
let complex_results = indexer.index.query(complex_query).unwrap();
assert_eq!(
complex_results.len(),
2,
@@ -8,15 +8,25 @@ use notify_debouncer_full::{
DebounceEventResult, DebouncedEvent, Debouncer, RecommendedCache, new_debouncer,
notify::{RecommendedWatcher, RecursiveMode},
};
use tokio::sync::{RwLock, mpsc::Receiver};
use tokio::{
sync::{
RwLock,
mpsc::{Receiver, UnboundedSender},
oneshot,
},
task::{JoinHandle, LocalSet},
};
use tokio_util::sync::CancellationToken;
use super::{LogEntry, Query};
#[derive(Clone)]
pub struct IndexerManager {
inner: Arc<IndexerManagerInner>,
inner: Arc<IndexerRunnerGuard>,
}
impl Deref for IndexerManager {
type Target = IndexerManagerInner;
type Target = IndexerRunnerGuard;
fn deref(&self) -> &Self::Target {
&self.inner
@@ -33,98 +43,202 @@ async fn is_log_file(path: &Utf8Path) -> anyhow::Result<bool> {
impl IndexerManager {
pub async fn try_new(logging_dir: Utf8PathBuf) -> anyhow::Result<Self> {
let mut inner = IndexerManagerInner::new();
inner
.scan(&logging_dir)
.await
.context("failed to scan logging directory")?;
let mut rx = inner
.recommended_watcher(&logging_dir)
.context("failed to create recommended watcher")?;
let inner = IndexerManagerRunner::new_and_spawn().await;
let manager = Self {
inner: Arc::new(inner),
};
let this = manager.clone();
tokio::spawn(async move {
while let Some(events) = rx.recv().await {
for event in events {
if let Err(err) = this.handle_event(event).await {
tracing::error!("failed to handle event: {:?}", err);
}
}
}
});
manager.watch(&logging_dir).await?;
Ok(manager)
}
#[tracing::instrument(skip(self))]
async fn handle_event(&self, event: DebouncedEvent) -> anyhow::Result<()> {
tracing::debug!("received event: {:?}", event);
let path = event.paths.first().context("no path in event")?;
let path = Utf8Path::from_path(path).context("failed to convert path to Utf8Path")?;
let create_indexer = async |path: &Utf8Path| {
let mut map = self.inner.map.write().await;
let indexer = Indexer::try_new(path.to_path_buf()).await?;
map.insert(path.to_path_buf(), indexer);
Ok::<_, anyhow::Error>(())
};
match event.kind {
EventKind::Create(_) => {
if is_log_file(path).await? {
tracing::debug!("create indexer for {}", path);
create_indexer(path).await?;
}
}
EventKind::Remove(_) => {
let mut map = self.inner.map.write().await;
map.remove(path);
}
EventKind::Modify(_) => {
if is_log_file(path).await? {
let mut map = self.inner.map.write().await;
match map.get_mut(path) {
Some(indexer) => {
indexer.on_file_change().await?;
}
None => {
create_indexer(path).await?;
}
}
}
}
_ => (),
}
Ok(())
}
}
// TODO: only keep latest log file when we detect a serious memory report on it
pub struct IndexerManagerInner {
map: RwLock<HashMap<Utf8PathBuf, Indexer>>,
pub struct IndexerManagerRunner {
map: HashMap<Utf8PathBuf, Indexer>,
debouncer: Option<Debouncer<RecommendedWatcher, RecommendedCache>>,
}
impl IndexerManagerInner {
pub fn new() -> Self {
Self {
map: RwLock::new(HashMap::new()),
debouncer: None,
pub enum IndexerRunnerCmd {
/// scan the logging directory for new log files
Watch(Utf8PathBuf, oneshot::Sender<anyhow::Result<()>>),
/// remove the indexer for the given path
// Unwatch(Utf8PathBuf, oneshot::Sender<anyhow::Result<()>>),
/// query the indexer for the given path
AddLogFile(Utf8PathBuf, oneshot::Sender<anyhow::Result<()>>),
RemoveLogFile(Utf8PathBuf, oneshot::Sender<anyhow::Result<()>>),
LogFileChanged(Utf8PathBuf, oneshot::Sender<anyhow::Result<()>>),
Query(Utf8PathBuf, Query, oneshot::Sender<Option<Vec<LogEntry>>>),
}
pub struct IndexerRunnerGuard {
cancel_token: CancellationToken,
handle: JoinHandle<()>,
tx: tokio::sync::mpsc::UnboundedSender<IndexerRunnerCmd>,
}
impl IndexerManagerRunner {
pub async fn new_and_spawn() -> IndexerRunnerGuard {
let cancel_token = CancellationToken::new();
let (handle, rx) = Self::spawn_task(cancel_token.clone());
let tx = rx.await.unwrap();
IndexerRunnerGuard {
cancel_token,
handle,
tx,
}
}
fn spawn_task(
cancel_token: CancellationToken,
) -> (
JoinHandle<()>,
tokio::sync::oneshot::Receiver<tokio::sync::mpsc::UnboundedSender<IndexerRunnerCmd>>,
) {
let (tx, rx) = oneshot::channel();
let handle = tauri::async_runtime::spawn_blocking(move || {
let (cmd_tx, mut cmd_rx) = tokio::sync::mpsc::unbounded_channel();
let mut runner = Self {
map: HashMap::new(),
debouncer: None,
};
let local = LocalSet::new();
let cmd_tx_clone = cmd_tx.clone();
local.spawn_local(async move {
while let Some(cmd) = cmd_rx.recv().await {
runner.run_cmd(&cmd_tx_clone, cmd).await;
}
});
tx.send(cmd_tx).unwrap();
tauri::async_runtime::block_on(async {
tokio::select! {
_ = cancel_token.cancelled() => {
tracing::info!("cancel token triggered, shutting down");
}
_ = local => {}
}
});
});
// unwrap the join handle
match handle {
tauri::async_runtime::JoinHandle::Tokio(handle) => (handle, rx),
}
}
async fn run_cmd(&mut self, cmd_tx: &UnboundedSender<IndexerRunnerCmd>, cmd: IndexerRunnerCmd) {
match cmd {
IndexerRunnerCmd::Watch(path, tx) => {
if let Err(err) = self.scan(&path).await {
tx.send(Err(err)).unwrap();
return;
}
let watcher = self.recommended_watcher(&path).unwrap();
let cmd_tx = cmd_tx.clone();
nyanpasu_utils::runtime::spawn(Self::spawn_watcher(watcher, cmd_tx));
tx.send(Ok(())).unwrap();
}
IndexerRunnerCmd::Query(path, query, tx) => {
let indexer = self.map.get(&path).unwrap();
let result = indexer.query(query);
tx.send(result).unwrap();
}
IndexerRunnerCmd::AddLogFile(path, tx) => {
let mut indexer = Indexer::new(path.clone());
if let Err(err) = indexer.build_index().await {
tx.send(Err(err)).unwrap();
return;
}
self.map.insert(path, indexer);
tx.send(Ok(())).unwrap();
}
IndexerRunnerCmd::RemoveLogFile(path, tx) => {
self.map.remove(&path);
tx.send(Ok(())).unwrap();
}
IndexerRunnerCmd::LogFileChanged(path, tx) => {
let indexer = self.map.get_mut(&path).unwrap();
if let Err(err) = indexer.on_file_change().await {
tx.send(Err(err)).unwrap();
return;
}
tx.send(Ok(())).unwrap();
}
}
}
async fn spawn_watcher(
mut watcher: Receiver<Vec<DebouncedEvent>>,
cmd_tx: UnboundedSender<IndexerRunnerCmd>,
) {
while let Some(events) = watcher.recv().await {
for event in events {
let path = Utf8Path::from_path(event.paths.first().unwrap()).unwrap();
match event.kind {
EventKind::Create(_) => {
if is_log_file(path).await.is_ok_and(|ok| ok) {
tracing::debug!("create indexer for {}", path);
let (tx, rx) = oneshot::channel();
cmd_tx
.send(IndexerRunnerCmd::AddLogFile(path.to_path_buf(), tx))
.unwrap();
match rx.await {
Ok(_) => {
tracing::debug!("indexer for {} created", path);
}
Err(err) => {
tracing::error!("failed to create indexer for {}", path);
}
}
}
}
EventKind::Remove(_) => {
let (tx, rx) = oneshot::channel();
cmd_tx
.send(IndexerRunnerCmd::RemoveLogFile(path.to_path_buf(), tx))
.unwrap();
match rx.await {
Ok(_) => {
tracing::debug!("indexer for {} removed", path);
}
Err(err) => {
tracing::error!("failed to remove indexer for {}", path);
}
}
}
EventKind::Modify(_) => {
if is_log_file(path).await.is_ok_and(|ok| ok) {
let (tx, rx) = oneshot::channel();
cmd_tx
.send(IndexerRunnerCmd::LogFileChanged(path.to_path_buf(), tx))
.unwrap();
match rx.await {
Ok(_) => {
tracing::debug!("indexer for {} updated", path);
}
Err(err) => {
tracing::error!("failed to update indexer for {}", path);
}
}
}
}
_ => (),
}
}
}
}
#[tracing::instrument(skip(self))]
pub async fn scan(&self, logging_dir: &Utf8Path) -> anyhow::Result<()> {
let mut map = self.map.write().await;
pub async fn scan(&mut self, logging_dir: &Utf8Path) -> anyhow::Result<()> {
let mut entries = tokio::fs::read_dir(logging_dir).await?;
while let Some(entry) = entries.next_entry().await? {
let path = Utf8PathBuf::from_path_buf(entry.path())
.map_err(|e| anyhow::anyhow!("failed to convert path: {:?}", e))?;
if is_log_file(&path).await? {
tracing::debug!("create indexer for {}", path);
let indexer = Indexer::try_new(path.clone()).await?;
map.insert(path, indexer);
let mut indexer = Indexer::new(path.clone());
indexer.build_index().await?;
self.map.insert(path, indexer);
}
}
Ok(())
@@ -164,12 +278,15 @@ impl IndexerManagerInner {
Ok(rx)
}
}
pub async fn get_indexer(
&self,
path: &Utf8Path,
) -> Option<tokio::sync::RwLockReadGuard<'_, Indexer>> {
let map = self.map.read().await;
tokio::sync::RwLockReadGuard::try_map(map, |map| map.get(path)).ok()
impl IndexerRunnerGuard {
pub async fn watch(&self, logging_dir: &Utf8Path) -> anyhow::Result<()> {
let (tx, rx) = oneshot::channel();
self.tx
.send(IndexerRunnerCmd::Watch(logging_dir.to_path_buf(), tx))
.context("failed to send watch command")?;
rx.await.context("failed to receive watch command")??;
Ok(())
}
}
@@ -11,7 +11,7 @@
"build": "tsc"
},
"dependencies": {
"@tanstack/react-query": "5.70.0",
"@tanstack/react-query": "5.71.0",
"@tauri-apps/api": "2.4.0",
"ahooks": "3.8.4",
"dayjs": "1.11.13",
@@ -29,7 +29,7 @@
"allotment": "1.20.3",
"country-code-emoji": "2.3.0",
"dayjs": "1.11.13",
"framer-motion": "12.5.0",
"framer-motion": "12.6.2",
"i18next": "24.2.3",
"jotai": "2.12.2",
"json-schema": "0.4.0",
@@ -55,7 +55,7 @@
"@emotion/react": "11.14.0",
"@iconify/json": "2.2.321",
"@monaco-editor/react": "4.7.0",
"@tanstack/react-query": "5.70.0",
"@tanstack/react-query": "5.71.0",
"@tanstack/react-router": "1.114.29",
"@tanstack/router-devtools": "1.114.29",
"@tanstack/router-plugin": "1.114.30",
+1 -1
View File
@@ -28,7 +28,7 @@
"@vitejs/plugin-react": "4.3.4",
"ahooks": "3.8.4",
"d3": "7.9.0",
"framer-motion": "12.5.0",
"framer-motion": "12.6.2",
"react": "19.1.0",
"react-dom": "19.1.0",
"react-error-boundary": "5.0.0",
+1 -1
View File
@@ -109,7 +109,7 @@
"typescript": "5.8.2",
"typescript-eslint": "8.28.0"
},
"packageManager": "pnpm@10.6.5",
"packageManager": "pnpm@10.7.0",
"engines": {
"node": "22.14.0"
},
+32 -32
View File
@@ -175,8 +175,8 @@ importers:
frontend/interface:
dependencies:
'@tanstack/react-query':
specifier: 5.70.0
version: 5.70.0(react@19.1.0)
specifier: 5.71.0
version: 5.71.0(react@19.1.0)
'@tauri-apps/api':
specifier: 2.4.0
version: 2.4.0
@@ -266,8 +266,8 @@ importers:
specifier: 1.11.13
version: 1.11.13
framer-motion:
specifier: 12.5.0
version: 12.5.0(@emotion/is-prop-valid@1.3.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
specifier: 12.6.2
version: 12.6.2(@emotion/is-prop-valid@1.3.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
i18next:
specifier: 24.2.3
version: 24.2.3(typescript@5.8.2)
@@ -339,8 +339,8 @@ importers:
specifier: 4.7.0
version: 4.7.0(monaco-editor@0.52.2)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
'@tanstack/react-query':
specifier: 5.70.0
version: 5.70.0(react@19.1.0)
specifier: 5.71.0
version: 5.71.0(react@19.1.0)
'@tanstack/react-router':
specifier: 1.114.29
version: 1.114.29(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
@@ -486,8 +486,8 @@ importers:
specifier: 7.9.0
version: 7.9.0
framer-motion:
specifier: 12.5.0
version: 12.5.0(@emotion/is-prop-valid@1.3.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
specifier: 12.6.2
version: 12.6.2(@emotion/is-prop-valid@1.3.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
react:
specifier: 19.1.0
version: 19.1.0
@@ -2782,11 +2782,11 @@ packages:
resolution: {integrity: sha512-Wo1iKt2b9OT7d+YGhvEPD3DXvPv2etTusIMhMUoG7fbhmxcXCtIjJDEygy91Y2JFlwGyjqiBPRozme7UD8hoqg==}
engines: {node: '>=12'}
'@tanstack/query-core@5.70.0':
resolution: {integrity: sha512-ZkkjQAZjI6nS5OyAmaSQafQXK180Xvp0lZYk4BzrnskkTV8On3zSJUxOIXnh0h/8EgqRkCA9i879DiJovA1kGw==}
'@tanstack/query-core@5.71.0':
resolution: {integrity: sha512-p4+T7CIEe1kMhii4booWiw42nuaiYI9La/bRCNzBaj1P3PDb0dEZYDhc/7oBifKJfHYN+mtS1ynW1qsmzQW7Og==}
'@tanstack/react-query@5.70.0':
resolution: {integrity: sha512-z0tx1zz2CQ6nTm+fCaOp93FqsFjNgXtOy+4mC5ifQ4B+rJiMD0AGfJrYSGh/OuefhrzTYDAbkGUAGw6JzkWy8g==}
'@tanstack/react-query@5.71.0':
resolution: {integrity: sha512-Udhlz9xHwk0iB7eLDchIqvu666NZFxPZZF80KnL8sZy+5J0kMvnJkzQNYRJwF70g8Vc1nn0TSMkPJgvx6+Pn4g==}
peerDependencies:
react: ^18 || ^19
@@ -4834,8 +4834,8 @@ packages:
fraction.js@4.3.7:
resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==}
framer-motion@12.5.0:
resolution: {integrity: sha512-buPlioFbH9/W7rDzYh1C09AuZHAk2D1xTA1BlounJ2Rb9aRg84OXexP0GLd+R83v0khURdMX7b5MKnGTaSg5iA==}
framer-motion@12.6.2:
resolution: {integrity: sha512-7LgPRlPs5aG8UxeZiMCMZz8firC53+2+9TnWV22tuSi38D3IFRxHRUqOREKckAkt6ztX+Dn6weLcatQilJTMcg==}
peerDependencies:
'@emotion/is-prop-valid': '*'
react: ^18.0.0 || ^19.0.0
@@ -6115,8 +6115,8 @@ packages:
peerDependencies:
monaco-editor: '>=0.36'
motion-dom@12.5.0:
resolution: {integrity: sha512-uH2PETDh7m+Hjd1UQQ56yHqwn83SAwNjimNPE/kC+Kds0t4Yh7+29rfo5wezVFpPOv57U4IuWved5d1x0kNhbQ==}
motion-dom@12.6.1:
resolution: {integrity: sha512-8XVsriTUEVOepoIDgE/LDGdg7qaKXWdt+wQA/8z0p8YzJDLYL8gbimZ3YkCLlj7bB2i/4UBD/g+VO7y9ZY0zHQ==}
motion-utils@12.5.0:
resolution: {integrity: sha512-+hFFzvimn0sBMP9iPxBa9OtRX35ZQ3py0UHnb8U29VD+d8lQ8zH3dTygJWqK7av2v6yhg7scj9iZuvTS0f4+SA==}
@@ -10608,11 +10608,11 @@ snapshots:
dependencies:
remove-accents: 0.5.0
'@tanstack/query-core@5.70.0': {}
'@tanstack/query-core@5.71.0': {}
'@tanstack/react-query@5.70.0(react@19.1.0)':
'@tanstack/react-query@5.71.0(react@19.1.0)':
dependencies:
'@tanstack/query-core': 5.70.0
'@tanstack/query-core': 5.71.0
react: 19.1.0
'@tanstack/react-router-devtools@1.114.29(@tanstack/react-router@1.114.29(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(@tanstack/router-core@1.114.29)(csstype@3.1.3)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(tiny-invariant@1.3.3)':
@@ -11526,7 +11526,7 @@ snapshots:
async-mutex@0.3.2:
dependencies:
tslib: 2.7.0
tslib: 2.8.1
async@3.2.6: {}
@@ -11705,7 +11705,7 @@ snapshots:
camel-case@4.1.2:
dependencies:
pascal-case: 3.1.2
tslib: 2.7.0
tslib: 2.8.1
camelcase-css@2.0.1: {}
@@ -12326,7 +12326,7 @@ snapshots:
dot-case@3.0.4:
dependencies:
no-case: 3.0.4
tslib: 2.7.0
tslib: 2.8.1
dot-prop@4.2.1:
dependencies:
@@ -12696,7 +12696,7 @@ snapshots:
minimatch: 9.0.5
semver: 7.7.1
stable-hash: 0.0.4
tslib: 2.7.0
tslib: 2.8.1
transitivePeerDependencies:
- supports-color
- typescript
@@ -13049,11 +13049,11 @@ snapshots:
fraction.js@4.3.7: {}
framer-motion@12.5.0(@emotion/is-prop-valid@1.3.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0):
framer-motion@12.6.2(@emotion/is-prop-valid@1.3.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0):
dependencies:
motion-dom: 12.5.0
motion-dom: 12.6.1
motion-utils: 12.5.0
tslib: 2.7.0
tslib: 2.8.1
optionalDependencies:
'@emotion/is-prop-valid': 1.3.0
react: 19.1.0
@@ -14064,7 +14064,7 @@ snapshots:
lower-case@2.0.2:
dependencies:
tslib: 2.7.0
tslib: 2.8.1
lowercase-keys@1.0.1: {}
@@ -14449,7 +14449,7 @@ snapshots:
vscode-uri: 3.0.8
yaml: 2.7.0
motion-dom@12.5.0:
motion-dom@12.6.1:
dependencies:
motion-utils: 12.5.0
@@ -14521,7 +14521,7 @@ snapshots:
no-case@3.0.4:
dependencies:
lower-case: 2.0.2
tslib: 2.7.0
tslib: 2.8.1
node-addon-api@7.1.1:
optional: true
@@ -14750,7 +14750,7 @@ snapshots:
param-case@3.0.4:
dependencies:
dot-case: 3.0.4
tslib: 2.7.0
tslib: 2.8.1
parent-module@1.0.1:
dependencies:
@@ -14781,7 +14781,7 @@ snapshots:
pascal-case@3.1.2:
dependencies:
no-case: 3.0.4
tslib: 2.7.0
tslib: 2.8.1
path-browserify@1.0.1: {}
@@ -15608,7 +15608,7 @@ snapshots:
snake-case@3.0.4:
dependencies:
dot-case: 3.0.4
tslib: 2.7.0
tslib: 2.8.1
socks@2.8.3:
dependencies:
+2 -2
View File
@@ -59,7 +59,7 @@ body:
required: true
- type: textarea
attributes:
label: 日志 / Log
description: 请提供完整或相关部分的Debug日志(请在“软件左侧菜单”->“设置”->“日志等级”调整到debug,Verge错误请把“杂项设置”->“app日志等级”调整到debug/trace,并重启Verge生效。日志文件在“软件左侧菜单”->“设置”->“日志目录”下) / Please provide a complete or relevant part of the Debug log (please adjust the "Log level" to debug in "Software left menu" -> "Settings" -> "Log level". If there is a Verge error, please adjust "Miscellaneous settings" -> "app log level" to trace, and restart Verge to take effect. The log file is under "Software left menu" -> "Settings" -> "Log directory")
label: 日志(勿上传日志文件,请粘贴日志内容) / Log (Do not upload the log file, paste the log content directly)
description: 请提供完整或相关部分的Debug日志(请在“软件左侧菜单”->“设置”->“日志等级”调整到debug,Verge错误请把“杂项设置”->“app日志等级”调整到debug,并重启Verge生效。日志文件在“软件左侧菜单”->“设置”->“日志目录”下) / Please provide a complete or relevant part of the Debug log (please adjust the "Log level" to debug in "Software left menu" -> "Settings" -> "Log level". If there is a Verge error, please adjust "Miscellaneous settings" -> "app log level" to debug, and restart Verge to take effect. The log file is under "Software left menu" -> "Settings" -> "Log directory")
validations:
required: true
+1 -1
View File
@@ -10,7 +10,7 @@ if [ $? -ne 0 ]; then
exit 1
fi
git add .
#git add .
# 允许提交
exit 0
+5 -1
View File
@@ -8,7 +8,7 @@
### 2.2.3-alpha 相对于 2.2.2
#### 修复了:
- 首页“当前代理”因为重复刷新导致的CPU占用过高的问题
- “开自启”和“DNS覆写”开关跳动问题
- “开自启”和“DNS覆写”开关跳动问题
- 自定义托盘图标未能应用更改
- MacOS 自定义托盘图标显示速率时图标和文本间隙过大
- MacOS 托盘速率显示不全
@@ -18,6 +18,10 @@
- Clash Verge Rev 从现在开始不再强依赖系统服务和管理权限
- 支持根据用户偏好选择Sidecar(用户空间)模式或安装服务
- 增加载入初始配置文件的错误提示,防止切换到错误的订阅配置
- 检测是否以管理员模式运行软件,如果是提示无法使用开机自启
- 代理组显示节点数量
- 统一运行模式检测,支持管理员模式下开启TUN模式
- 托盘切换代理模式会根据设置自动断开之前连接
#### 优化了:
- 重构了后端内核管理逻辑,更轻量化和有效的管理内核,提高了性能和稳定性
+8 -8
View File
@@ -1059,6 +1059,7 @@ dependencies = [
"getrandom 0.3.2",
"image",
"imageproc",
"libc",
"log",
"log4rs",
"mihomo_api",
@@ -4043,9 +4044,9 @@ dependencies = [
[[package]]
name = "network-interface"
version = "2.0.0"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "433419f898328beca4f2c6c73a1b52540658d92b0a99f0269330457e0fd998d5"
checksum = "c3329f515506e4a2de3aa6e07027a6758e22e0f0e8eaf64fa47261cec2282602"
dependencies = [
"cc",
"libc",
@@ -4566,9 +4567,9 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.21.1"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d75b0bedcc4fe52caa0e03d9f1151a323e4aa5e2d78ba3580400cd3c9e2bc4bc"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "opaque-debug"
@@ -6670,15 +6671,14 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.33.1"
version = "0.34.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fc858248ea01b66f19d8e8a6d55f41deaf91e9d495246fd01368d99935c6c01"
checksum = "927fa32067cbb22b8a91987d84c0ff94c7441ccf3f767165a58d77a656d6267c"
dependencies = [
"core-foundation-sys",
"libc",
"memchr",
"ntapi",
"rayon",
"objc2-core-foundation",
"windows 0.57.0",
]
+4 -3
View File
@@ -25,11 +25,11 @@ dunce = "1.0"
log4rs = "1"
nanoid = "0.4"
chrono = "0.4.40"
sysinfo = "0.33.1"
sysinfo = "0.34"
boa_engine = "0.20.0"
serde_json = "1.0"
serde_yaml = "0.9"
once_cell = "1.21.1"
once_cell = "1.21.3"
port_scanner = "0.1.5"
delay_timer = "0.11.6"
parking_lot = "0.12"
@@ -53,7 +53,7 @@ tauri = { version = "2.4.0", features = [
"image-ico",
"image-png",
] }
network-interface = { version = "2.0.0", features = ["serde"] }
network-interface = { version = "2.0.1", features = ["serde"] }
tauri-plugin-shell = "2.2.0"
tauri-plugin-dialog = "2.2.0"
tauri-plugin-fs = "2.2.0"
@@ -73,6 +73,7 @@ async-trait = "0.1.88"
mihomo_api = { path = "src_crates/crate_mihomo_api" }
ab_glyph = "0.2.29"
tungstenite = "0.26.2"
libc = "0.2"
[target.'cfg(windows)'.dependencies]
runas = "=1.2.0"
@@ -58,3 +58,37 @@ pub fn get_app_uptime() -> CmdResult<i64> {
Ok(now - start_time)
}
/// 检查应用是否以管理员身份运行
#[tauri::command]
#[cfg(target_os = "windows")]
pub fn is_admin() -> CmdResult<bool> {
use deelevate::{PrivilegeLevel, Token};
let result = Token::with_current_process()
.and_then(|token| token.privilege_level())
.map(|level| level != PrivilegeLevel::NotPrivileged)
.unwrap_or(false);
Ok(result)
}
/// 非Windows平台检测是否以管理员身份运行
#[tauri::command]
#[cfg(not(target_os = "windows"))]
pub fn is_admin() -> CmdResult<bool> {
#[cfg(target_os = "macos")]
{
Ok(unsafe { libc::geteuid() } == 0)
}
#[cfg(target_os = "linux")]
{
Ok(unsafe { libc::geteuid() } == 0)
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
{
Ok(false)
}
}
@@ -37,6 +37,19 @@ pub fn restart_app() {
});
}
fn after_change_clash_mode() {
let _ = tauri::async_runtime::block_on(tauri::async_runtime::spawn_blocking(|| {
tauri::async_runtime::block_on(async {
let connections = MihomoManager::global().get_connections().await.unwrap();
let connections = connections["connections"].as_array().unwrap();
for connection in connections {
let id = connection["id"].as_str().unwrap();
let _ = MihomoManager::global().delete_connection(id).await;
}
})
}));
}
/// Change Clash mode (rule/global/direct/script)
pub fn change_clash_mode(mode: String) {
let mut mapping = Mapping::new();
@@ -57,6 +70,14 @@ pub fn change_clash_mode(mode: String) {
logging_error!(Type::Tray, true, tray::Tray::global().update_menu());
logging_error!(Type::Tray, true, tray::Tray::global().update_icon(None));
}
let is_auto_close_connection = Config::verge()
.data()
.auto_close_connection
.unwrap_or(false);
if is_auto_close_connection {
after_change_clash_mode();
}
}
Err(err) => println!("{err}"),
}
+28 -25
View File
@@ -53,32 +53,35 @@ pub fn open_or_close_dashboard() {
/// Setup window state monitor to save window position and size in real-time
pub fn setup_window_state_monitor(app_handle: &tauri::AppHandle) {
let window = app_handle.get_webview_window("main").unwrap();
let app_handle_clone = app_handle.clone();
// 暂时移除实时监控-保存窗口位置和大小,这个特性可能会导致窗口异常大小和位置,需要进一步优化
//
// 监听窗口移动事件
let app_handle_move = app_handle_clone.clone();
window.on_window_event(move |event| {
match event {
// 窗口移动时保存状态
tauri::WindowEvent::Moved(_) => {
let _ = app_handle_move.save_window_state(StateFlags::all());
}
// 窗口调整大小时保存状态
tauri::WindowEvent::Resized(_) => {
let _ = app_handle_move.save_window_state(StateFlags::all());
}
// 其他可能改变窗口状态的事件
tauri::WindowEvent::ScaleFactorChanged { .. } => {
let _ = app_handle_move.save_window_state(StateFlags::all());
}
// 窗口关闭时保存
tauri::WindowEvent::CloseRequested { .. } => {
let _ = app_handle_move.save_window_state(StateFlags::all());
}
_ => {}
}
});
// let window = app_handle.get_webview_window("main").unwrap();
// let app_handle_clone = app_handle.clone();
// // 监听窗口移动事件
// let app_handle_move = app_handle_clone.clone();
// window.on_window_event(move |event| {
// match event {
// // 窗口移动时保存状态
// tauri::WindowEvent::Moved(_) => {
// let _ = app_handle_move.save_window_state(StateFlags::all());
// }
// // 窗口调整大小时保存状态
// tauri::WindowEvent::Resized(_) => {
// let _ = app_handle_move.save_window_state(StateFlags::all());
// }
// // 其他可能改变窗口状态的事件
// tauri::WindowEvent::ScaleFactorChanged { .. } => {
// let _ = app_handle_move.save_window_state(StateFlags::all());
// }
// // 窗口关闭时保存
// tauri::WindowEvent::CloseRequested { .. } => {
// let _ = app_handle_move.save_window_state(StateFlags::all());
// }
// _ => {}
// }
// });
}
/// 优化的应用退出函数
+1
View File
@@ -154,6 +154,7 @@ pub fn run() {
cmd::get_running_mode,
cmd::get_app_uptime,
cmd::get_auto_launch_status,
cmd::is_admin,
// service 管理
cmd::install_service,
cmd::uninstall_service,
@@ -1,4 +1,7 @@
use crate::core::{handle, CoreManager};
use crate::{
cmd::system,
core::{handle, CoreManager},
};
use std::fmt::{self, Debug, Formatter};
use sysinfo::System;
@@ -9,14 +12,15 @@ pub struct PlatformSpecification {
system_arch: String,
verge_version: String,
running_mode: String,
is_admin: bool,
}
impl Debug for PlatformSpecification {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(
f,
"System Name: {}\nSystem Version: {}\nSystem kernel Version: {}\nSystem Arch: {}\nVerge Version: {}\nRunning Mode: {}",
self.system_name, self.system_version, self.system_kernel_version, self.system_arch, self.verge_version, self.running_mode
"System Name: {}\nSystem Version: {}\nSystem kernel Version: {}\nSystem Arch: {}\nVerge Version: {}\nRunning Mode: {}\nIs Admin: {}",
self.system_name, self.system_version, self.system_kernel_version, self.system_arch, self.verge_version, self.running_mode, self.is_admin
)
}
}
@@ -40,6 +44,11 @@ impl PlatformSpecification {
})
});
let is_admin = match system::is_admin() {
Ok(value) => value,
Err(_) => false,
};
Self {
system_name,
system_version,
@@ -47,6 +56,7 @@ impl PlatformSpecification {
system_arch,
verge_version,
running_mode,
is_admin,
}
}
}
@@ -140,4 +140,23 @@ impl MihomoManager {
let response = self.send_request(Method::GET, url, None).await?;
Ok(response)
}
pub async fn get_connections(&self) -> Result<serde_json::Value, String> {
let url = format!("{}/connections", self.mihomo_server);
let response = self.send_request(Method::GET, url, None).await?;
Ok(response)
}
pub async fn delete_connection(&self, id: &str) -> Result<(), String> {
let url = format!("{}/connections/{}", self.mihomo_server, id);
let response = self.send_request(Method::DELETE, url, None).await?;
if response["code"] == 204 {
Ok(())
} else {
Err(response["message"]
.as_str()
.unwrap_or("unknown error")
.to_string())
}
}
}
@@ -19,7 +19,7 @@ import {
SvgIconComponent,
} from "@mui/icons-material";
import { useVerge } from "@/hooks/use-verge";
import { useAppData } from "@/providers/app-data-provider";
import { useSystemState } from "@/hooks/use-system-state";
const LOCAL_STORAGE_TAB_KEY = "clash-verge-proxy-active-tab";
@@ -140,14 +140,14 @@ export const ProxyTunCard: FC = () => {
);
// 获取代理状态信息
const { sysproxy, runningMode } = useAppData();
const { verge } = useVerge();
const { isSidecarMode, isAdminMode } = useSystemState();
// 从verge配置中获取开关状态
const { enable_system_proxy, enable_tun_mode } = verge ?? {};
// 是否以sidecar模式运行
const isSidecarMode = runningMode === "Sidecar";
// 判断Tun模式是否可用 - 当处于服务模式或管理员模式时可用
const isTunAvailable = !isSidecarMode || isAdminMode;
// 处理错误
const handleError = (err: Error) => {
@@ -171,7 +171,7 @@ export const ProxyTunCard: FC = () => {
};
} else {
return {
text: isSidecarMode
text: !isTunAvailable
? t("TUN Mode Service Required")
: enable_tun_mode
? t("TUN Mode Enabled")
@@ -179,7 +179,7 @@ export const ProxyTunCard: FC = () => {
tooltip: t("TUN Mode Intercept Info"),
};
}
}, [activeTab, enable_system_proxy, enable_tun_mode, isSidecarMode, t]);
}, [activeTab, enable_system_proxy, enable_tun_mode, isTunAvailable, t]);
return (
<Box sx={{ display: "flex", flexDirection: "column", width: "100%" }}>
@@ -206,7 +206,7 @@ export const ProxyTunCard: FC = () => {
onClick={() => handleTabChange("tun")}
icon={TroubleshootRounded}
label={t("Tun Mode")}
hasIndicator={enable_tun_mode && !isSidecarMode}
hasIndicator={enable_tun_mode && isTunAvailable}
/>
</Stack>
@@ -1,21 +1,30 @@
import { useTranslation } from "react-i18next";
import { Typography, Stack, Divider, Chip, IconButton } from "@mui/material";
import { InfoOutlined, SettingsOutlined } from "@mui/icons-material";
import { Typography, Stack, Divider, Chip, IconButton, Tooltip } from "@mui/material";
import {
InfoOutlined,
SettingsOutlined,
WarningOutlined,
AdminPanelSettingsOutlined,
DnsOutlined,
ExtensionOutlined
} from "@mui/icons-material";
import { useVerge } from "@/hooks/use-verge";
import { EnhancedCard } from "./enhanced-card";
import useSWR from "swr";
import { getRunningMode, getSystemInfo, installService } from "@/services/cmds";
import { getSystemInfo, installService } from "@/services/cmds";
import { useNavigate } from "react-router-dom";
import { version as appVersion } from "@root/package.json";
import { useCallback, useEffect, useMemo, useState } from "react";
import { check as checkUpdate } from "@tauri-apps/plugin-updater";
import { useLockFn } from "ahooks";
import { Notice } from "@/components/base";
import { useSystemState } from "@/hooks/use-system-state";
export const SystemInfoCard = () => {
const { t } = useTranslation();
const { verge, patchVerge } = useVerge();
const navigate = useNavigate();
const { isAdminMode, isSidecarMode, mutateRunningMode } = useSystemState();
// 系统信息状态
const [systemState, setSystemState] = useState({
@@ -23,16 +32,6 @@ export const SystemInfoCard = () => {
lastCheckUpdate: "-",
});
// 获取运行模式
const { data: runningMode = "Sidecar", mutate: mutateRunningMode } = useSWR(
"getRunningMode",
getRunningMode,
{ suspense: false, revalidateOnFocus: false },
);
// 是否以sidecar模式运行
const isSidecarMode = runningMode === "Sidecar";
// 初始化系统信息
useEffect(() => {
// 获取系统信息
@@ -107,13 +106,13 @@ export const SystemInfoCard = () => {
// 切换自启动状态
const toggleAutoLaunch = useCallback(async () => {
if (!verge) return;
if (!verge || isAdminMode) return;
try {
await patchVerge({ enable_auto_launch: !verge.enable_auto_launch });
} catch (err) {
console.error("切换开机自启动状态失败:", err);
}
}, [verge, patchVerge]);
}, [verge, patchVerge, isAdminMode]);
// 安装系统服务
const onInstallService = useLockFn(async () => {
@@ -121,18 +120,20 @@ export const SystemInfoCard = () => {
Notice.info(t("Installing Service..."), 1000);
await installService();
Notice.success(t("Service Installed Successfully"), 2000);
await mutateRunningMode();
await mutateRunningMode();
} catch (err: any) {
Notice.error(err.message || err.toString(), 3000);
}
});
// 点击运行模式处理
// 点击运行模式处理,Sidecar或纯管理员模式允许安装服务
const handleRunningModeClick = useCallback(() => {
if (isSidecarMode) {
if (isSidecarMode || (isAdminMode && isSidecarMode)) {
onInstallService();
}
}, [isSidecarMode, onInstallService]);
}, [isSidecarMode, isAdminMode, onInstallService]);
// 检查更新
const onCheckUpdate = useLockFn(async () => {
@@ -158,15 +159,75 @@ export const SystemInfoCard = () => {
// 运行模式样式
const runningModeStyle = useMemo(
() => ({
cursor: isSidecarMode ? "pointer" : "default",
textDecoration: isSidecarMode ? "underline" : "none",
// Sidecar或纯管理员模式允许安装服务
cursor: (isSidecarMode || (isAdminMode && isSidecarMode)) ? "pointer" : "default",
textDecoration: (isSidecarMode || (isAdminMode && isSidecarMode)) ? "underline" : "none",
display: "flex",
alignItems: "center",
gap: 0.5,
"&:hover": {
opacity: isSidecarMode ? 0.7 : 1,
opacity: (isSidecarMode || (isAdminMode && isSidecarMode)) ? 0.7 : 1,
},
}),
[isSidecarMode],
[isSidecarMode, isAdminMode],
);
// 获取模式图标和文本
const getModeIcon = () => {
if (isAdminMode) {
// 判断是否为组合模式(管理员+服务)
if (!isSidecarMode) {
return (
<>
<AdminPanelSettingsOutlined
sx={{ color: "primary.main", fontSize: 16 }}
titleAccess={t("Administrator Mode")}
/>
<DnsOutlined
sx={{ color: "success.main", fontSize: 16, ml: 0.5 }}
titleAccess={t("Service Mode")}
/>
</>
);
}
return (
<AdminPanelSettingsOutlined
sx={{ color: "primary.main", fontSize: 16 }}
titleAccess={t("Administrator Mode")}
/>
);
} else if (isSidecarMode) {
return (
<ExtensionOutlined
sx={{ color: "info.main", fontSize: 16 }}
titleAccess={t("Sidecar Mode")}
/>
);
} else {
return (
<DnsOutlined
sx={{ color: "success.main", fontSize: 16 }}
titleAccess={t("Service Mode")}
/>
);
}
};
// 获取模式文本
const getModeText = () => {
if (isAdminMode) {
// 判断是否同时处于服务模式
if (!isSidecarMode) {
return t("Administrator + Service Mode");
}
return t("Administrator Mode");
} else if (isSidecarMode) {
return t("Sidecar Mode");
} else {
return t("Service Mode");
}
};
// 只有当verge存在时才渲染内容
if (!verge) return null;
@@ -191,21 +252,29 @@ export const SystemInfoCard = () => {
</Typography>
</Stack>
<Divider />
<Stack direction="row" justifyContent="space-between">
<Stack direction="row" justifyContent="space-between" alignItems="center">
<Typography variant="body2" color="text.secondary">
{t("Auto Launch")}
</Typography>
<Chip
size="small"
label={autoLaunchEnabled ? t("Enabled") : t("Disabled")}
color={autoLaunchEnabled ? "success" : "default"}
variant={autoLaunchEnabled ? "filled" : "outlined"}
onClick={toggleAutoLaunch}
sx={{ cursor: "pointer" }}
/>
<Stack direction="row" spacing={1} alignItems="center">
{isAdminMode && (
<Tooltip title={t("Administrator mode does not support auto launch")}>
<WarningOutlined sx={{ color: "warning.main", fontSize: 20 }} />
</Tooltip>
)}
<Chip
size="small"
label={autoLaunchEnabled ? t("Enabled") : t("Disabled")}
color={autoLaunchEnabled ? "success" : "default"}
variant={autoLaunchEnabled ? "filled" : "outlined"}
onClick={toggleAutoLaunch}
disabled={isAdminMode}
sx={{ cursor: isAdminMode ? "not-allowed" : "pointer" }}
/>
</Stack>
</Stack>
<Divider />
<Stack direction="row" justifyContent="space-between">
<Stack direction="row" justifyContent="space-between" alignItems="center">
<Typography variant="body2" color="text.secondary">
{t("Running Mode")}
</Typography>
@@ -215,7 +284,8 @@ export const SystemInfoCard = () => {
onClick={handleRunningModeClick}
sx={runningModeStyle}
>
{isSidecarMode ? t("Sidecar Mode") : t("Service Mode")}
{getModeIcon()}
{getModeText()}
</Typography>
</Stack>
<Divider />
@@ -5,6 +5,8 @@ import {
ListItemButton,
Typography,
styled,
Chip,
Tooltip,
} from "@mui/material";
import {
ExpandLessRounded,
@@ -21,6 +23,7 @@ import { useThemeMode } from "@/services/states";
import { useEffect, useMemo, useState } from "react";
import { convertFileSrc } from "@tauri-apps/api/core";
import { downloadIconCache } from "@/services/cmds";
import { useTranslation } from "react-i18next";
interface RenderProps {
item: IRenderItem;
@@ -32,6 +35,7 @@ interface RenderProps {
}
export const ProxyRender = (props: RenderProps) => {
const { t } = useTranslation();
const { indent, item, onLocation, onCheckAll, onHeadState, onChangeProxy } =
props;
const { type, group, headState, proxy, proxyCol } = item;
@@ -123,7 +127,20 @@ export const ProxyRender = (props: RenderProps) => {
},
}}
/>
{headState?.open ? <ExpandLessRounded /> : <ExpandMoreRounded />}
<Box sx={{ display: "flex", alignItems: "center" }}>
<Tooltip title={t("Proxy Count")} arrow>
<Chip
size="small"
label={`${group.all.length}`}
sx={{
mr: 1,
backgroundColor: (theme) => alpha(theme.palette.primary.main, 0.1),
color: (theme) => theme.palette.primary.main,
}}
/>
</Tooltip>
{headState?.open ? <ExpandLessRounded /> : <ExpandMoreRounded />}
</Box>
</ListItemButton>
);
}
@@ -18,12 +18,12 @@ import { TooltipIcon } from "@/components/base/base-tooltip-icon";
import {
getSystemProxy,
getAutotemProxy,
getRunningMode,
installService,
getAutoLaunchStatus,
} from "@/services/cmds";
import { useLockFn } from "ahooks";
import { Box, Button, Tooltip } from "@mui/material";
import { Button, Tooltip } from "@mui/material";
import { useSystemState } from "@/hooks/use-system-state";
interface Props {
onError?: (err: Error) => void;
@@ -36,16 +36,17 @@ const SettingSystem = ({ onError }: Props) => {
const { data: sysproxy } = useSWR("getSystemProxy", getSystemProxy);
const { data: autoproxy } = useSWR("getAutotemProxy", getAutotemProxy);
const { data: runningMode, mutate: mutateRunningMode } = useSWR(
"getRunningMode",
getRunningMode,
);
const { data: autoLaunchEnabled } = useSWR(
"getAutoLaunchStatus",
getAutoLaunchStatus,
{ revalidateOnFocus: false }
);
const { isAdminMode, isSidecarMode, mutateRunningMode } = useSystemState();
// 判断Tun模式是否可用 - 当处于服务模式或管理员模式时可用
const isTunAvailable = !isSidecarMode || isAdminMode;
// 当实际自启动状态与配置不同步时更新配置
useEffect(() => {
if (
@@ -58,9 +59,6 @@ const SettingSystem = ({ onError }: Props) => {
}
}, [autoLaunchEnabled]);
// 是否以sidecar模式运行
const isSidecarMode = runningMode === "Sidecar";
const sysproxyRef = useRef<DialogRef>(null);
const tunRef = useRef<DialogRef>(null);
@@ -111,12 +109,12 @@ const SettingSystem = ({ onError }: Props) => {
icon={SettingsRounded}
onClick={() => tunRef.current?.open()}
/>
{isSidecarMode && (
{isSidecarMode && !isAdminMode && (
<Tooltip title={t("TUN requires Service Mode")}>
<WarningRounded sx={{ color: "warning.main", mr: 1 }} />
</Tooltip>
)}
{isSidecarMode && (
{isSidecarMode && !isAdminMode && (
<Tooltip title={t("Install Service")}>
<Button
variant="outlined"
@@ -138,20 +136,20 @@ const SettingSystem = ({ onError }: Props) => {
onCatch={onError}
onFormat={onSwitchFormat}
onChange={(e) => {
// 当在sidecar模式下禁用切换
if (isSidecarMode) return;
// 当在sidecar模式下且非管理员模式时禁用切换
if (isSidecarMode && !isAdminMode) return;
onChangeData({ enable_tun_mode: e });
}}
onGuard={(e) => {
// 当在sidecar模式下禁用切换
if (isSidecarMode) {
// 当在sidecar模式下且非管理员模式时禁用切换
if (isSidecarMode && !isAdminMode) {
Notice.error(t("TUN requires Service Mode"), 2000);
return Promise.reject(new Error(t("TUN requires Service Mode")));
}
return patchVerge({ enable_tun_mode: e });
}}
>
<Switch edge="end" disabled={isSidecarMode} />
<Switch edge="end" disabled={isSidecarMode && !isAdminMode} />
</GuardState>
</SettingItem>
<SettingItem
@@ -192,14 +190,32 @@ const SettingSystem = ({ onError }: Props) => {
</GuardState>
</SettingItem>
<SettingItem label={t("Auto Launch")}>
<SettingItem
label={t("Auto Launch")}
extra={
isAdminMode && (
<Tooltip title={t("Administrator mode does not support auto launch")}>
<WarningRounded sx={{ color: "warning.main", mr: 1 }} />
</Tooltip>
)
}
>
<GuardState
value={enable_auto_launch ?? false}
valueProps="checked"
onCatch={onError}
onFormat={onSwitchFormat}
onChange={(e) => onChangeData({ enable_auto_launch: e })}
onChange={(e) => {
// 在管理员模式下禁用更改
if (isAdminMode) return;
onChangeData({ enable_auto_launch: e });
}}
onGuard={async (e) => {
if (isAdminMode) {
Notice.error(t("Administrator mode does not support auto launch"), 2000);
return Promise.reject(new Error(t("Administrator mode does not support auto launch")));
}
try {
// 在应用更改之前先触发UI更新,让用户立即看到反馈
onChangeData({ enable_auto_launch: e });
@@ -214,7 +230,7 @@ const SettingSystem = ({ onError }: Props) => {
}
}}
>
<Switch edge="end" />
<Switch edge="end" disabled={isAdminMode} />
</GuardState>
</SettingItem>
@@ -0,0 +1,29 @@
import useSWR from "swr";
import { getRunningMode, isAdmin } from "@/services/cmds";
/**
* hook
*
*/
export function useSystemState() {
// 获取运行模式
const { data: runningMode = "Sidecar", mutate: mutateRunningMode } =
useSWR("getRunningMode", getRunningMode, {
suspense: false,
revalidateOnFocus: false
});
// 获取管理员状态
const { data: isAdminMode = false } =
useSWR("isAdmin", isAdmin, {
suspense: false,
revalidateOnFocus: false
});
return {
runningMode,
isAdminMode,
isSidecarMode: runningMode === "Sidecar",
mutateRunningMode
};
}
+4
View File
@@ -27,6 +27,7 @@
"Proxies": "Proxies",
"Proxy Groups": "Proxy Groups",
"Proxy Provider": "Proxy Provider",
"Proxy Count": "Proxy Count",
"Update All": "Update All",
"Update At": "Update At",
"rule": "rule",
@@ -254,6 +255,7 @@
"PAC Script Content": "PAC Script Content",
"PAC URL": "PAC URL: ",
"Auto Launch": "Auto Launch",
"Administrator mode does not support auto launch": "Administrator mode does not support auto launch",
"Silent Start": "Silent Start",
"Silent Start Info": "Start the program in background mode without displaying the panel",
"TG Channel": "Telegram Channel",
@@ -552,6 +554,8 @@
"OS Info": "OS Info",
"Running Mode": "Running Mode",
"Sidecar Mode": "User Mode",
"Administrator Mode": "Administrator Mode",
"Administrator + Service Mode": "Admin + Service Mode",
"Last Check Update": "Last Check Update",
"Click to import subscription": "Click to import subscription",
"Update subscription successfully": "Update subscription successfully",
+4
View File
@@ -27,6 +27,7 @@
"Proxies": "代理",
"Proxy Groups": "代理组",
"Proxy Provider": "代理集合",
"Proxy Count": "节点数量",
"Update All": "更新全部",
"Update At": "更新于",
"rule": "规则",
@@ -254,6 +255,7 @@
"PAC Script Content": "PAC 脚本内容",
"PAC URL": "PAC 地址:",
"Auto Launch": "开机自启",
"Administrator mode does not support auto launch": "管理员模式不支持开机自启",
"Silent Start": "静默启动",
"Silent Start Info": "程序启动时以后台模式运行,不显示程序面板",
"TG Channel": "Telegram 频道",
@@ -552,6 +554,8 @@
"OS Info": "操作系统信息",
"Running Mode": "运行模式",
"Sidecar Mode": "用户模式",
"Administrator Mode": "管理员模式",
"Administrator + Service Mode": "管理员 + 服务模式",
"Last Check Update": "最后检查更新",
"Click to import subscription": "点击导入订阅",
"Update subscription successfully": "订阅更新成功",
+9
View File
@@ -346,3 +346,12 @@ export const entry_lightweight_mode = async () => {
export const exit_lightweight_mode = async () => {
return invoke<void>("exit_lightweight_mode");
};
export const isAdmin = async () => {
try {
return await invoke<boolean>("is_admin");
} catch (error) {
console.error("检查管理员权限失败:", error);
return false;
}
};
@@ -1118,7 +1118,6 @@ define KernelPackage/ixgbe
TITLE:=Intel(R) 82598/82599 PCI-Express 10 Gigabit Ethernet support
DEPENDS:=@PCI_SUPPORT +kmod-mdio +kmod-ptp +kmod-hwmon-core +kmod-libphy +!LINUX_5_4:kmod-mdio-devres
KCONFIG:=CONFIG_IXGBE \
CONFIG_IXGBE_VXLAN=n \
CONFIG_IXGBE_HWMON=y \
CONFIG_IXGBE_DCA=n
FILES:=$(LINUX_DIR)/drivers/net/ethernet/intel/ixgbe/ixgbe.ko
@@ -1137,7 +1136,6 @@ define KernelPackage/ixgbevf
TITLE:=Intel(R) 82599 Virtual Function Ethernet support
DEPENDS:=@PCI_SUPPORT +kmod-ixgbe
KCONFIG:=CONFIG_IXGBEVF \
CONFIG_IXGBE_VXLAN=n \
CONFIG_IXGBE_HWMON=y \
CONFIG_IXGBE_DCA=n
FILES:=$(LINUX_DIR)/drivers/net/ethernet/intel/ixgbevf/ixgbevf.ko
@@ -1154,11 +1152,9 @@ $(eval $(call KernelPackage,ixgbevf))
define KernelPackage/i40e
SUBMENU:=$(NETWORK_DEVICES_MENU)
TITLE:=Intel(R) Ethernet Controller XL710 Family support
DEPENDS:=@PCI_SUPPORT +kmod-mdio +kmod-ptp +kmod-hwmon-core +kmod-libphy +LINUX_6_12:kmod-libie
DEPENDS:=@PCI_SUPPORT +kmod-ptp +LINUX_6_12:kmod-libie
KCONFIG:=CONFIG_I40E \
CONFIG_I40E_VXLAN=n \
CONFIG_I40E_HWMON=y \
CONFIG_I40E_DCA=n
CONFIG_I40E_DCB=y
FILES:=$(LINUX_DIR)/drivers/net/ethernet/intel/i40e/i40e.ko
AUTOLOAD:=$(call AutoProbe,i40e)
endef
@@ -9,7 +9,7 @@ LUCI_TITLE:=LuCI support for quickstart
LUCI_DEPENDS:=+quickstart +luci-app-store
LUCI_PKGARCH:=all
PKG_VERSION:=0.8.16-1
PKG_VERSION:=0.8.17-1
# PKG_RELEASE MUST be empty for luci.mk
PKG_RELEASE:=
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,6 +1,7 @@
<%+header%>
<%
local jsonc = require "luci.jsonc"
local uci = require "luci.model.uci".cursor()
local features = { "_lua_force_array_" }
local configs = {}
if luci.sys.call("which ota >/dev/null 2>&1 && ota >/dev/null 2>&1") == 0 then
@@ -17,7 +18,6 @@
end
if luci.sys.call("/etc/init.d/ttyd running >/dev/null 2>&1") == 0 then
features[#features+1] = "ttyd"
local uci = require "luci.model.uci".cursor()
local port = uci:get_first("ttyd", "ttyd", "port") or "7681"
local ssl = uci:get_first("ttyd", "ttyd", "ssl") or "0"
configs["ttyd"] = {
@@ -25,6 +25,11 @@
ssl = ssl == "1"
}
end
if uci:get("quickstart", "main", "disable_update_check") == "1" then
configs["update"] = {
disable = true
}
end
-%>
<script>
(function(){
+2 -2
View File
@@ -10,11 +10,11 @@ include $(TOPDIR)/rules.mk
PKG_ARCH_quickstart:=$(ARCH)
PKG_NAME:=quickstart
PKG_VERSION:=0.9.7
PKG_VERSION:=0.9.8
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-binary-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/linkease/istore-packages/releases/download/prebuilt/
PKG_HASH:=6c1d0b0fb13ad75ce2dad2d86b4a51ce925aa4c14551e95eecc4c65a08d57dbf
PKG_HASH:=2550903f42f9b939cc2307f096fe16048a714e368496301ea68e9bb0623c8aa4
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-binary-$(PKG_VERSION)
+1 -1
View File
@@ -6,7 +6,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-passwall
PKG_VERSION:=25.3.9
PKG_VERSION:=25.4.1
PKG_RELEASE:=1
PKG_CONFIG_DEPENDS:= \
@@ -95,6 +95,9 @@ function index()
--[[Backup]]
entry({"admin", "services", appname, "backup"}, call("create_backup")).leaf = true
--[[geoview]]
entry({"admin", "services", appname, "geo_view"}, call("geo_view")).leaf = true
end
local function http_write_json(content)
@@ -561,3 +564,53 @@ function create_backup()
http.write(fs.readfile(tar_file))
fs.remove(tar_file)
end
function geo_view()
local action = luci.http.formvalue("action")
local value = luci.http.formvalue("value")
if not value or value == "" then
http.prepare_content("text/plain")
http.write(i18n.translate("Please enter query content!"))
return
end
local geo_dir = (uci:get(appname, "@global_rules[0]", "v2ray_location_asset") or "/usr/share/v2ray/"):match("^(.*)/")
local geosite_path = geo_dir .. "/geosite.dat"
local geoip_path = geo_dir .. "/geoip.dat"
local geo_type, file_path, cmd
local geo_string = ""
if action == "lookup" then
if api.datatypes.ipaddr(value) or api.datatypes.ip6addr(value) then
geo_type, file_path = "geoip", geoip_path
else
geo_type, file_path = "geosite", geosite_path
end
cmd = string.format("geoview -type %s -action lookup -input '%s' -value '%s'", geo_type, file_path, value)
geo_string = luci.sys.exec(cmd):lower()
if geo_string ~= "" then
local lines = {}
for line in geo_string:gmatch("([^\n]*)\n?") do
if line ~= "" then
table.insert(lines, geo_type .. ":" .. line)
end
end
geo_string = table.concat(lines, "\n")
end
elseif action == "extract" then
local prefix, list = value:match("^(geoip:)(.*)$")
if not prefix then
prefix, list = value:match("^(geosite:)(.*)$")
end
if prefix and list and list ~= "" then
geo_type = prefix:sub(1, -2)
file_path = (geo_type == "geoip") and geoip_path or geosite_path
cmd = string.format("geoview -type %s -action extract -input '%s' -list '%s'", geo_type, file_path, list)
geo_string = luci.sys.exec(cmd)
end
end
http.prepare_content("text/plain")
if geo_string and geo_string ~="" then
http.write(geo_string)
else
http.write(i18n.translate("No results were found!"))
end
end
@@ -2,6 +2,7 @@ local api = require "luci.passwall.api"
local appname = "passwall"
local fs = api.fs
local sys = api.sys
local uci = api.uci
local datatypes = api.datatypes
local path = string.format("/usr/share/%s/rules/", appname)
local gfwlist_path = "/usr/share/passwall/rules/gfwlist"
@@ -313,6 +314,16 @@ end
m:append(Template(appname .. "/rule_list/js"))
local geo_dir = (uci:get(appname, "@global_rules[0]", "v2ray_location_asset") or "/usr/share/v2ray/"):match("^(.*)/")
local geosite_path = geo_dir .. "/geosite.dat"
local geoip_path = geo_dir .. "/geoip.dat"
if api.is_finded("geoview") and fs.access(geosite_path) and fs.access(geoip_path) then
s:tab("geoview", translate("Geo View"))
o = s:taboption("geoview", DummyValue, "_geoview_fieldset")
o.rawhtml = true
o.template = appname .. "/rule_list/geoview"
end
function m.on_before_save(self)
m:set("@global[0]", "flush_set", "1")
end
@@ -0,0 +1,82 @@
<%
local api = require "luci.passwall.api"
-%>
<style>
.faq-title {
color: var(--primary);
font-weight: bolder;
margin-bottom: 0.5rem;
display: inline-block;
}
.faq-item {
margin-bottom: 0.8rem;
line-height:1.2rem;
}
</style>
<div class="cbi-value">
<ul>
<b class="faq-title"><%:Tips:%></b>
<li class="faq-item">1. <span><%:By entering a domain or IP, you can query the Geo rule list they belong to.%></span></li>
<li class="faq-item">2. <span><%:By entering a GeoIP or Geosite, you can extract the domains/IPs they contain.%></span></li>
<li class="faq-item">3. <span><%:Use the GeoIP/Geosite query function to verify if the entered Geo rules are correct.%></span></li>
</ul>
</div>
<div class="cbi-value" id="cbi-passwall-geoview-lookup"><label class="cbi-value-title" for="geoview.lookup"><%:Domain/IP Query%></label>
<div class="cbi-value-field">
<input type="text" class="cbi-textfield" id="geoview.lookup" name="geoview.lookup" />
<input class="btn cbi-button cbi-button-apply" type="button" id="lookup-view_btn"
onclick='do_geoview(this, "lookup", document.getElementById("geoview.lookup").value)'
value="<%:Query%>" />
<br />
<div class="cbi-value-description">
<%:Enter a domain or IP to query the Geo rule list they belong to.%>
</div>
</div>
</div>
<div class="cbi-value" id="cbi-passwall-geoview-extract"><label class="cbi-value-title" for="geoview.extract"><%:GeoIP/Geosite Query%></label>
<div class="cbi-value-field">
<input type="text" class="cbi-textfield" id="geoview.extract" name="geoview.extract" />
<input class="btn cbi-button cbi-button-apply" type="button" id="extract-view_btn"
onclick='do_geoview(this, "extract", document.getElementById("geoview.extract").value)'
value="<%:Query%>" />
<br />
<div class="cbi-value-description">
<%:Enter a GeoIP or Geosite to extract the domains/IPs they contain. Format: geoip:cn or geosite:gfw%>
</div>
</div>
</div>
<div class="cbi-value">
<textarea id="geoview_textarea" class="cbi-input-textarea" style="width: 100%; margin-top: 10px;" rows="25" wrap="off" readonly="readonly"></textarea>
</div>
<script type="text/javascript">
//<![CDATA[
var lookup_btn = document.getElementById("lookup-view_btn");
var extract_btn = document.getElementById("extract-view_btn");
var QueryText = '<%:Query%>';
var QueryingText = '<%:Querying%>';
function do_geoview(btn,action,value) {
value = value.trim();
if (!value) {
alert("<%:Please enter query content!%>");
return;
}
lookup_btn.disabled = true;
extract_btn.disabled = true;
btn.value = QueryingText;
var textarea = document.getElementById('geoview_textarea');
textarea.textContent = "";
fetch('<%= api.url("geo_view") %>?action=' + action + '&value=' + encodeURIComponent(value))
.then(response => response.text())
.then(data => {
textarea.textContent = data;
lookup_btn.disabled = false;
extract_btn.disabled = false;
btn.value = QueryText;
})
}
//]]>
</script>
@@ -1833,3 +1833,42 @@ msgstr "端口跳跃范围"
msgid "Format as 1000:2000 Multiple groups are separated by commas (,)."
msgstr "格式为:1000:2000 多组时用逗号(,)隔开。"
msgid "Geo View"
msgstr "Geo 查询"
msgid "Query"
msgstr "查询"
msgid "Querying"
msgstr "查询中"
msgid "Please enter query content!"
msgstr "请输入查询内容!"
msgid "No results were found!"
msgstr "未找到任何结果!"
msgid "Domain/IP Query"
msgstr "域名/IP 查询"
msgid "GeoIP/Geosite Query"
msgstr "GeoIP/Geosite 查询"
msgid "Enter a domain or IP to query the Geo rule list they belong to."
msgstr "输入域名/IP,查询它们所在的 Geo 规则列表。"
msgid "Enter a GeoIP or Geosite to extract the domains/IPs they contain. Format: geoip:cn or geosite:gfw"
msgstr "输入 GeoIP/Geosite,提取它们所包含的域名/IP。格式:geoip:cn 或 geosite:gfw"
msgid "Tips:"
msgstr "小贴士:"
msgid "By entering a domain or IP, you can query the Geo rule list they belong to."
msgstr "可以通过输入域名/IP,查询它们所在的 Geo 规则列表。"
msgid "By entering a GeoIP or Geosite, you can extract the domains/IPs they contain."
msgstr "可以通过输入 GeoIP/Geosite,提取它们所包含的域名/IP。"
msgid "Use the GeoIP/Geosite query function to verify if the entered Geo rules are correct."
msgstr "利用 GeoIP/Geosite 查询功能,可以验证输入的 Geo 规则是否正确。"
@@ -5,7 +5,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-passwall2
PKG_VERSION:=25.3.2
PKG_VERSION:=25.4.1
PKG_RELEASE:=1
PKG_CONFIG_DEPENDS:= \
@@ -176,7 +176,7 @@ function get_redir_log()
local name = luci.http.formvalue("name")
local file_path = "/tmp/etc/passwall2/acl/" .. id .. "/" .. name .. ".log"
if nixio.fs.access(file_path) then
local content = luci.sys.exec("cat '" .. file_path .. "'")
local content = luci.sys.exec("tail -n 19999 '" .. file_path .. "'")
content = content:gsub("\n", "<br />")
luci.http.write(content)
else
@@ -188,7 +188,7 @@ function get_socks_log()
local name = luci.http.formvalue("name")
local path = "/tmp/etc/passwall2/SOCKS_" .. name .. ".log"
if nixio.fs.access(path) then
local content = luci.sys.exec("cat ".. path)
local content = luci.sys.exec("tail -n 5000 ".. path)
content = content:gsub("\n", "<br />")
luci.http.write(content)
else
@@ -1,7 +1,7 @@
local api = require "luci.passwall2.api"
local appname = api.appname
local datatypes = api.datatypes
local has_singbox = api.finded_com("singbox")
local has_singbox = api.finded_com("sing-box")
local has_xray = api.finded_com("xray")
m = Map(appname)
@@ -3,7 +3,7 @@ local appname = api.appname
local uci = api.uci
local has_ss = api.is_finded("ss-redir")
local has_ss_rust = api.is_finded("sslocal")
local has_singbox = api.finded_com("singbox")
local has_singbox = api.finded_com("sing-box")
local has_xray = api.finded_com("xray")
local has_hysteria2 = api.finded_com("hysteria")
local ss_type = {}
@@ -11,7 +11,7 @@ end
local has_ss = api.is_finded("ss-redir")
local has_ss_rust = api.is_finded("sslocal")
local has_singbox = api.finded_com("singbox")
local has_singbox = api.finded_com("sing-box")
local has_xray = api.finded_com("xray")
local has_hysteria2 = api.finded_com("hysteria")
local ss_type = {}
@@ -1,7 +1,7 @@
local api = require "luci.passwall2.api"
local appname = api.appname
local fs = api.fs
local has_singbox = api.finded_com("singbox")
local has_singbox = api.finded_com("sing-box")
local has_xray = api.finded_com("xray")
local has_fw3 = api.is_finded("fw3")
local has_fw4 = api.is_finded("fw4")
@@ -8,7 +8,7 @@ if not arg[1] or not m:get(arg[1]) then
luci.http.redirect(api.url())
end
local has_singbox = api.finded_com("singbox")
local has_singbox = api.finded_com("sing-box")
local has_xray = api.finded_com("xray")
local nodes_table = {}
@@ -95,7 +95,7 @@ m.uci:foreach(appname, "socks", function(s)
end)
-- 负载均衡列表
local o = s:option(DynamicList, _n("balancing_node"), translate("Load balancing node list"), translate("Load balancing node list, <a target='_blank' href='https://toutyrater.github.io/routing/balance2.html'>document</a>"))
local o = s:option(DynamicList, _n("balancing_node"), translate("Load balancing node list"), translate("Load balancing node list, <a target='_blank' href='https://xtls.github.io/config/routing.html#balancerobject'>document</a>"))
o:depends({ [_n("protocol")] = "_balancing" })
for k, v in pairs(nodes_table) do o:value(v.id, v.remark) end
@@ -104,7 +104,8 @@ o:depends({ [_n("protocol")] = "_balancing" })
o:value("random")
o:value("roundRobin")
o:value("leastPing")
o.default = "leastPing"
o:value("leastLoad")
o.default = "leastLoad"
-- Fallback Node
if api.compare_versions(xray_version, ">=", "1.8.10") then
@@ -133,6 +134,7 @@ end
-- 探测地址
local ucpu = s:option(Flag, _n("useCustomProbeUrl"), translate("Use Custome Probe URL"), translate("By default the built-in probe URL will be used, enable this option to use a custom probe URL."))
ucpu:depends({ [_n("balancingStrategy")] = "leastPing" })
ucpu:depends({ [_n("balancingStrategy")] = "leastLoad" })
local pu = s:option(Value, _n("probeUrl"), translate("Probe URL"))
pu:depends({ [_n("useCustomProbeUrl")] = true })
@@ -148,8 +150,9 @@ pu.description = translate("The URL used to detect the connection status.")
-- 探测间隔
local pi = s:option(Value, _n("probeInterval"), translate("Probe Interval"))
pi:depends({ [_n("balancingStrategy")] = "leastPing" })
pi:depends({ [_n("balancingStrategy")] = "leastLoad" })
pi.default = "1m"
pi.description = translate("The interval between initiating probes. Every time this time elapses, a server status check is performed on a server. The time format is numbers + units, such as '10s', '2h45m', and the supported time units are <code>ns</code>, <code>us</code>, <code>ms</code>, <code>s</code>, <code>m</code>, <code>h</code>, which correspond to nanoseconds, microseconds, milliseconds, seconds, minutes, and hours, respectively.")
pi.description = translate("The interval between initiating probes. The time format is numbers + units, such as '10s', '2h45m', and the supported time units are <code>ns</code>, <code>us</code>, <code>ms</code>, <code>s</code>, <code>m</code>, <code>h</code>, which correspond to nanoseconds, microseconds, milliseconds, seconds, minutes, and hours, respectively.")
if api.compare_versions(xray_version, ">=", "1.8.12") then
ucpu:depends({ [_n("protocol")] = "_balancing" })
@@ -159,6 +162,12 @@ else
pi:depends({ [_n("balancingStrategy")] = "leastPing" })
end
o = s:option(Value, _n("expected"), translate("Preferred Node Count"))
o:depends({ [_n("balancingStrategy")] = "leastLoad" })
o.datatype = "uinteger"
o.default = "2"
o.description = translate("The load balancer selects the optimal number of nodes, and traffic is randomly distributed among them.")
-- [[ 分流模块 ]]
if #nodes_table > 0 then
@@ -2,7 +2,7 @@ local m, s = ...
local api = require "luci.passwall2.api"
local singbox_bin = api.finded_com("singbox")
local singbox_bin = api.finded_com("sing-box")
if not singbox_bin then
return
@@ -401,6 +401,10 @@ if singbox_tags:find("with_quic") then
end
if singbox_tags:find("with_quic") then
o = s:option(Value, _n("hysteria2_ports"), translate("Port hopping range"))
o.description = translate("Format as 1000:2000 Multiple groups are separated by commas (,).")
o:depends({ [_n("protocol")] = "hysteria2" })
o = s:option(Value, _n("hysteria2_up_mbps"), translate("Max upload Mbps"))
o:depends({ [_n("protocol")] = "hysteria2" })
@@ -2,7 +2,7 @@ local m, s = ...
local api = require "luci.passwall2.api"
local singbox_bin = api.finded_com("singbox")
local singbox_bin = api.finded_com("sing-box")
if not singbox_bin then
return
@@ -23,7 +23,7 @@ _M.hysteria = {
}
}
_M.singbox = {
_M["sing-box"] = {
name = "Sing-Box",
repo = "SagerNet/sing-box",
get_url = gh_release_url,
@@ -149,7 +149,7 @@ local function start()
bin = ln_run(api.get_app_path("xray"), "xray", "run -c " .. config_file, log_path)
elseif type == "sing-box" then
config = require(require_dir .. "util_sing-box").gen_config_server(user)
bin = ln_run(api.get_app_path("singbox"), "sing-box", "run -c " .. config_file, log_path)
bin = ln_run(api.get_app_path("sing-box"), "sing-box", "run -c " .. config_file, log_path)
elseif type == "Hysteria2" then
config = require(require_dir .. "util_hysteria2").gen_config_server(user)
bin = ln_run(api.get_app_path("hysteria"), "hysteria", "-c " .. config_file .. " server", log_path)
@@ -8,7 +8,7 @@ local fs = api.fs
local CACHE_PATH = api.CACHE_PATH
local split = api.split
local local_version = api.get_app_version("singbox")
local local_version = api.get_app_version("sing-box")
local version_ge_1_11_0 = api.compare_versions(local_version:match("[^v]+"), ">=", "1.11.0")
local new_port
@@ -349,7 +349,17 @@ function gen_outbound(flag, node, tag, proxy_table)
end
if node.protocol == "hysteria2" then
local server_ports = {}
if node.hysteria2_ports then
for range in node.hysteria2_ports:gmatch("([^,]+)") do
if range:match("^%d+:%d+$") then
table.insert(server_ports, range)
end
end
end
protocol_table = {
server_ports = next(server_ports) and server_ports or nil,
hop_interval = next(server_ports) and "30s" or nil,
up_mbps = (node.hysteria2_up_mbps and tonumber(node.hysteria2_up_mbps)) and tonumber(node.hysteria2_up_mbps) or nil,
down_mbps = (node.hysteria2_down_mbps and tonumber(node.hysteria2_down_mbps)) and tonumber(node.hysteria2_down_mbps) or nil,
obfs = {
@@ -584,7 +584,8 @@ function gen_config(var)
local inbounds = {}
local outbounds = {}
local routing = nil
local observatory = nil
local burstObservatory = nil
local strategy = nil
local COMMON = {}
local CACHE_TEXT_FILE = CACHE_PATH .. "/cache_" .. flag .. ".txt"
@@ -758,19 +759,33 @@ function gen_config(var)
end
end
end
if _node.balancingStrategy == "leastLoad" then
strategy = {
type = _node.balancingStrategy,
settings = {
expected = _node.expected and tonumber(_node.expected) and tonumber(_node.expected) or 2,
maxRTT = "1s"
}
}
else
strategy = { type = _node.balancingStrategy or "random" }
end
table.insert(balancers, {
tag = balancer_tag,
selector = valid_nodes,
fallbackTag = fallback_node_tag,
strategy = { type = _node.balancingStrategy or "random" }
strategy = strategy
})
if _node.balancingStrategy == "leastPing" or fallback_node_tag then
if not observatory then
observatory = {
if _node.balancingStrategy == "leastPing" or _node.balancingStrategy == "leastLoad" or fallback_node_tag then
if not burstObservatory then
burstObservatory = {
subjectSelector = { "blc-" },
probeUrl = _node.useCustomProbeUrl and _node.probeUrl or nil,
probeInterval = _node.probeInterval or "1m",
enableConcurrency = true
pingConfig = {
destination = _node.useCustomProbeUrl and _node.probeUrl or nil,
interval = _node.probeInterval or "1m",
sampling = 3,
timeout = "5s"
}
}
end
end
@@ -1159,7 +1174,7 @@ function gen_config(var)
end
dns = {
tag = "dns-in1",
tag = "dns-global",
hosts = {},
disableCache = (dns_cache and dns_cache == "0") and true or false,
disableFallback = true,
@@ -1195,7 +1210,7 @@ function gen_config(var)
local _remote_dns_ip = nil
local _remote_dns = {
_flag = "remote",
tag = "dns-in-remote",
queryStrategy = (remote_dns_query_strategy and remote_dns_query_strategy ~= "") and remote_dns_query_strategy or "UseIPv4"
}
@@ -1256,7 +1271,7 @@ function gen_config(var)
table.insert(fakedns, fakedns6)
end
_remote_fakedns = {
_flag = "remote_fakedns",
tag = "dns-in-remote_fakedns",
address = "fakedns",
}
table.insert(dns.servers, _remote_fakedns)
@@ -1277,7 +1292,7 @@ function gen_config(var)
end
_direct_dns = {
_flag = "direct",
tag = "dns-in-direct",
address = direct_dns_udp_server,
port = tonumber(direct_dns_udp_port) or 53,
queryStrategy = (direct_dns_query_strategy and direct_dns_query_strategy ~= "") and direct_dns_query_strategy or "UseIP",
@@ -1342,21 +1357,21 @@ function gen_config(var)
})
end
local default_dns_flag = "remote"
local default_dns_tag = "dns-in-remote"
if (not COMMON.default_balancer_tag and not COMMON.default_outbound_tag) or COMMON.default_outbound_tag == "direct" then
default_dns_flag = "direct"
default_dns_tag = "dns-in-direct"
end
if dns.servers and #dns.servers > 0 then
local dns_servers = nil
for index, value in ipairs(dns.servers) do
if not dns_servers and value["_flag"] == default_dns_flag then
if value["_flag"] == "remote" and remote_dns_fake then
value["_flag"] = "default"
if not dns_servers and value.tag == default_dns_tag then
if value.tag == "dns-in-remote" and remote_dns_fake then
value.tag = "dns-in-default"
break
end
dns_servers = {
_flag = "default",
tag = "dns-in-default",
address = value.address,
port = value.port,
queryStrategy = value.queryStrategy
@@ -1384,11 +1399,18 @@ function gen_config(var)
end
dns_server.domains = value.domain
if value.shunt_rule_name then
dns_server["_flag"] = value.shunt_rule_name
dns_server.tag = "dns-in-" .. value.shunt_rule_name
end
if dns_server then
table.insert(dns.servers, dns_server)
table.insert(routing.rules, {
inboundTag = {
dns_server.tag
},
outboundTag = value.outboundTag or nil,
balancerTag = value.balancerTag or nil
})
end
end
end
@@ -1396,7 +1418,7 @@ function gen_config(var)
for i = #dns.servers, 1, -1 do
local v = dns.servers[i]
if v["_flag"] ~= "default" then
if v.tag ~= "dns-in-default" then
if not v.domains or #v.domains == 0 then
table.remove(dns.servers, i)
end
@@ -1476,7 +1498,7 @@ function gen_config(var)
-- 传出连接
outbounds = outbounds,
-- 连接观测
observatory = observatory,
burstObservatory = burstObservatory,
-- 路由
routing = routing,
-- 本地策略
@@ -1708,7 +1730,7 @@ function gen_dns_config(var)
}
dns = {
tag = "dns-in1",
tag = "dns-global",
hosts = {},
disableCache = (dns_cache == "1") and false or true,
disableFallback = true,
@@ -1757,7 +1779,7 @@ function gen_dns_config(var)
end
local _remote_dns = {
_flag = "remote"
tag = "dns-in-remote"
}
if remote_dns_udp_server then
@@ -1803,7 +1825,7 @@ function gen_dns_config(var)
})
local _direct_dns = {
_flag = "direct"
tag = "dns-in-direct"
}
if direct_dns_udp_server then
@@ -1887,7 +1909,7 @@ function gen_dns_config(var)
table.insert(routing.rules, {
inboundTag = {
"dns-in1"
"dns-global"
},
outboundTag = dns_out_tag
})
@@ -10,7 +10,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin
-%>
<script src="<%=resource%>/qrcode.min.js"></script>
<script type="text/javascript">//<![CDATA[
let has_singbox = "<%=api.finded_com("singbox")%>"
let has_singbox = "<%=api.finded_com("sing-box")%>"
let has_xray = "<%=api.finded_com("xray")%>"
let has_hysteria2 = "<%=api.finded_com("hysteria")%>"
let ss_type = "<%=ss_type%>"
@@ -313,82 +313,84 @@ table td, .table .td {
}
/* 自动Ping */
if (auto_detection_time == "icmp" || auto_detection_time == "tcping") {
var nodes = [];
const ping_value = document.getElementsByClassName(auto_detection_time == "tcping" ? 'tcping_value' : 'ping_value');
for (var i = 0; i < ping_value.length; i++) {
var cbi_id = ping_value[i].getAttribute("cbiid");
var full = get_address_full(cbi_id);
if (full != null) {
var flag = false;
//当有多个相同地址和端口时合在一起
for (var j = 0; j < nodes.length; j++) {
if (nodes[j].address == full.address && nodes[j].port == full.port) {
nodes[j].indexs = nodes[j].indexs + "," + i;
flag = true;
break;
function pingAllNodes() {
if (auto_detection_time == "icmp" || auto_detection_time == "tcping") {
var nodes = [];
const ping_value = document.getElementsByClassName(auto_detection_time == "tcping" ? 'tcping_value' : 'ping_value');
for (var i = 0; i < ping_value.length; i++) {
var cbi_id = ping_value[i].getAttribute("cbiid");
var full = get_address_full(cbi_id);
if (full != null) {
var flag = false;
//当有多个相同地址和端口时合在一起
for (var j = 0; j < nodes.length; j++) {
if (nodes[j].address == full.address && nodes[j].port == full.port) {
nodes[j].indexs = nodes[j].indexs + "," + i;
flag = true;
break;
}
}
if (flag)
continue;
nodes.push({
indexs: i + "",
address: full.address,
port: full.port
});
}
if (flag)
continue;
nodes.push({
indexs: i + "",
address: full.address,
port: full.port
});
}
}
const _xhr = (index) => {
return new Promise((res) => {
const dom = nodes[index];
if (!dom) res()
ajax.post('<%=api.url("ping_node")%>', {
index: dom.indexs,
address: dom.address,
port: dom.port,
type: auto_detection_time
},
function(x, result) {
if (x && x.status == 200) {
var strs = dom.indexs.split(",");
for (var i = 0; i < strs.length; i++) {
if (result.ping == null || result.ping.trim() == "") {
ping_value[strs[i]].innerHTML = "<font style='color:red'><%:Timeout%></font>";
} else {
var ping = parseInt(result.ping);
if (ping < 100)
ping_value[strs[i]].innerHTML = "<font style='color:green'>" + result.ping + " ms" + "</font>";
else if (ping < 200)
ping_value[strs[i]].innerHTML = "<font style='color:#fb9a05'>" + result.ping + " ms" + "</font>";
else if (ping >= 200)
ping_value[strs[i]].innerHTML = "<font style='color:red'>" + result.ping + " ms" + "</font>";
const _xhr = (index) => {
return new Promise((res) => {
const dom = nodes[index];
if (!dom) res()
ajax.post('<%=api.url("ping_node")%>', {
index: dom.indexs,
address: dom.address,
port: dom.port,
type: auto_detection_time
},
function(x, result) {
if (x && x.status == 200) {
var strs = dom.indexs.split(",");
for (var i = 0; i < strs.length; i++) {
if (result.ping == null || result.ping.trim() == "") {
ping_value[strs[i]].innerHTML = "<font style='color:red'><%:Timeout%></font>";
} else {
var ping = parseInt(result.ping);
if (ping < 100)
ping_value[strs[i]].innerHTML = "<font style='color:green'>" + result.ping + " ms" + "</font>";
else if (ping < 200)
ping_value[strs[i]].innerHTML = "<font style='color:#fb9a05'>" + result.ping + " ms" + "</font>";
else if (ping >= 200)
ping_value[strs[i]].innerHTML = "<font style='color:red'>" + result.ping + " ms" + "</font>";
}
}
}
res();
},
5000,
function(x) {
var strs = dom.indexs.split(",");
for (var i = 0; i < strs.length; i++) {
ping_value[strs[i]].innerHTML = "<font style='color:red'><%:Timeout%></font>";
}
res();
}
res();
},
5000,
function(x) {
var strs = dom.indexs.split(",");
for (var i = 0; i < strs.length; i++) {
ping_value[strs[i]].innerHTML = "<font style='color:red'><%:Timeout%></font>";
}
res();
}
);
})
}
let task = -1;
const thread = () => {
task = task + 1
if (nodes[task]) {
_xhr(task).then(thread);
);
})
}
let task = -1;
const thread = () => {
task = task + 1
if (nodes[task]) {
_xhr(task).then(thread);
}
}
for (let i = 0; i < 20; i++) {
thread()
}
}
for (let i = 0; i < 20; i++) {
thread()
}
}
@@ -445,6 +447,14 @@ table td, .table .td {
}
document.getElementById("div_node_count").innerHTML = "<div style='margin-top:5px'>" + str + "</div>";
}
//UI渲染完成后再自动Ping
window.onload = function () {
setTimeout(function () {
pingAllNodes();
}, 800);
};
//]]>
</script>
@@ -355,8 +355,14 @@ msgstr "用于检测连接状态的网址。"
msgid "Probe Interval"
msgstr "探测间隔"
msgid "The interval between initiating probes. Every time this time elapses, a server status check is performed on a server. The time format is numbers + units, such as '10s', '2h45m', and the supported time units are <code>ns</code>, <code>us</code>, <code>ms</code>, <code>s</code>, <code>m</code>, <code>h</code>, which correspond to nanoseconds, microseconds, milliseconds, seconds, minutes, and hours, respectively."
msgstr "发起探测的间隔。每经过这个时间,就会对一个服务器进行服务器状态检测。时间格式为数字+单位,比如<code>&quot;10s&quot;</code>, <code>&quot;2h45m&quot;</code>,支持的时间单位有 <code>ns</code><code>us</code><code>ms</code><code>s</code><code>m</code><code>h</code>,分别对应纳秒、微秒、毫秒、秒、分、时。"
msgid "The interval between initiating probes. The time format is numbers + units, such as '10s', '2h45m', and the supported time units are <code>ns</code>, <code>us</code>, <code>ms</code>, <code>s</code>, <code>m</code>, <code>h</code>, which correspond to nanoseconds, microseconds, milliseconds, seconds, minutes, and hours, respectively."
msgstr "发起探测的间隔。时间格式为数字+单位,比如<code>&quot;10s&quot;</code>, <code>&quot;2h45m&quot;</code>,支持的时间单位有 <code>ns</code><code>us</code><code>ms</code><code>s</code><code>m</code><code>h</code>,分别对应纳秒、微秒、毫秒、秒、分、时。"
msgid "Preferred Node Count"
msgstr "优选节点数量"
msgid "The load balancer selects the optimal number of nodes, and traffic is randomly distributed among them."
msgstr "负载均衡器选出最优节点的个数,流量将在这几个节点中随机分配。"
msgid "Shunt"
msgstr "分流"
@@ -406,8 +412,8 @@ msgstr "IPOnDemand:当匹配时碰到任何基于 IP 的规则,将域名立
msgid "Load balancing node list"
msgstr "负载均衡节点列表"
msgid "Load balancing node list, <a target='_blank' href='https://toutyrater.github.io/routing/balance2.html'>document</a>"
msgstr "负载均衡节点列表,<a target='_blank' href='https://toutyrater.github.io/routing/balance2.html'>文档原理</a>"
msgid "Load balancing node list, <a target='_blank' href='https://xtls.github.io/config/routing.html#balancerobject'>document</a>"
msgstr "负载均衡节点列表,<a target='_blank' href='https://xtls.github.io/config/routing.html#balancerobject'>文档原理</a>"
msgid "From Share URL"
msgstr "导入分享URL"
@@ -1677,3 +1683,9 @@ msgstr "中断现有连接"
msgid "Interrupt existing connections when the selected outbound has changed."
msgstr "当选择的出站发生变化时中断现有连接。"
msgid "Port hopping range"
msgstr "端口跳跃范围"
msgid "Format as 1000:2000 Multiple groups are separated by commas (,)."
msgstr "格式为:1000:2000 多组时用逗号(,)隔开。"
@@ -8,7 +8,7 @@ server_port=$4
probe_file="/tmp/etc/passwall2/haproxy/Probe_URL"
probeUrl="https://www.google.com/generate_204"
if [ -f "$probe_file" ]; then
firstLine=$(head -n 1 "$probe_file" | tr -d ' \t')
firstLine=$(head -n 1 "$probe_file" | tr -d ' \t\n')
[ -n "$firstLine" ] && probeUrl="$firstLine"
fi
@@ -23,7 +23,7 @@ uci:revert(appname)
local has_ss = api.is_finded("ss-redir")
local has_ss_rust = api.is_finded("sslocal")
local has_singbox = api.finded_com("singbox")
local has_singbox = api.finded_com("sing-box")
local has_xray = api.finded_com("xray")
local has_hysteria2 = api.finded_com("hysteria")
local allowInsecure_default = true
@@ -182,6 +182,11 @@ do
if true then
local i = 0
local option = "lbss"
local function is_ip_port(str)
if type(str) ~= "string" then return false end
local ip, port = str:match("^([%d%.]+):(%d+)$")
return ip and datatypes.ipaddr(ip) and tonumber(port) and tonumber(port) <= 65535
end
uci:foreach(appname, "haproxy_config", function(t)
i = i + 1
local node_id = t[option]
@@ -191,11 +196,17 @@ do
remarks = "HAProxy负载均衡节点列表[" .. i .. "]",
currentNode = node_id and uci:get_all(appname, node_id) or nil,
set = function(o, server)
uci:set(appname, t[".name"], option, server)
o.newNodeId = server
-- 如果当前 lbss 值不是 ip:port 格式,才进行修改
if not is_ip_port(t[option]) then
uci:set(appname, t[".name"], option, server)
o.newNodeId = server
end
end,
delete = function(o)
uci:delete(appname, t[".name"])
-- 如果当前 lbss 值不是 ip:port 格式,才进行删除
if not is_ip_port(t[option]) then
uci:delete(appname, t[".name"])
end
end
}
end)
@@ -1294,19 +1305,21 @@ local function processData(szType, content, add_mode, add_from)
end
local function curl(url, file, ua, mode)
local curl_args = api.clone(api.curl_args)
local curl_args = {
"-skL", "-w %{http_code}", "--retry 3", "--connect-timeout 3"
}
if ua and ua ~= "" and ua ~= "curl" then
table.insert(curl_args, '--user-agent "' .. ua .. '"')
curl_args[#curl_args + 1] = '--user-agent "' .. ua .. '"'
end
local return_code
local return_code, result
if mode == "direct" then
return_code = api.curl_direct(url, file, curl_args)
return_code, result = api.curl_direct(url, file, curl_args)
elseif mode == "proxy" then
return_code = api.curl_proxy(url, file, curl_args)
return_code, result = api.curl_proxy(url, file, curl_args)
else
return_code = api.curl_auto(url, file, curl_args)
return_code, result = api.curl_auto(url, file, curl_args)
end
return return_code
return tonumber(result)
end
local function truncate_nodes(add_from)
@@ -1630,7 +1643,7 @@ local function parse_link(raw, add_mode, add_from, cfgid)
log('成功解析【' .. add_from .. '】节点数量: ' .. #node_list)
else
if add_mode == "2" then
log('获取到的【' .. add_from .. '】订阅内容为空,可能是订阅地址效,或是网络问题,请请检测。')
log('获取到的【' .. add_from .. '】订阅内容为空,可能是订阅地址效,或是网络问题,请诊断!')
end
end
end
@@ -1705,23 +1718,27 @@ local execute = function()
local result = (not access_mode) and "自动" or (access_mode == "direct" and "直连访问" or (access_mode == "proxy" and "通过代理" or "自动"))
log('正在订阅:【' .. remark .. '' .. url .. ' [' .. result .. ']')
local tmp_file = "/tmp/" .. cfgid
local raw = curl(url, tmp_file, ua, access_mode)
if raw == 0 then
local f = io.open(tmp_file, "r")
local stdout = f:read("*all")
f:close()
raw = trim(stdout)
local old_md5 = value.md5 or ""
local new_md5 = luci.sys.exec("[ -f " .. tmp_file .. " ] && md5sum " .. tmp_file .. " | awk '{print $1}' || echo 0"):gsub("\n", "")
os.remove(tmp_file)
if old_md5 == new_md5 then
log('订阅:【' .. remark .. '】没有变化,无需更新。')
else
parse_link(raw, "2", remark, cfgid)
uci:set(appname, cfgid, "md5", new_md5)
end
else
value.http_code = curl(url, tmp_file, ua, access_mode)
if value.http_code ~= 200 then
fail_list[#fail_list + 1] = value
else
if luci.sys.call("[ -f " .. tmp_file .. " ] && sed -i -e '/^[ \t]*$/d' -e '/^[ \t]*\r$/d' " .. tmp_file) == 0 then
local f = io.open(tmp_file, "r")
local stdout = f:read("*all")
f:close()
local raw_data = trim(stdout)
local old_md5 = value.md5 or ""
local new_md5 = luci.sys.exec("md5sum " .. tmp_file .. " 2>/dev/null | awk '{print $1}'"):gsub("\n", "")
os.remove(tmp_file)
if old_md5 == new_md5 then
log('订阅:【' .. remark .. '】没有变化,无需更新。')
else
parse_link(raw_data, "2", remark, cfgid)
uci:set(appname, cfgid, "md5", new_md5)
end
else
fail_list[#fail_list + 1] = value
end
end
allowInsecure_default = true
filter_keyword_mode_default = uci:get(appname, "@global_subscribe[0]", "filter_keyword_mode") or "0"
@@ -1736,7 +1753,7 @@ local execute = function()
if #fail_list > 0 then
for index, value in ipairs(fail_list) do
log(string.format('【%s】订阅失败,可能是订阅地址效,或是网络问题,请诊断!', value.remark))
log(string.format('【%s】订阅失败,可能是订阅地址效,或是网络问题,请诊断![%s]', value.remark, tostring(value.http_code)))
end
end
update_node(0)
+2 -2
View File
@@ -324,9 +324,9 @@ dependencies = [
[[package]]
name = "blake3"
version = "1.7.0"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b17679a8d69b6d7fd9cd9801a536cec9fa5e5970b69f9d4747f70b39b031f5e7"
checksum = "34a796731680be7931955498a16a10b2270c7762963d5d570fdbfe02dcbf314f"
dependencies = [
"arrayref",
"arrayvec",
+1
View File
@@ -12,6 +12,7 @@ release/config/config.json=/etc/sing-box/config.json
release/config/sing-box.service=/usr/lib/systemd/system/sing-box.service
release/config/sing-box@.service=/usr/lib/systemd/system/sing-box@.service
release/config/sing-box.sysusers=/usr/lib/sysusers.d/sing-box.conf
release/config/sing-box.rules=usr/share/polkit-1/rules.d/sing-box.rules
release/config/sing-box-split-dns.xml=/usr/share/dbus-1/system.d/sing-box-split-dns.conf
release/completions/sing-box.bash=/usr/share/bash-completion/completions/sing-box.bash
+2
View File
@@ -58,6 +58,8 @@ nfpms:
dst: /usr/lib/systemd/system/sing-box@.service
- src: release/config/sing-box.sysusers
dst: /usr/lib/sysusers.d/sing-box.conf
- src: release/config/sing-box.rules
dst: /usr/share/polkit-1/rules.d/sing-box.rules
- src: release/config/sing-box-split-dns.xml
dst: /usr/share/dbus-1/system.d/sing-box-split-dns.conf
+2
View File
@@ -140,6 +140,8 @@ nfpms:
dst: /usr/lib/systemd/system/sing-box@.service
- src: release/config/sing-box.sysusers
dst: /usr/lib/sysusers.d/sing-box.conf
- src: release/config/sing-box.rules
dst: /usr/share/polkit-1/rules.d/sing-box.rules
- src: release/config/sing-box-split-dns.xml
dst: /usr/share/dbus-1/system.d/sing-box-split-dns.conf
+26 -5
View File
@@ -7,7 +7,9 @@ import (
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/logger"
"github.com/sagernet/sing/service"
"github.com/miekg/dns"
)
@@ -31,11 +33,30 @@ type DNSClient interface {
}
type DNSQueryOptions struct {
Transport DNSTransport
Strategy C.DomainStrategy
DisableCache bool
RewriteTTL *uint32
ClientSubnet netip.Prefix
Transport DNSTransport
Strategy C.DomainStrategy
LookupStrategy C.DomainStrategy
DisableCache bool
RewriteTTL *uint32
ClientSubnet netip.Prefix
}
func DNSQueryOptionsFrom(ctx context.Context, options *option.DomainResolveOptions) (*DNSQueryOptions, error) {
if options == nil {
return &DNSQueryOptions{}, nil
}
transportManager := service.FromContext[DNSTransportManager](ctx)
transport, loaded := transportManager.Transport(options.Server)
if !loaded {
return nil, E.New("domain resolver not found: " + options.Server)
}
return &DNSQueryOptions{
Transport: transport,
Strategy: C.DomainStrategy(options.Strategy),
DisableCache: options.DisableCache,
RewriteTTL: options.RewriteTTL,
ClientSubnet: options.ClientSubnet.Build(netip.Prefix{}),
}, nil
}
type RDRCStore interface {
+1 -1
View File
@@ -7,7 +7,7 @@ import (
)
type FakeIPStore interface {
Service
SimpleLifecycle
Contains(address netip.Addr) bool
Create(domain string, isIPv6 bool) (netip.Addr, error)
Lookup(address netip.Addr) (string, bool)
+3 -2
View File
@@ -37,13 +37,14 @@ func NewManager(logger log.ContextLogger, registry adapter.InboundRegistry, endp
func (m *Manager) Start(stage adapter.StartStage) error {
m.access.Lock()
defer m.access.Unlock()
if m.started && m.stage >= stage {
panic("already started")
}
m.started = true
m.stage = stage
for _, inbound := range m.inbounds {
inbounds := m.inbounds
m.access.Unlock()
for _, inbound := range inbounds {
err := adapter.LegacyStart(inbound, stage)
if err != nil {
return E.Cause(err, stage, " inbound/", inbound.Type(), "[", inbound.Tag(), "]")
+5
View File
@@ -2,6 +2,11 @@ package adapter
import E "github.com/sagernet/sing/common/exceptions"
type SimpleLifecycle interface {
Start() error
Close() error
}
type StartStage uint8
const (
+6 -6
View File
@@ -28,14 +28,14 @@ func LegacyStart(starter any, stage StartStage) error {
}
type lifecycleServiceWrapper struct {
Service
SimpleLifecycle
name string
}
func NewLifecycleService(service Service, name string) LifecycleService {
func NewLifecycleService(service SimpleLifecycle, name string) LifecycleService {
return &lifecycleServiceWrapper{
Service: service,
name: name,
SimpleLifecycle: service,
name: name,
}
}
@@ -44,9 +44,9 @@ func (l *lifecycleServiceWrapper) Name() string {
}
func (l *lifecycleServiceWrapper) Start(stage StartStage) error {
return LegacyStart(l.Service, stage)
return LegacyStart(l.SimpleLifecycle, stage)
}
func (l *lifecycleServiceWrapper) Close() error {
return l.Service.Close()
return l.SimpleLifecycle.Close()
}
+1 -1
View File
@@ -11,7 +11,7 @@ type HeadlessRule interface {
type Rule interface {
HeadlessRule
Service
SimpleLifecycle
Type() string
Action() RuleAction
}
+23 -2
View File
@@ -1,6 +1,27 @@
package adapter
import (
"context"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
)
type Service interface {
Start() error
Close() error
Lifecycle
Type() string
Tag() string
}
type ServiceRegistry interface {
option.ServiceOptionsRegistry
Create(ctx context.Context, logger log.ContextLogger, tag string, serviceType string, options any) (Service, error)
}
type ServiceManager interface {
Lifecycle
Services() []Service
Get(tag string) (Service, bool)
Remove(tag string) error
Create(ctx context.Context, logger log.ContextLogger, tag string, serviceType string, options any) error
}
+21
View File
@@ -0,0 +1,21 @@
package service
type Adapter struct {
serviceType string
serviceTag string
}
func NewAdapter(serviceType string, serviceTag string) Adapter {
return Adapter{
serviceType: serviceType,
serviceTag: serviceTag,
}
}
func (a *Adapter) Type() string {
return a.serviceType
}
func (a *Adapter) Tag() string {
return a.serviceTag
}
+144
View File
@@ -0,0 +1,144 @@
package service
import (
"context"
"os"
"sync"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/taskmonitor"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing/common"
E "github.com/sagernet/sing/common/exceptions"
)
var _ adapter.ServiceManager = (*Manager)(nil)
type Manager struct {
logger log.ContextLogger
registry adapter.ServiceRegistry
access sync.Mutex
started bool
stage adapter.StartStage
services []adapter.Service
serviceByTag map[string]adapter.Service
}
func NewManager(logger log.ContextLogger, registry adapter.ServiceRegistry) *Manager {
return &Manager{
logger: logger,
registry: registry,
serviceByTag: make(map[string]adapter.Service),
}
}
func (m *Manager) Start(stage adapter.StartStage) error {
m.access.Lock()
if m.started && m.stage >= stage {
panic("already started")
}
m.started = true
m.stage = stage
services := m.services
m.access.Unlock()
for _, service := range services {
err := adapter.LegacyStart(service, stage)
if err != nil {
return E.Cause(err, stage, " service/", service.Type(), "[", service.Tag(), "]")
}
}
return nil
}
func (m *Manager) Close() error {
m.access.Lock()
defer m.access.Unlock()
if !m.started {
return nil
}
m.started = false
services := m.services
m.services = nil
monitor := taskmonitor.New(m.logger, C.StopTimeout)
var err error
for _, service := range services {
monitor.Start("close service/", service.Type(), "[", service.Tag(), "]")
err = E.Append(err, service.Close(), func(err error) error {
return E.Cause(err, "close service/", service.Type(), "[", service.Tag(), "]")
})
monitor.Finish()
}
return nil
}
func (m *Manager) Services() []adapter.Service {
m.access.Lock()
defer m.access.Unlock()
return m.services
}
func (m *Manager) Get(tag string) (adapter.Service, bool) {
m.access.Lock()
service, found := m.serviceByTag[tag]
m.access.Unlock()
return service, found
}
func (m *Manager) Remove(tag string) error {
m.access.Lock()
service, found := m.serviceByTag[tag]
if !found {
m.access.Unlock()
return os.ErrInvalid
}
delete(m.serviceByTag, tag)
index := common.Index(m.services, func(it adapter.Service) bool {
return it == service
})
if index == -1 {
panic("invalid service index")
}
m.services = append(m.services[:index], m.services[index+1:]...)
started := m.started
m.access.Unlock()
if started {
return service.Close()
}
return nil
}
func (m *Manager) Create(ctx context.Context, logger log.ContextLogger, tag string, serviceType string, options any) error {
service, err := m.registry.Create(ctx, logger, tag, serviceType, options)
if err != nil {
return err
}
m.access.Lock()
defer m.access.Unlock()
if m.started {
for _, stage := range adapter.ListStartStages {
err = adapter.LegacyStart(service, stage)
if err != nil {
return E.Cause(err, stage, " service/", service.Type(), "[", service.Tag(), "]")
}
}
}
if existsService, loaded := m.serviceByTag[tag]; loaded {
if m.started {
err = existsService.Close()
if err != nil {
return E.Cause(err, "close service/", existsService.Type(), "[", existsService.Tag(), "]")
}
}
existsIndex := common.Index(m.services, func(it adapter.Service) bool {
return it == existsService
})
if existsIndex == -1 {
panic("invalid service index")
}
m.services = append(m.services[:existsIndex], m.services[existsIndex+1:]...)
}
m.services = append(m.services, service)
m.serviceByTag[tag] = service
return nil
}
+72
View File
@@ -0,0 +1,72 @@
package service
import (
"context"
"sync"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing/common"
E "github.com/sagernet/sing/common/exceptions"
)
type ConstructorFunc[T any] func(ctx context.Context, logger log.ContextLogger, tag string, options T) (adapter.Service, error)
func Register[Options any](registry *Registry, outboundType string, constructor ConstructorFunc[Options]) {
registry.register(outboundType, func() any {
return new(Options)
}, func(ctx context.Context, logger log.ContextLogger, tag string, rawOptions any) (adapter.Service, error) {
var options *Options
if rawOptions != nil {
options = rawOptions.(*Options)
}
return constructor(ctx, logger, tag, common.PtrValueOrDefault(options))
})
}
var _ adapter.ServiceRegistry = (*Registry)(nil)
type (
optionsConstructorFunc func() any
constructorFunc func(ctx context.Context, logger log.ContextLogger, tag string, options any) (adapter.Service, error)
)
type Registry struct {
access sync.Mutex
optionsType map[string]optionsConstructorFunc
constructor map[string]constructorFunc
}
func NewRegistry() *Registry {
return &Registry{
optionsType: make(map[string]optionsConstructorFunc),
constructor: make(map[string]constructorFunc),
}
}
func (m *Registry) CreateOptions(outboundType string) (any, bool) {
m.access.Lock()
defer m.access.Unlock()
optionsConstructor, loaded := m.optionsType[outboundType]
if !loaded {
return nil, false
}
return optionsConstructor(), true
}
func (m *Registry) Create(ctx context.Context, logger log.ContextLogger, tag string, outboundType string, options any) (adapter.Service, error) {
m.access.Lock()
defer m.access.Unlock()
constructor, loaded := m.constructor[outboundType]
if !loaded {
return nil, E.New("outbound type not found: " + outboundType)
}
return constructor(ctx, logger, tag, options)
}
func (m *Registry) register(outboundType string, optionsConstructor optionsConstructorFunc, constructor constructorFunc) {
m.access.Lock()
defer m.access.Unlock()
m.optionsType[outboundType] = optionsConstructor
m.constructor[outboundType] = constructor
}
+1 -1
View File
@@ -3,6 +3,6 @@ package adapter
import "time"
type TimeService interface {
Service
SimpleLifecycle
TimeFunc() func() time.Time
}
+77 -46
View File
@@ -12,6 +12,7 @@ import (
"github.com/sagernet/sing-box/adapter/endpoint"
"github.com/sagernet/sing-box/adapter/inbound"
"github.com/sagernet/sing-box/adapter/outbound"
boxService "github.com/sagernet/sing-box/adapter/service"
"github.com/sagernet/sing-box/common/certificate"
"github.com/sagernet/sing-box/common/dialer"
"github.com/sagernet/sing-box/common/taskmonitor"
@@ -34,22 +35,23 @@ import (
"github.com/sagernet/sing/service/pause"
)
var _ adapter.Service = (*Box)(nil)
var _ adapter.SimpleLifecycle = (*Box)(nil)
type Box struct {
createdAt time.Time
logFactory log.Factory
logger log.ContextLogger
network *route.NetworkManager
endpoint *endpoint.Manager
inbound *inbound.Manager
outbound *outbound.Manager
dnsTransport *dns.TransportManager
dnsRouter *dns.Router
connection *route.ConnectionManager
router *route.Router
services []adapter.LifecycleService
done chan struct{}
createdAt time.Time
logFactory log.Factory
logger log.ContextLogger
network *route.NetworkManager
endpoint *endpoint.Manager
inbound *inbound.Manager
outbound *outbound.Manager
service *boxService.Manager
dnsTransport *dns.TransportManager
dnsRouter *dns.Router
connection *route.ConnectionManager
router *route.Router
internalService []adapter.LifecycleService
done chan struct{}
}
type Options struct {
@@ -64,6 +66,7 @@ func Context(
outboundRegistry adapter.OutboundRegistry,
endpointRegistry adapter.EndpointRegistry,
dnsTransportRegistry adapter.DNSTransportRegistry,
serviceRegistry adapter.ServiceRegistry,
) context.Context {
if service.FromContext[option.InboundOptionsRegistry](ctx) == nil ||
service.FromContext[adapter.InboundRegistry](ctx) == nil {
@@ -84,6 +87,10 @@ func Context(
ctx = service.ContextWith[option.DNSTransportOptionsRegistry](ctx, dnsTransportRegistry)
ctx = service.ContextWith[adapter.DNSTransportRegistry](ctx, dnsTransportRegistry)
}
if service.FromContext[adapter.ServiceRegistry](ctx) == nil {
ctx = service.ContextWith[option.ServiceOptionsRegistry](ctx, serviceRegistry)
ctx = service.ContextWith[adapter.ServiceRegistry](ctx, serviceRegistry)
}
return ctx
}
@@ -99,6 +106,7 @@ func New(options Options) (*Box, error) {
inboundRegistry := service.FromContext[adapter.InboundRegistry](ctx)
outboundRegistry := service.FromContext[adapter.OutboundRegistry](ctx)
dnsTransportRegistry := service.FromContext[adapter.DNSTransportRegistry](ctx)
serviceRegistry := service.FromContext[adapter.ServiceRegistry](ctx)
if endpointRegistry == nil {
return nil, E.New("missing endpoint registry in context")
@@ -109,6 +117,12 @@ func New(options Options) (*Box, error) {
if outboundRegistry == nil {
return nil, E.New("missing outbound registry in context")
}
if dnsTransportRegistry == nil {
return nil, E.New("missing DNS transport registry in context")
}
if serviceRegistry == nil {
return nil, E.New("missing service registry in context")
}
ctx = pause.WithDefaultManager(ctx)
experimentalOptions := common.PtrValueOrDefault(options.Experimental)
@@ -142,7 +156,7 @@ func New(options Options) (*Box, error) {
return nil, E.Cause(err, "create log factory")
}
var services []adapter.LifecycleService
var internalServices []adapter.LifecycleService
certificateOptions := common.PtrValueOrDefault(options.Certificate)
if C.IsAndroid || certificateOptions.Store != "" && certificateOptions.Store != C.CertificateStoreSystem ||
len(certificateOptions.Certificate) > 0 ||
@@ -153,7 +167,7 @@ func New(options Options) (*Box, error) {
return nil, err
}
service.MustRegister[adapter.CertificateStore](ctx, certificateStore)
services = append(services, certificateStore)
internalServices = append(internalServices, certificateStore)
}
routeOptions := common.PtrValueOrDefault(options.Route)
@@ -162,10 +176,12 @@ func New(options Options) (*Box, error) {
inboundManager := inbound.NewManager(logFactory.NewLogger("inbound"), inboundRegistry, endpointManager)
outboundManager := outbound.NewManager(logFactory.NewLogger("outbound"), outboundRegistry, endpointManager, routeOptions.Final)
dnsTransportManager := dns.NewTransportManager(logFactory.NewLogger("dns/transport"), dnsTransportRegistry, outboundManager, dnsOptions.Final)
serviceManager := boxService.NewManager(logFactory.NewLogger("service"), serviceRegistry)
service.MustRegister[adapter.EndpointManager](ctx, endpointManager)
service.MustRegister[adapter.InboundManager](ctx, inboundManager)
service.MustRegister[adapter.OutboundManager](ctx, outboundManager)
service.MustRegister[adapter.DNSTransportManager](ctx, dnsTransportManager)
service.MustRegister[adapter.ServiceManager](ctx, serviceManager)
dnsRouter := dns.NewRouter(ctx, logFactory, dnsOptions)
service.MustRegister[adapter.DNSRouter](ctx, dnsRouter)
networkManager, err := route.NewNetworkManager(ctx, logFactory.NewLogger("network"), routeOptions)
@@ -280,6 +296,24 @@ func New(options Options) (*Box, error) {
return nil, E.Cause(err, "initialize outbound[", i, "]")
}
}
for i, serviceOptions := range options.Services {
var tag string
if serviceOptions.Tag != "" {
tag = serviceOptions.Tag
} else {
tag = F.ToString(i)
}
err = serviceManager.Create(
ctx,
logFactory.NewLogger(F.ToString("service/", serviceOptions.Type, "[", tag, "]")),
tag,
serviceOptions.Type,
serviceOptions.Options,
)
if err != nil {
return nil, E.Cause(err, "initialize service[", i, "]")
}
}
outboundManager.Initialize(common.Must1(
direct.NewOutbound(
ctx,
@@ -305,7 +339,7 @@ func New(options Options) (*Box, error) {
if needCacheFile {
cacheFile := cachefile.New(ctx, common.PtrValueOrDefault(experimentalOptions.CacheFile))
service.MustRegister[adapter.CacheFile](ctx, cacheFile)
services = append(services, cacheFile)
internalServices = append(internalServices, cacheFile)
}
if needClashAPI {
clashAPIOptions := common.PtrValueOrDefault(experimentalOptions.ClashAPI)
@@ -316,7 +350,7 @@ func New(options Options) (*Box, error) {
}
router.SetTracker(clashServer)
service.MustRegister[adapter.ClashServer](ctx, clashServer)
services = append(services, clashServer)
internalServices = append(internalServices, clashServer)
}
if needV2RayAPI {
v2rayServer, err := experimental.NewV2RayServer(logFactory.NewLogger("v2ray-api"), common.PtrValueOrDefault(experimentalOptions.V2RayAPI))
@@ -325,7 +359,7 @@ func New(options Options) (*Box, error) {
}
if v2rayServer.StatsService() != nil {
router.SetTracker(v2rayServer.StatsService())
services = append(services, v2rayServer)
internalServices = append(internalServices, v2rayServer)
service.MustRegister[adapter.V2RayServer](ctx, v2rayServer)
}
}
@@ -343,22 +377,23 @@ func New(options Options) (*Box, error) {
WriteToSystem: ntpOptions.WriteToSystem,
})
timeService.TimeService = ntpService
services = append(services, adapter.NewLifecycleService(ntpService, "ntp service"))
internalServices = append(internalServices, adapter.NewLifecycleService(ntpService, "ntp service"))
}
return &Box{
network: networkManager,
endpoint: endpointManager,
inbound: inboundManager,
outbound: outboundManager,
dnsTransport: dnsTransportManager,
dnsRouter: dnsRouter,
connection: connectionManager,
router: router,
createdAt: createdAt,
logFactory: logFactory,
logger: logFactory.Logger(),
services: services,
done: make(chan struct{}),
network: networkManager,
endpoint: endpointManager,
inbound: inboundManager,
outbound: outboundManager,
dnsTransport: dnsTransportManager,
service: serviceManager,
dnsRouter: dnsRouter,
connection: connectionManager,
router: router,
createdAt: createdAt,
logFactory: logFactory,
logger: logFactory.Logger(),
internalService: internalServices,
done: make(chan struct{}),
}, nil
}
@@ -408,11 +443,11 @@ func (s *Box) preStart() error {
if err != nil {
return E.Cause(err, "start logger")
}
err = adapter.StartNamed(adapter.StartStateInitialize, s.services) // cache-file clash-api v2ray-api
err = adapter.StartNamed(adapter.StartStateInitialize, s.internalService) // cache-file clash-api v2ray-api
if err != nil {
return err
}
err = adapter.Start(adapter.StartStateInitialize, s.network, s.dnsTransport, s.dnsRouter, s.connection, s.router, s.outbound, s.inbound, s.endpoint)
err = adapter.Start(adapter.StartStateInitialize, s.network, s.dnsTransport, s.dnsRouter, s.connection, s.router, s.outbound, s.inbound, s.endpoint, s.service)
if err != nil {
return err
}
@@ -428,31 +463,27 @@ func (s *Box) start() error {
if err != nil {
return err
}
err = adapter.StartNamed(adapter.StartStateStart, s.services)
err = adapter.StartNamed(adapter.StartStateStart, s.internalService)
if err != nil {
return err
}
err = s.inbound.Start(adapter.StartStateStart)
err = adapter.Start(adapter.StartStateStart, s.inbound, s.endpoint, s.service)
if err != nil {
return err
}
err = adapter.Start(adapter.StartStateStart, s.endpoint)
err = adapter.Start(adapter.StartStatePostStart, s.outbound, s.network, s.dnsTransport, s.dnsRouter, s.connection, s.router, s.inbound, s.endpoint, s.service)
if err != nil {
return err
}
err = adapter.Start(adapter.StartStatePostStart, s.outbound, s.network, s.dnsTransport, s.dnsRouter, s.connection, s.router, s.inbound, s.endpoint)
err = adapter.StartNamed(adapter.StartStatePostStart, s.internalService)
if err != nil {
return err
}
err = adapter.StartNamed(adapter.StartStatePostStart, s.services)
err = adapter.Start(adapter.StartStateStarted, s.network, s.dnsTransport, s.dnsRouter, s.connection, s.router, s.outbound, s.inbound, s.endpoint, s.service)
if err != nil {
return err
}
err = adapter.Start(adapter.StartStateStarted, s.network, s.dnsTransport, s.dnsRouter, s.connection, s.router, s.outbound, s.inbound, s.endpoint)
if err != nil {
return err
}
err = adapter.StartNamed(adapter.StartStateStarted, s.services)
err = adapter.StartNamed(adapter.StartStateStarted, s.internalService)
if err != nil {
return err
}
@@ -469,7 +500,7 @@ func (s *Box) Close() error {
err := common.Close(
s.inbound, s.outbound, s.endpoint, s.router, s.connection, s.dnsRouter, s.dnsTransport, s.network,
)
for _, lifecycleService := range s.services {
for _, lifecycleService := range s.internalService {
err = E.Append(err, lifecycleService.Close(), func(err error) error {
return E.Cause(err, "close ", lifecycleService.Name())
})
+1 -1
View File
@@ -69,5 +69,5 @@ func preRun(cmd *cobra.Command, args []string) {
configPaths = append(configPaths, "config.json")
}
globalCtx = service.ContextWith(globalCtx, deprecated.NewStderrManager(log.StdLogger()))
globalCtx = box.Context(globalCtx, include.InboundRegistry(), include.OutboundRegistry(), include.EndpointRegistry(), include.DNSTransportRegistry())
globalCtx = box.Context(globalCtx, include.InboundRegistry(), include.OutboundRegistry(), include.EndpointRegistry(), include.DNSTransportRegistry(), include.ServiceRegistry())
}
-4
View File
@@ -11,7 +11,6 @@ import (
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing/common"
"github.com/sagernet/sing/common/buf"
M "github.com/sagernet/sing/common/metadata"
"github.com/sagernet/sing/common/task"
mDNS "github.com/miekg/dns"
@@ -47,9 +46,6 @@ func DomainNameQuery(ctx context.Context, metadata *adapter.InboundContext, pack
if err != nil {
return err
}
if len(msg.Question) == 0 || msg.Question[0].Qclass != mDNS.ClassINET || !M.IsDomainName(msg.Question[0].Name) {
return os.ErrInvalid
}
metadata.Protocol = C.ProtocolDNS
return nil
}
+23
View File
@@ -0,0 +1,23 @@
package sniff_test
import (
"context"
"encoding/hex"
"testing"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/sniff"
C "github.com/sagernet/sing-box/constant"
"github.com/stretchr/testify/require"
)
func TestSniffDNS(t *testing.T) {
t.Parallel()
query, err := hex.DecodeString("740701000001000000000000012a06676f6f676c6503636f6d0000010001")
require.NoError(t, err)
var metadata adapter.InboundContext
err = sniff.DomainNameQuery(context.TODO(), &metadata, query)
require.NoError(t, err)
require.Equal(t, C.ProtocolDNS, metadata.Protocol)
}
+1 -1
View File
@@ -37,7 +37,7 @@ func (w *acmeWrapper) Close() error {
return nil
}
func startACME(ctx context.Context, options option.InboundACMEOptions) (*tls.Config, adapter.Service, error) {
func startACME(ctx context.Context, options option.InboundACMEOptions) (*tls.Config, adapter.SimpleLifecycle, error) {
var acmeServer string
switch options.Provider {
case "", "letsencrypt":
+1 -1
View File
@@ -11,6 +11,6 @@ import (
E "github.com/sagernet/sing/common/exceptions"
)
func startACME(ctx context.Context, options option.InboundACMEOptions) (*tls.Config, adapter.Service, error) {
func startACME(ctx context.Context, options option.InboundACMEOptions) (*tls.Config, adapter.SimpleLifecycle, error) {
return nil, nil, E.New(`ACME is not included in this build, rebuild with -tags with_acme`)
}
+2 -2
View File
@@ -21,7 +21,7 @@ var errInsecureUnused = E.New("tls: insecure unused")
type STDServerConfig struct {
config *tls.Config
logger log.Logger
acmeService adapter.Service
acmeService adapter.SimpleLifecycle
certificate []byte
key []byte
certificatePath string
@@ -164,7 +164,7 @@ func NewSTDServer(ctx context.Context, logger log.Logger, options option.Inbound
return nil, nil
}
var tlsConfig *tls.Config
var acmeService adapter.Service
var acmeService adapter.SimpleLifecycle
var err error
if options.ACME != nil && len(options.ACME.Domain) > 0 {
//nolint:staticcheck
-1
View File
@@ -28,7 +28,6 @@ const (
DNSTypeFakeIP = "fakeip"
DNSTypeDHCP = "dhcp"
DNSTypeTailscale = "tailscale"
DNSTypeSplitDNS = "split-dns"
)
const (
+3
View File
@@ -25,6 +25,9 @@ const (
TypeTUIC = "tuic"
TypeHysteria2 = "hysteria2"
TypeTailscale = "tailscale"
TypeDERP = "derp"
TypeDERPSTUN = "derp-stun"
TypeResolved = "resolved"
)
const (
+44 -15
View File
@@ -243,9 +243,15 @@ func (c *Client) Exchange(ctx context.Context, transport adapter.DNSTransport, m
func (c *Client) Lookup(ctx context.Context, transport adapter.DNSTransport, domain string, options adapter.DNSQueryOptions, responseChecker func(responseAddrs []netip.Addr) bool) ([]netip.Addr, error) {
domain = FqdnToDomain(domain)
dnsName := dns.Fqdn(domain)
if options.Strategy == C.DomainStrategyIPv4Only {
var strategy C.DomainStrategy
if options.LookupStrategy != C.DomainStrategyAsIS {
strategy = options.LookupStrategy
} else {
strategy = options.Strategy
}
if strategy == C.DomainStrategyIPv4Only {
return c.lookupToExchange(ctx, transport, dnsName, dns.TypeA, options, responseChecker)
} else if options.Strategy == C.DomainStrategyIPv6Only {
} else if strategy == C.DomainStrategyIPv6Only {
return c.lookupToExchange(ctx, transport, dnsName, dns.TypeAAAA, options, responseChecker)
}
var response4 []netip.Addr
@@ -271,7 +277,7 @@ func (c *Client) Lookup(ctx context.Context, transport adapter.DNSTransport, dom
if len(response4) == 0 && len(response6) == 0 {
return nil, err
}
return sortAddresses(response4, response6, options.Strategy), nil
return sortAddresses(response4, response6, strategy), nil
}
func (c *Client) ClearCache() {
@@ -527,12 +533,26 @@ func transportTagFromContext(ctx context.Context) (string, bool) {
return value, loaded
}
func FixedResponseStatus(message *dns.Msg, rcode int) *dns.Msg {
return &dns.Msg{
MsgHdr: dns.MsgHdr{
Id: message.Id,
Rcode: rcode,
Response: true,
},
Question: message.Question,
}
}
func FixedResponse(id uint16, question dns.Question, addresses []netip.Addr, timeToLive uint32) *dns.Msg {
response := dns.Msg{
MsgHdr: dns.MsgHdr{
Id: id,
Rcode: dns.RcodeSuccess,
Response: true,
Id: id,
Response: true,
Authoritative: true,
RecursionDesired: true,
RecursionAvailable: true,
Rcode: dns.RcodeSuccess,
},
Question: []dns.Question{question},
}
@@ -565,9 +585,12 @@ func FixedResponse(id uint16, question dns.Question, addresses []netip.Addr, tim
func FixedResponseCNAME(id uint16, question dns.Question, record string, timeToLive uint32) *dns.Msg {
response := dns.Msg{
MsgHdr: dns.MsgHdr{
Id: id,
Rcode: dns.RcodeSuccess,
Response: true,
Id: id,
Response: true,
Authoritative: true,
RecursionDesired: true,
RecursionAvailable: true,
Rcode: dns.RcodeSuccess,
},
Question: []dns.Question{question},
Answer: []dns.RR{
@@ -588,9 +611,12 @@ func FixedResponseCNAME(id uint16, question dns.Question, record string, timeToL
func FixedResponseTXT(id uint16, question dns.Question, records []string, timeToLive uint32) *dns.Msg {
response := dns.Msg{
MsgHdr: dns.MsgHdr{
Id: id,
Rcode: dns.RcodeSuccess,
Response: true,
Id: id,
Response: true,
Authoritative: true,
RecursionDesired: true,
RecursionAvailable: true,
Rcode: dns.RcodeSuccess,
},
Question: []dns.Question{question},
Answer: []dns.RR{
@@ -611,9 +637,12 @@ func FixedResponseTXT(id uint16, question dns.Question, records []string, timeTo
func FixedResponseMX(id uint16, question dns.Question, records []*net.MX, timeToLive uint32) *dns.Msg {
response := dns.Msg{
MsgHdr: dns.MsgHdr{
Id: id,
Rcode: dns.RcodeSuccess,
Response: true,
Id: id,
Response: true,
Authoritative: true,
RecursionDesired: true,
RecursionAvailable: true,
Rcode: dns.RcodeSuccess,
},
Question: []dns.Question{question},
}
+5
View File
@@ -285,7 +285,12 @@ func (r *Router) Exchange(ctx context.Context, message *mDNS.Msg, options adapte
} else if errors.Is(err, ErrResponseRejected) {
rejected = true
r.logger.DebugContext(ctx, E.Cause(err, "response rejected for ", FormatQuestion(message.Question[0].String())))
/*} else if responseCheck!= nil && errors.Is(err, RcodeError(mDNS.RcodeNameError)) {
rejected = true
r.logger.DebugContext(ctx, E.Cause(err, "response rejected for ", FormatQuestion(message.Question[0].String())))
*/
} else if len(message.Question) > 0 {
rejected = true
r.logger.ErrorContext(ctx, E.Cause(err, "exchange failed for ", FormatQuestion(message.Question[0].String())))
} else {
r.logger.ErrorContext(ctx, E.Cause(err, "exchange failed for <empty query>"))
-140
View File
@@ -1,140 +0,0 @@
package split
import (
"net/netip"
"strings"
"github.com/sagernet/sing-box/common/dialer"
"github.com/sagernet/sing-box/dns/transport"
"github.com/sagernet/sing-box/option"
"github.com/sagernet/sing/common"
E "github.com/sagernet/sing/common/exceptions"
M "github.com/sagernet/sing/common/metadata"
"github.com/godbus/dbus/v5"
)
type resolve1Manager Transport
type resolve1LinkNameserver struct {
Family int32
Address []byte
}
type resolve1LinkDomain struct {
Domain string
RoutingOnly bool
}
func (t *resolve1Manager) getLink(ifIndex uint32) (*TransportLink, error) {
link, loaded := t.links[ifIndex]
if !loaded {
link = &TransportLink{}
t.links[ifIndex] = link
iif, err := t.network.InterfaceFinder().ByIndex(int(ifIndex))
if err != nil {
return nil, dbus.MakeFailedError(err)
}
link.iif = iif
}
return link, nil
}
func (t *resolve1Manager) SetLinkDNS(ifIndex uint32, addresses []resolve1LinkNameserver) *dbus.Error {
t.linkAccess.Lock()
defer t.linkAccess.Unlock()
link, err := t.getLink(ifIndex)
if err != nil {
return dbus.MakeFailedError(err)
}
for _, ns := range link.nameservers {
ns.Close()
}
link.nameservers = link.nameservers[:0]
if len(addresses) > 0 {
serverDialer := common.Must1(dialer.NewDefault(t.ctx, option.DialerOptions{
BindInterface: link.iif.Name,
UDPFragmentDefault: true,
}))
var serverAddresses []netip.Addr
for _, address := range addresses {
serverAddr, ok := netip.AddrFromSlice(address.Address)
if !ok {
return dbus.MakeFailedError(E.New("invalid address"))
}
serverAddresses = append(serverAddresses, serverAddr)
}
for _, serverAddress := range serverAddresses {
link.nameservers = append(link.nameservers, transport.NewUDPRaw(t.logger, t.TransportAdapter, serverDialer, M.SocksaddrFrom(serverAddress, 53)))
}
t.logger.Info("SetLinkDNS ", link.iif.Name, " ", strings.Join(common.Map(serverAddresses, netip.Addr.String), ", "))
} else {
t.logger.Info("SetLinkDNS ", link.iif.Name, " (empty)")
}
return nil
}
func (t *resolve1Manager) SetLinkDomains(ifIndex uint32, domains []resolve1LinkDomain) *dbus.Error {
t.linkAccess.Lock()
defer t.linkAccess.Unlock()
link, err := t.getLink(ifIndex)
if err != nil {
return dbus.MakeFailedError(err)
}
link.domains = domains
if len(domains) > 0 {
t.logger.Info("SetLinkDomains ", link.iif.Name, " ", strings.Join(common.Map(domains, func(domain resolve1LinkDomain) string {
if !domain.RoutingOnly {
return domain.Domain
} else {
return domain.Domain + " (routing)"
}
}), ", "))
} else {
t.logger.Info("SetLinkDomains ", link.iif.Name, " (empty)")
}
return nil
}
// SetLinkDefaultRoute implements the org.freedesktop.resolve1.Manager D-Bus
// method. It marks whether this link may answer queries that match no
// configured domain.
func (t *resolve1Manager) SetLinkDefaultRoute(ifIndex uint32, defaultRoute bool) *dbus.Error {
	t.linkAccess.Lock()
	defer t.linkAccess.Unlock()
	link, lookupErr := t.getLink(ifIndex)
	if lookupErr != nil {
		return dbus.MakeFailedError(lookupErr)
	}
	link.defaultRoute = defaultRoute
	t.logger.Info("SetLinkDefaultRoute ", link.iif.Name, " ", defaultRoute)
	return nil
}
// The settings below are accepted for org.freedesktop.resolve1.Manager
// interface completeness but have no effect in this implementation.

// SetLinkLLMNR is a no-op; LLMNR modes are not supported.
func (t *resolve1Manager) SetLinkLLMNR(ifIndex uint32, llmnrMode string) {
}

// SetLinkMulticastDNS is a no-op; multicast DNS modes are not supported.
func (t *resolve1Manager) SetLinkMulticastDNS(ifIndex uint32, mdnsMode string) {
}

// SetLinkDNSOverTLS is a no-op; the DNS-over-TLS mode is ignored.
func (t *resolve1Manager) SetLinkDNSOverTLS(ifIndex uint32, dotMode string) {
}

// SetLinkDNSSEC is a no-op; DNSSEC modes are ignored.
func (t *resolve1Manager) SetLinkDNSSEC(ifIndex uint32, dnssecMode string) {
}

// SetLinkDNSSECNegativeTrustAnchors is a no-op; negative trust anchors are
// ignored.
func (t *resolve1Manager) SetLinkDNSSECNegativeTrustAnchors(ifIndex uint32, domains []string) {
}
// RevertLink implements the org.freedesktop.resolve1.Manager D-Bus method.
// It discards all per-link DNS configuration previously pushed for ifIndex.
func (t *resolve1Manager) RevertLink(ifIndex uint32) *dbus.Error {
	t.linkAccess.Lock()
	defer t.linkAccess.Unlock()
	link, err := t.getLink(ifIndex)
	if err != nil {
		return dbus.MakeFailedError(err)
	}
	// Close the link's DNS transports before forgetting the entry.
	// SetLinkDNS closes them when replacing the list; skipping the same
	// step here leaked their resources on revert.
	for _, ns := range link.nameservers {
		ns.Close()
	}
	delete(t.links, ifIndex)
	t.logger.Info("RevertLink ", link.iif.Name)
	return nil
}
// FlushCaches implements the org.freedesktop.resolve1.Manager D-Bus method
// by clearing the DNS router's response cache.
func (t *resolve1Manager) FlushCaches() {
	t.dnsRouter.ClearCache()
}
-191
View File
@@ -1,191 +0,0 @@
package split
import (
"context"
"strings"
"sync"
"github.com/sagernet/sing-box/adapter"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/dns"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
"github.com/sagernet/sing/common/control"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/logger"
"github.com/sagernet/sing/service"
"github.com/godbus/dbus/v5"
mDNS "github.com/miekg/dns"
)
// RegisterTransport registers the split DNS server constructor in the given
// DNS transport registry under the C.DNSTypeSplitDNS type string.
func RegisterTransport(registry *dns.TransportRegistry) {
	dns.RegisterTransport[option.SplitDNSServerOptions](registry, C.DNSTypeSplitDNS, NewTransport)
}
var _ adapter.DNSTransport = (*Transport)(nil)

// Transport is a DNS transport that impersonates systemd-resolved's D-Bus
// API and answers queries using the per-link DNS configuration pushed to it.
type Transport struct {
	dns.TransportAdapter

	ctx                    context.Context
	network                adapter.NetworkManager
	dnsRouter              adapter.DNSRouter
	logger                 logger.ContextLogger
	acceptDefaultResolvers bool
	linkAccess             sync.Mutex                // guards links and their contents
	links                  map[uint32]*TransportLink // keyed by interface index
}

// TransportLink holds the DNS state pushed over D-Bus for one network link.
type TransportLink struct {
	iif          *control.Interface     // resolved network interface for this link
	nameservers  []adapter.DNSTransport // upstream transports built by SetLinkDNS
	domains      []resolve1LinkDomain   // search/routing domains from SetLinkDomains
	defaultRoute bool                   // link may answer queries matching no domain
	dnsOverTLS   bool                   // NOTE(review): never written in this file — confirm intended use
}
// NewTransport creates the split DNS transport. It is only available on
// Linux, where Start later claims the systemd-resolved D-Bus name.
func NewTransport(ctx context.Context, logger log.ContextLogger, tag string, options option.SplitDNSServerOptions) (adapter.DNSTransport, error) {
	if !C.IsLinux {
		return nil, E.New("split DNS server is only supported on Linux")
	}
	return &Transport{
		// Register under the split DNS type. The previous C.DNSTypeDHCP here
		// was a copy-paste slip: Start's duplicate-instance check compares
		// transport.Type() against C.DNSTypeSplitDNS and could never match.
		TransportAdapter:       dns.NewTransportAdapter(C.DNSTypeSplitDNS, tag, nil),
		ctx:                    ctx,
		logger:                 logger,
		acceptDefaultResolvers: options.AcceptDefaultResolvers,
		network:                service.FromContext[adapter.NetworkManager](ctx),
		dnsRouter:              service.FromContext[adapter.DNSRouter](ctx),
		links:                  make(map[uint32]*TransportLink),
	}, nil
}
// Start implements the staged startup of the transport.
//
// During initialization it rejects configurations containing more than one
// split DNS server. During the start stage it claims the
// org.freedesktop.resolve1 well-known name on the system bus and exports
// this transport as the resolve1 Manager object, impersonating
// systemd-resolved for clients that talk to it over D-Bus.
func (t *Transport) Start(stage adapter.StartStage) error {
	switch stage {
	case adapter.StartStateInitialize:
		dnsTransportManager := service.FromContext[adapter.DNSTransportManager](t.ctx)
		for _, transport := range dnsTransportManager.Transports() {
			if transport.Type() == C.DNSTypeSplitDNS && transport != t {
				return E.New("multiple split DNS server are not supported")
			}
		}
	case adapter.StartStateStart:
		systemBus, err := dbus.SystemBus()
		if err != nil {
			return err
		}
		// DoNotQueue: fail immediately instead of waiting in line for the
		// name if another owner already holds it.
		reply, err := systemBus.RequestName("org.freedesktop.resolve1", dbus.NameFlagDoNotQueue)
		if err != nil {
			return err
		}
		switch reply {
		case dbus.RequestNameReplyPrimaryOwner:
		case dbus.RequestNameReplyExists:
			return E.New("D-Bus object already exists, maybe real resolved is running")
		default:
			return E.New("unknown request name reply: ", reply)
		}
		err = systemBus.Export((*resolve1Manager)(t), "/org/freedesktop/resolve1", "org.freedesktop.resolve1.Manager")
		if err != nil {
			return err
		}
	}
	return nil
}
// Close releases the transport's resources. There is nothing to tear down
// here; NOTE(review): the D-Bus name claimed in Start is not released and
// the per-link nameserver transports are not closed — confirm whether that
// is intentional.
func (t *Transport) Close() error {
	return nil
}
// Exchange answers a DNS query using the per-link configuration.
//
// A link with a domain suffix matching the query name is preferred;
// routing-only domains participate only when acceptDefaultResolvers is set.
// If no domain matches and acceptDefaultResolvers is set, a link marked as
// default route is used. With no candidate link at all, the query fails
// with NXDOMAIN. Address queries (A/AAAA) race the link's nameservers in
// parallel; other query types try them sequentially.
//
// NOTE(review): t.links is read here without holding t.linkAccess while the
// D-Bus handlers mutate it under the lock — confirm this is safe. Go map
// iteration order is random, so when several links match, which one wins is
// arbitrary.
func (t *Transport) Exchange(ctx context.Context, message *mDNS.Msg) (*mDNS.Msg, error) {
	question := message.Question[0]
	var selectedLink *TransportLink
	for _, link := range t.links {
		for _, domain := range link.domains {
			if domain.RoutingOnly && !t.acceptDefaultResolvers {
				continue
			}
			if strings.HasSuffix(question.Name, domain.Domain) {
				selectedLink = link
			}
		}
	}
	if selectedLink == nil && t.acceptDefaultResolvers {
		for _, link := range t.links {
			if link.defaultRoute {
				selectedLink = link
			}
		}
	}
	if selectedLink == nil {
		return nil, dns.RcodeNameError
	}
	if question.Qtype == mDNS.TypeA || question.Qtype == mDNS.TypeAAAA {
		return t.exchangeParallel(ctx, selectedLink.nameservers, message)
	} else {
		return t.exchangeSingleRequest(ctx, selectedLink.nameservers, message)
	}
}
// exchangeSingleRequest tries each nameserver in order and returns the first
// response that contains at least one address; a response without addresses
// counts as a failure. If every nameserver fails, all collected errors are
// returned together.
func (t *Transport) exchangeSingleRequest(ctx context.Context, transports []adapter.DNSTransport, message *mDNS.Msg) (*mDNS.Msg, error) {
	var collected []error
	for _, upstream := range transports {
		response, err := upstream.Exchange(ctx, message)
		if err == nil {
			if addresses, _ := dns.MessageToAddresses(response); len(addresses) > 0 {
				return response, nil
			}
			err = E.New("empty result")
		}
		collected = append(collected, err)
	}
	return nil, E.Errors(collected...)
}
// exchangeParallel races the query against every nameserver at once and
// returns the first response carrying at least one address. It fails only
// after every racer has reported an error, or when ctx is cancelled.
func (t *Transport) exchangeParallel(ctx context.Context, transports []adapter.DNSTransport, message *mDNS.Msg) (*mDNS.Msg, error) {
	// Closed on return so racers blocked on sending a result can exit
	// instead of leaking on the unbuffered channel.
	returned := make(chan struct{})
	defer close(returned)
	type queryResult struct {
		response *mDNS.Msg
		err      error
	}
	results := make(chan queryResult)
	startRacer := func(ctx context.Context, transport adapter.DNSTransport) {
		response, err := transport.Exchange(ctx, message)
		if err == nil {
			// A response without addresses counts as a failure so the race
			// keeps waiting for a usable answer.
			addresses, _ := dns.MessageToAddresses(response)
			if len(addresses) == 0 {
				err = E.New("empty result")
			}
		}
		select {
		case results <- queryResult{response, err}:
		case <-returned:
		}
	}
	// Cancelled on return, stopping the still-running queries once a winner
	// (or total failure) is decided.
	queryCtx, queryCancel := context.WithCancel(ctx)
	defer queryCancel()
	var nameCount int
	for _, fqdn := range transports {
		nameCount++
		go startRacer(queryCtx, fqdn)
	}
	var errors []error
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case result := <-results:
			if result.err == nil {
				return result.response, nil
			}
			errors = append(errors, result.err)
			// Every racer failed: surface the collected errors.
			if len(errors) == nameCount {
				return nil, E.Errors(errors...)
			}
		}
	}
}
+7 -3
View File
@@ -57,13 +57,17 @@ func NewTLS(ctx context.Context, logger log.ContextLogger, tag string, options o
if serverAddr.Port == 0 {
serverAddr.Port = 853
}
return NewTLSRaw(logger, dns.NewTransportAdapterWithRemoteOptions(C.DNSTypeTLS, tag, options.RemoteDNSServerOptions), transportDialer, serverAddr, tlsConfig), nil
}
func NewTLSRaw(logger logger.ContextLogger, adapter dns.TransportAdapter, dialer N.Dialer, serverAddr M.Socksaddr, tlsConfig tls.Config) *TLSTransport {
return &TLSTransport{
TransportAdapter: dns.NewTransportAdapterWithRemoteOptions(C.DNSTypeTLS, tag, options.RemoteDNSServerOptions),
TransportAdapter: adapter,
logger: logger,
dialer: transportDialer,
dialer: dialer,
serverAddr: serverAddr,
tlsConfig: tlsConfig,
}, nil
}
}
func (t *TLSTransport) Start(stage adapter.StartStage) error {
+1 -1
View File
@@ -33,7 +33,7 @@ func BaseContext(platformInterface PlatformInterface) context.Context {
})
}
}
return box.Context(context.Background(), include.InboundRegistry(), include.OutboundRegistry(), include.EndpointRegistry(), dnsRegistry)
return box.Context(context.Background(), include.InboundRegistry(), include.OutboundRegistry(), include.EndpointRegistry(), dnsRegistry, include.ServiceRegistry())
}
func parseConfig(ctx context.Context, configContent string) (option.Options, error) {
+13 -2
View File
@@ -7,13 +7,13 @@ import (
"github.com/sagernet/sing-box/adapter/endpoint"
"github.com/sagernet/sing-box/adapter/inbound"
"github.com/sagernet/sing-box/adapter/outbound"
"github.com/sagernet/sing-box/adapter/service"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/dns"
"github.com/sagernet/sing-box/dns/transport"
"github.com/sagernet/sing-box/dns/transport/fakeip"
"github.com/sagernet/sing-box/dns/transport/hosts"
"github.com/sagernet/sing-box/dns/transport/local"
"github.com/sagernet/sing-box/dns/transport/split"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
"github.com/sagernet/sing-box/protocol/anytls"
@@ -34,6 +34,7 @@ import (
"github.com/sagernet/sing-box/protocol/tun"
"github.com/sagernet/sing-box/protocol/vless"
"github.com/sagernet/sing-box/protocol/vmess"
"github.com/sagernet/sing-box/service/resolved"
E "github.com/sagernet/sing/common/exceptions"
)
@@ -111,7 +112,7 @@ func DNSTransportRegistry() *dns.TransportRegistry {
hosts.RegisterTransport(registry)
local.RegisterTransport(registry)
fakeip.RegisterTransport(registry)
split.RegisterTransport(registry)
resolved.RegisterTransport(registry)
registerQUICTransports(registry)
registerDHCPTransport(registry)
@@ -120,6 +121,16 @@ func DNSTransportRegistry() *dns.TransportRegistry {
return registry
}
// ServiceRegistry builds the registry of service constructors available in
// this build: the resolved service plus the DERP services (real
// implementations or build-tag stubs, depending on the with_tailscale tag).
func ServiceRegistry() *service.Registry {
	registry := service.NewRegistry()
	resolved.RegisterService(registry)
	registerDERPService(registry)
	return registry
}
func registerStubForRemovedInbounds(registry *inbound.Registry) {
inbound.Register[option.ShadowsocksInboundOptions](registry, C.TypeShadowsocksR, func(ctx context.Context, router adapter.Router, logger log.ContextLogger, tag string, options option.ShadowsocksInboundOptions) (adapter.Inbound, error) {
return nil, E.New("ShadowsocksR is deprecated and removed in sing-box 1.6.0")
+7
View File
@@ -4,8 +4,10 @@ package include
import (
"github.com/sagernet/sing-box/adapter/endpoint"
"github.com/sagernet/sing-box/adapter/service"
"github.com/sagernet/sing-box/dns"
"github.com/sagernet/sing-box/protocol/tailscale"
"github.com/sagernet/sing-box/service/derp"
)
func registerTailscaleEndpoint(registry *endpoint.Registry) {
@@ -15,3 +17,8 @@ func registerTailscaleEndpoint(registry *endpoint.Registry) {
func registerTailscaleTransport(registry *dns.TransportRegistry) {
tailscale.RegistryTransport(registry)
}
// registerDERPService registers the real DERP and DERP STUN service
// constructors (with_tailscale build).
func registerDERPService(registry *service.Registry) {
	derp.Register(registry)
	derp.RegisterSTUN(registry)
}
+10
View File
@@ -7,6 +7,7 @@ import (
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/adapter/endpoint"
"github.com/sagernet/sing-box/adapter/service"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/dns"
"github.com/sagernet/sing-box/log"
@@ -25,3 +26,12 @@ func registerTailscaleTransport(registry *dns.TransportRegistry) {
return nil, E.New(`Tailscale is not included in this build, rebuild with -tags with_tailscale`)
})
}
// registerDERPService registers placeholder constructors that always fail,
// telling the user to rebuild with -tags with_tailscale.
// NOTE(review): both stubs are registered under C.TypeDERP, so the second
// registration presumably clobbers or conflicts with the first; the non-stub
// build registers DERP and DERP STUN separately — confirm the STUN stub
// should use its own type constant.
func registerDERPService(registry *service.Registry) {
	service.Register[option.DERPServiceOptions](registry, C.TypeDERP, func(ctx context.Context, logger log.ContextLogger, tag string, options option.DERPServiceOptions) (adapter.Service, error) {
		return nil, E.New(`DERP is not included in this build, rebuild with -tags with_tailscale`)
	})
	service.Register[option.DERPSTUNServiceOptions](registry, C.TypeDERP, func(ctx context.Context, logger log.ContextLogger, tag string, options option.DERPSTUNServiceOptions) (adapter.Service, error) {
		return nil, E.New(`STUN (DERP) is not included in this build, rebuild with -tags with_tailscale`)
	})
}
-5
View File
@@ -121,7 +121,6 @@ type LegacyDNSFakeIPOptions struct {
type DNSTransportOptionsRegistry interface {
CreateOptions(transportType string) (any, bool)
}
type _DNSServerOptions struct {
Type string `json:"type,omitempty"`
Tag string `json:"tag,omitempty"`
@@ -387,7 +386,3 @@ type DHCPDNSServerOptions struct {
LocalDNSServerOptions
Interface string `json:"interface,omitempty"`
}
type SplitDNSServerOptions struct {
AcceptDefaultResolvers bool `json:"accept_default_resolvers,omitempty"`
}
+2 -2
View File
@@ -32,11 +32,11 @@ func (h *Endpoint) UnmarshalJSONContext(ctx context.Context, content []byte) err
}
registry := service.FromContext[EndpointOptionsRegistry](ctx)
if registry == nil {
return E.New("missing Endpoint fields registry in context")
return E.New("missing endpoint fields registry in context")
}
options, loaded := registry.CreateOptions(h.Type)
if !loaded {
return E.New("unknown inbound type: ", h.Type)
return E.New("unknown endpoint type: ", h.Type)
}
err = badjson.UnmarshallExcludedContext(ctx, content, (*_Endpoint)(h), options)
if err != nil {
+1 -1
View File
@@ -34,7 +34,7 @@ func (h *Inbound) UnmarshalJSONContext(ctx context.Context, content []byte) erro
}
registry := service.FromContext[InboundOptionsRegistry](ctx)
if registry == nil {
return E.New("missing Inbound fields registry in context")
return E.New("missing inbound fields registry in context")
}
options, loaded := registry.CreateOptions(h.Type)
if !loaded {
+1
View File
@@ -19,6 +19,7 @@ type _Options struct {
Inbounds []Inbound `json:"inbounds,omitempty"`
Outbounds []Outbound `json:"outbounds,omitempty"`
Route *RouteOptions `json:"route,omitempty"`
Services []Service `json:"services,omitempty"`
Experimental *ExperimentalOptions `json:"experimental,omitempty"`
}
+49
View File
@@ -0,0 +1,49 @@
package option
import (
"context"
"net/netip"
"github.com/sagernet/sing/common"
"github.com/sagernet/sing/common/json"
"github.com/sagernet/sing/common/json/badoption"
)
// _ResolvedServiceOptions is the raw wire shape of the resolved service
// configuration; the exported alias below attaches default handling.
type _ResolvedServiceOptions struct {
	ListenOptions
}

type ResolvedServiceOptions _ResolvedServiceOptions

// MarshalJSONContext omits the listen address and port when they equal the
// defaults applied by UnmarshalJSONContext (127.0.0.53:53), so round-tripped
// configurations stay minimal.
func (r ResolvedServiceOptions) MarshalJSONContext(ctx context.Context) ([]byte, error) {
	if r.Listen != nil && netip.Addr(*r.Listen) == (netip.AddrFrom4([4]byte{127, 0, 0, 53})) {
		r.Listen = nil
	}
	if r.ListenPort == 53 {
		r.ListenPort = 0
	}
	return json.MarshalContext(ctx, (*_ResolvedServiceOptions)(&r))
}
// UnmarshalJSONContext decodes the options strictly (unknown fields are
// rejected), then fills in defaults for any field left unset: listen on
// 127.0.0.53, port 53 — the address systemd-resolved's stub listener uses.
func (r *ResolvedServiceOptions) UnmarshalJSONContext(ctx context.Context, bytes []byte) error {
	err := json.UnmarshalContextDisallowUnknownFields(ctx, bytes, (*_ResolvedServiceOptions)(r))
	if err != nil {
		return err
	}
	if r.Listen == nil {
		r.Listen = (*badoption.Addr)(common.Ptr(netip.AddrFrom4([4]byte{127, 0, 0, 53})))
	}
	if r.ListenPort == 0 {
		r.ListenPort = 53
	}
	return nil
}
// SplitDNSServerOptions configures the split DNS server.
type SplitDNSServerOptions struct {
	// Service presumably names a resolved service by tag — verify against
	// the code that consumes it.
	// NOTE(review): the json tag "Service" is capitalized, unlike every other
	// snake_case tag in this package — confirm whether "service" was intended
	// before users start depending on the current key.
	Service                string `json:"Service"`
	AcceptDefaultResolvers bool   `json:"accept_default_resolvers,omitempty"`
	// NDots int `json:"ndots,omitempty"`
	// Timeout badoption.Duration `json:"timeout,omitempty"`
	// Attempts int `json:"attempts,omitempty"`
	// Rotate bool `json:"rotate,omitempty"`
}
+47
View File
@@ -0,0 +1,47 @@
package option
import (
"context"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/json"
"github.com/sagernet/sing/common/json/badjson"
"github.com/sagernet/sing/service"
)
// ServiceOptionsRegistry resolves a service type string to a fresh, empty
// options value of the matching concrete type.
type ServiceOptionsRegistry interface {
	CreateOptions(serviceType string) (any, bool)
}

// _Service is the raw wire shape; Options holds the type-specific fields
// and is excluded from direct (un)marshalling via the "-" tag.
type _Service struct {
	Type    string `json:"type"`
	Tag     string `json:"tag,omitempty"`
	Options any    `json:"-"`
}

type Service _Service

// MarshalJSONContext flattens the common fields and the type-specific
// options into a single JSON object.
func (h *Service) MarshalJSONContext(ctx context.Context) ([]byte, error) {
	return badjson.MarshallObjectsContext(ctx, (*_Service)(h), h.Options)
}
// UnmarshalJSONContext decodes the common fields first, then looks up the
// service options registry from ctx and decodes the remaining fields into
// the concrete options struct for h.Type.
func (h *Service) UnmarshalJSONContext(ctx context.Context, content []byte) error {
	err := json.UnmarshalContext(ctx, content, (*_Service)(h))
	if err != nil {
		return err
	}
	registry := service.FromContext[ServiceOptionsRegistry](ctx)
	if registry == nil {
		return E.New("missing service fields registry in context")
	}
	options, loaded := registry.CreateOptions(h.Type)
	if !loaded {
		// Previously said "unknown inbound type" — copied from the inbound
		// unmarshaller; this is the service unmarshaller.
		return E.New("unknown service type: ", h.Type)
	}
	err = badjson.UnmarshallExcludedContext(ctx, content, (*_Service)(h), options)
	if err != nil {
		return err
	}
	h.Options = options
	return nil
}
+62
View File
@@ -2,6 +2,12 @@ package option
import (
"net/netip"
"net/url"
"reflect"
"github.com/sagernet/sing/common/json"
"github.com/sagernet/sing/common/json/badoption"
M "github.com/sagernet/sing/common/metadata"
)
type TailscaleEndpointOptions struct {
@@ -22,3 +28,59 @@ type TailscaleDNSServerOptions struct {
Endpoint string `json:"endpoint,omitempty"`
AcceptDefaultResolvers bool `json:"accept_default_resolvers,omitempty"`
}
// DERPServiceOptions configures a DERP relay service.
type DERPServiceOptions struct {
	ListenOptions
	InboundTLSOptionsContainer
	ConfigPath           string                                         `json:"config_path,omitempty"`
	VerifyClientEndpoint badoption.Listable[string]                     `json:"verify_client_endpoint,omitempty"`
	VerifyClientURL      badoption.Listable[DERPVerifyClientURLOptions] `json:"verify_client_url,omitempty"`
	MeshWith             badoption.Listable[DERPMeshOptions]            `json:"mesh_with,omitempty"`
	MeshPSK              string                                         `json:"mesh_psk,omitempty"`  // inline pre-shared key for meshing — TODO confirm format
	MeshPSKFile          string                                         `json:"mesh_psk_file,omitempty"` // path to a file holding the mesh PSK
	DomainResolver       *DomainResolveOptions                          `json:"domain_resolver,omitempty"`
}
// _DERPVerifyClientURLOptions is the object form of the verify-client URL
// configuration; the exported alias also accepts a bare URL string.
type _DERPVerifyClientURLOptions struct {
	URL string `json:"url,omitempty"`
	DialerOptions
}

type DERPVerifyClientURLOptions _DERPVerifyClientURLOptions

// ServerIsDomain reports whether the verify URL's host is a domain name
// rather than an IP address; a malformed URL reports false.
// NOTE(review): this checks verifyURL.Host, which still contains a ":port"
// suffix when one is present and would then fail IsDomainName — confirm
// whether Hostname() was intended.
func (d DERPVerifyClientURLOptions) ServerIsDomain() bool {
	verifyURL, err := url.Parse(d.URL)
	if err != nil {
		return false
	}
	return M.IsDomainName(verifyURL.Host)
}
// MarshalJSON emits the compact string form (just the URL) when no dialer
// options are set, mirroring UnmarshalJSON's string acceptance; otherwise
// it emits the full object form.
//
// The previous condition compared d against _DERPVerifyClientURLOptions{}:
// reflect.DeepEqual is always false for values of distinct types, so the
// string form was never produced — and even with matching types, a value
// with a non-empty URL could never equal the zero struct.
func (d DERPVerifyClientURLOptions) MarshalJSON() ([]byte, error) {
	if reflect.DeepEqual(d.DialerOptions, DialerOptions{}) {
		return json.Marshal(d.URL)
	}
	return json.Marshal(_DERPVerifyClientURLOptions(d))
}
// UnmarshalJSON accepts either a bare URL string or the full object form.
func (d *DERPVerifyClientURLOptions) UnmarshalJSON(bytes []byte) error {
	var stringValue string
	if err := json.Unmarshal(bytes, &stringValue); err == nil {
		// Compact form: the whole value is the URL.
		d.URL = stringValue
		return nil
	}
	return json.Unmarshal(bytes, (*_DERPVerifyClientURLOptions)(d))
}
// DERPMeshOptions identifies a peer DERP server to mesh with.
type DERPMeshOptions struct {
	ServerOptions
	Host string `json:"host,omitempty"`
	OutboundTLSOptionsContainer
	DialerOptions
}

// DERPSTUNServiceOptions configures the standalone DERP STUN service; it
// only carries listen settings.
type DERPSTUNServiceOptions struct {
	ListenOptions
}

Some files were not shown because too many files have changed in this diff Show More