Update On Thu Jan 22 16:29:55 CET 2026

This commit is contained in:
github-action[bot]
2026-01-22 16:29:55 +01:00
parent 30c154cf90
commit a560e4410e
380 changed files with 3753 additions and 200336 deletions
+1
View File
@@ -1247,3 +1247,4 @@ Update On Thu Jan 15 19:49:36 CET 2026
Update On Fri Jan 16 19:44:08 CET 2026 Update On Fri Jan 16 19:44:08 CET 2026
Update On Sat Jan 17 19:40:03 CET 2026 Update On Sat Jan 17 19:40:03 CET 2026
Update On Sun Jan 18 19:40:07 CET 2026 Update On Sun Jan 18 19:40:07 CET 2026
Update On Thu Jan 22 16:29:47 CET 2026
@@ -114,7 +114,7 @@ func patchProviders(cfg *config.RawConfig, profileDir string) error {
} else if url, ok := provider["url"].(string); ok { } else if url, ok := provider["url"].(string); ok {
path = prefix + "/" + utils.MakeHash([]byte(url)).String() // same as C.GetPathByHash path = prefix + "/" + utils.MakeHash([]byte(url)).String() // same as C.GetPathByHash
} else { } else {
return // both path and url is empty, WTF??? return // both path and url are empty, maybe inline provider
} }
provider["path"] = profileDir + "/providers/" + path provider["path"] = profileDir + "/providers/" + path
}) })
+3 -3
View File
@@ -26,14 +26,14 @@ require (
github.com/metacubex/quic-go v0.59.1-0.20260112033758-aa29579f2001 github.com/metacubex/quic-go v0.59.1-0.20260112033758-aa29579f2001
github.com/metacubex/randv2 v0.2.0 github.com/metacubex/randv2 v0.2.0
github.com/metacubex/restls-client-go v0.1.7 github.com/metacubex/restls-client-go v0.1.7
github.com/metacubex/sing v0.5.6 github.com/metacubex/sing v0.5.7
github.com/metacubex/sing-mux v0.3.4 github.com/metacubex/sing-mux v0.3.5
github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e
github.com/metacubex/sing-shadowsocks v0.2.12 github.com/metacubex/sing-shadowsocks v0.2.12
github.com/metacubex/sing-shadowsocks2 v0.2.7 github.com/metacubex/sing-shadowsocks2 v0.2.7
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2
github.com/metacubex/sing-tun v0.4.12 github.com/metacubex/sing-tun v0.4.12
github.com/metacubex/sing-vmess v0.2.4 github.com/metacubex/sing-vmess v0.2.5
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f
github.com/metacubex/smux v0.0.0-20260105030934-d0c8756d3141 github.com/metacubex/smux v0.0.0-20260105030934-d0c8756d3141
github.com/metacubex/tfo-go v0.0.0-20251130171125-413e892ac443 github.com/metacubex/tfo-go v0.0.0-20251130171125-413e892ac443
+6 -6
View File
@@ -115,10 +115,10 @@ github.com/metacubex/randv2 v0.2.0 h1:uP38uBvV2SxYfLj53kuvAjbND4RUDfFJjwr4UigMiL
github.com/metacubex/randv2 v0.2.0/go.mod h1:kFi2SzrQ5WuneuoLLCMkABtiBu6VRrMrWFqSPyj2cxY= github.com/metacubex/randv2 v0.2.0/go.mod h1:kFi2SzrQ5WuneuoLLCMkABtiBu6VRrMrWFqSPyj2cxY=
github.com/metacubex/restls-client-go v0.1.7 h1:eCwiXCTQb5WJu9IlgYvDBA1OgrINv58dEe7hcN5H15k= github.com/metacubex/restls-client-go v0.1.7 h1:eCwiXCTQb5WJu9IlgYvDBA1OgrINv58dEe7hcN5H15k=
github.com/metacubex/restls-client-go v0.1.7/go.mod h1:BN/U52vPw7j8VTSh2vleD/MnmVKCov84mS5VcjVHH4g= github.com/metacubex/restls-client-go v0.1.7/go.mod h1:BN/U52vPw7j8VTSh2vleD/MnmVKCov84mS5VcjVHH4g=
github.com/metacubex/sing v0.5.6 h1:mEPDCadsCj3DB8gn+t/EtposlYuALEkExa/LUguw6/c= github.com/metacubex/sing v0.5.7 h1:8OC+fhKFSv/l9ehEhJRaZZAOuthfZo68SteBVLe8QqM=
github.com/metacubex/sing v0.5.6/go.mod h1:ypf0mjwlZm0sKdQSY+yQvmsbWa0hNPtkeqyRMGgoN+w= github.com/metacubex/sing v0.5.7/go.mod h1:ypf0mjwlZm0sKdQSY+yQvmsbWa0hNPtkeqyRMGgoN+w=
github.com/metacubex/sing-mux v0.3.4 h1:tf4r27CIkzaxq9kBlAXQkgMXq2HPp5Mta60Kb4RCZF0= github.com/metacubex/sing-mux v0.3.5 h1:UqVN+o62SR8kJaC9/3VfOc5UiVqgVY/ef9WwfGYYkk0=
github.com/metacubex/sing-mux v0.3.4/go.mod h1:SEJfAuykNj/ozbPqngEYqyggwSr81+L7Nu09NRD5mh4= github.com/metacubex/sing-mux v0.3.5/go.mod h1:8bT7ZKT3clRrJjYc/x5CRYibC1TX/bK73a3r3+2E+Fc=
github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e h1:MLxp42z9Jd6LtY2suyawnl24oNzIsFxWc15bNeDIGxA= github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e h1:MLxp42z9Jd6LtY2suyawnl24oNzIsFxWc15bNeDIGxA=
github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e/go.mod h1:+lgKTd52xAarGtqugALISShyw4KxnoEpYe2u0zJh26w= github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e/go.mod h1:+lgKTd52xAarGtqugALISShyw4KxnoEpYe2u0zJh26w=
github.com/metacubex/sing-shadowsocks v0.2.12 h1:Wqzo8bYXrK5aWqxu/TjlTnYZzAKtKsaFQBdr6IHFaBE= github.com/metacubex/sing-shadowsocks v0.2.12 h1:Wqzo8bYXrK5aWqxu/TjlTnYZzAKtKsaFQBdr6IHFaBE=
@@ -129,8 +129,8 @@ github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 h1:gXU+MY
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2/go.mod h1:mbfboaXauKJNIHJYxQRa+NJs4JU9NZfkA+I33dS2+9E= github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2/go.mod h1:mbfboaXauKJNIHJYxQRa+NJs4JU9NZfkA+I33dS2+9E=
github.com/metacubex/sing-tun v0.4.12 h1:LCi+yB7y97X3cHQGdNXQBMQNHAzpP4AWg7YhSLk+LTM= github.com/metacubex/sing-tun v0.4.12 h1:LCi+yB7y97X3cHQGdNXQBMQNHAzpP4AWg7YhSLk+LTM=
github.com/metacubex/sing-tun v0.4.12/go.mod h1:L/TjQY5JEGy8nvsuYmy/XgMFMCPiF0+AWSFCYfS6r9w= github.com/metacubex/sing-tun v0.4.12/go.mod h1:L/TjQY5JEGy8nvsuYmy/XgMFMCPiF0+AWSFCYfS6r9w=
github.com/metacubex/sing-vmess v0.2.4 h1:Tx6AGgCiEf400E/xyDuYyafsel6sGbR8oF7RkAaus6I= github.com/metacubex/sing-vmess v0.2.5 h1:m9Zt5I27lB9fmLMZfism9sH2LcnAfShZfwSkf6/KJoE=
github.com/metacubex/sing-vmess v0.2.4/go.mod h1:21R5R1u90uUvBQF0owoooEu96/SAYYD56nDrwm6nFaM= github.com/metacubex/sing-vmess v0.2.5/go.mod h1:AwtlzUgf8COe9tRYAKqWZ+leDH7p5U98a0ZUpYehl8Q=
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f h1:Sr/DYKYofKHKc4GF3qkRGNuj6XA6c0eqPgEDN+VAsYU= github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f h1:Sr/DYKYofKHKc4GF3qkRGNuj6XA6c0eqPgEDN+VAsYU=
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f/go.mod h1:jpAkVLPnCpGSfNyVmj6Cq4YbuZsFepm/Dc+9BAOcR80= github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f/go.mod h1:jpAkVLPnCpGSfNyVmj6Cq4YbuZsFepm/Dc+9BAOcR80=
github.com/metacubex/smux v0.0.0-20260105030934-d0c8756d3141 h1:DK2l6m2Fc85H2BhiAPgbJygiWhesPlfGmF+9Vw6ARdk= github.com/metacubex/smux v0.0.0-20260105030934-d0c8756d3141 h1:DK2l6m2Fc85H2BhiAPgbJygiWhesPlfGmF+9Vw6ARdk=
+3 -5
View File
@@ -42,7 +42,6 @@ type websocketWithEarlyDataConn struct {
net.Conn net.Conn
wsWriter N.ExtendedWriter wsWriter N.ExtendedWriter
underlay net.Conn underlay net.Conn
closed bool
dialed chan bool dialed chan bool
cancel context.CancelFunc cancel context.CancelFunc
ctx context.Context ctx context.Context
@@ -204,7 +203,7 @@ func (wsedc *websocketWithEarlyDataConn) Dial(earlyData []byte) error {
} }
func (wsedc *websocketWithEarlyDataConn) Write(b []byte) (int, error) { func (wsedc *websocketWithEarlyDataConn) Write(b []byte) (int, error) {
if wsedc.closed { if wsedc.ctx.Err() != nil {
return 0, io.ErrClosedPipe return 0, io.ErrClosedPipe
} }
if wsedc.Conn == nil { if wsedc.Conn == nil {
@@ -218,7 +217,7 @@ func (wsedc *websocketWithEarlyDataConn) Write(b []byte) (int, error) {
} }
func (wsedc *websocketWithEarlyDataConn) WriteBuffer(buffer *buf.Buffer) error { func (wsedc *websocketWithEarlyDataConn) WriteBuffer(buffer *buf.Buffer) error {
if wsedc.closed { if wsedc.ctx.Err() != nil {
return io.ErrClosedPipe return io.ErrClosedPipe
} }
if wsedc.Conn == nil { if wsedc.Conn == nil {
@@ -232,7 +231,7 @@ func (wsedc *websocketWithEarlyDataConn) WriteBuffer(buffer *buf.Buffer) error {
} }
func (wsedc *websocketWithEarlyDataConn) Read(b []byte) (int, error) { func (wsedc *websocketWithEarlyDataConn) Read(b []byte) (int, error) {
if wsedc.closed { if wsedc.ctx.Err() != nil {
return 0, io.ErrClosedPipe return 0, io.ErrClosedPipe
} }
if wsedc.Conn == nil { if wsedc.Conn == nil {
@@ -246,7 +245,6 @@ func (wsedc *websocketWithEarlyDataConn) Read(b []byte) (int, error) {
} }
func (wsedc *websocketWithEarlyDataConn) Close() error { func (wsedc *websocketWithEarlyDataConn) Close() error {
wsedc.closed = true
wsedc.cancel() wsedc.cancel()
if wsedc.Conn == nil { // is dialing or not dialed if wsedc.Conn == nil { // is dialing or not dialed
return wsedc.underlay.Close() return wsedc.underlay.Close()
+4 -4
View File
@@ -2,10 +2,10 @@
"manifest_version": 1, "manifest_version": 1,
"latest": { "latest": {
"mihomo": "v1.19.19", "mihomo": "v1.19.19",
"mihomo_alpha": "alpha-0c995a2", "mihomo_alpha": "alpha-1e1434d",
"clash_rs": "v0.9.3", "clash_rs": "v0.9.4",
"clash_premium": "2023-09-05-gdcc8d87", "clash_premium": "2023-09-05-gdcc8d87",
"clash_rs_alpha": "0.9.3-alpha+sha.1c4e5b1" "clash_rs_alpha": "0.9.4-alpha+sha.348bd90"
}, },
"arch_template": { "arch_template": {
"mihomo": { "mihomo": {
@@ -69,5 +69,5 @@
"linux-armv7hf": "clash-armv7-unknown-linux-gnueabihf" "linux-armv7hf": "clash-armv7-unknown-linux-gnueabihf"
} }
}, },
"updated_at": "2026-01-17T22:21:31.032Z" "updated_at": "2026-01-21T22:22:18.748Z"
} }
+10 -10
View File
@@ -1,31 +1,31 @@
module github.com/Loyalsoldier/geoip module github.com/Loyalsoldier/geoip
go 1.24 go 1.24.0
require ( require (
github.com/klauspost/compress v1.18.0 github.com/klauspost/compress v1.18.3
github.com/maxmind/mmdbwriter v1.0.0 github.com/maxmind/mmdbwriter v1.2.0
github.com/oschwald/geoip2-golang v1.13.0 github.com/oschwald/geoip2-golang/v2 v2.1.0
github.com/oschwald/maxminddb-golang v1.13.1 github.com/oschwald/maxminddb-golang/v2 v2.1.1
github.com/sagernet/sing-box v1.12.4 github.com/sagernet/sing-box v1.12.17
github.com/spf13/cobra v1.10.1 github.com/spf13/cobra v1.10.2
github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a
github.com/tidwall/gjson v1.18.0 github.com/tidwall/gjson v1.18.0
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
google.golang.org/protobuf v1.36.8 google.golang.org/protobuf v1.36.11
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
) )
require ( require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/miekg/dns v1.1.67 // indirect github.com/miekg/dns v1.1.67 // indirect
github.com/sagernet/sing v0.7.6-0.20250825114712-2aeec120ce28 // indirect github.com/sagernet/sing v0.7.14 // indirect
github.com/spf13/pflag v1.0.9 // indirect github.com/spf13/pflag v1.0.9 // indirect
github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/pretty v1.2.0 // indirect
golang.org/x/mod v0.27.0 // indirect golang.org/x/mod v0.27.0 // indirect
golang.org/x/net v0.43.0 // indirect golang.org/x/net v0.43.0 // indirect
golang.org/x/sync v0.16.0 // indirect golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.35.0 // indirect golang.org/x/sys v0.38.0 // indirect
golang.org/x/tools v0.36.0 // indirect golang.org/x/tools v0.36.0 // indirect
) )
+21 -20
View File
@@ -5,29 +5,29 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/maxmind/mmdbwriter v1.0.0 h1:bieL4P6yaYaHvbtLSwnKtEvScUKKD6jcKaLiTM3WSMw= github.com/maxmind/mmdbwriter v1.2.0 h1:hyvDopImmgvle3aR8AaddxXnT0iQH2KWJX3vNfkwzYM=
github.com/maxmind/mmdbwriter v1.0.0/go.mod h1:noBMCUtyN5PUQ4H8ikkOvGSHhzhLok51fON2hcrpKj8= github.com/maxmind/mmdbwriter v1.2.0/go.mod h1:EQmKHhk2y9DRVvyNxwCLKC5FrkXZLx4snc5OlLY5XLE=
github.com/miekg/dns v1.1.67 h1:kg0EHj0G4bfT5/oOys6HhZw4vmMlnoZ+gDu8tJ/AlI0= github.com/miekg/dns v1.1.67 h1:kg0EHj0G4bfT5/oOys6HhZw4vmMlnoZ+gDu8tJ/AlI0=
github.com/miekg/dns v1.1.67/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/miekg/dns v1.1.67/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/oschwald/geoip2-golang v1.13.0 h1:Q44/Ldc703pasJeP5V9+aFSZFmBN7DKHbNsSFzQATJI= github.com/oschwald/geoip2-golang/v2 v2.1.0 h1:DjnLhNJu9WHwTrmoiQFvgmyJoczhdnm7LB23UBI2Amo=
github.com/oschwald/geoip2-golang v1.13.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo= github.com/oschwald/geoip2-golang/v2 v2.1.0/go.mod h1:qdVmcPgrTJ4q2eP9tHq/yldMTdp2VMr33uVdFbHBiBc=
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE= github.com/oschwald/maxminddb-golang/v2 v2.1.1 h1:lA8FH0oOrM4u7mLvowq8IT6a3Q/qEnqRzLQn9eH5ojc=
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8= github.com/oschwald/maxminddb-golang/v2 v2.1.1/go.mod h1:PLdx6PR+siSIoXqqy7C7r3SB3KZnhxWr1Dp6g0Hacl8=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagernet/sing v0.7.6-0.20250825114712-2aeec120ce28 h1:C8Lnqd0Q+C15kwaMiDsfq5S45rhhaQMBG91TT+6oFVo= github.com/sagernet/sing v0.7.14 h1:5QQRDCUvYNOMyVp3LuK/hYEBAIv0VsbD3x/l9zH467s=
github.com/sagernet/sing v0.7.6-0.20250825114712-2aeec120ce28/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak= github.com/sagernet/sing v0.7.14/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak=
github.com/sagernet/sing-box v1.12.4 h1:FaKRKIorsior0nzjCoPSESonjHnmSOD/moKIA56OkLM= github.com/sagernet/sing-box v1.12.17 h1:Nrns3lgWuK7b5sB3IcKJnVu4SJ7B6g7MLLH1evlDDgU=
github.com/sagernet/sing-box v1.12.4/go.mod h1:sYGo3cT+O9YwEucIqz+2JBrNTnvuU2F/+vpnbhuV3Qc= github.com/sagernet/sing-box v1.12.17/go.mod h1:q/gYlJOSeHXubCCKM9MiDqzWWNi0cgi4ySYd1kasIts=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a h1:a6TNDN9CgG+cYjaeN8l2mc4kSz2iMiCDQxPEyltUV/I= github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a h1:a6TNDN9CgG+cYjaeN8l2mc4kSz2iMiCDQxPEyltUV/I=
github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo= github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
@@ -36,6 +36,7 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
@@ -44,12 +45,12 @@ golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+127 -25
View File
@@ -8,8 +8,8 @@ import (
"strings" "strings"
"github.com/Loyalsoldier/geoip/lib" "github.com/Loyalsoldier/geoip/lib"
"github.com/oschwald/geoip2-golang" "github.com/oschwald/geoip2-golang/v2"
"github.com/oschwald/maxminddb-golang" "github.com/oschwald/maxminddb-golang/v2"
) )
var ( var (
@@ -20,6 +20,15 @@ var (
defaultIPInfoOutputDir = filepath.Join("./", "output", "ipinfo") defaultIPInfoOutputDir = filepath.Join("./", "output", "ipinfo")
) )
// Reference: https://github.com/oschwald/geoip2-golang/blob/HEAD/models.go
var (
zeroDBIPLanguageNames dbipLanguageNames
zeroDBIPContinent dbipContinent
zeroDBIPCountryRecord dbipCountryRecord
zeroDBIPRepresentedCountry dbipRepresentedCountry
zeroDBIPCountry dbipCountry
)
// Reference: https://ipinfo.io/lite // Reference: https://ipinfo.io/lite
type ipInfoLite struct { type ipInfoLite struct {
ASN string `maxminddb:"asn"` ASN string `maxminddb:"asn"`
@@ -31,6 +40,66 @@ type ipInfoLite struct {
CountryCode string `maxminddb:"country_code"` CountryCode string `maxminddb:"country_code"`
} }
// Reference: https://github.com/oschwald/geoip2-golang/blob/HEAD/models.go
type dbipLanguageNames struct {
geoip2.Names
// Persian localized name
Persian string `json:"fa,omitzero" maxminddb:"fa"`
// Korean localized name
Korean string `json:"ko,omitzero" maxminddb:"ko"`
}
func (d dbipLanguageNames) HasData() bool {
return d != zeroDBIPLanguageNames
}
// Reference: https://github.com/oschwald/geoip2-golang/blob/HEAD/models.go
type dbipContinent struct {
geoip2.Continent
Names dbipLanguageNames `json:"names,omitzero" maxminddb:"names"`
}
func (d dbipContinent) HasData() bool {
return d != zeroDBIPContinent
}
// Reference: https://github.com/oschwald/geoip2-golang/blob/HEAD/models.go
type dbipCountryRecord struct {
geoip2.CountryRecord
Names dbipLanguageNames `json:"names,omitzero" maxminddb:"names"`
}
func (d dbipCountryRecord) HasData() bool {
return d != zeroDBIPCountryRecord
}
// Reference: https://github.com/oschwald/geoip2-golang/blob/HEAD/models.go
type dbipRepresentedCountry struct {
geoip2.RepresentedCountry
Names dbipLanguageNames `json:"names,omitzero" maxminddb:"names"`
}
func (d dbipRepresentedCountry) HasData() bool {
return d != zeroDBIPRepresentedCountry
}
// Reference: https://github.com/oschwald/geoip2-golang/blob/HEAD/models.go
type dbipCountry struct {
Traits geoip2.CountryTraits `json:"traits,omitzero" maxminddb:"traits"`
Continent dbipContinent `json:"continent,omitzero" maxminddb:"continent"`
RepresentedCountry dbipRepresentedCountry `json:"represented_country,omitzero" maxminddb:"represented_country"`
Country dbipCountryRecord `json:"country,omitzero" maxminddb:"country"`
RegisteredCountry dbipCountryRecord `json:"registered_country,omitzero" maxminddb:"registered_country"`
}
func (d dbipCountry) HasData() bool {
return d != zeroDBIPCountry
}
func newGeoLite2CountryMMDBOut(iType string, iDesc string, action lib.Action, data json.RawMessage) (lib.OutputConverter, error) { func newGeoLite2CountryMMDBOut(iType string, iDesc string, action lib.Action, data json.RawMessage) (lib.OutputConverter, error) {
var tmp struct { var tmp struct {
OutputName string `json:"outputName"` OutputName string `json:"outputName"`
@@ -98,26 +167,25 @@ func (g *GeoLite2CountryMMDBOut) GetExtraInfo() (map[string]any, error) {
return nil, err return nil, err
} }
db, err := maxminddb.FromBytes(content) db, err := maxminddb.OpenBytes(content)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer db.Close() defer db.Close()
infoList := make(map[string]any) infoList := make(map[string]any)
networks := db.Networks(maxminddb.SkipAliasedNetworks) for network := range db.Networks() {
for networks.Next() {
switch g.Type { switch g.Type {
case TypeGeoLite2CountryMMDBOut, TypeDBIPCountryMMDBOut: case TypeGeoLite2CountryMMDBOut:
var record geoip2.Country var record geoip2.Country
_, err := networks.Network(&record) err := network.Decode(&record)
if err != nil { if err != nil {
return nil, err return nil, err
} }
switch { switch {
case strings.TrimSpace(record.Country.IsoCode) != "": case strings.TrimSpace(record.Country.ISOCode) != "":
countryCode := strings.ToUpper(strings.TrimSpace(record.Country.IsoCode)) countryCode := strings.ToUpper(strings.TrimSpace(record.Country.ISOCode))
if _, found := infoList[countryCode]; !found { if _, found := infoList[countryCode]; !found {
infoList[countryCode] = geoip2.Country{ infoList[countryCode] = geoip2.Country{
Continent: record.Continent, Continent: record.Continent,
@@ -125,8 +193,8 @@ func (g *GeoLite2CountryMMDBOut) GetExtraInfo() (map[string]any, error) {
} }
} }
case strings.TrimSpace(record.RegisteredCountry.IsoCode) != "": case strings.TrimSpace(record.RegisteredCountry.ISOCode) != "":
countryCode := strings.ToUpper(strings.TrimSpace(record.RegisteredCountry.IsoCode)) countryCode := strings.ToUpper(strings.TrimSpace(record.RegisteredCountry.ISOCode))
if _, found := infoList[countryCode]; !found { if _, found := infoList[countryCode]; !found {
infoList[countryCode] = geoip2.Country{ infoList[countryCode] = geoip2.Country{
Continent: record.Continent, Continent: record.Continent,
@@ -134,19 +202,14 @@ func (g *GeoLite2CountryMMDBOut) GetExtraInfo() (map[string]any, error) {
} }
} }
case strings.TrimSpace(record.RepresentedCountry.IsoCode) != "": case strings.TrimSpace(record.RepresentedCountry.ISOCode) != "":
countryCode := strings.ToUpper(strings.TrimSpace(record.RepresentedCountry.IsoCode)) countryCode := strings.ToUpper(strings.TrimSpace(record.RepresentedCountry.ISOCode))
if _, found := infoList[countryCode]; !found { if _, found := infoList[countryCode]; !found {
infoList[countryCode] = geoip2.Country{ infoList[countryCode] = geoip2.Country{
Continent: record.Continent, Continent: record.Continent,
Country: struct { Country: geoip2.CountryRecord{
Names map[string]string `maxminddb:"names"`
IsoCode string `maxminddb:"iso_code"`
GeoNameID uint `maxminddb:"geoname_id"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
}{
Names: record.RepresentedCountry.Names, Names: record.RepresentedCountry.Names,
IsoCode: record.RepresentedCountry.IsoCode, ISOCode: record.RepresentedCountry.ISOCode,
GeoNameID: record.RepresentedCountry.GeoNameID, GeoNameID: record.RepresentedCountry.GeoNameID,
IsInEuropeanUnion: record.RepresentedCountry.IsInEuropeanUnion, IsInEuropeanUnion: record.RepresentedCountry.IsInEuropeanUnion,
}, },
@@ -154,9 +217,52 @@ func (g *GeoLite2CountryMMDBOut) GetExtraInfo() (map[string]any, error) {
} }
} }
case TypeDBIPCountryMMDBOut:
var record dbipCountry
err := network.Decode(&record)
if err != nil {
return nil, err
}
switch {
case strings.TrimSpace(record.Country.ISOCode) != "":
countryCode := strings.ToUpper(strings.TrimSpace(record.Country.ISOCode))
if _, found := infoList[countryCode]; !found {
infoList[countryCode] = dbipCountry{
Continent: record.Continent,
Country: record.Country,
}
}
case strings.TrimSpace(record.RegisteredCountry.ISOCode) != "":
countryCode := strings.ToUpper(strings.TrimSpace(record.RegisteredCountry.ISOCode))
if _, found := infoList[countryCode]; !found {
infoList[countryCode] = dbipCountry{
Continent: record.Continent,
Country: record.RegisteredCountry,
}
}
case strings.TrimSpace(record.RepresentedCountry.ISOCode) != "":
countryCode := strings.ToUpper(strings.TrimSpace(record.RepresentedCountry.ISOCode))
if _, found := infoList[countryCode]; !found {
infoList[countryCode] = dbipCountry{
Continent: record.Continent,
Country: dbipCountryRecord{
CountryRecord: geoip2.CountryRecord{
ISOCode: record.RepresentedCountry.ISOCode,
GeoNameID: record.RepresentedCountry.GeoNameID,
IsInEuropeanUnion: record.RepresentedCountry.IsInEuropeanUnion,
},
Names: record.RepresentedCountry.Names,
},
}
}
}
case TypeIPInfoCountryMMDBOut: case TypeIPInfoCountryMMDBOut:
var record ipInfoLite var record ipInfoLite
_, err := networks.Network(&record) err := network.Decode(&record)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -174,10 +280,6 @@ func (g *GeoLite2CountryMMDBOut) GetExtraInfo() (map[string]any, error) {
} }
if networks.Err() != nil {
return nil, networks.Err()
}
if len(infoList) == 0 { if len(infoList) == 0 {
return nil, fmt.Errorf("❌ [type %s | action %s] no extra info found in the source MMDB file: %s", g.Type, g.Action, g.SourceMMDBURI) return nil, fmt.Errorf("❌ [type %s | action %s] no extra info found in the source MMDB file: %s", g.Type, g.Action, g.SourceMMDBURI)
} }
+14 -21
View File
@@ -3,13 +3,12 @@ package maxmind
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net"
"os" "os"
"strings" "strings"
"github.com/Loyalsoldier/geoip/lib" "github.com/Loyalsoldier/geoip/lib"
"github.com/oschwald/geoip2-golang" "github.com/oschwald/geoip2-golang/v2"
"github.com/oschwald/maxminddb-golang" "github.com/oschwald/maxminddb-golang/v2"
) )
const ( const (
@@ -97,38 +96,36 @@ func (g *GeoLite2CountryMMDBIn) Input(container lib.Container) (lib.Container, e
} }
func (g *GeoLite2CountryMMDBIn) generateEntries(content []byte, entries map[string]*lib.Entry) error { func (g *GeoLite2CountryMMDBIn) generateEntries(content []byte, entries map[string]*lib.Entry) error {
db, err := maxminddb.FromBytes(content) db, err := maxminddb.OpenBytes(content)
if err != nil { if err != nil {
return err return err
} }
defer db.Close() defer db.Close()
networks := db.Networks(maxminddb.SkipAliasedNetworks) for network := range db.Networks() {
for networks.Next() {
var name string var name string
var subnet *net.IPNet
var err error var err error
switch g.Type { switch g.Type {
case TypeGeoLite2CountryMMDBIn, TypeDBIPCountryMMDBIn: case TypeGeoLite2CountryMMDBIn, TypeDBIPCountryMMDBIn:
var record geoip2.Country var record geoip2.Country
subnet, err = networks.Network(&record) err = network.Decode(&record)
if err != nil { if err != nil {
return err return err
} }
switch { switch {
case strings.TrimSpace(record.Country.IsoCode) != "": case strings.TrimSpace(record.Country.ISOCode) != "":
name = strings.ToUpper(strings.TrimSpace(record.Country.IsoCode)) name = strings.ToUpper(strings.TrimSpace(record.Country.ISOCode))
case strings.TrimSpace(record.RegisteredCountry.IsoCode) != "": case strings.TrimSpace(record.RegisteredCountry.ISOCode) != "":
name = strings.ToUpper(strings.TrimSpace(record.RegisteredCountry.IsoCode)) name = strings.ToUpper(strings.TrimSpace(record.RegisteredCountry.ISOCode))
case strings.TrimSpace(record.RepresentedCountry.IsoCode) != "": case strings.TrimSpace(record.RepresentedCountry.ISOCode) != "":
name = strings.ToUpper(strings.TrimSpace(record.RepresentedCountry.IsoCode)) name = strings.ToUpper(strings.TrimSpace(record.RepresentedCountry.ISOCode))
} }
case TypeIPInfoCountryMMDBIn: case TypeIPInfoCountryMMDBIn:
var record ipInfoLite var record ipInfoLite
subnet, err = networks.Network(&record) err = network.Decode(&record)
if err != nil { if err != nil {
return err return err
} }
@@ -138,7 +135,7 @@ func (g *GeoLite2CountryMMDBIn) generateEntries(content []byte, entries map[stri
return lib.ErrNotSupportedFormat return lib.ErrNotSupportedFormat
} }
if name == "" || subnet == nil { if name == "" || !network.Found() {
continue continue
} }
@@ -151,16 +148,12 @@ func (g *GeoLite2CountryMMDBIn) generateEntries(content []byte, entries map[stri
entry = lib.NewEntry(name) entry = lib.NewEntry(name)
} }
if err := entry.AddPrefix(subnet); err != nil { if err := entry.AddPrefix(network.Prefix()); err != nil {
return err return err
} }
entries[name] = entry entries[name] = entry
} }
if networks.Err() != nil {
return networks.Err()
}
return nil return nil
} }
@@ -12,7 +12,7 @@ import (
"github.com/Loyalsoldier/geoip/lib" "github.com/Loyalsoldier/geoip/lib"
"github.com/maxmind/mmdbwriter" "github.com/maxmind/mmdbwriter"
"github.com/maxmind/mmdbwriter/mmdbtype" "github.com/maxmind/mmdbwriter/mmdbtype"
"github.com/oschwald/geoip2-golang" "github.com/oschwald/geoip2-golang/v2"
) )
const ( const (
@@ -58,16 +58,19 @@ func (g *GeoLite2CountryMMDBOut) GetDescription() string {
func (g *GeoLite2CountryMMDBOut) Output(container lib.Container) error { func (g *GeoLite2CountryMMDBOut) Output(container lib.Container) error {
dbName := "" dbName := ""
dbDesc := "" dbDesc := ""
recordSize := 28 dbLanguages := []string{"en"}
recordSize := 24
switch g.Type { switch g.Type {
case TypeGeoLite2CountryMMDBOut: case TypeGeoLite2CountryMMDBOut:
dbName = "GeoLite2-Country" dbName = "GeoLite2-Country"
dbDesc = "Customized GeoLite2 Country database" dbDesc = "Customized GeoLite2 Country database"
dbLanguages = []string{"de", "en", "es", "fr", "ja", "pt-BR", "ru", "zh-CN"}
case TypeDBIPCountryMMDBOut: case TypeDBIPCountryMMDBOut:
dbName = "DBIP-Country-Lite" dbName = "DBIP-Country-Lite"
dbDesc = "Customized DB-IP Country Lite database" dbDesc = "Customized DB-IP Country Lite database"
dbLanguages = []string{"de", "en", "es", "fr", "ja", "pt-BR", "ru", "zh-CN", "fa", "ko"}
case TypeIPInfoCountryMMDBOut: case TypeIPInfoCountryMMDBOut:
dbName = "IPInfo-Lite" dbName = "IPInfo-Lite"
@@ -79,6 +82,7 @@ func (g *GeoLite2CountryMMDBOut) Output(container lib.Container) error {
mmdbwriter.Options{ mmdbwriter.Options{
DatabaseType: dbName, DatabaseType: dbName,
Description: map[string]string{"en": dbDesc}, Description: map[string]string{"en": dbDesc},
Languages: dbLanguages,
RecordSize: recordSize, RecordSize: recordSize,
IncludeReservedNetworks: true, IncludeReservedNetworks: true,
}, },
@@ -222,28 +226,28 @@ func (g *GeoLite2CountryMMDBOut) marshalData(writer *mmdbwriter.Tree, entry *lib
record = mmdbtype.Map{ record = mmdbtype.Map{
"continent": mmdbtype.Map{ "continent": mmdbtype.Map{
"names": mmdbtype.Map{ "names": mmdbtype.Map{
"de": mmdbtype.String(info.Continent.Names["de"]), "de": mmdbtype.String(info.Continent.Names.German),
"en": mmdbtype.String(info.Continent.Names["en"]), "en": mmdbtype.String(info.Continent.Names.English),
"es": mmdbtype.String(info.Continent.Names["es"]), "es": mmdbtype.String(info.Continent.Names.Spanish),
"fr": mmdbtype.String(info.Continent.Names["fr"]), "fr": mmdbtype.String(info.Continent.Names.French),
"ja": mmdbtype.String(info.Continent.Names["ja"]), "ja": mmdbtype.String(info.Continent.Names.Japanese),
"pt-BR": mmdbtype.String(info.Continent.Names["pt-BR"]), "pt-BR": mmdbtype.String(info.Continent.Names.BrazilianPortuguese),
"ru": mmdbtype.String(info.Continent.Names["ru"]), "ru": mmdbtype.String(info.Continent.Names.Russian),
"zh-CN": mmdbtype.String(info.Continent.Names["zh-CN"]), "zh-CN": mmdbtype.String(info.Continent.Names.SimplifiedChinese),
}, },
"code": mmdbtype.String(info.Continent.Code), "code": mmdbtype.String(info.Continent.Code),
"geoname_id": mmdbtype.Uint32(info.Continent.GeoNameID), "geoname_id": mmdbtype.Uint32(info.Continent.GeoNameID),
}, },
"country": mmdbtype.Map{ "country": mmdbtype.Map{
"names": mmdbtype.Map{ "names": mmdbtype.Map{
"de": mmdbtype.String(info.Country.Names["de"]), "de": mmdbtype.String(info.Country.Names.German),
"en": mmdbtype.String(info.Country.Names["en"]), "en": mmdbtype.String(info.Country.Names.English),
"es": mmdbtype.String(info.Country.Names["es"]), "es": mmdbtype.String(info.Country.Names.Spanish),
"fr": mmdbtype.String(info.Country.Names["fr"]), "fr": mmdbtype.String(info.Country.Names.French),
"ja": mmdbtype.String(info.Country.Names["ja"]), "ja": mmdbtype.String(info.Country.Names.Japanese),
"pt-BR": mmdbtype.String(info.Country.Names["pt-BR"]), "pt-BR": mmdbtype.String(info.Country.Names.BrazilianPortuguese),
"ru": mmdbtype.String(info.Country.Names["ru"]), "ru": mmdbtype.String(info.Country.Names.Russian),
"zh-CN": mmdbtype.String(info.Country.Names["zh-CN"]), "zh-CN": mmdbtype.String(info.Country.Names.SimplifiedChinese),
}, },
"iso_code": mmdbtype.String(entry.GetName()), "iso_code": mmdbtype.String(entry.GetName()),
"geoname_id": mmdbtype.Uint32(info.Country.GeoNameID), "geoname_id": mmdbtype.Uint32(info.Country.GeoNameID),
@@ -254,14 +258,14 @@ func (g *GeoLite2CountryMMDBOut) marshalData(writer *mmdbwriter.Tree, entry *lib
record = mmdbtype.Map{ record = mmdbtype.Map{
"country": mmdbtype.Map{ "country": mmdbtype.Map{
"names": mmdbtype.Map{ "names": mmdbtype.Map{
"de": mmdbtype.String(info.Country.Names["de"]), "de": mmdbtype.String(info.Country.Names.German),
"en": mmdbtype.String(info.Country.Names["en"]), "en": mmdbtype.String(info.Country.Names.English),
"es": mmdbtype.String(info.Country.Names["es"]), "es": mmdbtype.String(info.Country.Names.Spanish),
"fr": mmdbtype.String(info.Country.Names["fr"]), "fr": mmdbtype.String(info.Country.Names.French),
"ja": mmdbtype.String(info.Country.Names["ja"]), "ja": mmdbtype.String(info.Country.Names.Japanese),
"pt-BR": mmdbtype.String(info.Country.Names["pt-BR"]), "pt-BR": mmdbtype.String(info.Country.Names.BrazilianPortuguese),
"ru": mmdbtype.String(info.Country.Names["ru"]), "ru": mmdbtype.String(info.Country.Names.Russian),
"zh-CN": mmdbtype.String(info.Country.Names["zh-CN"]), "zh-CN": mmdbtype.String(info.Country.Names.SimplifiedChinese),
}, },
"iso_code": mmdbtype.String(entry.GetName()), "iso_code": mmdbtype.String(entry.GetName()),
"geoname_id": mmdbtype.Uint32(info.Country.GeoNameID), "geoname_id": mmdbtype.Uint32(info.Country.GeoNameID),
@@ -271,7 +275,7 @@ func (g *GeoLite2CountryMMDBOut) marshalData(writer *mmdbwriter.Tree, entry *lib
} }
case TypeDBIPCountryMMDBOut: case TypeDBIPCountryMMDBOut:
info, found := extraInfo[entry.GetName()].(geoip2.Country) info, found := extraInfo[entry.GetName()].(dbipCountry)
if !found { if !found {
log.Printf("⚠️ [type %s | action %s] not found extra info for list %s\n", g.Type, g.Action, entry.GetName()) log.Printf("⚠️ [type %s | action %s] not found extra info for list %s\n", g.Type, g.Action, entry.GetName())
@@ -284,32 +288,32 @@ func (g *GeoLite2CountryMMDBOut) marshalData(writer *mmdbwriter.Tree, entry *lib
record = mmdbtype.Map{ record = mmdbtype.Map{
"continent": mmdbtype.Map{ "continent": mmdbtype.Map{
"names": mmdbtype.Map{ "names": mmdbtype.Map{
"de": mmdbtype.String(info.Continent.Names["de"]), "de": mmdbtype.String(info.Continent.Names.German),
"en": mmdbtype.String(info.Continent.Names["en"]), "en": mmdbtype.String(info.Continent.Names.English),
"es": mmdbtype.String(info.Continent.Names["es"]), "es": mmdbtype.String(info.Continent.Names.Spanish),
"fa": mmdbtype.String(info.Continent.Names["fa"]), "fa": mmdbtype.String(info.Continent.Names.Persian),
"fr": mmdbtype.String(info.Continent.Names["fr"]), "fr": mmdbtype.String(info.Continent.Names.French),
"ja": mmdbtype.String(info.Continent.Names["ja"]), "ja": mmdbtype.String(info.Continent.Names.Japanese),
"ko": mmdbtype.String(info.Continent.Names["ko"]), "ko": mmdbtype.String(info.Continent.Names.Korean),
"pt-BR": mmdbtype.String(info.Continent.Names["pt-BR"]), "pt-BR": mmdbtype.String(info.Continent.Names.BrazilianPortuguese),
"ru": mmdbtype.String(info.Continent.Names["ru"]), "ru": mmdbtype.String(info.Continent.Names.Russian),
"zh-CN": mmdbtype.String(info.Continent.Names["zh-CN"]), "zh-CN": mmdbtype.String(info.Continent.Names.SimplifiedChinese),
}, },
"code": mmdbtype.String(info.Continent.Code), "code": mmdbtype.String(info.Continent.Code),
"geoname_id": mmdbtype.Uint32(info.Continent.GeoNameID), "geoname_id": mmdbtype.Uint32(info.Continent.GeoNameID),
}, },
"country": mmdbtype.Map{ "country": mmdbtype.Map{
"names": mmdbtype.Map{ "names": mmdbtype.Map{
"de": mmdbtype.String(info.Country.Names["de"]), "de": mmdbtype.String(info.Country.Names.German),
"en": mmdbtype.String(info.Country.Names["en"]), "en": mmdbtype.String(info.Country.Names.English),
"es": mmdbtype.String(info.Country.Names["es"]), "es": mmdbtype.String(info.Country.Names.Spanish),
"fa": mmdbtype.String(info.Country.Names["fa"]), "fa": mmdbtype.String(info.Country.Names.Persian),
"fr": mmdbtype.String(info.Country.Names["fr"]), "fr": mmdbtype.String(info.Country.Names.French),
"ja": mmdbtype.String(info.Country.Names["ja"]), "ja": mmdbtype.String(info.Country.Names.Japanese),
"ko": mmdbtype.String(info.Country.Names["ko"]), "ko": mmdbtype.String(info.Country.Names.Korean),
"pt-BR": mmdbtype.String(info.Country.Names["pt-BR"]), "pt-BR": mmdbtype.String(info.Country.Names.BrazilianPortuguese),
"ru": mmdbtype.String(info.Country.Names["ru"]), "ru": mmdbtype.String(info.Country.Names.Russian),
"zh-CN": mmdbtype.String(info.Country.Names["zh-CN"]), "zh-CN": mmdbtype.String(info.Country.Names.SimplifiedChinese),
}, },
"iso_code": mmdbtype.String(entry.GetName()), "iso_code": mmdbtype.String(entry.GetName()),
"geoname_id": mmdbtype.Uint32(info.Country.GeoNameID), "geoname_id": mmdbtype.Uint32(info.Country.GeoNameID),
@@ -320,16 +324,16 @@ func (g *GeoLite2CountryMMDBOut) marshalData(writer *mmdbwriter.Tree, entry *lib
record = mmdbtype.Map{ record = mmdbtype.Map{
"country": mmdbtype.Map{ "country": mmdbtype.Map{
"names": mmdbtype.Map{ "names": mmdbtype.Map{
"de": mmdbtype.String(info.Country.Names["de"]), "de": mmdbtype.String(info.Country.Names.German),
"en": mmdbtype.String(info.Country.Names["en"]), "en": mmdbtype.String(info.Country.Names.English),
"es": mmdbtype.String(info.Country.Names["es"]), "es": mmdbtype.String(info.Country.Names.Spanish),
"fa": mmdbtype.String(info.Country.Names["fa"]), "fa": mmdbtype.String(info.Country.Names.Persian),
"fr": mmdbtype.String(info.Country.Names["fr"]), "fr": mmdbtype.String(info.Country.Names.French),
"ja": mmdbtype.String(info.Country.Names["ja"]), "ja": mmdbtype.String(info.Country.Names.Japanese),
"ko": mmdbtype.String(info.Country.Names["ko"]), "ko": mmdbtype.String(info.Country.Names.Korean),
"pt-BR": mmdbtype.String(info.Country.Names["pt-BR"]), "pt-BR": mmdbtype.String(info.Country.Names.BrazilianPortuguese),
"ru": mmdbtype.String(info.Country.Names["ru"]), "ru": mmdbtype.String(info.Country.Names.Russian),
"zh-CN": mmdbtype.String(info.Country.Names["zh-CN"]), "zh-CN": mmdbtype.String(info.Country.Names.SimplifiedChinese),
}, },
"iso_code": mmdbtype.String(entry.GetName()), "iso_code": mmdbtype.String(entry.GetName()),
"geoname_id": mmdbtype.Uint32(info.Country.GeoNameID), "geoname_id": mmdbtype.Uint32(info.Country.GeoNameID),
+30 -4
View File
@@ -209,12 +209,38 @@ func (t *TextIn) scanFileForJSONIn(reader io.Reader, entry *lib.Entry) error {
path = strings.TrimSpace(path) path = strings.TrimSpace(path)
result := gjson.GetBytes(data, path) result := gjson.GetBytes(data, path)
for _, cidr := range result.Array() { if err := t.processJSONResult(result, entry); err != nil {
if err := entry.AddPrefix(cidr.String()); err != nil { return fmt.Errorf("❌ [type %s | action %s] failed to process JSON: %v", t.Type, t.Action, err)
return err
}
} }
} }
return nil return nil
} }
func (t *TextIn) processJSONResult(result gjson.Result, entry *lib.Entry) error {
switch {
case !result.Exists():
return fmt.Errorf("invaild IP address or CIDR (value not exist), please check your specified JSON path or JSON source")
case result.Type == gjson.String:
cidr := strings.TrimSpace(result.String())
if cidr == "" {
return fmt.Errorf("empty string, please check your specified JSON path or JSON source")
}
if err := entry.AddPrefix(cidr); err != nil {
return err
}
case result.IsArray():
for _, item := range result.Array() {
if err := t.processJSONResult(item, entry); err != nil {
return err
}
}
default:
return fmt.Errorf("invaild IP address or CIDR, please check your specified JSON path or JSON source")
}
return nil
}
@@ -145,6 +145,13 @@ define U-Boot/nanopi-r4se-rk3399
USE_RKBIN:=1 USE_RKBIN:=1
endef endef
define U-Boot/rock-4se-rk3399
$(U-Boot/rk3399/Default)
NAME:=ROCK 4SE
BUILD_DEVICES:= \
radxa_rock-4se
endef
define U-Boot/rock-pi-4-rk3399 define U-Boot/rock-pi-4-rk3399
$(U-Boot/rk3399/Default) $(U-Boot/rk3399/Default)
NAME:=Rock Pi 4 NAME:=Rock Pi 4
@@ -517,6 +524,7 @@ UBOOT_TARGETS := \
nanopi-r4s-rk3399 \ nanopi-r4s-rk3399 \
nanopi-r4se-rk3399 \ nanopi-r4se-rk3399 \
rock-pi-4-rk3399 \ rock-pi-4-rk3399 \
rock-4se-rk3399 \
rockpro64-rk3399 \ rockpro64-rk3399 \
sv901-eaio-rk3399 \ sv901-eaio-rk3399 \
guangmiao-g4c-rk3399 \ guangmiao-g4c-rk3399 \
@@ -24,19 +24,68 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
#define RTL8366RB_POWER_SAVE 0x15 #define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12) #define RTL8366RB_POWER_SAVE_ON BIT(12)
@@ -1090,6 +1094,15 @@ static int rtl822xb_config_init(struct p @@ -175,6 +179,10 @@ struct rtl821x_priv {
u32 saved_wolopts;
};
+struct rtl822x_priv {
+ bool enable_aldps;
+};
+
static int rtl821x_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, RTL821x_PAGE_SELECT);
@@ -1025,6 +1033,18 @@ static int rtl822x_write_mmd(struct phy_
static int rtl822x_probe(struct phy_device *phydev)
{
+ struct device *dev = &phydev->mdio.dev;
+ struct rtl822x_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->enable_aldps = of_property_read_bool(dev->of_node,
+ "realtek,aldps-enable");
+
+ phydev->priv = priv;
+
if (IS_ENABLED(CONFIG_REALTEK_PHY_HWMON) &&
phydev->phy_id != RTL_GENERIC_PHYID)
return rtl822x_hwmon_init(phydev);
@@ -1032,6 +1052,19 @@ static int rtl822x_probe(struct phy_devi
return 0;
}
+static int rtl822x_init_phycr1(struct phy_device *phydev, bool no_aldps)
+{
+ struct rtl822x_priv *priv = phydev->priv;
+ u16 val = 0;
+
+ if (priv->enable_aldps && !no_aldps)
+ val = RTL8221B_PHYCR1_ALDPS_EN | RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN;
+
+ return phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL8221B_PHYCR1,
+ RTL8221B_PHYCR1_ALDPS_EN |
+ RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN, val);
+}
+
static int rtl822xb_config_init(struct phy_device *phydev)
{
bool has_2500, has_sgmii;
@@ -1108,6 +1141,14 @@ static int rtl822xb_config_init(struct p
if (ret < 0) if (ret < 0)
return ret; return ret;
+ if (of_property_read_bool(phydev->mdio.dev.of_node, "realtek,aldps-enable")) + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f11, 0x8020);
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_PHYCR1,
+ RTL8221B_PHYCR1_ALDPS_EN | RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN);
+ else
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_PHYCR1,
+ RTL8221B_PHYCR1_ALDPS_EN | RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN);
+ if (ret < 0) + if (ret < 0)
+ return ret; + return ret;
+ +
/* Disable SGMII AN */ + ret = rtl822x_init_phycr1(phydev, false);
ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x7588, 0x2); + if (ret < 0)
if (ret < 0) + return ret;
+
return 0;
}
@@ -1,58 +0,0 @@
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 30 Jan 2025 05:33:12 +0000
Subject: [PATCH] net: phy: realtek: work around broken SerDes
For still unknown reasons the SerDes init sequence may sometimes
time out because a self-clearing bit never clears, indicating the
PHY has entered an unrecoverable error state.
Work-around the issue by triggering a hardware reset and retry the
setup sequence while warning the user that this has happened.
This is really more of a work-around than a fix, and should be
replaced by a better actual fix in future (hopefully).
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -1124,6 +1124,22 @@ static int rtl822xb_config_init(struct p
return 0;
}
+static int rtl822xb_config_init_war(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = rtl822xb_config_init(phydev);
+
+ if (ret == -ETIMEDOUT) {
+ phydev_warn(phydev, "SerDes setup timed out, retrying\n");
+ phy_device_reset(phydev, 1);
+ phy_device_reset(phydev, 0);
+ ret = rtl822xb_config_init(phydev);
+ }
+
+ return ret;
+}
+
static int rtl822xb_get_rate_matching(struct phy_device *phydev,
phy_interface_t iface)
{
@@ -1813,7 +1829,7 @@ static struct phy_driver realtek_drvs[]
.handle_interrupt = rtl8221b_handle_interrupt,
.soft_reset = genphy_soft_reset,
.probe = rtl822x_probe,
- .config_init = rtl822xb_config_init,
+ .config_init = rtl822xb_config_init_war,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
.config_aneg = rtl822x_c45_config_aneg,
@@ -1843,7 +1859,7 @@ static struct phy_driver realtek_drvs[]
.handle_interrupt = rtl8221b_handle_interrupt,
.soft_reset = genphy_soft_reset,
.probe = rtl822x_probe,
- .config_init = rtl822xb_config_init,
+ .config_init = rtl822xb_config_init_war,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
.config_aneg = rtl822x_c45_config_aneg,
@@ -13,15 +13,21 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
--- ---
--- a/drivers/net/phy/realtek/realtek_main.c --- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c +++ b/drivers/net/phy/realtek/realtek_main.c
@@ -1050,6 +1050,11 @@ static int rtl822xb_config_init(struct p @@ -132,6 +132,7 @@
phydev->host_interfaces) || #define RTL8221B_PHYCR1 0xa430
phydev->interface == PHY_INTERFACE_MODE_SGMII; #define RTL8221B_PHYCR1_ALDPS_EN BIT(2)
#define RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN BIT(12)
+#define RTL8221B_PHYCR1_PHYAD_0_EN BIT(13)
+ /* disable listening on MDIO broadcast address (0) */ #define RTL8366RB_POWER_SAVE 0x15
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, 0xa430, BIT(13)); #define RTL8366RB_POWER_SAVE_ON BIT(12)
+ if (ret < 0) @@ -1062,7 +1063,8 @@ static int rtl822x_init_phycr1(struct ph
+ return ret;
+ return phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL8221B_PHYCR1,
/* fill in possible interfaces */ RTL8221B_PHYCR1_ALDPS_EN |
__assign_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces, - RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN, val);
has_2500); + RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN |
+ RTL8221B_PHYCR1_PHYAD_0_EN, val);
}
static int rtl822xb_config_init(struct phy_device *phydev)
@@ -24,19 +24,68 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
#define RTL8366RB_POWER_SAVE 0x15 #define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12) #define RTL8366RB_POWER_SAVE_ON BIT(12)
@@ -1090,6 +1094,15 @@ static int rtl822xb_config_init(struct p @@ -175,6 +179,10 @@ struct rtl821x_priv {
u32 saved_wolopts;
};
+struct rtl822x_priv {
+ bool enable_aldps;
+};
+
static int rtl821x_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, RTL821x_PAGE_SELECT);
@@ -1025,6 +1033,18 @@ static int rtl822x_write_mmd(struct phy_
static int rtl822x_probe(struct phy_device *phydev)
{
+ struct device *dev = &phydev->mdio.dev;
+ struct rtl822x_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->enable_aldps = of_property_read_bool(dev->of_node,
+ "realtek,aldps-enable");
+
+ phydev->priv = priv;
+
if (IS_ENABLED(CONFIG_REALTEK_PHY_HWMON) &&
phydev->phy_id != RTL_GENERIC_PHYID)
return rtl822x_hwmon_init(phydev);
@@ -1032,6 +1052,19 @@ static int rtl822x_probe(struct phy_devi
return 0;
}
+static int rtl822x_init_phycr1(struct phy_device *phydev, bool no_aldps)
+{
+ struct rtl822x_priv *priv = phydev->priv;
+ u16 val = 0;
+
+ if (priv->enable_aldps && !no_aldps)
+ val = RTL8221B_PHYCR1_ALDPS_EN | RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN;
+
+ return phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL8221B_PHYCR1,
+ RTL8221B_PHYCR1_ALDPS_EN |
+ RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN, val);
+}
+
static int rtl822xb_config_init(struct phy_device *phydev)
{
bool has_2500, has_sgmii;
@@ -1108,6 +1141,14 @@ static int rtl822xb_config_init(struct p
if (ret < 0) if (ret < 0)
return ret; return ret;
+ if (of_property_read_bool(phydev->mdio.dev.of_node, "realtek,aldps-enable")) + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f11, 0x8020);
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_PHYCR1,
+ RTL8221B_PHYCR1_ALDPS_EN | RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN);
+ else
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_PHYCR1,
+ RTL8221B_PHYCR1_ALDPS_EN | RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN);
+ if (ret < 0) + if (ret < 0)
+ return ret; + return ret;
+ +
/* Disable SGMII AN */ + ret = rtl822x_init_phycr1(phydev, false);
ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x7588, 0x2); + if (ret < 0)
if (ret < 0) + return ret;
+
return 0;
}
@@ -1,58 +0,0 @@
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 30 Jan 2025 05:33:12 +0000
Subject: [PATCH] net: phy: realtek: work around broken SerDes
For still unknown reasons the SerDes init sequence may sometimes
time out because a self-clearing bit never clears, indicating the
PHY has entered an unrecoverable error state.
Work-around the issue by triggering a hardware reset and retry the
setup sequence while warning the user that this has happened.
This is really more of a work-around than a fix, and should be
replaced by a better actual fix in future (hopefully).
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -1124,6 +1124,22 @@ static int rtl822xb_config_init(struct p
return 0;
}
+static int rtl822xb_config_init_war(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = rtl822xb_config_init(phydev);
+
+ if (ret == -ETIMEDOUT) {
+ phydev_warn(phydev, "SerDes setup timed out, retrying\n");
+ phy_device_reset(phydev, 1);
+ phy_device_reset(phydev, 0);
+ ret = rtl822xb_config_init(phydev);
+ }
+
+ return ret;
+}
+
static int rtl822xb_get_rate_matching(struct phy_device *phydev,
phy_interface_t iface)
{
@@ -1813,7 +1829,7 @@ static struct phy_driver realtek_drvs[]
.handle_interrupt = rtl8221b_handle_interrupt,
.soft_reset = genphy_soft_reset,
.probe = rtl822x_probe,
- .config_init = rtl822xb_config_init,
+ .config_init = rtl822xb_config_init_war,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
.config_aneg = rtl822x_c45_config_aneg,
@@ -1843,7 +1859,7 @@ static struct phy_driver realtek_drvs[]
.handle_interrupt = rtl8221b_handle_interrupt,
.soft_reset = genphy_soft_reset,
.probe = rtl822x_probe,
- .config_init = rtl822xb_config_init,
+ .config_init = rtl822xb_config_init_war,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
.config_aneg = rtl822x_c45_config_aneg,
@@ -13,15 +13,21 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
--- ---
--- a/drivers/net/phy/realtek/realtek_main.c --- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c +++ b/drivers/net/phy/realtek/realtek_main.c
@@ -1050,6 +1050,11 @@ static int rtl822xb_config_init(struct p @@ -132,6 +132,7 @@
phydev->host_interfaces) || #define RTL8221B_PHYCR1 0xa430
phydev->interface == PHY_INTERFACE_MODE_SGMII; #define RTL8221B_PHYCR1_ALDPS_EN BIT(2)
#define RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN BIT(12)
+#define RTL8221B_PHYCR1_PHYAD_0_EN BIT(13)
+ /* disable listening on MDIO broadcast address (0) */ #define RTL8366RB_POWER_SAVE 0x15
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, 0xa430, BIT(13)); #define RTL8366RB_POWER_SAVE_ON BIT(12)
+ if (ret < 0) @@ -1062,7 +1063,8 @@ static int rtl822x_init_phycr1(struct ph
+ return ret;
+ return phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL8221B_PHYCR1,
/* fill in possible interfaces */ RTL8221B_PHYCR1_ALDPS_EN |
__assign_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces, - RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN, val);
has_2500); + RTL8221B_PHYCR1_ALDPS_XTAL_OFF_EN |
+ RTL8221B_PHYCR1_PHYAD_0_EN, val);
}
static int rtl822xb_config_init(struct phy_device *phydev)
@@ -0,0 +1,106 @@
From fbb1d181782f990c0ac5f39d4aa9eda5c39cb442 Mon Sep 17 00:00:00 2001
From: Sam Shih <sam.shih@mediatek.com>
Date: Tue, 4 Mar 2025 19:28:14 +0800
Subject: [PATCH 1/2] cpufreq: add support to adjust cpu volt by efuse
calibration data
---
drivers/cpufreq/mediatek-cpufreq.c | 81 ++++++++++++++++++++++++++++--
1 file changed, 76 insertions(+), 5 deletions(-)
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -15,14 +15,26 @@
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
+#include <linux/nvmem-consumer.h>
+
+struct mtk_cpufreq_corr_data {
+ unsigned int freq;
+ unsigned int vbase;
+ unsigned int vscale;
+ unsigned int vmax;
+};
struct mtk_cpufreq_platform_data {
+ /* cpufreq correction data specification */
+ const struct mtk_cpufreq_corr_data *corr_data;
int min_volt_shift;
int max_volt_shift;
int proc_max_volt;
int sram_min_volt;
int sram_max_volt;
bool ccifreq_supported;
+ /* whether voltage correction via nvmem is supported */
+ bool nvmem_volt_corr;
};
/*
@@ -197,6 +209,50 @@ static bool is_ccifreq_ready(struct mtk_
return true;
}
+static int mtk_cpufreq_nvmem_volt_corr(struct mtk_cpu_dvfs_info *info,
+ struct cpufreq_policy *policy)
+{
+ const struct mtk_cpufreq_corr_data *corr_data;
+ unsigned int target_voltage;
+ struct nvmem_cell *cell;
+ unsigned int cal_data;
+ const u8 *buf;
+ size_t len;
+ int i;
+
+ cell = nvmem_cell_get(info->cpu_dev, "calibration-data");
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ cal_data = buf[0] & 0x1f;
+ pr_debug("%s: read vbinning value: %d\n", __func__, cal_data);
+ kfree(buf);
+ if (!info->soc_data->corr_data) {
+ pr_err("voltage correction data not found\n");
+ return -EINVAL;
+ }
+
+ corr_data = &info->soc_data->corr_data[0];
+ for (i = 0 ; i < corr_data->freq ; i++) {
+ target_voltage = corr_data->vbase + cal_data * corr_data->vscale;
+ if (target_voltage > corr_data->vmax) {
+ pr_warn("freq %u exceeds max voltage\n", corr_data->freq);
+ pr_warn("force update voltage to %u\n", corr_data->vmax);
+ target_voltage = corr_data->vmax;
+ }
+ dev_pm_opp_remove(info->cpu_dev, corr_data->freq);
+ dev_pm_opp_add(info->cpu_dev, corr_data->freq, target_voltage);
+ corr_data = &info->soc_data->corr_data[i + 1];
+ }
+
+ return 0;
+}
+
static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
@@ -584,6 +640,15 @@ static int mtk_cpufreq_init(struct cpufr
return -EINVAL;
}
+ if (info->soc_data->nvmem_volt_corr) {
+ ret = mtk_cpufreq_nvmem_volt_corr(info, policy);
+ if (ret) {
+ pr_err("failed to correction voltage for cpu%d: %d\n",
+ policy->cpu, ret);
+ return ret;
+ }
+ }
+
ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
if (ret) {
dev_err(info->cpu_dev,
@@ -0,0 +1,33 @@
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -741,6 +741,16 @@ static struct platform_driver mtk_cpufre
.probe = mtk_cpufreq_probe,
};
+struct mtk_cpufreq_corr_data mt7988_volt_corr_data[] = {
+ {
+ .freq = 1800000000,
+ .vbase = 850000,
+ .vscale = 10000,
+ .vmax = 1120000,
+ },
+ { } /* sentinel */
+};
+
static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -769,10 +779,12 @@ static const struct mtk_cpufreq_platform
static const struct mtk_cpufreq_platform_data mt7988_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
- .proc_max_volt = 900000,
+ .proc_max_volt = 1120000,
.sram_min_volt = 0,
.sram_max_volt = 1150000,
.ccifreq_supported = true,
+ .nvmem_volt_corr = 1,
+ .corr_data = mt7988_volt_corr_data,
};
static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
@@ -0,0 +1,48 @@
From c776eb44070d009375559d8c6eb8790edfe129a9 Mon Sep 17 00:00:00 2001
From: Sam Shih <sam.shih@mediatek.com>
Date: Tue, 4 Mar 2025 19:35:14 +0800
Subject: [PATCH 2/2] cpufreq: mt7988: enable using efuse calibration data for
adjusting cpu volt
---
arch/arm64/boot/dts/mediatek/mt7988a.dtsi | 8 ++++++++
1 file changed, 8 insertions(+)
--- a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
@@ -55,6 +55,8 @@
<&topckgen CLK_TOP_XTAL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster0_opp>;
+ nvmem-cells = <&cpufreq_calibration>;
+ nvmem-cell-names = "calibration-data";
mediatek,cci = <&cci>;
};
@@ -67,6 +69,8 @@
<&topckgen CLK_TOP_XTAL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster0_opp>;
+ nvmem-cells = <&cpufreq_calibration>;
+ nvmem-cell-names = "calibration-data";
mediatek,cci = <&cci>;
};
@@ -79,6 +83,8 @@
<&topckgen CLK_TOP_XTAL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster0_opp>;
+ nvmem-cells = <&cpufreq_calibration>;
+ nvmem-cell-names = "calibration-data";
mediatek,cci = <&cci>;
};
@@ -91,6 +97,8 @@
<&topckgen CLK_TOP_XTAL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster0_opp>;
+ nvmem-cells = <&cpufreq_calibration>;
+ nvmem-cell-names = "calibration-data";
mediatek,cci = <&cci>;
};
@@ -0,0 +1,21 @@
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -35,6 +35,8 @@ struct mtk_cpufreq_platform_data {
bool ccifreq_supported;
/* whether voltage correction via nvmem is supported */
bool nvmem_volt_corr;
+ /* Flag indicating whether the processor voltage is fixed */
+ bool proc_fixed_volt;
};
/*
@@ -176,6 +178,9 @@ static int mtk_cpufreq_set_voltage(struc
const struct mtk_cpufreq_platform_data *soc_data = info->soc_data;
int ret;
+ if (soc_data->proc_fixed_volt)
+ return 0;
+
if (info->need_voltage_tracking)
ret = mtk_cpufreq_voltage_tracking(info, vproc);
else
@@ -0,0 +1,23 @@
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -781,6 +781,12 @@ static const struct mtk_cpufreq_platform
.ccifreq_supported = false,
};
+static const struct mtk_cpufreq_platform_data mt7987_platform_data = {
+ .proc_max_volt = 1023000,
+ .ccifreq_supported = false,
+ .proc_fixed_volt = true,
+};
+
static const struct mtk_cpufreq_platform_data mt7988_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -825,6 +831,7 @@ static const struct of_device_id mtk_cpu
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
{ .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
+ { .compatible = "mediatek,mt7987", .data = &mt7987_platform_data },
{ .compatible = "mediatek,mt7988a", .data = &mt7988_platform_data },
{ .compatible = "mediatek,mt7988d", .data = &mt7988_platform_data },
{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
@@ -8,7 +8,7 @@ Signed-off-by: Marcos Alano <marcoshalano@gmail.com>
--- ---
--- a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi --- a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
@@ -1319,4 +1319,8 @@ @@ -1327,4 +1327,8 @@
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
}; };
@@ -0,0 +1,50 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2019 Akash Gajjar <Akash_Gajjar@mentor.com>
* Copyright (c) 2019 Pragnesh Patel <Pragnesh_Patel@mentor.com>
*/
/dts-v1/;
#include "rk3399-t.dtsi"
#include "rk3399-rock-pi-4.dtsi"
/ {
model = "Radxa ROCK 4SE";
compatible = "radxa,rock-4se", "rockchip,rk3399";
aliases {
mmc2 = &sdio0;
};
};
&sdio0 {
status = "okay";
brcmf: wifi@1 {
compatible = "brcm,bcm4329-fmac";
reg = <1>;
interrupt-parent = <&gpio0>;
interrupts = <RK_PA3 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "host-wake";
pinctrl-names = "default";
pinctrl-0 = <&wifi_host_wake_l>;
};
};
&uart0 {
status = "okay";
bluetooth {
compatible = "brcm,bcm4345c5";
clocks = <&rk808 1>;
clock-names = "lpo";
device-wakeup-gpios = <&gpio2 RK_PD3 GPIO_ACTIVE_HIGH>;
host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>;
shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
max-speed = <1500000>;
pinctrl-names = "default";
pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>;
vbat-supply = <&vcc3v3_sys>;
vddio-supply = <&vcc_1v8>;
};
};
@@ -121,16 +121,6 @@
vin-supply = <&vcc_5v0_sys>; vin-supply = <&vcc_5v0_sys>;
}; };
vcc_3v3_rtc_s5: regulator-vcc-3v3-rtc-s5 {
compatible = "regulator-fixed";
regulator-name = "vcc_3v3_rtc_s5";
regulator-boot-on;
regulator-always-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc_5v0_sys>;
};
vcc_3v3_s0: regulator-vcc-3v3-s0 { vcc_3v3_s0: regulator-vcc-3v3-s0 {
compatible = "regulator-fixed"; compatible = "regulator-fixed";
regulator-name = "vcc_3v3_s0"; regulator-name = "vcc_3v3_s0";
@@ -706,8 +696,8 @@
}; };
headphone { headphone {
hp_det: hp-det { hp_det_l: hp-det-l {
rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>; rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
}; };
}; };
@@ -778,6 +768,11 @@
}; };
}; };
&saradc {
vref-supply = <&vcca_1v8_s0>;
status = "okay";
};
&sdhci { &sdhci {
bus-width = <8>; bus-width = <8>;
full-pwr-cycle-in-suspend; full-pwr-cycle-in-suspend;
@@ -206,25 +206,27 @@
pinctrl-names = "default"; pinctrl-names = "default";
pinctrl-0 = <&hp_det_l>; pinctrl-0 = <&hp_det_l>;
simple-audio-card,bitclock-master = <&masterdai>;
simple-audio-card,format = "i2s"; simple-audio-card,format = "i2s";
simple-audio-card,hp-det-gpios = <&gpio2 RK_PD6 GPIO_ACTIVE_LOW>; simple-audio-card,hp-det-gpios = <&gpio2 RK_PD6 GPIO_ACTIVE_LOW>;
simple-audio-card,mclk-fs = <256>; simple-audio-card,mclk-fs = <256>;
simple-audio-card,name = "realtek,rt5616-codec"; simple-audio-card,name = "Onboard Analog RT5616";
simple-audio-card,routing = simple-audio-card,routing =
"Headphones", "HPOL", "Headphones", "HPOL",
"Headphones", "HPOR", "Headphones", "HPOR",
"IN1P", "Microphone Jack"; "IN1P", "Microphone Jack";
simple-audio-card,widgets = simple-audio-card,widgets =
"Headphone", "Headphone Jack", "Headphone", "Headphones",
"Microphone", "Microphone Jack"; "Microphone", "Microphone Jack";
simple-audio-card,codec { simple-audio-card,codec {
sound-dai = <&rt5616>; sound-dai = <&rt5616>;
}; };
simple-audio-card,cpu { masterdai: simple-audio-card,cpu {
sound-dai = <&sai2>; sound-dai = <&sai2>;
system-clock-frequency = <12288000>;
}; };
}; };
}; };
@@ -329,6 +331,10 @@
}; };
}; };
&hdmi_sound {
status = "okay";
};
&hdptxphy { &hdptxphy {
status = "okay"; status = "okay";
}; };
@@ -736,6 +742,8 @@
assigned-clock-rates = <12288000>; assigned-clock-rates = <12288000>;
clocks = <&cru CLK_SAI2_MCLKOUT>; clocks = <&cru CLK_SAI2_MCLKOUT>;
clock-names = "mclk"; clock-names = "mclk";
pinctrl-0 = <&sai2m0_mclk>;
pinctrl-names = "default";
#sound-dai-cells = <0>; #sound-dai-cells = <0>;
}; };
}; };
@@ -853,6 +861,10 @@
status = "okay"; status = "okay";
}; };
&sai6 {
status = "okay";
};
&saradc { &saradc {
vref-supply = <&vcca_1v8_s0>; vref-supply = <&vcca_1v8_s0>;
status = "okay"; status = "okay";
@@ -742,7 +742,6 @@
}; };
usb_otg_pwren: usb-otg-pwren { usb_otg_pwren: usb-otg-pwren {
rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_down>; rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_down>;
}; };
}; };
@@ -765,16 +764,12 @@
cap-mmc-highspeed; cap-mmc-highspeed;
cap-sd-highspeed; cap-sd-highspeed;
disable-wp; disable-wp;
max-frequency = <200000000>;
no-sdio;
no-mmc;
sd-uhs-sdr104; sd-uhs-sdr104;
vmmc-supply = <&vcc_3v3_s3>; vmmc-supply = <&vcc_3v3_s3>;
vqmmc-supply = <&vccio_sd_s0>; vqmmc-supply = <&vccio_sd_s0>;
status = "okay"; status = "okay";
}; };
&sfc0 { &sfc0 {
pinctrl-names = "default"; pinctrl-names = "default";
pinctrl-0 = <&fspi0_pins &fspi0_csn0>; pinctrl-0 = <&fspi0_pins &fspi0_csn0>;
@@ -813,10 +808,6 @@
status = "okay"; status = "okay";
}; };
&ufshc {
status = "okay";
};
&usbdp_phy { &usbdp_phy {
status = "okay"; status = "okay";
}; };
@@ -543,6 +543,15 @@ define Device/radxa_rock-4d
endef endef
TARGET_DEVICES += radxa_rock-4d TARGET_DEVICES += radxa_rock-4d
define Device/radxa_rock-4se
DEVICE_VENDOR := Radxa
DEVICE_MODEL := ROCK 4SE
SOC := rk3399
IMAGE/sysupgrade.img.gz := boot-common | boot-script | pine64-img | gzip | append-metadata
UBOOT_DEVICE_NAME := rock-4se-rk3399
endef
TARGET_DEVICES += radxa_rock-4se
define Device/radxa_rock-5a define Device/radxa_rock-5a
DEVICE_VENDOR := Radxa DEVICE_VENDOR := Radxa
DEVICE_MODEL := ROCK 5A DEVICE_MODEL := ROCK 5A
+15 -13
View File
@@ -1,18 +1,20 @@
part uuid mmc ${devnum}:2 uuid part uuid mmc ${devnum}:2 uuid
if test $stdout = 'serial@2ad40000' ; if test $soc = 'rk3308'; then
then serial_addr=',0x2ad40000'; serial_port='ttyS0'; serial_addr=',0xff0c0000'; serial_port='ttyS2';
elif test $stdout = 'serial@fe660000' ; elif test $soc = 'rk3328'; then
then serial_addr=',0xfe660000'; serial_port='ttyS2'; serial_addr=',0xff130000'; serial_port='ttyS2';
elif test $stdout = 'serial@feb50000' ; elif test $soc = 'rk3399'; then
then serial_addr=',0xfeb50000'; serial_port='ttyS2'; serial_addr=',0xff1a0000'; serial_port='ttyS2';
elif test $stdout = 'serial@ff130000' ; elif test $soc = 'rk3528'; then
then serial_addr=',0xff130000'; serial_port='ttyS2'; serial_addr=',0xff9f0000'; serial_port='ttyS0';
elif test $stdout = 'serial@ff1a0000' ; elif test $soc = 'rk3568'; then
then serial_addr=',0xff1a0000'; serial_port='ttyS2'; serial_addr=',0xfe660000'; serial_port='ttyS2';
elif test $stdout = 'serial@ff9f0000' ; elif test $soc = 'rk3576'; then
then serial_addr=',0xff9f0000'; serial_port='ttyS0'; serial_addr=',0x2ad40000'; serial_port='ttyS0';
fi; elif test $soc = 'rk3588'; then
serial_addr=',0xfeb50000'; serial_port='ttyS2';
fi
setenv bootargs "console=${serial_port},1500000 earlycon=uart8250,mmio32${serial_addr} root=PARTUUID=${uuid} rw rootwait"; setenv bootargs "console=${serial_port},1500000 earlycon=uart8250,mmio32${serial_addr} root=PARTUUID=${uuid} rw rootwait";
+3 -3
View File
@@ -26,14 +26,14 @@ require (
github.com/metacubex/quic-go v0.59.1-0.20260112033758-aa29579f2001 github.com/metacubex/quic-go v0.59.1-0.20260112033758-aa29579f2001
github.com/metacubex/randv2 v0.2.0 github.com/metacubex/randv2 v0.2.0
github.com/metacubex/restls-client-go v0.1.7 github.com/metacubex/restls-client-go v0.1.7
github.com/metacubex/sing v0.5.6 github.com/metacubex/sing v0.5.7
github.com/metacubex/sing-mux v0.3.4 github.com/metacubex/sing-mux v0.3.5
github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e
github.com/metacubex/sing-shadowsocks v0.2.12 github.com/metacubex/sing-shadowsocks v0.2.12
github.com/metacubex/sing-shadowsocks2 v0.2.7 github.com/metacubex/sing-shadowsocks2 v0.2.7
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2
github.com/metacubex/sing-tun v0.4.12 github.com/metacubex/sing-tun v0.4.12
github.com/metacubex/sing-vmess v0.2.4 github.com/metacubex/sing-vmess v0.2.5
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f
github.com/metacubex/smux v0.0.0-20260105030934-d0c8756d3141 github.com/metacubex/smux v0.0.0-20260105030934-d0c8756d3141
github.com/metacubex/tfo-go v0.0.0-20251130171125-413e892ac443 github.com/metacubex/tfo-go v0.0.0-20251130171125-413e892ac443
+6 -6
View File
@@ -115,10 +115,10 @@ github.com/metacubex/randv2 v0.2.0 h1:uP38uBvV2SxYfLj53kuvAjbND4RUDfFJjwr4UigMiL
github.com/metacubex/randv2 v0.2.0/go.mod h1:kFi2SzrQ5WuneuoLLCMkABtiBu6VRrMrWFqSPyj2cxY= github.com/metacubex/randv2 v0.2.0/go.mod h1:kFi2SzrQ5WuneuoLLCMkABtiBu6VRrMrWFqSPyj2cxY=
github.com/metacubex/restls-client-go v0.1.7 h1:eCwiXCTQb5WJu9IlgYvDBA1OgrINv58dEe7hcN5H15k= github.com/metacubex/restls-client-go v0.1.7 h1:eCwiXCTQb5WJu9IlgYvDBA1OgrINv58dEe7hcN5H15k=
github.com/metacubex/restls-client-go v0.1.7/go.mod h1:BN/U52vPw7j8VTSh2vleD/MnmVKCov84mS5VcjVHH4g= github.com/metacubex/restls-client-go v0.1.7/go.mod h1:BN/U52vPw7j8VTSh2vleD/MnmVKCov84mS5VcjVHH4g=
github.com/metacubex/sing v0.5.6 h1:mEPDCadsCj3DB8gn+t/EtposlYuALEkExa/LUguw6/c= github.com/metacubex/sing v0.5.7 h1:8OC+fhKFSv/l9ehEhJRaZZAOuthfZo68SteBVLe8QqM=
github.com/metacubex/sing v0.5.6/go.mod h1:ypf0mjwlZm0sKdQSY+yQvmsbWa0hNPtkeqyRMGgoN+w= github.com/metacubex/sing v0.5.7/go.mod h1:ypf0mjwlZm0sKdQSY+yQvmsbWa0hNPtkeqyRMGgoN+w=
github.com/metacubex/sing-mux v0.3.4 h1:tf4r27CIkzaxq9kBlAXQkgMXq2HPp5Mta60Kb4RCZF0= github.com/metacubex/sing-mux v0.3.5 h1:UqVN+o62SR8kJaC9/3VfOc5UiVqgVY/ef9WwfGYYkk0=
github.com/metacubex/sing-mux v0.3.4/go.mod h1:SEJfAuykNj/ozbPqngEYqyggwSr81+L7Nu09NRD5mh4= github.com/metacubex/sing-mux v0.3.5/go.mod h1:8bT7ZKT3clRrJjYc/x5CRYibC1TX/bK73a3r3+2E+Fc=
github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e h1:MLxp42z9Jd6LtY2suyawnl24oNzIsFxWc15bNeDIGxA= github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e h1:MLxp42z9Jd6LtY2suyawnl24oNzIsFxWc15bNeDIGxA=
github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e/go.mod h1:+lgKTd52xAarGtqugALISShyw4KxnoEpYe2u0zJh26w= github.com/metacubex/sing-quic v0.0.0-20260112044712-65d17608159e/go.mod h1:+lgKTd52xAarGtqugALISShyw4KxnoEpYe2u0zJh26w=
github.com/metacubex/sing-shadowsocks v0.2.12 h1:Wqzo8bYXrK5aWqxu/TjlTnYZzAKtKsaFQBdr6IHFaBE= github.com/metacubex/sing-shadowsocks v0.2.12 h1:Wqzo8bYXrK5aWqxu/TjlTnYZzAKtKsaFQBdr6IHFaBE=
@@ -129,8 +129,8 @@ github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 h1:gXU+MY
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2/go.mod h1:mbfboaXauKJNIHJYxQRa+NJs4JU9NZfkA+I33dS2+9E= github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2/go.mod h1:mbfboaXauKJNIHJYxQRa+NJs4JU9NZfkA+I33dS2+9E=
github.com/metacubex/sing-tun v0.4.12 h1:LCi+yB7y97X3cHQGdNXQBMQNHAzpP4AWg7YhSLk+LTM= github.com/metacubex/sing-tun v0.4.12 h1:LCi+yB7y97X3cHQGdNXQBMQNHAzpP4AWg7YhSLk+LTM=
github.com/metacubex/sing-tun v0.4.12/go.mod h1:L/TjQY5JEGy8nvsuYmy/XgMFMCPiF0+AWSFCYfS6r9w= github.com/metacubex/sing-tun v0.4.12/go.mod h1:L/TjQY5JEGy8nvsuYmy/XgMFMCPiF0+AWSFCYfS6r9w=
github.com/metacubex/sing-vmess v0.2.4 h1:Tx6AGgCiEf400E/xyDuYyafsel6sGbR8oF7RkAaus6I= github.com/metacubex/sing-vmess v0.2.5 h1:m9Zt5I27lB9fmLMZfism9sH2LcnAfShZfwSkf6/KJoE=
github.com/metacubex/sing-vmess v0.2.4/go.mod h1:21R5R1u90uUvBQF0owoooEu96/SAYYD56nDrwm6nFaM= github.com/metacubex/sing-vmess v0.2.5/go.mod h1:AwtlzUgf8COe9tRYAKqWZ+leDH7p5U98a0ZUpYehl8Q=
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f h1:Sr/DYKYofKHKc4GF3qkRGNuj6XA6c0eqPgEDN+VAsYU= github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f h1:Sr/DYKYofKHKc4GF3qkRGNuj6XA6c0eqPgEDN+VAsYU=
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f/go.mod h1:jpAkVLPnCpGSfNyVmj6Cq4YbuZsFepm/Dc+9BAOcR80= github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f/go.mod h1:jpAkVLPnCpGSfNyVmj6Cq4YbuZsFepm/Dc+9BAOcR80=
github.com/metacubex/smux v0.0.0-20260105030934-d0c8756d3141 h1:DK2l6m2Fc85H2BhiAPgbJygiWhesPlfGmF+9Vw6ARdk= github.com/metacubex/smux v0.0.0-20260105030934-d0c8756d3141 h1:DK2l6m2Fc85H2BhiAPgbJygiWhesPlfGmF+9Vw6ARdk=
+3 -5
View File
@@ -42,7 +42,6 @@ type websocketWithEarlyDataConn struct {
net.Conn net.Conn
wsWriter N.ExtendedWriter wsWriter N.ExtendedWriter
underlay net.Conn underlay net.Conn
closed bool
dialed chan bool dialed chan bool
cancel context.CancelFunc cancel context.CancelFunc
ctx context.Context ctx context.Context
@@ -204,7 +203,7 @@ func (wsedc *websocketWithEarlyDataConn) Dial(earlyData []byte) error {
} }
func (wsedc *websocketWithEarlyDataConn) Write(b []byte) (int, error) { func (wsedc *websocketWithEarlyDataConn) Write(b []byte) (int, error) {
if wsedc.closed { if wsedc.ctx.Err() != nil {
return 0, io.ErrClosedPipe return 0, io.ErrClosedPipe
} }
if wsedc.Conn == nil { if wsedc.Conn == nil {
@@ -218,7 +217,7 @@ func (wsedc *websocketWithEarlyDataConn) Write(b []byte) (int, error) {
} }
func (wsedc *websocketWithEarlyDataConn) WriteBuffer(buffer *buf.Buffer) error { func (wsedc *websocketWithEarlyDataConn) WriteBuffer(buffer *buf.Buffer) error {
if wsedc.closed { if wsedc.ctx.Err() != nil {
return io.ErrClosedPipe return io.ErrClosedPipe
} }
if wsedc.Conn == nil { if wsedc.Conn == nil {
@@ -232,7 +231,7 @@ func (wsedc *websocketWithEarlyDataConn) WriteBuffer(buffer *buf.Buffer) error {
} }
func (wsedc *websocketWithEarlyDataConn) Read(b []byte) (int, error) { func (wsedc *websocketWithEarlyDataConn) Read(b []byte) (int, error) {
if wsedc.closed { if wsedc.ctx.Err() != nil {
return 0, io.ErrClosedPipe return 0, io.ErrClosedPipe
} }
if wsedc.Conn == nil { if wsedc.Conn == nil {
@@ -246,7 +245,6 @@ func (wsedc *websocketWithEarlyDataConn) Read(b []byte) (int, error) {
} }
func (wsedc *websocketWithEarlyDataConn) Close() error { func (wsedc *websocketWithEarlyDataConn) Close() error {
wsedc.closed = true
wsedc.cancel() wsedc.cancel()
if wsedc.Conn == nil { // is dialing or not dialed if wsedc.Conn == nil { // is dialing or not dialed
return wsedc.underlay.Close() return wsedc.underlay.Close()
+1 -1
View File
@@ -16,7 +16,7 @@
include $(TOPDIR)/rules.mk include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-amlogic PKG_NAME:=luci-app-amlogic
PKG_VERSION:=3.1.282 PKG_VERSION:=3.1.283
PKG_RELEASE:=1 PKG_RELEASE:=1
PKG_LICENSE:=GPL-2.0 License PKG_LICENSE:=GPL-2.0 License
@@ -116,7 +116,7 @@ else
# Intelligent File Discovery # Intelligent File Discovery
plugin_file_name="" plugin_file_name=""
lang_file_name="" lang_file_list=""
# Method 1: Use GitHub API if 'jq' is installed (Preferred Method) # Method 1: Use GitHub API if 'jq' is installed (Preferred Method)
if command -v jq >/dev/null 2>&1; then if command -v jq >/dev/null 2>&1; then
@@ -129,7 +129,7 @@ else
if [[ -n "${asset_list}" ]]; then if [[ -n "${asset_list}" ]]; then
# Discover exact filenames using regular expressions from the asset list # Discover exact filenames using regular expressions from the asset list
plugin_file_name="$(echo "${asset_list}" | tr ' ' '\n' | grep -oE "^luci-app-amlogic.*${package_manager}$" | head -n 1)" plugin_file_name="$(echo "${asset_list}" | tr ' ' '\n' | grep -oE "^luci-app-amlogic.*${package_manager}$" | head -n 1)"
lang_file_name="$(echo "${asset_list}" | tr ' ' '\n' | grep -oE "^luci-i18n-amlogic-zh-cn.*${package_manager}$" | head -n 1)" lang_file_list=($(echo "${asset_list}" | tr ' ' '\n' | grep -oE "^luci-i18n-amlogic.*${package_manager}$"))
else else
tolog "Warning: Failed to fetch data from GitHub API." "1" tolog "Warning: Failed to fetch data from GitHub API." "1"
fi fi
@@ -138,29 +138,26 @@ else
fi fi
# Validation and Download # Validation and Download
if [[ -z "${plugin_file_name}" || -z "${lang_file_name}" ]]; then if [[ -z "${plugin_file_name}" || "${#lang_file_list[@]}" -eq "0" ]]; then
tolog "02.03.2 Could not discover plugin(.${package_manager}) in the release. Aborting." "1" tolog "02.03.2 Could not discover plugin(.${package_manager}) in the release. Aborting." "1"
fi fi
tolog "Found plugin file: ${plugin_file_name}" tolog "Found plugin file: ${plugin_file_name}"
tolog "Found language file: ${lang_file_name}" tolog "Found language file: $(echo ${lang_file_list[@]} | xargs)"
plugin_full_url="${download_repo}/${latest_version}/${plugin_file_name}"
lang_full_url="${download_repo}/${latest_version}/${lang_file_name}"
# Download the language pack
tolog "02.04 Downloading language pack..."
curl -fsSL "${lang_full_url}" -o "${TMP_CHECK_DIR}/${lang_file_name}"
if [[ "${?}" -ne "0" ]]; then
tolog "02.04 Language pack download failed." "1"
fi
# Download the main plugin file # Download the main plugin file
tolog "02.05 Downloading main plugin..." plugin_full_url="${download_repo}/${latest_version}/${plugin_file_name}"
tolog "02.04 Downloading main plugin..."
curl -fsSL "${plugin_full_url}" -o "${TMP_CHECK_DIR}/${plugin_file_name}" curl -fsSL "${plugin_full_url}" -o "${TMP_CHECK_DIR}/${plugin_file_name}"
if [[ "${?}" -ne "0" ]]; then [[ "${?}" -ne "0" ]] && tolog "02.04 Plugin [ ${plugin_file_name} ] download failed." "1"
tolog "02.05 Plugin download failed." "1"
fi # Download language packs
for langfile in "${lang_file_list[@]}"; do
lang_full_url="${download_repo}/${latest_version}/${langfile}"
tolog "02.05 Downloading language pack [ ${langfile} ]..."
curl -fsSL "${lang_full_url}" -o "${TMP_CHECK_DIR}/${langfile}"
[[ "${?}" -ne "0" ]] && tolog "02.05 Language pack [ ${langfile} ] download failed." "1"
done
sync && sleep 2 sync && sleep 2
fi fi
@@ -168,7 +165,6 @@ fi
tolog "03. The plug is ready, you can update." tolog "03. The plug is ready, you can update."
sleep 2 sleep 2
#echo '<a href=upload>Update</a>' >$START_LOG
tolog '<input type="button" class="cbi-button cbi-button-reload" value="Update" onclick="return amlogic_plugin(this)"/> Latest version: '${latest_version}'' "1" tolog '<input type="button" class="cbi-button cbi-button-reload" value="Update" onclick="return amlogic_plugin(this)"/> Latest version: '${latest_version}'' "1"
exit 0 exit 0
+3 -3
View File
@@ -1,14 +1,14 @@
# SPDX-License-Identifier: GPL-3.0-only # SPDX-License-Identifier: GPL-3.0-only
# #
# Copyright (C) 2021-2025 sirpdboy <herboy2008@gmail.com> # Copyright (C) 2021-2026 sirpdboy <herboy2008@gmail.com>
# https://github.com/sirpdboy/luci-app-ddns-go # https://github.com/sirpdboy/luci-app-ddns-go
# This is free software, licensed under the Apache License, Version 2.0 . # This is free software, licensed under the Apache License, Version 2.0 .
# #
include $(TOPDIR)/rules.mk include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-ddns-go PKG_NAME:=luci-app-ddns-go
PKG_VERSION:=1.6.4 PKG_VERSION:=1.6.5
PKG_RELEASE:=20251106 PKG_RELEASE:=20260121
PKG_MAINTAINER:=sirpdboy <herboy2008@gmail.com> PKG_MAINTAINER:=sirpdboy <herboy2008@gmail.com>
PKG_CONFIG_DEPENDS:= PKG_CONFIG_DEPENDS:=
@@ -1,4 +1,4 @@
/* Copyright (C) 2021-2025 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-ddns-go */ /* Copyright (C) 2021-2026 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-ddns-go */
'use strict'; 'use strict';
'require view'; 'require view';
'require fs'; 'require fs';
@@ -314,6 +314,11 @@ return view.extend({
o.inputstyle = 'apply'; o.inputstyle = 'apply';
o.onclick = L.bind(this.handleResetPassword, this, data); o.onclick = L.bind(this.handleResetPassword, this, data);
o = s.option(form.Button, '_update', _('Update kernel'));
o.inputtitle = _('Check Update');
o.inputstyle = 'apply';
o.onclick = L.bind(this.handleUpdate, this, data);
o = s.option(form.DummyValue, '_update_status', _('Current Version')); o = s.option(form.DummyValue, '_update_status', _('Current Version'));
o.rawhtml = true; o.rawhtml = true;
var currentVersion = ''; var currentVersion = '';
@@ -333,12 +338,6 @@ return view.extend({
]); ]);
}; };
o = s.option(form.Button, '_update', _('Update kernel'),
_('Check and update DDNS-Go to the latest version'));
o.inputtitle = _('Check Update');
o.inputstyle = 'apply';
o.onclick = L.bind(this.handleUpdate, this, data);
return m.render(); return m.render();
} }
}); });
@@ -1,4 +1,4 @@
/* Copyright (C) 2021-2025 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-ddns-go */ /* Copyright (C) 2021-2026 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-ddns-go */
'use strict'; 'use strict';
'require view'; 'require view';
@@ -1,4 +1,7 @@
/* Copyright (C) 2021-2025 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-ddns-go */ // SPDX-License-Identifier: Apache-2.0
/*
* Copyright (C) 2022-2026 sirpdboy <herboy2008@gmail.com>
*/
'use strict'; 'use strict';
'require dom'; 'require dom';
'require fs'; 'require fs';
@@ -10,7 +13,6 @@
return view.extend({ return view.extend({
render: function () { render: function () {
var css = ` var css = `
/* 日志框文本区域 */
#log_textarea pre { #log_textarea pre {
padding: 10px; /* 内边距 */ padding: 10px; /* 内边距 */
border-bottom: 1px solid #ddd; /* 边框颜色 */ border-bottom: 1px solid #ddd; /* 边框颜色 */
@@ -20,15 +22,35 @@ return view.extend({
word-wrap: break-word; word-wrap: break-word;
overflow-y: auto; overflow-y: auto;
} }
/* 5s 自动刷新文字 */
.cbi-section small { .cbi-section small {
margin-left: 1rem; margin-left: 1rem;
font-size: small; font-size: small;
color: #666; /* 深灰色文字 */
} }
.log-container {
display: flex;
flex-direction: column;
max-height: 1200px;
overflow-y: auto;
border-radius: 3px;
margin-top: 10px;
padding: 5px;
}
.log-line {
padding: 3px 0;
font-family: monospace;
font-size: 12px;
line-height: 1.4;
}
.log-line:last-child {
border-bottom: none;
}
.log-timestamp {
margin-right: 10px;
}
`; `;
var log_textarea = E('div', { 'id': 'log_textarea' }, var log_container = E('div', { 'class': 'log-container', 'id': 'log_container' },
E('img', { E('img', {
'src': L.resource(['icons/loading.gif']), 'src': L.resource(['icons/loading.gif']),
'alt': _('Loading...'), 'alt': _('Loading...'),
@@ -38,6 +60,77 @@ return view.extend({
var log_path = '/var/log/ddns-go.log'; var log_path = '/var/log/ddns-go.log';
var lastLogContent = ''; var lastLogContent = '';
var lastScrollTop = 0;
var isScrolledToTop = true;
// 解析日志行的时间戳
function parseLogTimestamp(logLine) {
// 匹配格式: 2026/01/21 22:35:13 Listening on :9876
var timestampMatch = logLine.match(/^(\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2})/);
if (timestampMatch) {
var dateStr = timestampMatch[1].replace(/\//g, '-');
return new Date(dateStr).getTime();
}
return Date.now();
}
function reverseLogLines(logContent) {
if (!logContent || logContent.trim() === '') {
return logContent;
}
var lines = logContent.split('\n');
lines = lines.filter(function(line) {
return line.trim() !== '';
});
lines.sort(function(a, b) {
var timeA = parseLogTimestamp(a);
var timeB = parseLogTimestamp(b);
return timeB - timeA; // 降序排列
});
return lines.join('\n');
}
function formatLogLines(logContent, isNewContent) {
if (!logContent || logContent.trim() === '') {
return E('div', { 'class': 'log-line' }, _('Log is clean.'));
}
var lines = logContent.split('\n');
var formattedLines = [];
for (var i = 0; i < lines.length; i++) {
var line = lines[i].trim();
if (line === '') continue;
var timestampMatch = line.match(/^(\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2})/);
var timestampSpan = null;
var messageSpan = null;
var lineClass = 'log-line';
if (timestampMatch) {
timestampSpan = E('span', {
'class': 'log-timestamp',
'title': timestampMatch[1]
}, timestampMatch[0] + ' ');
messageSpan = E('span', {}, line.substring(timestampMatch[0].length + 1));
} else {
messageSpan = E('span', {}, line);
}
var lineDiv = E('div', { 'class': lineClass }, [
timestampSpan,
messageSpan
].filter(function(el) { return el !== null; }));
formattedLines.push(lineDiv);
}
return E('div', {}, formattedLines);
}
var clear_log_button = E('div', {}, [ var clear_log_button = E('div', {}, [
E('button', { E('button', {
@@ -53,9 +146,10 @@ return view.extend({
button.disabled = false; button.disabled = false;
button.textContent = _('Clear Logs'); button.textContent = _('Clear Logs');
// 立即刷新日志显示框 // 立即刷新日志显示框
var log = E('pre', { 'wrap': 'pre' }, [_('Log is clean.')]); var logContent = _('Log is clean.');
dom.content(log_textarea, log); lastLogContent = logContent;
lastLogContent = ''; dom.content(log_container, formatLogLines(logContent, false));
isScrolledToTop = true; // 清空日志后,保持在顶部
}) })
.catch(function () { .catch(function () {
button.textContent = _('Failed to clear log.'); button.textContent = _('Failed to clear log.');
@@ -66,52 +160,80 @@ return view.extend({
}, _('Clear Logs')) }, _('Clear Logs'))
]); ]);
log_container.addEventListener('scroll', function() {
lastScrollTop = this.scrollTop;
isScrolledToTop = this.scrollTop <= 1;
});
poll.add(L.bind(function () { poll.add(L.bind(function () {
return fs.read_direct(log_path, 'text') return fs.read_direct(log_path, 'text')
.then(function (res) { .then(function (res) {
var newContent = res.trim() || _('Log is clean.'); var logContent = res.trim();
if (logContent === '') {
logContent = _('Log is clean.');
}
if (newContent !== lastLogContent) { // 检查内容是否有变化
var log = E('pre', { 'wrap': 'pre' }, [newContent]); if (logContent !== lastLogContent) {
dom.content(log_textarea, log); var isNewContent = lastLogContent !== '' && lastLogContent !== _('Log is clean.');
log.scrollTop = log.scrollHeight;
lastLogContent = newContent; var reversedLog = reverseLogLines(logContent);
// 格式化为HTML
var formattedLog = formatLogLines(reversedLog, isNewContent);
var prevScrollHeight = log_container.scrollHeight;
var prevScrollTop = log_container.scrollTop;
dom.content(log_container, formattedLog);
lastLogContent = logContent;
if (isScrolledToTop || isNewContent) {
log_container.scrollTop = 0;
} else {
var newScrollHeight = log_container.scrollHeight;
var heightDiff = newScrollHeight - prevScrollHeight;
log_container.scrollTop = prevScrollTop + heightDiff;
}
} }
}).catch(function (err) { }).catch(function (err) {
var log; var logContent;
if (err.toString().includes('NotFoundError')) { if (err.toString().includes('NotFoundError')) {
log = E('pre', { 'wrap': 'pre' }, [_('Log file does not exist.')]); logContent = _('Log file does not exist.');
} else { } else {
log = E('pre', { 'wrap': 'pre' }, [_('Unknown error: %s').format(err)]); logContent = _('Unknown error: %s').format(err);
}
if (logContent !== lastLogContent) {
dom.content(log_container, formatLogLines(logContent, false));
lastLogContent = logContent;
} }
dom.content(log_textarea, log);
}); });
})); }));
// 启动轮询
poll.start();
return E('div', { 'class': 'cbi-map' }, [ return E('div', { 'class': 'cbi-map' }, [
E('style', [css]), E('style', [css]),
E('div', { 'class': 'cbi-section' }, [ E('div', { 'class': 'cbi-section' }, [
clear_log_button, clear_log_button,
log_textarea, log_container,
E('small', {}, _('Refresh every 5 seconds.').format(L.env.pollinterval)), E('small', {}, _('Refresh every 5 seconds.').format(L.env.pollinterval)),
E('div', { 'class': 'cbi-section-actions cbi-section-actions-right' }) E('div', { 'class': 'cbi-section-actions cbi-section-actions-right' })
]), ]),
E('div', { 'style': 'text-align: right; font-style: italic;' }, [ E('div', { 'style': 'text-align: right; font-style: italic;' }, [
E('span', {}, [ E('span', {}, [
_('© github '), _('© github '),
E('a', { E('a', {
'href': 'https://github.com/sirpdboy', 'href': 'https://github.com/sirpdboy',
'target': '_blank', 'target': '_blank',
'style': 'text-decoration: none;' 'style': 'text-decoration: none;'
}, 'by sirpdboy') }, 'by sirpdboy')
]) ])
]) ])
]); ]);
} },
//handleSaveApply: null, handleSaveApply: null,
//handleSave: null, handleSave: null,
//handleReset: null handleReset: null
}); });
@@ -76,6 +76,9 @@ msgstr "开机延时启动(秒)"
msgid "Update kernel" msgid "Update kernel"
msgstr "更新内核" msgstr "更新内核"
msgid "Current Version"
msgstr "当前版本"
msgid "Check and update DDNS-Go to the latest version" msgid "Check and update DDNS-Go to the latest version"
msgstr "更新DDNS-Go到最新版本" msgstr "更新DDNS-Go到最新版本"
@@ -1,33 +1,34 @@
{ {
"luci-app-ddns-go": { "luci-app-ddns-go": {
"description": "Grant UCI access for luci-app-ddns-go", "description": "Grant UCI access for luci-app-ddns-go",
"read": { "read": {
"uci": [ "ddns-go" ], "uci": ["*"],
"file": { "file": {
"/etc/init.d/ddns-go": [ "exec" ], "/etc/init.d/ddns-go": ["exec"],
"/usr/libexec/ddns-go-call": [ "exec" ], "/usr/libexec/ddns-go-call": ["exec"],
"/usr/share/rpcd/ucode/luci.ddns-go": [ "exec" ], "/usr/share/rpcd/ucode/luci.ddns-go": ["exec"],
"/bin/pidof": [ "exec" ], "/bin/pidof": ["exec"],
"/bin/ps": [ "exec" ], "/bin/ps": ["exec"],
"/bin/ash": [ "exec" ], "/bin/ash": ["exec"],
"/etc/ddns-go/ddns-go-config.yaml": [ "read" ], "/etc/ddns-go/ddns-go-config.yaml": ["read"],
"/var/log/ddns-go.log": [ "read" ] "/var/log/ddns-go.log": ["read"]
}, },
"ubus": { "ubus": {
"rc": [ "*" ], "rc": ["*"],
"service": ["list"], "service": ["list"],
"luci.ddns-go": [ "*" ] "luci.ddns-go": ["*"],
"network.interface.*": ["status"],
"network": ["reload", "restart"]
} }
}, },
"write": { "write": {
"ubus": { "ubus": {
"luci.ddns-go": [ "*" ] "luci.ddns-go": ["*"]
}, },
"file": { "file": {
"/etc/ddns-go/ddns-go-config.yaml": ["write"] "/etc/ddns-go/ddns-go-config.yaml": ["write"]
}, },
"uci": ["ddns-go"] "uci": ["*"]
} }
} }
} }
@@ -1,14 +1,14 @@
# SPDX-License-Identifier: GPL-3.0-only # SPDX-License-Identifier: GPL-3.0-only
# #
# Copyright (C) 2021-2025 sirpdboy <herboy2008@gmail.com> https://github.com/sirpdboy/luci-app-lucky # Copyright (C) 2021-2026 sirpdboy <herboy2008@gmail.com> https://github.com/sirpdboy/luci-app-lucky
# #
# This is free software, licensed under the Apache License, Version 2.0 . # This is free software, licensed under the Apache License, Version 2.0 .
# #
include $(TOPDIR)/rules.mk include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-lucky PKG_NAME:=luci-app-lucky
PKG_VERSION:=3.0.2 PKG_VERSION:=3.0.3
PKG_RELEASE:=14 PKG_RELEASE:=15
LUCI_TITLE:=LuCI Support for Dynamic lucky Client LUCI_TITLE:=LuCI Support for Dynamic lucky Client
LUCI_DEPENDS:=+lucky LUCI_DEPENDS:=+lucky
@@ -1,4 +1,4 @@
// Copyright (C) 2021-2025 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-lucky // Copyright (C) 2021-2026 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-lucky
'use strict'; 'use strict';
'require form'; 'require form';
@@ -1,4 +1,4 @@
// 版权 Copyright (C) 2021-2025 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-lucky // 版权 Copyright (C) 2021-2026 sirpdboy herboy2008@gmail.com https://github.com/sirpdboy/luci-app-lucky
'use strict'; 'use strict';
'require view'; 'require view';
@@ -16,7 +16,7 @@ return view.extend({
checkRunning: function() { checkRunning: function() {
return fs.exec('/bin/pidof', ['lucky']).then(function(pidRes) { return fs.exec('/bin/pidof', ['lucky']).then(function(pidRes) {
if (pidRes.code === 0) return { isRunning: true }; if (pidRes.code === 0) return { isRunning: true };
return fs.exec('/bin/ash', ['-c', 'ps | grep -q "[d]dns-go"']).then(function(grepRes) { return fs.exec('/bin/ash', ['-c', 'ps | grep -q "[l]ucky"']).then(function(grepRes) {
return { isRunning: grepRes.code === 0 }; return { isRunning: grepRes.code === 0 };
}); });
}); });
@@ -2,7 +2,7 @@ msgid ""
msgstr "Content-Type: text/plain; charset=UTF-8" msgstr "Content-Type: text/plain; charset=UTF-8"
msgid "Lucky" msgid "Lucky"
msgstr "Lucky" msgstr "Lucky大吉"
msgid "Lucky Control panel" msgid "Lucky Control panel"
msgstr "Lucky操作台" msgstr "Lucky操作台"
@@ -57,3 +57,7 @@ msgstr "使用https加密访问"
msgid "Set an installation access path, eg:sirpdboy" msgid "Set an installation access path, eg:sirpdboy"
msgstr "设置一个安装访问路径,如:sirpdboy" msgstr "设置一个安装访问路径,如:sirpdboy"
msgid "Lucky Service Not Running"
msgstr "Lucky服务未启用"
@@ -9,9 +9,5 @@ uci -q batch <<-EOF >/dev/null
EOF EOF
} }
rm -f /tmp/luci-indexcache* 2>/dev/null
rm -f /tmp/luci-modulecache/* 2>/dev/null # OpenWrt 21.02
rm -f /tmp/luci-indexcache
rm -rf /tmp/luci-* rm -rf /tmp/luci-*
exit 0 exit 0
@@ -10,23 +10,21 @@
"uci": { "lucky": true } "uci": { "lucky": true }
} }
}, },
"admin/services/lucky/config": {
"title": "Base Setting",
"order": 10,
"action": {
"type": "view",
"path": "lucky/config"
}
},
"admin/services/lucky/lucky": { "admin/services/lucky/lucky": {
"title": "Lucky Control panel", "title": "Lucky Control panel",
"order": 20, "order": 10,
"action": { "action": {
"type": "view", "type": "view",
"path": "lucky/lucky" "path": "lucky/lucky"
} }
},
"admin/services/lucky/config": {
"title": "Base Setting",
"order": 20,
"action": {
"type": "view",
"path": "lucky/config"
}
} }
} }
@@ -2,7 +2,7 @@
/* /*
* SPDX-License-Identifier: GPL-2.0-only * SPDX-License-Identifier: GPL-2.0-only
* *
* Copyright (C) 2021-2025 sirpdboy <herboy2008@gmail.com> https://github.com/sirpdboy/luci-app-lucky * Copyright (C) 2021-2026 sirpdboy <herboy2008@gmail.com> https://github.com/sirpdboy/luci-app-lucky
*/ */
'use strict'; 'use strict';
+14 -9
View File
@@ -9,7 +9,7 @@
include $(TOPDIR)/rules.mk include $(TOPDIR)/rules.mk
PKG_NAME:=lucky PKG_NAME:=lucky
PKG_VERSION:=2.24.0 PKG_VERSION:=2.26.1
PKG_RELEASE:=1 PKG_RELEASE:=1
PKGARCH:=all PKGARCH:=all
@@ -19,9 +19,6 @@ endif
ifeq ($(ARCH),mips) ifeq ($(ARCH),mips)
LUCKY_ARCH:=mips_softfloat LUCKY_ARCH:=mips_softfloat
endif endif
ifeq ($(ARCH),86)
LUCKY_ARCH:=i386
endif
ifeq ($(ARCH),i386) ifeq ($(ARCH),i386)
LUCKY_ARCH:=i386 LUCKY_ARCH:=i386
endif endif
@@ -29,17 +26,25 @@ ifeq ($(ARCH),x86_64)
LUCKY_ARCH:=x86_64 LUCKY_ARCH:=x86_64
endif endif
ifeq ($(ARCH),arm) ifeq ($(ARCH),arm)
LUCKY_ARCH:=armv5 ifeq ($(BOARD),bcm53xx)
endif LUCKY_ARCH:=armv6
ifeq ($(ARCH),arm7) else
LUCKY_ARCH:=armv7 LUCKY_ARCH:=armv7
endif endif
ifeq ($(ARCH),armv8) endif
LUCKY_ARCH:=arm64 ifeq ($(BOARD),bcm53xx)
LUCKY_ARCH:=armv6
ifeq ($(word 2,$(subst +,$(space),$(call qstrip,$(CONFIG_CPU_TYPE)))),)
LUCKY_ARCH:=armv5
endif
endif
ifeq ($(BOARD),kirkwood)
LUCKY_ARCH:=armv5
endif endif
ifeq ($(ARCH),aarch64) ifeq ($(ARCH),aarch64)
LUCKY_ARCH:=arm64 LUCKY_ARCH:=arm64
endif endif
PKG_LICENSE:=GPL-3.0-only PKG_LICENSE:=GPL-3.0-only
PKG_LICENSE_FILES:=LICENSE PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=GDY666 <gdy666@foxmail.com> PKG_MAINTAINER:=GDY666 <gdy666@foxmail.com>
@@ -1,5 +1,6 @@
# #
# Copyright (c) 2022-2025 SMALLPROGRAM <https://github.com/smallprogram> # Copyright (c) 2022-2025 SMALLPROGRAM <https://github.com/smallprogram>
# Copyright (C) 2026 Openwrt-Passwall Organization
# Description: Auto compile # Description: Auto compile
# #
name: "Auto compile with openwrt sdk" name: "Auto compile with openwrt sdk"
@@ -15,8 +16,6 @@ env:
TZ: Asia/Shanghai TZ: Asia/Shanghai
passwall: ${{ github.repository }} passwall: ${{ github.repository }}
packages: Openwrt-Passwall/openwrt-passwall-packages packages: Openwrt-Passwall/openwrt-passwall-packages
package_names: "chinadns-ng dns2socks geoview hysteria ipt2socks microsocks naiveproxy tcping trojan-plus tuic-client shadowsocks-rust shadowsocksr-libev simple-obfs sing-box v2ray-geodata v2ray-plugin xray-core xray-plugin shadow-tls"
package_release: "chinadns-ng dns2socks geoview hysteria ipt2socks microsocks naiveproxy tcping trojan-plus tuic-client shadowsocks-rust shadowsocksr-libev simple-obfs sing-box v2ray-geoip v2ray-plugin v2ray-geosite xray-core xray-plugin shadow-tls"
permissions: permissions:
contents: write contents: write
@@ -67,10 +66,31 @@ jobs:
- name: Prepare release - name: Prepare release
if: steps.check_version.outputs.has_update == 'true' if: steps.check_version.outputs.has_update == 'true'
run: | run: |
echo "## :mega:Update content" >> release.txt echo "## :mega: 获取其他软件包 / How to Get Other Packages" >>release.txt
echo "![](https://img.shields.io/github/downloads/${{ env.passwall }}/${{steps.check_version.outputs.latest_version}}/total?style=flat-square)" >> release.txt echo "" >>release.txt
echo "### Passwall Info" >> release.txt echo "### 方式 1:添加软件源 / Method 1: Add Software Source" >>release.txt
echo "**:minidisc: Passwall Version: ${{steps.check_version.outputs.latest_version}}**" >> release.txt echo " * 请按照 [openwrt-passwall-build](https://github.com/moetayuko/openwrt-passwall-build) 仓库的说明,将其提供的软件源添加到设备中,然后直接使用包管理器安装。" >>release.txt
echo " Follow the instructions in the [openwrt-passwall-build](https://github.com/moetayuko/openwrt-passwall-build) repository to add the provided software source to your device, then install directly using the package manager." >>release.txt
echo "" >>release.txt
echo "### 方式 2:手动下载并安装 / Method 2: Manual Download and Install" >>release.txt
echo "" >>release.txt
echo "* 从 [SourceForge](https://sourceforge.net/projects/openwrt-passwall-build/files/) 下载预构建的软件包。" >>release.txt
echo " Download prebuilt packages from [SourceForge](https://sourceforge.net/projects/openwrt-passwall-build/files/)." >>release.txt
echo "" >>release.txt
echo "* 将文件上传到您的设备,使用 Shell 命令进行安装。" >>release.txt
echo " Upload files to your device, install it with shell command." >>release.txt
echo "" >>release.txt
echo "ipk" >>release.txt
echo "\`\`\`" >>release.txt
echo "opkg install /path/to/package.ipk" >>release.txt
echo "\`\`\`" >>release.txt
echo "apk" >>release.txt
echo "\`\`\`" >>release.txt
echo "apk add /path/to/package.apk" >>release.txt
echo "\`\`\`" >>release.txt
echo "" >>release.txt
echo ":warning: 请将 /path/to/package 替换为实际下载的软件包路径。" >>release.txt
echo "- Replace /path/to/package with the actual path of the downloaded packages." >>release.txt
touch release.txt touch release.txt
- name: Generate new tag & release - name: Generate new tag & release
@@ -94,13 +114,14 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
include: include:
- platform: x86_64 - url_sdk: https://archive.openwrt.org/releases/21.02.7/targets/x86/64/openwrt-sdk-21.02.7-x86-64_gcc-8.4.0_musl.Linux-x86_64.tar.xz
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/x86/64/openwrt-sdk-24.10.4-x86-64_gcc-13.3.0_musl.Linux-x86_64.tar.zst ver: "22.03-"
ver: "ipk"
- platform: x86_64 - url_sdk: https://downloads.openwrt.org/releases/24.10.5/targets/x86/64/openwrt-sdk-24.10.5-x86-64_gcc-13.3.0_musl.Linux-x86_64.tar.zst
url_sdk: https://downloads.openwrt.org/snapshots/targets/x86/64/openwrt-sdk-x86-64_gcc-14.3.0_musl.Linux-x86_64.tar.zst ver: "23.05-24.10"
ver: "apk"
- url_sdk: https://downloads.openwrt.org/snapshots/targets/x86/64/openwrt-sdk-x86-64_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "25.12+"
steps: steps:
- name: Install packages - name: Install packages
run: | run: |
@@ -141,7 +162,7 @@ jobs:
"feeds.conf.default" "feeds.conf.default"
cat > feeds.tmp <<'EOF' cat > feeds.tmp <<'EOF'
src-git passwall_packages https://github.com/Openwrt-Passwall/openwrt-passwall-packages.git;main src-git passwall_packages https://github.com/${{ env.packages }}.git;main
src-git passwall https://github.com/${{ env.passwall }}.git;${{ github.ref_name }} src-git passwall https://github.com/${{ env.passwall }}.git;${{ github.ref_name }}
EOF EOF
cat feeds.conf.default >> feeds.tmp cat feeds.conf.default >> feeds.tmp
@@ -150,27 +171,6 @@ jobs:
./scripts/feeds update -a ./scripts/feeds update -a
./scripts/feeds install -a ./scripts/feeds install -a
#--------------------------------------begin_patches------------------------------------------
echo "Start applying the patch"
rm -rf temp_resp
git clone -b master --single-branch https://github.com/openwrt/packages.git temp_resp
echo "update golang version"
rm -rf feeds/packages/lang/golang
cp -r temp_resp/lang/golang feeds/packages/lang
echo "update rust version"
rm -rf feeds/packages/lang/rust
cp -r temp_resp/lang/rust feeds/packages/lang
rm -rf temp_resp
echo "update patch-kernel.sh"
git clone -b main --single-branch https://github.com/openwrt/openwrt.git temp_resp
cp -f temp_resp/scripts/patch-kernel.sh scripts/
rm -rf temp_resp
echo "Patch application completed"
#--------------------------------------end_patches--------------------------------------------
- name: Compile - name: Compile
id: compile id: compile
run: | run: |
@@ -187,6 +187,7 @@ jobs:
mkdir upload mkdir upload
mv bin/packages/*/passwall/luci-* upload/ mv bin/packages/*/passwall/luci-* upload/
cd upload cd upload
for i in $(ls); do mv $i ${{ matrix.ver }}_$i; done
echo "status=success" >> $GITHUB_OUTPUT echo "status=success" >> $GITHUB_OUTPUT
echo "FIRMWARE=$PWD" >> $GITHUB_ENV echo "FIRMWARE=$PWD" >> $GITHUB_ENV
@@ -198,356 +199,3 @@ jobs:
with: with:
tag_name: ${{needs.job_check.outputs.passwall_version}} tag_name: ${{needs.job_check.outputs.passwall_version}}
files: ${{ env.FIRMWARE }}/* files: ${{ env.FIRMWARE }}/*
job_auto_compile:
if: ${{ needs.job_check.outputs.has_update == 'true' && needs.job_check.outputs.prerelease == 'false' }}
needs: job_check
runs-on: ubuntu-latest
name: build (${{ matrix.ver }}-${{ matrix.platform }})
strategy:
fail-fast: false
matrix:
include:
- platform: x86_64
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/x86/64/openwrt-sdk-24.10.4-x86-64_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: aarch64_generic
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/rockchip/armv8/openwrt-sdk-24.10.4-rockchip-armv8_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: aarch64_cortex-a53
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/mvebu/cortexa53/openwrt-sdk-24.10.4-mvebu-cortexa53_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: aarch64_cortex-a72
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/mvebu/cortexa72/openwrt-sdk-24.10.4-mvebu-cortexa72_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a5_vfpv4
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/at91/sama5/openwrt-sdk-24.10.4-at91-sama5_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a7
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/mediatek/mt7629/openwrt-sdk-24.10.4-mediatek-mt7629_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a7_neon-vfpv4
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/sunxi/cortexa7/openwrt-sdk-24.10.4-sunxi-cortexa7_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a8_vfpv3
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/sunxi/cortexa8/openwrt-sdk-24.10.4-sunxi-cortexa8_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a9
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/bcm53xx/generic/openwrt-sdk-24.10.4-bcm53xx-generic_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a9_neon
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/zynq/generic/openwrt-sdk-24.10.4-zynq-generic_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a9_vfpv3-d16
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/mvebu/cortexa9/openwrt-sdk-24.10.4-mvebu-cortexa9_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a15_neon-vfpv4
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/ipq806x/generic/openwrt-sdk-24.10.4-ipq806x-generic_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mips_24kc
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/ath79/generic/openwrt-sdk-24.10.4-ath79-generic_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mips_4kec
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/realtek/rtl838x/openwrt-sdk-24.10.4-realtek-rtl838x_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mips_mips32
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/bcm53xx/generic/openwrt-sdk-24.10.4-bcm53xx-generic_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mipsel_24kc
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/ramips/rt288x/openwrt-sdk-24.10.4-ramips-rt288x_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mipsel_74kc
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/ramips/rt3883/openwrt-sdk-24.10.4-ramips-rt3883_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mipsel_mips32
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/bcm47xx/generic/openwrt-sdk-24.10.4-bcm47xx-generic_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: x86_64
url_sdk: https://downloads.openwrt.org/snapshots/targets/x86/64/openwrt-sdk-x86-64_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: aarch64_generic
url_sdk: https://downloads.openwrt.org/snapshots/targets/rockchip/armv8/openwrt-sdk-rockchip-armv8_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: aarch64_cortex-a53
url_sdk: https://downloads.openwrt.org/snapshots/targets/mvebu/cortexa53/openwrt-sdk-mvebu-cortexa53_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: aarch64_cortex-a72
url_sdk: https://downloads.openwrt.org/snapshots/targets/mvebu/cortexa72/openwrt-sdk-mvebu-cortexa72_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a5_vfpv4
url_sdk: https://downloads.openwrt.org/snapshots/targets/at91/sama5/openwrt-sdk-at91-sama5_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a7
url_sdk: https://downloads.openwrt.org/snapshots/targets/mediatek/mt7629/openwrt-sdk-mediatek-mt7629_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a7_neon-vfpv4
url_sdk: https://downloads.openwrt.org/snapshots/targets/sunxi/cortexa7/openwrt-sdk-sunxi-cortexa7_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a8_vfpv3
url_sdk: https://downloads.openwrt.org/snapshots/targets/sunxi/cortexa8/openwrt-sdk-sunxi-cortexa8_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a9
url_sdk: https://downloads.openwrt.org/snapshots/targets/bcm53xx/generic/openwrt-sdk-bcm53xx-generic_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a9_neon
url_sdk: https://downloads.openwrt.org/snapshots/targets/zynq/generic/openwrt-sdk-zynq-generic_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a9_vfpv3-d16
url_sdk: https://downloads.openwrt.org/snapshots/targets/mvebu/cortexa9/openwrt-sdk-mvebu-cortexa9_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a15_neon-vfpv4
url_sdk: https://downloads.openwrt.org/snapshots/targets/ipq806x/generic/openwrt-sdk-ipq806x-generic_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: mips_24kc
url_sdk: https://downloads.openwrt.org/snapshots/targets/ath79/generic/openwrt-sdk-ath79-generic_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: mips_4kec
url_sdk: https://downloads.openwrt.org/snapshots/targets/realtek/rtl838x/openwrt-sdk-realtek-rtl838x_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: mips_mips32
url_sdk: https://downloads.openwrt.org/snapshots/targets/bcm53xx/generic/openwrt-sdk-bcm53xx-generic_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: mipsel_24kc
url_sdk: https://downloads.openwrt.org/snapshots/targets/ramips/rt288x/openwrt-sdk-ramips-rt288x_gcc-14.2.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: mipsel_74kc
url_sdk: https://downloads.openwrt.org/snapshots/targets/ramips/rt3883/openwrt-sdk-ramips-rt3883_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: mipsel_mips32
url_sdk: https://downloads.openwrt.org/snapshots/targets/bcm47xx/generic/openwrt-sdk-bcm47xx-generic_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
steps:
- name: Initialization ${{ matrix.platform }} compile environment
run: |
sudo -E rm -rf /usr/share/dotnet /etc/mysql /etc/php /usr/local/lib/android
echo "Install packages"
sudo -E apt-get -qq update
sudo -E apt-get -qq install ack antlr3 asciidoc autoconf automake autopoint binutils bison build-essential \
bzip2 ccache clang cmake cpio curl device-tree-compiler ecj fastjar flex gawk gettext gcc-multilib \
g++-multilib git gnutls-dev gperf haveged help2man intltool lib32gcc-s1 libc6-dev-i386 libelf-dev \
libglib2.0-dev libgmp3-dev libltdl-dev libmpc-dev libmpfr-dev libncurses-dev libpython3-dev \
libreadline-dev libssl-dev libtool libyaml-dev libz-dev lld llvm lrzsz mkisofs msmtp nano \
ninja-build p7zip p7zip-full patch pkgconf python3 python3-pip python3-ply python3-docutils \
python3-pyelftools qemu-utils re2c rsync scons squashfs-tools subversion swig texinfo uglifyjs \
upx-ucl unzip vim wget xmlto xxd zlib1g-dev zstd
sudo -E apt-get -qq autoremove --purge
sudo -E apt-get -qq clean
- name: ${{ matrix.platform }} sdk download
run: |
wget ${{ matrix.url_sdk }}
file_name=$(echo ${{matrix.url_sdk}} | awk -F/ '{print $NF}')
mkdir sdk
if [[ $file_name == *.tar.xz ]]; then
tar -xJf $file_name -C ./sdk --strip-components=1
elif [[ $file_name == *.tar.zst ]]; then
tar --zstd -x -f $file_name -C ./sdk --strip-components=1
else
echo "Unsupported file format: $file_name"
exit 1
fi
cd sdk
- name: SSH connection to Actions
uses: mxschmitt/action-tmate@v3.13
if: (github.event.inputs.ssh == 'true' && github.event.inputs.ssh != 'false') || contains(github.event.action, 'ssh')
- name: ${{ matrix.platform }} feeds configuration packages
run: |
cd sdk
# Update feeds to github source
sed -i \
-e 's|git\.openwrt\.org/feed|github.com/openwrt|g' \
-e 's|git\.openwrt\.org/project|github.com/openwrt|g' \
-e 's|git\.openwrt\.org/openwrt|github.com/openwrt|g' \
"feeds.conf.default"
cat > feeds.tmp <<'EOF'
src-git passwall_packages https://github.com/Openwrt-Passwall/openwrt-passwall-packages.git;main
src-git passwall https://github.com/${{ env.passwall }}.git;${{ github.ref_name }}
EOF
cat feeds.conf.default >> feeds.tmp
mv feeds.tmp feeds.conf.default
./scripts/feeds update -a
./scripts/feeds install -a
#--------------------------------------begin_patches------------------------------------------
echo "Start applying the patch"
rm -rf temp_resp
git clone -b master --single-branch https://github.com/openwrt/packages.git temp_resp
echo "update golang version"
rm -rf feeds/packages/lang/golang
cp -r temp_resp/lang/golang feeds/packages/lang
echo "update rust version"
rm -rf feeds/packages/lang/rust
cp -r temp_resp/lang/rust feeds/packages/lang
rm -rf temp_resp
git clone -b main --single-branch https://github.com/openwrt/openwrt.git temp_resp
cp -f temp_resp/scripts/patch-kernel.sh scripts/
rm -rf temp_resp
echo "fixed rust host build error"
sed -i 's/--set=llvm\.download-ci-llvm=false/--set=llvm.download-ci-llvm=true/' feeds/packages/lang/rust/Makefile
grep -q -- '--ci false \\' feeds/packages/lang/rust/Makefile || sed -i '/x\.py \\/a \ --ci false \\' feeds/packages/lang/rust/Makefile
echo "Patch application completed"
#--------------------------------------end_patches--------------------------------------------
echo "CONFIG_ALL_NONSHARED=n" > .config
echo "CONFIG_ALL_KMODS=n" >> .config
echo "CONFIG_ALL=n" >> .config
echo "CONFIG_AUTOREMOVE=n" >> .config
echo "CONFIG_SIGNED_PACKAGES=n" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall=m" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_Iptables_Transparent_Proxy=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_Nftables_Transparent_Proxy=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Geoview=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Haproxy=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Hysteria=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_NaiveProxy=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Shadowsocks_Libev_Client=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Shadowsocks_Libev_Server=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Shadowsocks_Rust_Client=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Shadowsocks_Rust_Server=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_ShadowsocksR_Libev_Client=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_ShadowsocksR_Libev_Server=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Shadow_TLS=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Simple_Obfs=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_SingBox=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Trojan_Plus=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_tuic_client=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_V2ray_Geodata=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_V2ray_Plugin=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Xray=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall_INCLUDE_Xray_Plugin=y" >> .config
make defconfig
- name: ${{ matrix.platform }} compile
id: compile
run: |
cd sdk
for package in ${{ env.package_names }}; do
if [ -d "feeds/passwall_packages/$package" ]; then
echo "-----------begin compile $package ---------------"
sleep 10s
make package/$package/compile -j$(nproc) V=s
echo "-----------compiled $package ---------------"
echo ""
fi
done
echo "status=success" >> $GITHUB_OUTPUT
- name: Organize ${{ matrix.platform }} files
id: organize
if: steps.compile.outputs.status == 'success'
run: |
cd sdk
mkdir tmp_upload
shopt -s nullglob
for src_dir in bin/packages/*/{packages,passwall_packages}; do
[[ -d "$src_dir" ]] || continue
echo "Scanning: $src_dir"
for prefix in ${{ env.package_release }}; do
for file in "$src_dir"/"$prefix"*; do
[[ -f "$file" ]] || continue
filename=$(basename "$file")
echo " Found: $filename"
cp -r "$file" "tmp_upload/"
done
done
done
mkdir upload
zip -jr upload/passwall_packages_${{ matrix.ver }}_${{ matrix.platform }}.zip tmp_upload/*
echo "FIRMWARE=$PWD" >> $GITHUB_ENV
echo "status=success" >> $GITHUB_OUTPUT
- name: Generate release info
id: info
if: steps.compile.outputs.status == 'success'
run: |
cd sdk
echo "## :mega:Update content" >> release.txt
echo "![](https://img.shields.io/github/downloads/${{ env.passwall }}/${{needs.job_check.outputs.passwall_version}}/total?style=flat-square)" >> release.txt
echo "### Passwall Info" >> release.txt
echo "**:minidisc: Passwall Version: ${{needs.job_check.outputs.passwall_version}}**" >> release.txt
echo "### Packages Version" >> release.txt
echo "**package name**|**package version**" >> release.txt
echo "-|-" >> release.txt
pkgs=$(ls feeds/passwall_packages -I v2ray-geodata | grep -E "$(echo "${{ env.package_names }}" | sed 's/ /|/g')")
for pkg in $pkgs; do
version=$(awk -F ':=' '/PKG_VERSION:=/{print $2}' feeds/passwall_packages/$pkg/Makefile | sed 's/\r//g')
[ -z "${version}" ] && version=$(awk -F ':=' '/PKG_SOURCE_DATE:=/{print $2}' feeds/passwall_packages/$pkg/Makefile | sed 's/\r//g')
echo "**:ice_cube: $pkg**|**${version}**" >> release.txt
done
echo "**:ice_cube: v2ray-geoip**|**$(awk -F ':=' '/GEOIP_VER:=/{print $2}' feeds/passwall_packages/v2ray-geodata/Makefile)**" >> release.txt
echo "**:ice_cube: v2ray-geosite**|**$(awk -F ':=' '/GEOSITE_VER:=/{print $2}' feeds/passwall_packages/v2ray-geodata/Makefile)**" >> release.txt
touch release.txt
echo "status=success" >> $GITHUB_OUTPUT
- name: Upload firmware to release
uses: softprops/action-gh-release@v2
if: steps.info.outputs.status == 'success'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{needs.job_check.outputs.passwall_version}}
body_path: ${{ env.FIRMWARE }}/release.txt
files: ${{ env.FIRMWARE }}/upload/*
+1 -1
View File
@@ -7,7 +7,7 @@
include $(TOPDIR)/rules.mk include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-passwall PKG_NAME:=luci-app-passwall
PKG_VERSION:=26.1.17 PKG_VERSION:=26.1.21
PKG_RELEASE:=1 PKG_RELEASE:=1
PKG_PO_VERSION:=$(PKG_VERSION) PKG_PO_VERSION:=$(PKG_VERSION)
@@ -464,11 +464,19 @@ o:depends({xray_dns_mode = "tcp+doh"})
o:depends({singbox_dns_mode = "doh"}) o:depends({singbox_dns_mode = "doh"})
o = s:option(Value, "remote_dns_client_ip", translate("EDNS Client Subnet")) o = s:option(Value, "remote_dns_client_ip", translate("EDNS Client Subnet"))
o.description = translate("Notify the DNS server when the DNS query is notified, the location of the client (cannot be a private IP address).") .. "<br />" ..
translate("This feature requires the DNS server to support the Edns Client Subnet (RFC7871).")
o.datatype = "ipaddr" o.datatype = "ipaddr"
o:depends({dns_mode = "sing-box"}) o:depends({dns_mode = "sing-box"})
o:depends({dns_mode = "xray"}) o:depends({dns_mode = "xray"})
o:depends({_node_sel_shunt = "1"}) o:depends({_node_sel_shunt = "1"})
o = s:option(Flag, "remote_fakedns", "FakeDNS", translate("Use FakeDNS work in the shunt domain that proxy."))
o.default = "0"
o.rmempty = false
o:depends({dns_mode = "sing-box"})
o:depends({dns_mode = "xray"})
o = s:option(ListValue, "chinadns_ng_default_tag", translate("Default DNS")) o = s:option(ListValue, "chinadns_ng_default_tag", translate("Default DNS"))
o.default = "none" o.default = "none"
o:value("gfw", translate("Remote DNS")) o:value("gfw", translate("Remote DNS"))
@@ -161,7 +161,7 @@ if (has_singbox or has_xray) and #nodes_table > 0 then
o.cfgvalue = get_cfgvalue(v.id, "preproxy_enabled") o.cfgvalue = get_cfgvalue(v.id, "preproxy_enabled")
o.write = get_write(v.id, "preproxy_enabled") o.write = get_write(v.id, "preproxy_enabled")
o = s:taboption("Main", ListValue, vid .. "-main_node", string.format('<a style="color:red">%s</a>', translate("Preproxy Node")), translate("Set the node to be used as a pre-proxy. Each rule (including <code>Default</code>) has a separate switch that controls whether this rule uses the pre-proxy or not.")) o = s:taboption("Main", ListValue, vid .. "-main_node", string.format('<a style="color:#FF8C00">%s</a>', translate("Preproxy Node")), translate("Set the node to be used as a pre-proxy. Each rule (including <code>Default</code>) has a separate switch that controls whether this rule uses the pre-proxy or not."))
o:depends(vid .. "-preproxy_enabled", "1") o:depends(vid .. "-preproxy_enabled", "1")
o.template = appname .. "/cbi/nodes_listvalue" o.template = appname .. "/cbi/nodes_listvalue"
o.group = {} o.group = {}
@@ -188,6 +188,12 @@ if (has_singbox or has_xray) and #nodes_table > 0 then
o.cfgvalue = get_cfgvalue(v.id, "main_node") o.cfgvalue = get_cfgvalue(v.id, "main_node")
o.write = get_write(v.id, "main_node") o.write = get_write(v.id, "main_node")
o = s:taboption("Main", Flag, vid .. "-fakedns", "FakeDNS", translate("Use FakeDNS work in the shunt domain that proxy."))
o:depends("tcp_node", v.id)
o.cfgvalue = get_cfgvalue(v.id, "fakedns")
o.write = get_write(v.id, "fakedns")
o.remove = get_remove(v.id, "fakedns")
m.uci:foreach(appname, "shunt_rules", function(e) m.uci:foreach(appname, "shunt_rules", function(e)
local id = e[".name"] local id = e[".name"]
local node_option = vid .. "-" .. id .. "_node" local node_option = vid .. "-" .. id .. "_node"
@@ -204,16 +210,23 @@ if (has_singbox or has_xray) and #nodes_table > 0 then
o.template = appname .. "/cbi/nodes_listvalue" o.template = appname .. "/cbi/nodes_listvalue"
o.group = {"","","",""} o.group = {"","","",""}
local pt = s:taboption("Main", ListValue, vid .. "-".. id .. "_proxy_tag", string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. translate("Preproxy"))) local pt = s:taboption("Main", ListValue, vid .. "-".. id .. "_proxy_tag", string.format('* <a style="color:#FF8C00">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt.cfgvalue = get_cfgvalue(v.id, id .. "_proxy_tag") pt.cfgvalue = get_cfgvalue(v.id, id .. "_proxy_tag")
pt.write = get_write(v.id, id .. "_proxy_tag") pt.write = get_write(v.id, id .. "_proxy_tag")
pt.remove = get_remove(v.id, id .. "_proxy_tag") pt.remove = get_remove(v.id, id .. "_proxy_tag")
pt:value("", translate("Close")) pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node")) pt:value("main", translate("Preproxy Node"))
pt:depends("__hide__", "1") pt:depends("__hide__", "1")
local fakedns_tag = s:taboption("Main", Flag, vid .. "-".. id .. "_fakedns", string.format('* <a style="color:#FF8C00">%s</a>', e.remarks .. " " .. "FakeDNS"))
fakedns_tag.cfgvalue = get_cfgvalue(v.id, id .. "_fakedns")
fakedns_tag.write = get_write(v.id, id .. "_fakedns")
fakedns_tag.remove = get_remove(v.id, id .. "_fakedns")
for k1, v1 in pairs(socks_list) do for k1, v1 in pairs(socks_list) do
o:value(v1.id, v1.remark) o:value(v1.id, v1.remark)
o.group[#o.group+1] = (v1.group and v1.group ~= "") and v1.group or translate("default") o.group[#o.group+1] = (v1.group and v1.group ~= "") and v1.group or translate("default")
fakedns_tag:depends({ [node_option] = v1.id, [vid .. "-fakedns"] = "1" })
end end
for k1, v1 in pairs(balancing_list) do for k1, v1 in pairs(balancing_list) do
o:value(v1.id, v1.remark) o:value(v1.id, v1.remark)
@@ -233,6 +246,10 @@ if (has_singbox or has_xray) and #nodes_table > 0 then
if not api.is_local_ip(v1.address) then --本地节点禁止使用前置 if not api.is_local_ip(v1.address) then --本地节点禁止使用前置
pt:depends({ [node_option] = v1.id, [vid .. "-preproxy_enabled"] = "1" }) pt:depends({ [node_option] = v1.id, [vid .. "-preproxy_enabled"] = "1" })
end end
fakedns_tag:depends({ [node_option] = v1.id, [vid .. "-fakedns"] = "1" })
end
if v.default_node ~= "_direct" or v.default_node ~= "_blackhole" then
fakedns_tag:depends({ [node_option] = "_default", [vid .. "-fakedns"] = "1" })
end end
end end
end) end)
@@ -269,7 +286,7 @@ if (has_singbox or has_xray) and #nodes_table > 0 then
end end
local id = "default_proxy_tag" local id = "default_proxy_tag"
o = s:taboption("Main", ListValue, vid .. "-" .. id, string.format('* <a style="color:red">%s</a>', translate("Default Preproxy")), translate("When using, localhost will connect this node first and then use this node to connect the default node.")) o = s:taboption("Main", ListValue, vid .. "-" .. id, string.format('* <a style="color:#FF8C00">%s</a>', translate("Default Preproxy")), translate("When using, localhost will connect this node first and then use this node to connect the default node."))
o.cfgvalue = get_cfgvalue(v.id, id) o.cfgvalue = get_cfgvalue(v.id, id)
o.write = get_write(v.id, id) o.write = get_write(v.id, id)
o.remove = get_remove(v.id, id) o.remove = get_remove(v.id, id)
@@ -559,7 +576,7 @@ o:depends({singbox_dns_mode = "doh"})
o = s:taboption("DNS", Value, "remote_dns_client_ip", translate("EDNS Client Subnet")) o = s:taboption("DNS", Value, "remote_dns_client_ip", translate("EDNS Client Subnet"))
o.description = translate("Notify the DNS server when the DNS query is notified, the location of the client (cannot be a private IP address).") .. "<br />" .. o.description = translate("Notify the DNS server when the DNS query is notified, the location of the client (cannot be a private IP address).") .. "<br />" ..
translate("This feature requires the DNS server to support the Edns Client Subnet (RFC7871).") translate("This feature requires the DNS server to support the Edns Client Subnet (RFC7871).")
o.datatype = "ipaddr" o.datatype = "ipaddr"
o:depends({dns_mode = "sing-box"}) o:depends({dns_mode = "sing-box"})
o:depends({dns_mode = "xray"}) o:depends({dns_mode = "xray"})
@@ -574,7 +591,7 @@ o:depends({smartdns_dns_mode = "sing-box", dns_shunt = "smartdns"})
o:depends({dns_mode = "xray", dns_shunt = "dnsmasq"}) o:depends({dns_mode = "xray", dns_shunt = "dnsmasq"})
o:depends({dns_mode = "xray", dns_shunt = "chinadns-ng"}) o:depends({dns_mode = "xray", dns_shunt = "chinadns-ng"})
o:depends({smartdns_dns_mode = "xray", dns_shunt = "smartdns"}) o:depends({smartdns_dns_mode = "xray", dns_shunt = "smartdns"})
o:depends("_node_sel_shunt", "1") --o:depends("_node_sel_shunt", "1")
o.validate = function(self, value, t) o.validate = function(self, value, t)
if value and value == "1" then if value and value == "1" then
local _dns_mode = s.fields["dns_mode"]:formvalue(t) local _dns_mode = s.fields["dns_mode"]:formvalue(t)
@@ -138,7 +138,10 @@ end
source.write = dynamicList_write source.write = dynamicList_write
--[[
-- Too low usage rate, hidden
sourcePort = s:option(Value, "sourcePort", translate("Source port")) sourcePort = s:option(Value, "sourcePort", translate("Source port"))
]]--
port = s:option(Value, "port", translate("port")) port = s:option(Value, "port", translate("port"))
@@ -163,6 +166,11 @@ domain_list.validate = function(self, value)
flag = 0 flag = 0
elseif host:find("ext:") and host:find("ext:") == 1 then elseif host:find("ext:") and host:find("ext:") == 1 then
flag = 0 flag = 0
elseif host:find("rule-set:", 1, true) == 1 or host:find("rs:") == 1 then
local w = host:sub(host:find(":") + 1, #host)
if w:find("local:") == 1 or w:find("remote:") == 1 then
flag = 0
end
elseif host:find("#") and host:find("#") == 1 then elseif host:find("#") and host:find("#") == 1 then
flag = 0 flag = 0
end end
@@ -174,13 +182,21 @@ domain_list.validate = function(self, value)
end end
return value return value
end end
domain_list.description = "<br /><ul><li>" .. translate("Plaintext: If this string matches any part of the targeting domain, this rule takes effet. Example: rule 'sina.com' matches targeting domain 'sina.com', 'sina.com.cn' and 'www.sina.com', but not 'sina.cn'.") domain_list.description = "<br /><ul>"
.. "</li><li>" .. translate("Regular expression: Begining with 'regexp:', the rest is a regular expression. When the regexp matches targeting domain, this rule takes effect. Example: rule 'regexp:\\.goo.*\\.com$' matches 'www.google.com' and 'fonts.googleapis.com', but not 'google.com'.") .. "<li>" .. translate("Plaintext: If this string matches any part of the targeting domain, this rule takes effet. Example: rule 'sina.com' matches targeting domain 'sina.com', 'sina.com.cn' and 'www.sina.com', but not 'sina.cn'.") .. "</li>"
.. "</li><li>" .. translate("Subdomain (recommended): Begining with 'domain:' and the rest is a domain. When the targeting domain is exactly the value, or is a subdomain of the value, this rule takes effect. Example: rule 'domain:v2ray.com' matches 'www.v2ray.com', 'v2ray.com', but not 'xv2ray.com'.") .. "<li>" .. translate("Regular expression: Begining with 'regexp:', the rest is a regular expression. When the regexp matches targeting domain, this rule takes effect. Example: rule 'regexp:\\.goo.*\\.com$' matches 'www.google.com' and 'fonts.googleapis.com', but not 'google.com'.") .. "</li>"
.. "</li><li>" .. translate("Full domain: Begining with 'full:' and the rest is a domain. When the targeting domain is exactly the value, the rule takes effect. Example: rule 'domain:v2ray.com' matches 'v2ray.com', but not 'www.v2ray.com'.") .. "<li>" .. translate("Subdomain (recommended): Begining with 'domain:' and the rest is a domain. When the targeting domain is exactly the value, or is a subdomain of the value, this rule takes effect. Example: rule 'domain:v2ray.com' matches 'www.v2ray.com', 'v2ray.com', but not 'xv2ray.com'.") .. "</li>"
.. "</li><li>" .. translate("Pre-defined domain list: Begining with 'geosite:' and the rest is a name, such as geosite:google or geosite:cn.") .. "<li>" .. translate("Full domain: Begining with 'full:' and the rest is a domain. When the targeting domain is exactly the value, the rule takes effect. Example: rule 'domain:v2ray.com' matches 'v2ray.com', but not 'www.v2ray.com'.") .. "</li>"
.. "</li><li>" .. translate("Annotation: Begining with #") .. "<li>" .. translate("Pre-defined domain list: Begining with 'geosite:' and the rest is a name, such as geosite:google or geosite:cn.") .. "</li>"
.. "</li></ul>" .. "<li>"
.. translate("Sing-Box is compatible with Geo rules and rule-set. rule-set begin with 'rule-set:remote:' or 'rule-set:local:'.")
.. "<ul>"
.. "<li>" .. translate("Such as:") .. "'rule-set:remote:https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-cn.srs'" .. "</li>"
.. "<li>" .. translate("Such as:") .. "'rule-set:local:/usr/share/sing-box/geosite-cn.srs'" .. "</li>"
.. "</ul>"
.. "</li>"
.. "<li>" .. translate("Annotation: Begining with #") .. "</li>"
.. "</ul>"
ip_list = s:option(TextValue, "ip_list", "IP") ip_list = s:option(TextValue, "ip_list", "IP")
ip_list.rows = 10 ip_list.rows = 10
ip_list.wrap = "off" ip_list.wrap = "off"
@@ -191,6 +207,11 @@ ip_list.validate = function(self, value)
for index, ipmask in ipairs(ipmasks) do for index, ipmask in ipairs(ipmasks) do
if ipmask:find("geoip:") and ipmask:find("geoip:") == 1 and not ipmask:find("%s") then if ipmask:find("geoip:") and ipmask:find("geoip:") == 1 and not ipmask:find("%s") then
elseif ipmask:find("ext:") and ipmask:find("ext:") == 1 and not ipmask:find("%s") then elseif ipmask:find("ext:") and ipmask:find("ext:") == 1 and not ipmask:find("%s") then
elseif ipmask:find("rule-set:", 1, true) == 1 or ipmask:find("rs:") == 1 then
local w = ipmask:sub(ipmask:find(":") + 1, #ipmask)
if w:find("local:") == 1 or w:find("remote:") == 1 then
flag = 0
end
elseif ipmask:find("#") and ipmask:find("#") == 1 then elseif ipmask:find("#") and ipmask:find("#") == 1 then
else else
if not (datatypes.ipmask4(ipmask) or datatypes.ipmask6(ipmask)) then if not (datatypes.ipmask4(ipmask) or datatypes.ipmask6(ipmask)) then
@@ -200,10 +221,20 @@ ip_list.validate = function(self, value)
end end
return value return value
end end
ip_list.description = "<br /><ul><li>" .. translate("IP: such as '127.0.0.1'.") ip_list.description = "<br /><ul>"
.. "</li><li>" .. translate("CIDR: such as '127.0.0.0/8'.") .. "<li>" .. translate("IP: such as '127.0.0.1'.") .. "</li>"
.. "</li><li>" .. translate("GeoIP: such as 'geoip:cn'. It begins with geoip: (lower case) and followed by two letter of country code.") .. "<li>" .. translate("CIDR: such as '127.0.0.0/8'.") .. "</li>"
.. "</li><li>" .. translate("Annotation: Begining with #") .. "<li>" .. translate("GeoIP: such as 'geoip:cn'. It begins with geoip: (lower case) and followed by two letter of country code.") .. "</li>"
.. "</li></ul>" .. "<li>"
.. translate("Sing-Box is compatible with Geo rules and rule-set. rule-set begin with 'rule-set:remote:' or 'rule-set:local:'.")
.. "<ul>"
.. "<li>" .. translate("Such as:") .. "'rule-set:remote:https://raw.githubusercontent.com/SagerNet/sing-geoip/rule-set/geoip-cn.srs'" .. "</li>"
.. "<li>" .. translate("Such as:") .. "'rule-set:local:/usr/share/sing-box/geoip-cn.srs'" .. "</li>"
.. "</ul>"
.. "</li>"
.. "<li>" .. translate("Annotation: Begining with #") .. "</li>"
.. "</ul>"
o = s:option(Flag, "invert", "Invert", translate("Invert match result.") .. " " .. translate("Only support Sing-Box."))
return m return m
@@ -215,13 +215,13 @@ o.default = "2"
o.placeholder = "2" o.placeholder = "2"
o.description = translate("The load balancer selects the optimal number of nodes, and traffic is randomly distributed among them.") o.description = translate("The load balancer selects the optimal number of nodes, and traffic is randomly distributed among them.")
local default_node = m.uci:get(appname, arg[1], "default_node") or "_direct"
-- [[ 分流模块 ]] -- [[ 分流模块 ]]
if #nodes_table > 0 then if #nodes_table > 0 then
o = s:option(Flag, _n("preproxy_enabled"), translate("Preproxy")) o = s:option(Flag, _n("preproxy_enabled"), translate("Preproxy"))
o:depends({ [_n("protocol")] = "_shunt" }) o:depends({ [_n("protocol")] = "_shunt" })
o = s:option(ListValue, _n("main_node"), string.format('<a style="color:red">%s</a>', translate("Preproxy Node")), translate("Set the node to be used as a pre-proxy. Each rule (including <code>Default</code>) has a separate switch that controls whether this rule uses the pre-proxy or not.")) o = s:option(ListValue, _n("main_node"), string.format('<a style="color:#FF8C00">%s</a>', translate("Preproxy Node")), translate("Set the node to be used as a pre-proxy. Each rule (including <code>Default</code>) has a separate switch that controls whether this rule uses the pre-proxy or not."))
o:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true }) o:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true })
o.template = appname .. "/cbi/nodes_listvalue" o.template = appname .. "/cbi/nodes_listvalue"
o.group = {} o.group = {}
@@ -241,6 +241,9 @@ if #nodes_table > 0 then
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
end end
o = s:option(Flag, _n("fakedns"), "FakeDNS", translate("Use FakeDNS work in the shunt domain that proxy."))
o:depends({ [_n("protocol")] = "_shunt" })
end end
m.uci:foreach(appname, "shunt_rules", function(e) m.uci:foreach(appname, "shunt_rules", function(e)
if e[".name"] and e.remarks then if e[".name"] and e.remarks then
@@ -254,9 +257,17 @@ m.uci:foreach(appname, "shunt_rules", function(e)
o.group = {"","","",""} o.group = {"","","",""}
if #nodes_table > 0 then if #nodes_table > 0 then
local pt = s:option(ListValue, _n(e[".name"] .. "_proxy_tag"), string.format('* <a style="color:#FF8C00">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node"))
pt:depends("__hide__", "1")
local fakedns_tag = s:option(Flag, _n(e[".name"] .. "_fakedns"), string.format('* <a style="color:#FF8C00">%s</a>', e.remarks .. " " .. "FakeDNS"))
for k, v in pairs(socks_list) do for k, v in pairs(socks_list) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = v.id })
end end
for k, v in pairs(balancers_table) do for k, v in pairs(balancers_table) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
@@ -266,16 +277,16 @@ m.uci:foreach(appname, "shunt_rules", function(e)
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
end end
local pt = s:option(ListValue, _n(e[".name"] .. "_proxy_tag"), string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node"))
pt:depends("__hide__", "1")
for k, v in pairs(nodes_table) do for k, v in pairs(nodes_table) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
if not api.is_local_ip(v.address) then --本地节点禁止使用前置 if not api.is_local_ip(v.address) then --本地节点禁止使用前置
pt:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true, [_n(e[".name"])] = v.id }) pt:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true, [_n(e[".name"])] = v.id })
end end
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = v.id })
end
if default_node ~= "_direct" or default_node ~= "_blackhole" then
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = "_default" })
end end
end end
end end
@@ -187,12 +187,13 @@ o:depends({ [_n("protocol")] = "_urltest" })
o.default = "0" o.default = "0"
o.description = translate("Interrupt existing connections when the selected outbound has changed.") o.description = translate("Interrupt existing connections when the selected outbound has changed.")
local default_node = m.uci:get(appname, arg[1], "default_node") or "_direct"
-- [[ 分流模块 ]] -- [[ 分流模块 ]]
if #nodes_table > 0 then if #nodes_table > 0 then
o = s:option(Flag, _n("preproxy_enabled"), translate("Preproxy")) o = s:option(Flag, _n("preproxy_enabled"), translate("Preproxy"))
o:depends({ [_n("protocol")] = "_shunt" }) o:depends({ [_n("protocol")] = "_shunt" })
o = s:option(ListValue, _n("main_node"), string.format('<a style="color:red">%s</a>', translate("Preproxy Node")), translate("Set the node to be used as a pre-proxy. Each rule (including <code>Default</code>) has a separate switch that controls whether this rule uses the pre-proxy or not.")) o = s:option(ListValue, _n("main_node"), string.format('<a style="color:#FF8C00">%s</a>', translate("Preproxy Node")), translate("Set the node to be used as a pre-proxy. Each rule (including <code>Default</code>) has a separate switch that controls whether this rule uses the pre-proxy or not."))
o:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true }) o:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true })
o.template = appname .. "/cbi/nodes_listvalue" o.template = appname .. "/cbi/nodes_listvalue"
o.group = {} o.group = {}
@@ -212,6 +213,9 @@ if #nodes_table > 0 then
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
end end
o = s:option(Flag, _n("fakedns"), "FakeDNS", translate("Use FakeDNS work in the shunt domain that proxy."))
o:depends({ [_n("protocol")] = "_shunt" })
end end
m.uci:foreach(appname, "shunt_rules", function(e) m.uci:foreach(appname, "shunt_rules", function(e)
if e[".name"] and e.remarks then if e[".name"] and e.remarks then
@@ -225,9 +229,17 @@ m.uci:foreach(appname, "shunt_rules", function(e)
o.group = {"","","",""} o.group = {"","","",""}
if #nodes_table > 0 then if #nodes_table > 0 then
local pt = s:option(ListValue, _n(e[".name"] .. "_proxy_tag"), string.format('* <a style="color:#FF8C00">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node"))
pt:depends("__hide__", "1")
local fakedns_tag = s:option(Flag, _n(e[".name"] .. "_fakedns"), string.format('* <a style="color:#FF8C00">%s</a>', e.remarks .. " " .. "FakeDNS"))
for k, v in pairs(socks_list) do for k, v in pairs(socks_list) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = v.id })
end end
for k, v in pairs(urltest_table) do for k, v in pairs(urltest_table) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
@@ -237,16 +249,16 @@ m.uci:foreach(appname, "shunt_rules", function(e)
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
end end
local pt = s:option(ListValue, _n(e[".name"] .. "_proxy_tag"), string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node"))
pt:depends("__hide__", "1")
for k, v in pairs(nodes_table) do for k, v in pairs(nodes_table) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
if not api.is_local_ip(v.address) then --本地节点禁止使用前置 if not api.is_local_ip(v.address) then --本地节点禁止使用前置
pt:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true, [_n(e[".name"])] = v.id }) pt:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true, [_n(e[".name"])] = v.id })
end end
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = v.id })
end
if default_node ~= "_direct" or default_node ~= "_blackhole" then
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = "_default" })
end end
end end
end end
@@ -21,6 +21,8 @@ LOG_FILE = "/tmp/log/" .. appname .. ".log"
TMP_PATH = "/tmp/etc/" .. appname TMP_PATH = "/tmp/etc/" .. appname
TMP_IFACE_PATH = TMP_PATH .. "/iface" TMP_IFACE_PATH = TMP_PATH .. "/iface"
NEW_PORT = nil
function log(...) function log(...)
local result = os.date("%Y-%m-%d %H:%M:%S: ") .. table.concat({...}, " ") local result = os.date("%Y-%m-%d %H:%M:%S: ") .. table.concat({...}, " ")
local f, err = io.open(LOG_FILE, "a") local f, err = io.open(LOG_FILE, "a")
@@ -94,6 +96,16 @@ function get_cache_var(key)
return val return val
end end
function get_new_port()
local cmd_format = ". /usr/share/passwall/utils.sh ; echo -n $(get_new_port %s tcp,udp)"
local set_port = 0
if NEW_PORT and tonumber(NEW_PORT) then
set_port = tonumber(NEW_PORT) + 1
end
NEW_PORT = tonumber(sys.exec(string.format(cmd_format, set_port == 0 and "auto" or set_port)))
return NEW_PORT
end
function exec_call(cmd) function exec_call(cmd)
local process = io.popen(cmd .. '; echo -e "\n$?"') local process = io.popen(cmd .. '; echo -e "\n$?"')
local lines = {} local lines = {}
@@ -11,66 +11,74 @@ local local_version = api.get_app_version("sing-box"):match("[^v]+")
local version_ge_1_11_0 = api.compare_versions(local_version, ">=", "1.11.0") local version_ge_1_11_0 = api.compare_versions(local_version, ">=", "1.11.0")
local version_ge_1_12_0 = api.compare_versions(local_version, ">=", "1.12.0") local version_ge_1_12_0 = api.compare_versions(local_version, ">=", "1.12.0")
local geosite_all_tag = {} local GEO_VAR = {
local geoip_all_tag = {} OK = nil,
local srss_path = "/tmp/etc/" .. appname .."_tmp/srss/" DIR = nil,
SITE_PATH = nil,
IP_PATH = nil,
SITE_TAGS = {},
IP_TAGS = {},
TO_SRS_PATH = "/tmp/etc/" .. appname .."_tmp/singbox_srss/"
}
local function convert_geofile() function check_geoview()
if api.compare_versions(local_version, "<", "1.8.0") then if not GEO_VAR.OK then
api.log("!!!注意:Sing-Box 版本低,Sing-Box 分流无法启用!请在[组件更新]中更新。") -- Only get once
return GEO_VAR.OK = (api.finded_com("geoview") and api.compare_versions(api.get_app_version("geoview"), ">=", "0.1.10")) and 1 or 0
end end
local geo_dir = (uci:get(appname, "@global_rules[0]", "v2ray_location_asset") or "/usr/share/v2ray/"):match("^(.*)/") if GEO_VAR.OK == 0 then
local geosite_path = geo_dir .. "/geosite.dat" api.log("!!!注意:缺少 Geoview 组件或版本过低,Sing-Box 分流无法启用!")
local geoip_path = geo_dir .. "/geoip.dat"
if not api.finded_com("geoview") then
api.log("!!!注意:缺少 Geoview 组件,Sing-Box 分流无法启用!请在[组件更新]中更新。")
return
else else
if api.compare_versions(api.get_app_version("geoview"), "<", "0.1.10") then GEO_VAR.DIR = GEO_VAR.DIR or (uci:get(appname, "@global_rules[0]", "v2ray_location_asset") or "/usr/share/v2ray/"):match("^(.*)/")
api.log("!!!注意:Geoview 组件版本低,Sing-Box 分流无法启用!请在[组件更新]中更新。") GEO_VAR.SITE_PATH = GEO_VAR.SITE_PATH or (GEO_VAR.DIR .. "/geosite.dat")
return GEO_VAR.IP_PATH = GEO_VAR.IP_PATH or (GEO_VAR.DIR .. "/geoip.dat")
if not fs.access(GEO_VAR.TO_SRS_PATH) then
fs.mkdir(GEO_VAR.TO_SRS_PATH)
end end
end end
if not fs.access(srss_path) then return GEO_VAR.OK
fs.mkdir(srss_path) end
function geo_convert_srs(var)
if check_geoview() ~= 1 then
return
end
local geo_path = var["-geo_path"]
local prefix = var["-prefix"]
local rule_name = var["-rule_name"]
local output_srs_file = GEO_VAR.TO_SRS_PATH .. prefix .. "-" .. rule_name .. ".srs"
if not fs.access(output_srs_file) then
local cmd = string.format("geoview -type %s -action convert -input '%s' -list '%s' -output '%s' -lowmem=true",
prefix, geo_path, rule_name, output_srs_file)
sys.call(cmd)
local status = fs.access(output_srs_file) and "success." or "failed!"
if status == "failed!" then
api.log(string.format(" - %s:%s 转换为srs格式:%s", prefix, rule_name, status))
end
end
end
local function convert_geofile()
if check_geoview() ~= 1 then
return
end end
local function convert(file_path, prefix, tags) local function convert(file_path, prefix, tags)
if next(tags) and fs.access(file_path) then if next(tags) and fs.access(file_path) then
local md5_file = srss_path .. prefix .. ".dat.md5" local md5_file = GEO_VAR.TO_SRS_PATH .. prefix .. ".dat.md5"
local new_md5 = sys.exec("md5sum " .. file_path .. " 2>/dev/null | awk '{print $1}'"):gsub("\n", "") local new_md5 = sys.exec("md5sum " .. file_path .. " 2>/dev/null | awk '{print $1}'"):gsub("\n", "")
local old_md5 = sys.exec("[ -f " .. md5_file .. " ] && head -n 1 " .. md5_file .. " | tr -d ' \t\n' || echo ''") local old_md5 = sys.exec("[ -f " .. md5_file .. " ] && head -n 1 " .. md5_file .. " | tr -d ' \t\n' || echo ''")
if new_md5 ~= "" and new_md5 ~= old_md5 then if new_md5 ~= "" and new_md5 ~= old_md5 then
sys.call("printf '%s' " .. new_md5 .. " > " .. md5_file) sys.call("printf '%s' " .. new_md5 .. " > " .. md5_file)
sys.call("rm -rf " .. srss_path .. prefix .. "-*.srs" ) sys.call("rm -rf " .. GEO_VAR.TO_SRS_PATH .. prefix .. "-*.srs" )
end end
for k in pairs(tags) do for k in pairs(tags) do
local srs_file = srss_path .. prefix .. "-" .. k .. ".srs" geo_convert_srs({["-geo_path"] = file_path, ["-prefix"] = prefix, ["-rule_name"] = k})
if not fs.access(srs_file) then
local cmd = string.format("geoview -type %s -action convert -input '%s' -list '%s' -output '%s' -lowmem=true",
prefix, file_path, k, srs_file)
sys.exec(cmd)
--local status = fs.access(srs_file) and "成功。" or "失败!"
--api.log(string.format(" - 转换 %s:%s ... %s", prefix, k, status))
end
end end
end end
end end
--api.log("Sing-Box 规则集转换:") --api.log("Sing-Box 规则集转换:")
convert(geosite_path, "geosite", geosite_all_tag) convert(GEO_VAR.SITE_PATH, "geosite", GEO_VAR.SITE_TAGS)
convert(geoip_path, "geoip", geoip_all_tag) convert(GEO_VAR.IP_PATH, "geoip", GEO_VAR.IP_TAGS)
end
local new_port
local function get_new_port()
local cmd_format = ". /usr/share/passwall/utils.sh ; echo -n $(get_new_port %s tcp)"
local set_port = 0
if new_port and tonumber(new_port) then
set_port = tonumber(new_port) + 1
end
new_port = tonumber(sys.exec(string.format(cmd_format, set_port == 0 and "auto" or set_port)))
return new_port
end end
function gen_outbound(flag, node, tag, proxy_table) function gen_outbound(flag, node, tag, proxy_table)
@@ -94,7 +102,7 @@ function gen_outbound(flag, node, tag, proxy_table)
if node.type ~= "sing-box" then if node.type ~= "sing-box" then
local relay_port = node.port local relay_port = node.port
new_port = get_new_port() local new_port = api.get_new_port()
local config_file = string.format("%s_%s_%s.json", flag, tag, new_port) local config_file = string.format("%s_%s_%s.json", flag, tag, new_port)
if tag and node_id and not tag:find(node_id) then if tag and node_id and not tag:find(node_id) then
config_file = string.format("%s_%s_%s_%s.json", flag, tag, node_id, new_port) config_file = string.format("%s_%s_%s_%s.json", flag, tag, node_id, new_port)
@@ -925,6 +933,7 @@ function gen_config(var)
local dns = nil local dns = nil
local inbounds = {} local inbounds = {}
local outbounds = {} local outbounds = {}
local rule_set_table = {}
local COMMON = {} local COMMON = {}
local singbox_settings = uci:get_all(appname, "@global_singbox[0]") or {} local singbox_settings = uci:get_all(appname, "@global_singbox[0]") or {}
@@ -935,6 +944,59 @@ function gen_config(var)
local experimental = nil local experimental = nil
function add_rule_set(tab)
if tab and next(tab) and tab.tag and not rule_set_table[tab.tag]then
rule_set_table[tab.tag] = tab
end
end
function parse_rule_set(w, rs)
-- Format: remote:https://raw.githubusercontent.com/lyc8503/sing-box-rules/rule-set-geosite/geosite-netflix.srs'
-- Format: local:/usr/share/sing-box/geosite-netflix.srs'
local result = nil
if w and #w > 0 then
if w:find("local:") == 1 or w:find("remote:") == 1 then
local _type = w:sub(1, w:find(":") - 1) -- "local" or "remote"
w = w:sub(w:find(":") + 1, #w)
local format = nil
local filename = w:sub(-w:reverse():find("/") + 1) -- geosite-netflix.srs
local suffix = ""
local find_doc = filename:reverse():find("%.")
if find_doc then
suffix = filename:sub(-find_doc + 1) -- "srs" or "json"
end
if suffix == "srs" then
format = "binary"
elseif suffix == "json" then
format = "source"
end
if format then
local rule_set_tag = filename:sub(1, filename:find("%.") - 1) --geosite-netflix
if rule_set_tag and #rule_set_tag > 0 then
if rs then
rule_set_tag = "rs_" .. rule_set_tag
end
result = {
type = _type,
tag = rule_set_tag,
format = format,
path = _type == "local" and w or nil,
url = _type == "remote" and w or nil,
--download_detour = _type == "remote" and "",
--update_interval = _type == "remote" and "",
}
end
end
end
end
return result
end
function geo_rule_set(prefix, rule_name)
local output_srs_file = "local:" .. GEO_VAR.TO_SRS_PATH .. prefix .. "-" .. rule_name .. ".srs"
return parse_rule_set(output_srs_file)
end
if node_id then if node_id then
local node = uci:get_all(appname, node_id) local node = uci:get_all(appname, node_id)
if node then if node then
@@ -1154,6 +1216,8 @@ function gen_config(var)
local preproxy_tag = preproxy_rule_name local preproxy_tag = preproxy_rule_name
local preproxy_node_id = preproxy_rule_name and node["main_node"] or nil local preproxy_node_id = preproxy_rule_name and node["main_node"] or nil
inner_fakedns = node.fakedns or "0"
local function gen_shunt_node(rule_name, _node_id) local function gen_shunt_node(rule_name, _node_id)
if not rule_name then return nil, nil end if not rule_name then return nil, nil end
if not _node_id then _node_id = node[rule_name] end if not _node_id then _node_id = node[rule_name] end
@@ -1205,7 +1269,7 @@ function gen_config(var)
pre_proxy = true pre_proxy = true
end end
if pre_proxy then if pre_proxy then
new_port = get_new_port() local new_port = api.get_new_port()
table.insert(inbounds, { table.insert(inbounds, {
type = "direct", type = "direct",
tag = "proxy_" .. rule_name, tag = "proxy_" .. rule_name,
@@ -1344,6 +1408,8 @@ function gen_config(var)
if is_private or #source_ip_cidr > 0 then rule.rule_set_ip_cidr_match_source = true end if is_private or #source_ip_cidr > 0 then rule.rule_set_ip_cidr_match_source = true end
end end
--[[
-- Too low usage rate, hidden
if e.sourcePort then if e.sourcePort then
local source_port = {} local source_port = {}
local source_port_range = {} local source_port_range = {}
@@ -1357,6 +1423,7 @@ function gen_config(var)
rule.source_port = #source_port > 0 and source_port or nil rule.source_port = #source_port > 0 and source_port or nil
rule.source_port_range = #source_port_range > 0 and source_port_range or nil rule.source_port_range = #source_port_range > 0 and source_port_range or nil
end end
]]--
if e.port then if e.port then
local port = {} local port = {}
@@ -1372,7 +1439,7 @@ function gen_config(var)
rule.port_range = #port_range > 0 and port_range or nil rule.port_range = #port_range > 0 and port_range or nil
end end
local rule_set_tag = {} local rule_set = {}
if e.domain_list then if e.domain_list then
local domain_table = { local domain_table = {
@@ -1382,20 +1449,34 @@ function gen_config(var)
domain_keyword = {}, domain_keyword = {},
domain_regex = {}, domain_regex = {},
rule_set = {}, rule_set = {},
fakedns = nil,
invert = e.invert == "1" and true or nil
} }
string.gsub(e.domain_list, '[^' .. "\r\n" .. ']+', function(w) string.gsub(e.domain_list, '[^' .. "\r\n" .. ']+', function(w)
if w:find("#") == 1 then return end if w:find("#") == 1 then return end
if w:find("geosite:") == 1 then if w:find("geosite:") == 1 then
local _geosite = w:sub(1 + #"geosite:") --适配srs local _geosite = w:sub(1 + #"geosite:") --适配srs
geosite_all_tag[_geosite] = true local t = geo_rule_set("geosite", _geosite)
table.insert(rule_set_tag, "geosite-" .. _geosite) if t then
table.insert(domain_table.rule_set, "geosite-" .. _geosite) GEO_VAR.SITE_TAGS[_geosite] = true
add_rule_set(t)
table.insert(rule_set, t.tag)
table.insert(domain_table.rule_set, t.tag)
end
elseif w:find("regexp:") == 1 then elseif w:find("regexp:") == 1 then
table.insert(domain_table.domain_regex, w:sub(1 + #"regexp:")) table.insert(domain_table.domain_regex, w:sub(1 + #"regexp:"))
elseif w:find("full:") == 1 then elseif w:find("full:") == 1 then
table.insert(domain_table.domain, w:sub(1 + #"full:")) table.insert(domain_table.domain, w:sub(1 + #"full:"))
elseif w:find("domain:") == 1 then elseif w:find("domain:") == 1 then
table.insert(domain_table.domain_suffix, w:sub(1 + #"domain:")) table.insert(domain_table.domain_suffix, w:sub(1 + #"domain:"))
elseif w:find("rule-set:", 1, true) == 1 or w:find("rs:") == 1 then
w = w:sub(w:find(":") + 1, #w)
local t = parse_rule_set(w, true)
if t then
add_rule_set(t)
table.insert(rule_set, t.tag)
table.insert(domain_table.rule_set, t.tag)
end
else else
table.insert(domain_table.domain_keyword, w) table.insert(domain_table.domain_keyword, w)
end end
@@ -1404,6 +1485,10 @@ function gen_config(var)
rule.domain_suffix = #domain_table.domain_suffix > 0 and domain_table.domain_suffix or nil rule.domain_suffix = #domain_table.domain_suffix > 0 and domain_table.domain_suffix or nil
rule.domain_keyword = #domain_table.domain_keyword > 0 and domain_table.domain_keyword or nil rule.domain_keyword = #domain_table.domain_keyword > 0 and domain_table.domain_keyword or nil
rule.domain_regex = #domain_table.domain_regex > 0 and domain_table.domain_regex or nil rule.domain_regex = #domain_table.domain_regex > 0 and domain_table.domain_regex or nil
rule.rule_set = #domain_table.rule_set > 0 and domain_table.rule_set or nil
if inner_fakedns == "1" and node[e[".name"] .. "_fakedns"] == "1" then
domain_table.fakedns = true
end
if outboundTag then if outboundTag then
table.insert(dns_domain_rules, api.clone(domain_table)) table.insert(dns_domain_rules, api.clone(domain_table))
@@ -1420,8 +1505,19 @@ function gen_config(var)
if _geoip == "private" then if _geoip == "private" then
is_private = true is_private = true
else else
geoip_all_tag[_geoip] = true local t = geo_rule_set("geoip", _geoip)
table.insert(rule_set_tag, "geoip-" .. _geoip) if t then
GEO_VAR.IP_TAGS[_geoip] = true
add_rule_set(t)
table.insert(rule_set, t.tag)
end
end
elseif w:find("rule-set:", 1, true) == 1 or w:find("rs:") == 1 then
w = w:sub(w:find(":") + 1, #w)
local t = parse_rule_set(w, true)
if t then
add_rule_set(t)
table.insert(rule_set, t.tag)
end end
else else
table.insert(ip_cidr, w) table.insert(ip_cidr, w)
@@ -1432,7 +1528,8 @@ function gen_config(var)
rule.ip_cidr = #ip_cidr > 0 and ip_cidr or nil rule.ip_cidr = #ip_cidr > 0 and ip_cidr or nil
end end
rule.rule_set = #rule_set_tag > 0 and rule_set_tag or nil --适配srs rule.rule_set = #rule_set > 0 and rule_set or nil --适配srs
rule.invert = e.invert == "1" and true or nil
table.insert(rules, rule) table.insert(rules, rule)
end end
@@ -1441,34 +1538,6 @@ function gen_config(var)
for index, value in ipairs(rules) do for index, value in ipairs(rules) do
table.insert(route.rules, rules[index]) table.insert(route.rules, rules[index])
end end
local rule_set = {} --适配srs
if next(geosite_all_tag) then
for k,v in pairs(geosite_all_tag) do
local srs_file = srss_path .. "geosite-" .. k ..".srs"
local _rule_set = {
tag = "geosite-" .. k,
type = "local",
format = "binary",
path = srs_file
}
table.insert(rule_set, _rule_set)
end
end
if next(geoip_all_tag) then
for k,v in pairs(geoip_all_tag) do
local srs_file = srss_path .. "geoip-" .. k ..".srs"
local _rule_set = {
tag = "geoip-" .. k,
type = "local",
format = "binary",
path = srs_file
}
table.insert(rule_set, _rule_set)
end
end
route.rule_set = #rule_set >0 and rule_set or nil
elseif node.protocol == "_urltest" then elseif node.protocol == "_urltest" then
if node.urltest_node then if node.urltest_node then
COMMON.default_outbound_tag = gen_urltest(node) COMMON.default_outbound_tag = gen_urltest(node)
@@ -1571,7 +1640,7 @@ function gen_config(var)
table.insert(dns.servers, remote_server) table.insert(dns.servers, remote_server)
end end
if remote_dns_fake then if remote_dns_fake or inner_fakedns == "1" then
dns.fakeip = { dns.fakeip = {
enabled = true, enabled = true,
inet4_range = "198.18.0.0/15", inet4_range = "198.18.0.0/15",
@@ -1637,7 +1706,7 @@ function gen_config(var)
table.insert(dns.servers, remote_server) table.insert(dns.servers, remote_server)
end end
if remote_dns_fake then if remote_dns_fake or inner_fakedns == "1" then
table.insert(dns.servers, { table.insert(dns.servers, {
tag = fakedns_tag, tag = fakedns_tag,
type = "fakeip", type = "fakeip",
@@ -1755,8 +1824,9 @@ function gen_config(var)
domain_suffix = (value.domain_suffix and #value.domain_suffix > 0) and value.domain_suffix or nil, domain_suffix = (value.domain_suffix and #value.domain_suffix > 0) and value.domain_suffix or nil,
domain_keyword = (value.domain_keyword and #value.domain_keyword > 0) and value.domain_keyword or nil, domain_keyword = (value.domain_keyword and #value.domain_keyword > 0) and value.domain_keyword or nil,
domain_regex = (value.domain_regex and #value.domain_regex > 0) and value.domain_regex or nil, domain_regex = (value.domain_regex and #value.domain_regex > 0) and value.domain_regex or nil,
rule_set = (value.rule_set and #value.rule_set > 0) and value.rule_set or nil, --适配srs rule_set = (value.rule_set and #value.rule_set > 0) and value.rule_set or nil, --适配srs
disable_cache = false, disable_cache = false,
invert = value.invert,
strategy = (version_ge_1_12_0 and value.outboundTag == "direct") and direct_strategy or nil --Migrate to 1.12 DNS strategy = (version_ge_1_12_0 and value.outboundTag == "direct") and direct_strategy or nil --Migrate to 1.12 DNS
} }
if version_ge_1_12_0 and value.outboundTag == "block" then --Migrate to 1.12 DNS if version_ge_1_12_0 and value.outboundTag == "block" then --Migrate to 1.12 DNS
@@ -1778,7 +1848,7 @@ function gen_config(var)
table.insert(dns.servers, remote_shunt_server) table.insert(dns.servers, remote_shunt_server)
dns_rule.server = remote_shunt_server.tag dns_rule.server = remote_shunt_server.tag
end end
if remote_dns_fake then if value.fakedns then
local fakedns_dns_rule = api.clone(dns_rule) local fakedns_dns_rule = api.clone(dns_rule)
fakedns_dns_rule.query_type = { fakedns_dns_rule.query_type = {
"A", "AAAA" "A", "AAAA"
@@ -1813,6 +1883,13 @@ function gen_config(var)
}) })
end end
if next(rule_set_table) then
route.rule_set = {}
for k, v in pairs(rule_set_table) do
table.insert(route.rule_set, v)
end
end
if inbounds or outbounds then if inbounds or outbounds then
local config = { local config = {
log = { log = {
@@ -2053,12 +2130,13 @@ end
_G.gen_config = gen_config _G.gen_config = gen_config
_G.gen_proto_config = gen_proto_config _G.gen_proto_config = gen_proto_config
_G.geo_convert_srs = geo_convert_srs
if arg[1] then if arg[1] then
local func =_G[arg[1]] local func =_G[arg[1]]
if func then if func then
print(func(api.get_function_args(arg))) print(func(api.get_function_args(arg)))
if (next(geosite_all_tag) or next(geoip_all_tag)) and not no_run then if (next(GEO_VAR.SITE_TAGS) or next(GEO_VAR.IP_TAGS)) and not no_run then
convert_geofile() convert_geofile()
end end
end end
@@ -6,18 +6,6 @@ local jsonc = api.jsonc
local appname = "passwall" local appname = "passwall"
local fs = api.fs local fs = api.fs
local new_port
local function get_new_port()
local cmd_format = ". /usr/share/passwall/utils.sh ; echo -n $(get_new_port %s tcp)"
local set_port = 0
if new_port and tonumber(new_port) then
set_port = tonumber(new_port) + 1
end
new_port = tonumber(sys.exec(string.format(cmd_format, set_port == 0 and "auto" or set_port)))
return new_port
end
local function get_noise_packets() local function get_noise_packets()
local noises = {} local noises = {}
uci:foreach(appname, "xray_noise_packets", function(n) uci:foreach(appname, "xray_noise_packets", function(n)
@@ -73,7 +61,7 @@ function gen_outbound(flag, node, tag, proxy_table)
node.transport = "tcp" node.transport = "tcp"
else else
local relay_port = node.port local relay_port = node.port
new_port = get_new_port() local new_port = api.get_new_port()
local config_file = string.format("%s_%s_%s.json", flag, tag, new_port) local config_file = string.format("%s_%s_%s.json", flag, tag, new_port)
if tag and node_id and not tag:find(node_id) then if tag and node_id and not tag:find(node_id) then
config_file = string.format("%s_%s_%s_%s.json", flag, tag, node_id, new_port) config_file = string.format("%s_%s_%s_%s.json", flag, tag, node_id, new_port)
@@ -731,50 +719,6 @@ function gen_config(var)
table.insert(inbounds, inbound) table.insert(inbounds, inbound)
end end
if tcp_redir_port or udp_redir_port then
local inbound = {
protocol = "dokodemo-door",
settings = {network = "tcp,udp", followRedirect = true},
streamSettings = {sockopt = {tproxy = "tproxy"}},
sniffing = {
enabled = xray_settings.sniffing_override_dest == "1" or node.protocol == "_shunt"
}
}
if inbound.sniffing.enabled == true then
inbound.sniffing.destOverride = {"http", "tls", "quic"}
inbound.sniffing.metadataOnly = false
inbound.sniffing.routeOnly = xray_settings.sniffing_override_dest ~= "1" or nil
inbound.sniffing.domainsExcluded = xray_settings.sniffing_override_dest == "1" and get_domain_excluded() or nil
end
if remote_dns_fake then
inbound.sniffing.enabled = true
if not inbound.sniffing.destOverride then
inbound.sniffing.destOverride = {"fakedns"}
inbound.sniffing.metadataOnly = true
else
table.insert(inbound.sniffing.destOverride, "fakedns")
inbound.sniffing.metadataOnly = false
end
end
if tcp_redir_port then
local tcp_inbound = api.clone(inbound)
tcp_inbound.tag = "tcp_redir"
tcp_inbound.settings.network = "tcp"
tcp_inbound.port = tonumber(tcp_redir_port)
tcp_inbound.streamSettings.sockopt.tproxy = tcp_proxy_way
table.insert(inbounds, tcp_inbound)
end
if udp_redir_port then
local udp_inbound = api.clone(inbound)
udp_inbound.tag = "udp_redir"
udp_inbound.settings.network = "udp"
udp_inbound.port = tonumber(udp_redir_port)
table.insert(inbounds, udp_inbound)
end
end
local function gen_loopback(outbound_tag, loopback_dst) local function gen_loopback(outbound_tag, loopback_dst)
if not outbound_tag or outbound_tag == "" then return nil end if not outbound_tag or outbound_tag == "" then return nil end
local inbound_tag = loopback_dst and "lo-to-" .. loopback_dst or outbound_tag .. "-lo" local inbound_tag = loopback_dst and "lo-to-" .. loopback_dst or outbound_tag .. "-lo"
@@ -993,6 +937,8 @@ function gen_config(var)
local preproxy_outbound_tag, preproxy_balancer_tag local preproxy_outbound_tag, preproxy_balancer_tag
local preproxy_nodes local preproxy_nodes
inner_fakedns = node.fakedns or "0"
local function gen_shunt_node(rule_name, _node_id) local function gen_shunt_node(rule_name, _node_id)
if not rule_name then return nil, nil end if not rule_name then return nil, nil end
if not _node_id then if not _node_id then
@@ -1047,7 +993,7 @@ function gen_config(var)
end end
--new outbound --new outbound
if use_proxy and _node.type ~= "Xray" then if use_proxy and _node.type ~= "Xray" then
new_port = get_new_port() local new_port = api.get_new_port()
table.insert(inbounds, { table.insert(inbounds, {
tag = "proxy_" .. rule_name, tag = "proxy_" .. rule_name,
listen = "127.0.0.1", listen = "127.0.0.1",
@@ -1193,13 +1139,18 @@ function gen_config(var)
outboundTag = outbound_tag, outboundTag = outbound_tag,
balancerTag = balancer_tag, balancerTag = balancer_tag,
domain = {}, domain = {},
fakedns = nil,
} }
domains = {} domains = {}
string.gsub(e.domain_list, '[^' .. "\r\n" .. ']+', function(w) string.gsub(e.domain_list, '[^' .. "\r\n" .. ']+', function(w)
if w:find("#") == 1 then return end if w:find("#") == 1 then return end
if w:find("rule-set:", 1, true) == 1 or w:find("rs:") == 1 then return end
table.insert(domains, w) table.insert(domains, w)
table.insert(domain_table.domain, w) table.insert(domain_table.domain, w)
end) end)
if inner_fakedns == "1" and node[e[".name"] .. "_fakedns"] == "1" and #domains > 0 then
domain_table.fakedns = true
end
if outbound_tag or balancer_tag then if outbound_tag or balancer_tag then
table.insert(dns_domain_rules, api.clone(domain_table)) table.insert(dns_domain_rules, api.clone(domain_table))
end end
@@ -1210,6 +1161,7 @@ function gen_config(var)
ip = {} ip = {}
string.gsub(e.ip_list, '[^' .. "\r\n" .. ']+', function(w) string.gsub(e.ip_list, '[^' .. "\r\n" .. ']+', function(w)
if w:find("#") == 1 then return end if w:find("#") == 1 then return end
if w:find("rule-set:", 1, true) == 1 or w:find("rs:") == 1 then return end
table.insert(ip, w) table.insert(ip, w)
end) end)
if #ip == 0 then ip = nil end if #ip == 0 then ip = nil end
@@ -1228,7 +1180,7 @@ function gen_config(var)
balancerTag = balancer_tag, balancerTag = balancer_tag,
network = e["network"] or "tcp,udp", network = e["network"] or "tcp,udp",
source = source, source = source,
sourcePort = e["sourcePort"] ~= "" and e["sourcePort"] or nil, --sourcePort = e["sourcePort"] ~= "" and e["sourcePort"] or nil,
port = e["port"] ~= "" and e["port"] or nil, port = e["port"] ~= "" and e["port"] or nil,
protocol = protocols protocol = protocols
} }
@@ -1320,6 +1272,50 @@ function gen_config(var)
network = "tcp,udp" network = "tcp,udp"
}) })
end end
if tcp_redir_port or udp_redir_port then
local inbound = {
protocol = "dokodemo-door",
settings = {network = "tcp,udp", followRedirect = true},
streamSettings = {sockopt = {tproxy = "tproxy"}},
sniffing = {
enabled = xray_settings.sniffing_override_dest == "1" or node.protocol == "_shunt"
}
}
if inbound.sniffing.enabled == true then
inbound.sniffing.destOverride = {"http", "tls", "quic"}
inbound.sniffing.metadataOnly = false
inbound.sniffing.routeOnly = xray_settings.sniffing_override_dest ~= "1" or nil
inbound.sniffing.domainsExcluded = xray_settings.sniffing_override_dest == "1" and get_domain_excluded() or nil
end
if remote_dns_fake or inner_fakedns == "1" then
inbound.sniffing.enabled = true
if not inbound.sniffing.destOverride then
inbound.sniffing.destOverride = {"fakedns"}
inbound.sniffing.metadataOnly = true
else
table.insert(inbound.sniffing.destOverride, "fakedns")
inbound.sniffing.metadataOnly = false
end
end
if tcp_redir_port then
local tcp_inbound = api.clone(inbound)
tcp_inbound.tag = "tcp_redir"
tcp_inbound.settings.network = "tcp"
tcp_inbound.port = tonumber(tcp_redir_port)
tcp_inbound.streamSettings.sockopt.tproxy = tcp_proxy_way
table.insert(inbounds, tcp_inbound)
end
if udp_redir_port then
local udp_inbound = api.clone(inbound)
udp_inbound.tag = "udp_redir"
udp_inbound.settings.network = "udp"
udp_inbound.port = tonumber(udp_redir_port)
table.insert(inbounds, udp_inbound)
end
end
end end
if (remote_dns_udp_server and remote_dns_udp_port) or (remote_dns_tcp_server and remote_dns_tcp_port) then if (remote_dns_udp_server and remote_dns_udp_port) or (remote_dns_tcp_server and remote_dns_tcp_port) then
@@ -1402,7 +1398,7 @@ function gen_config(var)
address = "fakedns", address = "fakedns",
} }
if remote_dns_fake then if remote_dns_fake or inner_fakedns == "1" then
fakedns = {} fakedns = {}
local fakedns4 = { local fakedns4 = {
ipPool = "198.18.0.0/15", ipPool = "198.18.0.0/15",
@@ -1420,7 +1416,9 @@ function gen_config(var)
elseif remote_dns_query_strategy == "UseIPv6" then elseif remote_dns_query_strategy == "UseIPv6" then
table.insert(fakedns, fakedns6) table.insert(fakedns, fakedns6)
end end
table.insert(dns.servers, 1, _remote_fakedns) if remote_dns_fake and inner_fakedns == "0" then
table.insert(dns.servers, 1, _remote_fakedns)
end
end end
local dns_outbound_tag = "direct" local dns_outbound_tag = "direct"
@@ -1510,7 +1508,7 @@ function gen_config(var)
if value.outboundTag == "direct" and _direct_dns.address then if value.outboundTag == "direct" and _direct_dns.address then
dns_server = api.clone(_direct_dns) dns_server = api.clone(_direct_dns)
else else
if remote_dns_fake then if value.fakedns then
dns_server = api.clone(_remote_fakedns) dns_server = api.clone(_remote_fakedns)
else else
dns_server = api.clone(_remote_dns) dns_server = api.clone(_remote_dns)
@@ -1273,9 +1273,21 @@ msgstr "完整匹配: 由'full:'开始,余下部分是一个域名。当此域
msgid "Pre-defined domain list: Begining with 'geosite:' and the rest is a name, such as geosite:google or geosite:cn." msgid "Pre-defined domain list: Begining with 'geosite:' and the rest is a name, such as geosite:google or geosite:cn."
msgstr "预定义域名列表:由'geosite:'开头,余下部分是一个名称,如geosite:google或者geosite:cn。" msgstr "预定义域名列表:由'geosite:'开头,余下部分是一个名称,如geosite:google或者geosite:cn。"
msgid "Sing-Box is compatible with Geo rules and rule-set. rule-set begin with 'rule-set:remote:' or 'rule-set:local:'."
msgstr "Sing-Box 可兼容 Geo 规则和规则集,规则集由 'rule-set:remote:' 或 'rule-set:local:' 开始。"
msgid "Such as:"
msgstr "例如:"
msgid "Annotation: Begining with #" msgid "Annotation: Begining with #"
msgstr "注释: 由 # 开头" msgstr "注释: 由 # 开头"
msgid "Invert match result."
msgstr "反选匹配结果。"
msgid "Only support Sing-Box."
msgstr "只支持 Sing-Box。"
msgid "IP: such as '127.0.0.1'." msgid "IP: such as '127.0.0.1'."
msgstr "IP: 形如'127.0.0.1'。" msgstr "IP: 形如'127.0.0.1'。"
@@ -3,8 +3,8 @@
# Copyright (C) 2021-2025 xiaorouji # Copyright (C) 2021-2025 xiaorouji
# Copyright (C) 2026 Openwrt-Passwall Organization # Copyright (C) 2026 Openwrt-Passwall Organization
. $IPKG_INSTROOT/lib/functions.sh . /lib/functions.sh
. $IPKG_INSTROOT/lib/functions/service.sh . /lib/functions/service.sh
. /usr/share/passwall/utils.sh . /usr/share/passwall/utils.sh
@@ -71,109 +71,6 @@ check_run_environment() {
fi fi
} }
first_type() {
[ "${1#/}" != "$1" ] && [ -x "$1" ] && echo "$1" && return
for p in "/bin/$1" "/usr/bin/$1" "${TMP_BIN_PATH:-/tmp}/$1"; do
[ -x "$p" ] && echo "$p" && return
done
command -v "$1" 2>/dev/null || command -v "$2" 2>/dev/null
}
is_socks_wrap() {
case "$1" in
Socks_*) return 0 ;;
*) return 1 ;;
esac
}
ln_run() {
local file_func=${1}
local ln_name=${2}
local output=${3}
shift 3;
if [ "${file_func%%/*}" != "${file_func}" ]; then
[ ! -L "${file_func}" ] && {
ln -s "${file_func}" "${TMP_BIN_PATH}/${ln_name}" >/dev/null 2>&1
file_func="${TMP_BIN_PATH}/${ln_name}"
}
[ -x "${file_func}" ] || echolog " - $(readlink ${file_func}) 没有执行权限,无法启动:${file_func} $*"
fi
#echo "${file_func} $*" >&2
[ -n "${file_func}" ] || echolog " - 找不到 ${ln_name},无法启动..."
[ "${output}" != "/dev/null" ] && [ "${ln_name}" != "chinadns-ng" ] && {
local persist_log_path=$(config_t_get global persist_log_path)
local sys_log=$(config_t_get global sys_log "0")
}
if [ -z "$persist_log_path" ] && [ "$sys_log" != "1" ]; then
${file_func:-echolog " - ${ln_name}"} "$@" >${output} 2>&1 &
else
[ "${output: -1, -7}" == "TCP.log" ] && local protocol="TCP"
[ "${output: -1, -7}" == "UDP.log" ] && local protocol="UDP"
if [ -n "${persist_log_path}" ]; then
mkdir -p ${persist_log_path}
local log_file=${persist_log_path}/passwall_${protocol}_${ln_name}_$(date '+%F').log
echolog "记录到持久性日志文件:${log_file}"
${file_func:-echolog " - ${ln_name}"} "$@" >> ${log_file} 2>&1 &
sys_log=0
fi
if [ "${sys_log}" == "1" ]; then
echolog "记录 ${ln_name}_${protocol} 到系统日志"
${file_func:-echolog " - ${ln_name}"} "$@" 2>&1 | logger -t PASSWALL_${protocol}_${ln_name} &
fi
fi
process_count=$(ls $TMP_SCRIPT_FUNC_PATH | wc -l)
process_count=$((process_count + 1))
echo "${file_func:-echolog " - ${ln_name}"} $@ >${output}" > $TMP_SCRIPT_FUNC_PATH/$process_count
}
parse_doh() {
local __doh=$1 __url_var=$2 __host_var=$3 __port_var=$4 __bootstrap_var=$5
__doh=$(echo -e "$__doh" | tr -d ' \t\n')
local __url=${__doh%%,*}
local __bootstrap=${__doh#*,}
local __host_port=$(lua_api "get_domain_from_url(\"${__url}\")")
local __host __port
if echo "${__host_port}" | grep -q '^\[.*\]:[0-9]\+$'; then
__host=${__host_port%%]:*}]
__port=${__host_port##*:}
elif echo "${__host_port}" | grep -q ':[0-9]\+$'; then
__host=${__host_port%:*}
__port=${__host_port##*:}
else
__host=${__host_port}
__port=443
fi
__host=${__host#[}
__host=${__host%]}
if [ "$(lua_api "is_ip(\"${__host}\")")" = "true" ]; then
__bootstrap=${__host}
fi
__bootstrap=${__bootstrap#[}
__bootstrap=${__bootstrap%]}
eval "${__url_var}='${__url}' ${__host_var}='${__host}' ${__port_var}='${__port}' ${__bootstrap_var}='${__bootstrap}'"
}
get_geoip() {
local geoip_code="$1"
local geoip_type_flag=""
local geoip_path="${V2RAY_LOCATION_ASSET%*/}/geoip.dat"
[ -s "$geoip_path" ] || { echo ""; return 1; }
case "$2" in
"ipv4") geoip_type_flag="-ipv6=false" ;;
"ipv6") geoip_type_flag="-ipv4=false" ;;
esac
if type geoview &> /dev/null; then
geoview -input "$geoip_path" -list "$geoip_code" $geoip_type_flag -lowmem=true
return 0
else
echo ""
return 1
fi
}
run_ipt2socks() { run_ipt2socks() {
local flag proto tcp_tproxy local_port socks_address socks_port socks_username socks_password log_file local flag proto tcp_tproxy local_port socks_address socks_port socks_username socks_password log_file
local _extra_param="" local _extra_param=""
@@ -1111,14 +1008,6 @@ socks_node_switch() {
} }
} }
clean_log() {
logsnum=$(cat $LOG_FILE 2>/dev/null | wc -l)
[ "$logsnum" -gt 1000 ] && {
echo "" > $LOG_FILE
echolog "日志文件过长,清空处理!"
}
}
clean_crontab() { clean_crontab() {
[ -f "/tmp/lock/${CONFIG}_cron.lock" ] && return [ -f "/tmp/lock/${CONFIG}_cron.lock" ] && return
touch /etc/crontabs/root touch /etc/crontabs/root
@@ -1556,10 +1445,6 @@ start_haproxy() {
ln_run "$(first_type haproxy)" haproxy "/dev/null" -f "${haproxy_path}/${haproxy_conf}" ln_run "$(first_type haproxy)" haproxy "/dev/null" -f "${haproxy_path}/${haproxy_conf}"
} }
kill_all() {
kill -9 $(pidof "$@") >/dev/null 2>&1
}
acl_app() { acl_app() {
local items=$(uci show ${CONFIG} | grep "=acl_rule" | cut -d '.' -sf 2 | cut -d '=' -sf 1) local items=$(uci show ${CONFIG} | grep "=acl_rule" | cut -d '.' -sf 2 | cut -d '=' -sf 1)
[ -n "$items" ] && { [ -n "$items" ] && {
@@ -1754,7 +1639,6 @@ acl_app() {
if [ -n "${type}" ] && ([ "${type}" = "sing-box" ] || [ "${type}" = "xray" ]); then if [ -n "${type}" ] && ([ "${type}" = "sing-box" ] || [ "${type}" = "xray" ]); then
config_file="acl/${tcp_node}_TCP_${redir_port}.json" config_file="acl/${tcp_node}_TCP_${redir_port}.json"
_extra_param="socks_address=127.0.0.1 socks_port=$socks_port" _extra_param="socks_address=127.0.0.1 socks_port=$socks_port"
_extra_param="${_extra_param} tcp_proxy_way=$TCP_PROXY_WAY"
if [ "$dns_mode" = "sing-box" ] || [ "$dns_mode" = "xray" ]; then if [ "$dns_mode" = "sing-box" ] || [ "$dns_mode" = "xray" ]; then
dns_port=$(get_new_port $(expr $dns_port + 1)) dns_port=$(get_new_port $(expr $dns_port + 1))
_dns_port=$dns_port _dns_port=$dns_port
@@ -1766,6 +1650,7 @@ acl_app() {
[ "$dns_mode" = "xray" ] && [ "$v2ray_dns_mode" = "tcp+doh" ] && remote_dns_doh=${remote_dns_doh:-https://1.1.1.1/dns-query} [ "$dns_mode" = "xray" ] && [ "$v2ray_dns_mode" = "tcp+doh" ] && remote_dns_doh=${remote_dns_doh:-https://1.1.1.1/dns-query}
_extra_param="dns_listen_port=${_dns_port} remote_dns_protocol=${v2ray_dns_mode} remote_dns_udp_server=${remote_dns} remote_dns_tcp_server=${remote_dns} remote_dns_doh=${remote_dns_doh} remote_dns_query_strategy=${remote_dns_query_strategy} remote_dns_client_ip=${remote_dns_client_ip}" _extra_param="dns_listen_port=${_dns_port} remote_dns_protocol=${v2ray_dns_mode} remote_dns_udp_server=${remote_dns} remote_dns_tcp_server=${remote_dns} remote_dns_doh=${remote_dns_doh} remote_dns_query_strategy=${remote_dns_query_strategy} remote_dns_client_ip=${remote_dns_client_ip}"
fi fi
_extra_param="${_extra_param} tcp_proxy_way=$TCP_PROXY_WAY"
[ -n "$udp_node" ] && ([ "$udp_node" = "tcp" ] || [ "$udp_node" = "$tcp_node" ]) && { [ -n "$udp_node" ] && ([ "$udp_node" = "tcp" ] || [ "$udp_node" = "$tcp_node" ]) && {
config_file="${config_file//TCP_/TCP_UDP_}" config_file="${config_file//TCP_/TCP_UDP_}"
_extra_param="${_extra_param} udp_redir_port=$redir_port" _extra_param="${_extra_param} udp_redir_port=$redir_port"
@@ -17,17 +17,6 @@ function get_ip_port_from(str)
return result_ip, result_port return result_ip, result_port
end end
local new_port
local function get_new_port()
local cmd_format = ". /usr/share/passwall/utils.sh ; echo -n $(get_new_port %s tcp)"
local set_port = 0
if new_port and tonumber(new_port) then
set_port = tonumber(new_port) + 1
end
new_port = tonumber(sys.exec(string.format(cmd_format, set_port == 0 and "auto" or set_port)))
return new_port
end
local var = api.get_args(arg) local var = api.get_args(arg)
local haproxy_path = var["-path"] local haproxy_path = var["-path"]
local haproxy_conf = var["-conf"] local haproxy_conf = var["-conf"]
@@ -119,7 +108,7 @@ uci:foreach(appname, "haproxy_config", function(t)
if health_check_type == "passwall_logic" then if health_check_type == "passwall_logic" then
if server_node.type ~= "Socks" then if server_node.type ~= "Socks" then
local relay_port = server_node.port local relay_port = server_node.port
new_port = get_new_port() local new_port = api.get_new_port()
local config_file = string.format("haproxy_%s_%s.json", t[".name"], new_port) local config_file = string.format("haproxy_%s_%s.json", t[".name"], new_port)
sys.call(string.format('/usr/share/%s/app.sh run_socks "%s"> /dev/null', sys.call(string.format('/usr/share/%s/app.sh run_socks "%s"> /dev/null',
appname, appname,
@@ -166,17 +166,23 @@ local file_vpslist = TMP_ACL_PATH .. "/vpslist"
if not is_file_nonzero(file_vpslist) then if not is_file_nonzero(file_vpslist) then
local f_out = io.open(file_vpslist, "w") local f_out = io.open(file_vpslist, "w")
local written_domains = {} local written_domains = {}
uci:foreach(appname, "nodes", function(t) local function process_address(address)
local function process_address(address) if address == "engage.cloudflareclient.com" then return end
if address == "engage.cloudflareclient.com" then return end if datatypes.hostname(address) and not written_domains[address] then
if datatypes.hostname(address) and not written_domains[address] then f_out:write(address .. "\n")
f_out:write(address .. "\n") written_domains[address] = true
written_domains[address] = true
end
end end
end
uci:foreach(appname, "nodes", function(t)
process_address(t.address) process_address(t.address)
process_address(t.download_address) process_address(t.download_address)
end) end)
uci:foreach(appname, "subscribe_list", function(t) --订阅链接
local url, _ = api.get_domain_port_from_url(t.url or "")
if url and url ~= "" then
process_address(url)
end
end)
f_out:close() f_out:close()
end end
if is_file_nonzero(file_vpslist) then if is_file_nonzero(file_vpslist) then
@@ -365,17 +365,23 @@ function add_rule(var)
setflag_4 .. "passwall_vps", setflag_4 .. "passwall_vps",
setflag_6 .. "passwall_vps6" setflag_6 .. "passwall_vps6"
} }
uci:foreach(appname, "nodes", function(t) local function process_address(address)
local function process_address(address) if address == "engage.cloudflareclient.com" then return end
if address == "engage.cloudflareclient.com" then return end if datatypes.hostname(address) then
if datatypes.hostname(address) then set_domain_dns(address, fwd_dns)
set_domain_dns(address, fwd_dns) set_domain_ipset(address, table.concat(sets, ","))
set_domain_ipset(address, table.concat(sets, ","))
end
end end
end
uci:foreach(appname, "nodes", function(t)
process_address(t.address) process_address(t.address)
process_address(t.download_address) process_address(t.download_address)
end) end)
uci:foreach(appname, "subscribe_list", function(t) --订阅链接
local url, _ = api.get_domain_port_from_url(t.url or "")
if url and url ~= "" then
process_address(url)
end
end)
log(string.format(" - 节点列表中的域名(vpslist)%s", fwd_dns or "默认")) log(string.format(" - 节点列表中的域名(vpslist)%s", fwd_dns or "默认"))
end end
end end
@@ -309,17 +309,23 @@ local file_vpslist = TMP_ACL_PATH .. "/vpslist"
if not is_file_nonzero(file_vpslist) then if not is_file_nonzero(file_vpslist) then
local f_out = io.open(file_vpslist, "w") local f_out = io.open(file_vpslist, "w")
local written_domains = {} local written_domains = {}
uci:foreach(appname, "nodes", function(t) local function process_address(address)
local function process_address(address) if address == "engage.cloudflareclient.com" then return end
if address == "engage.cloudflareclient.com" then return end if datatypes.hostname(address) and not written_domains[address] then
if datatypes.hostname(address) and not written_domains[address] then f_out:write(address .. "\n")
f_out:write(address .. "\n") written_domains[address] = true
written_domains[address] = true
end
end end
end
uci:foreach(appname, "nodes", function(t)
process_address(t.address) process_address(t.address)
process_address(t.download_address) process_address(t.download_address)
end) end)
uci:foreach(appname, "subscribe_list", function(t) --订阅链接
local url, _ = api.get_domain_port_from_url(t.url or "")
if url and url ~= "" then
process_address(url)
end
end)
f_out:close() f_out:close()
end end
if is_file_nonzero(file_vpslist) then if is_file_nonzero(file_vpslist) then
@@ -769,6 +769,9 @@ filter_vpsip() {
echolog " - [$?]加入所有IPv4节点到ipset[$IPSET_VPS]直连完成" echolog " - [$?]加入所有IPv4节点到ipset[$IPSET_VPS]直连完成"
uci show $CONFIG | grep -E "(.address=|.download_address=)" | cut -d "'" -f 2 | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_VPS6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R uci show $CONFIG | grep -E "(.address=|.download_address=)" | cut -d "'" -f 2 | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_VPS6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
echolog " - [$?]加入所有IPv6节点到ipset[$IPSET_VPS6]直连完成" echolog " - [$?]加入所有IPv6节点到ipset[$IPSET_VPS6]直连完成"
#订阅方式为直连时
get_subscribe_host | grep -E "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | grep -v "^127\.0\.0\.1$" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_VPS &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
get_subscribe_host | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_VPS6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
} }
filter_server_port() { filter_server_port() {
@@ -812,6 +812,9 @@ filter_vpsip() {
echolog " - [$?]加入所有IPv4节点到nftset[$NFTSET_VPS]直连完成" echolog " - [$?]加入所有IPv4节点到nftset[$NFTSET_VPS]直连完成"
uci show $CONFIG | grep -E "(.address=|.download_address=)" | cut -d "'" -f 2 | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | insert_nftset $NFTSET_VPS6 "-1" uci show $CONFIG | grep -E "(.address=|.download_address=)" | cut -d "'" -f 2 | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | insert_nftset $NFTSET_VPS6 "-1"
echolog " - [$?]加入所有IPv6节点到nftset[$NFTSET_VPS6]直连完成" echolog " - [$?]加入所有IPv6节点到nftset[$NFTSET_VPS6]直连完成"
#订阅方式为直连时
get_subscribe_host | grep -E "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | grep -v "^127\.0\.0\.1$" | sed -e "/^$/d" | insert_nftset $NFTSET_VPS "-1"
get_subscribe_host | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | insert_nftset $NFTSET_VPS6 "-1"
} }
filter_server_port() { filter_server_port() {
@@ -48,7 +48,7 @@ test_node() {
local node_id=$1 local node_id=$1
local _type=$(echo $(config_n_get ${node_id} type) | tr 'A-Z' 'a-z') local _type=$(echo $(config_n_get ${node_id} type) | tr 'A-Z' 'a-z')
[ -n "${_type}" ] && { [ -n "${_type}" ] && {
local _tmp_port=$(get_new_port 61080 tcp,udp) local _tmp_port=$(get_new_port 48800 tcp,udp)
$APP_FILE run_socks flag="test_node_${node_id}" node=${node_id} bind=127.0.0.1 socks_port=${_tmp_port} config_file=test_node_${node_id}.json $APP_FILE run_socks flag="test_node_${node_id}" node=${node_id} bind=127.0.0.1 socks_port=${_tmp_port} config_file=test_node_${node_id}.json
local curlx="socks5h://127.0.0.1:${_tmp_port}" local curlx="socks5h://127.0.0.1:${_tmp_port}"
sleep 1s sleep 1s
@@ -446,7 +446,14 @@ local function get_subscribe_info(cfgid, value)
for _, p in ipairs(rem_patterns) do rem_traffic = value:match(p) or rem_traffic end for _, p in ipairs(rem_patterns) do rem_traffic = value:match(p) or rem_traffic end
subscribe_info[cfgid] = subscribe_info[cfgid] or {expired_date = "", rem_traffic = ""} subscribe_info[cfgid] = subscribe_info[cfgid] or {expired_date = "", rem_traffic = ""}
if expired_date then if expired_date then
subscribe_info[cfgid]["expired_date"] = expired_date local function formatDate(str)
local y, m, d = str:match("(%d%d%d%d)[-/]?(%d%d?)[-/]?(%d%d?)")
if y and m and d then
return y .. "." .. m .. "." .. d
end
return str
end
subscribe_info[cfgid]["expired_date"] = formatDate(expired_date)
end end
if rem_traffic then if rem_traffic then
subscribe_info[cfgid]["rem_traffic"] = rem_traffic subscribe_info[cfgid]["rem_traffic"] = rem_traffic
@@ -1560,11 +1567,11 @@ local function curl(url, file, ua, mode)
curl_args[#curl_args + 1] = get_headers() curl_args[#curl_args + 1] = get_headers()
local return_code, result local return_code, result
if mode == "direct" then if mode == "direct" then
return_code, result = api.curl_direct(url, file, curl_args) return_code, result = api.curl_base(url, file, curl_args)
elseif mode == "proxy" then elseif mode == "proxy" then
return_code, result = api.curl_proxy(url, file, curl_args) return_code, result = api.curl_proxy(url, file, curl_args)
else else
return_code, result = api.curl_auto(url, file, curl_args) return_code, result = api.curl_logic(url, file, curl_args)
end end
return tonumber(result) return tonumber(result)
end end
@@ -56,7 +56,7 @@ url_test_node() {
[ -n "${_username}" ] && [ -n "${_password}" ] && curlx="socks5h://${_username}:${_password}@${_address}:${_port}" [ -n "${_username}" ] && [ -n "${_password}" ] && curlx="socks5h://${_username}:${_password}@${_address}:${_port}"
} }
else else
local _tmp_port=$(get_new_port 61080 tcp) local _tmp_port=$(get_new_port 48900 tcp,udp)
/usr/share/${CONFIG}/app.sh run_socks flag="url_test_${node_id}" node=${node_id} bind=127.0.0.1 socks_port=${_tmp_port} config_file=url_test_${node_id}.json /usr/share/${CONFIG}/app.sh run_socks flag="url_test_${node_id}" node=${node_id} bind=127.0.0.1 socks_port=${_tmp_port} config_file=url_test_${node_id}.json
local curlx="socks5h://127.0.0.1:${_tmp_port}" local curlx="socks5h://127.0.0.1:${_tmp_port}"
fi fi
@@ -20,6 +20,14 @@ echolog() {
echo -e "$d: $*" >>$LOG_FILE echo -e "$d: $*" >>$LOG_FILE
} }
clean_log() {
logsnum=$(cat $LOG_FILE 2>/dev/null | wc -l)
[ "$logsnum" -gt 1000 ] && {
echo "" > $LOG_FILE
echolog "日志文件过长,清空处理!"
}
}
config_get_type() { config_get_type() {
local ret=$(uci -q get "${CONFIG}.${1}" 2>/dev/null) local ret=$(uci -q get "${CONFIG}.${1}" 2>/dev/null)
echo "${ret:=$2}" echo "${ret:=$2}"
@@ -41,10 +49,36 @@ config_t_set() {
local ret=$(uci -q set "${CONFIG}.@${1}[${index}].${2}=${3}" 2>/dev/null) local ret=$(uci -q set "${CONFIG}.@${1}[${index}].${2}=${3}" 2>/dev/null)
} }
first_type() {
[ "${1#/}" != "$1" ] && [ -x "$1" ] && echo "$1" && return
for p in "/bin/$1" "/usr/bin/$1" "${TMP_BIN_PATH:-/tmp}/$1"; do
[ -x "$p" ] && echo "$p" && return
done
command -v "$1" 2>/dev/null || command -v "$2" 2>/dev/null
}
get_enabled_anonymous_secs() { get_enabled_anonymous_secs() {
uci -q show "${CONFIG}" | grep "${1}\[.*\.enabled='1'" | cut -d '.' -sf2 uci -q show "${CONFIG}" | grep "${1}\[.*\.enabled='1'" | cut -d '.' -sf2
} }
get_geoip() {
local geoip_code="$1"
local geoip_type_flag=""
local geoip_path="${V2RAY_LOCATION_ASSET%*/}/geoip.dat"
[ -s "$geoip_path" ] || { echo ""; return 1; }
case "$2" in
"ipv4") geoip_type_flag="-ipv6=false" ;;
"ipv6") geoip_type_flag="-ipv4=false" ;;
esac
if type geoview &> /dev/null; then
geoview -input "$geoip_path" -list "$geoip_code" $geoip_type_flag -lowmem=true
return 0
else
echo ""
return 1
fi
}
get_host_ip() { get_host_ip() {
local host=$2 local host=$2
local count=$3 local count=$3
@@ -101,8 +135,35 @@ get_ip_port_from() {
eval "${__ipv}=\"$val1\"; ${__portv}=\"$val2\"" eval "${__ipv}=\"$val1\"; ${__portv}=\"$val2\""
} }
parse_doh() {
local __doh=$1 __url_var=$2 __host_var=$3 __port_var=$4 __bootstrap_var=$5
__doh=$(echo -e "$__doh" | tr -d ' \t\n')
local __url=${__doh%%,*}
local __bootstrap=${__doh#*,}
local __host_port=$(lua_api "get_domain_from_url(\"${__url}\")")
local __host __port
if echo "${__host_port}" | grep -q '^\[.*\]:[0-9]\+$'; then
__host=${__host_port%%]:*}]
__port=${__host_port##*:}
elif echo "${__host_port}" | grep -q ':[0-9]\+$'; then
__host=${__host_port%:*}
__port=${__host_port##*:}
else
__host=${__host_port}
__port=443
fi
__host=${__host#[}
__host=${__host%]}
if [ "$(lua_api "is_ip(\"${__host}\")")" = "true" ]; then
__bootstrap=${__host}
fi
__bootstrap=${__bootstrap#[}
__bootstrap=${__bootstrap%]}
eval "${__url_var}='${__url}' ${__host_var}='${__host}' ${__port_var}='${__port}' ${__bootstrap_var}='${__bootstrap}'"
}
host_from_url(){ host_from_url(){
local f=${1} local f="${1}"
## Remove protocol part of url ## ## Remove protocol part of url ##
f="${f##http://}" f="${f##http://}"
@@ -184,16 +245,22 @@ check_port_exists() {
} }
get_new_port() { get_new_port() {
local default_start_port=2000
local min_port=1025
local max_port=49151
local port=$1 local port=$1
[ "$port" == "auto" ] && port=2082 [ "$port" == "auto" ] && port=$default_start_port
[ "$port" -lt $min_port -o "$port" -gt $max_port ] && port=$default_start_port
local protocol=$(echo $2 | tr 'A-Z' 'a-z') local protocol=$(echo $2 | tr 'A-Z' 'a-z')
local result=$(check_port_exists $port $protocol) local result=$(check_port_exists $port $protocol)
if [ "$result" != 0 ]; then if [ "$result" != 0 ]; then
local temp= local temp=
if [ "$port" -lt 65535 ]; then if [ "$port" -lt $max_port ]; then
temp=$(expr $port + 1) temp=$(expr $port + 1)
elif [ "$port" -gt 1 ]; then elif [ "$port" -gt $min_port ]; then
temp=$(expr $port - 1) temp=$(expr $port - 1)
else
temp=$default_start_port
fi fi
get_new_port $temp $protocol get_new_port $temp $protocol
else else
@@ -315,3 +382,66 @@ delete_ip2route() {
done done
} }
} }
ln_run() {
	# Launch a component binary in the background, optionally via a symlink in
	# TMP_BIN_PATH (so it shows up in `ps` under a distinct name), and route
	# its output to a plain file, a persistent daily log, or syslog.
	#   $1 - binary path (or bare name already resolvable)
	#   $2 - link/display name to run it under
	#   $3 - output target (file path or /dev/null)
	#   $@ - remaining arguments passed through to the program
	local file_func=${1}
	local ln_name=${2}
	local output=${3}
	shift 3;
	# Only attempt the symlink when $1 contains a slash (i.e. it is a path).
	if [ "${file_func%%/*}" != "${file_func}" ]; then
		[ ! -L "${file_func}" ] && {
			ln -s "${file_func}" "${TMP_BIN_PATH}/${ln_name}" >/dev/null 2>&1
			file_func="${TMP_BIN_PATH}/${ln_name}"
		}
		[ -x "${file_func}" ] || echolog " - $(readlink ${file_func}) 没有执行权限,无法启动:${file_func} $*"
	fi
	#echo "${file_func} $*" >&2
	[ -n "${file_func}" ] || echolog " - 找不到 ${ln_name},无法启动..."
	# Logging preferences are only consulted for real output files, and never
	# for chinadns-ng (its logging is handled elsewhere). Note: "local" here
	# still scopes the variables to the whole function, so when this guard is
	# false both stay unset and the plain branch below is taken.
	[ "${output}" != "/dev/null" ] && [ "${ln_name}" != "chinadns-ng" ] && {
		local persist_log_path=$(config_t_get global persist_log_path)
		local sys_log=$(config_t_get global sys_log "0")
	}
	if [ -z "$persist_log_path" ] && [ "$sys_log" != "1" ]; then
		# Plain mode: run detached with output redirected to $output.
		${file_func:-echolog " - ${ln_name}"} "$@" >${output} 2>&1 &
	else
		# Derive the traffic direction from the output file name suffix.
		# NOTE(review): "${output: -1, -7}" is a nonstandard substring form —
		# confirm the target shell accepts it and yields the "TCP.log"/"UDP.log"
		# suffix as intended.
		[ "${output: -1, -7}" == "TCP.log" ] && local protocol="TCP"
		[ "${output: -1, -7}" == "UDP.log" ] && local protocol="UDP"
		if [ -n "${persist_log_path}" ]; then
			# Persistent mode: append to a per-day log file.
			mkdir -p ${persist_log_path}
			local log_file=${persist_log_path}/passwall_${protocol}_${ln_name}_$(date '+%F').log
			echolog "记录到持久性日志文件:${log_file}"
			${file_func:-echolog " - ${ln_name}"} "$@" >> ${log_file} 2>&1 &
			sys_log=0
		fi
		if [ "${sys_log}" == "1" ]; then
			# Syslog mode: pipe the program's combined output through logger.
			echolog "记录 ${ln_name}_${protocol} 到系统日志"
			${file_func:-echolog " - ${ln_name}"} "$@" 2>&1 | logger -t PASSWALL_${protocol}_${ln_name} &
		fi
	fi
	# Record the launch command under the next sequential number so the
	# supervisor script can replay/restart it later.
	process_count=$(ls $TMP_SCRIPT_FUNC_PATH | wc -l)
	process_count=$((process_count + 1))
	echo "${file_func:-echolog " - ${ln_name}"} $@ >${output}" > $TMP_SCRIPT_FUNC_PATH/$process_count
}
is_socks_wrap() {
	# Succeed (return 0) when the given node id carries the "Socks_" prefix,
	# i.e. it is a socks-wrapper pseudo-node; fail (return 1) otherwise.
	[ "${1#Socks_}" != "$1" ]
}
kill_all() {
	# Forcefully terminate every process whose name matches any argument.
	# All lookup/kill errors are deliberately suppressed.
	local _pids
	_pids="$(pidof "$@")"
	[ -n "${_pids}" ] && kill -9 ${_pids} >/dev/null 2>&1
}
get_subscribe_host(){
	# Print the host part of every configured subscription URL, one per line.
	# Entries with an empty "url" option are skipped.
	local _line _section _url
	uci show "${CONFIG}" | grep "=subscribe_list" | while read -r _line; do
		_section=$(echo "$_line" | cut -d '.' -sf 2 | cut -d '=' -sf 1)
		_url=$(config_n_get "$_section" url)
		[ -z "$_url" ] && continue
		_url="$(host_from_url "$_url")"
		echo "$_url"
	done
}
@@ -14,9 +14,6 @@ on:
env: env:
TZ: Asia/Shanghai TZ: Asia/Shanghai
passwall2: ${{ github.repository }} passwall2: ${{ github.repository }}
packages: Openwrt-Passwall/openwrt-passwall-packages
package_names: "chinadns-ng geoview hysteria naiveproxy tcping tuic-client shadowsocks-rust shadowsocksr-libev simple-obfs sing-box v2ray-geodata v2ray-plugin xray-core"
package_release: "chinadns-ng geoview hysteria naiveproxy tcping tuic-client shadowsocks-rust shadowsocksr-libev simple-obfs sing-box v2ray-geoip v2ray-geosite v2ray-plugin xray-core"
permissions: permissions:
contents: write contents: write
@@ -156,12 +153,25 @@ jobs:
rm -rf temp_resp rm -rf temp_resp
git clone -b master --single-branch https://github.com/openwrt/packages.git temp_resp git clone -b master --single-branch https://github.com/openwrt/packages.git temp_resp
#--------------------------------------Update Golang------------------------------------------
echo "update golang version" echo "update golang version"
rm -rf feeds/packages/lang/golang rm -rf feeds/packages/lang/golang
cp -r temp_resp/lang/golang feeds/packages/lang cp -r temp_resp/lang/golang feeds/packages/lang
#--------------------------------------Get latest Golang version------------------------------------------
wget https://go.dev/dl/?mode=json -O /tmp/golang.json
go_latest_version=$(cat /tmp/golang.json | jq -r '.[0].version')
GO_VERSION_MAJOR_MINOR=$(echo $go_latest_version | sed 's#go##' | awk -F '.' '{print $1 "." $2}')
GO_VERSION_PATCH=$(echo $go_latest_version | sed 's#go##' | awk -F '.' '{print $3}')
go_latest_version_hash=$(cat /tmp/golang.json | jq -r '.[0].files[0].sha256')
sed -i -e "s/^GO_VERSION_MAJOR_MINOR:=.*/GO_VERSION_MAJOR_MINOR:=${GO_VERSION_MAJOR_MINOR}/" -e "s/^GO_VERSION_PATCH:=.*/GO_VERSION_PATCH:=${GO_VERSION_PATCH}/" -e "s/^PKG_HASH:=.*/PKG_HASH:=${go_latest_version_hash}/" "feeds/packages/lang/golang/golang/Makefile"
#--------------------------------------Update Rust------------------------------------------
echo "update rust version" echo "update rust version"
rm -rf feeds/packages/lang/rust rm -rf feeds/packages/lang/rust
cp -r temp_resp/lang/rust feeds/packages/lang cp -r temp_resp/lang/rust feeds/packages/lang
rm -rf temp_resp rm -rf temp_resp
echo "update patch-kernel.sh" echo "update patch-kernel.sh"
@@ -181,6 +191,7 @@ jobs:
echo "CONFIG_ALL=n" >> .config echo "CONFIG_ALL=n" >> .config
echo "CONFIG_AUTOREMOVE=n" >> .config echo "CONFIG_AUTOREMOVE=n" >> .config
echo "CONFIG_LUCI_LANG_zh_Hans=y" >> .config echo "CONFIG_LUCI_LANG_zh_Hans=y" >> .config
echo "CONFIG_LUCI_LANG_zh_Hant=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2=m" >> .config echo "CONFIG_PACKAGE_luci-app-passwall2=m" >> .config
make defconfig make defconfig
echo "make package/luci-app-passwall2/{clean,compile} -j$(nproc)" echo "make package/luci-app-passwall2/{clean,compile} -j$(nproc)"
@@ -200,354 +211,3 @@ jobs:
with: with:
tag_name: ${{needs.job_check.outputs.passwall2_version}} tag_name: ${{needs.job_check.outputs.passwall2_version}}
files: ${{ env.FIRMWARE }}/* files: ${{ env.FIRMWARE }}/*
job_auto_compile:
if: ${{ needs.job_check.outputs.has_update == 'true' && needs.job_check.outputs.prerelease == 'false' }}
needs: job_check
runs-on: ubuntu-latest
name: build (${{ matrix.ver }}-${{ matrix.platform }})
strategy:
fail-fast: false
matrix:
include:
- platform: x86_64
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/x86/64/openwrt-sdk-24.10.4-x86-64_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: aarch64_generic
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/rockchip/armv8/openwrt-sdk-24.10.4-rockchip-armv8_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: aarch64_cortex-a53
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/mvebu/cortexa53/openwrt-sdk-24.10.4-mvebu-cortexa53_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: aarch64_cortex-a72
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/mvebu/cortexa72/openwrt-sdk-24.10.4-mvebu-cortexa72_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a5_vfpv4
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/at91/sama5/openwrt-sdk-24.10.4-at91-sama5_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a7
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/mediatek/mt7629/openwrt-sdk-24.10.4-mediatek-mt7629_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a7_neon-vfpv4
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/sunxi/cortexa7/openwrt-sdk-24.10.4-sunxi-cortexa7_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a8_vfpv3
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/sunxi/cortexa8/openwrt-sdk-24.10.4-sunxi-cortexa8_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a9
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/bcm53xx/generic/openwrt-sdk-24.10.4-bcm53xx-generic_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a9_neon
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/zynq/generic/openwrt-sdk-24.10.4-zynq-generic_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a9_vfpv3-d16
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/mvebu/cortexa9/openwrt-sdk-24.10.4-mvebu-cortexa9_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: arm_cortex-a15_neon-vfpv4
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/ipq806x/generic/openwrt-sdk-24.10.4-ipq806x-generic_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mips_24kc
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/ath79/generic/openwrt-sdk-24.10.4-ath79-generic_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mips_4kec
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/realtek/rtl838x/openwrt-sdk-24.10.4-realtek-rtl838x_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mips_mips32
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/bcm53xx/generic/openwrt-sdk-24.10.4-bcm53xx-generic_gcc-13.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mipsel_24kc
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/ramips/rt288x/openwrt-sdk-24.10.4-ramips-rt288x_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mipsel_74kc
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/ramips/rt3883/openwrt-sdk-24.10.4-ramips-rt3883_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: mipsel_mips32
url_sdk: https://downloads.openwrt.org/releases/24.10.4/targets/bcm47xx/generic/openwrt-sdk-24.10.4-bcm47xx-generic_gcc-13.3.0_musl.Linux-x86_64.tar.zst
ver: "ipk"
- platform: x86_64
url_sdk: https://downloads.openwrt.org/snapshots/targets/x86/64/openwrt-sdk-x86-64_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: aarch64_generic
url_sdk: https://downloads.openwrt.org/snapshots/targets/rockchip/armv8/openwrt-sdk-rockchip-armv8_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: aarch64_cortex-a53
url_sdk: https://downloads.openwrt.org/snapshots/targets/mvebu/cortexa53/openwrt-sdk-mvebu-cortexa53_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: aarch64_cortex-a72
url_sdk: https://downloads.openwrt.org/snapshots/targets/mvebu/cortexa72/openwrt-sdk-mvebu-cortexa72_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a5_vfpv4
url_sdk: https://downloads.openwrt.org/snapshots/targets/at91/sama5/openwrt-sdk-at91-sama5_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a7
url_sdk: https://downloads.openwrt.org/snapshots/targets/mediatek/mt7629/openwrt-sdk-mediatek-mt7629_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a7_neon-vfpv4
url_sdk: https://downloads.openwrt.org/snapshots/targets/sunxi/cortexa7/openwrt-sdk-sunxi-cortexa7_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a8_vfpv3
url_sdk: https://downloads.openwrt.org/snapshots/targets/sunxi/cortexa8/openwrt-sdk-sunxi-cortexa8_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a9
url_sdk: https://downloads.openwrt.org/snapshots/targets/bcm53xx/generic/openwrt-sdk-bcm53xx-generic_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a9_neon
url_sdk: https://downloads.openwrt.org/snapshots/targets/zynq/generic/openwrt-sdk-zynq-generic_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a9_vfpv3-d16
url_sdk: https://downloads.openwrt.org/snapshots/targets/mvebu/cortexa9/openwrt-sdk-mvebu-cortexa9_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: arm_cortex-a15_neon-vfpv4
url_sdk: https://downloads.openwrt.org/snapshots/targets/ipq806x/generic/openwrt-sdk-ipq806x-generic_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: mips_24kc
url_sdk: https://downloads.openwrt.org/snapshots/targets/ath79/generic/openwrt-sdk-ath79-generic_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: mips_4kec
url_sdk: https://downloads.openwrt.org/snapshots/targets/realtek/rtl838x/openwrt-sdk-realtek-rtl838x_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: mips_mips32
url_sdk: https://downloads.openwrt.org/snapshots/targets/bcm53xx/generic/openwrt-sdk-bcm53xx-generic_gcc-14.3.0_musl_eabi.Linux-x86_64.tar.zst
ver: "apk"
- platform: mipsel_24kc
url_sdk: https://downloads.openwrt.org/snapshots/targets/ramips/rt288x/openwrt-sdk-ramips-rt288x_gcc-14.2.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: mipsel_74kc
url_sdk: https://downloads.openwrt.org/snapshots/targets/ramips/rt3883/openwrt-sdk-ramips-rt3883_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
- platform: mipsel_mips32
url_sdk: https://downloads.openwrt.org/snapshots/targets/bcm47xx/generic/openwrt-sdk-bcm47xx-generic_gcc-14.3.0_musl.Linux-x86_64.tar.zst
ver: "apk"
steps:
- name: Initialization ${{ matrix.platform }} compile environment
run: |
sudo -E rm -rf /usr/share/dotnet /etc/mysql /etc/php /usr/local/lib/android
echo "Install packages"
sudo -E apt-get -qq update
sudo -E apt-get -qq install ack antlr3 asciidoc autoconf automake autopoint binutils bison build-essential \
bzip2 ccache clang cmake cpio curl device-tree-compiler ecj fastjar flex gawk gettext gcc-multilib \
g++-multilib git gnutls-dev gperf haveged help2man intltool lib32gcc-s1 libc6-dev-i386 libelf-dev \
libglib2.0-dev libgmp3-dev libltdl-dev libmpc-dev libmpfr-dev libncurses-dev libpython3-dev \
libreadline-dev libssl-dev libtool libyaml-dev libz-dev lld llvm lrzsz mkisofs msmtp nano \
ninja-build p7zip p7zip-full patch pkgconf python3 python3-pip python3-ply python3-docutils \
python3-pyelftools qemu-utils re2c rsync scons squashfs-tools subversion swig texinfo uglifyjs \
upx-ucl unzip vim wget xmlto xxd zlib1g-dev zstd
sudo -E apt-get -qq autoremove --purge
sudo -E apt-get -qq clean
- name: ${{ matrix.platform }} sdk download
run: |
wget ${{ matrix.url_sdk }}
file_name=$(echo ${{matrix.url_sdk}} | awk -F/ '{print $NF}')
mkdir sdk
if [[ $file_name == *.tar.xz ]]; then
tar -xJf $file_name -C ./sdk --strip-components=1
elif [[ $file_name == *.tar.zst ]]; then
tar --zstd -x -f $file_name -C ./sdk --strip-components=1
else
echo "Unsupported file format: $file_name"
exit 1
fi
cd sdk
- name: SSH connection to Actions
uses: mxschmitt/action-tmate@v3.13
if: (github.event.inputs.ssh == 'true' && github.event.inputs.ssh != 'false') || contains(github.event.action, 'ssh')
- name: ${{ matrix.platform }} feeds configuration packages
run: |
cd sdk
# Update feeds to github source
sed -i \
-e 's|git\.openwrt\.org/feed|github.com/openwrt|g' \
-e 's|git\.openwrt\.org/project|github.com/openwrt|g' \
-e 's|git\.openwrt\.org/openwrt|github.com/openwrt|g' \
"feeds.conf.default"
cat > feeds.tmp <<'EOF'
src-git passwall_packages https://github.com/Openwrt-Passwall/openwrt-passwall-packages.git;main
src-git passwall2 https://github.com/${{ env.passwall2 }}.git;${{ github.ref_name }}
EOF
cat feeds.conf.default >> feeds.tmp
mv feeds.tmp feeds.conf.default
./scripts/feeds update -a
./scripts/feeds install -a
#--------------------------------------begin_patches------------------------------------------
echo "Start applying the patch"
rm -rf temp_resp
git clone -b master --single-branch https://github.com/openwrt/packages.git temp_resp
echo "update golang version"
rm -rf feeds/packages/lang/golang
cp -r temp_resp/lang/golang feeds/packages/lang
echo "update rust version"
rm -rf feeds/packages/lang/rust
cp -r temp_resp/lang/rust feeds/packages/lang
rm -rf temp_resp
echo "update patch-kernel.sh"
git clone -b main --single-branch https://github.com/openwrt/openwrt.git temp_resp
cp -f temp_resp/scripts/patch-kernel.sh scripts/
rm -rf temp_resp
echo "fixed rust host build error"
sed -i 's/--set=llvm\.download-ci-llvm=false/--set=llvm.download-ci-llvm=true/' feeds/packages/lang/rust/Makefile
grep -q -- '--ci false \\' feeds/packages/lang/rust/Makefile || sed -i '/x\.py \\/a \ --ci false \\' feeds/packages/lang/rust/Makefile
echo "Patch application completed"
#--------------------------------------end_patches--------------------------------------------
echo "CONFIG_ALL_NONSHARED=n" > .config
echo "CONFIG_ALL_KMODS=n" >> .config
echo "CONFIG_ALL=n" >> .config
echo "CONFIG_AUTOREMOVE=n" >> .config
echo "CONFIG_SIGNED_PACKAGES=n" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2=m" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_Iptables_Transparent_Proxy=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_Nftables_Transparent_Proxy=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_Haproxy=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_Hysteria=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_IPv6_Nat=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_NaiveProxy=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_Shadowsocks_Libev_Client=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_Shadowsocks_Libev_Server=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_Shadowsocks_Rust_Client=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_Shadowsocks_Rust_Server=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_ShadowsocksR_Libev_Client=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_ShadowsocksR_Libev_Server=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_Simple_Obfs=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_SingBox=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_tuic_client=y" >> .config
echo "CONFIG_PACKAGE_luci-app-passwall2_INCLUDE_V2ray_Plugin=y" >> .config
make defconfig
- name: ${{ matrix.platform }} compile
id: compile
run: |
cd sdk
for package in ${{ env.package_names }}; do
if [ -d "feeds/passwall_packages/$package" ]; then
echo "-----------begin compile $package ---------------"
sleep 10s
make package/$package/compile -j$(nproc) V=s
echo "-----------compiled $package ---------------"
echo ""
fi
done
echo "status=success" >> $GITHUB_OUTPUT
- name: Organize ${{ matrix.platform }} files
id: organize
if: steps.compile.outputs.status == 'success'
run: |
cd sdk
mkdir tmp_upload
shopt -s nullglob
for src_dir in bin/packages/*/{packages,passwall_packages}; do
[[ -d "$src_dir" ]] || continue
echo "Scanning: $src_dir"
for prefix in ${{ env.package_release }}; do
for file in "$src_dir"/"$prefix"*; do
[[ -f "$file" ]] || continue
filename=$(basename "$file")
echo " Found: $filename"
cp -r "$file" "tmp_upload/"
done
done
done
mkdir upload
zip -jr upload/passwall_packages_${{ matrix.ver }}_${{ matrix.platform }}.zip tmp_upload/*
echo "FIRMWARE=$PWD" >> $GITHUB_ENV
echo "status=success" >> $GITHUB_OUTPUT
- name: Generate release info
id: info
if: steps.compile.outputs.status == 'success'
run: |
cd sdk
echo "## :mega:Update content" >> release.txt
echo "![](https://img.shields.io/github/downloads/${{ env.passwall2 }}/${{needs.job_check.outputs.passwall2_version}}/total?style=flat-square)" >> release.txt
echo "### Passwall2 Info" >> release.txt
echo "**:minidisc: Passwall2 Version: ${{needs.job_check.outputs.passwall2_version}}**" >> release.txt
echo "### Packages Version" >> release.txt
echo "**package name**|**package version**" >> release.txt
echo "-|-" >> release.txt
pkgs=$(ls feeds/passwall_packages -I v2ray-geodata | grep -E "$(echo "${{ env.package_names }}" | sed 's/ /|/g')")
for pkg in $pkgs; do
version=$(awk -F ':=' '/PKG_VERSION:=/{print $2}' feeds/passwall_packages/$pkg/Makefile | sed 's/\r//g')
[ -z "${version}" ] && version=$(awk -F ':=' '/PKG_SOURCE_DATE:=/{print $2}' feeds/passwall_packages/$pkg/Makefile | sed 's/\r//g')
echo "**:ice_cube: $pkg**|**${version}**" >> release.txt
done
echo "**:ice_cube: v2ray-geoip**|**$(awk -F ':=' '/GEOIP_VER:=/{print $2}' feeds/passwall_packages/v2ray-geodata/Makefile)**" >> release.txt
echo "**:ice_cube: v2ray-geosite**|**$(awk -F ':=' '/GEOSITE_VER:=/{print $2}' feeds/passwall_packages/v2ray-geodata/Makefile)**" >> release.txt
touch release.txt
echo "status=success" >> $GITHUB_OUTPUT
- name: Upload firmware to release
uses: softprops/action-gh-release@v2
if: steps.info.outputs.status == 'success'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{needs.job_check.outputs.passwall2_version}}
body_path: ${{ env.FIRMWARE }}/release.txt
files: ${{ env.FIRMWARE }}/upload/*
@@ -6,7 +6,7 @@
include $(TOPDIR)/rules.mk include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-passwall2 PKG_NAME:=luci-app-passwall2
PKG_VERSION:=26.1.18 PKG_VERSION:=26.1.19
PKG_RELEASE:=1 PKG_RELEASE:=1
PKG_PO_VERSION:=$(PKG_VERSION) PKG_PO_VERSION:=$(PKG_VERSION)
@@ -312,9 +312,6 @@ o:depends("remote_dns_protocol", "udp")
o = s:option(Flag, "remote_fakedns", "FakeDNS", translate("Use FakeDNS work in the domain that proxy.")) o = s:option(Flag, "remote_fakedns", "FakeDNS", translate("Use FakeDNS work in the domain that proxy."))
o.default = "0" o.default = "0"
o.rmempty = false o.rmempty = false
o:depends("remote_dns_protocol", "tcp")
o:depends("remote_dns_protocol", "doh")
o:depends("remote_dns_protocol", "udp")
o = s:option(ListValue, "remote_dns_query_strategy", translate("Remote Query Strategy")) o = s:option(ListValue, "remote_dns_query_strategy", translate("Remote Query Strategy"))
o.default = "UseIPv4" o.default = "UseIPv4"
@@ -347,6 +344,12 @@ for k, v in pairs(nodes_table) do
if v.type == "Xray" then if v.type == "Xray" then
s.fields["_xray_node"]:depends({ node = v.id }) s.fields["_xray_node"]:depends({ node = v.id })
end end
if v.node_type == "normal" or v.protocol == "_balancing" or v.protocol == "_urltest" then
--Shunt node has its own separate options.
s.fields["remote_fakedns"]:depends({ node = v.id, remote_dns_protocol = "tcp" })
s.fields["remote_fakedns"]:depends({ node = v.id, remote_dns_protocol = "doh" })
s.fields["remote_fakedns"]:depends({ node = v.id, remote_dns_protocol = "udp" })
end
end end
s.fields["dns_hosts"]:depends({ _xray_node = "1" }) s.fields["dns_hosts"]:depends({ _xray_node = "1" })
@@ -161,6 +161,12 @@ if (has_singbox or has_xray) and #nodes_table > 0 then
o.cfgvalue = get_cfgvalue(v.id, "main_node") o.cfgvalue = get_cfgvalue(v.id, "main_node")
o.write = get_write(v.id, "main_node") o.write = get_write(v.id, "main_node")
o = s:taboption("Main", Flag, vid .. "-fakedns", "FakeDNS")
o:depends("node", v.id)
o.cfgvalue = get_cfgvalue(v.id, "fakedns")
o.write = get_write(v.id, "fakedns")
o.remove = get_remove(v.id, "fakedns")
if (has_singbox and has_xray) or (v.type == "sing-box" and not has_singbox) or (v.type == "Xray" and not has_xray) then if (has_singbox and has_xray) or (v.type == "sing-box" and not has_singbox) or (v.type == "Xray" and not has_xray) then
type:depends("node", v.id) type:depends("node", v.id)
else else
@@ -189,9 +195,16 @@ if (has_singbox or has_xray) and #nodes_table > 0 then
pt.remove = get_remove(v.id, id .. "_proxy_tag") pt.remove = get_remove(v.id, id .. "_proxy_tag")
pt:value("", translate("Close")) pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node")) pt:value("main", translate("Preproxy Node"))
local fakedns_tag = s:taboption("Main", Flag, vid .. "-".. id .. "_fakedns", string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. "FakeDNS"), translate("Use FakeDNS work in the domain that proxy."))
fakedns_tag.cfgvalue = get_cfgvalue(v.id, id .. "_fakedns")
fakedns_tag.write = get_write(v.id, id .. "_fakedns")
fakedns_tag.remove = get_remove(v.id, id .. "_fakedns")
for k1, v1 in pairs(socks_list) do for k1, v1 in pairs(socks_list) do
o:value(v1.id, v1.remark) o:value(v1.id, v1.remark)
o.group[#o.group+1] = (v1.group and v1.group ~= "") and v1.group or translate("default") o.group[#o.group+1] = (v1.group and v1.group ~= "") and v1.group or translate("default")
fakedns_tag:depends({ [node_option] = v1.id, [vid .. "-fakedns"] = "1" })
end end
for k1, v1 in pairs(balancing_list) do for k1, v1 in pairs(balancing_list) do
o:value(v1.id, v1.remark) o:value(v1.id, v1.remark)
@@ -209,6 +222,10 @@ if (has_singbox or has_xray) and #nodes_table > 0 then
o:value(v1.id, v1.remark) o:value(v1.id, v1.remark)
o.group[#o.group+1] = (v1.group and v1.group ~= "") and v1.group or translate("default") o.group[#o.group+1] = (v1.group and v1.group ~= "") and v1.group or translate("default")
pt:depends({ [node_option] = v1.id, [vid .. "-preproxy_enabled"] = "1" }) pt:depends({ [node_option] = v1.id, [vid .. "-preproxy_enabled"] = "1" })
fakedns_tag:depends({ [node_option] = v1.id, [vid .. "-fakedns"] = "1" })
end
if v.default_node ~= "_direct" or v.default_node ~= "_blackhole" then
fakedns_tag:depends({ [node_option] = "_default", [vid .. "-fakedns"] = "1" })
end end
end end
end) end)
@@ -509,6 +526,10 @@ for k, v in pairs(nodes_table) do
if v.type == "Xray" then if v.type == "Xray" then
s.fields["_xray_node"]:depends({ node = v.id }) s.fields["_xray_node"]:depends({ node = v.id })
end end
if v.node_type == "normal" or v.protocol == "_balancing" or v.protocol == "_urltest" then
--Shunt node has its own separate options.
s.fields["remote_fakedns"]:depends({ node = v.id })
end
end end
m:append(Template(appname .. "/global/footer")) m:append(Template(appname .. "/global/footer"))
@@ -141,7 +141,10 @@ end
source.write = dynamicList_write source.write = dynamicList_write
--[[
-- Too low usage rate, hidden
sourcePort = s:option(Value, "sourcePort", translate("Source port")) sourcePort = s:option(Value, "sourcePort", translate("Source port"))
]]--
port = s:option(Value, "port", translate("Port")) port = s:option(Value, "port", translate("Port"))
@@ -215,7 +215,7 @@ o.default = "2"
o.placeholder = "2" o.placeholder = "2"
o.description = translate("The load balancer selects the optimal number of nodes, and traffic is randomly distributed among them.") o.description = translate("The load balancer selects the optimal number of nodes, and traffic is randomly distributed among them.")
local default_node = m.uci:get(appname, arg[1], "default_node") or "_direct"
-- [[ Shunt Start ]] -- [[ Shunt Start ]]
if #nodes_table > 0 then if #nodes_table > 0 then
o = s:option(Flag, _n("preproxy_enabled"), translate("Preproxy")) o = s:option(Flag, _n("preproxy_enabled"), translate("Preproxy"))
@@ -244,6 +244,9 @@ if #nodes_table > 0 then
if #o.keylist > 0 then if #o.keylist > 0 then
o.default = o.keylist[1] o.default = o.keylist[1]
end end
o = s:option(Flag, _n("fakedns"), "FakeDNS")
o:depends({ [_n("protocol")] = "_shunt" })
end end
m.uci:foreach(appname, "shunt_rules", function(e) m.uci:foreach(appname, "shunt_rules", function(e)
if e[".name"] and e.remarks then if e[".name"] and e.remarks then
@@ -257,9 +260,16 @@ m.uci:foreach(appname, "shunt_rules", function(e)
o.group = {"","","",""} o.group = {"","","",""}
if #nodes_table > 0 then if #nodes_table > 0 then
local pt = s:option(ListValue, _n(e[".name"] .. "_proxy_tag"), string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node"))
local fakedns_tag = s:option(Flag, _n(e[".name"] .. "_fakedns"), string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. "FakeDNS"), translate("Use FakeDNS work in the domain that proxy."))
for k, v in pairs(socks_list) do for k, v in pairs(socks_list) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = v.id })
end end
for k, v in pairs(balancers_table) do for k, v in pairs(balancers_table) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
@@ -269,13 +279,14 @@ m.uci:foreach(appname, "shunt_rules", function(e)
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
end end
local pt = s:option(ListValue, _n(e[".name"] .. "_proxy_tag"), string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node"))
for k, v in pairs(nodes_table) do for k, v in pairs(nodes_table) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
pt:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true, [_n(e[".name"])] = v.id }) pt:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true, [_n(e[".name"])] = v.id })
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = v.id })
end
if default_node ~= "_direct" or default_node ~= "_blackhole" then
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = "_default" })
end end
end end
end end
@@ -192,6 +192,7 @@ o:depends({ [_n("protocol")] = "_urltest" })
o.default = "0" o.default = "0"
o.description = translate("Interrupt existing connections when the selected outbound has changed.") o.description = translate("Interrupt existing connections when the selected outbound has changed.")
local default_node = m.uci:get(appname, arg[1], "default_node") or "_direct"
-- [[ Shunt Start ]] -- [[ Shunt Start ]]
if #nodes_table > 0 then if #nodes_table > 0 then
o = s:option(Flag, _n("preproxy_enabled"), translate("Preproxy")) o = s:option(Flag, _n("preproxy_enabled"), translate("Preproxy"))
@@ -220,6 +221,9 @@ if #nodes_table > 0 then
if #o.keylist > 0 then if #o.keylist > 0 then
o.default = o.keylist[1] o.default = o.keylist[1]
end end
o = s:option(Flag, _n("fakedns"), "FakeDNS")
o:depends({ [_n("protocol")] = "_shunt" })
end end
m.uci:foreach(appname, "shunt_rules", function(e) m.uci:foreach(appname, "shunt_rules", function(e)
if e[".name"] and e.remarks then if e[".name"] and e.remarks then
@@ -233,9 +237,16 @@ m.uci:foreach(appname, "shunt_rules", function(e)
o.group = {"","","",""} o.group = {"","","",""}
if #nodes_table > 0 then if #nodes_table > 0 then
local pt = s:option(ListValue, _n(e[".name"] .. "_proxy_tag"), string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node"))
local fakedns_tag = s:option(Flag, _n(e[".name"] .. "_fakedns"), string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. "FakeDNS"), translate("Use FakeDNS work in the domain that proxy."))
for k, v in pairs(socks_list) do for k, v in pairs(socks_list) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = v.id })
end end
for k, v in pairs(urltest_table) do for k, v in pairs(urltest_table) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
@@ -245,13 +256,14 @@ m.uci:foreach(appname, "shunt_rules", function(e)
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
end end
local pt = s:option(ListValue, _n(e[".name"] .. "_proxy_tag"), string.format('* <a style="color:red">%s</a>', e.remarks .. " " .. translate("Preproxy")))
pt:value("", translate("Close"))
pt:value("main", translate("Preproxy Node"))
for k, v in pairs(nodes_table) do for k, v in pairs(nodes_table) do
o:value(v.id, v.remark) o:value(v.id, v.remark)
o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default") o.group[#o.group+1] = (v.group and v.group ~= "") and v.group or translate("default")
pt:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true, [_n(e[".name"])] = v.id }) pt:depends({ [_n("protocol")] = "_shunt", [_n("preproxy_enabled")] = true, [_n(e[".name"])] = v.id })
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = v.id })
end
if default_node ~= "_direct" or default_node ~= "_blackhole" then
fakedns_tag:depends({ [_n("protocol")] = "_shunt", [_n("fakedns")] = true, [_n(e[".name"])] = "_default" })
end end
end end
end end
@@ -13,7 +13,7 @@ local version_ge_1_11_0 = api.compare_versions(local_version, ">=", "1.11.0")
local version_ge_1_12_0 = api.compare_versions(local_version, ">=", "1.12.0") local version_ge_1_12_0 = api.compare_versions(local_version, ">=", "1.12.0")
local GEO_VAR = { local GEO_VAR = {
OK = false, OK = nil,
DIR = nil, DIR = nil,
SITE_PATH = nil, SITE_PATH = nil,
IP_PATH = nil, IP_PATH = nil,
@@ -24,9 +24,10 @@ local GEO_VAR = {
function check_geoview() function check_geoview()
if not GEO_VAR.OK then if not GEO_VAR.OK then
GEO_VAR.OK = (api.finded_com("geoview") and api.compare_versions(api.get_app_version("geoview"), ">=", "0.1.10")) and true or false -- Only get once
GEO_VAR.OK = (api.finded_com("geoview") and api.compare_versions(api.get_app_version("geoview"), ">=", "0.1.10")) and 1 or 0
end end
if GEO_VAR.OK == false then if GEO_VAR.OK == 0 then
api.log(0, "!!! Note: Geo rules cannot be used if the Geoview component is missing or the version is too low.") api.log(0, "!!! Note: Geo rules cannot be used if the Geoview component is missing or the version is too low.")
else else
GEO_VAR.DIR = GEO_VAR.DIR or (uci:get(appname, "@global_rules[0]", "v2ray_location_asset") or "/usr/share/v2ray/"):match("^(.*)/") GEO_VAR.DIR = GEO_VAR.DIR or (uci:get(appname, "@global_rules[0]", "v2ray_location_asset") or "/usr/share/v2ray/"):match("^(.*)/")
@@ -40,7 +41,7 @@ function check_geoview()
end end
function geo_convert_srs(var) function geo_convert_srs(var)
if check_geoview() == false then if check_geoview() ~= 1 then
return return
end end
local geo_path = var["-geo_path"] local geo_path = var["-geo_path"]
@@ -59,7 +60,7 @@ function geo_convert_srs(var)
end end
local function convert_geofile() local function convert_geofile()
if check_geoview() == false then if check_geoview() ~= 1 then
return return
end end
local function convert(file_path, prefix, tags) local function convert(file_path, prefix, tags)
@@ -1220,6 +1221,8 @@ function gen_config(var)
local preproxy_tag = preproxy_rule_name local preproxy_tag = preproxy_rule_name
local preproxy_node_id = preproxy_rule_name and node["main_node"] or nil local preproxy_node_id = preproxy_rule_name and node["main_node"] or nil
inner_fakedns = node.fakedns or "0"
local function gen_shunt_node(rule_name, _node_id) local function gen_shunt_node(rule_name, _node_id)
if not rule_name then return nil end if not rule_name then return nil end
if not _node_id then _node_id = node[rule_name] end if not _node_id then _node_id = node[rule_name] end
@@ -1407,6 +1410,8 @@ function gen_config(var)
rule.source_ip_is_private = source_is_private and true or nil rule.source_ip_is_private = source_is_private and true or nil
end end
--[[
-- Too low usage rate, hidden
if e.sourcePort then if e.sourcePort then
local source_port = {} local source_port = {}
local source_port_range = {} local source_port_range = {}
@@ -1420,6 +1425,7 @@ function gen_config(var)
rule.source_port = #source_port > 0 and source_port or nil rule.source_port = #source_port > 0 and source_port or nil
rule.source_port_range = #source_port_range > 0 and source_port_range or nil rule.source_port_range = #source_port_range > 0 and source_port_range or nil
end end
]]--
if e.port then if e.port then
local port = {} local port = {}
@@ -1445,6 +1451,7 @@ function gen_config(var)
domain_keyword = {}, domain_keyword = {},
domain_regex = {}, domain_regex = {},
rule_set = {}, rule_set = {},
fakedns = nil,
invert = e.invert == "1" and true or nil invert = e.invert == "1" and true or nil
} }
string.gsub(e.domain_list, '[^' .. "\r\n" .. ']+', function(w) string.gsub(e.domain_list, '[^' .. "\r\n" .. ']+', function(w)
@@ -1481,6 +1488,9 @@ function gen_config(var)
rule.domain_keyword = #domain_table.domain_keyword > 0 and domain_table.domain_keyword or nil rule.domain_keyword = #domain_table.domain_keyword > 0 and domain_table.domain_keyword or nil
rule.domain_regex = #domain_table.domain_regex > 0 and domain_table.domain_regex or nil rule.domain_regex = #domain_table.domain_regex > 0 and domain_table.domain_regex or nil
rule.rule_set = #domain_table.rule_set > 0 and domain_table.rule_set or nil rule.rule_set = #domain_table.rule_set > 0 and domain_table.rule_set or nil
if inner_fakedns == "1" and node[e[".name"] .. "_fakedns"] == "1" then
domain_table.fakedns = true
end
if outboundTag then if outboundTag then
table.insert(dns_domain_rules, api.clone(domain_table)) table.insert(dns_domain_rules, api.clone(domain_table))
@@ -1617,7 +1627,7 @@ function gen_config(var)
end end
local fakedns_tag = "remote_fakeip" local fakedns_tag = "remote_fakeip"
if remote_dns_fake then if remote_dns_fake or inner_fakedns == "1" then
dns.fakeip = { dns.fakeip = {
enabled = true, enabled = true,
inet4_range = "198.18.0.0/16", inet4_range = "198.18.0.0/16",
@@ -1707,7 +1717,7 @@ function gen_config(var)
table.insert(dns.servers, remote_dns_server) table.insert(dns.servers, remote_dns_server)
dns_rule.server = remote_dns_server.tag dns_rule.server = remote_dns_server.tag
end end
if remote_dns_fake then if value.fakedns then
local fakedns_dns_rule = api.clone(dns_rule) local fakedns_dns_rule = api.clone(dns_rule)
fakedns_dns_rule.query_type = { fakedns_dns_rule.query_type = {
"A", "AAAA" "A", "AAAA"
@@ -708,49 +708,6 @@ function gen_config(var)
table.insert(inbounds, inbound) table.insert(inbounds, inbound)
end end
if redir_port then
local inbound = {
port = tonumber(redir_port),
protocol = "dokodemo-door",
settings = {network = "tcp,udp", followRedirect = true},
streamSettings = {sockopt = {tproxy = "tproxy"}},
sniffing = {
enabled = xray_settings.sniffing_override_dest == "1" or node.protocol == "_shunt"
}
}
if inbound.sniffing.enabled == true then
inbound.sniffing.destOverride = {"http", "tls", "quic"}
inbound.sniffing.metadataOnly = false
inbound.sniffing.routeOnly = xray_settings.sniffing_override_dest ~= "1" or nil
inbound.sniffing.domainsExcluded = xray_settings.sniffing_override_dest == "1" and get_domain_excluded() or nil
end
if remote_dns_fake then
inbound.sniffing.enabled = true
if not inbound.sniffing.destOverride then
inbound.sniffing.destOverride = {"fakedns"}
inbound.sniffing.metadataOnly = true
else
table.insert(inbound.sniffing.destOverride, "fakedns")
inbound.sniffing.metadataOnly = false
end
end
local tcp_inbound = api.clone(inbound)
tcp_inbound.tag = "tcp_redir"
tcp_inbound.settings.network = "tcp"
tcp_inbound.streamSettings.sockopt.tproxy = tcp_proxy_way
table.insert(inbounds, tcp_inbound)
local udp_inbound = api.clone(inbound)
udp_inbound.tag = "udp_redir"
udp_inbound.settings.network = "udp"
table.insert(inbounds, udp_inbound)
end
local function get_balancer_tag(_node_id)
return "balancer-" .. _node_id
end
local function gen_loopback(outboundTag, dst_node_id) local function gen_loopback(outboundTag, dst_node_id)
if not outboundTag then return nil end if not outboundTag then return nil end
local inboundTag = dst_node_id and "loop-in-" .. dst_node_id or outboundTag .. "-lo" local inboundTag = dst_node_id and "loop-in-" .. dst_node_id or outboundTag .. "-lo"
@@ -974,6 +931,8 @@ function gen_config(var)
local preproxy_outbound_tag, preproxy_balancer_tag local preproxy_outbound_tag, preproxy_balancer_tag
local preproxy_nodes local preproxy_nodes
inner_fakedns = node.fakedns or "0"
local function gen_shunt_node(rule_name, _node_id) local function gen_shunt_node(rule_name, _node_id)
if not rule_name then return nil, nil end if not rule_name then return nil, nil end
if not _node_id then _node_id = node[rule_name] end if not _node_id then _node_id = node[rule_name] end
@@ -1173,6 +1132,7 @@ function gen_config(var)
outboundTag = outboundTag, outboundTag = outboundTag,
balancerTag = balancerTag, balancerTag = balancerTag,
domain = {}, domain = {},
fakedns = nil,
} }
domains = {} domains = {}
string.gsub(e.domain_list, '[^' .. "\r\n" .. ']+', function(w) string.gsub(e.domain_list, '[^' .. "\r\n" .. ']+', function(w)
@@ -1181,10 +1141,15 @@ function gen_config(var)
table.insert(domains, w) table.insert(domains, w)
table.insert(domain_table.domain, w) table.insert(domain_table.domain, w)
end) end)
if inner_fakedns == "1" and node[e[".name"] .. "_fakedns"] == "1" and #domains > 0 then
domain_table.fakedns = true
end
if outboundTag or balancerTag then if outboundTag or balancerTag then
table.insert(dns_domain_rules, api.clone(domain_table)) table.insert(dns_domain_rules, api.clone(domain_table))
end end
if #domains == 0 then domains = nil end if #domains == 0 then
domains = nil
end
end end
local ip = nil local ip = nil
if e.ip_list then if e.ip_list then
@@ -1210,7 +1175,7 @@ function gen_config(var)
balancerTag = balancerTag, balancerTag = balancerTag,
network = e["network"] or "tcp,udp", network = e["network"] or "tcp,udp",
source = source, source = source,
sourcePort = e["sourcePort"] ~= "" and e["sourcePort"] or nil, --sourcePort = e["sourcePort"] ~= "" and e["sourcePort"] or nil,
port = e["port"] ~= "" and e["port"] or nil, port = e["port"] ~= "" and e["port"] or nil,
protocol = protocols protocol = protocols
} }
@@ -1391,7 +1356,7 @@ function gen_config(var)
end end
local _remote_fakedns = nil local _remote_fakedns = nil
if remote_dns_fake then if remote_dns_fake or inner_fakedns == "1" then
fakedns = {} fakedns = {}
local fakedns4 = { local fakedns4 = {
ipPool = "198.18.0.0/16", ipPool = "198.18.0.0/16",
@@ -1532,7 +1497,7 @@ function gen_config(var)
if value.outboundTag == "direct" then if value.outboundTag == "direct" then
dns_server = api.clone(_direct_dns) dns_server = api.clone(_direct_dns)
else else
if remote_dns_fake then if value.fakedns then
dns_server = api.clone(_remote_fakedns) dns_server = api.clone(_remote_fakedns)
else else
dns_server = api.clone(_remote_dns) dns_server = api.clone(_remote_dns)
@@ -1636,6 +1601,45 @@ function gen_config(var)
end end
end end
if redir_port then
local inbound = {
port = tonumber(redir_port),
protocol = "dokodemo-door",
settings = {network = "tcp,udp", followRedirect = true},
streamSettings = {sockopt = {tproxy = "tproxy"}},
sniffing = {
enabled = xray_settings.sniffing_override_dest == "1" or node.protocol == "_shunt"
}
}
if inbound.sniffing.enabled == true then
inbound.sniffing.destOverride = {"http", "tls", "quic"}
inbound.sniffing.metadataOnly = false
inbound.sniffing.routeOnly = xray_settings.sniffing_override_dest ~= "1" or nil
inbound.sniffing.domainsExcluded = xray_settings.sniffing_override_dest == "1" and get_domain_excluded() or nil
end
if remote_dns_fake or inner_fakedns == "1" then
inbound.sniffing.enabled = true
if not inbound.sniffing.destOverride then
inbound.sniffing.destOverride = {"fakedns"}
inbound.sniffing.metadataOnly = true
else
table.insert(inbound.sniffing.destOverride, "fakedns")
inbound.sniffing.metadataOnly = false
end
end
local tcp_inbound = api.clone(inbound)
tcp_inbound.tag = "tcp_redir"
tcp_inbound.settings.network = "tcp"
tcp_inbound.streamSettings.sockopt.tproxy = tcp_proxy_way
table.insert(inbounds, tcp_inbound)
local udp_inbound = api.clone(inbound)
udp_inbound.tag = "udp_redir"
udp_inbound.settings.network = "udp"
table.insert(inbounds, udp_inbound)
end
if inbounds or outbounds then if inbounds or outbounds then
local config = { local config = {
log = { log = {
@@ -16,6 +16,9 @@ msgstr "连接失败"
msgid "Touch Check" msgid "Touch Check"
msgstr "点我检测" msgstr "点我检测"
msgid "Global"
msgstr "全局"
msgid "Kernel Unsupported" msgid "Kernel Unsupported"
msgstr "内核不支持" msgstr "内核不支持"
@@ -1942,6 +1945,9 @@ msgstr "Socks节点:[%s]%s,启动 %s:%s"
msgid "To enable experimental IPv6 transparent proxy (TProxy), please ensure your node and type support IPv6!" msgid "To enable experimental IPv6 transparent proxy (TProxy), please ensure your node and type support IPv6!"
msgstr "开启实验性IPv6透明代理(TProxy),请确认您的节点及类型支持IPv6!" msgstr "开启实验性IPv6透明代理(TProxy),请确认您的节点及类型支持IPv6!"
msgid "[%s] process %s error, skip!"
msgstr "【%s】 进程 %s 错误,跳过!"
msgid "Analyzing the node configuration of the Socks service..." msgid "Analyzing the node configuration of the Socks service..."
msgstr "分析 Socks 服务的节点配置..." msgstr "分析 Socks 服务的节点配置..."
@@ -16,6 +16,9 @@ msgstr "連接失敗"
msgid "Touch Check" msgid "Touch Check"
msgstr "點我檢測" msgstr "點我檢測"
msgid "Global"
msgstr "全局"
msgid "Kernel Unsupported" msgid "Kernel Unsupported"
msgstr "內核不支持" msgstr "內核不支持"
@@ -1942,6 +1945,9 @@ msgstr "Socks節點:[%s]%s,啟動 %s:%s"
msgid "To enable experimental IPv6 transparent proxy (TProxy), please ensure your node and type support IPv6!" msgid "To enable experimental IPv6 transparent proxy (TProxy), please ensure your node and type support IPv6!"
msgstr "開啟實驗性IPv6透明代理(TProxy),請確認您的節點及類型支持IPv6!" msgstr "開啟實驗性IPv6透明代理(TProxy),請確認您的節點及類型支持IPv6!"
msgid "[%s] process %s error, skip!"
msgstr "【%s】 進程 %s 錯誤,跳過!"
msgid "Analyzing the node configuration of the Socks service..." msgid "Analyzing the node configuration of the Socks service..."
msgstr "分析 Socks 服務的節點配置..." msgstr "分析 Socks 服務的節點配置..."
@@ -14,6 +14,8 @@ UTIL_XRAY=$LUA_UTIL_PATH/util_xray.lua
UTIL_NAIVE=$LUA_UTIL_PATH/util_naiveproxy.lua UTIL_NAIVE=$LUA_UTIL_PATH/util_naiveproxy.lua
UTIL_HYSTERIA2=$LUA_UTIL_PATH/util_hysteria2.lua UTIL_HYSTERIA2=$LUA_UTIL_PATH/util_hysteria2.lua
UTIL_TUIC=$LUA_UTIL_PATH/util_tuic.lua UTIL_TUIC=$LUA_UTIL_PATH/util_tuic.lua
SINGBOX_BIN=$(first_type $(config_t_get global_app sing_box_file) sing-box)
XRAY_BIN=$(first_type $(config_t_get global_app xray_file) xray)
check_run_environment() { check_run_environment() {
local prefer_nft=$(config_t_get global_forwarding prefer_nft 1) local prefer_nft=$(config_t_get global_forwarding prefer_nft 1)
@@ -68,64 +70,12 @@ check_run_environment() {
fi fi
} }
first_type() {
[ "${1#/}" != "$1" ] && [ -x "$1" ] && echo "$1" && return
for p in "/bin/$1" "/usr/bin/$1" "${TMP_BIN_PATH:-/tmp}/$1"; do
[ -x "$p" ] && echo "$p" && return
done
command -v "$1" 2>/dev/null || command -v "$2" 2>/dev/null
}
ln_run() {
local file_func=${1}
local ln_name=${2}
local output=${3}
shift 3;
if [ "${file_func%%/*}" != "${file_func}" ]; then
[ ! -L "${file_func}" ] && {
ln -s "${file_func}" "${TMP_BIN_PATH}/${ln_name}" >/dev/null 2>&1
file_func="${TMP_BIN_PATH}/${ln_name}"
}
[ -x "${file_func}" ] || log 1 "$(i18n "%s does not have execute permissions and cannot be started: %s %s" "$(readlink ${file_func})" "${file_func}" "$*")"
fi
#echo "${file_func} $*" >&2
[ -n "${file_func}" ] || log 1 "$(i18n "%s not found, unable to start..." "${ln_name}")"
${file_func:-log 1 "${ln_name}"} "$@" >${output} 2>&1 &
process_count=$(ls $TMP_SCRIPT_FUNC_PATH | grep -v "^_" | wc -l)
process_count=$((process_count + 1))
echo "${file_func:-log 1 "${ln_name}"} $@ >${output}" > $TMP_SCRIPT_FUNC_PATH/$process_count
}
get_geoip() {
local geoip_code="$1"
local geoip_type_flag=""
local geoip_path="$(config_t_get global_rules v2ray_location_asset)"
geoip_path="${geoip_path%*/}/geoip.dat"
[ -e "$geoip_path" ] || { echo ""; return; }
case "$2" in
"ipv4") geoip_type_flag="-ipv6=false" ;;
"ipv6") geoip_type_flag="-ipv4=false" ;;
esac
if type geoview &> /dev/null; then
geoview -input "$geoip_path" -list "$geoip_code" $geoip_type_flag -lowmem=true
else
echo ""
fi
}
run_xray() { run_xray() {
local flag node redir_port tcp_proxy_way socks_address socks_port socks_username socks_password http_address http_port http_username http_password local flag node redir_port tcp_proxy_way socks_address socks_port socks_username socks_password http_address http_port http_username http_password
local dns_listen_port direct_dns_query_strategy remote_dns_protocol remote_dns_udp_server remote_dns_tcp_server remote_dns_doh remote_dns_client_ip remote_dns_detour remote_fakedns remote_dns_query_strategy dns_cache write_ipset_direct local dns_listen_port direct_dns_query_strategy remote_dns_protocol remote_dns_udp_server remote_dns_tcp_server remote_dns_doh remote_dns_client_ip remote_dns_detour remote_fakedns remote_dns_query_strategy dns_cache write_ipset_direct
local loglevel log_file config_file local loglevel log_file config_file
local _extra_param="" local _extra_param=""
eval_set_val $@ eval_set_val $@
local type=$(echo $(config_n_get $node type) | tr 'A-Z' 'a-z')
if [ "$type" != "xray" ]; then
local bin=$(first_type $(config_t_get global_app xray_file) xray)
[ -n "$bin" ] && type="xray"
fi
[ -z "$type" ] && return 1
[ -n "$log_file" ] || local log_file="/dev/null" [ -n "$log_file" ] || local log_file="/dev/null"
[ -z "$loglevel" ] && local loglevel=$(config_t_get global loglevel "warning") [ -z "$loglevel" ] && local loglevel=$(config_t_get global loglevel "warning")
[ -n "$flag" ] && pgrep -af "$TMP_BIN_PATH" | awk -v P1="${flag}" 'BEGIN{IGNORECASE=1}$0~P1{print $1}' | xargs kill -9 >/dev/null 2>&1 [ -n "$flag" ] && pgrep -af "$TMP_BIN_PATH" | awk -v P1="${flag}" 'BEGIN{IGNORECASE=1}$0~P1{print $1}' | xargs kill -9 >/dev/null 2>&1
@@ -213,7 +163,7 @@ run_xray() {
DNS_REMOTE_ARGS="${DNS_REMOTE_ARGS} -dns_out_tag remote -dns_listen_port ${dns_remote_listen_port} -remote_dns_outbound_socks_address 127.0.0.1 -remote_dns_outbound_socks_port ${socks_port}" DNS_REMOTE_ARGS="${DNS_REMOTE_ARGS} -dns_out_tag remote -dns_listen_port ${dns_remote_listen_port} -remote_dns_outbound_socks_address 127.0.0.1 -remote_dns_outbound_socks_port ${socks_port}"
lua $UTIL_XRAY gen_dns_config ${DNS_REMOTE_ARGS} > $V2RAY_DNS_REMOTE_CONFIG lua $UTIL_XRAY gen_dns_config ${DNS_REMOTE_ARGS} > $V2RAY_DNS_REMOTE_CONFIG
ln_run "$(first_type $(config_t_get global_app ${type}_file) ${type})" ${type} $V2RAY_DNS_REMOTE_LOG run -c "$V2RAY_DNS_REMOTE_CONFIG" ln_run "$XRAY_BIN" "xray" $V2RAY_DNS_REMOTE_LOG run -c "$V2RAY_DNS_REMOTE_CONFIG"
_extra_param="${_extra_param} -remote_dns_udp_port ${dns_remote_listen_port} -remote_dns_udp_server 127.0.0.1 -remote_dns_query_strategy ${remote_dns_query_strategy}" _extra_param="${_extra_param} -remote_dns_udp_port ${dns_remote_listen_port} -remote_dns_udp_server 127.0.0.1 -remote_dns_query_strategy ${remote_dns_query_strategy}"
fi fi
} }
@@ -224,7 +174,13 @@ run_xray() {
} }
lua $UTIL_XRAY gen_config -node $node -loglevel $loglevel ${_extra_param} > $config_file lua $UTIL_XRAY gen_config -node $node -loglevel $loglevel ${_extra_param} > $config_file
ln_run "$(first_type $(config_t_get global_app ${type}_file) ${type})" ${type} $log_file run -c "$config_file"
$XRAY_BIN run -test -c "$config_file" > $log_file; local status=$?
if [ "${status}" == 0 ]; then
ln_run "$XRAY_BIN" xray $log_file run -c "$config_file"
else
return ${status}
fi
} }
run_singbox() { run_singbox() {
@@ -245,8 +201,7 @@ run_singbox() {
[ -z "$loglevel" ] && local loglevel=$(config_t_get global loglevel "warn") [ -z "$loglevel" ] && local loglevel=$(config_t_get global loglevel "warn")
[ "$loglevel" = "warning" ] && loglevel="warn" [ "$loglevel" = "warning" ] && loglevel="warn"
_extra_param="${_extra_param} -loglevel $loglevel" _extra_param="${_extra_param} -loglevel $loglevel"
_extra_param="${_extra_param} -tags $($SINGBOX_BIN version | grep 'Tags:' | awk '{print $2}')"
_extra_param="${_extra_param} -tags $($(first_type $(config_t_get global_app sing_box_file) sing-box) version | grep 'Tags:' | awk '{print $2}')"
[ -n "$flag" ] && pgrep -af "$TMP_BIN_PATH" | awk -v P1="${flag}" 'BEGIN{IGNORECASE=1}$0~P1{print $1}' | xargs kill -9 >/dev/null 2>&1 [ -n "$flag" ] && pgrep -af "$TMP_BIN_PATH" | awk -v P1="${flag}" 'BEGIN{IGNORECASE=1}$0~P1{print $1}' | xargs kill -9 >/dev/null 2>&1
[ -n "$flag" ] && _extra_param="${_extra_param} -flag $flag" [ -n "$flag" ] && _extra_param="${_extra_param} -flag $flag"
@@ -330,7 +285,13 @@ run_singbox() {
} }
lua $UTIL_SINGBOX gen_config -node $node ${_extra_param} > $config_file lua $UTIL_SINGBOX gen_config -node $node ${_extra_param} > $config_file
ln_run "$(first_type $(config_t_get global_app sing_box_file) sing-box)" "sing-box" "${log_file}" run -c "$config_file"
$SINGBOX_BIN check -c "$config_file" > $log_file 2>&1; local status=$?
if [ "${status}" == 0 ]; then
ln_run "$SINGBOX_BIN" "sing-box" "${log_file}" run -c "$config_file"
else
return ${status}
fi
} }
run_socks() { run_socks() {
@@ -393,7 +354,7 @@ run_socks() {
} }
[ -n "$no_run" ] && _extra_param="${_extra_param} -no_run 1" [ -n "$no_run" ] && _extra_param="${_extra_param} -no_run 1"
lua $UTIL_SINGBOX gen_config -flag SOCKS_$flag -node $node -local_socks_address $bind -local_socks_port $socks_port ${_extra_param} > $config_file lua $UTIL_SINGBOX gen_config -flag SOCKS_$flag -node $node -local_socks_address $bind -local_socks_port $socks_port ${_extra_param} > $config_file
[ -n "$no_run" ] || ln_run "$(first_type $(config_t_get global_app sing_box_file) sing-box)" "sing-box" /dev/null run -c "$config_file" [ -n "$no_run" ] || ln_run "$SINGBOX_BIN" "sing-box" /dev/null run -c "$config_file"
;; ;;
xray) xray)
[ "$http_port" != "0" ] && { [ "$http_port" != "0" ] && {
@@ -404,7 +365,7 @@ run_socks() {
[ -n "$relay_port" ] && _extra_param="${_extra_param} -server_host $server_host -server_port $server_port" [ -n "$relay_port" ] && _extra_param="${_extra_param} -server_host $server_host -server_port $server_port"
[ -n "$no_run" ] && _extra_param="${_extra_param} -no_run 1" [ -n "$no_run" ] && _extra_param="${_extra_param} -no_run 1"
lua $UTIL_XRAY gen_config -flag SOCKS_$flag -node $node -local_socks_address $bind -local_socks_port $socks_port ${_extra_param} > $config_file lua $UTIL_XRAY gen_config -flag SOCKS_$flag -node $node -local_socks_address $bind -local_socks_port $socks_port ${_extra_param} > $config_file
[ -n "$no_run" ] || ln_run "$(first_type $(config_t_get global_app xray_file) xray)" "xray" $log_file run -c "$config_file" [ -n "$no_run" ] || ln_run "$XRAY_BIN" "xray" $log_file run -c "$config_file"
;; ;;
naiveproxy) naiveproxy)
lua $UTIL_NAIVE gen_config -node $node -run_type socks -local_addr $bind -local_port $socks_port -server_host $server_host -server_port $server_port > $config_file lua $UTIL_NAIVE gen_config -node $node -run_type socks -local_addr $bind -local_port $socks_port -server_host $server_host -server_port $server_port > $config_file
@@ -453,13 +414,13 @@ run_socks() {
# http to socks # http to socks
[ -z "$http_flag" ] && [ "$http_port" != "0" ] && [ -n "$http_config_file" ] && [ "$type" != "sing-box" ] && [ "$type" != "xray" ] && [ "$type" != "socks" ] && { [ -z "$http_flag" ] && [ "$http_port" != "0" ] && [ -n "$http_config_file" ] && [ "$type" != "sing-box" ] && [ "$type" != "xray" ] && [ "$type" != "socks" ] && {
local bin=$(first_type $(config_t_get global_app sing_box_file) sing-box) local bin=$SINGBOX_BIN
if [ -n "$bin" ]; then if [ -n "$bin" ]; then
type="sing-box" type="sing-box"
lua $UTIL_SINGBOX gen_proto_config -local_http_port $http_port -server_proto socks -server_address "127.0.0.1" -server_port $socks_port -server_username $_username -server_password $_password > $http_config_file lua $UTIL_SINGBOX gen_proto_config -local_http_port $http_port -server_proto socks -server_address "127.0.0.1" -server_port $socks_port -server_username $_username -server_password $_password > $http_config_file
[ -n "$no_run" ] || ln_run "$bin" ${type} /dev/null run -c "$http_config_file" [ -n "$no_run" ] || ln_run "$bin" ${type} /dev/null run -c "$http_config_file"
else else
bin=$(first_type $(config_t_get global_app xray_file) xray) bin=$XRAY_BIN
[ -n "$bin" ] && type="xray" [ -n "$bin" ] && type="xray"
[ -z "$type" ] && return 1 [ -z "$type" ] && return 1
lua $UTIL_XRAY gen_proto_config -local_http_port $http_port -server_proto socks -server_address "127.0.0.1" -server_port $socks_port -server_username $_username -server_password $_password > $http_config_file lua $UTIL_XRAY gen_proto_config -local_http_port $http_port -server_proto socks -server_address "127.0.0.1" -server_port $socks_port -server_username $_username -server_password $_password > $http_config_file
@@ -524,35 +485,34 @@ run_global() {
V2RAY_ARGS="flag=global node=$NODE redir_port=$REDIR_PORT tcp_proxy_way=${TCP_PROXY_WAY}" V2RAY_ARGS="flag=global node=$NODE redir_port=$REDIR_PORT tcp_proxy_way=${TCP_PROXY_WAY}"
V2RAY_ARGS="${V2RAY_ARGS} dns_listen_port=${TUN_DNS_PORT} direct_dns_query_strategy=${DIRECT_DNS_QUERY_STRATEGY} remote_dns_query_strategy=${REMOTE_DNS_QUERY_STRATEGY} dns_cache=${DNS_CACHE}" V2RAY_ARGS="${V2RAY_ARGS} dns_listen_port=${TUN_DNS_PORT} direct_dns_query_strategy=${DIRECT_DNS_QUERY_STRATEGY} remote_dns_query_strategy=${REMOTE_DNS_QUERY_STRATEGY} dns_cache=${DNS_CACHE}"
local msg="DNS: ${TUN_DNS} $(i18n "Direct DNS: %s" "${AUTO_DNS}")" local dns_msg="DNS: ${TUN_DNS} $(i18n "Direct DNS: %s" "${AUTO_DNS}")"
[ -n "$REMOTE_DNS_PROTOCOL" ] && { [ -n "$REMOTE_DNS_PROTOCOL" ] && {
V2RAY_ARGS="${V2RAY_ARGS} remote_dns_protocol=${REMOTE_DNS_PROTOCOL} remote_dns_detour=${REMOTE_DNS_DETOUR}" V2RAY_ARGS="${V2RAY_ARGS} remote_dns_protocol=${REMOTE_DNS_PROTOCOL} remote_dns_detour=${REMOTE_DNS_DETOUR}"
case "$REMOTE_DNS_PROTOCOL" in case "$REMOTE_DNS_PROTOCOL" in
udp*) udp*)
V2RAY_ARGS="${V2RAY_ARGS} remote_dns_udp_server=${REMOTE_DNS}" V2RAY_ARGS="${V2RAY_ARGS} remote_dns_udp_server=${REMOTE_DNS}"
msg="${msg} $(i18n "Remote DNS: %s" "${REMOTE_DNS}")" dns_msg="${dns_msg} $(i18n "Remote DNS: %s" "${REMOTE_DNS}")"
;; ;;
tcp) tcp)
V2RAY_ARGS="${V2RAY_ARGS} remote_dns_tcp_server=${REMOTE_DNS}" V2RAY_ARGS="${V2RAY_ARGS} remote_dns_tcp_server=${REMOTE_DNS}"
msg="${msg} $(i18n "Remote DNS: %s" "${REMOTE_DNS}")" dns_msg="${dns_msg} $(i18n "Remote DNS: %s" "${REMOTE_DNS}")"
;; ;;
doh) doh)
REMOTE_DNS_DOH=$(config_t_get global remote_dns_doh "https://1.1.1.1/dns-query") REMOTE_DNS_DOH=$(config_t_get global remote_dns_doh "https://1.1.1.1/dns-query")
V2RAY_ARGS="${V2RAY_ARGS} remote_dns_doh=${REMOTE_DNS_DOH}" V2RAY_ARGS="${V2RAY_ARGS} remote_dns_doh=${REMOTE_DNS_DOH}"
msg="${msg} $(i18n "Remote DNS: %s" "${REMOTE_DNS_DOH}")" dns_msg="${dns_msg} $(i18n "Remote DNS: %s" "${REMOTE_DNS_DOH}")"
;; ;;
esac esac
[ "$REMOTE_FAKEDNS" = "1" ] && { [ "$REMOTE_FAKEDNS" = "1" ] && {
V2RAY_ARGS="${V2RAY_ARGS} remote_fakedns=1" V2RAY_ARGS="${V2RAY_ARGS} remote_fakedns=1"
msg="${msg} + FakeDNS " dns_msg="${dns_msg} + FakeDNS "
} }
local _remote_dns_client_ip=$(config_t_get global remote_dns_client_ip) local _remote_dns_client_ip=$(config_t_get global remote_dns_client_ip)
[ -n "${_remote_dns_client_ip}" ] && V2RAY_ARGS="${V2RAY_ARGS} remote_dns_client_ip=${_remote_dns_client_ip}" [ -n "${_remote_dns_client_ip}" ] && V2RAY_ARGS="${V2RAY_ARGS} remote_dns_client_ip=${_remote_dns_client_ip}"
} }
msg="${msg}" dns_msg="${dns_msg}"
log 0 ${msg}
V2RAY_CONFIG=${GLOBAL_ACL_PATH}/global.json V2RAY_CONFIG=${GLOBAL_ACL_PATH}/global.json
V2RAY_LOG=${GLOBAL_ACL_PATH}/global.log V2RAY_LOG=${GLOBAL_ACL_PATH}/global.log
@@ -580,7 +540,15 @@ run_global() {
run_func="run_singbox" run_func="run_singbox"
fi fi
${run_func} ${V2RAY_ARGS} ${run_func} ${V2RAY_ARGS}; local status=$?
if [ "$status" == 0 ]; then
log 0 ${dns_msg}
else
log_i18n 0 "[%s] process %s error, skip!" $(i18n "Global") "${V2RAY_CONFIG}"
ENABLED_DEFAULT_ACL=0
return 1
fi
local RUN_NEW_DNSMASQ=1 local RUN_NEW_DNSMASQ=1
RUN_NEW_DNSMASQ=${DNS_REDIRECT} RUN_NEW_DNSMASQ=${DNS_REDIRECT}
@@ -651,14 +619,6 @@ start_socks() {
} }
} }
clean_log() {
logsnum=$(cat $LOG_FILE 2>/dev/null | wc -l)
[ "$logsnum" -gt 1000 ] && {
echo "" > $LOG_FILE
log_i18n 0 "Log file is too long, clear it!"
}
}
clean_crontab() { clean_crontab() {
[ -f "/tmp/lock/${CONFIG}_cron.lock" ] && return [ -f "/tmp/lock/${CONFIG}_cron.lock" ] && return
touch /etc/crontabs/root touch /etc/crontabs/root
@@ -873,10 +833,6 @@ run_ipset_dnsmasq() {
ln_run "$(first_type dnsmasq)" "dnsmasq" "/dev/null" -C $config_file ln_run "$(first_type dnsmasq)" "dnsmasq" "/dev/null" -C $config_file
} }
kill_all() {
kill -9 $(pidof "$@") >/dev/null 2>&1
}
acl_app() { acl_app() {
local items=$(uci show ${CONFIG} | grep "=acl_rule" | cut -d '.' -sf 2 | cut -d '=' -sf 1) local items=$(uci show ${CONFIG} | grep "=acl_rule" | cut -d '.' -sf 2 | cut -d '=' -sf 1)
[ -n "$items" ] && { [ -n "$items" ] && {
@@ -978,7 +934,18 @@ acl_app() {
elif [ "${type}" = "sing-box" ] && [ -n "${SINGBOX_BIN}" ]; then elif [ "${type}" = "sing-box" ] && [ -n "${SINGBOX_BIN}" ]; then
run_func="run_singbox" run_func="run_singbox"
fi fi
${run_func} flag=acl_$sid node=$node redir_port=$redir_port tcp_proxy_way=${TCP_PROXY_WAY} socks_address=127.0.0.1 socks_port=$acl_socks_port dns_listen_port=${dns_port} direct_dns_query_strategy=${direct_dns_query_strategy} remote_dns_protocol=${remote_dns_protocol} remote_dns_tcp_server=${remote_dns} remote_dns_udp_server=${remote_dns} remote_dns_doh="${remote_dns}" remote_dns_client_ip=${remote_dns_client_ip} remote_dns_detour=${remote_dns_detour} remote_fakedns=${remote_fakedns} remote_dns_query_strategy=${remote_dns_query_strategy} write_ipset_direct=${write_ipset_direct} config_file=${config_file} ${run_func} flag=acl_$sid node=$node redir_port=$redir_port tcp_proxy_way=${TCP_PROXY_WAY} \
socks_address=127.0.0.1 socks_port=$acl_socks_port \
dns_listen_port=${dns_port} \
direct_dns_query_strategy=${direct_dns_query_strategy} \
remote_dns_protocol=${remote_dns_protocol} remote_dns_tcp_server=${remote_dns} remote_dns_udp_server=${remote_dns} remote_dns_doh="${remote_dns}" \
remote_dns_client_ip=${remote_dns_client_ip} remote_dns_detour=${remote_dns_detour} remote_fakedns=${remote_fakedns} remote_dns_query_strategy=${remote_dns_query_strategy} \
write_ipset_direct=${write_ipset_direct} config_file=${config_file}
local status=$?
if [ "$status" != 0 ]; then
log_i18n 2 "[%s] process %s error, skip!" "${remarks}" "${config_file}"
continue
fi
fi fi
dnsmasq_port=$(get_new_port $(expr $dnsmasq_port + 1)) dnsmasq_port=$(get_new_port $(expr $dnsmasq_port + 1))
run_copy_dnsmasq flag="$sid" listen_port=$dnsmasq_port tun_dns="127.0.0.1#${dns_port}" run_copy_dnsmasq flag="$sid" listen_port=$dnsmasq_port tun_dns="127.0.0.1#${dns_port}"
@@ -1143,9 +1110,6 @@ get_config() {
fi fi
set_cache_var GLOBAL_DNSMASQ_CONF ${DNSMASQ_CONF_DIR}/dnsmasq-${CONFIG}.conf set_cache_var GLOBAL_DNSMASQ_CONF ${DNSMASQ_CONF_DIR}/dnsmasq-${CONFIG}.conf
set_cache_var GLOBAL_DNSMASQ_CONF_PATH ${GLOBAL_ACL_PATH}/dnsmasq.d set_cache_var GLOBAL_DNSMASQ_CONF_PATH ${GLOBAL_ACL_PATH}/dnsmasq.d
XRAY_BIN=$(first_type $(config_t_get global_app xray_file) xray)
SINGBOX_BIN=$(first_type $(config_t_get global_app sing_box_file) sing-box)
} }
arg1=$1 arg1=$1
@@ -107,6 +107,14 @@ log_i18n() {
log ${num} "$(i18n "$@")" log ${num} "$(i18n "$@")"
} }
clean_log() {
logsnum=$(cat $LOG_FILE 2>/dev/null | wc -l)
[ "$logsnum" -gt 1000 ] && {
echo "" > $LOG_FILE
log_i18n 0 "Log file is too long, clear it!"
}
}
lua_api() { lua_api() {
local func=${1} local func=${1}
[ -z "${func}" ] && { [ -z "${func}" ] && {
@@ -126,10 +134,35 @@ check_host() {
return 0 return 0
} }
first_type() {
[ "${1#/}" != "$1" ] && [ -x "$1" ] && echo "$1" && return
for p in "/bin/$1" "/usr/bin/$1" "${TMP_BIN_PATH:-/tmp}/$1"; do
[ -x "$p" ] && echo "$p" && return
done
command -v "$1" 2>/dev/null || command -v "$2" 2>/dev/null
}
get_enabled_anonymous_secs() { get_enabled_anonymous_secs() {
uci -q show "${CONFIG}" | grep "${1}\[.*\.enabled='1'" | cut -d '.' -sf2 uci -q show "${CONFIG}" | grep "${1}\[.*\.enabled='1'" | cut -d '.' -sf2
} }
get_geoip() {
local geoip_code="$1"
local geoip_type_flag=""
local geoip_path="$(config_t_get global_rules v2ray_location_asset)"
geoip_path="${geoip_path%*/}/geoip.dat"
[ -e "$geoip_path" ] || { echo ""; return; }
case "$2" in
"ipv4") geoip_type_flag="-ipv6=false" ;;
"ipv6") geoip_type_flag="-ipv4=false" ;;
esac
if type geoview &> /dev/null; then
geoview -input "$geoip_path" -list "$geoip_code" $geoip_type_flag -lowmem=true
else
echo ""
fi
}
get_host_ip() { get_host_ip() {
local host=$2 local host=$2
local count=$3 local count=$3
@@ -322,3 +355,34 @@ delete_ip2route() {
done done
} }
} }
ln_run() {
local file_func=${1}
local ln_name=${2}
local output=${3}
shift 3;
if [ "${file_func%%/*}" != "${file_func}" ]; then
[ ! -L "${file_func}" ] && {
ln -s "${file_func}" "${TMP_BIN_PATH}/${ln_name}" >/dev/null 2>&1
file_func="${TMP_BIN_PATH}/${ln_name}"
}
[ -x "${file_func}" ] || log 1 "$(i18n "%s does not have execute permissions and cannot be started: %s %s" "$(readlink ${file_func})" "${file_func}" "$*")"
fi
#echo "${file_func} $*" >&2
[ -n "${file_func}" ] || log 1 "$(i18n "%s not found, unable to start..." "${ln_name}")"
${file_func:-log 1 "${ln_name}"} "$@" >${output} 2>&1 &
local pid=${!}
#sleep 1s
#kill -0 ${pid} 2>/dev/null
#local status_code=${?}
process_count=$(ls $TMP_SCRIPT_FUNC_PATH | grep -v "^_" | wc -l)
process_count=$((process_count + 1))
echo "${file_func:-log 1 "${ln_name}"} $@ >${output}" > $TMP_SCRIPT_FUNC_PATH/$process_count
#return ${status_code}
}
kill_all() {
kill -9 $(pidof "$@") >/dev/null 2>&1
}
@@ -24,7 +24,7 @@ fun Project.setupCommon() {
compileSdkVersion(36) compileSdkVersion(36)
defaultConfig { defaultConfig {
minSdk = 23 minSdk = 23
targetSdk = 35 targetSdk = 36
testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner" testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner"
} }
compileOptions { compileOptions {
@@ -35,6 +35,7 @@ import androidx.work.WorkerParameters
import com.github.shadowsocks.Core import com.github.shadowsocks.Core
import com.github.shadowsocks.Core.app import com.github.shadowsocks.Core.app
import com.github.shadowsocks.core.BuildConfig import com.github.shadowsocks.core.BuildConfig
import com.github.shadowsocks.preference.DataStore
import com.github.shadowsocks.utils.useCancellable import com.github.shadowsocks.utils.useCancellable
import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.Dispatchers
import timber.log.Timber import timber.log.Timber
@@ -68,7 +69,8 @@ class AclSyncer(context: Context, workerParams: WorkerParameters) : CoroutineWor
override suspend fun doWork(): Result = try { override suspend fun doWork(): Result = try {
val route = inputData.getString(KEY_ROUTE)!! val route = inputData.getString(KEY_ROUTE)!!
val connection = URL("https://shadowsocks.org/acl/android/v1/$route.acl").openConnection() as HttpURLConnection val connection = URL("https://shadowsocks.org/acl/android/v1/$route.acl")
.openConnection(DataStore.proxy) as HttpURLConnection
val acl = connection.useCancellable { inputStream.bufferedReader().use { it.readText() } } val acl = connection.useCancellable { inputStream.bufferedReader().use { it.readText() } }
Acl.getFile(route).printWriter().use { it.write(acl) } Acl.getFile(route).printWriter().use { it.write(acl) }
Result.success() Result.success()
@@ -29,6 +29,7 @@ import android.os.IBinder
import android.os.RemoteCallbackList import android.os.RemoteCallbackList
import android.os.RemoteException import android.os.RemoteException
import androidx.core.content.ContextCompat import androidx.core.content.ContextCompat
import androidx.core.os.bundleOf
import com.github.shadowsocks.BootReceiver import com.github.shadowsocks.BootReceiver
import com.github.shadowsocks.Core import com.github.shadowsocks.Core
import com.github.shadowsocks.Core.app import com.github.shadowsocks.Core.app
@@ -43,10 +44,17 @@ import com.github.shadowsocks.utils.Action
import com.github.shadowsocks.utils.broadcastReceiver import com.github.shadowsocks.utils.broadcastReceiver
import com.github.shadowsocks.utils.readableMessage import com.github.shadowsocks.utils.readableMessage
import com.google.firebase.analytics.FirebaseAnalytics import com.google.firebase.analytics.FirebaseAnalytics
import com.google.firebase.analytics.ktx.analytics import kotlinx.coroutines.CancellationException
import com.google.firebase.analytics.logEvent import kotlinx.coroutines.CoroutineScope
import com.google.firebase.ktx.Firebase import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.* import kotlinx.coroutines.GlobalScope
import kotlinx.coroutines.Job
import kotlinx.coroutines.cancel
import kotlinx.coroutines.cancelAndJoin
import kotlinx.coroutines.coroutineScope
import kotlinx.coroutines.delay
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import timber.log.Timber import timber.log.Timber
import java.io.File import java.io.File
import java.io.IOException import java.io.IOException
@@ -269,7 +277,8 @@ object BaseService {
// channge the state // channge the state
data.changeState(State.Stopping) data.changeState(State.Stopping)
GlobalScope.launch(Dispatchers.Main.immediate) { GlobalScope.launch(Dispatchers.Main.immediate) {
Firebase.analytics.logEvent("stop") { param(FirebaseAnalytics.Param.METHOD, tag) } FirebaseAnalytics.getInstance(this@Interface as Service).logEvent("stop",
bundleOf(FirebaseAnalytics.Param.METHOD to tag))
data.connectingJob?.cancelAndJoin() // ensure stop connecting first data.connectingJob?.cancelAndJoin() // ensure stop connecting first
this@Interface as Service this@Interface as Service
// we use a coroutineScope here to allow clean-up in parallel // we use a coroutineScope here to allow clean-up in parallel
@@ -344,7 +353,8 @@ object BaseService {
} }
data.notification = createNotification(profile.formattedName) data.notification = createNotification(profile.formattedName)
Firebase.analytics.logEvent("start") { param(FirebaseAnalytics.Param.METHOD, tag) } FirebaseAnalytics.getInstance(this).logEvent("start",
bundleOf(FirebaseAnalytics.Param.METHOD to tag))
data.changeState(State.Connecting) data.changeState(State.Connecting)
data.connectingJob = GlobalScope.launch(Dispatchers.Main.immediate) { data.connectingJob = GlobalScope.launch(Dispatchers.Main.immediate) {
@@ -166,8 +166,8 @@ class VpnService : BaseVpnService(), BaseService.Interface {
if (profile.ipv6) builder.addAddress(PRIVATE_VLAN6_CLIENT, 126) if (profile.ipv6) builder.addAddress(PRIVATE_VLAN6_CLIENT, 126)
val me = packageName
if (profile.proxyApps) { if (profile.proxyApps) {
val me = packageName
profile.individual.split('\n') profile.individual.split('\n')
.filter { it != me } .filter { it != me }
.forEach { .forEach {
@@ -178,7 +178,9 @@ class VpnService : BaseVpnService(), BaseService.Interface {
Timber.w(ex) Timber.w(ex)
} }
} }
if (!profile.bypass) builder.addAllowedApplication(me) if (profile.bypass) builder.addDisallowedApplication(me)
} else {
builder.addDisallowedApplication(me)
} }
when (profile.route) { when (profile.route) {
@@ -27,7 +27,6 @@ import androidx.lifecycle.ViewModel
import com.github.shadowsocks.Core.app import com.github.shadowsocks.Core.app
import com.github.shadowsocks.core.R import com.github.shadowsocks.core.R
import com.github.shadowsocks.preference.DataStore import com.github.shadowsocks.preference.DataStore
import com.github.shadowsocks.utils.Key
import com.github.shadowsocks.utils.useCancellable import com.github.shadowsocks.utils.useCancellable
import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.GlobalScope
@@ -35,7 +34,6 @@ import kotlinx.coroutines.Job
import kotlinx.coroutines.launch import kotlinx.coroutines.launch
import java.io.IOException import java.io.IOException
import java.net.HttpURLConnection import java.net.HttpURLConnection
import java.net.Proxy
import java.net.URL import java.net.URL
import java.net.URLConnection import java.net.URLConnection
@@ -83,9 +81,7 @@ class HttpsTest : ViewModel() {
cancelTest() cancelTest()
status.value = Status.Testing status.value = Status.Testing
val url = URL("https://cp.cloudflare.com") val url = URL("https://cp.cloudflare.com")
val conn = (if (DataStore.serviceMode != Key.modeVpn) { val conn = url.openConnection(DataStore.proxy) as HttpURLConnection
url.openConnection(Proxy(Proxy.Type.SOCKS, DataStore.proxyAddress))
} else url.openConnection()) as HttpURLConnection
conn.setRequestProperty("Connection", "close") conn.setRequestProperty("Connection", "close")
conn.instanceFollowRedirects = false conn.instanceFollowRedirects = false
conn.useCaches = false conn.useCaches = false
@@ -30,6 +30,7 @@ import com.github.shadowsocks.utils.DirectBoot
import com.github.shadowsocks.utils.Key import com.github.shadowsocks.utils.Key
import com.github.shadowsocks.utils.parsePort import com.github.shadowsocks.utils.parsePort
import java.net.InetSocketAddress import java.net.InetSocketAddress
import java.net.Proxy
object DataStore : OnPreferenceDataStoreChangeListener { object DataStore : OnPreferenceDataStoreChangeListener {
val publicStore = RoomPreferenceDataStore(PublicDatabase.kvPairDao) val publicStore = RoomPreferenceDataStore(PublicDatabase.kvPairDao)
@@ -68,7 +69,7 @@ object DataStore : OnPreferenceDataStoreChangeListener {
var portProxy: Int var portProxy: Int
get() = getLocalPort(Key.portProxy, 1080) get() = getLocalPort(Key.portProxy, 1080)
set(value) = publicStore.putString(Key.portProxy, value.toString()) set(value) = publicStore.putString(Key.portProxy, value.toString())
val proxyAddress get() = InetSocketAddress("127.0.0.1", portProxy) val proxy get() = Proxy(Proxy.Type.SOCKS, InetSocketAddress("127.0.0.1", portProxy))
var portLocalDns: Int var portLocalDns: Int
get() = getLocalPort(Key.portLocalDns, 5450) get() = getLocalPort(Key.portLocalDns, 5450)
set(value) = publicStore.putString(Key.portLocalDns, value.toString()) set(value) = publicStore.putString(Key.portLocalDns, value.toString())
@@ -0,0 +1,13 @@
#This file is generated by updateDaemonJvm
toolchainUrl.FREE_BSD.AARCH64=https\://api.foojay.io/disco/v3.0/ids/ff1d4fc92bcfc9d3799beabb4e70cfa3/redirect
toolchainUrl.FREE_BSD.X86_64=https\://api.foojay.io/disco/v3.0/ids/08ce182188ada0b93565cd9ca4a4ab32/redirect
toolchainUrl.LINUX.AARCH64=https\://api.foojay.io/disco/v3.0/ids/c5760d82d08e6c26884debb23736ea57/redirect
toolchainUrl.LINUX.X86_64=https\://api.foojay.io/disco/v3.0/ids/08ce182188ada0b93565cd9ca4a4ab32/redirect
toolchainUrl.MAC_OS.AARCH64=https\://api.foojay.io/disco/v3.0/ids/021e528cbed860c875a9016f29ee13c1/redirect
toolchainUrl.MAC_OS.X86_64=https\://api.foojay.io/disco/v3.0/ids/6141bf023dcc7a96c47cad75c59b054e/redirect
toolchainUrl.UNIX.AARCH64=https\://api.foojay.io/disco/v3.0/ids/ff1d4fc92bcfc9d3799beabb4e70cfa3/redirect
toolchainUrl.UNIX.X86_64=https\://api.foojay.io/disco/v3.0/ids/08ce182188ada0b93565cd9ca4a4ab32/redirect
toolchainUrl.WINDOWS.AARCH64=https\://api.foojay.io/disco/v3.0/ids/22860963aebba7217b72b98c4f9a5cfb/redirect
toolchainUrl.WINDOWS.X86_64=https\://api.foojay.io/disco/v3.0/ids/a6eb06d81d82a782734ef3b616ba2684/redirect
toolchainVendor=JETBRAINS
toolchainVersion=21
+25 -24
View File
@@ -1,21 +1,22 @@
[versions] [versions]
camera = "1.4.2" camera = "1.5.2"
coroutines = "1.10.2" coroutines = "1.10.2"
lifecycle = "2.9.1" lifecycle = "2.10.0"
room = "2.7.2" room = "2.8.4"
work = "2.10.2" work = "2.11.0"
[libraries] [libraries]
android-gradle = "com.android.tools.build:gradle:8.11.1" android-gradle = "com.android.tools.build:gradle:8.13.2"
androidx-browser = "androidx.browser:browser:1.8.0" androidx-browser = "androidx.browser:browser:1.9.0"
androidx-camera-camera2 = { module = "androidx.camera:camera-camera2", version.ref = "camera" } androidx-camera-camera2 = { module = "androidx.camera:camera-camera2", version.ref = "camera" }
androidx-camera-lifecycle = { module = "androidx.camera:camera-lifecycle", version.ref = "camera" } androidx-camera-lifecycle = { module = "androidx.camera:camera-lifecycle", version.ref = "camera" }
androidx-camera-view = { module = "androidx.camera:camera-view", version.ref = "camera" } androidx-camera-view = { module = "androidx.camera:camera-view", version.ref = "camera" }
androidx-concurrent-futures-ktx = "androidx.concurrent:concurrent-futures-ktx:1.2.0" androidx-concurrent-futures-ktx = "androidx.concurrent:concurrent-futures-ktx:1.3.0"
androidx-constraintlayout = "androidx.constraintlayout:constraintlayout:2.2.1" androidx-constraintlayout = "androidx.constraintlayout:constraintlayout:2.2.1"
androidx-core-ktx = "androidx.core:core-ktx:1.16.0" androidx-core-ktx = "androidx.core:core-ktx:1.17.0"
androidx-espresso-core = "androidx.test.espresso:espresso-core:3.6.1" androidx-espresso-core = "androidx.test.espresso:espresso-core:3.7.0"
androidx-junit-ktx = "androidx.test.ext:junit-ktx:1.2.1" androidx-fragment-ktx = "androidx.fragment:fragment-ktx:1.8.9"
androidx-junit-ktx = "androidx.test.ext:junit-ktx:1.3.0"
androidx-leanback-preference = "androidx.leanback:leanback-preference:1.2.0" androidx-leanback-preference = "androidx.leanback:leanback-preference:1.2.0"
androidx-lifecycle-livedata-core-ktx = { module = "androidx.lifecycle:lifecycle-livedata-core-ktx", version.ref = "lifecycle" } androidx-lifecycle-livedata-core-ktx = { module = "androidx.lifecycle:lifecycle-livedata-core-ktx", version.ref = "lifecycle" }
androidx-lifecycle-runtime-ktx = { module = "androidx.lifecycle:lifecycle-runtime-ktx", version.ref = "lifecycle" } androidx-lifecycle-runtime-ktx = { module = "androidx.lifecycle:lifecycle-runtime-ktx", version.ref = "lifecycle" }
@@ -23,32 +24,32 @@ androidx-preference = "androidx.preference:preference:1.2.1"
androidx-room-compiler = { module = "androidx.room:room-compiler", version.ref = "room" } androidx-room-compiler = { module = "androidx.room:room-compiler", version.ref = "room" }
androidx-room-runtime = { module = "androidx.room:room-runtime", version.ref = "room" } androidx-room-runtime = { module = "androidx.room:room-runtime", version.ref = "room" }
androidx-room-testing = { module = "androidx.room:room-testing", version.ref = "room" } androidx-room-testing = { module = "androidx.room:room-testing", version.ref = "room" }
androidx-test-runner = "androidx.test:runner:1.6.2" androidx-test-runner = "androidx.test:runner:1.7.0"
androidx-work-multiprocess = { module = "androidx.work:work-multiprocess", version.ref = "work" } androidx-work-multiprocess = { module = "androidx.work:work-multiprocess", version.ref = "work" }
androidx-work-runtime-ktx = { module = "androidx.work:work-runtime-ktx", version.ref = "work" } androidx-work-runtime-ktx = { module = "androidx.work:work-runtime-ktx", version.ref = "work" }
barcode-scanning = "com.google.mlkit:barcode-scanning:17.3.0" barcode-scanning = "com.google.mlkit:barcode-scanning:17.3.0"
desugar = "com.android.tools:desugar_jdk_libs:2.1.5" desugar = "com.android.tools:desugar_jdk_libs:2.1.5"
dnsjava = "dnsjava:dnsjava:3.6.3" dnsjava = "dnsjava:dnsjava:3.6.4"
dokka = "org.jetbrains.dokka:dokka-gradle-plugin:2.0.0" dokka = "org.jetbrains.dokka:dokka-gradle-plugin:2.1.0"
fastscroll = "me.zhanghai.android.fastscroll:library:1.3.0" fastscroll = "me.zhanghai.android.fastscroll:library:1.3.0"
firebase-analytics = "com.google.firebase:firebase-analytics:22.5.0" firebase-analytics = "com.google.firebase:firebase-analytics:23.0.0"
firebase-crashlytics = "com.google.firebase:firebase-crashlytics:19.4.4" firebase-crashlytics = "com.google.firebase:firebase-crashlytics:20.0.4"
firebase-crashlytics-gradle = "com.google.firebase:firebase-crashlytics-gradle:3.0.4" firebase-crashlytics-gradle = "com.google.firebase:firebase-crashlytics-gradle:3.0.6"
google-oss-licenses = "com.google.android.gms:oss-licenses-plugin:0.10.6" google-oss-licenses = "com.google.android.gms:oss-licenses-plugin:0.10.10"
google-services = "com.google.gms:google-services:4.4.3" google-services = "com.google.gms:google-services:4.4.4"
gradle-maven-publish = "com.vanniktech:gradle-maven-publish-plugin:0.33.0" gradle-maven-publish = "com.vanniktech:gradle-maven-publish-plugin:0.36.0"
junit = "junit:junit:4.13.2" junit = "junit:junit:4.13.2"
kotlin-gradle = "org.jetbrains.kotlin:kotlin-gradle-plugin:2.2.0" kotlin-gradle = "org.jetbrains.kotlin:kotlin-gradle-plugin:2.2.0"
kotlinx-coroutines-android = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-android", version.ref = "coroutines" } kotlinx-coroutines-android = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-android", version.ref = "coroutines" }
kotlinx-coroutines-play-services = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-play-services", version.ref = "coroutines" } kotlinx-coroutines-play-services = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-play-services", version.ref = "coroutines" }
locale-api = "com.twofortyfouram:android-plugin-api-for-locale:1.0.4" locale-api = "com.twofortyfouram:android-plugin-api-for-locale:1.0.4"
material = "com.google.android.material:material:1.12.0" material = "com.google.android.material:material:1.13.0"
play-services-oss-licenses = "com.google.android.gms:play-services-oss-licenses:17.2.0" play-services-oss-licenses = "com.google.android.gms:play-services-oss-licenses:17.3.0"
preferencex-simplemenu = "com.takisoft.preferencex:preferencex-simplemenu:1.1.0" preferencex-simplemenu = "com.takisoft.preferencex:preferencex-simplemenu:1.1.0"
rust-android = "org.mozilla.rust-android-gradle:plugin:0.9.6" rust-android = "org.mozilla.rust-android-gradle:plugin:0.9.6"
timber = "com.jakewharton.timber:timber:5.0.1" timber = "com.jakewharton.timber:timber:5.0.1"
zxing = "com.google.zxing:core:3.5.3" zxing = "com.google.zxing:core:3.5.4"
[plugins] [plugins]
ksp = "com.google.devtools.ksp:2.2.0-2.0.2" ksp = "com.google.devtools.ksp:2.3.4"
versions = "com.github.ben-manes.versions:0.52.0" versions = "com.github.ben-manes.versions:0.53.0"
@@ -53,9 +53,7 @@ import com.github.shadowsocks.widget.ServiceButton
import com.github.shadowsocks.widget.StatsBar import com.github.shadowsocks.widget.StatsBar
import com.google.android.material.navigation.NavigationView import com.google.android.material.navigation.NavigationView
import com.google.android.material.snackbar.Snackbar import com.google.android.material.snackbar.Snackbar
import com.google.firebase.analytics.ktx.analytics import com.google.firebase.analytics.FirebaseAnalytics
import com.google.firebase.analytics.logEvent
import com.google.firebase.ktx.Firebase
class MainActivity : AppCompatActivity(), ShadowsocksConnection.Callback, OnPreferenceDataStoreChangeListener, class MainActivity : AppCompatActivity(), ShadowsocksConnection.Callback, OnPreferenceDataStoreChangeListener,
NavigationView.OnNavigationItemSelectedListener { NavigationView.OnNavigationItemSelectedListener {
@@ -204,7 +202,7 @@ class MainActivity : AppCompatActivity(), ShadowsocksConnection.Callback, OnPref
} }
R.id.globalSettings -> displayFragment(GlobalSettingsFragment()) R.id.globalSettings -> displayFragment(GlobalSettingsFragment())
R.id.about -> { R.id.about -> {
Firebase.analytics.logEvent("about") { } FirebaseAnalytics.getInstance(this).logEvent("about", null)
displayFragment(AboutFragment()) displayFragment(AboutFragment())
} }
R.id.faq -> { R.id.faq -> {
+3 -3
View File
@@ -14,9 +14,9 @@ android {
dependencies { dependencies {
coreLibraryDesugaring(libs.desugar) coreLibraryDesugaring(libs.desugar)
api("androidx.core:core-ktx:1.7.0") api(libs.androidx.core.ktx)
api("androidx.fragment:fragment-ktx:1.5.5") api(libs.androidx.fragment.ktx)
api("com.google.android.material:material:1.6.0") api(libs.material)
testImplementation(libs.junit) testImplementation(libs.junit)
androidTestImplementation(libs.androidx.test.runner) androidTestImplementation(libs.androidx.test.runner)
androidTestImplementation(libs.androidx.espresso.core) androidTestImplementation(libs.androidx.espresso.core)
+3
View File
@@ -1 +1,4 @@
plugins {
id("org.gradle.toolchains.foojay-resolver-convention") version "1.0.0"
}
include(":core", ":plugin", ":mobile", ":tv") include(":core", ":plugin", ":mobile", ":tv")
+19 -19
View File
@@ -382,7 +382,7 @@ dependencies = [
"serde", "serde",
"serde_bytes", "serde_bytes",
"simdutf8", "simdutf8",
"thiserror 2.0.17", "thiserror 2.0.18",
"time", "time",
"uuid", "uuid",
] ]
@@ -784,7 +784,7 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10d60334b3b2e7c9d91ef8150abfb6fa4c1c39ebbcf4a81c2e346aad939fee3e" checksum = "10d60334b3b2e7c9d91ef8150abfb6fa4c1c39ebbcf4a81c2e346aad939fee3e"
dependencies = [ dependencies = [
"thiserror 2.0.17", "thiserror 2.0.18",
] ]
[[package]] [[package]]
@@ -1345,7 +1345,7 @@ dependencies = [
"ring", "ring",
"rustls", "rustls",
"serde", "serde",
"thiserror 2.0.17", "thiserror 2.0.18",
"tinyvec", "tinyvec",
"tokio", "tokio",
"tokio-rustls", "tokio-rustls",
@@ -1373,7 +1373,7 @@ dependencies = [
"rustls", "rustls",
"serde", "serde",
"smallvec", "smallvec",
"thiserror 2.0.17", "thiserror 2.0.18",
"tokio", "tokio",
"tokio-rustls", "tokio-rustls",
"tracing", "tracing",
@@ -1989,7 +1989,7 @@ dependencies = [
"serde-value", "serde-value",
"serde_json", "serde_json",
"serde_yaml", "serde_yaml",
"thiserror 2.0.17", "thiserror 2.0.18",
"thread-id", "thread-id",
"typemap-ors", "typemap-ors",
"unicode-segmentation", "unicode-segmentation",
@@ -2516,7 +2516,7 @@ dependencies = [
"rustc-hash", "rustc-hash",
"rustls", "rustls",
"socket2 0.6.1", "socket2 0.6.1",
"thiserror 2.0.17", "thiserror 2.0.18",
"tokio", "tokio",
"tracing", "tracing",
"web-time", "web-time",
@@ -2538,7 +2538,7 @@ dependencies = [
"rustls", "rustls",
"rustls-pki-types", "rustls-pki-types",
"slab", "slab",
"thiserror 2.0.17", "thiserror 2.0.18",
"tinyvec", "tinyvec",
"tracing", "tracing",
"web-time", "web-time",
@@ -2634,7 +2634,7 @@ checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac"
dependencies = [ dependencies = [
"getrandom 0.2.16", "getrandom 0.2.16",
"libredox", "libredox",
"thiserror 2.0.17", "thiserror 2.0.18",
] ]
[[package]] [[package]]
@@ -3124,7 +3124,7 @@ dependencies = [
"shadowsocks-crypto", "shadowsocks-crypto",
"socket2 0.6.1", "socket2 0.6.1",
"spin", "spin",
"thiserror 2.0.17", "thiserror 2.0.18",
"tokio", "tokio",
"tokio-tfo", "tokio-tfo",
"trait-variant", "trait-variant",
@@ -3191,7 +3191,7 @@ dependencies = [
"sysexits", "sysexits",
"syslog-tracing", "syslog-tracing",
"tcmalloc", "tcmalloc",
"thiserror 2.0.17", "thiserror 2.0.18",
"time", "time",
"tokio", "tokio",
"tracing", "tracing",
@@ -3243,7 +3243,7 @@ dependencies = [
"smoltcp", "smoltcp",
"socket2 0.6.1", "socket2 0.6.1",
"spin", "spin",
"thiserror 2.0.17", "thiserror 2.0.18",
"tokio", "tokio",
"tokio-native-tls", "tokio-native-tls",
"tokio-rustls", "tokio-rustls",
@@ -3544,11 +3544,11 @@ dependencies = [
[[package]] [[package]]
name = "thiserror" name = "thiserror"
version = "2.0.17" version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
dependencies = [ dependencies = [
"thiserror-impl 2.0.17", "thiserror-impl 2.0.18",
] ]
[[package]] [[package]]
@@ -3564,9 +3564,9 @@ dependencies = [
[[package]] [[package]]
name = "thiserror-impl" name = "thiserror-impl"
version = "2.0.17" version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@@ -3792,7 +3792,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf"
dependencies = [ dependencies = [
"crossbeam-channel", "crossbeam-channel",
"thiserror 2.0.17", "thiserror 2.0.18",
"time", "time",
"tracing-subscriber", "tracing-subscriber",
] ]
@@ -3879,7 +3879,7 @@ dependencies = [
"libc", "libc",
"log", "log",
"nix", "nix",
"thiserror 2.0.17", "thiserror 2.0.18",
"tokio", "tokio",
"tokio-util", "tokio-util",
"windows-sys 0.61.2", "windows-sys 0.61.2",
@@ -4596,7 +4596,7 @@ dependencies = [
"futures", "futures",
"libloading 0.9.0", "libloading 0.9.0",
"log", "log",
"thiserror 2.0.17", "thiserror 2.0.18",
"windows-sys 0.61.2", "windows-sys 0.61.2",
"winreg 0.55.0", "winreg 0.55.0",
] ]
@@ -249,6 +249,9 @@ where
if c.is_closed() { if c.is_closed() {
continue; continue;
} }
if !c.is_ready() {
continue;
}
return Some(c); return Some(c);
} }
} }
@@ -271,12 +274,23 @@ where
"HTTP connection keep-alive for host: {}, response: {:?}", "HTTP connection keep-alive for host: {}, response: {:?}",
host, response host, response
); );
self.cache_conn let cache_conn = self.cache_conn.clone();
.lock() tokio::spawn(async move {
.await match c.ready().await {
.entry(host) Ok(_) => {
.or_insert_with(VecDeque::new) trace!("HTTP connection for host: {host} is ready and will be cached");
.push_back((c, Instant::now())); cache_conn
.lock()
.await
.entry(host)
.or_insert_with(VecDeque::new)
.push_back((c, Instant::now()));
}
Err(e) => {
trace!("HTTP connection for host: {host} failed to become ready: {}", e);
}
};
});
} }
Ok(response) Ok(response)
@@ -448,4 +462,18 @@ where
Self::Http2(r) => r.is_closed(), Self::Http2(r) => r.is_closed(),
} }
} }
pub fn is_ready(&self) -> bool {
match self {
Self::Http1(r) => r.is_ready(),
Self::Http2(r) => r.is_ready(),
}
}
pub async fn ready(&mut self) -> Result<(), hyper::Error> {
match self {
HttpConnection::Http1(r) => r.ready().await,
HttpConnection::Http2(r) => r.ready().await,
}
}
} }
+2 -2
View File
@@ -1,12 +1,12 @@
include $(TOPDIR)/rules.mk include $(TOPDIR)/rules.mk
PKG_NAME:=geoview PKG_NAME:=geoview
PKG_VERSION:=0.2.2 PKG_VERSION:=0.2.4
PKG_RELEASE:=1 PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/snowie2000/geoview/tar.gz/$(PKG_VERSION)? PKG_SOURCE_URL:=https://codeload.github.com/snowie2000/geoview/tar.gz/$(PKG_VERSION)?
PKG_HASH:=3cdec7da60d5ec84f71e086fdc77f43287d064371f51d49bcfe09abd50604343 PKG_HASH:=1903d0e9aa1eea53cc445e558fae45ba6e328b1c44200954b4f15aa1adc8aff0
PKG_LICENSE:=Apache-2.0 PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE PKG_LICENSE_FILES:=LICENSE
@@ -309,7 +309,7 @@ const parseRulesYaml = hm.parseYaml.extend({
if (!entry) if (!entry)
return null; return null;
// key mapping // 2026/01/17 // key mapping // 2026/01/18
let config = { let config = {
id: this.id, id: this.id,
label: '%s %s'.format(this.id.slice(0,7), _('(Imported)')), label: '%s %s'.format(this.id.slice(0,7), _('(Imported)')),

Some files were not shown because too many files have changed in this diff Show More