Update On Sat Apr 18 21:05:12 CEST 2026

This commit is contained in:
github-action[bot]
2026-04-18 21:05:12 +02:00
parent 3bd27b378d
commit 3e632818f9
189 changed files with 7974 additions and 1615 deletions
+1
View File
@@ -1333,3 +1333,4 @@ Update On Tue Apr 14 21:27:36 CEST 2026
Update On Wed Apr 15 21:30:45 CEST 2026
Update On Thu Apr 16 21:28:40 CEST 2026
Update On Fri Apr 17 21:15:01 CEST 2026
Update On Sat Apr 18 21:05:03 CEST 2026
@@ -57,11 +57,12 @@ jobs:
update-go-mod-replace ${{ github.workspace }}/core/src/foss/golang/clash/go.mod $(pwd)/go.mod
go mod tidy
- uses: tibdex/github-app-token@v2
- uses: actions/create-github-app-token@v3
id: generate-token
with:
app_id: ${{ secrets.MAINTAINER_APPID }}
private_key: ${{ secrets.MAINTAINER_APP_PRIVATE_KEY }}
client-id: ${{ secrets.MAINTAINER_APPID }}
private-key: ${{ secrets.MAINTAINER_APP_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
- name: Create Pull Request
id: cpr
+1 -1
View File
@@ -19,7 +19,7 @@ jobs:
- uses: actions/create-github-app-token@v3
id: generate-token
with:
app-id: ${{ secrets.MAINTAINER_APPID }}
client-id: ${{ secrets.MAINTAINER_APPID }}
private-key: ${{ secrets.MAINTAINER_APP_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
+52
View File
@@ -85,6 +85,19 @@ type XHTTPOptions struct {
Headers map[string]string `proxy:"headers,omitempty"`
NoGRPCHeader bool `proxy:"no-grpc-header,omitempty"`
XPaddingBytes string `proxy:"x-padding-bytes,omitempty"`
XPaddingObfsMode bool `proxy:"x-padding-obfs-mode,omitempty"`
XPaddingKey string `proxy:"x-padding-key,omitempty"`
XPaddingHeader string `proxy:"x-padding-header,omitempty"`
XPaddingPlacement string `proxy:"x-padding-placement,omitempty"`
XPaddingMethod string `proxy:"x-padding-method,omitempty"`
UplinkHTTPMethod string `proxy:"uplink-http-method,omitempty"`
SessionPlacement string `proxy:"session-placement,omitempty"`
SessionKey string `proxy:"session-key,omitempty"`
SeqPlacement string `proxy:"seq-placement,omitempty"`
SeqKey string `proxy:"seq-key,omitempty"`
UplinkDataPlacement string `proxy:"uplink-data-placement,omitempty"`
UplinkDataKey string `proxy:"uplink-data-key,omitempty"`
UplinkChunkSize string `proxy:"uplink-chunk-size,omitempty"`
ScMaxEachPostBytes string `proxy:"sc-max-each-post-bytes,omitempty"`
ScMinPostsIntervalMs string `proxy:"sc-min-posts-interval-ms,omitempty"`
ReuseSettings *XHTTPReuseSettings `proxy:"reuse-settings,omitempty"` // aka XMUX
@@ -107,6 +120,19 @@ type XHTTPDownloadSettings struct {
Headers *map[string]string `proxy:"headers,omitempty"`
NoGRPCHeader *bool `proxy:"no-grpc-header,omitempty"`
XPaddingBytes *string `proxy:"x-padding-bytes,omitempty"`
XPaddingObfsMode *bool `proxy:"x-padding-obfs-mode,omitempty"`
XPaddingKey *string `proxy:"x-padding-key,omitempty"`
XPaddingHeader *string `proxy:"x-padding-header,omitempty"`
XPaddingPlacement *string `proxy:"x-padding-placement,omitempty"`
XPaddingMethod *string `proxy:"x-padding-method,omitempty"`
UplinkHTTPMethod *string `proxy:"uplink-http-method,omitempty"`
SessionPlacement *string `proxy:"session-placement,omitempty"`
SessionKey *string `proxy:"session-key,omitempty"`
SeqPlacement *string `proxy:"seq-placement,omitempty"`
SeqKey *string `proxy:"seq-key,omitempty"`
UplinkDataPlacement *string `proxy:"uplink-data-placement,omitempty"`
UplinkDataKey *string `proxy:"uplink-data-key,omitempty"`
UplinkChunkSize *string `proxy:"uplink-chunk-size,omitempty"`
ScMaxEachPostBytes *string `proxy:"sc-max-each-post-bytes,omitempty"`
ScMinPostsIntervalMs *string `proxy:"sc-min-posts-interval-ms,omitempty"`
ReuseSettings *XHTTPReuseSettings `proxy:"reuse-settings,omitempty"` // aka XMUX
@@ -552,6 +578,19 @@ func NewVless(option VlessOption) (*Vless, error) {
Headers: v.option.XHTTPOpts.Headers,
NoGRPCHeader: v.option.XHTTPOpts.NoGRPCHeader,
XPaddingBytes: v.option.XHTTPOpts.XPaddingBytes,
XPaddingObfsMode: v.option.XHTTPOpts.XPaddingObfsMode,
XPaddingKey: v.option.XHTTPOpts.XPaddingKey,
XPaddingHeader: v.option.XHTTPOpts.XPaddingHeader,
XPaddingPlacement: v.option.XHTTPOpts.XPaddingPlacement,
XPaddingMethod: v.option.XHTTPOpts.XPaddingMethod,
UplinkHTTPMethod: v.option.XHTTPOpts.UplinkHTTPMethod,
SessionPlacement: v.option.XHTTPOpts.SessionPlacement,
SessionKey: v.option.XHTTPOpts.SessionKey,
SeqPlacement: v.option.XHTTPOpts.SeqPlacement,
SeqKey: v.option.XHTTPOpts.SeqKey,
UplinkDataPlacement: v.option.XHTTPOpts.UplinkDataPlacement,
UplinkDataKey: v.option.XHTTPOpts.UplinkDataKey,
UplinkChunkSize: v.option.XHTTPOpts.UplinkChunkSize,
ScMaxEachPostBytes: v.option.XHTTPOpts.ScMaxEachPostBytes,
ScMinPostsIntervalMs: v.option.XHTTPOpts.ScMinPostsIntervalMs,
ReuseConfig: reuseCfg,
@@ -667,6 +706,19 @@ func NewVless(option VlessOption) (*Vless, error) {
Headers: lo.FromPtrOr(ds.Headers, v.option.XHTTPOpts.Headers),
NoGRPCHeader: lo.FromPtrOr(ds.NoGRPCHeader, v.option.XHTTPOpts.NoGRPCHeader),
XPaddingBytes: lo.FromPtrOr(ds.XPaddingBytes, v.option.XHTTPOpts.XPaddingBytes),
XPaddingObfsMode: lo.FromPtrOr(ds.XPaddingObfsMode, v.option.XHTTPOpts.XPaddingObfsMode),
XPaddingKey: lo.FromPtrOr(ds.XPaddingKey, v.option.XHTTPOpts.XPaddingKey),
XPaddingHeader: lo.FromPtrOr(ds.XPaddingHeader, v.option.XHTTPOpts.XPaddingHeader),
XPaddingPlacement: lo.FromPtrOr(ds.XPaddingPlacement, v.option.XHTTPOpts.XPaddingPlacement),
XPaddingMethod: lo.FromPtrOr(ds.XPaddingMethod, v.option.XHTTPOpts.XPaddingMethod),
UplinkHTTPMethod: lo.FromPtrOr(ds.UplinkHTTPMethod, v.option.XHTTPOpts.UplinkHTTPMethod),
SessionPlacement: lo.FromPtrOr(ds.SessionPlacement, v.option.XHTTPOpts.SessionPlacement),
SessionKey: lo.FromPtrOr(ds.SessionKey, v.option.XHTTPOpts.SessionKey),
SeqPlacement: lo.FromPtrOr(ds.SeqPlacement, v.option.XHTTPOpts.SeqPlacement),
SeqKey: lo.FromPtrOr(ds.SeqKey, v.option.XHTTPOpts.SeqKey),
UplinkDataPlacement: lo.FromPtrOr(ds.UplinkDataPlacement, v.option.XHTTPOpts.UplinkDataPlacement),
UplinkDataKey: lo.FromPtrOr(ds.UplinkDataKey, v.option.XHTTPOpts.UplinkDataKey),
UplinkChunkSize: lo.FromPtrOr(ds.UplinkChunkSize, v.option.XHTTPOpts.UplinkChunkSize),
ScMaxEachPostBytes: lo.FromPtrOr(ds.ScMaxEachPostBytes, v.option.XHTTPOpts.ScMaxEachPostBytes),
ScMinPostsIntervalMs: lo.FromPtrOr(ds.ScMinPostsIntervalMs, v.option.XHTTPOpts.ScMinPostsIntervalMs),
ReuseConfig: downloadReuseCfg,
+42
View File
@@ -825,6 +825,19 @@ proxies: # socks5
# X-Forwarded-For: ""
# no-grpc-header: false
# x-padding-bytes: "100-1000"
# x-padding-obfs-mode: false
# x-padding-key: x_padding
# x-padding-header: Referer
# x-padding-placement: queryInHeader # Available: queryInHeader, cookie, header, query
# x-padding-method: repeat-x # Available: repeat-x, tokenish
# uplink-http-method: POST # Available: POST, PUT, PATCH, DELETE
# session-placement: path # Available: path, query, cookie, header
# session-key: ""
# seq-placement: path # Available: path, query, cookie, header
# seq-key: ""
# uplink-data-placement: body # Available: body, cookie, header
# uplink-data-key: ""
# uplink-chunk-size: 0 # only applicable when uplink-data-placement is not body
# sc-max-each-post-bytes: 1000000
# sc-min-posts-interval-ms: 30
# reuse-settings: # aka XMUX
@@ -842,6 +855,19 @@ proxies: # socks5
# X-Forwarded-For: ""
# no-grpc-header: false
# x-padding-bytes: "100-1000"
# x-padding-obfs-mode: false
# x-padding-key: x_padding
# x-padding-header: Referer
# x-padding-placement: queryInHeader # Available: queryInHeader, cookie, header, query
# x-padding-method: repeat-x # Available: repeat-x, tokenish
# uplink-http-method: POST # Available: POST, PUT, PATCH, DELETE
# session-placement: path # Available: path, query, cookie, header
# session-key: ""
# seq-placement: path # Available: path, query, cookie, header
# seq-key: ""
# uplink-data-placement: body # Available: body, cookie, header
# uplink-data-key: ""
# uplink-chunk-size: 0 # only applicable when uplink-data-placement is not body
# sc-max-each-post-bytes: 1000000
# sc-min-posts-interval-ms: 30
# reuse-settings: # aka XMUX
@@ -1687,6 +1713,20 @@ listeners:
# host: ""
# mode: auto # Available: "stream-one", "stream-up" or "packet-up"
# no-sse-header: false
# x-padding-bytes: "100-1000"
# x-padding-obfs-mode: false
# x-padding-key: x_padding
# x-padding-header: Referer
# x-padding-placement: queryInHeader # Available: queryInHeader, cookie, header, query
# x-padding-method: repeat-x # Available: repeat-x, tokenish
# uplink-http-method: POST # Available: POST, PUT, PATCH, DELETE
# session-placement: path # Available: path, query, cookie, header
# session-key: ""
# seq-placement: path # Available: path, query, cookie, header
# seq-key: ""
# uplink-data-placement: body # Available: body, cookie, header
# uplink-data-key: ""
# uplink-chunk-size: 0 # only applicable when uplink-data-placement is not body
# sc-max-buffered-posts: 30
# sc-stream-up-server-secs: "20-80"
# sc-max-each-post-bytes: 1000000
@@ -1768,6 +1808,8 @@ listeners:
username2: password2
# 一个 base64 字符串用于微调网络行为
# traffic-pattern: ""
# 如果开启,且客户端不发送用户提示,代理服务器将拒绝连接
# user-hint-is-mandatory: false
- name: sudoku-in-1
type: sudoku
+1 -1
View File
@@ -6,7 +6,7 @@ require (
github.com/bahlo/generic-list-go v0.2.0
github.com/coreos/go-iptables v0.8.0
github.com/dlclark/regexp2 v1.11.5
github.com/enfein/mieru/v3 v3.30.1
github.com/enfein/mieru/v3 v3.31.0
github.com/gobwas/ws v1.4.0
github.com/gofrs/uuid/v5 v5.4.0
github.com/golang/snappy v1.0.0
+2 -2
View File
@@ -20,8 +20,8 @@ github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZ
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dunglas/httpsfv v1.0.2 h1:iERDp/YAfnojSDJ7PW3dj1AReJz4MrwbECSSE59JWL0=
github.com/dunglas/httpsfv v1.0.2/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg=
github.com/enfein/mieru/v3 v3.30.1 h1:gHHXQfpQO/5d789o9kokVfej7jl795aJwPihUk3gTDU=
github.com/enfein/mieru/v3 v3.30.1/go.mod h1:zJBUCsi5rxyvHM8fjFf+GLaEl4OEjjBXr1s5F6Qd3hM=
github.com/enfein/mieru/v3 v3.31.0 h1:Fl2ocRCRXJzMygzdRjBHgqI996ZuIDHUmyQyovSf9sA=
github.com/enfein/mieru/v3 v3.31.0/go.mod h1:zJBUCsi5rxyvHM8fjFf+GLaEl4OEjjBXr1s5F6Qd3hM=
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358 h1:kXYqH/sL8dS/FdoFjr12ePjnLPorPo2FsnrHNuXSDyo=
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358/go.mod h1:hkIFzoiIPZYxdFOOLyDho59b7SrDfo+w3h+yWdlg45I=
github.com/ericlagergren/polyval v0.0.0-20220411101811-e25bc10ba391 h1:8j2RH289RJplhA6WfdaPqzg1MjH2K8wX5e0uhAxrw2g=
+14
View File
@@ -34,6 +34,20 @@ type XHTTPConfig struct {
Path string
Host string
Mode string
XPaddingBytes string
XPaddingObfsMode bool
XPaddingKey string
XPaddingHeader string
XPaddingPlacement string
XPaddingMethod string
UplinkHTTPMethod string
SessionPlacement string
SessionKey string
SeqPlacement string
SeqKey string
UplinkDataPlacement string
UplinkDataKey string
UplinkChunkSize string
NoSSEHeader bool
ScStreamUpServerSecs string
ScMaxBufferedPosts string
@@ -41,6 +41,7 @@ func testInboundAnyTLS(t *testing.T, inboundOptions inbound.AnyTLSOption, outbou
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.Password = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewAnyTLS(outboundOptions)
if !assert.NoError(t, err) {
@@ -58,6 +58,30 @@ func init() {
realityPublickey = base64.RawURLEncoding.EncodeToString(privateKey.PublicKey().Bytes())
}
// TestDialer wraps a C.Dialer and retries failed dials for as long as
// neither the per-dial context nor the tunnel-lifetime context has been
// canceled. It exists purely for tests (see the comment inside DialContext).
type TestDialer struct {
	dialer C.Dialer
	ctx    context.Context
}

// DialContext dials via the wrapped dialer, retrying indefinitely on error
// while both contexts are still live.
func (t *TestDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
start:
	conn, err := t.dialer.DialContext(ctx, network, address)
	if err != nil && ctx.Err() == nil && t.ctx.Err() == nil {
		// We are conducting tests locally, and they shouldn't fail.
		// However, a large number of requests in a short period during concurrent testing can exhaust system ports.
		// This can lead to various errors such as WSAECONNREFUSED and WSAENOBUFS.
		// So we just retry if the context is not canceled.
		// NOTE(review): there is no backoff here — a persistent failure that
		// is unrelated to port exhaustion will busy-loop until a context is
		// canceled. Acceptable for tests, but worth confirming.
		goto start
	}
	return conn, err
}

// ListenPacket delegates directly to the wrapped dialer; no retry logic.
func (t *TestDialer) ListenPacket(ctx context.Context, network, address string, rAddrPort netip.AddrPort) (net.PacketConn, error) {
	return t.dialer.ListenPacket(ctx, network, address, rAddrPort)
}

// Compile-time check that TestDialer satisfies C.Dialer.
var _ C.Dialer = (*TestDialer)(nil)
type TestTunnel struct {
HandleTCPConnFn func(conn net.Conn, metadata *C.Metadata)
HandleUDPPacketFn func(packet C.UDPPacket, metadata *C.Metadata)
@@ -65,6 +89,7 @@ type TestTunnel struct {
CloseFn func() error
DoSequentialTestFn func(t *testing.T, proxy C.ProxyAdapter)
DoConcurrentTestFn func(t *testing.T, proxy C.ProxyAdapter)
NewDialerFn func() C.Dialer
}
func (tt *TestTunnel) HandleTCPConn(conn net.Conn, metadata *C.Metadata) {
@@ -96,6 +121,10 @@ func (tt *TestTunnel) DoConcurrentTest(t *testing.T, proxy C.ProxyAdapter) {
tt.DoConcurrentTestFn(t, proxy)
}
func (tt *TestTunnel) NewDialer() C.Dialer {
return tt.NewDialerFn()
}
type TestTunnelListener struct {
ch chan net.Conn
ctx context.Context
@@ -328,6 +357,7 @@ func NewHttpTestTunnel() *TestTunnel {
CloseFn: ln.Close,
DoSequentialTestFn: sequentialTestFn,
DoConcurrentTestFn: concurrentTestFn,
NewDialerFn: func() C.Dialer { return &TestDialer{dialer: dialer.NewDialer(), ctx: ctx} },
}
return tunnel
}
@@ -41,6 +41,7 @@ func testInboundHysteria2(t *testing.T, inboundOptions inbound.Hysteria2Option,
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.Password = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewHysteria2(outboundOptions)
if !assert.NoError(t, err) {
+14 -6
View File
@@ -27,9 +27,10 @@ type Mieru struct {
type MieruOption struct {
BaseOption
Transport string `inbound:"transport"`
Users map[string]string `inbound:"users"`
TrafficPattern string `inbound:"traffic-pattern,omitempty"`
Transport string `inbound:"transport"`
Users map[string]string `inbound:"users"`
TrafficPattern string `inbound:"traffic-pattern,omitempty"`
UserHintIsMandatory bool `inbound:"user-hint-is-mandatory,omitempty"`
}
type mieruListenerFactory struct{}
@@ -158,11 +159,18 @@ func buildMieruServerConfig(option *MieruOption, ports utils.IntRanges[uint16])
}
var trafficPattern *mierupb.TrafficPattern
trafficPattern, _ = mierutp.Decode(option.TrafficPattern)
var advancedSettings *mierupb.ServerAdvancedSettings
if option.UserHintIsMandatory {
advancedSettings = &mierupb.ServerAdvancedSettings{
UserHintIsMandatory: proto.Bool(true),
}
}
return &mieruserver.ServerConfig{
Config: &mierupb.ServerConfig{
PortBindings: portBindings,
Users: users,
TrafficPattern: trafficPattern,
PortBindings: portBindings,
Users: users,
TrafficPattern: trafficPattern,
AdvancedSettings: advancedSettings,
},
StreamListenerFactory: mieruListenerFactory{},
PacketListenerFactory: mieruListenerFactory{},
+8 -4
View File
@@ -206,8 +206,9 @@ func testInboundMieruTCP(t *testing.T, handshakeMode string) {
Listen: "127.0.0.1",
Port: strconv.Itoa(port),
},
Transport: "TCP",
Users: map[string]string{"test": "password"},
Transport: "TCP",
Users: map[string]string{"test": "password"},
UserHintIsMandatory: true,
}
in, err := inbound.NewMieru(&inboundOptions)
if !assert.NoError(t, err) {
@@ -236,6 +237,7 @@ func testInboundMieruTCP(t *testing.T, handshakeMode string) {
Password: "password",
HandshakeMode: handshakeMode,
}
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewMieru(outboundOptions)
if !assert.NoError(t, err) {
return
@@ -260,8 +262,9 @@ func testInboundMieruUDP(t *testing.T, handshakeMode string) {
Listen: "127.0.0.1",
Port: strconv.Itoa(port),
},
Transport: "UDP",
Users: map[string]string{"test": "password"},
Transport: "UDP",
Users: map[string]string{"test": "password"},
UserHintIsMandatory: true,
}
in, err := inbound.NewMieru(&inboundOptions)
if !assert.NoError(t, err) {
@@ -290,6 +293,7 @@ func testInboundMieruUDP(t *testing.T, handshakeMode string) {
Password: "password",
HandshakeMode: handshakeMode,
}
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewMieru(outboundOptions)
if !assert.NoError(t, err) {
return
@@ -85,6 +85,7 @@ func testInboundShadowSocks0(t *testing.T, inboundOptions inbound.ShadowSocksOpt
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.Password = password
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewShadowSocks(outboundOptions)
if !assert.NoError(t, err) {
@@ -43,6 +43,7 @@ func testInboundSudoku(t *testing.T, inboundOptions inbound.SudokuOption, outbou
outboundOptions.Name = "sudoku_outbound"
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewSudoku(outboundOptions)
if !assert.NoError(t, err) {
@@ -43,6 +43,7 @@ func testInboundTrojan(t *testing.T, inboundOptions inbound.TrojanOption, outbou
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.Password = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewTrojan(outboundOptions)
if !assert.NoError(t, err) {
@@ -42,6 +42,7 @@ func testInboundTrustTunnel(t *testing.T, inboundOptions inbound.TrustTunnelOpti
outboundOptions.Port = int(addrPort.Port())
outboundOptions.UserName = "test"
outboundOptions.Password = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewTrustTunnel(outboundOptions)
if !assert.NoError(t, err) {
+1
View File
@@ -69,6 +69,7 @@ func testInboundTuic0(t *testing.T, inboundOptions inbound.TuicOption, outboundO
outboundOptions.Name = "tuic_outbound"
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewTuic(outboundOptions)
if !assert.NoError(t, err) {
+27
View File
@@ -35,6 +35,20 @@ type XHTTPConfig struct {
Path string `inbound:"path,omitempty"`
Host string `inbound:"host,omitempty"`
Mode string `inbound:"mode,omitempty"`
XPaddingBytes string `inbound:"xpadding-bytes,omitempty"`
XPaddingObfsMode bool `inbound:"xpadding-obfs-mode,omitempty"`
XPaddingKey string `inbound:"xpadding-key,omitempty"`
XPaddingHeader string `inbound:"xpadding-header,omitempty"`
XPaddingPlacement string `inbound:"xpadding-placement,omitempty"`
XPaddingMethod string `inbound:"xpadding-method,omitempty"`
UplinkHTTPMethod string `inbound:"uplink-http-method,omitempty"`
SessionPlacement string `inbound:"session-placement,omitempty"`
SessionKey string `inbound:"session-key,omitempty"`
SeqPlacement string `inbound:"seq-placement,omitempty"`
SeqKey string `inbound:"seq-key,omitempty"`
UplinkDataPlacement string `inbound:"uplink-data-placement,omitempty"`
UplinkDataKey string `inbound:"uplink-data-key,omitempty"`
UplinkChunkSize string `inbound:"uplink-chunk-size,omitempty"`
NoSSEHeader bool `inbound:"no-sse-header,omitempty"`
ScStreamUpServerSecs string `inbound:"sc-stream-up-server-secs,omitempty"`
ScMaxBufferedPosts string `inbound:"sc-max-buffered-posts,omitempty"`
@@ -47,6 +61,19 @@ func (o XHTTPConfig) Build() LC.XHTTPConfig {
Host: o.Host,
Mode: o.Mode,
NoSSEHeader: o.NoSSEHeader,
XPaddingBytes: o.XPaddingBytes,
XPaddingObfsMode: o.XPaddingObfsMode,
XPaddingKey: o.XPaddingKey,
XPaddingHeader: o.XPaddingHeader,
XPaddingPlacement: o.XPaddingPlacement,
UplinkHTTPMethod: o.UplinkHTTPMethod,
SessionPlacement: o.SessionPlacement,
SessionKey: o.SessionKey,
SeqPlacement: o.SeqPlacement,
SeqKey: o.SeqKey,
UplinkDataPlacement: o.UplinkDataPlacement,
UplinkDataKey: o.UplinkDataKey,
UplinkChunkSize: o.UplinkChunkSize,
ScStreamUpServerSecs: o.ScStreamUpServerSecs,
ScMaxBufferedPosts: o.ScMaxBufferedPosts,
ScMaxEachPostBytes: o.ScMaxEachPostBytes,
+121
View File
@@ -44,6 +44,7 @@ func testInboundVless(t *testing.T, inboundOptions inbound.VlessOption, outbound
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.UUID = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewVless(outboundOptions)
if !assert.NoError(t, err) {
@@ -488,6 +489,77 @@ func TestInboundVless_XHTTP_Reality(t *testing.T) {
}
}
// TestInboundVless_XHTTP_Encryption exercises VLESS encryption
// (mlkem768x25519plus) over XHTTP in every transport mode, in all
// combinations of split download settings and connection reuse (XMUX).
func TestInboundVless_XHTTP_Encryption(t *testing.T) {
	// The server side gets the decryption private key, the client side the
	// matching password; GenX25519 derives both from one fresh key pair.
	privateKeyBase64, passwordBase64, _, err := encryption.GenX25519("")
	if err != nil {
		t.Fatal(err)
		return
	}
	testCases := []struct {
		mode string
	}{
		{mode: "auto"},
		{mode: "stream-one"},
		{mode: "stream-up"},
		{mode: "packet-up"},
	}
	for _, testCase := range testCases {
		testCase := testCase // capture per-iteration copy (pre-Go 1.22 idiom)
		t.Run(testCase.mode, func(t *testing.T) {
			// getConfig builds a fresh inbound/outbound pair so each subtest
			// can mutate its own copy independently.
			getConfig := func() (inbound.VlessOption, outbound.VlessOption) {
				inboundOptions := inbound.VlessOption{
					Decryption: "mlkem768x25519plus.native.600s." + privateKeyBase64,
					XHTTPConfig: inbound.XHTTPConfig{
						Path: "/vless-xhttp",
						Host: "example.com",
						Mode: testCase.mode,
					},
				}
				outboundOptions := outbound.VlessOption{
					Encryption: "mlkem768x25519plus.native.0rtt." + passwordBase64,
					Network:    "xhttp",
					XHTTPOpts: outbound.XHTTPOptions{
						Path: "/vless-xhttp",
						Host: "example.com",
						Mode: testCase.mode,
					},
				}
				return inboundOptions, outboundOptions
			}
			// "nosplit": upload and download share the same connection config.
			t.Run("nosplit", func(t *testing.T) {
				t.Run("single", func(t *testing.T) {
					inboundOptions, outboundOptions := getConfig()
					testInboundVless(t, inboundOptions, outboundOptions)
				})
				t.Run("reuse", func(t *testing.T) {
					inboundOptions, outboundOptions := getConfig()
					testInboundVless(t, inboundOptions, withXHTTPReuse(outboundOptions))
				})
			})
			// "split": the download direction uses a separate (default/empty)
			// DownloadSettings config.
			t.Run("split", func(t *testing.T) {
				if testCase.mode == "stream-one" { // stream-one not supported download settings
					return
				}
				t.Run("single", func(t *testing.T) {
					inboundOptions, outboundOptions := getConfig()
					outboundOptions.XHTTPOpts.DownloadSettings = &outbound.XHTTPDownloadSettings{}
					testInboundVless(t, inboundOptions, outboundOptions)
				})
				t.Run("reuse", func(t *testing.T) {
					inboundOptions, outboundOptions := getConfig()
					outboundOptions.XHTTPOpts.DownloadSettings = &outbound.XHTTPDownloadSettings{}
					testInboundVless(t, inboundOptions, withXHTTPReuse(outboundOptions))
				})
			})
		})
	}
}
func TestInboundVless_XHTTP_PacketUp_H1(t *testing.T) {
getConfig := func() (inbound.VlessOption, outbound.VlessOption) {
inboundOptions := inbound.VlessOption{
@@ -524,6 +596,55 @@ func TestInboundVless_XHTTP_PacketUp_H1(t *testing.T) {
})
}
// TestInboundVless_XHTTP_PacketUp_H1_Encryption exercises VLESS encryption
// (mlkem768x25519plus) over XHTTP packet-up mode forced onto HTTP/1.1,
// with and without connection reuse (XMUX) and with the xtls-rprx-vision
// flow layered on top.
func TestInboundVless_XHTTP_PacketUp_H1_Encryption(t *testing.T) {
	privateKeyBase64, passwordBase64, _, err := encryption.GenX25519("")
	if err != nil {
		t.Fatal(err)
		return
	}
	// getConfig builds a fresh inbound/outbound pair so each subtest can
	// mutate its own copy independently.
	getConfig := func() (inbound.VlessOption, outbound.VlessOption) {
		inboundOptions := inbound.VlessOption{
			Decryption: "mlkem768x25519plus.native.600s." + privateKeyBase64,
			XHTTPConfig: inbound.XHTTPConfig{
				Path: "/vless-xhttp",
				Host: "example.com",
				Mode: "packet-up",
			},
		}
		outboundOptions := outbound.VlessOption{
			Encryption: "mlkem768x25519plus.native.0rtt." + passwordBase64,
			Network:    "xhttp",
			ALPN:       []string{"http/1.1"}, // pin HTTP/1.1 so packet-up is tested without h2
			XHTTPOpts: outbound.XHTTPOptions{
				Path: "/vless-xhttp",
				Host: "example.com",
				Mode: "packet-up",
			},
		}
		return inboundOptions, outboundOptions
	}
	t.Run("default", func(t *testing.T) {
		inboundOptions, outboundOptions := getConfig()
		testInboundVless(t, inboundOptions, outboundOptions)
		t.Run("xtls-rprx-vision", func(t *testing.T) {
			outboundOptions := outboundOptions
			outboundOptions.Flow = "xtls-rprx-vision"
			testInboundVless(t, inboundOptions, outboundOptions)
		})
	})
	t.Run("reuse", func(t *testing.T) {
		inboundOptions, outboundOptions := getConfig()
		// Fix: this subtest previously ran the exact same configuration as
		// "default" — it must enable XMUX via withXHTTPReuse (matching
		// TestInboundVless_XHTTP_Encryption) to actually cover reuse.
		testInboundVless(t, inboundOptions, withXHTTPReuse(outboundOptions))
		t.Run("xtls-rprx-vision", func(t *testing.T) {
			outboundOptions := outboundOptions
			outboundOptions.Flow = "xtls-rprx-vision"
			testInboundVless(t, inboundOptions, withXHTTPReuse(outboundOptions))
		})
	})
}
func withXHTTPReuse(out outbound.VlessOption) outbound.VlessOption {
out.XHTTPOpts.ReuseSettings = &outbound.XHTTPReuseSettings{
MaxConnections: "0",
@@ -45,6 +45,7 @@ func testInboundVMess(t *testing.T, inboundOptions inbound.VmessOption, outbound
outboundOptions.UUID = userUUID
outboundOptions.AlterID = 0
outboundOptions.Cipher = "auto"
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewVmess(outboundOptions)
if !assert.NoError(t, err) {
+14
View File
@@ -159,6 +159,20 @@ func New(config LC.VlessServer, tunnel C.Tunnel, additions ...inbound.Addition)
Host: config.XHTTPConfig.Host,
Path: config.XHTTPConfig.Path,
Mode: config.XHTTPConfig.Mode,
XPaddingBytes: config.XHTTPConfig.XPaddingBytes,
XPaddingObfsMode: config.XHTTPConfig.XPaddingObfsMode,
XPaddingKey: config.XHTTPConfig.XPaddingKey,
XPaddingHeader: config.XHTTPConfig.XPaddingHeader,
XPaddingPlacement: config.XHTTPConfig.XPaddingPlacement,
XPaddingMethod: config.XHTTPConfig.XPaddingMethod,
UplinkHTTPMethod: config.XHTTPConfig.UplinkHTTPMethod,
SessionPlacement: config.XHTTPConfig.SessionPlacement,
SessionKey: config.XHTTPConfig.SessionKey,
SeqPlacement: config.XHTTPConfig.SeqPlacement,
SeqKey: config.XHTTPConfig.SeqKey,
UplinkDataPlacement: config.XHTTPConfig.UplinkDataPlacement,
UplinkDataKey: config.XHTTPConfig.UplinkDataKey,
UplinkChunkSize: config.XHTTPConfig.UplinkChunkSize,
NoSSEHeader: config.XHTTPConfig.NoSSEHeader,
ScStreamUpServerSecs: config.XHTTPConfig.ScStreamUpServerSecs,
ScMaxBufferedPosts: config.XHTTPConfig.ScMaxBufferedPosts,
+249
View File
@@ -0,0 +1,249 @@
package xhttp
import (
"math"
"strconv"
"strings"
"time"
"github.com/metacubex/http"
"github.com/metacubex/randv2"
)
// ChromeVersion returns a plausible Chrome major version for "now",
// anchored at Chrome 144 (released 2026-01-13) and advanced by one major
// release every ~35 days. A squared-uniform random lag of up to 104 days
// skews the result toward slightly older versions, mimicking real-world
// auto-update delay.
func ChromeVersion() int {
	const anchorMajor = 144
	anchorDay := time.Date(2026, 1, 13, 0, 0, 0, 0, time.UTC).Unix() / 86400
	today := time.Now().Unix() / 86400
	// Squaring the uniform sample biases the simulated lag toward small values.
	lagDays := int(math.Floor(math.Pow(randv2.Float64(), 2) * 105))
	elapsed := int(today-anchorDay-35) - lagDays
	return anchorMajor + elapsed/35
}
// safariMinorMap maps 15-day buckets since the (jittered) yearly Safari
// release date to a plausible Safari minor version. Its 25 entries cover
// up to ~375 days, which bounds the bucket index computed in SafariVersion.
var safariMinorMap [25]int = [25]int{0, 0, 0, 1, 1,
	1, 2, 2, 2, 2, 3, 3, 3, 4, 4,
	4, 5, 5, 5, 5, 5, 6, 6, 6, 6}
// The generators below are likewise time-anchored, with the random lag
// shaped by a power curve rather than being uniform.

// CurlVersion returns a plausible "8.x.0" curl version string, anchored
// at curl 8.0.0 (released 2023-03-20) and advanced roughly every 57 days
// (the real cadence is ~56.67 days), minus a squared-uniform lag of up to
// 164 days.
func CurlVersion() string {
	anchorDay := time.Date(2023, 3, 20, 0, 0, 0, 0, time.UTC).Unix() / 86400
	today := time.Now().Unix() / 86400
	lagDays := int(math.Floor(math.Pow(randv2.Float64(), 2) * 165))
	minor := (int(today-anchorDay-60) - lagDays) / 57
	return "8." + strconv.Itoa(minor) + ".0"
}
// FirefoxVersion returns a plausible Firefox major version, anchored at
// Firefox 128 and advanced by one major release every ~30 days, minus a
// squared-uniform lag of up to 49 days.
func FirefoxVersion() int {
	// Firefox 128 ESR shipped in July 2024; the anchor used here is 2024-07-29.
	// (The original comment's "09/07/2023" year was wrong.)
	var timeCurrent int64 = time.Now().Unix() / 86400
	var timeStart int64 = time.Date(2024, 7, 29, 0, 0, 0, 0, time.UTC).Unix() / 86400
	var timeDiff = timeCurrent - timeStart - 25 - int64(math.Floor(math.Pow(randv2.Float64(), 2)*50))
	return int(timeDiff/30) + 128
}
// SafariVersion returns a plausible Safari version string such as "18.2".
// The major version follows the yearly release cycle (Safari N ships
// around Sep 23 of year 1999+N), with the release date randomly delayed
// by up to 74 days (cubed-uniform, so usually small). The minor version
// is looked up from safariMinorMap in 15-day buckets since that
// (jittered) release date.
func SafariVersion() string {
	var anchoredTime time.Time = time.Now()
	var releaseYear int = anchoredTime.Year()
	var splitPoint time.Time = time.Date(releaseYear, 9, 23, 0, 0, 0, 0, time.UTC)
	// Cubing the uniform sample keeps the simulated release delay short most of the time.
	var delayedDays = int(math.Floor(math.Pow(randv2.Float64(), 3) * 75))
	splitPoint = splitPoint.AddDate(0, 0, delayedDays)
	if anchoredTime.Compare(splitPoint) < 0 {
		// Before this year's (jittered) release date: fall back to last
		// year's cycle, keeping the same random delay.
		releaseYear--
		splitPoint = time.Date(releaseYear, 9, 23, 0, 0, 0, 0, time.UTC)
		splitPoint = splitPoint.AddDate(0, 0, delayedDays)
	}
	// 1296000s = 15 days per bucket. The elapsed span is always under ~366
	// days, so the index stays within safariMinorMap's 25 entries.
	var minorVersion = safariMinorMap[(anchoredTime.Unix()-splitPoint.Unix())/1296000]
	return strconv.Itoa(releaseYear-1999) + "." + strconv.Itoa(minorVersion)
}
// The full Chromium brand GREASE implementation.
// clientHintGreaseNA lists the separator characters Chromium may inject
// into its "Not?A_Brand" GREASE brand; clientHintVersionNA lists the
// GREASE version numbers Chromium uses.
var clientHintGreaseNA = []string{" ", "(", ":", "-", ".", "/", ")", ";", "=", "?", "_"}
var clientHintVersionNA = []string{"8", "99", "24"}

// Precomputed permutations of 3 and 4 elements, used by getGreasedChOrder
// to shuffle the brand list deterministically from a seed.
var clientHintShuffle3 = [][3]int{{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}}
var clientHintShuffle4 = [][4]int{
	{0, 1, 2, 3}, {0, 1, 3, 2}, {0, 2, 1, 3}, {0, 2, 3, 1}, {0, 3, 1, 2}, {0, 3, 2, 1},
	{1, 0, 2, 3}, {1, 0, 3, 2}, {1, 2, 0, 3}, {1, 2, 3, 0}, {1, 3, 0, 2}, {1, 3, 2, 0},
	{2, 0, 1, 3}, {2, 0, 3, 1}, {2, 1, 0, 3}, {2, 1, 3, 0}, {2, 3, 0, 1}, {2, 3, 1, 0},
	{3, 0, 1, 2}, {3, 0, 2, 1}, {3, 1, 0, 2}, {3, 1, 2, 0}, {3, 2, 0, 1}, {3, 2, 1, 0}}
// getGreasedChInvalidBrand builds the GREASE brand entry, e.g.
// `"Not:A-Brand";v="99"`, choosing the two separators and the version
// from the lookup tables via the seed.
func getGreasedChInvalidBrand(seed int) string {
	first := clientHintGreaseNA[seed%len(clientHintGreaseNA)]
	second := clientHintGreaseNA[(seed+1)%len(clientHintGreaseNA)]
	version := clientHintVersionNA[seed%len(clientHintVersionNA)]
	var sb strings.Builder
	sb.WriteString("\"Not")
	sb.WriteString(first)
	sb.WriteString("A")
	sb.WriteString(second)
	sb.WriteString("Brand\";v=\"")
	sb.WriteString(version)
	sb.WriteString("\"")
	return sb.String()
}
// getGreasedChOrder deterministically derives a permutation of
// [0, brandLength) from the seed, mirroring how Chromium GREASEs the
// order of its Sec-CH-UA brand list. Lengths above 4 reuse the 4-element
// permutation table (the brand list never exceeds 4 entries).
func getGreasedChOrder(brandLength int, seed int) []int {
	if brandLength == 1 {
		return []int{0}
	}
	if brandLength == 2 {
		return []int{seed % brandLength, (seed + 1) % brandLength}
	}
	if brandLength == 3 {
		return clientHintShuffle3[seed%len(clientHintShuffle3)][:]
	}
	return clientHintShuffle4[seed%len(clientHintShuffle4)][:]
}
// getUngreasedChUa assembles the unshuffled brand list: the GREASE brand,
// "Chromium", and — depending on forkName — "Google Chrome" or
// "Microsoft Edge", each tagged with the given major version.
func getUngreasedChUa(majorVersion int, forkName string) []string {
	// Capacity 4 matches the maximum allowed brand count, so append never
	// has to reallocate.
	version := strconv.Itoa(majorVersion)
	brands := make([]string, 0, 4)
	brands = append(brands, getGreasedChInvalidBrand(majorVersion))
	brands = append(brands, "\"Chromium\";v=\""+version+"\"")
	if forkName == "chrome" {
		brands = append(brands, "\"Google Chrome\";v=\""+version+"\"")
	} else if forkName == "edge" {
		brands = append(brands, "\"Microsoft Edge\";v=\""+version+"\"")
	}
	return brands
}
// getGreasedChUa renders the final Sec-CH-UA value: the brand list,
// scattered into a seed-derived permutation and joined with ", ".
func getGreasedChUa(majorVersion int, forkName string) string {
	brands := getUngreasedChUa(majorVersion, forkName)
	order := getGreasedChOrder(len(brands), majorVersion)
	// order[i] is the destination slot for brands[i] (inverse-permutation
	// placement, preserved from the original).
	out := make([]string, len(brands))
	for i := 0; i < len(order); i++ {
		out[order[i]] = brands[i]
	}
	return strings.Join(out, ", ")
}
// The code below provides a coherent default browser user agent string based on a CPU-seeded PRNG.
// Each value is computed once at package init so that every header emitted
// by this process presents the same, internally consistent browser identity.
var CurlUA = "curl/" + CurlVersion()

var AnchoredFirefoxVersion = strconv.Itoa(FirefoxVersion())
var FirefoxUA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:" + AnchoredFirefoxVersion + ".0) Gecko/20100101 Firefox/" + AnchoredFirefoxVersion + ".0"

var SafariUA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/" + SafariVersion() + " Safari/605.1.15"

// Chromium browsers.
var AnchoredChromeVersion = ChromeVersion()
var ChromeUA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/" + strconv.Itoa(AnchoredChromeVersion) + ".0.0.0 Safari/537.36"
var ChromeUACH = getGreasedChUa(AnchoredChromeVersion, "chrome")

// Fix: a real Edge UA has a space before the "Edg/" token
// ("... Safari/537.36 Edg/144.0.0.0"). The original concatenation produced
// the malformed "...Safari/537.36Edg/...", which defeats the masquerade.
var MSEdgeUA = ChromeUA + " Edg/" + strconv.Itoa(AnchoredChromeVersion) + ".0.0.0"
var MSEdgeUACH = getGreasedChUa(AnchoredChromeVersion, "edge")
// applyMasqueradedHeaders rewrites request headers so the request looks
// like it came from a real client of the given kind. browser selects the
// identity ("chrome", "edge", "firefox", "safari", "golang", "curl");
// variant selects the request context ("nav" = top-level navigation,
// "ws" = WebSocket upgrade, "fetch" = XHR/fetch subresource).
//
// Direct map assignment (header["Sec-CH-UA"] = ...) is used instead of
// header.Set where the exact header-name casing matters: Set canonicalizes
// names (net/http semantics would yield "Sec-Ch-Ua" and "Dnt"), which real
// browsers never send. Caller-provided Cache-Control/Pragma/Accept values
// are respected (only filled in when empty).
func applyMasqueradedHeaders(header http.Header, browser string, variant string) {
	// Browser-specific.
	switch browser {
	case "chrome":
		header["Sec-CH-UA"] = []string{ChromeUACH}
		header["Sec-CH-UA-Mobile"] = []string{"?0"}
		header["Sec-CH-UA-Platform"] = []string{"\"Windows\""}
		header["DNT"] = []string{"1"}
		header.Set("User-Agent", ChromeUA)
		header.Set("Accept-Language", "en-US,en;q=0.9")
	case "edge":
		header["Sec-CH-UA"] = []string{MSEdgeUACH}
		header["Sec-CH-UA-Mobile"] = []string{"?0"}
		header["Sec-CH-UA-Platform"] = []string{"\"Windows\""}
		header["DNT"] = []string{"1"}
		header.Set("User-Agent", MSEdgeUA)
		header.Set("Accept-Language", "en-US,en;q=0.9")
	case "firefox":
		// Firefox sends no Sec-CH-UA client hints.
		header.Set("User-Agent", FirefoxUA)
		header["DNT"] = []string{"1"}
		header.Set("Accept-Language", "en-US,en;q=0.5")
	case "safari":
		// Safari sends neither client hints nor DNT here.
		header.Set("User-Agent", SafariUA)
		header.Set("Accept-Language", "en-US,en;q=0.9")
	case "golang":
		// Expose the default net/http header.
		header.Del("User-Agent")
		return
	case "curl":
		// curl sends only its UA; skip the browser fetch-metadata section.
		header.Set("User-Agent", CurlUA)
		return
	}
	// Context-specific.
	switch variant {
	case "nav":
		if header.Get("Cache-Control") == "" {
			switch browser {
			case "chrome", "edge":
				header.Set("Cache-Control", "max-age=0")
			}
		}
		header.Set("Upgrade-Insecure-Requests", "1")
		if header.Get("Accept") == "" {
			switch browser {
			case "chrome", "edge":
				header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/jxl,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7")
			case "firefox", "safari":
				header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
			}
		}
		header.Set("Sec-Fetch-Site", "none")
		header.Set("Sec-Fetch-Mode", "navigate")
		switch browser {
		case "safari":
			// Deliberately empty: Safari omits Sec-Fetch-User on navigations.
		default:
			header.Set("Sec-Fetch-User", "?1")
		}
		header.Set("Sec-Fetch-Dest", "document")
		header.Set("Priority", "u=0, i")
	case "ws":
		header.Set("Sec-Fetch-Mode", "websocket")
		switch browser {
		case "safari":
			// Safari is NOT web-compliant here!
			header.Set("Sec-Fetch-Dest", "websocket")
		default:
			header.Set("Sec-Fetch-Dest", "empty")
		}
		header.Set("Sec-Fetch-Site", "same-origin")
		if header.Get("Cache-Control") == "" {
			header.Set("Cache-Control", "no-cache")
		}
		if header.Get("Pragma") == "" {
			header.Set("Pragma", "no-cache")
		}
		if header.Get("Accept") == "" {
			header.Set("Accept", "*/*")
		}
	case "fetch":
		header.Set("Sec-Fetch-Mode", "cors")
		header.Set("Sec-Fetch-Dest", "empty")
		header.Set("Sec-Fetch-Site", "same-origin")
		if header.Get("Priority") == "" {
			switch browser {
			case "chrome", "edge":
				header.Set("Priority", "u=1, i")
			case "firefox":
				header.Set("Priority", "u=4")
			case "safari":
				header.Set("Priority", "u=3, i")
			}
		}
		if header.Get("Cache-Control") == "" {
			header.Set("Cache-Control", "no-cache")
		}
		if header.Get("Pragma") == "" {
			header.Set("Pragma", "no-cache")
		}
		if header.Get("Accept") == "" {
			header.Set("Accept", "*/*")
		}
	}
}
// TryDefaultHeadersWith resolves the special User-Agent placeholder values
// ("chrome", "firefox", "safari", "edge", "curl", "golang") used by transports
// into full masqueraded header sets for the given variant. An unset
// User-Agent defaults to the chrome profile; any other (real) User-Agent
// string is left untouched. Formerly known as HandleTransportUASettings.
func TryDefaultHeadersWith(header http.Header, variant string) {
	if len(header.Values("User-Agent")) < 1 {
		applyMasqueradedHeaders(header, "chrome", variant)
		return
	}
	switch ua := header.Get("User-Agent"); ua {
	case "chrome", "firefox", "safari", "edge", "curl", "golang":
		applyMasqueradedHeaders(header, ua, variant)
	}
}
+3 -9
View File
@@ -21,7 +21,6 @@ import (
"github.com/metacubex/quic-go"
"github.com/metacubex/quic-go/http3"
"github.com/metacubex/tls"
"golang.org/x/sync/semaphore"
)
// ConnIdleTimeout defines the maximum time an idle TCP session can survive in the tunnel,
@@ -115,7 +114,7 @@ func (c *PacketUpWriter) write(b []byte) (int, error) {
Path: c.cfg.NormalizedPath(),
}
req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, u.String(), nil)
req, err := http.NewRequestWithContext(c.ctx, c.cfg.GetNormalizedUplinkHTTPMethod(), u.String(), nil)
if err != nil {
return 0, err
}
@@ -177,12 +176,7 @@ func NewTransport(dialRaw DialRawFunc, wrapTLS WrapTLSFunc, dialQUIC DialQUICFun
}
}
if len(alpn) == 1 && alpn[0] == "http/1.1" { // `alpn: [http/1.1]` means using http/1.1 mode
w := semaphore.NewWeighted(20) // limit concurrent dialing to avoid WSAECONNREFUSED on Windows
dialContext := func(ctx context.Context, network, addr string) (net.Conn, error) {
if err := w.Acquire(ctx, 1); err != nil {
return nil, err
}
defer w.Release(1)
raw, err := dialRaw(ctx)
if err != nil {
return nil, err
@@ -359,7 +353,7 @@ func (c *Client) DialStreamOne() (net.Conn, error) {
},
})
req, err := http.NewRequestWithContext(ctx, http.MethodPost, requestURL.String(), pr)
req, err := http.NewRequestWithContext(ctx, c.cfg.GetNormalizedUplinkHTTPMethod(), requestURL.String(), pr)
if err != nil {
_ = pr.Close()
_ = pw.Close()
@@ -470,7 +464,7 @@ func (c *Client) DialStreamUp() (net.Conn, error) {
uploadReq, err := http.NewRequestWithContext(
c.ctx,
http.MethodPost,
c.cfg.GetNormalizedUplinkHTTPMethod(),
streamURL.String(),
pr,
)
+337 -74
View File
@@ -2,6 +2,7 @@ package xhttp
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"math/rand"
@@ -11,6 +12,16 @@ import (
"github.com/metacubex/http"
)
const (
PlacementQueryInHeader = "queryInHeader"
PlacementCookie = "cookie"
PlacementHeader = "header"
PlacementQuery = "query"
PlacementPath = "path"
PlacementBody = "body"
PlacementAuto = "auto"
)
type Config struct {
Host string
Path string
@@ -18,6 +29,19 @@ type Config struct {
Headers map[string]string
NoGRPCHeader bool
XPaddingBytes string
XPaddingObfsMode bool
XPaddingKey string
XPaddingHeader string
XPaddingPlacement string
XPaddingMethod string
UplinkHTTPMethod string
SessionPlacement string
SessionKey string
SeqPlacement string
SeqKey string
UplinkDataPlacement string
UplinkDataKey string
UplinkChunkSize string
NoSSEHeader bool // server only
ScStreamUpServerSecs string // server only
ScMaxBufferedPosts string // server only
@@ -70,37 +94,92 @@ func (c *Config) NormalizedPath() string {
return path
}
func (c *Config) RequestHeader() http.Header {
func (c *Config) GetRequestHeader() http.Header {
h := http.Header{}
for k, v := range c.Headers {
h.Set(k, v)
}
if h.Get("User-Agent") == "" {
h.Set("User-Agent", "Mozilla/5.0")
}
if h.Get("Accept") == "" {
h.Set("Accept", "*/*")
}
if h.Get("Accept-Language") == "" {
h.Set("Accept-Language", "en-US,en;q=0.9")
}
if h.Get("Cache-Control") == "" {
h.Set("Cache-Control", "no-cache")
}
if h.Get("Pragma") == "" {
h.Set("Pragma", "no-cache")
}
TryDefaultHeadersWith(h, "fetch")
return h
}
func (c *Config) RandomPadding() (string, error) {
r, err := ParseRange(c.XPaddingBytes, "100-1000")
if err != nil {
return "", fmt.Errorf("invalid x-padding-bytes: %w", err)
func (c *Config) GetRequestHeaderWithPayload(payload []byte, uplinkChunkSize Range) http.Header {
header := c.GetRequestHeader()
key := c.UplinkDataKey
encodedData := base64.RawURLEncoding.EncodeToString(payload)
for i := 0; len(encodedData) > 0; i++ {
chunkSize := uplinkChunkSize.Rand()
if len(encodedData) < chunkSize {
chunkSize = len(encodedData)
}
chunk := encodedData[:chunkSize]
encodedData = encodedData[chunkSize:]
headerKey := fmt.Sprintf("%s-%d", key, i)
header.Set(headerKey, chunk)
}
return strings.Repeat("X", r.Rand()), nil
return header
}
func (c *Config) GetRequestCookiesWithPayload(payload []byte, uplinkChunkSize Range) []*http.Cookie {
cookies := []*http.Cookie{}
key := c.UplinkDataKey
encodedData := base64.RawURLEncoding.EncodeToString(payload)
for i := 0; len(encodedData) > 0; i++ {
chunkSize := uplinkChunkSize.Rand()
if len(encodedData) < chunkSize {
chunkSize = len(encodedData)
}
chunk := encodedData[:chunkSize]
encodedData = encodedData[chunkSize:]
cookieName := fmt.Sprintf("%s_%d", key, i)
cookies = append(cookies, &http.Cookie{Name: cookieName, Value: chunk})
}
return cookies
}
func (c *Config) WriteResponseHeader(writer http.ResponseWriter, requestMethod string, requestHeader http.Header) {
if origin := requestHeader.Get("Origin"); origin == "" {
writer.Header().Set("Access-Control-Allow-Origin", "*")
} else {
// Chrome says: The value of the 'Access-Control-Allow-Origin' header in the response must not be the wildcard '*' when the request's credentials mode is 'include'.
writer.Header().Set("Access-Control-Allow-Origin", origin)
}
if c.GetNormalizedSessionPlacement() == PlacementCookie ||
c.GetNormalizedSeqPlacement() == PlacementCookie ||
c.XPaddingPlacement == PlacementCookie ||
c.GetNormalizedUplinkDataPlacement() == PlacementCookie {
writer.Header().Set("Access-Control-Allow-Credentials", "true")
}
if requestMethod == "OPTIONS" {
requestedMethod := requestHeader.Get("Access-Control-Request-Method")
if requestedMethod != "" {
writer.Header().Set("Access-Control-Allow-Methods", requestedMethod)
} else {
writer.Header().Set("Access-Control-Allow-Methods", "*")
}
requestedHeaders := requestHeader.Get("Access-Control-Request-Headers")
if requestedHeaders == "" {
writer.Header().Set("Access-Control-Allow-Headers", "*")
} else {
writer.Header().Set("Access-Control-Allow-Headers", requestedHeaders)
}
}
}
func (c *Config) GetNormalizedUplinkHTTPMethod() string {
if c.UplinkHTTPMethod == "" {
return "POST"
}
return c.UplinkHTTPMethod
}
func (c *Config) GetNormalizedScStreamUpServerSecs() (Range, error) {
@@ -144,6 +223,84 @@ func (c *Config) GetNormalizedScMinPostsIntervalMs() (Range, error) {
return r, nil
}
func (c *Config) GetNormalizedUplinkChunkSize() (Range, error) {
uplinkChunkSize, err := ParseRange(c.UplinkChunkSize, "")
if err != nil {
return Range{}, fmt.Errorf("invalid uplink-chunk-size: %w", err)
}
if uplinkChunkSize.Max == 0 {
switch c.GetNormalizedUplinkDataPlacement() {
case PlacementCookie:
return Range{
Min: 2 * 1024, // 2 KiB
Max: 3 * 1024, // 3 KiB
}, nil
case PlacementHeader:
return Range{
Min: 3 * 1024, // 3 KiB
Max: 4 * 1024, // 4 KiB
}, nil
default:
return c.GetNormalizedScMaxEachPostBytes()
}
} else if uplinkChunkSize.Min < 64 {
uplinkChunkSize.Min = 64
if uplinkChunkSize.Max < 64 {
uplinkChunkSize.Max = 64
}
}
return uplinkChunkSize, nil
}
func (c *Config) GetNormalizedSessionPlacement() string {
if c.SessionPlacement == "" {
return PlacementPath
}
return c.SessionPlacement
}
func (c *Config) GetNormalizedSeqPlacement() string {
if c.SeqPlacement == "" {
return PlacementPath
}
return c.SeqPlacement
}
func (c *Config) GetNormalizedUplinkDataPlacement() string {
if c.UplinkDataPlacement == "" {
return PlacementBody
}
return c.UplinkDataPlacement
}
func (c *Config) GetNormalizedSessionKey() string {
if c.SessionKey != "" {
return c.SessionKey
}
switch c.GetNormalizedSessionPlacement() {
case PlacementHeader:
return "X-Session"
case PlacementCookie, PlacementQuery:
return "x_session"
default:
return ""
}
}
func (c *Config) GetNormalizedSeqKey() string {
if c.SeqKey != "" {
return c.SeqKey
}
switch c.GetNormalizedSeqPlacement() {
case PlacementHeader:
return "X-Seq"
case PlacementCookie, PlacementQuery:
return "x_seq"
default:
return ""
}
}
type Range struct {
Min int
Max int
@@ -231,32 +388,6 @@ func (c *ReuseConfig) ResolveEntryConfig() (Range, Range, Range, error) {
return cMaxReuseTimes, hMaxRequestTimes, hMaxReusableSecs, nil
}
func (c *Config) FillStreamRequest(req *http.Request, sessionID string) error {
req.Header = c.RequestHeader()
paddingValue, err := c.RandomPadding()
if err != nil {
return err
}
if paddingValue != "" {
rawURL := req.URL.String()
sep := "?"
if strings.Contains(rawURL, "?") {
sep = "&"
}
req.Header.Set("Referer", rawURL+sep+"x_padding="+paddingValue)
}
c.ApplyMetaToRequest(req, sessionID, "")
if req.Body != nil && !c.NoGRPCHeader {
req.Header.Set("Content-Type", "application/grpc")
}
return nil
}
func appendToPath(path, value string) string {
if strings.HasSuffix(path, "/") {
return path + value
@@ -264,53 +395,185 @@ func appendToPath(path, value string) string {
return path + "/" + value
}
func (c *Config) ApplyMetaToRequest(req *http.Request, sessionID string, seqStr string) {
if sessionID != "" {
req.URL.Path = appendToPath(req.URL.Path, sessionID)
func (c *Config) ApplyMetaToRequest(req *http.Request, sessionId string, seqStr string) {
sessionPlacement := c.GetNormalizedSessionPlacement()
seqPlacement := c.GetNormalizedSeqPlacement()
sessionKey := c.GetNormalizedSessionKey()
seqKey := c.GetNormalizedSeqKey()
if sessionId != "" {
switch sessionPlacement {
case PlacementPath:
req.URL.Path = appendToPath(req.URL.Path, sessionId)
case PlacementQuery:
q := req.URL.Query()
q.Set(sessionKey, sessionId)
req.URL.RawQuery = q.Encode()
case PlacementHeader:
req.Header.Set(sessionKey, sessionId)
case PlacementCookie:
req.AddCookie(&http.Cookie{Name: sessionKey, Value: sessionId})
}
}
if seqStr != "" {
req.URL.Path = appendToPath(req.URL.Path, seqStr)
switch seqPlacement {
case PlacementPath:
req.URL.Path = appendToPath(req.URL.Path, seqStr)
case PlacementQuery:
q := req.URL.Query()
q.Set(seqKey, seqStr)
req.URL.RawQuery = q.Encode()
case PlacementHeader:
req.Header.Set(seqKey, seqStr)
case PlacementCookie:
req.AddCookie(&http.Cookie{Name: seqKey, Value: seqStr})
}
}
}
func (c *Config) FillPacketRequest(req *http.Request, sessionID string, seqStr string, payload []byte) error {
req.Header = c.RequestHeader()
req.Body = io.NopCloser(bytes.NewReader(payload))
req.ContentLength = int64(len(payload))
func (c *Config) ExtractMetaFromRequest(req *http.Request, path string) (sessionId string, seqStr string) {
sessionPlacement := c.GetNormalizedSessionPlacement()
seqPlacement := c.GetNormalizedSeqPlacement()
sessionKey := c.GetNormalizedSessionKey()
seqKey := c.GetNormalizedSeqKey()
paddingValue, err := c.RandomPadding()
var subpath []string
pathPart := 0
if sessionPlacement == PlacementPath || seqPlacement == PlacementPath {
subpath = strings.Split(req.URL.Path[len(path):], "/")
}
switch sessionPlacement {
case PlacementPath:
if len(subpath) > pathPart {
sessionId = subpath[pathPart]
pathPart += 1
}
case PlacementQuery:
sessionId = req.URL.Query().Get(sessionKey)
case PlacementHeader:
sessionId = req.Header.Get(sessionKey)
case PlacementCookie:
if cookie, e := req.Cookie(sessionKey); e == nil {
sessionId = cookie.Value
}
}
switch seqPlacement {
case PlacementPath:
if len(subpath) > pathPart {
seqStr = subpath[pathPart]
pathPart += 1
}
case PlacementQuery:
seqStr = req.URL.Query().Get(seqKey)
case PlacementHeader:
seqStr = req.Header.Get(seqKey)
case PlacementCookie:
if cookie, e := req.Cookie(seqKey); e == nil {
seqStr = cookie.Value
}
}
return sessionId, seqStr
}
func (c *Config) FillStreamRequest(req *http.Request, sessionID string) error {
req.Header = c.GetRequestHeader()
xPaddingBytes, err := c.GetNormalizedXPaddingBytes()
if err != nil {
return err
}
if paddingValue != "" {
rawURL := req.URL.String()
sep := "?"
if strings.Contains(rawURL, "?") {
sep = "&"
length := xPaddingBytes.Rand()
config := XPaddingConfig{Length: length}
if c.XPaddingObfsMode {
config.Placement = XPaddingPlacement{
Placement: c.XPaddingPlacement,
Key: c.XPaddingKey,
Header: c.XPaddingHeader,
RawURL: req.URL.String(),
}
config.Method = PaddingMethod(c.XPaddingMethod)
} else {
config.Placement = XPaddingPlacement{
Placement: PlacementQueryInHeader,
Key: "x_padding",
Header: "Referer",
RawURL: req.URL.String(),
}
req.Header.Set("Referer", rawURL+sep+"x_padding="+paddingValue)
}
c.ApplyMetaToRequest(req, sessionID, seqStr)
c.ApplyXPaddingToRequest(req, config)
c.ApplyMetaToRequest(req, sessionID, "")
if req.Body != nil && !c.NoGRPCHeader { // stream-up/one
req.Header.Set("Content-Type", "application/grpc")
}
return nil
}
func (c *Config) FillDownloadRequest(req *http.Request, sessionID string) error {
req.Header = c.RequestHeader()
return c.FillStreamRequest(req, sessionID)
}
paddingValue, err := c.RandomPadding()
func (c *Config) FillPacketRequest(request *http.Request, sessionId string, seqStr string, data []byte) error {
dataPlacement := c.GetNormalizedUplinkDataPlacement()
if dataPlacement == PlacementBody || dataPlacement == PlacementAuto {
request.Header = c.GetRequestHeader()
request.Body = io.NopCloser(bytes.NewReader(data))
request.ContentLength = int64(len(data))
} else {
request.Body = nil
request.ContentLength = 0
switch dataPlacement {
case PlacementHeader:
uplinkChunkSize, err := c.GetNormalizedUplinkChunkSize()
if err != nil {
return err
}
request.Header = c.GetRequestHeaderWithPayload(data, uplinkChunkSize)
case PlacementCookie:
request.Header = c.GetRequestHeader()
uplinkChunkSize, err := c.GetNormalizedUplinkChunkSize()
if err != nil {
return err
}
for _, cookie := range c.GetRequestCookiesWithPayload(data, uplinkChunkSize) {
request.AddCookie(cookie)
}
}
}
xPaddingBytes, err := c.GetNormalizedXPaddingBytes()
if err != nil {
return err
}
if paddingValue != "" {
rawURL := req.URL.String()
sep := "?"
if strings.Contains(rawURL, "?") {
sep = "&"
length := xPaddingBytes.Rand()
config := XPaddingConfig{Length: length}
if c.XPaddingObfsMode {
config.Placement = XPaddingPlacement{
Placement: c.XPaddingPlacement,
Key: c.XPaddingKey,
Header: c.XPaddingHeader,
RawURL: request.URL.String(),
}
config.Method = PaddingMethod(c.XPaddingMethod)
} else {
config.Placement = XPaddingPlacement{
Placement: PlacementQueryInHeader,
Key: "x_padding",
Header: "Referer",
RawURL: request.URL.String(),
}
req.Header.Set("Referer", rawURL+sep+"x_padding="+paddingValue)
}
c.ApplyMetaToRequest(req, sessionID, "")
c.ApplyXPaddingToRequest(request, config)
c.ApplyMetaToRequest(request, sessionId, seqStr)
return nil
}
+229 -137
View File
@@ -1,6 +1,9 @@
package xhttp
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"net"
"strconv"
@@ -98,6 +101,7 @@ type requestHandler struct {
connHandler func(net.Conn)
httpHandler http.Handler
xPaddingBytes Range
scMaxEachPostBytes Range
scStreamUpServerSecs Range
scMaxBufferedPosts Range
@@ -107,6 +111,10 @@ type requestHandler struct {
}
func NewServerHandler(opt ServerOption) (http.Handler, error) {
xPaddingBytes, err := opt.Config.GetNormalizedXPaddingBytes()
if err != nil {
return nil, err
}
scMaxEachPostBytes, err := opt.Config.GetNormalizedScMaxEachPostBytes()
if err != nil {
return nil, err
@@ -125,6 +133,7 @@ func NewServerHandler(opt ServerOption) (http.Handler, error) {
config: opt.Config,
connHandler: opt.ConnHandler,
httpHandler: opt.HttpHandler,
xPaddingBytes: xPaddingBytes,
scMaxEachPostBytes: scMaxEachPostBytes,
scStreamUpServerSecs: scStreamUpServerSecs,
scMaxBufferedPosts: scMaxBufferedPosts,
@@ -134,7 +143,7 @@ func NewServerHandler(opt ServerOption) (http.Handler, error) {
}), nil
}
func (h *requestHandler) getOrCreateSession(sessionID string) *httpSession {
func (h *requestHandler) upsertSession(sessionID string) *httpSession {
h.mu.Lock()
defer h.mu.Unlock()
@@ -161,8 +170,6 @@ func (h *requestHandler) getOrCreateSession(sessionID string) *httpSession {
return s
}
func (h *requestHandler) deleteSession(sessionID string) {
h.mu.Lock()
defer h.mu.Unlock()
@@ -239,11 +246,227 @@ func (h *requestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
rest := strings.TrimPrefix(r.URL.Path, path)
parts := splitNonEmpty(rest)
h.config.WriteResponseHeader(w, r.Method, r.Header)
length := h.xPaddingBytes.Rand()
config := XPaddingConfig{Length: length}
if h.config.XPaddingObfsMode {
config.Placement = XPaddingPlacement{
Placement: h.config.XPaddingPlacement,
Key: h.config.XPaddingKey,
Header: h.config.XPaddingHeader,
}
config.Method = PaddingMethod(h.config.XPaddingMethod)
} else {
config.Placement = XPaddingPlacement{
Placement: PlacementHeader,
Header: "X-Padding",
}
}
h.config.ApplyXPaddingToResponse(w, config)
if r.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return
}
paddingValue, _ := h.config.ExtractXPaddingFromRequest(r, h.config.XPaddingObfsMode)
if !h.config.IsPaddingValid(paddingValue, h.xPaddingBytes.Min, h.xPaddingBytes.Max, PaddingMethod(h.config.XPaddingMethod)) {
http.Error(w, "invalid xpadding", http.StatusBadRequest)
return
}
sessionId, seqStr := h.config.ExtractMetaFromRequest(r, path)
var currentSession *httpSession
if sessionId != "" {
currentSession = h.upsertSession(sessionId)
}
// stream-up upload: POST /path/{session}
if r.Method != http.MethodGet && sessionId != "" && seqStr == "" && h.allowStreamUpUpload() {
httpSC := newHTTPServerConn(w, r.Body)
err := currentSession.uploadQueue.Push(Packet{
Reader: httpSC,
})
if err != nil {
http.Error(w, err.Error(), http.StatusConflict)
return
}
// magic header instructs nginx + apache to not buffer response body
w.Header().Set("X-Accel-Buffering", "no")
// A web-compliant header telling all middleboxes to disable caching.
// Should be able to prevent overloading the cache, or stop CDNs from
// teeing the response stream into their cache, causing slowdowns.
w.Header().Set("Cache-Control", "no-store")
if !h.config.NoSSEHeader {
// magic header to make the HTTP middle box consider this as SSE to disable buffer
w.Header().Set("Content-Type", "text/event-stream")
}
w.WriteHeader(http.StatusOK)
referrer := r.Header.Get("Referer")
if referrer != "" && h.scStreamUpServerSecs.Max > 0 {
go func() {
for {
_, err := httpSC.Write(bytes.Repeat([]byte{'X'}, int(h.xPaddingBytes.Rand())))
if err != nil {
break
}
time.Sleep(time.Duration(h.scStreamUpServerSecs.Rand()) * time.Second)
}
}()
}
select {
case <-r.Context().Done():
case <-httpSC.Wait():
}
_ = httpSC.Close()
return
}
// packet-up upload: POST /path/{session}/{seq}
if r.Method != http.MethodGet && sessionId != "" && seqStr != "" && h.allowPacketUpUpload() {
scMaxEachPostBytes := h.scMaxEachPostBytes.Max
dataPlacement := h.config.GetNormalizedUplinkDataPlacement()
uplinkDataKey := h.config.UplinkDataKey
var headerPayload []byte
var err error
if dataPlacement == PlacementAuto || dataPlacement == PlacementHeader {
var headerPayloadChunks []string
for i := 0; true; i++ {
chunk := r.Header.Get(fmt.Sprintf("%s-%d", uplinkDataKey, i))
if chunk == "" {
break
}
headerPayloadChunks = append(headerPayloadChunks, chunk)
}
headerPayloadEncoded := strings.Join(headerPayloadChunks, "")
headerPayload, err = base64.RawURLEncoding.DecodeString(headerPayloadEncoded)
if err != nil {
http.Error(w, "invalid base64 in header's payload", http.StatusBadRequest)
return
}
}
var cookiePayload []byte
if dataPlacement == PlacementAuto || dataPlacement == PlacementCookie {
var cookiePayloadChunks []string
for i := 0; true; i++ {
cookieName := fmt.Sprintf("%s_%d", uplinkDataKey, i)
if c, _ := r.Cookie(cookieName); c != nil {
cookiePayloadChunks = append(cookiePayloadChunks, c.Value)
} else {
break
}
}
cookiePayloadEncoded := strings.Join(cookiePayloadChunks, "")
cookiePayload, err = base64.RawURLEncoding.DecodeString(cookiePayloadEncoded)
if err != nil {
http.Error(w, "invalid base64 in cookies' payload", http.StatusBadRequest)
return
}
}
var bodyPayload []byte
if dataPlacement == PlacementAuto || dataPlacement == PlacementBody {
if r.ContentLength > int64(scMaxEachPostBytes) {
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
return
}
bodyPayload, err = io.ReadAll(io.LimitReader(r.Body, int64(scMaxEachPostBytes)+1))
if err != nil {
http.Error(w, "failed to read body", http.StatusBadRequest)
return
}
}
var payload []byte
switch dataPlacement {
case PlacementHeader:
payload = headerPayload
case PlacementCookie:
payload = cookiePayload
case PlacementBody:
payload = bodyPayload
case PlacementAuto:
payload = headerPayload
payload = append(payload, cookiePayload...)
payload = append(payload, bodyPayload...)
}
if len(payload) > h.scMaxEachPostBytes.Max {
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
return
}
seq, err := strconv.ParseUint(seqStr, 10, 64)
if err != nil {
http.Error(w, "invalid xhttp seq", http.StatusBadRequest)
return
}
err = currentSession.uploadQueue.Push(Packet{
Seq: seq,
Payload: payload,
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if len(payload) == 0 {
// Methods without a body are usually cached by default.
w.Header().Set("Cache-Control", "no-store")
}
w.WriteHeader(http.StatusOK)
return
}
// stream-up/packet-up download: GET /path/{session}
if r.Method == http.MethodGet && sessionId != "" && seqStr == "" && h.allowSessionDownload() {
currentSession.markConnected()
// magic header instructs nginx + apache to not buffer response body
w.Header().Set("X-Accel-Buffering", "no")
// A web-compliant header telling all middleboxes to disable caching.
// Should be able to prevent overloading the cache, or stop CDNs from
// teeing the response stream into their cache, causing slowdowns.
w.Header().Set("Cache-Control", "no-store")
if !h.config.NoSSEHeader {
// magic header to make the HTTP middle box consider this as SSE to disable buffer
w.Header().Set("Content-Type", "text/event-stream")
}
w.WriteHeader(http.StatusOK)
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
httpSC := newHTTPServerConn(w, r.Body)
conn := &Conn{
writer: httpSC,
reader: currentSession.uploadQueue,
onClose: func() {
h.deleteSession(sessionId)
},
}
httputils.SetAddrFromRequest(&conn.NetAddr, r)
go h.connHandler(N.NewDeadlineConn(conn))
select {
case <-r.Context().Done():
case <-httpSC.Wait():
}
_ = conn.Close()
return
}
// stream-one: POST /path
if r.Method == http.MethodPost && len(parts) == 0 && h.allowStreamOne() {
if r.Method != http.MethodGet && sessionId == "" && seqStr == "" && h.allowStreamOne() {
w.Header().Set("X-Accel-Buffering", "no")
w.Header().Set("Cache-Control", "no-store")
w.WriteHeader(http.StatusOK)
@@ -269,137 +492,6 @@ func (h *requestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
// stream-up/packet-up download: GET /path/{session}
if r.Method == http.MethodGet && len(parts) == 1 && h.allowSessionDownload() {
sessionID := parts[0]
session := h.getOrCreateSession(sessionID)
session.markConnected()
// magic header instructs nginx + apache to not buffer response body
w.Header().Set("X-Accel-Buffering", "no")
// A web-compliant header telling all middleboxes to disable caching.
// Should be able to prevent overloading the cache, or stop CDNs from
// teeing the response stream into their cache, causing slowdowns.
w.Header().Set("Cache-Control", "no-store")
if !h.config.NoSSEHeader {
// magic header to make the HTTP middle box consider this as SSE to disable buffer
w.Header().Set("Content-Type", "text/event-stream")
}
w.WriteHeader(http.StatusOK)
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
httpSC := newHTTPServerConn(w, r.Body)
conn := &Conn{
writer: httpSC,
reader: session.uploadQueue,
onClose: func() {
h.deleteSession(sessionID)
},
}
httputils.SetAddrFromRequest(&conn.NetAddr, r)
go h.connHandler(N.NewDeadlineConn(conn))
select {
case <-r.Context().Done():
case <-httpSC.Wait():
}
_ = conn.Close()
return
}
// stream-up upload: POST /path/{session}
if r.Method == http.MethodPost && len(parts) == 1 && h.allowStreamUpUpload() {
sessionID := parts[0]
session := h.getOrCreateSession(sessionID)
httpSC := newHTTPServerConn(w, r.Body)
err := session.uploadQueue.Push(Packet{
Reader: httpSC,
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// magic header instructs nginx + apache to not buffer response body
w.Header().Set("X-Accel-Buffering", "no")
// A web-compliant header telling all middleboxes to disable caching.
// Should be able to prevent overloading the cache, or stop CDNs from
// teeing the response stream into their cache, causing slowdowns.
w.Header().Set("Cache-Control", "no-store")
if !h.config.NoSSEHeader {
// magic header to make the HTTP middle box consider this as SSE to disable buffer
w.Header().Set("Content-Type", "text/event-stream")
}
w.WriteHeader(http.StatusOK)
referrer := r.Header.Get("Referer")
if referrer != "" && h.scStreamUpServerSecs.Max > 0 {
go func() {
for {
paddingValue, _ := h.config.RandomPadding()
if paddingValue == "" {
break
}
_, err = httpSC.Write([]byte(paddingValue))
if err != nil {
break
}
time.Sleep(time.Duration(h.scStreamUpServerSecs.Rand()) * time.Second)
}
}()
}
select {
case <-r.Context().Done():
case <-httpSC.Wait():
}
_ = httpSC.Close()
return
}
// packet-up upload: POST /path/{session}/{seq}
if r.Method == http.MethodPost && len(parts) == 2 && h.allowPacketUpUpload() {
sessionID := parts[0]
seq, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
http.Error(w, "invalid xhttp seq", http.StatusBadRequest)
return
}
session := h.getOrCreateSession(sessionID)
if r.ContentLength > int64(h.scMaxEachPostBytes.Max) {
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
return
}
body, err := io.ReadAll(io.LimitReader(r.Body, int64(h.scMaxEachPostBytes.Max)+1))
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = session.uploadQueue.Push(Packet{
Seq: seq,
Payload: body,
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if len(body) == 0 {
w.Header().Set("Cache-Control", "no-store")
}
w.WriteHeader(http.StatusOK)
return
}
http.NotFound(w, r)
}
+9 -7
View File
@@ -78,22 +78,24 @@ func TestServerHandlerModeRestrictions(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
config := Config{
Path: "/xhttp",
Mode: testCase.mode,
}
handler, err := NewServerHandler(ServerOption{
Config: Config{
Path: "/xhttp",
Mode: testCase.mode,
},
Config: config,
ConnHandler: func(conn net.Conn) {
_ = conn.Close()
},
})
if err != nil {
panic(err)
}
assert.NoError(t, err)
req := httptest.NewRequest(testCase.method, testCase.target, io.NopCloser(http.NoBody))
recorder := httptest.NewRecorder()
err = config.FillStreamRequest(req, "")
assert.NoError(t, err)
handler.ServeHTTP(recorder, req)
assert.Equal(t, testCase.wantStatus, recorder.Result().StatusCode)
+333
View File
@@ -0,0 +1,333 @@
package xhttp
import (
"crypto/rand"
"fmt"
"math"
"net/url"
"strings"
"github.com/metacubex/http"
"golang.org/x/net/http2/hpack"
)
// PaddingMethod selects how the x_padding filler string is generated.
type PaddingMethod string

const (
	// PaddingMethodRepeatX emits a plain run of 'X' characters.
	PaddingMethodRepeatX PaddingMethod = "repeat-x"
	// PaddingMethodTokenish emits a random base62 token sized so that its
	// HPACK huffman-encoded length approximates the requested byte count.
	PaddingMethodTokenish PaddingMethod = "tokenish"
)

// charsetBase62 is the alphabet used for tokenish padding.
const charsetBase62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

// Huffman encoding gives ~20% size reduction for base62 sequences
const avgHuffmanBytesPerCharBase62 = 0.8

// validationTolerance is the accepted deviation (in huffman-encoded bytes)
// between a generated token and its target length.
const validationTolerance = 2
// XPaddingPlacement describes where a padding value is written on a request
// or response.
type XPaddingPlacement struct {
	Placement string // one of the Placement* constants (header, cookie, query, queryInHeader)
	Key       string // query-parameter or cookie name carrying the padding
	Header    string // header name, used by PlacementHeader and PlacementQueryInHeader
	RawURL    string // request URL whose query the padding is folded into (PlacementQueryInHeader only)
}
// XPaddingConfig bundles everything needed to generate and place one padding
// value.
type XPaddingConfig struct {
	Length    int               // target padding length in bytes
	Placement XPaddingPlacement // where the padding goes
	Method    PaddingMethod     // how the padding is generated; empty falls back to repeat-x
}
// randStringFromCharset returns a uniformly random string of length n drawn
// from charset, using crypto/rand with rejection sampling to avoid modulo
// bias. The bool result is false when the arguments are unusable (n <= 0,
// empty charset, or more than 256 symbols) or the randomness source fails.
func randStringFromCharset(n int, charset string) (string, bool) {
	m := len(charset)
	// More than 256 symbols cannot be addressed by a single random byte.
	if n <= 0 || m == 0 || m > 256 {
		return "", false
	}
	// Accept only bytes below limit so every charset index is equally likely.
	// limit must stay an int: for charset sizes that divide 256 (1, 2, 4,
	// ..., 256) it equals 256 and every byte is accepted. The previous
	// byte-typed computation wrapped 256 to 0 in those cases, rejecting every
	// byte and spinning forever.
	limit := 256 - (256 % m)
	result := make([]byte, n)
	buf := make([]byte, 256)
	filled := 0
	for filled < n {
		if _, err := rand.Read(buf); err != nil {
			return "", false
		}
		for _, rb := range buf {
			if int(rb) >= limit {
				continue // rejected to preserve uniformity
			}
			result[filled] = charset[int(rb)%m]
			filled++
			if filled == n {
				break
			}
		}
	}
	return string(result), true
}
// absInt returns the absolute value of x.
func absInt(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// GenerateTokenishPaddingBase62 produces a random base62 token whose HPACK
// huffman-encoded length is within validationTolerance bytes of
// targetHuffmanBytes. It starts from an estimate based on the ~0.8
// bytes-per-char average and then converges by appending or trimming one
// character at a time. Returns "" only if the random source fails; the caller
// is expected to fall back to repeat-x padding in that case.
func GenerateTokenishPaddingBase62(targetHuffmanBytes int) string {
	// Initial estimate: chars needed assuming the average huffman density.
	n := int(math.Ceil(float64(targetHuffmanBytes) / avgHuffmanBytesPerCharBase62))
	if n < 1 {
		n = 1
	}
	randBase62Str, ok := randStringFromCharset(n, charsetBase62)
	if !ok {
		return ""
	}
	// Hard cap on convergence iterations so a pathological target can never
	// spin indefinitely; the current (possibly off-target) token is returned.
	const maxIter = 150
	// 'X' and 'Z' both huffman-encode to exactly 8 bits, so each appended
	// adjustment char adds exactly one encoded byte.
	adjustChar := byte('X')
	// Adjust until close enough
	for iter := 0; iter < maxIter; iter++ {
		currentLength := int(hpack.HuffmanEncodeLength(randBase62Str))
		diff := currentLength - targetHuffmanBytes
		if absInt(diff) <= validationTolerance {
			return randBase62Str
		}
		if diff < 0 {
			// Too small -> append padding char(s)
			randBase62Str += string(adjustChar)
			// Avoid a long run of identical chars
			if adjustChar == 'X' {
				adjustChar = 'Z'
			} else {
				adjustChar = 'X'
			}
		} else {
			// Too big -> remove from the end
			if len(randBase62Str) <= 1 {
				return randBase62Str
			}
			randBase62Str = randBase62Str[:len(randBase62Str)-1]
		}
	}
	return randBase62Str
}
// GeneratePadding builds a padding string of (approximately) length bytes
// using the requested method. Unknown methods, and any tokenish generation
// failure, fall back to a plain run of 'X'.
//
// https://www.rfc-editor.org/rfc/rfc7541.html#appendix-B
// h2's HPACK Header Compression feature employs a huffman encoding using a static table.
// 'X' and 'Z' are assigned an 8 bit code, so HPACK compression won't change actual padding length on the wire.
// https://www.rfc-editor.org/rfc/rfc9204.html#section-4.1.2-2
// h3's similar QPACK feature uses the same huffman table.
func GeneratePadding(method PaddingMethod, length int) string {
	if length <= 0 {
		return ""
	}
	if method == PaddingMethodTokenish {
		if padding := GenerateTokenishPaddingBase62(length); padding != "" {
			return padding
		}
	}
	// repeat-x, unknown methods, and the tokenish failure path all land here.
	return strings.Repeat("X", length)
}
func ApplyPaddingToCookie(req *http.Request, name, value string) {
if req == nil || name == "" || value == "" {
return
}
req.AddCookie(&http.Cookie{
Name: name,
Value: value,
Path: "/",
})
}
func ApplyPaddingToResponseCookie(writer http.ResponseWriter, name, value string) {
if name == "" || value == "" {
return
}
http.SetCookie(writer, &http.Cookie{
Name: name,
Value: value,
Path: "/",
})
}
func ApplyPaddingToQuery(u *url.URL, key, value string) {
if u == nil || key == "" || value == "" {
return
}
q := u.Query()
q.Set(key, value)
u.RawQuery = q.Encode()
}
// GetNormalizedXPaddingBytes parses the configured x-padding-bytes
// range, defaulting to "100-1000" when the field is unset (defaulting
// behavior presumed to live in ParseRange — confirm there).
func (c *Config) GetNormalizedXPaddingBytes() (Range, error) {
	parsed, parseErr := ParseRange(c.XPaddingBytes, "100-1000")
	if parseErr != nil {
		return Range{}, fmt.Errorf("invalid x-padding-bytes: %w", parseErr)
	}
	return parsed, nil
}
// ApplyXPaddingToHeader writes freshly generated padding into h
// according to config.Placement: either directly as a header value
// (PlacementHeader), or embedded as the query string of a URL-valued
// header such as Referer (PlacementQueryInHeader). A nil header map
// and unknown placements are no-ops.
func (c *Config) ApplyXPaddingToHeader(h http.Header, config XPaddingConfig) {
	if h == nil {
		return
	}
	padding := GeneratePadding(config.Method, config.Length)
	placement := config.Placement
	switch placement.Placement {
	case PlacementHeader:
		h.Set(placement.Header, padding)
	case PlacementQueryInHeader:
		parsed, err := url.Parse(placement.RawURL)
		if err != nil || parsed == nil {
			// Unparseable base URL: skip padding rather than emit junk.
			return
		}
		// The existing query (if any) is replaced wholesale. Padding is
		// base62 or runs of 'X' (see GeneratePadding), so no escaping
		// is required.
		parsed.RawQuery = placement.Key + "=" + padding
		h.Set(placement.Header, parsed.String())
	}
}
// ApplyXPaddingToRequest injects padding into req at the configured
// placement: header or query-in-header (delegated to
// ApplyXPaddingToHeader), cookie, or URL query parameter.
// A nil request is ignored; a nil header map is created on demand.
func (c *Config) ApplyXPaddingToRequest(req *http.Request, config XPaddingConfig) {
	if req == nil {
		return
	}
	if req.Header == nil {
		req.Header = make(http.Header)
	}
	switch config.Placement.Placement {
	case PlacementHeader, PlacementQueryInHeader:
		// Header-based placements share the header helper.
		c.ApplyXPaddingToHeader(req.Header, config)
	case PlacementCookie:
		ApplyPaddingToCookie(req, config.Placement.Key, GeneratePadding(config.Method, config.Length))
	case PlacementQuery:
		ApplyPaddingToQuery(req.URL, config.Placement.Key, GeneratePadding(config.Method, config.Length))
	}
}
// ApplyXPaddingToResponse injects padding into an outbound response:
// header-based placements go through ApplyXPaddingToHeader, cookie
// placement emits a Set-Cookie header. Other placements (e.g.
// PlacementQuery, which has no meaning on a response) are no-ops.
func (c *Config) ApplyXPaddingToResponse(writer http.ResponseWriter, config XPaddingConfig) {
	// Guard a nil writer for consistency with the request-side helpers;
	// writer.Header() on a nil interface would panic.
	if writer == nil {
		return
	}
	placement := config.Placement.Placement
	if placement == PlacementHeader || placement == PlacementQueryInHeader {
		c.ApplyXPaddingToHeader(writer.Header(), config)
		return
	}
	paddingValue := GeneratePadding(config.Method, config.Length)
	switch placement {
	case PlacementCookie:
		ApplyPaddingToResponseCookie(writer, config.Placement.Key, paddingValue)
	}
}
// ExtractXPaddingFromRequest locates the x-padding value carried by an
// inbound request and returns (value, placementDescription). The second
// result is a human-readable description of where the value was found
// (its "key=..." formatting suggests logging/diagnostics — confirm with
// callers). Returns ("", "") when req is nil or nothing matches.
func (c *Config) ExtractXPaddingFromRequest(req *http.Request, obfsMode bool) (string, string) {
	if req == nil {
		return "", ""
	}
	// Non-obfs (legacy) mode uses the fixed key "x_padding", carried
	// either in the Referer header's query string or, when no Referer
	// is present, in the request URL's own query.
	if !obfsMode {
		referrer := req.Header.Get("Referer")
		if referrer != "" {
			if referrerURL, err := url.Parse(referrer); err == nil {
				// Returned even when the query lacks x_padding (value
				// will be "").
				paddingValue := referrerURL.Query().Get("x_padding")
				paddingPlacement := PlacementQueryInHeader + "=Referer, key=x_padding"
				return paddingValue, paddingPlacement
			}
			// NOTE(review): a Referer that fails to parse falls through
			// to the configurable lookups below — confirm intended.
		} else {
			paddingValue := req.URL.Query().Get("x_padding")
			return paddingValue, PlacementQuery + ", key=x_padding"
		}
	}
	// Obfs mode (and the unparseable-Referer fallthrough): key and
	// header names come from config. Lookup order: cookie, then header
	// (direct value or URL-embedded query), then URL query.
	key := c.XPaddingKey
	header := c.XPaddingHeader
	if cookie, err := req.Cookie(key); err == nil {
		if cookie != nil && cookie.Value != "" {
			paddingValue := cookie.Value
			paddingPlacement := PlacementCookie + ", key=" + key
			return paddingValue, paddingPlacement
		}
	}
	headerValue := req.Header.Get(header)
	if headerValue != "" {
		if c.XPaddingPlacement == PlacementHeader {
			// Header carries the padding directly.
			paddingPlacement := PlacementHeader + "=" + header
			return headerValue, paddingPlacement
		}
		// Otherwise the header value is treated as a URL whose query
		// string carries the padding under the configured key.
		if parsedURL, err := url.Parse(headerValue); err == nil {
			paddingPlacement := PlacementQueryInHeader + "=" + header + ", key=" + key
			return parsedURL.Query().Get(key), paddingPlacement
		}
	}
	// Last resort: the request URL's query under the configured key.
	queryValue := req.URL.Query().Get(key)
	if queryValue != "" {
		paddingPlacement := PlacementQuery + ", key=" + key
		return queryValue, paddingPlacement
	}
	return "", ""
}
// IsPaddingValid reports whether paddingValue's length falls within
// [from, to] for the given method. Tokenish padding is measured by its
// HPACK Huffman-encoded size with the window widened by
// validationTolerance on both sides (clamped at zero); every other
// method compares raw byte length. When to <= 0 the bounds are taken
// from the configured x-padding-bytes range instead.
func (c *Config) IsPaddingValid(paddingValue string, from, to int, method PaddingMethod) bool {
	if paddingValue == "" {
		return false
	}
	if to <= 0 {
		// Caller gave no usable upper bound; fall back to config.
		if r, err := c.GetNormalizedXPaddingBytes(); err == nil {
			from, to = r.Min, r.Max
		}
	}
	if method == PaddingMethodTokenish {
		lo := from - validationTolerance
		if lo < 0 {
			lo = 0
		}
		hi := to + validationTolerance
		encoded := int(hpack.HuffmanEncodeLength(paddingValue))
		return encoded >= lo && encoded <= hi
	}
	// PaddingMethodRepeatX and any unknown method validate raw length.
	n := len(paddingValue)
	return n >= from && n <= to
}
+2 -2
View File
@@ -10683,9 +10683,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
version = "1.23.0"
version = "1.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9"
checksum = "ddd74a9687298c6858e9b88ec8935ec45d22e8fd5e6394fa1bd4e99a87789c76"
dependencies = [
"getrandom 0.4.1",
"js-sys",
@@ -26,11 +26,11 @@
"@mui/x-date-pickers": "8.27.2",
"@nyanpasu/interface": "workspace:^",
"@nyanpasu/ui": "workspace:^",
"@paper-design/shaders-react": "0.0.72",
"@paper-design/shaders-react": "0.0.76",
"@radix-ui/react-use-controllable-state": "1.2.2",
"@tailwindcss/postcss": "4.2.2",
"@tanstack/react-table": "8.21.3",
"@tanstack/react-virtual": "3.13.23",
"@tanstack/react-virtual": "3.13.24",
"@tanstack/router-zod-adapter": "1.81.5",
"@tauri-apps/api": "2.10.1",
"@uidotdev/usehooks": "2.4.1",
@@ -73,9 +73,9 @@
"@iconify/json": "2.2.463",
"@monaco-editor/react": "4.7.0",
"@tanstack/react-query": "5.97.0",
"@tanstack/react-router": "1.168.13",
"@tanstack/react-router-devtools": "1.166.11",
"@tanstack/router-plugin": "1.167.12",
"@tanstack/react-router": "1.168.22",
"@tanstack/react-router-devtools": "1.166.13",
"@tanstack/router-plugin": "1.167.22",
"@tauri-apps/plugin-clipboard-manager": "2.3.2",
"@tauri-apps/plugin-dialog": "2.6.0",
"@tauri-apps/plugin-fs": "2.4.5",
@@ -68,7 +68,7 @@ export default defineConfig(({ command, mode }) => {
tsconfigPaths(),
legacy({
renderLegacyChunks: false,
modernTargets: ['edge>=109', 'safari>=13'],
modernTargets: ['edge>=109', 'safari>=15'],
modernPolyfills: true,
additionalModernPolyfills: [
'core-js/modules/es.object.has-own.js',
+2 -2
View File
@@ -2,7 +2,7 @@
"manifest_version": 1,
"latest": {
"mihomo": "v1.19.23",
"mihomo_alpha": "alpha-7ab4eed",
"mihomo_alpha": "alpha-0e0265f",
"clash_rs": "v0.9.7",
"clash_premium": "2023-09-05-gdcc8d87",
"clash_rs_alpha": "0.9.7-alpha+sha.dd693bf"
@@ -69,5 +69,5 @@
"linux-armv7hf": "clash-rs-armv7-unknown-linux-gnueabihf"
}
},
"updated_at": "2026-04-16T22:25:52.280Z"
"updated_at": "2026-04-17T22:24:36.473Z"
}
+5 -5
View File
@@ -51,7 +51,7 @@
"prepare:check": "deno run -A scripts/deno/check.ts"
},
"dependencies": {
"@prettier/plugin-oxc": "0.1.3",
"@prettier/plugin-oxc": "0.1.4",
"husky": "9.1.7",
"lodash-es": "4.18.1"
},
@@ -63,7 +63,7 @@
"@types/fs-extra": "11.0.4",
"@types/lodash-es": "4.17.12",
"@types/node": "24.11.0",
"autoprefixer": "10.4.27",
"autoprefixer": "10.5.0",
"conventional-changelog-conventionalcommits": "9.3.1",
"cross-env": "10.1.0",
"dedent": "1.7.2",
@@ -76,11 +76,11 @@
"postcss-html": "1.8.1",
"postcss-import": "16.1.1",
"postcss-scss": "4.0.9",
"prettier": "3.8.2",
"prettier-plugin-ember-template-tag": "2.1.4",
"prettier": "3.8.3",
"prettier-plugin-ember-template-tag": "2.1.5",
"prettier-plugin-tailwindcss": "0.7.2",
"prettier-plugin-toml": "2.0.6",
"stylelint": "17.6.0",
"stylelint": "17.8.0",
"stylelint-config-html": "1.1.0",
"stylelint-config-recess-order": "7.7.0",
"stylelint-config-standard": "40.0.0",
+355 -274
View File
File diff suppressed because it is too large Load Diff
+1 -1
View File
@@ -45,7 +45,7 @@ jobs:
- name: Build site
run: task docs
- name: Upload static files as artifact
uses: actions/upload-pages-artifact@v4
uses: actions/upload-pages-artifact@v5
with:
path: www/public
- name: Deploy to GitHub Pages
+1 -1
View File
@@ -9,7 +9,7 @@ services:
volumes:
- filebrowser:/flux/vault
environment:
- REDIS_CACHE_URL=redis://default:filebrowser@redis:6379 # Use rediss:// for ssl
- FB_REDIS_CACHE_URL=redis://default:filebrowser@redis:6379 # Use rediss:// for ssl
redis:
container_name: redis
+203 -158
View File
@@ -28,7 +28,7 @@ importers:
version: 1.11.20
dompurify:
specifier: ^3.2.6
version: 3.3.3
version: 3.4.0
epubjs:
specifier: ^0.3.93
version: 0.3.93
@@ -101,7 +101,7 @@ importers:
devDependencies:
'@intlify/unplugin-vue-i18n':
specifier: ^11.0.1
version: 11.0.7(@vue/compiler-dom@3.5.32)(eslint@10.2.0)(rollup@4.57.1)(typescript@5.9.3)(vue-i18n@11.3.2(vue@3.5.32(typescript@5.9.3)))(vue@3.5.32(typescript@5.9.3))
version: 11.0.7(@vue/compiler-dom@3.5.32)(eslint@10.2.1)(rollup@4.57.1)(typescript@5.9.3)(vue-i18n@11.3.2(vue@3.5.32(typescript@5.9.3)))(vue@3.5.32(typescript@5.9.3))
'@tsconfig/node24':
specifier: ^24.0.2
version: 24.0.4
@@ -113,43 +113,43 @@ importers:
version: 24.12.2
'@typescript-eslint/eslint-plugin':
specifier: ^8.37.0
version: 8.58.1(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(typescript@5.9.3)
version: 8.58.2(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(typescript@5.9.3)
'@vitejs/plugin-legacy':
specifier: ^8.0.0
version: 8.0.1(terser@5.46.1)(vite@8.0.8(@types/node@24.12.2)(esbuild@0.27.3)(terser@5.46.1)(yaml@2.8.3))
'@vitejs/plugin-vue':
specifier: ^6.0.1
version: 6.0.5(vite@8.0.8(@types/node@24.12.2)(esbuild@0.27.3)(terser@5.46.1)(yaml@2.8.3))(vue@3.5.32(typescript@5.9.3))
version: 6.0.6(vite@8.0.8(@types/node@24.12.2)(esbuild@0.27.3)(terser@5.46.1)(yaml@2.8.3))(vue@3.5.32(typescript@5.9.3))
'@vue/eslint-config-prettier':
specifier: ^10.2.0
version: 10.2.0(eslint@10.2.0)(prettier@3.8.2)
version: 10.2.0(eslint@10.2.1)(prettier@3.8.3)
'@vue/eslint-config-typescript':
specifier: ^14.6.0
version: 14.7.0(eslint-plugin-vue@10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(vue-eslint-parser@10.4.0(eslint@10.2.0)))(eslint@10.2.0)(typescript@5.9.3)
version: 14.7.0(eslint-plugin-vue@10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(vue-eslint-parser@10.4.0(eslint@10.2.1)))(eslint@10.2.1)(typescript@5.9.3)
'@vue/tsconfig':
specifier: ^0.9.0
version: 0.9.1(typescript@5.9.3)(vue@3.5.32(typescript@5.9.3))
autoprefixer:
specifier: ^10.4.21
version: 10.4.27(postcss@8.5.9)
version: 10.5.0(postcss@8.5.10)
eslint:
specifier: ^10.0.0
version: 10.2.0
version: 10.2.1
eslint-config-prettier:
specifier: ^10.1.5
version: 10.1.8(eslint@10.2.0)
version: 10.1.8(eslint@10.2.1)
eslint-plugin-prettier:
specifier: ^5.5.1
version: 5.5.5(eslint-config-prettier@10.1.8(eslint@10.2.0))(eslint@10.2.0)(prettier@3.8.2)
version: 5.5.5(eslint-config-prettier@10.1.8(eslint@10.2.1))(eslint@10.2.1)(prettier@3.8.3)
eslint-plugin-vue:
specifier: ^10.5.1
version: 10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(vue-eslint-parser@10.4.0(eslint@10.2.0))
version: 10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(vue-eslint-parser@10.4.0(eslint@10.2.1))
postcss:
specifier: ^8.5.6
version: 8.5.9
version: 8.5.10
prettier:
specifier: ^3.6.2
version: 3.8.2
version: 3.8.3
terser:
specifier: ^5.43.1
version: 5.46.1
@@ -1022,12 +1022,16 @@ packages:
resolution: {integrity: sha512-rZAP3aVgB9ds9KOeUSL+zZ21hPmo8dh6fnIFwRQj5EAZl9gzR7wxYbYXYysAM8CTqGmUGyp2S4kUdV17MnGuWQ==}
engines: {node: ^20.19.0 || ^22.13.0 || >=24}
'@humanfs/core@0.19.1':
resolution: {integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==}
'@humanfs/core@0.19.2':
resolution: {integrity: sha512-UhXNm+CFMWcbChXywFwkmhqjs3PRCmcSa/hfBgLIb7oQ5HNb1wS0icWsGtSAUNgefHeI+eBrA8I1fxmbHsGdvA==}
engines: {node: '>=18.18.0'}
'@humanfs/node@0.16.7':
resolution: {integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==}
'@humanfs/node@0.16.8':
resolution: {integrity: sha512-gE1eQNZ3R++kTzFUpdGlpmy8kDZD/MLyHqDwqjkVQI0JMdI1D51sy1H958PNXYkM2rAac7e5/CnIKZrHtPh3BQ==}
engines: {node: '>=18.18.0'}
'@humanfs/types@0.15.0':
resolution: {integrity: sha512-ZZ1w0aoQkwuUuC7Yf+7sdeaNfqQiiLcSRbfI08oAxqLtpXQr9AIVX7Ay7HLDuiLYAaFPu8oBYNq/QIi9URHJ3Q==}
engines: {node: '>=18.18.0'}
'@humanwhocodes/module-importer@1.0.1':
@@ -1240,12 +1244,12 @@ packages:
cpu: [x64]
os: [win32]
'@rolldown/pluginutils@1.0.0-rc.13':
resolution: {integrity: sha512-3ngTAv6F/Py35BsYbeeLeecvhMKdsKm4AoOETVhAA+Qc8nrA2I0kF7oa93mE9qnIurngOSpMnQ0x2nQY2FPviA==}
'@rolldown/pluginutils@1.0.0-rc.15':
resolution: {integrity: sha512-UromN0peaE53IaBRe9W7CjrZgXl90fqGpK+mIZbA3qSTeYqg3pqpROBdIPvOG3F5ereDHNwoHBI2e50n1BDr1g==}
'@rolldown/pluginutils@1.0.0-rc.2':
resolution: {integrity: sha512-izyXV/v+cHiRfozX62W9htOAvwMo4/bXKDrQ+vom1L1qRuexPock/7VZDAhnpHCLNejd3NJ6hiab+tO0D44Rgw==}
'@rollup/pluginutils@5.3.0':
resolution: {integrity: sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==}
engines: {node: '>=14.0.0'}
@@ -1444,11 +1448,11 @@ packages:
eslint: ^8.57.0 || ^9.0.0 || ^10.0.0
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/eslint-plugin@8.58.1':
resolution: {integrity: sha512-eSkwoemjo76bdXl2MYqtxg51HNwUSkWfODUOQ3PaTLZGh9uIWWFZIjyjaJnex7wXDu+TRx+ATsnSxdN9YWfRTQ==}
'@typescript-eslint/eslint-plugin@8.58.2':
resolution: {integrity: sha512-aC2qc5thQahutKjP+cl8cgN9DWe3ZUqVko30CMSZHnFEHyhOYoZSzkGtAI2mcwZ38xeImDucI4dnqsHiOYuuCw==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
'@typescript-eslint/parser': ^8.58.1
'@typescript-eslint/parser': ^8.58.2
eslint: ^8.57.0 || ^9.0.0 || ^10.0.0
typescript: '>=4.8.4 <6.1.0'
@@ -1465,8 +1469,8 @@ packages:
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/project-service@8.58.1':
resolution: {integrity: sha512-gfQ8fk6cxhtptek+/8ZIqw8YrRW5048Gug8Ts5IYcMLCw18iUgrZAEY/D7s4hkI0FxEfGakKuPK/XUMPzPxi5g==}
'@typescript-eslint/project-service@8.58.2':
resolution: {integrity: sha512-Cq6UfpZZk15+r87BkIh5rDpi38W4b+Sjnb8wQCPPDDweS/LRCFjCyViEbzHk5Ck3f2QDfgmlxqSa7S7clDtlfg==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
typescript: '>=4.8.4 <6.1.0'
@@ -1475,8 +1479,8 @@ packages:
resolution: {integrity: sha512-7UiO/XwMHquH+ZzfVCfUNkIXlp/yQjjnlYUyYz7pfvlK3/EyyN6BK+emDmGNyQLBtLGaYrTAI6KOw8tFucWL2w==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/scope-manager@8.58.1':
resolution: {integrity: sha512-TPYUEqJK6avLcEjumWsIuTpuYODTTDAtoMdt8ZZa93uWMTX13Nb8L5leSje1NluammvU+oI3QRr5lLXPgihX3w==}
'@typescript-eslint/scope-manager@8.58.2':
resolution: {integrity: sha512-SgmyvDPexWETQek+qzZnrG6844IaO02UVyOLhI4wpo82dpZJY9+6YZCKAMFzXb7qhx37mFK1QcPQ18tud+vo6Q==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/tsconfig-utils@8.56.0':
@@ -1485,8 +1489,8 @@ packages:
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/tsconfig-utils@8.58.1':
resolution: {integrity: sha512-JAr2hOIct2Q+qk3G+8YFfqkqi7sC86uNryT+2i5HzMa2MPjw4qNFvtjnw1IiA1rP7QhNKVe21mSSLaSjwA1Olw==}
'@typescript-eslint/tsconfig-utils@8.58.2':
resolution: {integrity: sha512-3SR+RukipDvkkKp/d0jP0dyzuls3DbGmwDpVEc5wqk5f38KFThakqAAO0XMirWAE+kT00oTauTbzMFGPoAzB0A==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
typescript: '>=4.8.4 <6.1.0'
@@ -1498,8 +1502,8 @@ packages:
eslint: ^8.57.0 || ^9.0.0 || ^10.0.0
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/type-utils@8.58.1':
resolution: {integrity: sha512-HUFxvTJVroT+0rXVJC7eD5zol6ID+Sn5npVPWoFuHGg9Ncq5Q4EYstqR+UOqaNRFXi5TYkpXXkLhoCHe3G0+7w==}
'@typescript-eslint/type-utils@8.58.2':
resolution: {integrity: sha512-Z7EloNR/B389FvabdGeTo2XMs4W9TjtPiO9DAsmT0yom0bwlPyRjkJ1uCdW1DvrrrYP50AJZ9Xc3sByZA9+dcg==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
eslint: ^8.57.0 || ^9.0.0 || ^10.0.0
@@ -1509,8 +1513,8 @@ packages:
resolution: {integrity: sha512-DBsLPs3GsWhX5HylbP9HNG15U0bnwut55Lx12bHB9MpXxQ+R5GC8MwQe+N1UFXxAeQDvEsEDY6ZYwX03K7Z6HQ==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/types@8.58.1':
resolution: {integrity: sha512-io/dV5Aw5ezwzfPBBWLoT+5QfVtP8O7q4Kftjn5azJ88bYyp/ZMCsyW1lpKK46EXJcaYMZ1JtYj+s/7TdzmQMw==}
'@typescript-eslint/types@8.58.2':
resolution: {integrity: sha512-9TukXyATBQf/Jq9AMQXfvurk+G5R2MwfqQGDR2GzGz28HvY/lXNKGhkY+6IOubwcquikWk5cjlgPvD2uAA7htQ==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/typescript-estree@8.56.0':
@@ -1519,8 +1523,8 @@ packages:
peerDependencies:
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/typescript-estree@8.58.1':
resolution: {integrity: sha512-w4w7WR7GHOjqqPnvAYbazq+Y5oS68b9CzasGtnd6jIeOIeKUzYzupGTB2T4LTPSv4d+WPeccbxuneTFHYgAAWg==}
'@typescript-eslint/typescript-estree@8.58.2':
resolution: {integrity: sha512-ELGuoofuhhoCvNbQjFFiobFcGgcDCEm0ThWdmO4Z0UzLqPXS3KFvnEZ+SHewwOYHjM09tkzOWXNTv9u6Gqtyuw==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
typescript: '>=4.8.4 <6.1.0'
@@ -1532,8 +1536,8 @@ packages:
eslint: ^8.57.0 || ^9.0.0 || ^10.0.0
typescript: '>=4.8.4 <6.0.0'
'@typescript-eslint/utils@8.58.1':
resolution: {integrity: sha512-Ln8R0tmWC7pTtLOzgJzYTXSCjJ9rDNHAqTaVONF4FEi2qwce8mD9iSOxOpLFFvWp/wBFlew0mjM1L1ihYWfBdQ==}
'@typescript-eslint/utils@8.58.2':
resolution: {integrity: sha512-QZfjHNEzPY8+l0+fIXMvuQ2sJlplB4zgDZvA+NmvZsZv3EQwOcc1DuIU1VJUTWZ/RKouBMhDyNaBMx4sWvrzRA==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
peerDependencies:
eslint: ^8.57.0 || ^9.0.0 || ^10.0.0
@@ -1543,8 +1547,8 @@ packages:
resolution: {integrity: sha512-q+SL+b+05Ud6LbEE35qe4A99P+htKTKVbyiNEe45eCbJFyh/HVK9QXwlrbz+Q4L8SOW4roxSVwXYj4DMBT7Ieg==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@typescript-eslint/visitor-keys@8.58.1':
resolution: {integrity: sha512-y+vH7QE8ycjoa0bWciFg7OpFcipUuem1ujhrdLtq1gByKwfbC7bPeKsiny9e0urg93DqwGcHey+bGRKCnF1nZQ==}
'@typescript-eslint/visitor-keys@8.58.2':
resolution: {integrity: sha512-f1WO2Lx8a9t8DARmcWAUPJbu0G20bJlj8L4z72K00TMeJAoyLr/tHhI/pzYBLrR4dXWkcxO1cWYZEOX8DKHTqA==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@videojs/http-streaming@3.17.4':
@@ -1567,8 +1571,8 @@ packages:
terser: ^5.16.0
vite: ^8.0.0
'@vitejs/plugin-vue@6.0.5':
resolution: {integrity: sha512-bL3AxKuQySfk1iGcBsQnoRVexTPJq0Z/ixFVM8OhVJAP6ZXXXLtM7NFKWhLl30Kg7uTBqIaPXbh+nuQCuBDedg==}
'@vitejs/plugin-vue@6.0.6':
resolution: {integrity: sha512-u9HHgfrq3AjXlysn0eINFnWQOJQLO9WN6VprZ8FXl7A2bYisv3Hui9Ij+7QZ41F/WYWarHjwBbXtD7dKg3uxbg==}
engines: {node: ^20.19.0 || >=22.12.0}
peerDependencies:
vite: ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0
@@ -1801,8 +1805,8 @@ packages:
resolution: {integrity: sha512-cbdCP0PGOBq0ASG+sjnKIoYkWMKhhz+F/h9pRexUdX2Hd38+WOlBkRKlqkGOSm0YQpcFMQBJeK4WspUAkwsEdg==}
engines: {node: '>=20.19.0'}
autoprefixer@10.4.27:
resolution: {integrity: sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA==}
autoprefixer@10.5.0:
resolution: {integrity: sha512-FMhOoZV4+qR6aTUALKX2rEqGG+oyATvwBt9IIzVR5rMa2HRWPkxf+P+PAJLD1I/H5/II+HuZcBJYEFBpq39ong==}
engines: {node: ^10 || ^12 || >=14}
hasBin: true
peerDependencies:
@@ -1830,6 +1834,11 @@ packages:
resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==}
engines: {node: 18 || 20 || >=22}
baseline-browser-mapping@2.10.19:
resolution: {integrity: sha512-qCkNLi2sfBOn8XhZQ0FXsT1Ki/Yo5P90hrkRamVFRS7/KV9hpfA4HkoWNU152+8w0zPjnxo5psx5NL3PSGgv5g==}
engines: {node: '>=6.0.0'}
hasBin: true
baseline-browser-mapping@2.9.19:
resolution: {integrity: sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==}
hasBin: true
@@ -1840,8 +1849,8 @@ packages:
boolbase@1.0.0:
resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==}
brace-expansion@2.0.3:
resolution: {integrity: sha512-MCV/fYJEbqx68aE58kv2cA/kiky1G8vux3OR6/jbS+jIMe/6fJWa0DTzJU7dqijOWYwHi1t29FlfYI9uytqlpA==}
brace-expansion@2.1.0:
resolution: {integrity: sha512-TN1kCZAgdgweJhWWpgKYrQaMNHcDULHkWwQIspdtjV4Y5aurRdZpjAqn6yX3FPqTA9ngHCc4hJxMAMgGfve85w==}
brace-expansion@5.0.5:
resolution: {integrity: sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==}
@@ -1863,11 +1872,16 @@ packages:
engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
hasBin: true
browserslist@4.28.2:
resolution: {integrity: sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg==}
engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
hasBin: true
buffer-from@1.1.2:
resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==}
caniuse-lite@1.0.30001774:
resolution: {integrity: sha512-DDdwPGz99nmIEv216hKSgLD+D4ikHQHjBC/seF98N9CPqRX4M5mSxT9eTV6oyisnJcuzxtZy4n17yKKQYmYQOA==}
caniuse-lite@1.0.30001788:
resolution: {integrity: sha512-6q8HFp+lOQtcf7wBK+uEenxymVWkGKkjFpCvw5W25cmMwEDU45p1xQFBQv8JDlMMry7eNxyBaR+qxgmTUZkIRQ==}
chai@6.2.2:
resolution: {integrity: sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==}
@@ -1956,12 +1970,15 @@ packages:
dom-walk@0.1.2:
resolution: {integrity: sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==}
dompurify@3.3.3:
resolution: {integrity: sha512-Oj6pzI2+RqBfFG+qOaOLbFXLQ90ARpcGG6UePL82bJLtdsa6CYJD7nmiU8MW9nQNOtCHV3lZ/Bzq1X0QYbBZCA==}
dompurify@3.4.0:
resolution: {integrity: sha512-nolgK9JcaUXMSmW+j1yaSvaEaoXYHwWyGJlkoCTghc97KgGDDSnpoU/PlEnw63Ah+TGKFOyY+X5LnxaWbCSfXg==}
electron-to-chromium@1.5.286:
resolution: {integrity: sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==}
electron-to-chromium@1.5.340:
resolution: {integrity: sha512-908qahOGocRMinT2nM3ajCEM99H4iPdv84eagPP3FfZy/1ZGeOy2CZYzjhms81ckOPCXPlW7LkY4XpxD8r1DrA==}
entities@7.0.1:
resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==}
engines: {node: '>=0.12'}
@@ -2052,8 +2069,8 @@ packages:
resolution: {integrity: sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==}
engines: {node: ^20.19.0 || ^22.13.0 || >=24}
eslint@10.2.0:
resolution: {integrity: sha512-+L0vBFYGIpSNIt/KWTpFonPrqYvgKw1eUI5Vn7mEogrQcWtWYtNQ7dNqC+px/J0idT3BAkiWrhfS7k+Tum8TUA==}
eslint@10.2.1:
resolution: {integrity: sha512-wiyGaKsDgqXvF40P8mDwiUp/KQjE1FdrIEJsM8PZ3XCiniTMXS3OHWWUe5FI5agoCnr8x4xPrTDZuxsBlNHl+Q==}
engines: {node: ^20.19.0 || ^22.13.0 || >=24}
hasBin: true
peerDependencies:
@@ -2514,6 +2531,9 @@ packages:
node-releases@2.0.27:
resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==}
node-releases@2.0.37:
resolution: {integrity: sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg==}
normalize.css@8.0.1:
resolution: {integrity: sha512-qizSNPO93t1YUuUhP22btGOo3chcvDFqFaj2TRybP0DMxkHOCTYwp3n34fel4a31ORXy4m1Xq0Gyqpb5m33qIg==}
@@ -2601,8 +2621,8 @@ packages:
postcss-value-parser@4.2.0:
resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==}
postcss@8.5.9:
resolution: {integrity: sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw==}
postcss@8.5.10:
resolution: {integrity: sha512-pMMHxBOZKFU6HgAZ4eyGnwXF/EvPGGqUr0MnZ5+99485wwW41kW91A4LOGxSHhgugZmSChL5AlElNdwlNgcnLQ==}
engines: {node: ^10 || ^12 || >=14}
prelude-ls@1.2.1:
@@ -2613,8 +2633,8 @@ packages:
resolution: {integrity: sha512-SxToR7P8Y2lWmv/kTzVLC1t/GDI2WGjMwNhLLE9qtH8Q13C+aEmuRlzDst4Up4s0Wc8sF2M+J57iB3cMLqftfg==}
engines: {node: '>=6.0.0'}
prettier@3.8.2:
resolution: {integrity: sha512-8c3mgTe0ASwWAJK+78dpviD+A8EqhndQPUBpNUIPt6+xWlIigCwfN01lWr9MAede4uqXGTEKeQWTvzb3vjia0Q==}
prettier@3.8.3:
resolution: {integrity: sha512-7igPTM53cGHMW8xWuVTydi2KO233VFiTNyF5hLJqpilHfmn8C8gPf+PS7dUT64YcXFbiMGZxS9pCSxL/Dxm/Jw==}
engines: {node: '>=14'}
hasBin: true
@@ -3939,9 +3959,9 @@ snapshots:
'@esbuild/win32-x64@0.27.3':
optional: true
'@eslint-community/eslint-utils@4.9.1(eslint@10.2.0)':
'@eslint-community/eslint-utils@4.9.1(eslint@10.2.1)':
dependencies:
eslint: 10.2.0
eslint: 10.2.1
eslint-visitor-keys: 3.4.3
'@eslint-community/regexpp@4.12.2': {}
@@ -3969,13 +3989,18 @@ snapshots:
'@eslint/core': 1.2.1
levn: 0.4.1
'@humanfs/core@0.19.1': {}
'@humanfs/node@0.16.7':
'@humanfs/core@0.19.2':
dependencies:
'@humanfs/core': 0.19.1
'@humanfs/types': 0.15.0
'@humanfs/node@0.16.8':
dependencies:
'@humanfs/core': 0.19.2
'@humanfs/types': 0.15.0
'@humanwhocodes/retry': 0.4.3
'@humanfs/types@0.15.0': {}
'@humanwhocodes/module-importer@1.0.1': {}
'@humanwhocodes/retry@0.4.3': {}
@@ -4014,9 +4039,9 @@ snapshots:
'@intlify/shared@11.3.2': {}
'@intlify/unplugin-vue-i18n@11.0.7(@vue/compiler-dom@3.5.32)(eslint@10.2.0)(rollup@4.57.1)(typescript@5.9.3)(vue-i18n@11.3.2(vue@3.5.32(typescript@5.9.3)))(vue@3.5.32(typescript@5.9.3))':
'@intlify/unplugin-vue-i18n@11.0.7(@vue/compiler-dom@3.5.32)(eslint@10.2.1)(rollup@4.57.1)(typescript@5.9.3)(vue-i18n@11.3.2(vue@3.5.32(typescript@5.9.3)))(vue@3.5.32(typescript@5.9.3))':
dependencies:
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0)
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1)
'@intlify/bundle-utils': 11.0.7(vue-i18n@11.3.2(vue@3.5.32(typescript@5.9.3)))
'@intlify/shared': 11.2.8
'@intlify/vue-i18n-extensions': 8.0.0(@intlify/shared@11.2.8)(@vue/compiler-dom@3.5.32)(vue-i18n@11.3.2(vue@3.5.32(typescript@5.9.3)))(vue@3.5.32(typescript@5.9.3))
@@ -4143,9 +4168,9 @@ snapshots:
'@rolldown/binding-win32-x64-msvc@1.0.0-rc.15':
optional: true
'@rolldown/pluginutils@1.0.0-rc.15': {}
'@rolldown/pluginutils@1.0.0-rc.13': {}
'@rolldown/pluginutils@1.0.0-rc.2': {}
'@rolldown/pluginutils@1.0.0-rc.15': {}
'@rollup/pluginutils@5.3.0(rollup@4.57.1)':
dependencies:
@@ -4271,15 +4296,15 @@ snapshots:
'@types/web-bluetooth@0.0.21': {}
'@typescript-eslint/eslint-plugin@8.56.0(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(typescript@5.9.3)':
'@typescript-eslint/eslint-plugin@8.56.0(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(typescript@5.9.3)':
dependencies:
'@eslint-community/regexpp': 4.12.2
'@typescript-eslint/parser': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/parser': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
'@typescript-eslint/scope-manager': 8.56.0
'@typescript-eslint/type-utils': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/utils': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/type-utils': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
'@typescript-eslint/utils': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
'@typescript-eslint/visitor-keys': 8.56.0
eslint: 10.2.0
eslint: 10.2.1
ignore: 7.0.5
natural-compare: 1.4.0
ts-api-utils: 2.5.0(typescript@5.9.3)
@@ -4287,15 +4312,15 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/eslint-plugin@8.58.1(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(typescript@5.9.3)':
'@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(typescript@5.9.3)':
dependencies:
'@eslint-community/regexpp': 4.12.2
'@typescript-eslint/parser': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/scope-manager': 8.58.1
'@typescript-eslint/type-utils': 8.58.1(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/utils': 8.58.1(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/visitor-keys': 8.58.1
eslint: 10.2.0
'@typescript-eslint/parser': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
'@typescript-eslint/scope-manager': 8.58.2
'@typescript-eslint/type-utils': 8.58.2(eslint@10.2.1)(typescript@5.9.3)
'@typescript-eslint/utils': 8.58.2(eslint@10.2.1)(typescript@5.9.3)
'@typescript-eslint/visitor-keys': 8.58.2
eslint: 10.2.1
ignore: 7.0.5
natural-compare: 1.4.0
ts-api-utils: 2.5.0(typescript@5.9.3)
@@ -4303,14 +4328,14 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3)':
'@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3)':
dependencies:
'@typescript-eslint/scope-manager': 8.56.0
'@typescript-eslint/types': 8.56.0
'@typescript-eslint/typescript-estree': 8.56.0(typescript@5.9.3)
'@typescript-eslint/visitor-keys': 8.56.0
debug: 4.4.3
eslint: 10.2.0
eslint: 10.2.1
typescript: 5.9.3
transitivePeerDependencies:
- supports-color
@@ -4324,10 +4349,10 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/project-service@8.58.1(typescript@5.9.3)':
'@typescript-eslint/project-service@8.58.2(typescript@5.9.3)':
dependencies:
'@typescript-eslint/tsconfig-utils': 8.58.1(typescript@5.9.3)
'@typescript-eslint/types': 8.58.1
'@typescript-eslint/tsconfig-utils': 8.58.2(typescript@5.9.3)
'@typescript-eslint/types': 8.58.2
debug: 4.4.3
typescript: 5.9.3
transitivePeerDependencies:
@@ -4338,38 +4363,38 @@ snapshots:
'@typescript-eslint/types': 8.56.0
'@typescript-eslint/visitor-keys': 8.56.0
'@typescript-eslint/scope-manager@8.58.1':
'@typescript-eslint/scope-manager@8.58.2':
dependencies:
'@typescript-eslint/types': 8.58.1
'@typescript-eslint/visitor-keys': 8.58.1
'@typescript-eslint/types': 8.58.2
'@typescript-eslint/visitor-keys': 8.58.2
'@typescript-eslint/tsconfig-utils@8.56.0(typescript@5.9.3)':
dependencies:
typescript: 5.9.3
'@typescript-eslint/tsconfig-utils@8.58.1(typescript@5.9.3)':
'@typescript-eslint/tsconfig-utils@8.58.2(typescript@5.9.3)':
dependencies:
typescript: 5.9.3
'@typescript-eslint/type-utils@8.56.0(eslint@10.2.0)(typescript@5.9.3)':
'@typescript-eslint/type-utils@8.56.0(eslint@10.2.1)(typescript@5.9.3)':
dependencies:
'@typescript-eslint/types': 8.56.0
'@typescript-eslint/typescript-estree': 8.56.0(typescript@5.9.3)
'@typescript-eslint/utils': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/utils': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
debug: 4.4.3
eslint: 10.2.0
eslint: 10.2.1
ts-api-utils: 2.5.0(typescript@5.9.3)
typescript: 5.9.3
transitivePeerDependencies:
- supports-color
'@typescript-eslint/type-utils@8.58.1(eslint@10.2.0)(typescript@5.9.3)':
'@typescript-eslint/type-utils@8.58.2(eslint@10.2.1)(typescript@5.9.3)':
dependencies:
'@typescript-eslint/types': 8.58.1
'@typescript-eslint/typescript-estree': 8.58.1(typescript@5.9.3)
'@typescript-eslint/utils': 8.58.1(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/types': 8.58.2
'@typescript-eslint/typescript-estree': 8.58.2(typescript@5.9.3)
'@typescript-eslint/utils': 8.58.2(eslint@10.2.1)(typescript@5.9.3)
debug: 4.4.3
eslint: 10.2.0
eslint: 10.2.1
ts-api-utils: 2.5.0(typescript@5.9.3)
typescript: 5.9.3
transitivePeerDependencies:
@@ -4377,7 +4402,7 @@ snapshots:
'@typescript-eslint/types@8.56.0': {}
'@typescript-eslint/types@8.58.1': {}
'@typescript-eslint/types@8.58.2': {}
'@typescript-eslint/typescript-estree@8.56.0(typescript@5.9.3)':
dependencies:
@@ -4394,12 +4419,12 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/typescript-estree@8.58.1(typescript@5.9.3)':
'@typescript-eslint/typescript-estree@8.58.2(typescript@5.9.3)':
dependencies:
'@typescript-eslint/project-service': 8.58.1(typescript@5.9.3)
'@typescript-eslint/tsconfig-utils': 8.58.1(typescript@5.9.3)
'@typescript-eslint/types': 8.58.1
'@typescript-eslint/visitor-keys': 8.58.1
'@typescript-eslint/project-service': 8.58.2(typescript@5.9.3)
'@typescript-eslint/tsconfig-utils': 8.58.2(typescript@5.9.3)
'@typescript-eslint/types': 8.58.2
'@typescript-eslint/visitor-keys': 8.58.2
debug: 4.4.3
minimatch: 10.2.5
semver: 7.7.4
@@ -4409,24 +4434,24 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/utils@8.56.0(eslint@10.2.0)(typescript@5.9.3)':
'@typescript-eslint/utils@8.56.0(eslint@10.2.1)(typescript@5.9.3)':
dependencies:
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0)
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1)
'@typescript-eslint/scope-manager': 8.56.0
'@typescript-eslint/types': 8.56.0
'@typescript-eslint/typescript-estree': 8.56.0(typescript@5.9.3)
eslint: 10.2.0
eslint: 10.2.1
typescript: 5.9.3
transitivePeerDependencies:
- supports-color
'@typescript-eslint/utils@8.58.1(eslint@10.2.0)(typescript@5.9.3)':
'@typescript-eslint/utils@8.58.2(eslint@10.2.1)(typescript@5.9.3)':
dependencies:
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0)
'@typescript-eslint/scope-manager': 8.58.1
'@typescript-eslint/types': 8.58.1
'@typescript-eslint/typescript-estree': 8.58.1(typescript@5.9.3)
eslint: 10.2.0
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1)
'@typescript-eslint/scope-manager': 8.58.2
'@typescript-eslint/types': 8.58.2
'@typescript-eslint/typescript-estree': 8.58.2(typescript@5.9.3)
eslint: 10.2.1
typescript: 5.9.3
transitivePeerDependencies:
- supports-color
@@ -4436,9 +4461,9 @@ snapshots:
'@typescript-eslint/types': 8.56.0
eslint-visitor-keys: 5.0.1
'@typescript-eslint/visitor-keys@8.58.1':
'@typescript-eslint/visitor-keys@8.58.2':
dependencies:
'@typescript-eslint/types': 8.58.1
'@typescript-eslint/types': 8.58.2
eslint-visitor-keys: 5.0.1
'@videojs/http-streaming@3.17.4(video.js@8.23.7)':
@@ -4482,9 +4507,9 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@vitejs/plugin-vue@6.0.5(vite@8.0.8(@types/node@24.12.2)(esbuild@0.27.3)(terser@5.46.1)(yaml@2.8.3))(vue@3.5.32(typescript@5.9.3))':
'@vitejs/plugin-vue@6.0.6(vite@8.0.8(@types/node@24.12.2)(esbuild@0.27.3)(terser@5.46.1)(yaml@2.8.3))(vue@3.5.32(typescript@5.9.3))':
dependencies:
'@rolldown/pluginutils': 1.0.0-rc.2
'@rolldown/pluginutils': 1.0.0-rc.13
vite: 8.0.8(@types/node@24.12.2)(esbuild@0.27.3)(terser@5.46.1)(yaml@2.8.3)
vue: 3.5.32(typescript@5.9.3)
@@ -4573,7 +4598,7 @@ snapshots:
'@vue/shared': 3.5.32
estree-walker: 2.0.2
magic-string: 0.30.21
postcss: 8.5.9
postcss: 8.5.10
source-map-js: 1.2.1
'@vue/compiler-ssr@3.5.32':
@@ -4614,23 +4639,23 @@ snapshots:
'@vue/devtools-shared@8.1.1': {}
'@vue/eslint-config-prettier@10.2.0(eslint@10.2.0)(prettier@3.8.2)':
'@vue/eslint-config-prettier@10.2.0(eslint@10.2.1)(prettier@3.8.3)':
dependencies:
eslint: 10.2.0
eslint-config-prettier: 10.1.8(eslint@10.2.0)
eslint-plugin-prettier: 5.5.5(eslint-config-prettier@10.1.8(eslint@10.2.0))(eslint@10.2.0)(prettier@3.8.2)
prettier: 3.8.2
eslint: 10.2.1
eslint-config-prettier: 10.1.8(eslint@10.2.1)
eslint-plugin-prettier: 5.5.5(eslint-config-prettier@10.1.8(eslint@10.2.1))(eslint@10.2.1)(prettier@3.8.3)
prettier: 3.8.3
transitivePeerDependencies:
- '@types/eslint'
'@vue/eslint-config-typescript@14.7.0(eslint-plugin-vue@10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(vue-eslint-parser@10.4.0(eslint@10.2.0)))(eslint@10.2.0)(typescript@5.9.3)':
'@vue/eslint-config-typescript@14.7.0(eslint-plugin-vue@10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(vue-eslint-parser@10.4.0(eslint@10.2.1)))(eslint@10.2.1)(typescript@5.9.3)':
dependencies:
'@typescript-eslint/utils': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
eslint: 10.2.0
eslint-plugin-vue: 10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(vue-eslint-parser@10.4.0(eslint@10.2.0))
'@typescript-eslint/utils': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
eslint: 10.2.1
eslint-plugin-vue: 10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(vue-eslint-parser@10.4.0(eslint@10.2.1))
fast-glob: 3.3.3
typescript-eslint: 8.56.0(eslint@10.2.0)(typescript@5.9.3)
vue-eslint-parser: 10.4.0(eslint@10.2.0)
typescript-eslint: 8.56.0(eslint@10.2.1)(typescript@5.9.3)
vue-eslint-parser: 10.4.0(eslint@10.2.1)
optionalDependencies:
typescript: 5.9.3
transitivePeerDependencies:
@@ -4737,13 +4762,13 @@ snapshots:
'@babel/parser': 7.29.2
ast-kit: 2.2.0
autoprefixer@10.4.27(postcss@8.5.9):
autoprefixer@10.5.0(postcss@8.5.10):
dependencies:
browserslist: 4.28.1
caniuse-lite: 1.0.30001774
browserslist: 4.28.2
caniuse-lite: 1.0.30001788
fraction.js: 5.3.4
picocolors: 1.1.1
postcss: 8.5.9
postcss: 8.5.10
postcss-value-parser: 4.2.0
babel-plugin-polyfill-corejs2@0.4.17(@babel/core@7.29.0):
@@ -4774,13 +4799,15 @@ snapshots:
balanced-match@4.0.4: {}
baseline-browser-mapping@2.10.19: {}
baseline-browser-mapping@2.9.19: {}
birpc@2.9.0: {}
boolbase@1.0.0: {}
brace-expansion@2.0.3:
brace-expansion@2.1.0:
dependencies:
balanced-match: 1.0.2
@@ -4800,14 +4827,22 @@ snapshots:
browserslist@4.28.1:
dependencies:
baseline-browser-mapping: 2.9.19
caniuse-lite: 1.0.30001774
caniuse-lite: 1.0.30001788
electron-to-chromium: 1.5.286
node-releases: 2.0.27
update-browserslist-db: 1.2.3(browserslist@4.28.1)
browserslist@4.28.2:
dependencies:
baseline-browser-mapping: 2.10.19
caniuse-lite: 1.0.30001788
electron-to-chromium: 1.5.340
node-releases: 2.0.37
update-browserslist-db: 1.2.3(browserslist@4.28.2)
buffer-from@1.1.2: {}
caniuse-lite@1.0.30001774: {}
caniuse-lite@1.0.30001788: {}
chai@6.2.2: {}
@@ -4875,12 +4910,14 @@ snapshots:
dom-walk@0.1.2: {}
dompurify@3.3.3:
dompurify@3.4.0:
optionalDependencies:
'@types/trusted-types': 2.0.7
electron-to-chromium@1.5.286: {}
electron-to-chromium@1.5.340: {}
entities@7.0.1: {}
epubjs@0.3.93:
@@ -4986,31 +5023,31 @@ snapshots:
optionalDependencies:
source-map: 0.6.1
eslint-config-prettier@10.1.8(eslint@10.2.0):
eslint-config-prettier@10.1.8(eslint@10.2.1):
dependencies:
eslint: 10.2.0
eslint: 10.2.1
eslint-plugin-prettier@5.5.5(eslint-config-prettier@10.1.8(eslint@10.2.0))(eslint@10.2.0)(prettier@3.8.2):
eslint-plugin-prettier@5.5.5(eslint-config-prettier@10.1.8(eslint@10.2.1))(eslint@10.2.1)(prettier@3.8.3):
dependencies:
eslint: 10.2.0
prettier: 3.8.2
eslint: 10.2.1
prettier: 3.8.3
prettier-linter-helpers: 1.0.1
synckit: 0.11.12
optionalDependencies:
eslint-config-prettier: 10.1.8(eslint@10.2.0)
eslint-config-prettier: 10.1.8(eslint@10.2.1)
eslint-plugin-vue@10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(vue-eslint-parser@10.4.0(eslint@10.2.0)):
eslint-plugin-vue@10.8.0(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(vue-eslint-parser@10.4.0(eslint@10.2.1)):
dependencies:
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0)
eslint: 10.2.0
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1)
eslint: 10.2.1
natural-compare: 1.4.0
nth-check: 2.1.1
postcss-selector-parser: 7.1.1
semver: 7.7.4
vue-eslint-parser: 10.4.0(eslint@10.2.0)
vue-eslint-parser: 10.4.0(eslint@10.2.1)
xml-name-validator: 4.0.0
optionalDependencies:
'@typescript-eslint/parser': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/parser': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
eslint-scope@9.1.2:
dependencies:
@@ -5023,15 +5060,15 @@ snapshots:
eslint-visitor-keys@5.0.1: {}
eslint@10.2.0:
eslint@10.2.1:
dependencies:
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0)
'@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1)
'@eslint-community/regexpp': 4.12.2
'@eslint/config-array': 0.23.5
'@eslint/config-helpers': 0.5.5
'@eslint/core': 1.2.1
'@eslint/plugin-kit': 0.7.1
'@humanfs/node': 0.16.7
'@humanfs/node': 0.16.8
'@humanwhocodes/module-importer': 1.0.1
'@humanwhocodes/retry': 0.4.3
'@types/estree': 1.0.8
@@ -5417,7 +5454,7 @@ snapshots:
minimatch@9.0.9:
dependencies:
brace-expansion: 2.0.3
brace-expansion: 2.1.0
mitt@3.0.1: {}
@@ -5452,6 +5489,8 @@ snapshots:
node-releases@2.0.27: {}
node-releases@2.0.37: {}
normalize.css@8.0.1: {}
nth-check@2.1.1:
@@ -5531,7 +5570,7 @@ snapshots:
postcss-value-parser@4.2.0: {}
postcss@8.5.9:
postcss@8.5.10:
dependencies:
nanoid: 3.3.11
picocolors: 1.1.1
@@ -5543,7 +5582,7 @@ snapshots:
dependencies:
fast-diff: 1.3.0
prettier@3.8.2: {}
prettier@3.8.3: {}
pretty-bytes@7.1.0: {}
@@ -5781,13 +5820,13 @@ snapshots:
type@2.7.3: {}
typescript-eslint@8.56.0(eslint@10.2.0)(typescript@5.9.3):
typescript-eslint@8.56.0(eslint@10.2.1)(typescript@5.9.3):
dependencies:
'@typescript-eslint/eslint-plugin': 8.56.0(@typescript-eslint/parser@8.56.0(eslint@10.2.0)(typescript@5.9.3))(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/parser': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
'@typescript-eslint/eslint-plugin': 8.56.0(@typescript-eslint/parser@8.56.0(eslint@10.2.1)(typescript@5.9.3))(eslint@10.2.1)(typescript@5.9.3)
'@typescript-eslint/parser': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
'@typescript-eslint/typescript-estree': 8.56.0(typescript@5.9.3)
'@typescript-eslint/utils': 8.56.0(eslint@10.2.0)(typescript@5.9.3)
eslint: 10.2.0
'@typescript-eslint/utils': 8.56.0(eslint@10.2.1)(typescript@5.9.3)
eslint: 10.2.1
typescript: 5.9.3
transitivePeerDependencies:
- supports-color
@@ -5833,6 +5872,12 @@ snapshots:
escalade: 3.2.0
picocolors: 1.1.1
update-browserslist-db@1.2.3(browserslist@4.28.2):
dependencies:
browserslist: 4.28.2
escalade: 3.2.0
picocolors: 1.1.1
uri-js@4.4.1:
dependencies:
punycode: 2.3.1
@@ -5892,7 +5937,7 @@ snapshots:
dependencies:
lightningcss: 1.32.0
picomatch: 4.0.4
postcss: 8.5.9
postcss: 8.5.10
rolldown: 1.0.0-rc.15
tinyglobby: 0.2.16
optionalDependencies:
@@ -5931,10 +5976,10 @@ snapshots:
vscode-uri@3.1.0: {}
vue-eslint-parser@10.4.0(eslint@10.2.0):
vue-eslint-parser@10.4.0(eslint@10.2.1):
dependencies:
debug: 4.4.3
eslint: 10.2.0
eslint: 10.2.1
eslint-scope: 9.1.2
eslint-visitor-keys: 5.0.1
espree: 11.2.0
+1 -1
View File
@@ -119,7 +119,7 @@
"signup": "S'inscrire",
"submit": "Se connecter",
"username": "Utilisateur",
"usernameTaken": "Le nom d'utilisateur·ice est déjà pris",
"usernameTaken": "Le nom d'utilisateur est déjà pris",
"wrongCredentials": "Identifiants incorrects !",
"passwordTooShort": "Le mot de passe doit contenir au moins {min} caractères",
"logout_reasons": {
@@ -70,7 +70,7 @@
<a
class="link"
target="_blank"
href="https://filebrowser.org/configuration.html#custom-branding"
href="https://filebrowser.org/customization.html#custom-branding"
>{{ t("settings.documentation") }}</a
>
</i18n-t>
@@ -209,7 +209,7 @@
<a
class="link"
target="_blank"
href="https://filebrowser.org/configuration.html#command-runner"
href="https://filebrowser.org/command-execution.html#hook-runner"
>{{ t("settings.documentation") }}</a
>
</i18n-t>
+6
View File
@@ -30,6 +30,12 @@ jobs:
- name: Install Python dependencies
run: pip install -r requirements.txt
- name: Install gofumpt
run: go install mvdan.cc/gofumpt@latest
- name: Check format
run: python hyperbole.py format-check
- name: Test core
working-directory: core
run: go test -v -count=1 -skip 'Stress' ./...
+27 -1
View File
@@ -6,12 +6,18 @@ import (
"errors"
"fmt"
"io"
"math"
"sort"
"github.com/apernet/quic-go/quicvarint"
"golang.org/x/crypto/hkdf"
)
const (
maxCryptoFrameDataLen = 256 * 1024 // 256 KiB
maxCryptoPayloadLen = 256 * 1024 // 256 KiB
)
func ReadCryptoPayload(packet []byte) ([]byte, error) {
hdr, offset, err := ParseInitialHeader(packet)
if err != nil {
@@ -82,11 +88,20 @@ func extractCryptoFrames(r *bytes.Reader) ([]cryptoFrame, error) {
if err != nil {
return nil, err
}
if offset > uint64(math.MaxInt64) {
return nil, errors.New("invalid crypto frame offset")
}
frame.Offset = int64(offset)
dataLen, err := quicvarint.Read(r)
if err != nil {
return nil, err
}
if dataLen > maxCryptoFrameDataLen {
return nil, errors.New("crypto frame data too large")
}
if dataLen > uint64(r.Len()) {
return nil, io.ErrUnexpectedEOF
}
frame.Data = make([]byte, dataLen)
if _, err := io.ReadFull(r, frame.Data); err != nil {
return nil, err
@@ -114,7 +129,18 @@ func assembleCryptoFrames(frames []cryptoFrame) []byte {
}
}
// concatenate the frames
data := make([]byte, frames[len(frames)-1].Offset+int64(len(frames[len(frames)-1].Data)))
last := frames[len(frames)-1]
if last.Offset < 0 {
return nil
}
if last.Offset > maxCryptoPayloadLen {
return nil
}
end := last.Offset + int64(len(last.Data))
if end < 0 || end > maxCryptoPayloadLen {
return nil
}
data := make([]byte, end)
for _, frame := range frames {
copy(data[frame.Offset:], frame.Data)
}
+24
View File
@@ -356,6 +356,25 @@ def cmd_format():
print("Failed to format code")
def cmd_format_check():
if not check_command(["gofumpt", "-version"]):
print("gofumpt is not installed. Please install gofumpt and try again.")
sys.exit(1)
try:
output = (
subprocess.check_output(["gofumpt", "-l", "-extra", "."]).decode().strip()
)
except Exception:
print("Failed to check code format")
sys.exit(1)
if output:
print("The following files are not properly formatted:")
print(output)
sys.exit(1)
def cmd_mockgen():
if not check_command(["mockery", "--version"]):
print("mockery is not installed. Please install mockery and try again.")
@@ -500,6 +519,9 @@ def main():
# Format
p_cmd.add_parser("format", help="Format the code")
# Format check
p_cmd.add_parser("format-check", help="Check code format")
# Mockgen
p_cmd.add_parser("mockgen", help="Generate mock interfaces")
@@ -533,6 +555,8 @@ def main():
cmd_build(args.pprof, args.release, args.race)
elif args.command == "format":
cmd_format()
elif args.command == "format-check":
cmd_format_check()
elif args.command == "mockgen":
cmd_mockgen()
elif args.command == "protogen":
+2 -7
View File
@@ -7,7 +7,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=cmake
PKG_VERSION:=3.30.5
PKG_VERSION:=3.31.5
PKG_VERSION_MAJOR:=$(word 1,$(subst ., ,$(PKG_VERSION))).$(word 2,$(subst ., ,$(PKG_VERSION)))
PKG_RELEASE:=1
PKG_CPE_ID:=cpe:/a:kitware:cmake
@@ -15,7 +15,7 @@ PKG_CPE_ID:=cpe:/a:kitware:cmake
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/Kitware/CMake/releases/download/v$(PKG_VERSION)/ \
https://cmake.org/files/v$(PKG_VERSION_MAJOR)/
PKG_HASH:=9f55e1a40508f2f29b7e065fa08c29f82c402fa0402da839fffe64a25755a86d
PKG_HASH:=66fb53a145648be56b46fa9e8ccade3a4d0dfc92e401e52ce76bdad1fea43d27
HOST_BUILD_PARALLEL:=1
HOST_CONFIGURE_PARALLEL:=1
@@ -30,13 +30,8 @@ HOST_CONFIGURE_VARS += \
MAKE="$(STAGING_DIR_HOST)/bin/ninja"
HOST_CONFIGURE_ARGS := \
--no-debugger \
$(if $(MAKE_JOBSERVER),--parallel="$(MAKE_JOBSERVER)") \
--prefix="$(STAGING_DIR_HOST)" \
--system-expat \
--system-liblzma \
--system-zlib \
--system-zstd \
--generator=Ninja
define Host/Compile/Default
@@ -11,15 +11,15 @@
# like vs9 or vs10
--- a/Modules/Dart.cmake
+++ b/Modules/Dart.cmake
@@ -47,7 +47,7 @@ if(cmp0145 STREQUAL "")
message(AUTHOR_WARNING "${_cmp0145_warning}")
endif()
@@ -33,7 +33,7 @@ whether testing support should be enable
#
#
-option(BUILD_TESTING "Build the testing tree." ON)
+option(BUILD_TESTING "Build the testing tree." OFF)
if(BUILD_TESTING)
# We only get here if a project already ran include(Dart),
find_package(Dart QUIET)
--- a/Tests/Contracts/VTK/Dashboard.cmake.in
+++ b/Tests/Contracts/VTK/Dashboard.cmake.in
@@ -25,7 +25,7 @@ ctest_empty_binary_directory(${CTEST_BIN
@@ -1,17 +0,0 @@
--- a/Modules/FindLibLZMA.cmake
+++ b/Modules/FindLibLZMA.cmake
@@ -61,7 +61,13 @@ The following variables are provided for
cmake_policy(PUSH)
cmake_policy(SET CMP0159 NEW) # file(STRINGS) with REGEX updates CMAKE_MATCH_<n>
-find_path(LIBLZMA_INCLUDE_DIR lzma.h )
+if(UNIX)
+ find_package(PkgConfig QUIET)
+ pkg_search_module(PC_liblzma liblzma)
+endif()
+
+find_path(LIBLZMA_INCLUDE_DIR lzma.h HINTS ${PC_liblzma_INCLUDEDIR} ${PC_liblzma_INCLUDE_DIRS})
+find_library(LIBLZMA_LIBRARY NAMES lzma HINTS ${PC_liblzma_LIBDIR} ${PC_liblzma_LIBRARY_DIRS})
if(NOT LIBLZMA_LIBRARY)
find_library(LIBLZMA_LIBRARY_RELEASE NAMES lzma liblzma NAMES_PER_DIR PATH_SUFFIXES lib)
find_library(LIBLZMA_LIBRARY_DEBUG NAMES lzmad liblzmad NAMES_PER_DIR PATH_SUFFIXES lib)
@@ -1,37 +0,0 @@
From: Jo-Philipp Wich <jo@mein.io>
Date: Wed, 11 Jan 2017 03:36:04 +0100
Subject: [PATCH] cmcurl: link librt
When cmake is linked against LibreSSL, there might be an indirect
dependency on librt on certain systems if LibreSSL's libcrypto uses
clock_gettime() from librt:
[ 28%] Linking C executable LIBCURL
.../lib/libcrypto.a(getentropy_linux.o): In function `getentropy_fallback':
getentropy_linux.c:(.text+0x16d): undefined reference to `clock_gettime'
getentropy_linux.c:(.text+0x412): undefined reference to `clock_gettime'
collect2: error: ld returned 1 exit status
make[5]: *** [Utilities/cmcurl/LIBCURL] Error 1
Modify the cmcurl CMakeLists.txt to check for clock_gettime() in librt
and unconditionally link the rt library when the symbol is found.
Signed-off-by: Jo-Philipp Wich <jo@mein.io>
---
--- a/Utilities/cmcurl/CMakeLists.txt
+++ b/Utilities/cmcurl/CMakeLists.txt
@@ -648,6 +648,14 @@ if(CURL_USE_OPENSSL)
endif()
set(SSL_ENABLED ON)
set(USE_OPENSSL ON)
+ check_library_exists("rt" clock_gettime "" HAVE_LIBRT)
+ if(HAVE_LIBRT)
+ list(APPEND OPENSSL_LIBRARIES rt)
+ endif()
+ check_library_exists("pthread" pthread_once "" HAVE_PTHREAD)
+ if(HAVE_PTHREAD)
+ list(APPEND OPENSSL_LIBRARIES pthread)
+ endif()
list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})
include_directories(${OPENSSL_INCLUDE_DIR})
@@ -1,6 +1,6 @@
--- a/bootstrap
+++ b/bootstrap
@@ -1509,7 +1509,10 @@ int main(){ printf("1%c", (char)0x0a); r
@@ -1421,7 +1421,10 @@ int main(){ printf("1%c", (char)0x0a); r
' > "test.c"
cmake_original_make_flags="${cmake_make_flags}"
if test "x${cmake_parallel_make}" != "x"; then
+1 -1
View File
@@ -1,6 +1,6 @@
--- a/Modules/FindZLIB.cmake
+++ b/Modules/FindZLIB.cmake
@@ -120,10 +120,13 @@ else()
@@ -160,10 +160,13 @@ else()
set(ZLIB_NAMES_DEBUG zd zlibd zdlld zlibd1 zlib1d zlibstaticd zlibwapid zlibvcd zlibstatd)
endif()
@@ -1,6 +1,6 @@
--- a/Utilities/cmlibarchive/CMakeLists.txt
+++ b/Utilities/cmlibarchive/CMakeLists.txt
@@ -656,7 +656,7 @@ IF(ENABLE_ZSTD)
@@ -669,7 +669,7 @@ IF(ENABLE_ZSTD)
SET(ZSTD_FIND_QUIETLY TRUE)
ENDIF (ZSTD_INCLUDE_DIR)
@@ -1,6 +1,6 @@
--- a/Source/CMakeLists.txt
+++ b/Source/CMakeLists.txt
@@ -858,7 +858,7 @@ if(CMake_USE_XCOFF_PARSER)
@@ -903,7 +903,7 @@ if(CMake_USE_XCOFF_PARSER)
endif()
# Xcode only works on Apple
@@ -11,8 +11,8 @@
PRIVATE
--- a/Source/cmake.cxx
+++ b/Source/cmake.cxx
@@ -134,7 +134,7 @@
# include "cmGlobalGhsMultiGenerator.h"
@@ -143,7 +143,7 @@
# endif
#endif
-#if defined(__APPLE__)
+1 -1
View File
@@ -32,7 +32,7 @@ PROJECT_NAME=$(shell basename "${ROOT}")
# - pkg/version/current.go
#
# Use `tools/bump_version.sh` script to change all those files at one shot.
VERSION="3.30.1"
VERSION="3.31.0"
# With .ONESHELL, each recipe is executed in a single shell instance.
# This allows `cd` to affect subsequent commands in the same recipe.
@@ -1,5 +1,5 @@
Package: mieru
Version: 3.30.1
Version: 3.31.0
Section: net
Priority: optional
Architecture: amd64
@@ -1,5 +1,5 @@
Name: mieru
Version: 3.30.1
Version: 3.31.0
Release: 1%{?dist}
Summary: Mieru proxy client
License: GPLv3+
@@ -1,5 +1,5 @@
Package: mieru
Version: 3.30.1
Version: 3.31.0
Section: net
Priority: optional
Architecture: arm64
@@ -1,5 +1,5 @@
Name: mieru
Version: 3.30.1
Version: 3.31.0
Release: 1%{?dist}
Summary: Mieru proxy client
License: GPLv3+
@@ -1,5 +1,5 @@
Package: mita
Version: 3.30.1
Version: 3.31.0
Section: net
Priority: optional
Architecture: amd64
+1 -1
View File
@@ -1,5 +1,5 @@
Name: mita
Version: 3.30.1
Version: 3.31.0
Release: 1%{?dist}
Summary: Mieru proxy server
License: GPLv3+
@@ -1,5 +1,5 @@
Package: mita
Version: 3.30.1
Version: 3.31.0
Section: net
Priority: optional
Architecture: arm64
+1 -1
View File
@@ -1,5 +1,5 @@
Name: mita
Version: 3.30.1
Version: 3.31.0
Release: 1%{?dist}
Summary: Mieru proxy server
License: GPLv3+
+2
View File
@@ -20,6 +20,8 @@ Since the key depends on the system time, the time difference between the client
The mieru protocol allows the use of any [AEAD](https://en.wikipedia.org/wiki/Authenticated_encryption) algorithm for encryption. The nonce length of the AEAD algorithm must be 24 bytes. The current version of mieru only implements the XChaCha20-Poly1305 algorithm.
To accelerate user lookup, the last 4 bytes of the nonce is replace by the first 4 bytes of a SHA-256 output. The input of SHA-256 is user name concatenate by the first 16 bytes of the nonce.
## Segment Format
When mieru receives a network access request from a user, it divides the original data stream into small fragments and sends them to the Internet after encryption and encapsulation. The fields and their lengths in each segment are as shown in the following table:
+2
View File
@@ -20,6 +20,8 @@ TCP 和 UDP 协议共用同一套密钥生成方法。
mieru 协议允许使用任何 [AEAD](https://en.wikipedia.org/wiki/Authenticated_encryption) 算法进行加密。算法的 nonce 长度必须为 24 字节。当前 mieru 版本只实现了 XChaCha20-Poly1305 算法。
为了加快用户查找,nonce 的最后 4 个字节被替换为 SHA-256 输出的前 4 个字节,其中 SHA-256 的输入是用户名再接上 nonce 的前 16 个字节。
## 数据段的格式
mieru 收到用户的网络访问请求后,会将原始数据流量切分成小段(fragment),经过加密封装发送到互联网上。每个数据段(segment)中的数据项(field)及其长度如下表所示。
+22 -8
View File
@@ -18,32 +18,32 @@ Or you can manually install and configure proxy server using the steps below.
```sh
# Debian / Ubuntu - X86_64
curl -LSO https://github.com/enfein/mieru/releases/download/v3.30.1/mita_3.30.1_amd64.deb
curl -LSO https://github.com/enfein/mieru/releases/download/v3.31.0/mita_3.31.0_amd64.deb
# Debian / Ubuntu - ARM 64
curl -LSO https://github.com/enfein/mieru/releases/download/v3.30.1/mita_3.30.1_arm64.deb
curl -LSO https://github.com/enfein/mieru/releases/download/v3.31.0/mita_3.31.0_arm64.deb
# RedHat / CentOS / Rocky Linux - X86_64
curl -LSO https://github.com/enfein/mieru/releases/download/v3.30.1/mita-3.30.1-1.x86_64.rpm
curl -LSO https://github.com/enfein/mieru/releases/download/v3.31.0/mita-3.31.0-1.x86_64.rpm
# RedHat / CentOS / Rocky Linux - ARM 64
curl -LSO https://github.com/enfein/mieru/releases/download/v3.30.1/mita-3.30.1-1.aarch64.rpm
curl -LSO https://github.com/enfein/mieru/releases/download/v3.31.0/mita-3.31.0-1.aarch64.rpm
```
## Install mita package
```sh
# Debian / Ubuntu - X86_64
sudo dpkg -i mita_3.30.1_amd64.deb
sudo dpkg -i mita_3.31.0_amd64.deb
# Debian / Ubuntu - ARM 64
sudo dpkg -i mita_3.30.1_arm64.deb
sudo dpkg -i mita_3.31.0_arm64.deb
# RedHat / CentOS / Rocky Linux - X86_64
sudo rpm -Uvh --force mita-3.30.1-1.x86_64.rpm
sudo rpm -Uvh --force mita-3.31.0-1.x86_64.rpm
# RedHat / CentOS / Rocky Linux - ARM 64
sudo rpm -Uvh --force mita-3.30.1-1.aarch64.rpm
sudo rpm -Uvh --force mita-3.31.0-1.aarch64.rpm
```
Those instructions can also be used to upgrade the version of mita software package.
@@ -316,6 +316,20 @@ We can use the `users` -> `quotas` property to limit the amount of traffic a use
}
```
### User Hint
Starting from v3.31.0 release, mieru client send user hint to accelerate the decryption of network packets in mita server. This is especially helpful when the number of proxy users is large.
Because mita server will have higher CPU consumption when the user hint is not available, you can apply the following configuration to block old mieru clients.
```
{
"advancedSettings": {
"userHintIsMandatory": true
}
}
```
## [Optional] Install NTP network time synchronization service
The client and proxy server software calculate the key based on the user name, password and system time. The server can decrypt and respond to the client's request only if the client and server have the same key. This requires that the system time of the client and the server must be in sync.
+22 -8
View File
@@ -18,32 +18,32 @@ sudo python3 setup.py --lang=zh
```sh
# Debian / Ubuntu - X86_64
curl -LSO https://github.com/enfein/mieru/releases/download/v3.30.1/mita_3.30.1_amd64.deb
curl -LSO https://github.com/enfein/mieru/releases/download/v3.31.0/mita_3.31.0_amd64.deb
# Debian / Ubuntu - ARM 64
curl -LSO https://github.com/enfein/mieru/releases/download/v3.30.1/mita_3.30.1_arm64.deb
curl -LSO https://github.com/enfein/mieru/releases/download/v3.31.0/mita_3.31.0_arm64.deb
# RedHat / CentOS / Rocky Linux - X86_64
curl -LSO https://github.com/enfein/mieru/releases/download/v3.30.1/mita-3.30.1-1.x86_64.rpm
curl -LSO https://github.com/enfein/mieru/releases/download/v3.31.0/mita-3.31.0-1.x86_64.rpm
# RedHat / CentOS / Rocky Linux - ARM 64
curl -LSO https://github.com/enfein/mieru/releases/download/v3.30.1/mita-3.30.1-1.aarch64.rpm
curl -LSO https://github.com/enfein/mieru/releases/download/v3.31.0/mita-3.31.0-1.aarch64.rpm
```
## 安装 mita 软件包
```sh
# Debian / Ubuntu - X86_64
sudo dpkg -i mita_3.30.1_amd64.deb
sudo dpkg -i mita_3.31.0_amd64.deb
# Debian / Ubuntu - ARM 64
sudo dpkg -i mita_3.30.1_arm64.deb
sudo dpkg -i mita_3.31.0_arm64.deb
# RedHat / CentOS / Rocky Linux - X86_64
sudo rpm -Uvh --force mita-3.30.1-1.x86_64.rpm
sudo rpm -Uvh --force mita-3.31.0-1.x86_64.rpm
# RedHat / CentOS / Rocky Linux - ARM 64
sudo rpm -Uvh --force mita-3.30.1-1.aarch64.rpm
sudo rpm -Uvh --force mita-3.31.0-1.aarch64.rpm
```
上述指令也可以用来升级 mita 软件包的版本。
@@ -316,6 +316,20 @@ Tor 浏览器 -> mieru 客户端 -> GFW -> mita 服务器 -> Tor 网络 -> 目
}
```
### 用户提示
从 v3.31.0 版本开始,mieru 客户端会发送用户提示,以加速 mita 服务器对网络数据包的解密。代理用户数量较多时,这尤其有帮助。
由于在没有用户提示的情况下 mita 服务器的 CPU 消耗会更高,你可以应用以下配置来屏蔽旧版本的 mieru 客户端。
```
{
"advancedSettings": {
"userHintIsMandatory": true
}
}
```
## 【可选】安装 NTP 网络时间同步服务
客户端和代理服务器软件会根据用户名、密码和系统时间,分别计算密钥。只有当客户端和服务器的密钥相同时,服务器才能解密和响应客户端的请求。这要求客户端和服务器的系统时间不能有很大的差别。
+3 -3
View File
@@ -153,7 +153,7 @@ func TestCapacity(t *testing.T) {
}
func TestExpireInterval(t *testing.T) {
cache := replay.NewCache(10, 50*time.Millisecond)
cache := replay.NewCache(10, 100*time.Millisecond)
a := make([]byte, 256)
if _, err := crand.Read(a); err != nil {
t.Fatalf("rand.Read() failed: %v", err)
@@ -170,7 +170,7 @@ func TestExpireInterval(t *testing.T) {
t.Errorf("cache sizes are %d %d, want 1 0.", curr, prev)
}
time.Sleep(75 * time.Millisecond)
time.Sleep(150 * time.Millisecond)
if res := cache.IsDuplicate(a, replay.EmptyTag); res == false {
t.Errorf("IsDuplicate() = false, want true")
@@ -179,7 +179,7 @@ func TestExpireInterval(t *testing.T) {
t.Errorf("cache sizes are %d %d, want 1 1.", curr, prev)
}
time.Sleep(150 * time.Millisecond)
time.Sleep(200 * time.Millisecond)
if res := cache.IsDuplicate(b, replay.EmptyTag); res == true {
t.Errorf("IsDuplicate() = true, want false")
+1 -1
View File
@@ -16,5 +16,5 @@
package version
const (
AppVersion = "3.30.1"
AppVersion = "3.31.0"
)
@@ -11,7 +11,8 @@
"name": "baozi",
"password": "manlianpenfen"
}
]
],
"user_hint_is_mandatory": true
},
{
"type": "mieru",
@@ -24,7 +25,8 @@
"name": "baozi",
"password": "manlianpenfen"
}
]
],
"user_hint_is_mandatory": true
}
],
"outbounds": [],
+1 -1
View File
@@ -19,7 +19,7 @@ jobs:
- uses: actions/create-github-app-token@v3
id: generate-token
with:
app-id: ${{ secrets.MAINTAINER_APPID }}
client-id: ${{ secrets.MAINTAINER_APPID }}
private-key: ${{ secrets.MAINTAINER_APP_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
+52
View File
@@ -85,6 +85,19 @@ type XHTTPOptions struct {
Headers map[string]string `proxy:"headers,omitempty"`
NoGRPCHeader bool `proxy:"no-grpc-header,omitempty"`
XPaddingBytes string `proxy:"x-padding-bytes,omitempty"`
XPaddingObfsMode bool `proxy:"x-padding-obfs-mode,omitempty"`
XPaddingKey string `proxy:"x-padding-key,omitempty"`
XPaddingHeader string `proxy:"x-padding-header,omitempty"`
XPaddingPlacement string `proxy:"x-padding-placement,omitempty"`
XPaddingMethod string `proxy:"x-padding-method,omitempty"`
UplinkHTTPMethod string `proxy:"uplink-http-method,omitempty"`
SessionPlacement string `proxy:"session-placement,omitempty"`
SessionKey string `proxy:"session-key,omitempty"`
SeqPlacement string `proxy:"seq-placement,omitempty"`
SeqKey string `proxy:"seq-key,omitempty"`
UplinkDataPlacement string `proxy:"uplink-data-placement,omitempty"`
UplinkDataKey string `proxy:"uplink-data-key,omitempty"`
UplinkChunkSize string `proxy:"uplink-chunk-size,omitempty"`
ScMaxEachPostBytes string `proxy:"sc-max-each-post-bytes,omitempty"`
ScMinPostsIntervalMs string `proxy:"sc-min-posts-interval-ms,omitempty"`
ReuseSettings *XHTTPReuseSettings `proxy:"reuse-settings,omitempty"` // aka XMUX
@@ -107,6 +120,19 @@ type XHTTPDownloadSettings struct {
Headers *map[string]string `proxy:"headers,omitempty"`
NoGRPCHeader *bool `proxy:"no-grpc-header,omitempty"`
XPaddingBytes *string `proxy:"x-padding-bytes,omitempty"`
XPaddingObfsMode *bool `proxy:"x-padding-obfs-mode,omitempty"`
XPaddingKey *string `proxy:"x-padding-key,omitempty"`
XPaddingHeader *string `proxy:"x-padding-header,omitempty"`
XPaddingPlacement *string `proxy:"x-padding-placement,omitempty"`
XPaddingMethod *string `proxy:"x-padding-method,omitempty"`
UplinkHTTPMethod *string `proxy:"uplink-http-method,omitempty"`
SessionPlacement *string `proxy:"session-placement,omitempty"`
SessionKey *string `proxy:"session-key,omitempty"`
SeqPlacement *string `proxy:"seq-placement,omitempty"`
SeqKey *string `proxy:"seq-key,omitempty"`
UplinkDataPlacement *string `proxy:"uplink-data-placement,omitempty"`
UplinkDataKey *string `proxy:"uplink-data-key,omitempty"`
UplinkChunkSize *string `proxy:"uplink-chunk-size,omitempty"`
ScMaxEachPostBytes *string `proxy:"sc-max-each-post-bytes,omitempty"`
ScMinPostsIntervalMs *string `proxy:"sc-min-posts-interval-ms,omitempty"`
ReuseSettings *XHTTPReuseSettings `proxy:"reuse-settings,omitempty"` // aka XMUX
@@ -552,6 +578,19 @@ func NewVless(option VlessOption) (*Vless, error) {
Headers: v.option.XHTTPOpts.Headers,
NoGRPCHeader: v.option.XHTTPOpts.NoGRPCHeader,
XPaddingBytes: v.option.XHTTPOpts.XPaddingBytes,
XPaddingObfsMode: v.option.XHTTPOpts.XPaddingObfsMode,
XPaddingKey: v.option.XHTTPOpts.XPaddingKey,
XPaddingHeader: v.option.XHTTPOpts.XPaddingHeader,
XPaddingPlacement: v.option.XHTTPOpts.XPaddingPlacement,
XPaddingMethod: v.option.XHTTPOpts.XPaddingMethod,
UplinkHTTPMethod: v.option.XHTTPOpts.UplinkHTTPMethod,
SessionPlacement: v.option.XHTTPOpts.SessionPlacement,
SessionKey: v.option.XHTTPOpts.SessionKey,
SeqPlacement: v.option.XHTTPOpts.SeqPlacement,
SeqKey: v.option.XHTTPOpts.SeqKey,
UplinkDataPlacement: v.option.XHTTPOpts.UplinkDataPlacement,
UplinkDataKey: v.option.XHTTPOpts.UplinkDataKey,
UplinkChunkSize: v.option.XHTTPOpts.UplinkChunkSize,
ScMaxEachPostBytes: v.option.XHTTPOpts.ScMaxEachPostBytes,
ScMinPostsIntervalMs: v.option.XHTTPOpts.ScMinPostsIntervalMs,
ReuseConfig: reuseCfg,
@@ -667,6 +706,19 @@ func NewVless(option VlessOption) (*Vless, error) {
Headers: lo.FromPtrOr(ds.Headers, v.option.XHTTPOpts.Headers),
NoGRPCHeader: lo.FromPtrOr(ds.NoGRPCHeader, v.option.XHTTPOpts.NoGRPCHeader),
XPaddingBytes: lo.FromPtrOr(ds.XPaddingBytes, v.option.XHTTPOpts.XPaddingBytes),
XPaddingObfsMode: lo.FromPtrOr(ds.XPaddingObfsMode, v.option.XHTTPOpts.XPaddingObfsMode),
XPaddingKey: lo.FromPtrOr(ds.XPaddingKey, v.option.XHTTPOpts.XPaddingKey),
XPaddingHeader: lo.FromPtrOr(ds.XPaddingHeader, v.option.XHTTPOpts.XPaddingHeader),
XPaddingPlacement: lo.FromPtrOr(ds.XPaddingPlacement, v.option.XHTTPOpts.XPaddingPlacement),
XPaddingMethod: lo.FromPtrOr(ds.XPaddingMethod, v.option.XHTTPOpts.XPaddingMethod),
UplinkHTTPMethod: lo.FromPtrOr(ds.UplinkHTTPMethod, v.option.XHTTPOpts.UplinkHTTPMethod),
SessionPlacement: lo.FromPtrOr(ds.SessionPlacement, v.option.XHTTPOpts.SessionPlacement),
SessionKey: lo.FromPtrOr(ds.SessionKey, v.option.XHTTPOpts.SessionKey),
SeqPlacement: lo.FromPtrOr(ds.SeqPlacement, v.option.XHTTPOpts.SeqPlacement),
SeqKey: lo.FromPtrOr(ds.SeqKey, v.option.XHTTPOpts.SeqKey),
UplinkDataPlacement: lo.FromPtrOr(ds.UplinkDataPlacement, v.option.XHTTPOpts.UplinkDataPlacement),
UplinkDataKey: lo.FromPtrOr(ds.UplinkDataKey, v.option.XHTTPOpts.UplinkDataKey),
UplinkChunkSize: lo.FromPtrOr(ds.UplinkChunkSize, v.option.XHTTPOpts.UplinkChunkSize),
ScMaxEachPostBytes: lo.FromPtrOr(ds.ScMaxEachPostBytes, v.option.XHTTPOpts.ScMaxEachPostBytes),
ScMinPostsIntervalMs: lo.FromPtrOr(ds.ScMinPostsIntervalMs, v.option.XHTTPOpts.ScMinPostsIntervalMs),
ReuseConfig: downloadReuseCfg,
+42
View File
@@ -825,6 +825,19 @@ proxies: # socks5
# X-Forwarded-For: ""
# no-grpc-header: false
# x-padding-bytes: "100-1000"
# x-padding-obfs-mode: false
# x-padding-key: x_padding
# x-padding-header: Referer
# x-padding-placement: queryInHeader # Available: queryInHeader, cookie, header, query
# x-padding-method: repeat-x # Available: repeat-x, tokenish
# uplink-http-method: POST # Available: POST, PUT, PATCH, DELETE
# session-placement: path # Available: path, query, cookie, header
# session-key: ""
# seq-placement: path # Available: path, query, cookie, header
# seq-key: ""
# uplink-data-placement: body # Available: body, cookie, header
# uplink-data-key: ""
# uplink-chunk-size: 0 # only applicable when uplink-data-placement is not body
# sc-max-each-post-bytes: 1000000
# sc-min-posts-interval-ms: 30
# reuse-settings: # aka XMUX
@@ -842,6 +855,19 @@ proxies: # socks5
# X-Forwarded-For: ""
# no-grpc-header: false
# x-padding-bytes: "100-1000"
# x-padding-obfs-mode: false
# x-padding-key: x_padding
# x-padding-header: Referer
# x-padding-placement: queryInHeader # Available: queryInHeader, cookie, header, query
# x-padding-method: repeat-x # Available: repeat-x, tokenish
# uplink-http-method: POST # Available: POST, PUT, PATCH, DELETE
# session-placement: path # Available: path, query, cookie, header
# session-key: ""
# seq-placement: path # Available: path, query, cookie, header
# seq-key: ""
# uplink-data-placement: body # Available: body, cookie, header
# uplink-data-key: ""
# uplink-chunk-size: 0 # only applicable when uplink-data-placement is not body
# sc-max-each-post-bytes: 1000000
# sc-min-posts-interval-ms: 30
# reuse-settings: # aka XMUX
@@ -1687,6 +1713,20 @@ listeners:
# host: ""
# mode: auto # Available: "stream-one", "stream-up" or "packet-up"
# no-sse-header: false
# x-padding-bytes: "100-1000"
# x-padding-obfs-mode: false
# x-padding-key: x_padding
# x-padding-header: Referer
# x-padding-placement: queryInHeader # Available: queryInHeader, cookie, header, query
# x-padding-method: repeat-x # Available: repeat-x, tokenish
# uplink-http-method: POST # Available: POST, PUT, PATCH, DELETE
# session-placement: path # Available: path, query, cookie, header
# session-key: ""
# seq-placement: path # Available: path, query, cookie, header
# seq-key: ""
# uplink-data-placement: body # Available: body, cookie, header
# uplink-data-key: ""
# uplink-chunk-size: 0 # only applicable when uplink-data-placement is not body
# sc-max-buffered-posts: 30
# sc-stream-up-server-secs: "20-80"
# sc-max-each-post-bytes: 1000000
@@ -1768,6 +1808,8 @@ listeners:
username2: password2
# 一个 base64 字符串用于微调网络行为
# traffic-pattern: ""
# 如果开启,且客户端不发送用户提示,代理服务器将拒绝连接
# user-hint-is-mandatory: false
- name: sudoku-in-1
type: sudoku
+1 -1
View File
@@ -6,7 +6,7 @@ require (
github.com/bahlo/generic-list-go v0.2.0
github.com/coreos/go-iptables v0.8.0
github.com/dlclark/regexp2 v1.11.5
github.com/enfein/mieru/v3 v3.30.1
github.com/enfein/mieru/v3 v3.31.0
github.com/gobwas/ws v1.4.0
github.com/gofrs/uuid/v5 v5.4.0
github.com/golang/snappy v1.0.0
+2 -2
View File
@@ -20,8 +20,8 @@ github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZ
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dunglas/httpsfv v1.0.2 h1:iERDp/YAfnojSDJ7PW3dj1AReJz4MrwbECSSE59JWL0=
github.com/dunglas/httpsfv v1.0.2/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg=
github.com/enfein/mieru/v3 v3.30.1 h1:gHHXQfpQO/5d789o9kokVfej7jl795aJwPihUk3gTDU=
github.com/enfein/mieru/v3 v3.30.1/go.mod h1:zJBUCsi5rxyvHM8fjFf+GLaEl4OEjjBXr1s5F6Qd3hM=
github.com/enfein/mieru/v3 v3.31.0 h1:Fl2ocRCRXJzMygzdRjBHgqI996ZuIDHUmyQyovSf9sA=
github.com/enfein/mieru/v3 v3.31.0/go.mod h1:zJBUCsi5rxyvHM8fjFf+GLaEl4OEjjBXr1s5F6Qd3hM=
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358 h1:kXYqH/sL8dS/FdoFjr12ePjnLPorPo2FsnrHNuXSDyo=
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358/go.mod h1:hkIFzoiIPZYxdFOOLyDho59b7SrDfo+w3h+yWdlg45I=
github.com/ericlagergren/polyval v0.0.0-20220411101811-e25bc10ba391 h1:8j2RH289RJplhA6WfdaPqzg1MjH2K8wX5e0uhAxrw2g=
+14
View File
@@ -34,6 +34,20 @@ type XHTTPConfig struct {
Path string
Host string
Mode string
XPaddingBytes string
XPaddingObfsMode bool
XPaddingKey string
XPaddingHeader string
XPaddingPlacement string
XPaddingMethod string
UplinkHTTPMethod string
SessionPlacement string
SessionKey string
SeqPlacement string
SeqKey string
UplinkDataPlacement string
UplinkDataKey string
UplinkChunkSize string
NoSSEHeader bool
ScStreamUpServerSecs string
ScMaxBufferedPosts string
+1
View File
@@ -41,6 +41,7 @@ func testInboundAnyTLS(t *testing.T, inboundOptions inbound.AnyTLSOption, outbou
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.Password = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewAnyTLS(outboundOptions)
if !assert.NoError(t, err) {
+30
View File
@@ -58,6 +58,30 @@ func init() {
realityPublickey = base64.RawURLEncoding.EncodeToString(privateKey.PublicKey().Bytes())
}
type TestDialer struct {
dialer C.Dialer
ctx context.Context
}
func (t *TestDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
start:
conn, err := t.dialer.DialContext(ctx, network, address)
if err != nil && ctx.Err() == nil && t.ctx.Err() == nil {
// We are conducting tests locally, and they shouldn't fail.
// However, a large number of requests in a short period during concurrent testing can exhaust system ports.
// This can lead to various errors such as WSAECONNREFUSED and WSAENOBUFS.
// So we just retry if the context is not canceled.
goto start
}
return conn, err
}
func (t *TestDialer) ListenPacket(ctx context.Context, network, address string, rAddrPort netip.AddrPort) (net.PacketConn, error) {
return t.dialer.ListenPacket(ctx, network, address, rAddrPort)
}
var _ C.Dialer = (*TestDialer)(nil)
type TestTunnel struct {
HandleTCPConnFn func(conn net.Conn, metadata *C.Metadata)
HandleUDPPacketFn func(packet C.UDPPacket, metadata *C.Metadata)
@@ -65,6 +89,7 @@ type TestTunnel struct {
CloseFn func() error
DoSequentialTestFn func(t *testing.T, proxy C.ProxyAdapter)
DoConcurrentTestFn func(t *testing.T, proxy C.ProxyAdapter)
NewDialerFn func() C.Dialer
}
func (tt *TestTunnel) HandleTCPConn(conn net.Conn, metadata *C.Metadata) {
@@ -96,6 +121,10 @@ func (tt *TestTunnel) DoConcurrentTest(t *testing.T, proxy C.ProxyAdapter) {
tt.DoConcurrentTestFn(t, proxy)
}
func (tt *TestTunnel) NewDialer() C.Dialer {
return tt.NewDialerFn()
}
type TestTunnelListener struct {
ch chan net.Conn
ctx context.Context
@@ -328,6 +357,7 @@ func NewHttpTestTunnel() *TestTunnel {
CloseFn: ln.Close,
DoSequentialTestFn: sequentialTestFn,
DoConcurrentTestFn: concurrentTestFn,
NewDialerFn: func() C.Dialer { return &TestDialer{dialer: dialer.NewDialer(), ctx: ctx} },
}
return tunnel
}
@@ -41,6 +41,7 @@ func testInboundHysteria2(t *testing.T, inboundOptions inbound.Hysteria2Option,
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.Password = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewHysteria2(outboundOptions)
if !assert.NoError(t, err) {
+14 -6
View File
@@ -27,9 +27,10 @@ type Mieru struct {
type MieruOption struct {
BaseOption
Transport string `inbound:"transport"`
Users map[string]string `inbound:"users"`
TrafficPattern string `inbound:"traffic-pattern,omitempty"`
Transport string `inbound:"transport"`
Users map[string]string `inbound:"users"`
TrafficPattern string `inbound:"traffic-pattern,omitempty"`
UserHintIsMandatory bool `inbound:"user-hint-is-mandatory,omitempty"`
}
type mieruListenerFactory struct{}
@@ -158,11 +159,18 @@ func buildMieruServerConfig(option *MieruOption, ports utils.IntRanges[uint16])
}
var trafficPattern *mierupb.TrafficPattern
trafficPattern, _ = mierutp.Decode(option.TrafficPattern)
var advancedSettings *mierupb.ServerAdvancedSettings
if option.UserHintIsMandatory {
advancedSettings = &mierupb.ServerAdvancedSettings{
UserHintIsMandatory: proto.Bool(true),
}
}
return &mieruserver.ServerConfig{
Config: &mierupb.ServerConfig{
PortBindings: portBindings,
Users: users,
TrafficPattern: trafficPattern,
PortBindings: portBindings,
Users: users,
TrafficPattern: trafficPattern,
AdvancedSettings: advancedSettings,
},
StreamListenerFactory: mieruListenerFactory{},
PacketListenerFactory: mieruListenerFactory{},
+8 -4
View File
@@ -206,8 +206,9 @@ func testInboundMieruTCP(t *testing.T, handshakeMode string) {
Listen: "127.0.0.1",
Port: strconv.Itoa(port),
},
Transport: "TCP",
Users: map[string]string{"test": "password"},
Transport: "TCP",
Users: map[string]string{"test": "password"},
UserHintIsMandatory: true,
}
in, err := inbound.NewMieru(&inboundOptions)
if !assert.NoError(t, err) {
@@ -236,6 +237,7 @@ func testInboundMieruTCP(t *testing.T, handshakeMode string) {
Password: "password",
HandshakeMode: handshakeMode,
}
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewMieru(outboundOptions)
if !assert.NoError(t, err) {
return
@@ -260,8 +262,9 @@ func testInboundMieruUDP(t *testing.T, handshakeMode string) {
Listen: "127.0.0.1",
Port: strconv.Itoa(port),
},
Transport: "UDP",
Users: map[string]string{"test": "password"},
Transport: "UDP",
Users: map[string]string{"test": "password"},
UserHintIsMandatory: true,
}
in, err := inbound.NewMieru(&inboundOptions)
if !assert.NoError(t, err) {
@@ -290,6 +293,7 @@ func testInboundMieruUDP(t *testing.T, handshakeMode string) {
Password: "password",
HandshakeMode: handshakeMode,
}
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewMieru(outboundOptions)
if !assert.NoError(t, err) {
return
@@ -85,6 +85,7 @@ func testInboundShadowSocks0(t *testing.T, inboundOptions inbound.ShadowSocksOpt
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.Password = password
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewShadowSocks(outboundOptions)
if !assert.NoError(t, err) {
+1
View File
@@ -43,6 +43,7 @@ func testInboundSudoku(t *testing.T, inboundOptions inbound.SudokuOption, outbou
outboundOptions.Name = "sudoku_outbound"
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewSudoku(outboundOptions)
if !assert.NoError(t, err) {
+1
View File
@@ -43,6 +43,7 @@ func testInboundTrojan(t *testing.T, inboundOptions inbound.TrojanOption, outbou
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.Password = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewTrojan(outboundOptions)
if !assert.NoError(t, err) {
@@ -42,6 +42,7 @@ func testInboundTrustTunnel(t *testing.T, inboundOptions inbound.TrustTunnelOpti
outboundOptions.Port = int(addrPort.Port())
outboundOptions.UserName = "test"
outboundOptions.Password = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewTrustTunnel(outboundOptions)
if !assert.NoError(t, err) {
+1
View File
@@ -69,6 +69,7 @@ func testInboundTuic0(t *testing.T, inboundOptions inbound.TuicOption, outboundO
outboundOptions.Name = "tuic_outbound"
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewTuic(outboundOptions)
if !assert.NoError(t, err) {
+27
View File
@@ -35,6 +35,20 @@ type XHTTPConfig struct {
Path string `inbound:"path,omitempty"`
Host string `inbound:"host,omitempty"`
Mode string `inbound:"mode,omitempty"`
XPaddingBytes string `inbound:"xpadding-bytes,omitempty"`
XPaddingObfsMode bool `inbound:"xpadding-obfs-mode,omitempty"`
XPaddingKey string `inbound:"xpadding-key,omitempty"`
XPaddingHeader string `inbound:"xpadding-header,omitempty"`
XPaddingPlacement string `inbound:"xpadding-placement,omitempty"`
XPaddingMethod string `inbound:"xpadding-method,omitempty"`
UplinkHTTPMethod string `inbound:"uplink-http-method,omitempty"`
SessionPlacement string `inbound:"session-placement,omitempty"`
SessionKey string `inbound:"session-key,omitempty"`
SeqPlacement string `inbound:"seq-placement,omitempty"`
SeqKey string `inbound:"seq-key,omitempty"`
UplinkDataPlacement string `inbound:"uplink-data-placement,omitempty"`
UplinkDataKey string `inbound:"uplink-data-key,omitempty"`
UplinkChunkSize string `inbound:"uplink-chunk-size,omitempty"`
NoSSEHeader bool `inbound:"no-sse-header,omitempty"`
ScStreamUpServerSecs string `inbound:"sc-stream-up-server-secs,omitempty"`
ScMaxBufferedPosts string `inbound:"sc-max-buffered-posts,omitempty"`
@@ -47,6 +61,19 @@ func (o XHTTPConfig) Build() LC.XHTTPConfig {
Host: o.Host,
Mode: o.Mode,
NoSSEHeader: o.NoSSEHeader,
XPaddingBytes: o.XPaddingBytes,
XPaddingObfsMode: o.XPaddingObfsMode,
XPaddingKey: o.XPaddingKey,
XPaddingHeader: o.XPaddingHeader,
XPaddingPlacement: o.XPaddingPlacement,
UplinkHTTPMethod: o.UplinkHTTPMethod,
SessionPlacement: o.SessionPlacement,
SessionKey: o.SessionKey,
SeqPlacement: o.SeqPlacement,
SeqKey: o.SeqKey,
UplinkDataPlacement: o.UplinkDataPlacement,
UplinkDataKey: o.UplinkDataKey,
UplinkChunkSize: o.UplinkChunkSize,
ScStreamUpServerSecs: o.ScStreamUpServerSecs,
ScMaxBufferedPosts: o.ScMaxBufferedPosts,
ScMaxEachPostBytes: o.ScMaxEachPostBytes,
+121
View File
@@ -44,6 +44,7 @@ func testInboundVless(t *testing.T, inboundOptions inbound.VlessOption, outbound
outboundOptions.Server = addrPort.Addr().String()
outboundOptions.Port = int(addrPort.Port())
outboundOptions.UUID = userUUID
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewVless(outboundOptions)
if !assert.NoError(t, err) {
@@ -488,6 +489,77 @@ func TestInboundVless_XHTTP_Reality(t *testing.T) {
}
}
// TestInboundVless_XHTTP_Encryption runs a VLESS-over-XHTTP loopback with
// ML-KEM-768/X25519 VLESS encryption enabled on both ends. It iterates every
// XHTTP mode and, per mode, tests with and without XMUX reuse, and with and
// without split download settings.
func TestInboundVless_XHTTP_Encryption(t *testing.T) {
// Server side keeps the generated private key; the client uses the password
// (public counterpart). The blank third return is ignored here.
privateKeyBase64, passwordBase64, _, err := encryption.GenX25519("")
if err != nil {
t.Fatal(err)
return
}
testCases := []struct {
mode string
}{
{mode: "auto"},
{mode: "stream-one"},
{mode: "stream-up"},
{mode: "packet-up"},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.mode, func(t *testing.T) {
// getConfig builds a fresh inbound/outbound pair so each subtest
// can mutate its copy without affecting siblings.
getConfig := func() (inbound.VlessOption, outbound.VlessOption) {
inboundOptions := inbound.VlessOption{
// "600s" is embedded in the decryption spec string — presumably a
// server-side session/ticket lifetime; see the encryption package.
Decryption: "mlkem768x25519plus.native.600s." + privateKeyBase64,
XHTTPConfig: inbound.XHTTPConfig{
Path: "/vless-xhttp",
Host: "example.com",
Mode: testCase.mode,
},
}
outboundOptions := outbound.VlessOption{
Encryption: "mlkem768x25519plus.native.0rtt." + passwordBase64,
Network: "xhttp",
XHTTPOpts: outbound.XHTTPOptions{
Path: "/vless-xhttp",
Host: "example.com",
Mode: testCase.mode,
},
}
return inboundOptions, outboundOptions
}
// "nosplit": uplink and downlink share one XHTTP configuration.
t.Run("nosplit", func(t *testing.T) {
t.Run("single", func(t *testing.T) {
inboundOptions, outboundOptions := getConfig()
testInboundVless(t, inboundOptions, outboundOptions)
})
t.Run("reuse", func(t *testing.T) {
inboundOptions, outboundOptions := getConfig()
testInboundVless(t, inboundOptions, withXHTTPReuse(outboundOptions))
})
})
// "split": downlink uses separate DownloadSettings.
t.Run("split", func(t *testing.T) {
if testCase.mode == "stream-one" { // stream-one does not support download settings
return
}
t.Run("single", func(t *testing.T) {
inboundOptions, outboundOptions := getConfig()
outboundOptions.XHTTPOpts.DownloadSettings = &outbound.XHTTPDownloadSettings{}
testInboundVless(t, inboundOptions, outboundOptions)
})
t.Run("reuse", func(t *testing.T) {
inboundOptions, outboundOptions := getConfig()
outboundOptions.XHTTPOpts.DownloadSettings = &outbound.XHTTPDownloadSettings{}
testInboundVless(t, inboundOptions, withXHTTPReuse(outboundOptions))
})
})
})
}
}
func TestInboundVless_XHTTP_PacketUp_H1(t *testing.T) {
getConfig := func() (inbound.VlessOption, outbound.VlessOption) {
inboundOptions := inbound.VlessOption{
@@ -524,6 +596,55 @@ func TestInboundVless_XHTTP_PacketUp_H1(t *testing.T) {
})
}
// TestInboundVless_XHTTP_PacketUp_H1_Encryption runs a VLESS-over-XHTTP
// loopback in packet-up mode forced onto HTTP/1.1 (ALPN "http/1.1"), with
// ML-KEM-768/X25519 VLESS encryption, plus xtls-rprx-vision variants.
func TestInboundVless_XHTTP_PacketUp_H1_Encryption(t *testing.T) {
// Server keeps the private key; client gets the password (public part).
privateKeyBase64, passwordBase64, _, err := encryption.GenX25519("")
if err != nil {
t.Fatal(err)
return
}
// Fresh config pair per subtest so mutations (e.g. Flow) stay local.
getConfig := func() (inbound.VlessOption, outbound.VlessOption) {
inboundOptions := inbound.VlessOption{
Decryption: "mlkem768x25519plus.native.600s." + privateKeyBase64,
XHTTPConfig: inbound.XHTTPConfig{
Path: "/vless-xhttp",
Host: "example.com",
Mode: "packet-up",
},
}
outboundOptions := outbound.VlessOption{
Encryption: "mlkem768x25519plus.native.0rtt." + passwordBase64,
Network: "xhttp",
// Restrict ALPN to force the HTTP/1.1 code path in the XHTTP transport.
ALPN: []string{"http/1.1"},
XHTTPOpts: outbound.XHTTPOptions{
Path: "/vless-xhttp",
Host: "example.com",
Mode: "packet-up",
},
}
return inboundOptions, outboundOptions
}
t.Run("default", func(t *testing.T) {
inboundOptions, outboundOptions := getConfig()
testInboundVless(t, inboundOptions, outboundOptions)
t.Run("xtls-rprx-vision", func(t *testing.T) {
outboundOptions := outboundOptions
outboundOptions.Flow = "xtls-rprx-vision"
testInboundVless(t, inboundOptions, outboundOptions)
})
})
// NOTE(review): unlike TestInboundVless_XHTTP_Encryption, this "reuse"
// subtest never wraps outboundOptions in withXHTTPReuse, so it currently
// repeats the "default" case verbatim. Confirm whether withXHTTPReuse was
// intended here, or whether XMUX reuse is unsupported over http/1.1.
t.Run("reuse", func(t *testing.T) {
inboundOptions, outboundOptions := getConfig()
testInboundVless(t, inboundOptions, outboundOptions)
t.Run("xtls-rprx-vision", func(t *testing.T) {
outboundOptions := outboundOptions
outboundOptions.Flow = "xtls-rprx-vision"
testInboundVless(t, inboundOptions, outboundOptions)
})
})
}
func withXHTTPReuse(out outbound.VlessOption) outbound.VlessOption {
out.XHTTPOpts.ReuseSettings = &outbound.XHTTPReuseSettings{
MaxConnections: "0",
+1
View File
@@ -45,6 +45,7 @@ func testInboundVMess(t *testing.T, inboundOptions inbound.VmessOption, outbound
outboundOptions.UUID = userUUID
outboundOptions.AlterID = 0
outboundOptions.Cipher = "auto"
outboundOptions.DialerForAPI = tunnel.NewDialer()
out, err := outbound.NewVmess(outboundOptions)
if !assert.NoError(t, err) {
+14
View File
@@ -159,6 +159,20 @@ func New(config LC.VlessServer, tunnel C.Tunnel, additions ...inbound.Addition)
Host: config.XHTTPConfig.Host,
Path: config.XHTTPConfig.Path,
Mode: config.XHTTPConfig.Mode,
XPaddingBytes: config.XHTTPConfig.XPaddingBytes,
XPaddingObfsMode: config.XHTTPConfig.XPaddingObfsMode,
XPaddingKey: config.XHTTPConfig.XPaddingKey,
XPaddingHeader: config.XHTTPConfig.XPaddingHeader,
XPaddingPlacement: config.XHTTPConfig.XPaddingPlacement,
XPaddingMethod: config.XHTTPConfig.XPaddingMethod,
UplinkHTTPMethod: config.XHTTPConfig.UplinkHTTPMethod,
SessionPlacement: config.XHTTPConfig.SessionPlacement,
SessionKey: config.XHTTPConfig.SessionKey,
SeqPlacement: config.XHTTPConfig.SeqPlacement,
SeqKey: config.XHTTPConfig.SeqKey,
UplinkDataPlacement: config.XHTTPConfig.UplinkDataPlacement,
UplinkDataKey: config.XHTTPConfig.UplinkDataKey,
UplinkChunkSize: config.XHTTPConfig.UplinkChunkSize,
NoSSEHeader: config.XHTTPConfig.NoSSEHeader,
ScStreamUpServerSecs: config.XHTTPConfig.ScStreamUpServerSecs,
ScMaxBufferedPosts: config.XHTTPConfig.ScMaxBufferedPosts,
+249
View File
@@ -0,0 +1,249 @@
package xhttp
import (
"math"
"strconv"
"strings"
"time"
"github.com/metacubex/http"
"github.com/metacubex/randv2"
)
// ChromeVersion picks a plausible current Chrome major version. The value
// drifts forward with wall-clock time and is pulled back by a curved random
// lag, modeling installs that have not yet updated.
func ChromeVersion() int {
	// Chrome 144 shipped on 2026-01-13; majors land roughly every 35 days.
	const (
		baseVersion  = 144
		releaseCycle = 35
	)
	anchorDay := time.Date(2026, 1, 13, 0, 0, 0, 0, time.UTC).Unix() / 86400
	nowDay := time.Now().Unix() / 86400
	// Squaring the uniform sample biases the lag toward zero.
	lag := int(math.Floor(math.Pow(randv2.Float64(), 2) * 105))
	elapsed := int(nowDay-anchorDay-releaseCycle) - lag
	return baseVersion + elapsed/releaseCycle
}
// safariMinorMap maps elapsed 15-day windows since a Safari major release to
// the minor version current in that window; SafariVersion indexes it with
// (elapsed seconds)/1296000. 25 entries cover a full release year.
var safariMinorMap [25]int = [25]int{0, 0, 0, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 4, 4,
4, 5, 5, 5, 5, 5, 6, 6, 6, 6}
// The following version generators use deterministic anchors, with the random
// lag's distribution scaled by a power curve.

// CurlVersion returns a plausible current curl version string ("8.N.0").
func CurlVersion() string {
	// curl 8.0.0 was released on 2023-03-20.
	anchorDay := time.Date(2023, 3, 20, 0, 0, 0, 0, time.UTC).Unix() / 86400
	nowDay := time.Now().Unix() / 86400
	lag := int(math.Floor(math.Pow(randv2.Float64(), 2) * 165))
	minor := (int(nowDay-anchorDay-60) - lag) / 57 // real cadence is ~56.67 days
	return "8." + strconv.Itoa(minor) + ".0"
}
// FirefoxVersion returns a plausible current Firefox major version, anchored
// at Firefox 128 and advancing one major per ~30 days, minus a curved random
// lag to model not-yet-updated installs.
func FirefoxVersion() int {
// Firefox 128 ESR was released on 09/07/2024 (the original comment said
// 2023, which contradicts the 2024 anchor below).
// NOTE(review): the anchor is 2024-07-29, not the 07-09 release day —
// presumably deliberate padding; confirm.
var timeCurrent int64 = time.Now().Unix() / 86400
var timeStart int64 = time.Date(2024, 7, 29, 0, 0, 0, 0, time.UTC).Unix() / 86400
var timeDiff = timeCurrent - timeStart - 25 - int64(math.Floor(math.Pow(randv2.Float64(), 2)*50))
return int(timeDiff/30) + 128
}
// SafariVersion returns a plausible current Safari version string
// "<major>.<minor>", where major = releaseYear - 1999 and minor walks
// through safariMinorMap as the release year progresses.
func SafariVersion() string {
var anchoredTime time.Time = time.Now()
var releaseYear int = anchoredTime.Year()
// Major releases are anchored at Sep 23; the cutover is jittered by a
// cubed random delay (0-74 days) so the "upgrade" is not instantaneous.
var splitPoint time.Time = time.Date(releaseYear, 9, 23, 0, 0, 0, 0, time.UTC)
var delayedDays = int(math.Floor(math.Pow(randv2.Float64(), 3) * 75))
splitPoint = splitPoint.AddDate(0, 0, delayedDays)
if anchoredTime.Compare(splitPoint) < 0 {
// Before this year's (jittered) release date: still on last year's major,
// re-anchored to last year's split point with the same jitter.
releaseYear--
splitPoint = time.Date(releaseYear, 9, 23, 0, 0, 0, 0, time.UTC)
splitPoint = splitPoint.AddDate(0, 0, delayedDays)
}
// 1296000 s = 15 days per minor-version window (safariMinorMap index).
var minorVersion = safariMinorMap[(anchoredTime.Unix()-splitPoint.Unix())/1296000]
return strconv.Itoa(releaseYear-1999) + "." + strconv.Itoa(minorVersion)
}
// The full Chromium brand GREASE implementation: fabricate a throwaway
// "Not?A?Brand" entry and shuffle the brand list, both seeded by the major
// version so the output is stable for a given version.
var clientHintGreaseNA = []string{" ", "(", ":", "-", ".", "/", ")", ";", "=", "?", "_"}
var clientHintVersionNA = []string{"8", "99", "24"}
var clientHintShuffle3 = [][3]int{{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}}
var clientHintShuffle4 = [][4]int{
	{0, 1, 2, 3}, {0, 1, 3, 2}, {0, 2, 1, 3}, {0, 2, 3, 1}, {0, 3, 1, 2}, {0, 3, 2, 1},
	{1, 0, 2, 3}, {1, 0, 3, 2}, {1, 2, 0, 3}, {1, 2, 3, 0}, {1, 3, 0, 2}, {1, 3, 2, 0},
	{2, 0, 1, 3}, {2, 0, 3, 1}, {2, 1, 0, 3}, {2, 1, 3, 0}, {2, 3, 0, 1}, {2, 3, 1, 0},
	{3, 0, 1, 2}, {3, 0, 2, 1}, {3, 1, 0, 2}, {3, 1, 2, 0}, {3, 2, 0, 1}, {3, 2, 1, 0}}

// getGreasedChInvalidBrand renders the GREASE'd bogus brand entry, e.g.
// `"Not(A:Brand";v="8"`, picking separators and version from the seed.
func getGreasedChInvalidBrand(seed int) string {
	sep1 := clientHintGreaseNA[seed%len(clientHintGreaseNA)]
	sep2 := clientHintGreaseNA[(seed+1)%len(clientHintGreaseNA)]
	ver := clientHintVersionNA[seed%len(clientHintVersionNA)]
	var b strings.Builder
	b.WriteString("\"Not")
	b.WriteString(sep1)
	b.WriteString("A")
	b.WriteString(sep2)
	b.WriteString("Brand\";v=\"")
	b.WriteString(ver)
	b.WriteString("\"")
	return b.String()
}

// getGreasedChOrder returns a seed-determined permutation of brand indices
// for a brand list of the given length.
func getGreasedChOrder(brandLength int, seed int) []int {
	switch brandLength {
	case 1:
		return []int{0}
	case 2:
		first := seed % brandLength
		return []int{first, (first + 1) % brandLength}
	case 3:
		return clientHintShuffle3[seed%len(clientHintShuffle3)][:]
	default:
		return clientHintShuffle4[seed%len(clientHintShuffle4)][:]
	}
}

// getUngreasedChUa lists the brand entries before shuffling: the GREASE
// entry, "Chromium", and (for the known forks) the branded product entry.
func getUngreasedChUa(majorVersion int, forkName string) []string {
	version := strconv.Itoa(majorVersion)
	// Capacity 4 is the maximum allowed brand count, so append never regrows.
	brands := make([]string, 0, 4)
	brands = append(brands, getGreasedChInvalidBrand(majorVersion))
	brands = append(brands, "\"Chromium\";v=\""+version+"\"")
	if forkName == "chrome" {
		brands = append(brands, "\"Google Chrome\";v=\""+version+"\"")
	} else if forkName == "edge" {
		brands = append(brands, "\"Microsoft Edge\";v=\""+version+"\"")
	}
	return brands
}

// getGreasedChUa renders the final Sec-CH-UA value: the brand list in a
// seed-shuffled order, comma-joined.
func getGreasedChUa(majorVersion int, forkName string) string {
	brands := getUngreasedChUa(majorVersion, forkName)
	order := getGreasedChOrder(len(brands), majorVersion)
	out := make([]string, len(brands))
	for src, dst := range order {
		out[dst] = brands[src]
	}
	return strings.Join(out, ", ")
}
// The code below provides a coherent default browser user agent string based on a CPU-seeded PRNG.
var CurlUA = "curl/" + CurlVersion()

var AnchoredFirefoxVersion = strconv.Itoa(FirefoxVersion())
var FirefoxUA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:" + AnchoredFirefoxVersion + ".0) Gecko/20100101 Firefox/" + AnchoredFirefoxVersion + ".0"
var SafariUA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/" + SafariVersion() + " Safari/605.1.15"

// Chromium browsers.
var AnchoredChromeVersion = ChromeVersion()
var ChromeUA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/" + strconv.Itoa(AnchoredChromeVersion) + ".0.0.0 Safari/537.36"
var ChromeUACH = getGreasedChUa(AnchoredChromeVersion, "chrome")

// Fix: UA product tokens are space-separated. Without the leading space the
// string read "...Safari/537.36Edg/144.0.0.0", which no real Edge build
// sends and which would trivially fingerprint this client.
var MSEdgeUA = ChromeUA + " Edg/" + strconv.Itoa(AnchoredChromeVersion) + ".0.0.0"
var MSEdgeUACH = getGreasedChUa(AnchoredChromeVersion, "edge")
// applyMasqueradedHeaders fills header with a consistent set of headers
// imitating the given browser ("chrome", "edge", "firefox", "safari",
// "curl", "golang") for the given request variant ("nav" = top-level
// navigation, "ws" = websocket upgrade, "fetch" = XHR/fetch). Existing
// Cache-Control/Pragma/Accept/Priority values are never overwritten.
func applyMasqueradedHeaders(header http.Header, browser string, variant string) {
// Browser-specific.
// Direct map assignment (header["Sec-CH-UA"] = ...) preserves the exact
// key capitalization ("Sec-CH-UA", "DNT"); header.Set would canonicalize
// them to "Sec-Ch-Ua"/"Dnt", which real browsers do not send.
switch browser {
case "chrome":
header["Sec-CH-UA"] = []string{ChromeUACH}
header["Sec-CH-UA-Mobile"] = []string{"?0"}
header["Sec-CH-UA-Platform"] = []string{"\"Windows\""}
header["DNT"] = []string{"1"}
header.Set("User-Agent", ChromeUA)
header.Set("Accept-Language", "en-US,en;q=0.9")
case "edge":
header["Sec-CH-UA"] = []string{MSEdgeUACH}
header["Sec-CH-UA-Mobile"] = []string{"?0"}
header["Sec-CH-UA-Platform"] = []string{"\"Windows\""}
header["DNT"] = []string{"1"}
header.Set("User-Agent", MSEdgeUA)
header.Set("Accept-Language", "en-US,en;q=0.9")
case "firefox":
header.Set("User-Agent", FirefoxUA)
header["DNT"] = []string{"1"}
header.Set("Accept-Language", "en-US,en;q=0.5")
case "safari":
header.Set("User-Agent", SafariUA)
header.Set("Accept-Language", "en-US,en;q=0.9")
case "golang":
// Expose the default net/http header.
header.Del("User-Agent")
return
case "curl":
// curl sends only its UA; skip the browser-ish context headers below.
header.Set("User-Agent", CurlUA)
return
}
// Context-specific.
switch variant {
case "nav":
// Top-level navigation: Chromium sends Cache-Control: max-age=0.
if header.Get("Cache-Control") == "" {
switch browser {
case "chrome", "edge":
header.Set("Cache-Control", "max-age=0")
}
}
header.Set("Upgrade-Insecure-Requests", "1")
if header.Get("Accept") == "" {
switch browser {
case "chrome", "edge":
header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/jxl,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7")
case "firefox", "safari":
header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
}
}
header.Set("Sec-Fetch-Site", "none")
header.Set("Sec-Fetch-Mode", "navigate")
// Safari omits Sec-Fetch-User; every other masqueraded browser sends it.
switch browser {
case "safari":
default:
header.Set("Sec-Fetch-User", "?1")
}
header.Set("Sec-Fetch-Dest", "document")
header.Set("Priority", "u=0, i")
case "ws":
header.Set("Sec-Fetch-Mode", "websocket")
switch browser {
case "safari":
// Safari is NOT web-compliant here!
header.Set("Sec-Fetch-Dest", "websocket")
default:
header.Set("Sec-Fetch-Dest", "empty")
}
header.Set("Sec-Fetch-Site", "same-origin")
if header.Get("Cache-Control") == "" {
header.Set("Cache-Control", "no-cache")
}
if header.Get("Pragma") == "" {
header.Set("Pragma", "no-cache")
}
if header.Get("Accept") == "" {
header.Set("Accept", "*/*")
}
case "fetch":
header.Set("Sec-Fetch-Mode", "cors")
header.Set("Sec-Fetch-Dest", "empty")
header.Set("Sec-Fetch-Site", "same-origin")
// Priority hints differ per engine; only set when the caller left it empty.
if header.Get("Priority") == "" {
switch browser {
case "chrome", "edge":
header.Set("Priority", "u=1, i")
case "firefox":
header.Set("Priority", "u=4")
case "safari":
header.Set("Priority", "u=3, i")
}
}
if header.Get("Cache-Control") == "" {
header.Set("Cache-Control", "no-cache")
}
if header.Get("Pragma") == "" {
header.Set("Pragma", "no-cache")
}
if header.Get("Accept") == "" {
header.Set("Accept", "*/*")
}
}
}
// TryDefaultHeadersWith resolves the special User-Agent placeholder values
// used by transports ("chrome", "firefox", "safari", "edge", "curl",
// "golang") into a full masqueraded header set for the given variant. A
// missing User-Agent defaults to the Chrome persona; any other concrete UA
// string is left untouched. Formerly known as HandleTransportUASettings;
// kept separate from applyMasqueradedHeaders to keep the dispatch readable.
func TryDefaultHeadersWith(header http.Header, variant string) {
	if len(header.Values("User-Agent")) < 1 {
		applyMasqueradedHeaders(header, "chrome", variant)
		return
	}
	switch ua := header.Get("User-Agent"); ua {
	case "chrome", "firefox", "safari", "edge", "curl", "golang":
		applyMasqueradedHeaders(header, ua, variant)
	}
}
+3 -9
View File
@@ -21,7 +21,6 @@ import (
"github.com/metacubex/quic-go"
"github.com/metacubex/quic-go/http3"
"github.com/metacubex/tls"
"golang.org/x/sync/semaphore"
)
// ConnIdleTimeout defines the maximum time an idle TCP session can survive in the tunnel,
@@ -115,7 +114,7 @@ func (c *PacketUpWriter) write(b []byte) (int, error) {
Path: c.cfg.NormalizedPath(),
}
req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, u.String(), nil)
req, err := http.NewRequestWithContext(c.ctx, c.cfg.GetNormalizedUplinkHTTPMethod(), u.String(), nil)
if err != nil {
return 0, err
}
@@ -177,12 +176,7 @@ func NewTransport(dialRaw DialRawFunc, wrapTLS WrapTLSFunc, dialQUIC DialQUICFun
}
}
if len(alpn) == 1 && alpn[0] == "http/1.1" { // `alpn: [http/1.1]` means using http/1.1 mode
w := semaphore.NewWeighted(20) // limit concurrent dialing to avoid WSAECONNREFUSED on Windows
dialContext := func(ctx context.Context, network, addr string) (net.Conn, error) {
if err := w.Acquire(ctx, 1); err != nil {
return nil, err
}
defer w.Release(1)
raw, err := dialRaw(ctx)
if err != nil {
return nil, err
@@ -359,7 +353,7 @@ func (c *Client) DialStreamOne() (net.Conn, error) {
},
})
req, err := http.NewRequestWithContext(ctx, http.MethodPost, requestURL.String(), pr)
req, err := http.NewRequestWithContext(ctx, c.cfg.GetNormalizedUplinkHTTPMethod(), requestURL.String(), pr)
if err != nil {
_ = pr.Close()
_ = pw.Close()
@@ -470,7 +464,7 @@ func (c *Client) DialStreamUp() (net.Conn, error) {
uploadReq, err := http.NewRequestWithContext(
c.ctx,
http.MethodPost,
c.cfg.GetNormalizedUplinkHTTPMethod(),
streamURL.String(),
pr,
)
+337 -74
View File
@@ -2,6 +2,7 @@ package xhttp
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"math/rand"
@@ -11,6 +12,16 @@ import (
"github.com/metacubex/http"
)
const (
PlacementQueryInHeader = "queryInHeader"
PlacementCookie = "cookie"
PlacementHeader = "header"
PlacementQuery = "query"
PlacementPath = "path"
PlacementBody = "body"
PlacementAuto = "auto"
)
type Config struct {
Host string
Path string
@@ -18,6 +29,19 @@ type Config struct {
Headers map[string]string
NoGRPCHeader bool
XPaddingBytes string
XPaddingObfsMode bool
XPaddingKey string
XPaddingHeader string
XPaddingPlacement string
XPaddingMethod string
UplinkHTTPMethod string
SessionPlacement string
SessionKey string
SeqPlacement string
SeqKey string
UplinkDataPlacement string
UplinkDataKey string
UplinkChunkSize string
NoSSEHeader bool // server only
ScStreamUpServerSecs string // server only
ScMaxBufferedPosts string // server only
@@ -70,37 +94,92 @@ func (c *Config) NormalizedPath() string {
return path
}
func (c *Config) RequestHeader() http.Header {
func (c *Config) GetRequestHeader() http.Header {
h := http.Header{}
for k, v := range c.Headers {
h.Set(k, v)
}
if h.Get("User-Agent") == "" {
h.Set("User-Agent", "Mozilla/5.0")
}
if h.Get("Accept") == "" {
h.Set("Accept", "*/*")
}
if h.Get("Accept-Language") == "" {
h.Set("Accept-Language", "en-US,en;q=0.9")
}
if h.Get("Cache-Control") == "" {
h.Set("Cache-Control", "no-cache")
}
if h.Get("Pragma") == "" {
h.Set("Pragma", "no-cache")
}
TryDefaultHeadersWith(h, "fetch")
return h
}
func (c *Config) RandomPadding() (string, error) {
r, err := ParseRange(c.XPaddingBytes, "100-1000")
if err != nil {
return "", fmt.Errorf("invalid x-padding-bytes: %w", err)
func (c *Config) GetRequestHeaderWithPayload(payload []byte, uplinkChunkSize Range) http.Header {
header := c.GetRequestHeader()
key := c.UplinkDataKey
encodedData := base64.RawURLEncoding.EncodeToString(payload)
for i := 0; len(encodedData) > 0; i++ {
chunkSize := uplinkChunkSize.Rand()
if len(encodedData) < chunkSize {
chunkSize = len(encodedData)
}
chunk := encodedData[:chunkSize]
encodedData = encodedData[chunkSize:]
headerKey := fmt.Sprintf("%s-%d", key, i)
header.Set(headerKey, chunk)
}
return strings.Repeat("X", r.Rand()), nil
return header
}
func (c *Config) GetRequestCookiesWithPayload(payload []byte, uplinkChunkSize Range) []*http.Cookie {
cookies := []*http.Cookie{}
key := c.UplinkDataKey
encodedData := base64.RawURLEncoding.EncodeToString(payload)
for i := 0; len(encodedData) > 0; i++ {
chunkSize := uplinkChunkSize.Rand()
if len(encodedData) < chunkSize {
chunkSize = len(encodedData)
}
chunk := encodedData[:chunkSize]
encodedData = encodedData[chunkSize:]
cookieName := fmt.Sprintf("%s_%d", key, i)
cookies = append(cookies, &http.Cookie{Name: cookieName, Value: chunk})
}
return cookies
}
func (c *Config) WriteResponseHeader(writer http.ResponseWriter, requestMethod string, requestHeader http.Header) {
if origin := requestHeader.Get("Origin"); origin == "" {
writer.Header().Set("Access-Control-Allow-Origin", "*")
} else {
// Chrome says: The value of the 'Access-Control-Allow-Origin' header in the response must not be the wildcard '*' when the request's credentials mode is 'include'.
writer.Header().Set("Access-Control-Allow-Origin", origin)
}
if c.GetNormalizedSessionPlacement() == PlacementCookie ||
c.GetNormalizedSeqPlacement() == PlacementCookie ||
c.XPaddingPlacement == PlacementCookie ||
c.GetNormalizedUplinkDataPlacement() == PlacementCookie {
writer.Header().Set("Access-Control-Allow-Credentials", "true")
}
if requestMethod == "OPTIONS" {
requestedMethod := requestHeader.Get("Access-Control-Request-Method")
if requestedMethod != "" {
writer.Header().Set("Access-Control-Allow-Methods", requestedMethod)
} else {
writer.Header().Set("Access-Control-Allow-Methods", "*")
}
requestedHeaders := requestHeader.Get("Access-Control-Request-Headers")
if requestedHeaders == "" {
writer.Header().Set("Access-Control-Allow-Headers", "*")
} else {
writer.Header().Set("Access-Control-Allow-Headers", requestedHeaders)
}
}
}
// GetNormalizedUplinkHTTPMethod returns the configured uplink HTTP method,
// defaulting to POST when unset.
func (c *Config) GetNormalizedUplinkHTTPMethod() string {
	if method := c.UplinkHTTPMethod; method != "" {
		return method
	}
	return "POST"
}
func (c *Config) GetNormalizedScStreamUpServerSecs() (Range, error) {
@@ -144,6 +223,84 @@ func (c *Config) GetNormalizedScMinPostsIntervalMs() (Range, error) {
return r, nil
}
// GetNormalizedUplinkChunkSize parses uplink-chunk-size and, when it is not
// configured (Max == 0), picks a placement-appropriate default: 2-3 KiB
// chunks for cookies, 3-4 KiB for headers, and the sc-max-each-post-bytes
// range for body placement. Explicit ranges are clamped to a 64-byte floor.
func (c *Config) GetNormalizedUplinkChunkSize() (Range, error) {
	parsed, err := ParseRange(c.UplinkChunkSize, "")
	if err != nil {
		return Range{}, fmt.Errorf("invalid uplink-chunk-size: %w", err)
	}
	if parsed.Max == 0 {
		switch c.GetNormalizedUplinkDataPlacement() {
		case PlacementCookie:
			return Range{Min: 2 * 1024, Max: 3 * 1024}, nil // 2-3 KiB
		case PlacementHeader:
			return Range{Min: 3 * 1024, Max: 4 * 1024}, nil // 3-4 KiB
		default:
			return c.GetNormalizedScMaxEachPostBytes()
		}
	}
	if parsed.Min < 64 {
		parsed.Min = 64
		if parsed.Max < 64 {
			parsed.Max = 64
		}
	}
	return parsed, nil
}
// GetNormalizedSessionPlacement returns where the session ID travels,
// defaulting to the URL path.
func (c *Config) GetNormalizedSessionPlacement() string {
	if c.SessionPlacement != "" {
		return c.SessionPlacement
	}
	return PlacementPath
}
// GetNormalizedSeqPlacement returns where the sequence number travels,
// defaulting to the URL path.
func (c *Config) GetNormalizedSeqPlacement() string {
	if c.SeqPlacement != "" {
		return c.SeqPlacement
	}
	return PlacementPath
}
// GetNormalizedUplinkDataPlacement returns where uplink payload data travels,
// defaulting to the request body.
func (c *Config) GetNormalizedUplinkDataPlacement() string {
	if c.UplinkDataPlacement != "" {
		return c.UplinkDataPlacement
	}
	return PlacementBody
}
// GetNormalizedSessionKey returns the parameter name that carries the session
// ID. When unset, the default depends on the placement: a header-style name
// for header placement, a lowercase token for cookie/query placement, and ""
// for path placement (which needs no key).
func (c *Config) GetNormalizedSessionKey() string {
	if c.SessionKey != "" {
		return c.SessionKey
	}
	placement := c.GetNormalizedSessionPlacement()
	if placement == PlacementHeader {
		return "X-Session"
	}
	if placement == PlacementCookie || placement == PlacementQuery {
		return "x_session"
	}
	return ""
}
// GetNormalizedSeqKey returns the parameter name that carries the sequence
// number. Defaults mirror GetNormalizedSessionKey: "X-Seq" for header
// placement, "x_seq" for cookie/query placement, "" for path placement.
func (c *Config) GetNormalizedSeqKey() string {
	if c.SeqKey != "" {
		return c.SeqKey
	}
	placement := c.GetNormalizedSeqPlacement()
	if placement == PlacementHeader {
		return "X-Seq"
	}
	if placement == PlacementCookie || placement == PlacementQuery {
		return "x_seq"
	}
	return ""
}
type Range struct {
Min int
Max int
@@ -231,32 +388,6 @@ func (c *ReuseConfig) ResolveEntryConfig() (Range, Range, Range, error) {
return cMaxReuseTimes, hMaxRequestTimes, hMaxReusableSecs, nil
}
// FillStreamRequest prepares req for a stream-style connection: installs the
// configured request headers, advertises random padding through a synthetic
// Referer query parameter, applies the session metadata to the request, and
// marks bodied requests as gRPC unless that is disabled.
func (c *Config) FillStreamRequest(req *http.Request, sessionID string) error {
	req.Header = c.RequestHeader()
	paddingValue, err := c.RandomPadding()
	if err != nil {
		return err
	}
	if paddingValue != "" {
		// Carry the padding as an "x_padding" query parameter inside the
		// Referer header, appended with ? or & depending on whether the URL
		// already has a query string.
		rawURL := req.URL.String()
		sep := "?"
		if strings.Contains(rawURL, "?") {
			sep = "&"
		}
		req.Header.Set("Referer", rawURL+sep+"x_padding="+paddingValue)
	}
	// Stream requests carry no sequence number; only the session ID is applied.
	c.ApplyMetaToRequest(req, sessionID, "")
	if req.Body != nil && !c.NoGRPCHeader {
		// A gRPC content type encourages middleboxes to treat the upload as
		// a long-lived stream rather than buffering it.
		req.Header.Set("Content-Type", "application/grpc")
	}
	return nil
}
func appendToPath(path, value string) string {
if strings.HasSuffix(path, "/") {
return path + value
@@ -264,53 +395,185 @@ func appendToPath(path, value string) string {
return path + "/" + value
}
func (c *Config) ApplyMetaToRequest(req *http.Request, sessionID string, seqStr string) {
if sessionID != "" {
req.URL.Path = appendToPath(req.URL.Path, sessionID)
// ApplyMetaToRequest attaches the session ID and sequence string to req using
// their configured placements: URL path segment, query parameter, request
// header, or cookie. Empty values are skipped entirely, so a stream request
// can pass seqStr == "" without side effects.
func (c *Config) ApplyMetaToRequest(req *http.Request, sessionID string, seqStr string) {
	sessionPlacement := c.GetNormalizedSessionPlacement()
	seqPlacement := c.GetNormalizedSeqPlacement()
	sessionKey := c.GetNormalizedSessionKey()
	seqKey := c.GetNormalizedSeqKey()
	if sessionID != "" {
		switch sessionPlacement {
		case PlacementPath:
			req.URL.Path = appendToPath(req.URL.Path, sessionID)
		case PlacementQuery:
			q := req.URL.Query()
			q.Set(sessionKey, sessionID)
			req.URL.RawQuery = q.Encode()
		case PlacementHeader:
			req.Header.Set(sessionKey, sessionID)
		case PlacementCookie:
			req.AddCookie(&http.Cookie{Name: sessionKey, Value: sessionID})
		}
	}
	if seqStr != "" {
		switch seqPlacement {
		case PlacementPath:
			// Path placement appends seq after the session segment (if any).
			req.URL.Path = appendToPath(req.URL.Path, seqStr)
		case PlacementQuery:
			q := req.URL.Query()
			q.Set(seqKey, seqStr)
			req.URL.RawQuery = q.Encode()
		case PlacementHeader:
			req.Header.Set(seqKey, seqStr)
		case PlacementCookie:
			req.AddCookie(&http.Cookie{Name: seqKey, Value: seqStr})
		}
	}
}
func (c *Config) FillPacketRequest(req *http.Request, sessionID string, seqStr string, payload []byte) error {
req.Header = c.RequestHeader()
req.Body = io.NopCloser(bytes.NewReader(payload))
req.ContentLength = int64(len(payload))
// ExtractMetaFromRequest is the server-side inverse of ApplyMetaToRequest: it
// recovers the session ID and sequence string from req, reading each value
// from its configured placement (path segments after the base path, query
// parameter, header, or cookie). Missing values are returned as "".
// NOTE(review): path placement assumes req.URL.Path begins with path —
// confirm callers guarantee the prefix before slicing.
func (c *Config) ExtractMetaFromRequest(req *http.Request, path string) (sessionID string, seqStr string) {
	sessionPlacement := c.GetNormalizedSessionPlacement()
	seqPlacement := c.GetNormalizedSeqPlacement()
	sessionKey := c.GetNormalizedSessionKey()
	seqKey := c.GetNormalizedSeqKey()
	var subpath []string
	pathPart := 0
	if sessionPlacement == PlacementPath || seqPlacement == PlacementPath {
		// Path-placed values are the "/"-separated segments that follow the
		// configured base path: session first, then seq.
		subpath = strings.Split(req.URL.Path[len(path):], "/")
	}
	switch sessionPlacement {
	case PlacementPath:
		if len(subpath) > pathPart {
			sessionID = subpath[pathPart]
			pathPart++
		}
	case PlacementQuery:
		sessionID = req.URL.Query().Get(sessionKey)
	case PlacementHeader:
		sessionID = req.Header.Get(sessionKey)
	case PlacementCookie:
		if cookie, e := req.Cookie(sessionKey); e == nil {
			sessionID = cookie.Value
		}
	}
	switch seqPlacement {
	case PlacementPath:
		if len(subpath) > pathPart {
			seqStr = subpath[pathPart]
			pathPart++
		}
	case PlacementQuery:
		seqStr = req.URL.Query().Get(seqKey)
	case PlacementHeader:
		seqStr = req.Header.Get(seqKey)
	case PlacementCookie:
		if cookie, e := req.Cookie(seqKey); e == nil {
			seqStr = cookie.Value
		}
	}
	return sessionID, seqStr
}
func (c *Config) FillStreamRequest(req *http.Request, sessionID string) error {
req.Header = c.GetRequestHeader()
xPaddingBytes, err := c.GetNormalizedXPaddingBytes()
if err != nil {
return err
}
if paddingValue != "" {
rawURL := req.URL.String()
sep := "?"
if strings.Contains(rawURL, "?") {
sep = "&"
length := xPaddingBytes.Rand()
config := XPaddingConfig{Length: length}
if c.XPaddingObfsMode {
config.Placement = XPaddingPlacement{
Placement: c.XPaddingPlacement,
Key: c.XPaddingKey,
Header: c.XPaddingHeader,
RawURL: req.URL.String(),
}
config.Method = PaddingMethod(c.XPaddingMethod)
} else {
config.Placement = XPaddingPlacement{
Placement: PlacementQueryInHeader,
Key: "x_padding",
Header: "Referer",
RawURL: req.URL.String(),
}
req.Header.Set("Referer", rawURL+sep+"x_padding="+paddingValue)
}
c.ApplyMetaToRequest(req, sessionID, seqStr)
c.ApplyXPaddingToRequest(req, config)
c.ApplyMetaToRequest(req, sessionID, "")
if req.Body != nil && !c.NoGRPCHeader { // stream-up/one
req.Header.Set("Content-Type", "application/grpc")
}
return nil
}
// FillDownloadRequest prepares the GET request that opens the server-to-client
// download channel; it is configured exactly like a stream request (headers,
// padding, and session metadata are all applied by FillStreamRequest).
func (c *Config) FillDownloadRequest(req *http.Request, sessionID string) error {
	return c.FillStreamRequest(req, sessionID)
}
paddingValue, err := c.RandomPadding()
func (c *Config) FillPacketRequest(request *http.Request, sessionId string, seqStr string, data []byte) error {
dataPlacement := c.GetNormalizedUplinkDataPlacement()
if dataPlacement == PlacementBody || dataPlacement == PlacementAuto {
request.Header = c.GetRequestHeader()
request.Body = io.NopCloser(bytes.NewReader(data))
request.ContentLength = int64(len(data))
} else {
request.Body = nil
request.ContentLength = 0
switch dataPlacement {
case PlacementHeader:
uplinkChunkSize, err := c.GetNormalizedUplinkChunkSize()
if err != nil {
return err
}
request.Header = c.GetRequestHeaderWithPayload(data, uplinkChunkSize)
case PlacementCookie:
request.Header = c.GetRequestHeader()
uplinkChunkSize, err := c.GetNormalizedUplinkChunkSize()
if err != nil {
return err
}
for _, cookie := range c.GetRequestCookiesWithPayload(data, uplinkChunkSize) {
request.AddCookie(cookie)
}
}
}
xPaddingBytes, err := c.GetNormalizedXPaddingBytes()
if err != nil {
return err
}
if paddingValue != "" {
rawURL := req.URL.String()
sep := "?"
if strings.Contains(rawURL, "?") {
sep = "&"
length := xPaddingBytes.Rand()
config := XPaddingConfig{Length: length}
if c.XPaddingObfsMode {
config.Placement = XPaddingPlacement{
Placement: c.XPaddingPlacement,
Key: c.XPaddingKey,
Header: c.XPaddingHeader,
RawURL: request.URL.String(),
}
config.Method = PaddingMethod(c.XPaddingMethod)
} else {
config.Placement = XPaddingPlacement{
Placement: PlacementQueryInHeader,
Key: "x_padding",
Header: "Referer",
RawURL: request.URL.String(),
}
req.Header.Set("Referer", rawURL+sep+"x_padding="+paddingValue)
}
c.ApplyMetaToRequest(req, sessionID, "")
c.ApplyXPaddingToRequest(request, config)
c.ApplyMetaToRequest(request, sessionId, seqStr)
return nil
}
+229 -137
View File
@@ -1,6 +1,9 @@
package xhttp
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"net"
"strconv"
@@ -98,6 +101,7 @@ type requestHandler struct {
connHandler func(net.Conn)
httpHandler http.Handler
xPaddingBytes Range
scMaxEachPostBytes Range
scStreamUpServerSecs Range
scMaxBufferedPosts Range
@@ -107,6 +111,10 @@ type requestHandler struct {
}
func NewServerHandler(opt ServerOption) (http.Handler, error) {
xPaddingBytes, err := opt.Config.GetNormalizedXPaddingBytes()
if err != nil {
return nil, err
}
scMaxEachPostBytes, err := opt.Config.GetNormalizedScMaxEachPostBytes()
if err != nil {
return nil, err
@@ -125,6 +133,7 @@ func NewServerHandler(opt ServerOption) (http.Handler, error) {
config: opt.Config,
connHandler: opt.ConnHandler,
httpHandler: opt.HttpHandler,
xPaddingBytes: xPaddingBytes,
scMaxEachPostBytes: scMaxEachPostBytes,
scStreamUpServerSecs: scStreamUpServerSecs,
scMaxBufferedPosts: scMaxBufferedPosts,
@@ -134,7 +143,7 @@ func NewServerHandler(opt ServerOption) (http.Handler, error) {
}), nil
}
func (h *requestHandler) getOrCreateSession(sessionID string) *httpSession {
func (h *requestHandler) upsertSession(sessionID string) *httpSession {
h.mu.Lock()
defer h.mu.Unlock()
@@ -161,8 +170,6 @@ func (h *requestHandler) getOrCreateSession(sessionID string) *httpSession {
return s
}
func (h *requestHandler) deleteSession(sessionID string) {
h.mu.Lock()
defer h.mu.Unlock()
@@ -239,11 +246,227 @@ func (h *requestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
rest := strings.TrimPrefix(r.URL.Path, path)
parts := splitNonEmpty(rest)
h.config.WriteResponseHeader(w, r.Method, r.Header)
length := h.xPaddingBytes.Rand()
config := XPaddingConfig{Length: length}
if h.config.XPaddingObfsMode {
config.Placement = XPaddingPlacement{
Placement: h.config.XPaddingPlacement,
Key: h.config.XPaddingKey,
Header: h.config.XPaddingHeader,
}
config.Method = PaddingMethod(h.config.XPaddingMethod)
} else {
config.Placement = XPaddingPlacement{
Placement: PlacementHeader,
Header: "X-Padding",
}
}
h.config.ApplyXPaddingToResponse(w, config)
if r.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return
}
paddingValue, _ := h.config.ExtractXPaddingFromRequest(r, h.config.XPaddingObfsMode)
if !h.config.IsPaddingValid(paddingValue, h.xPaddingBytes.Min, h.xPaddingBytes.Max, PaddingMethod(h.config.XPaddingMethod)) {
http.Error(w, "invalid xpadding", http.StatusBadRequest)
return
}
sessionId, seqStr := h.config.ExtractMetaFromRequest(r, path)
var currentSession *httpSession
if sessionId != "" {
currentSession = h.upsertSession(sessionId)
}
// stream-up upload: POST /path/{session}
if r.Method != http.MethodGet && sessionId != "" && seqStr == "" && h.allowStreamUpUpload() {
httpSC := newHTTPServerConn(w, r.Body)
err := currentSession.uploadQueue.Push(Packet{
Reader: httpSC,
})
if err != nil {
http.Error(w, err.Error(), http.StatusConflict)
return
}
// magic header instructs nginx + apache to not buffer response body
w.Header().Set("X-Accel-Buffering", "no")
// A web-compliant header telling all middleboxes to disable caching.
// Should be able to prevent overloading the cache, or stop CDNs from
// teeing the response stream into their cache, causing slowdowns.
w.Header().Set("Cache-Control", "no-store")
if !h.config.NoSSEHeader {
// magic header to make the HTTP middle box consider this as SSE to disable buffer
w.Header().Set("Content-Type", "text/event-stream")
}
w.WriteHeader(http.StatusOK)
referrer := r.Header.Get("Referer")
if referrer != "" && h.scStreamUpServerSecs.Max > 0 {
go func() {
for {
_, err := httpSC.Write(bytes.Repeat([]byte{'X'}, int(h.xPaddingBytes.Rand())))
if err != nil {
break
}
time.Sleep(time.Duration(h.scStreamUpServerSecs.Rand()) * time.Second)
}
}()
}
select {
case <-r.Context().Done():
case <-httpSC.Wait():
}
_ = httpSC.Close()
return
}
// packet-up upload: POST /path/{session}/{seq}
if r.Method != http.MethodGet && sessionId != "" && seqStr != "" && h.allowPacketUpUpload() {
scMaxEachPostBytes := h.scMaxEachPostBytes.Max
dataPlacement := h.config.GetNormalizedUplinkDataPlacement()
uplinkDataKey := h.config.UplinkDataKey
var headerPayload []byte
var err error
if dataPlacement == PlacementAuto || dataPlacement == PlacementHeader {
var headerPayloadChunks []string
for i := 0; true; i++ {
chunk := r.Header.Get(fmt.Sprintf("%s-%d", uplinkDataKey, i))
if chunk == "" {
break
}
headerPayloadChunks = append(headerPayloadChunks, chunk)
}
headerPayloadEncoded := strings.Join(headerPayloadChunks, "")
headerPayload, err = base64.RawURLEncoding.DecodeString(headerPayloadEncoded)
if err != nil {
http.Error(w, "invalid base64 in header's payload", http.StatusBadRequest)
return
}
}
var cookiePayload []byte
if dataPlacement == PlacementAuto || dataPlacement == PlacementCookie {
var cookiePayloadChunks []string
for i := 0; true; i++ {
cookieName := fmt.Sprintf("%s_%d", uplinkDataKey, i)
if c, _ := r.Cookie(cookieName); c != nil {
cookiePayloadChunks = append(cookiePayloadChunks, c.Value)
} else {
break
}
}
cookiePayloadEncoded := strings.Join(cookiePayloadChunks, "")
cookiePayload, err = base64.RawURLEncoding.DecodeString(cookiePayloadEncoded)
if err != nil {
http.Error(w, "invalid base64 in cookies' payload", http.StatusBadRequest)
return
}
}
var bodyPayload []byte
if dataPlacement == PlacementAuto || dataPlacement == PlacementBody {
if r.ContentLength > int64(scMaxEachPostBytes) {
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
return
}
bodyPayload, err = io.ReadAll(io.LimitReader(r.Body, int64(scMaxEachPostBytes)+1))
if err != nil {
http.Error(w, "failed to read body", http.StatusBadRequest)
return
}
}
var payload []byte
switch dataPlacement {
case PlacementHeader:
payload = headerPayload
case PlacementCookie:
payload = cookiePayload
case PlacementBody:
payload = bodyPayload
case PlacementAuto:
payload = headerPayload
payload = append(payload, cookiePayload...)
payload = append(payload, bodyPayload...)
}
if len(payload) > h.scMaxEachPostBytes.Max {
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
return
}
seq, err := strconv.ParseUint(seqStr, 10, 64)
if err != nil {
http.Error(w, "invalid xhttp seq", http.StatusBadRequest)
return
}
err = currentSession.uploadQueue.Push(Packet{
Seq: seq,
Payload: payload,
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if len(payload) == 0 {
// Methods without a body are usually cached by default.
w.Header().Set("Cache-Control", "no-store")
}
w.WriteHeader(http.StatusOK)
return
}
// stream-up/packet-up download: GET /path/{session}
if r.Method == http.MethodGet && sessionId != "" && seqStr == "" && h.allowSessionDownload() {
currentSession.markConnected()
// magic header instructs nginx + apache to not buffer response body
w.Header().Set("X-Accel-Buffering", "no")
// A web-compliant header telling all middleboxes to disable caching.
// Should be able to prevent overloading the cache, or stop CDNs from
// teeing the response stream into their cache, causing slowdowns.
w.Header().Set("Cache-Control", "no-store")
if !h.config.NoSSEHeader {
// magic header to make the HTTP middle box consider this as SSE to disable buffer
w.Header().Set("Content-Type", "text/event-stream")
}
w.WriteHeader(http.StatusOK)
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
httpSC := newHTTPServerConn(w, r.Body)
conn := &Conn{
writer: httpSC,
reader: currentSession.uploadQueue,
onClose: func() {
h.deleteSession(sessionId)
},
}
httputils.SetAddrFromRequest(&conn.NetAddr, r)
go h.connHandler(N.NewDeadlineConn(conn))
select {
case <-r.Context().Done():
case <-httpSC.Wait():
}
_ = conn.Close()
return
}
// stream-one: POST /path
if r.Method == http.MethodPost && len(parts) == 0 && h.allowStreamOne() {
if r.Method != http.MethodGet && sessionId == "" && seqStr == "" && h.allowStreamOne() {
w.Header().Set("X-Accel-Buffering", "no")
w.Header().Set("Cache-Control", "no-store")
w.WriteHeader(http.StatusOK)
@@ -269,137 +492,6 @@ func (h *requestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
// stream-up/packet-up download: GET /path/{session}
if r.Method == http.MethodGet && len(parts) == 1 && h.allowSessionDownload() {
sessionID := parts[0]
session := h.getOrCreateSession(sessionID)
session.markConnected()
// magic header instructs nginx + apache to not buffer response body
w.Header().Set("X-Accel-Buffering", "no")
// A web-compliant header telling all middleboxes to disable caching.
// Should be able to prevent overloading the cache, or stop CDNs from
// teeing the response stream into their cache, causing slowdowns.
w.Header().Set("Cache-Control", "no-store")
if !h.config.NoSSEHeader {
// magic header to make the HTTP middle box consider this as SSE to disable buffer
w.Header().Set("Content-Type", "text/event-stream")
}
w.WriteHeader(http.StatusOK)
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
httpSC := newHTTPServerConn(w, r.Body)
conn := &Conn{
writer: httpSC,
reader: session.uploadQueue,
onClose: func() {
h.deleteSession(sessionID)
},
}
httputils.SetAddrFromRequest(&conn.NetAddr, r)
go h.connHandler(N.NewDeadlineConn(conn))
select {
case <-r.Context().Done():
case <-httpSC.Wait():
}
_ = conn.Close()
return
}
// stream-up upload: POST /path/{session}
if r.Method == http.MethodPost && len(parts) == 1 && h.allowStreamUpUpload() {
sessionID := parts[0]
session := h.getOrCreateSession(sessionID)
httpSC := newHTTPServerConn(w, r.Body)
err := session.uploadQueue.Push(Packet{
Reader: httpSC,
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// magic header instructs nginx + apache to not buffer response body
w.Header().Set("X-Accel-Buffering", "no")
// A web-compliant header telling all middleboxes to disable caching.
// Should be able to prevent overloading the cache, or stop CDNs from
// teeing the response stream into their cache, causing slowdowns.
w.Header().Set("Cache-Control", "no-store")
if !h.config.NoSSEHeader {
// magic header to make the HTTP middle box consider this as SSE to disable buffer
w.Header().Set("Content-Type", "text/event-stream")
}
w.WriteHeader(http.StatusOK)
referrer := r.Header.Get("Referer")
if referrer != "" && h.scStreamUpServerSecs.Max > 0 {
go func() {
for {
paddingValue, _ := h.config.RandomPadding()
if paddingValue == "" {
break
}
_, err = httpSC.Write([]byte(paddingValue))
if err != nil {
break
}
time.Sleep(time.Duration(h.scStreamUpServerSecs.Rand()) * time.Second)
}
}()
}
select {
case <-r.Context().Done():
case <-httpSC.Wait():
}
_ = httpSC.Close()
return
}
// packet-up upload: POST /path/{session}/{seq}
if r.Method == http.MethodPost && len(parts) == 2 && h.allowPacketUpUpload() {
sessionID := parts[0]
seq, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
http.Error(w, "invalid xhttp seq", http.StatusBadRequest)
return
}
session := h.getOrCreateSession(sessionID)
if r.ContentLength > int64(h.scMaxEachPostBytes.Max) {
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
return
}
body, err := io.ReadAll(io.LimitReader(r.Body, int64(h.scMaxEachPostBytes.Max)+1))
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = session.uploadQueue.Push(Packet{
Seq: seq,
Payload: body,
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if len(body) == 0 {
w.Header().Set("Cache-Control", "no-store")
}
w.WriteHeader(http.StatusOK)
return
}
http.NotFound(w, r)
}
+9 -7
View File
@@ -78,22 +78,24 @@ func TestServerHandlerModeRestrictions(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
config := Config{
Path: "/xhttp",
Mode: testCase.mode,
}
handler, err := NewServerHandler(ServerOption{
Config: Config{
Path: "/xhttp",
Mode: testCase.mode,
},
Config: config,
ConnHandler: func(conn net.Conn) {
_ = conn.Close()
},
})
if err != nil {
panic(err)
}
assert.NoError(t, err)
req := httptest.NewRequest(testCase.method, testCase.target, io.NopCloser(http.NoBody))
recorder := httptest.NewRecorder()
err = config.FillStreamRequest(req, "")
assert.NoError(t, err)
handler.ServeHTTP(recorder, req)
assert.Equal(t, testCase.wantStatus, recorder.Result().StatusCode)
+333
View File
@@ -0,0 +1,333 @@
package xhttp
import (
"crypto/rand"
"fmt"
"math"
"net/url"
"strings"
"github.com/metacubex/http"
"golang.org/x/net/http2/hpack"
)
// PaddingMethod selects how x_padding filler strings are generated.
type PaddingMethod string

const (
	// PaddingMethodRepeatX emits a run of 'X' characters of the exact length.
	PaddingMethodRepeatX PaddingMethod = "repeat-x"
	// PaddingMethodTokenish emits random base62 text sized by its HPACK
	// Huffman-encoded length, so it resembles an ordinary token on the wire.
	PaddingMethodTokenish PaddingMethod = "tokenish"
)

// charsetBase62 is the alphabet used for tokenish padding.
const charsetBase62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

// Huffman encoding gives ~20% size reduction for base62 sequences
const avgHuffmanBytesPerCharBase62 = 0.8

// validationTolerance is the slack (in bytes) allowed when generating or
// validating tokenish padding against a target Huffman-encoded length.
const validationTolerance = 2

// XPaddingPlacement describes where padding is carried.
type XPaddingPlacement struct {
	Placement string // one of the Placement* constants
	Key       string // cookie/query parameter name, when applicable
	Header    string // header name, for header-based placements
	RawURL    string // request URL used to rebuild query-in-header values
}

// XPaddingConfig bundles everything needed to generate and place one padding value.
type XPaddingConfig struct {
	Length    int // target padding size in bytes
	Placement XPaddingPlacement
	Method    PaddingMethod
}
// randStringFromCharset returns a cryptographically random string of length n
// whose characters are drawn uniformly from charset. The boolean is false
// when the request is unsatisfiable (n <= 0, empty charset, or a charset
// longer than 256 bytes, which a single random byte cannot index uniformly)
// or when the system randomness source fails.
func randStringFromCharset(n int, charset string) (string, bool) {
	m := len(charset)
	if n <= 0 || m == 0 || m > 256 {
		return "", false
	}
	// Rejection sampling: discard byte values >= limit so the modulo below is
	// unbiased. limit stays an int — for m == 256 it equals 256, which would
	// wrap to 0 in a byte and (wrongly) reject every sample.
	limit := 256 - 256%m
	result := make([]byte, n)
	i := 0
	buf := make([]byte, 256)
	for i < n {
		if _, err := rand.Read(buf); err != nil {
			return "", false
		}
		for _, rb := range buf {
			if int(rb) >= limit {
				continue
			}
			result[i] = charset[int(rb)%m]
			i++
			if i == n {
				break
			}
		}
	}
	return string(result), true
}
// absInt returns the absolute value of x.
func absInt(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// GenerateTokenishPaddingBase62 produces a random base62 string whose HPACK
// Huffman-encoded size lands within validationTolerance bytes of
// targetHuffmanBytes, so the padding occupies a predictable amount of space
// on an h2/h3 wire while looking like an ordinary token. Returns "" only if
// the randomness source fails.
func GenerateTokenishPaddingBase62(targetHuffmanBytes int) string {
	// Initial guess: base62 Huffman-encodes at roughly 0.8 bytes/char.
	n := int(math.Ceil(float64(targetHuffmanBytes) / avgHuffmanBytesPerCharBase62))
	if n < 1 {
		n = 1
	}
	randBase62Str, ok := randStringFromCharset(n, charsetBase62)
	if !ok {
		return ""
	}
	// Bounded search: each iteration moves the encoded length by roughly one
	// byte, so 150 iterations comfortably covers the initial estimate error.
	const maxIter = 150
	adjustChar := byte('X')
	// Adjust until close enough
	for iter := 0; iter < maxIter; iter++ {
		currentLength := int(hpack.HuffmanEncodeLength(randBase62Str))
		diff := currentLength - targetHuffmanBytes
		if absInt(diff) <= validationTolerance {
			return randBase62Str
		}
		if diff < 0 {
			// Too small -> append padding char(s)
			randBase62Str += string(adjustChar)
			// Avoid a long run of identical chars
			if adjustChar == 'X' {
				adjustChar = 'Z'
			} else {
				adjustChar = 'X'
			}
		} else {
			// Too big -> remove from the end
			if len(randBase62Str) <= 1 {
				return randBase62Str
			}
			randBase62Str = randBase62Str[:len(randBase62Str)-1]
		}
	}
	// Best effort: return the closest string found if we ran out of iterations.
	return randBase62Str
}
// GeneratePadding builds a padding string of (approximately) the given length
// using the requested method; unknown methods fall back to repeat-x, as does
// a failed tokenish generation. Returns "" for non-positive lengths.
//
// https://www.rfc-editor.org/rfc/rfc7541.html#appendix-B
// h2's HPACK Header Compression feature employs a huffman encoding using a static table.
// 'X' and 'Z' are assigned an 8 bit code, so HPACK compression won't change actual padding length on the wire.
// https://www.rfc-editor.org/rfc/rfc9204.html#section-4.1.2-2
// h3's similar QPACK feature uses the same huffman table.
func GeneratePadding(method PaddingMethod, length int) string {
	if length <= 0 {
		return ""
	}
	if method == PaddingMethodTokenish {
		if padding := GenerateTokenishPaddingBase62(length); padding != "" {
			return padding
		}
	}
	// repeat-x, unknown methods, and tokenish fallback all land here.
	return strings.Repeat("X", length)
}
func ApplyPaddingToCookie(req *http.Request, name, value string) {
if req == nil || name == "" || value == "" {
return
}
req.AddCookie(&http.Cookie{
Name: name,
Value: value,
Path: "/",
})
}
func ApplyPaddingToResponseCookie(writer http.ResponseWriter, name, value string) {
if name == "" || value == "" {
return
}
http.SetCookie(writer, &http.Cookie{
Name: name,
Value: value,
Path: "/",
})
}
func ApplyPaddingToQuery(u *url.URL, key, value string) {
if u == nil || key == "" || value == "" {
return
}
q := u.Query()
q.Set(key, value)
u.RawQuery = q.Encode()
}
// GetNormalizedXPaddingBytes parses x-padding-bytes, falling back to the
// default range 100-1000 when unset.
func (c *Config) GetNormalizedXPaddingBytes() (Range, error) {
	parsed, err := ParseRange(c.XPaddingBytes, "100-1000")
	if err != nil {
		return Range{}, fmt.Errorf("invalid x-padding-bytes: %w", err)
	}
	return parsed, nil
}
// ApplyXPaddingToHeader writes freshly generated padding into h.
// PlacementHeader stores the raw padding under the configured header name;
// PlacementQueryInHeader rebuilds config.Placement.RawURL with the padding as
// its query string and stores the resulting URL in the header (typically
// Referer). Other placements are ignored here.
func (c *Config) ApplyXPaddingToHeader(h http.Header, config XPaddingConfig) {
	if h == nil {
		return
	}
	paddingValue := GeneratePadding(config.Method, config.Length)
	switch p := config.Placement; p.Placement {
	case PlacementHeader:
		h.Set(p.Header, paddingValue)
	case PlacementQueryInHeader:
		u, err := url.Parse(p.RawURL)
		if err != nil || u == nil {
			return
		}
		// NOTE(review): this replaces any pre-existing query on RawURL
		// instead of appending to it — confirm that is intended.
		u.RawQuery = p.Key + "=" + paddingValue
		h.Set(p.Header, u.String())
	}
}
// ApplyXPaddingToRequest attaches generated padding to req at the placement
// named in config: header and query-in-header placements go through
// ApplyXPaddingToHeader; cookie and query placements attach the padding
// directly to the request.
func (c *Config) ApplyXPaddingToRequest(req *http.Request, config XPaddingConfig) {
	if req == nil {
		return
	}
	if req.Header == nil {
		req.Header = make(http.Header)
	}
	switch config.Placement.Placement {
	case PlacementHeader, PlacementQueryInHeader:
		c.ApplyXPaddingToHeader(req.Header, config)
	case PlacementCookie:
		ApplyPaddingToCookie(req, config.Placement.Key, GeneratePadding(config.Method, config.Length))
	case PlacementQuery:
		ApplyPaddingToQuery(req.URL, config.Placement.Key, GeneratePadding(config.Method, config.Length))
	}
}
// ApplyXPaddingToResponse attaches generated padding to an outgoing response:
// header-style placements go through ApplyXPaddingToHeader, cookie placement
// becomes a Set-Cookie header, and other placements (not applicable to
// responses) are ignored.
func (c *Config) ApplyXPaddingToResponse(writer http.ResponseWriter, config XPaddingConfig) {
	switch config.Placement.Placement {
	case PlacementHeader, PlacementQueryInHeader:
		c.ApplyXPaddingToHeader(writer.Header(), config)
	case PlacementCookie:
		ApplyPaddingToResponseCookie(writer, config.Placement.Key, GeneratePadding(config.Method, config.Length))
	}
}
// ExtractXPaddingFromRequest recovers the padding value a client attached to
// req. It returns the value and a human-readable description of where it was
// found ("", "" when absent).
//
// With obfsMode false the legacy locations are checked first: an "x_padding"
// query parameter inside the Referer header, or — only when no Referer is
// present at all — an "x_padding" parameter on the request URL. Note that a
// Referer that fails to parse falls through to the obfs-mode lookups below.
// With obfsMode true (or on fall-through) the configured key/header are
// probed in order: cookie, then header (raw or query-in-header depending on
// the configured placement), then URL query.
func (c *Config) ExtractXPaddingFromRequest(req *http.Request, obfsMode bool) (string, string) {
	if req == nil {
		return "", ""
	}
	if !obfsMode {
		referrer := req.Header.Get("Referer")
		if referrer != "" {
			if referrerURL, err := url.Parse(referrer); err == nil {
				// Returned even when the Referer carries no x_padding value;
				// the caller's validity check handles the empty string.
				paddingValue := referrerURL.Query().Get("x_padding")
				paddingPlacement := PlacementQueryInHeader + "=Referer, key=x_padding"
				return paddingValue, paddingPlacement
			}
		} else {
			paddingValue := req.URL.Query().Get("x_padding")
			return paddingValue, PlacementQuery + ", key=x_padding"
		}
	}
	key := c.XPaddingKey
	header := c.XPaddingHeader
	// Cookie placement takes priority over header and query lookups.
	if cookie, err := req.Cookie(key); err == nil {
		if cookie != nil && cookie.Value != "" {
			paddingValue := cookie.Value
			paddingPlacement := PlacementCookie + ", key=" + key
			return paddingValue, paddingPlacement
		}
	}
	headerValue := req.Header.Get(header)
	if headerValue != "" {
		if c.XPaddingPlacement == PlacementHeader {
			paddingPlacement := PlacementHeader + "=" + header
			return headerValue, paddingPlacement
		}
		// Otherwise treat the header value as a URL carrying the padding in
		// its query string (query-in-header placement).
		if parsedURL, err := url.Parse(headerValue); err == nil {
			paddingPlacement := PlacementQueryInHeader + "=" + header + ", key=" + key
			return parsedURL.Query().Get(key), paddingPlacement
		}
	}
	queryValue := req.URL.Query().Get(key)
	if queryValue != "" {
		paddingPlacement := PlacementQuery + ", key=" + key
		return queryValue, paddingPlacement
	}
	return "", ""
}
// IsPaddingValid reports whether a received padding value has a plausible
// size for the [from, to] range. When to <= 0 the configured x-padding-bytes
// range is substituted. Tokenish padding is measured by its HPACK Huffman
// encoded length with a small tolerance; every other method (including
// unknown ones) by raw byte length.
func (c *Config) IsPaddingValid(paddingValue string, from, to int, method PaddingMethod) bool {
	if paddingValue == "" {
		return false
	}
	if to <= 0 {
		if r, err := c.GetNormalizedXPaddingBytes(); err == nil {
			from, to = r.Min, r.Max
		}
	}
	if method == PaddingMethodTokenish {
		lo := from - validationTolerance
		if lo < 0 {
			lo = 0
		}
		hi := to + validationTolerance
		encoded := int(hpack.HuffmanEncodeLength(paddingValue))
		return encoded >= lo && encoded <= hi
	}
	// repeat-x and any unrecognized method: plain length check.
	size := len(paddingValue)
	return size >= from && size <= to
}
+2 -2
View File
@@ -8,12 +8,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ddns-go
PKG_VERSION:=6.16.8
PKG_VERSION:=6.16.9
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/jeessy2/ddns-go/tar.gz/v$(PKG_VERSION)?
PKG_HASH:=1fe5a923fc2ebaf73f46dabf905c307e8149bb33cda5b5d81a962f4cc47bef9c
PKG_HASH:=1a961050870e5a706c124de498ab84510f19ea49d9284b890a64860e1570ead2
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE
+1 -1
View File
@@ -16,7 +16,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-amlogic
PKG_VERSION:=3.1.288
PKG_VERSION:=3.1.289
PKG_RELEASE:=1
PKG_LICENSE:=GPL-2.0 License
@@ -4,7 +4,7 @@ config amlogic 'config'
option amlogic_firmware_repo 'https://github.com/breakingbadboy/OpenWrt'
option amlogic_firmware_tag 'ARMv8'
option amlogic_firmware_suffix '.img.gz'
option amlogic_kernel_path 'https://github.com/breakingbadboy/OpenWrt'
option amlogic_kernel_path 'https://github.com/ophub/kernel'
option amlogic_kernel_tags ''
option amlogic_kernel_branch ''
option amlogic_firmware_config '1'
@@ -133,6 +133,8 @@ else
kernel_tag="rk3588"
elif [[ "${kernel_uname}" =~ -rk35xx ]]; then
kernel_tag="rk35xx"
elif [[ "${kernel_uname}" =~ -flippy ]]; then
kernel_tag="flippy"
elif [[ "${kernel_uname}" =~ -h6|-zicai ]]; then
kernel_tag="h6"
else
@@ -0,0 +1,38 @@
#
# Copyright (C) 2017-2019 Chen Minqiang <ptpt52@gmail.com>
# Copyright 2022-2025 sirpdboy <herboy2008@gmail.com>
# This is free software, licensed under the GNU General Public License v3.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
LUCI_TITLE:=LuCI Support for wizard
LUCI_DEPENDS:=
LUCI_PKGARCH:=all
PKG_NAME:=luci-app-netwizard
PKG_VERSION:=2.1.5
PKG_RELEASE:=20260312
PKG_LICENSE:=GPLv3
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=sirpdboy <herboy2008@gmail.com>
define Package/luci-app-netwizard
CATEGORY:=X
SUBMENU:=Configuration netWizard Support
TITLE:=LuCI Support for netwizard
PKGARCH:=all
endef
define Package/luci-app-netwizard/description
LuCI Support for netwizard.
endef
define Package/luci-app-netwizard/conffiles
/etc/config/netwizard
endef
include $(TOPDIR)/feeds/luci/luci.mk
# call BuildPackage - OpenWrt buildroot signature
@@ -0,0 +1,93 @@
// Copyright 2023-2026 sirpdboy
'use strict';
'require baseclass';
'require rpc';
// RPC stub: asks the "ota" ubus object whether a firmware update is available.
var callOTACheck = rpc.declare({
	object: 'ota',
	method: 'check'
});

// RPC stub: reads a single UCI option value via the "uci" ubus object.
const callUciGet = rpc.declare({
	object: 'uci',
	method: 'get',
	params: ['config', 'section', 'option']
});
// Status-page partial: when the netwizard "updatacheck" option is enabled,
// asynchronously queries the OTA backend and injects an "Update available!"
// button next to the header indicators.
return baseclass.extend({
	title: _('Firmware Update'),

	// The real check is kicked off asynchronously from render(), so load()
	// only supplies a placeholder result.
	load: function() {
		return Promise.resolve({ code: -1 });
	},

	render: function() {
		callUciGet('netwizard', 'default', 'updatacheck')
			.then((res) => {
				const updatacheck = res?.value ?? '0';
				console.log('Update check setting:', updatacheck);
				// Only check for updates when the option is set to 1
				if (updatacheck == 1 || updatacheck == '1') {
					// Delay the check so the page finishes rendering first.
					setTimeout(() => {
						this.checkOTAUpdate();
					}, 1000);
				}
			})
			.catch((err) => {
				const updatacheck = '0';
			});
		return null;
	},

	// Queries the OTA backend once per page load (guarded by a window-level
	// flag); code 0 means an update is available.
	checkOTAUpdate: function() {
		if (window.otaCheckStarted) return;
		window.otaCheckStarted = true;
		callOTACheck()
			.then(data => {
				if (data && data.code === 0) {
					this.addUpdateButton();
				}
			})
			.catch(() => {
			});
	},

	// Inserts the "Update available!" button before the #indicators element,
	// unless it is already present.
	addUpdateButton: function() {
		if (document.getElementById('ota-notice')) {
			return;
		}
		var flashindicators = document.querySelector('#indicators');
		if (!flashindicators) return;
		var notice = document.createElement('div');
		notice.id = 'ota-notice';
		notice.innerHTML = [
			'<div style="color: white;">',
			'	<a href="' + L.url('admin/system/ota') + '" ',
			'	class="cbi-button cbi-button-action"',
			'	style="color: white; background: linear-gradient(135deg, #ff6b6b, #ee5a52);"',
			'	onmouseover="this.style.transform=\'translateY(-2px)\'; this.style.boxShadow=\'0 4px 12px rgba(0,0,0,0.15)\'"',
			'	onmouseout="this.style.transform=\'translateY(0)\'; this.style.boxShadow=\'none\'">',
			'	<i class="icon icon-forward"></i>',
			'	' + _('Update available!') + '',
			'	</a>',
			'</div>'
		].join('');
		flashindicators.parentNode.insertBefore(notice, flashindicators);
		this.addResponsiveStyle();
	},

	// Injects a one-off stylesheet that hides the brand text on narrow
	// screens so the button fits in the header.
	addResponsiveStyle: function() {
		if (document.getElementById('ota-responsive-style')) return;
		var style = document.createElement('style');
		style.id = 'ota-responsive-style';
		style.textContent = '@media (max-width: 480px) { header>.fill>.container>.flex1>.brand { display: none; } }';
		document.head.appendChild(style);
	}
});
+1
View File
@@ -0,0 +1 @@
zh_Hans

Some files were not shown because too many files have changed in this diff Show More