This commit is contained in:
TenderIronh
2025-11-24 10:53:02 +08:00
parent 471aa5e6ea
commit 8e303e93f8
19 changed files with 1808 additions and 843 deletions
+6 -3
View File
@@ -1,10 +1,8 @@
__debug_bin
__debug_bin.exe
# .vscode
test/
openp2p.exe*
*.log*
go.sum
*.tar.gz
*.zip
*.exe
@@ -21,4 +19,9 @@ wintun.dll
app/.idea/
*_debug_bin*
cmd/openp2p
vendor/
vendor/
config.json
openp2p
lib/openp2p.dll
cmd/config.json0
test/docker/Dockerfile
+8
View File
@@ -24,6 +24,14 @@ allprojects {
jcenter() // Warning: this repository is going to shut down soon
}
}
allprojects {
repositories {
maven { url 'https://maven.aliyun.com/repository/google' }
maven { url 'https://maven.aliyun.com/repository/central' }
maven { url 'https://maven.aliyun.com/repository/public' }
maven { url 'https://jitpack.io' }
}
}
task clean(type: Delete) {
delete rootProject.buildDir
+52 -41
View File
@@ -51,9 +51,10 @@ type AppConfig struct {
}
const (
PunchPriorityTCPFirst = 1
PunchPriorityUDPDisable = 1 << 1
PunchPriorityTCPDisable = 1 << 2
PunchPriorityUDPFirst = 0
PunchPriorityTCPFirst = 1
PunchPriorityTCPOnly = 1 << 1
PunchPriorityUDPOnly = 1 << 2
)
func (c *AppConfig) ID() uint64 {
@@ -147,6 +148,12 @@ func (c *Config) setSDWAN(s SDWANInfo) {
}
}
c.sdwan = s
if c.sdwan.TunnelNum < 2 {
c.sdwan.TunnelNum = 2 // DEBUG
}
if c.sdwan.TunnelNum > 3 {
c.sdwan.TunnelNum = 3
}
}
func (c *Config) switchApp(app AppConfig, enabled int) {
@@ -163,26 +170,16 @@ func (c *Config) switchApp(app AppConfig, enabled int) {
c.save()
}
// TODO: move to p2pnetwork
func (c *Config) retryApp(peerNode string) {
GNetwork.apps.Range(func(id, i interface{}) bool {
app := i.(*p2pApp)
if app.config.PeerNode == peerNode {
gLog.Println(LvDEBUG, "retry app ", app.config.LogPeerNode())
app.config.retryNum = 0
app.config.nextRetryTime = time.Now()
app.retryRelayNum = 0
app.nextRetryRelayTime = time.Now()
app.hbMtx.Lock()
app.hbTimeRelay = time.Now().Add(-TunnelHeartbeatTime * 3)
app.hbMtx.Unlock()
app.Retry(true)
}
if app.config.RelayNode == peerNode {
gLog.Println(LvDEBUG, "retry app relay=", app.config.LogPeerNode())
app.retryRelayNum = 0
app.nextRetryRelayTime = time.Now()
app.hbMtx.Lock()
app.hbTimeRelay = time.Now().Add(-TunnelHeartbeatTime * 3)
app.hbMtx.Unlock()
app.Retry(false)
gLog.d("retry app relay=%s", app.config.LogPeerNode())
}
return true
})
@@ -191,14 +188,7 @@ func (c *Config) retryApp(peerNode string) {
func (c *Config) retryAllApp() {
GNetwork.apps.Range(func(id, i interface{}) bool {
app := i.(*p2pApp)
gLog.Println(LvDEBUG, "retry app ", app.config.LogPeerNode())
app.config.retryNum = 0
app.config.nextRetryTime = time.Now()
app.retryRelayNum = 0
app.nextRetryRelayTime = time.Now()
app.hbMtx.Lock()
defer app.hbMtx.Unlock()
app.hbTimeRelay = time.Now().Add(-TunnelHeartbeatTime * 3)
app.Retry(true)
return true
})
}
@@ -209,19 +199,15 @@ func (c *Config) retryAllMemApp() {
if app.config.SrcPort != 0 {
return true
}
gLog.Println(LvDEBUG, "retry app ", app.config.LogPeerNode())
app.config.retryNum = 0
app.config.nextRetryTime = time.Now()
app.retryRelayNum = 0
app.nextRetryRelayTime = time.Now()
app.hbMtx.Lock()
defer app.hbMtx.Unlock()
app.hbTimeRelay = time.Now().Add(-TunnelHeartbeatTime * 3)
app.Retry(true)
return true
})
}
func (c *Config) add(app AppConfig, override bool) {
if app.AppName == "" {
app.AppName = fmt.Sprintf("%d", app.ID())
}
c.mtx.Lock()
defer c.mtx.Unlock()
defer c.save()
@@ -386,15 +372,19 @@ type NetworkConfig struct {
ShareBandwidth int
// server info
ServerHost string
ServerIP string
ServerPort int
NATDetectPort1 int
NATDetectPort2 int
PublicIPPort int
natDetectPort1 int
natDetectPort2 int
PublicIPPort int // both tcp and udp
specTunnel int
}
func parseParams(subCommand string, cmd string) {
fset := flag.NewFlagSet(subCommand, flag.ExitOnError)
installPath := fset.String("installpath", "", "custom install path")
serverHost := fset.String("serverhost", "api.openp2p.cn", "server host ")
insecure := fset.Bool("insecure", false, "not verify TLS certificate")
serverPort := fset.Int("serverport", WsPort, "server port ")
// serverHost := flag.String("serverhost", "127.0.0.1", "server host ") // for debug
token := fset.Uint64("token", 0, "token")
@@ -427,7 +417,6 @@ func parseParams(subCommand string, cmd string) {
fset.Parse(args)
}
gLog.setMaxSize(int64(*maxLogSize))
config := AppConfig{Enabled: 1}
config.PeerNode = *peerNode
config.DstHost = *dstIP
@@ -439,6 +428,19 @@ func parseParams(subCommand string, cmd string) {
config.PunchPriority = *punchPriority
config.AppName = *appName
config.RelayNode = *relayNode
if *installPath != "" {
defaultInstallPath = *installPath
}
if subCommand == "install" {
if err := os.MkdirAll(defaultInstallPath, 0775); err != nil {
gLog.e("parseParams MkdirAll %s error:%s", defaultInstallPath, err)
return
}
if err := os.Chdir(defaultInstallPath); err != nil {
gLog.e("parseParams Chdir error:%s", err)
return
}
}
if !*newconfig {
gConf.load() // load old config. otherwise will clear all apps
}
@@ -470,11 +472,20 @@ func parseParams(subCommand string, cmd string) {
if f.Name == "token" {
gConf.setToken(*token)
}
if f.Name == "serverport" {
gConf.Network.ServerPort = *serverPort
}
if f.Name == "insecure" {
gConf.TLSInsecureSkipVerify = *insecure
}
})
// set default value
if gConf.Network.ServerHost == "" {
gConf.Network.ServerHost = *serverHost
}
if gConf.Network.ServerPort == 0 {
gConf.Network.ServerPort = *serverPort
}
if *node != "" {
gConf.setNode(*node)
} else {
@@ -488,7 +499,7 @@ func parseParams(subCommand string, cmd string) {
}
if gConf.Network.PublicIPPort == 0 {
if *publicIPPort == 0 {
p := int(gConf.nodeID()%15000 + 50000)
p := int(gConf.nodeID()%8192 + 1025)
publicIPPort = &p
}
gConf.Network.PublicIPPort = *publicIPPort
@@ -501,9 +512,9 @@ func parseParams(subCommand string, cmd string) {
}
}
}
gConf.Network.ServerPort = *serverPort
gConf.Network.NATDetectPort1 = NATDetectPort1
gConf.Network.NATDetectPort2 = NATDetectPort2
gConf.Network.natDetectPort1 = NATDetectPort1
gConf.Network.natDetectPort2 = NATDetectPort2
gLog.setLevel(LogLevel(gConf.LogLevel))
gLog.setMaxSize(int64(gConf.MaxLogSize))
if *notVerbose {
+74 -39
View File
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"net"
"os"
@@ -68,7 +69,10 @@ func handlePush(subType uint16, msg []byte) error {
gLog.e("Unmarshal %v:%s", reflect.TypeOf(req), err)
return err
}
gLog.Println(LvDEBUG, "handle MsgPushServerSideSaveMemApp:", prettyJson(req))
gLog.d("handle MsgPushServerSideSaveMemApp:%s", prettyJson(req))
if req.RelayIndex > uint32(gConf.sdwan.TunnelNum-1) {
return errors.New("wrong relay index")
}
var existTunnel *P2PTunnel
i, ok := GNetwork.allTunnels.Load(req.TunnelID)
if !ok {
@@ -81,18 +85,24 @@ func handlePush(subType uint16, msg []byte) error {
}
existTunnel = i.(*P2PTunnel)
peerID := NodeNameToID(req.From)
existApp, appok := GNetwork.apps.Load(peerID)
appIdx := peerID
if req.SrcPort != 0 {
appIdx = req.AppID
}
existApp, appok := GNetwork.apps.Load(appIdx)
if appok {
app := existApp.(*p2pApp)
app.config.AppName = fmt.Sprintf("%d", peerID)
app.id = req.AppID
app.setRelayTunnelID(req.RelayTunnelID)
app.relayMode = req.RelayMode
app.hbTimeRelay = time.Now()
app.key = req.AppKey
app.PreCalcKeyBytes()
app.relayMode[req.RelayIndex] = req.RelayMode
app.hbTime[req.RelayIndex] = time.Now()
if req.RelayTunnelID == 0 {
app.setDirectTunnel(existTunnel)
app.SetTunnel(existTunnel, 0)
} else {
app.setRelayTunnel(existTunnel)
app.SetTunnel(existTunnel, int(req.RelayIndex)) // TODO: merge two func
app.SetRelayTunnelID(req.RelayTunnelID, int(req.RelayIndex)) // direct tunnel rtid=0, no need set rtid
}
gLog.d("found existing memapp, update it")
} else {
@@ -102,22 +112,32 @@ func handlePush(subType uint16, msg []byte) error {
appConfig.AppName = fmt.Sprintf("%d", peerID)
appConfig.PeerNode = req.From
app := p2pApp{
id: req.AppID,
config: appConfig,
relayMode: req.RelayMode,
running: true,
hbTimeRelay: time.Now(),
id: req.AppID,
config: appConfig,
running: true,
// asyncWriteChan: make(chan []byte, WriteDataChanSize),
key: req.AppKey,
}
app.PreCalcKeyBytes()
tunnelNum := 2
if req.TunnelNum > uint32(tunnelNum) {
tunnelNum = int(req.TunnelNum)
}
app.Init(tunnelNum)
app.relayMode[req.RelayIndex] = req.RelayMode
app.hbTime[req.RelayIndex] = time.Now()
if req.RelayTunnelID == 0 {
app.setDirectTunnel(existTunnel)
app.SetTunnel(existTunnel, 0)
} else {
app.setRelayTunnel(existTunnel)
app.setRelayTunnelID(req.RelayTunnelID)
app.SetTunnel(existTunnel, int(req.RelayIndex))
app.SetRelayTunnelID(req.RelayTunnelID, int(req.RelayIndex))
}
if req.RelayTunnelID != 0 {
app.relayNode = req.Node
app.relayNode[req.RelayIndex] = req.Node
}
GNetwork.apps.Store(NodeNameToID(req.From), &app)
app.Start(false)
GNetwork.apps.Store(appIdx, &app)
gLog.d("store memapp %d %d", appIdx, req.SrcPort)
}
return nil
@@ -155,6 +175,9 @@ func handlePush(subType uint16, msg []byte) error {
}
gConf.setNode(req.NewName)
gConf.setShareBandwidth(req.Bandwidth)
gConf.Forcev6 = (req.Forcev6 != 0)
gLog.i("set forcev6 to %v", gConf.Forcev6)
gConf.save()
os.Exit(0)
case MsgPushSwitchApp:
gLog.i("MsgPushSwitchApp")
@@ -178,6 +201,16 @@ func handlePush(subType uint16, msg []byte) error {
}
gLog.i("%s online, retryApp", req.Node)
gConf.retryApp(req.Node)
case MsgPushSpecTunnel:
req := SpecTunnel{}
if err = json.Unmarshal(msg[openP2PHeaderSize:], &req); err != nil {
gLog.e("Unmarshal %v:%s %s", reflect.TypeOf(req), err, string(msg[openP2PHeaderSize:]))
return err
}
gLog.i("SpecTunnel %d", req.TunnelIndex)
gConf.Network.specTunnel = int(req.TunnelIndex)
case MsgPushSDWanRefresh:
GNetwork.write(MsgSDWAN, MsgSDWANInfoReq, nil)
default:
i, ok := GNetwork.msgMap.Load(pushHead.From)
if !ok {
@@ -295,28 +328,24 @@ func handleReportApps() (err error) {
linkMode := LinkModeUDPPunch
var connectTime string
var retryTime string
var app *p2pApp
i, ok := GNetwork.apps.Load(config.ID())
if ok {
app = i.(*p2pApp)
if app.isActive() {
app := GNetwork.findApp(config)
if app != nil {
if app.IsActive() {
appActive = 1
}
if app.config.SrcPort == 0 { // memapp
continue
}
specRelayNode = app.config.RelayNode
if !app.isDirect() { // TODO: should always report relay node for app edit
relayNode = app.relayNode
relayMode = app.relayMode
t, tidx := app.AvailableTunnel()
if tidx != 0 { // TODO: should always report relay node for app edit
relayNode = app.relayNode[tidx]
relayMode = app.relayMode[tidx]
}
if app.Tunnel() != nil {
linkMode = app.Tunnel().linkModeWeb
if t != nil {
linkMode = t.linkModeWeb
}
retryTime = app.RetryTime().Local().Format("2006-01-02T15:04:05-0700")
connectTime = app.ConnectTime().Local().Format("2006-01-02T15:04:05-0700")
}
appInfo := AppInfo{
AppName: config.AppName,
@@ -358,13 +387,16 @@ func handleReportMemApps() (err error) {
i, ok := GNetwork.apps.Load(node.id)
var app *p2pApp
var t *P2PTunnel
var tidx int
if ok {
app = i.(*p2pApp)
if app.isActive() {
t, tidx = app.AvailableTunnel()
if app.IsActive() {
appActive = 1
}
if !app.isDirect() {
relayMode = app.relayMode
if tidx != 0 {
relayMode = app.relayMode[tidx]
}
retryTime = app.RetryTime().Local().Format("2006-01-02T15:04:05-0700")
connectTime = app.ConnectTime().Local().Format("2006-01-02T15:04:05-0700")
@@ -381,12 +413,13 @@ func handleReportMemApps() (err error) {
appInfo.Protocol = app.config.Protocol
appInfo.Whitelist = app.config.Whitelist
appInfo.SrcPort = app.config.SrcPort
if !app.isDirect() {
appInfo.RelayNode = app.relayNode
if tidx != 0 {
appInfo.RelayNode = app.relayNode[tidx]
}
if app.Tunnel() != nil {
appInfo.LinkMode = app.Tunnel().linkModeWeb
if t != nil {
appInfo.LinkMode = t.linkModeWeb
}
appInfo.DstHost = app.config.DstHost
appInfo.DstPort = app.config.DstPort
@@ -399,7 +432,9 @@ func handleReportMemApps() (err error) {
req.Apps = append(req.Apps, appInfo)
return true
})
gLog.Println(LvDEBUG, "handleReportMemApps res:", prettyJson(req))
req.TunError = GNetwork.sdwan.tunErr
gLog.d("handleReportMemApps res:%s", prettyJson(req))
gConf.retryAllMemApp()
return GNetwork.write(MsgReport, MsgReportMemApps, &req)
}
+1 -1
View File
@@ -149,7 +149,7 @@ func publicIPTest(publicIP string, echoPort int) (hasPublicIP int, hasUPNPorNATP
break
}
defer conn.Close()
dst, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", gConf.Network.ServerHost, gConf.Network.ServerPort))
dst, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", gConf.Network.ServerIP, gConf.Network.ServerPort))
if err != nil {
break
}
+35 -16
View File
@@ -2,6 +2,7 @@ package openp2p
import (
"fmt"
"log"
"math/rand"
"os"
"path/filepath"
@@ -27,13 +28,31 @@ func Run() {
case "uninstall":
uninstall(true)
return
case "start":
d := daemon{}
err := d.Control("start", "", nil)
if err != nil {
log.Println("openp2p start error:", err)
return
}
log.Println("openp2p start ok")
return
case "stop":
d := daemon{}
err := d.Control("stop", "", nil)
if err != nil {
log.Println("openp2p stop error:", err)
return
}
log.Println("openp2p stop ok")
return
}
} else {
installByFilename()
}
parseParams("", "")
gLog.Println(LvINFO, "openp2p start. version: ", OpenP2PVersion)
gLog.Println(LvINFO, "Contact: QQ group 16947733, Email openp2p.cn@gmail.com")
gLog.i("openp2p start. version: %s", OpenP2PVersion)
gLog.i("Contact: QQ group 16947733, Email openp2p.cn@gmail.com")
if gConf.daemonMode {
d := daemon{}
@@ -41,18 +60,18 @@ func Run() {
return
}
gLog.Printf(LvINFO, "node=%s, serverHost=%s, serverPort=%d", gConf.Network.Node, gConf.Network.ServerHost, gConf.Network.ServerPort)
gLog.i("node=%s, serverHost=%s, serverPort=%d", gConf.Network.Node, gConf.Network.ServerHost, gConf.Network.ServerPort)
setFirewall()
err := setRLimit()
if err != nil {
gLog.Println(LvINFO, "setRLimit error:", err)
gLog.i("setRLimit error:%s", err)
}
GNetwork = P2PNetworkInstance()
P2PNetworkInstance()
if ok := GNetwork.Connect(30000); !ok {
gLog.Println(LvERROR, "P2PNetwork login error")
gLog.e("P2PNetwork login error")
return
}
// gLog.Println(LvINFO, "waiting for connection...")
// gLog.i("waiting for connection...")
forever := make(chan bool)
<-forever
}
@@ -76,16 +95,16 @@ func RunAsModule(baseDir string, token string, bw int, logLevel int) *P2PNetwork
}
// gLog.setLevel(LogLevel(logLevel))
gConf.setShareBandwidth(bw)
gLog.Println(LvINFO, "openp2p start. version: ", OpenP2PVersion)
gLog.Println(LvINFO, "Contact: QQ group 16947733, Email openp2p.cn@gmail.com")
gLog.Printf(LvINFO, "node=%s, serverHost=%s, serverPort=%d", gConf.Network.Node, gConf.Network.ServerHost, gConf.Network.ServerPort)
gLog.i("openp2p start. version: %s", OpenP2PVersion)
gLog.i("Contact: QQ group 16947733, Email openp2p.cn@gmail.com")
gLog.i("node=%s, serverHost=%s, serverPort=%d", gConf.Network.Node, gConf.Network.ServerHost, gConf.Network.ServerPort)
GNetwork = P2PNetworkInstance()
P2PNetworkInstance()
if ok := GNetwork.Connect(30000); !ok {
gLog.Println(LvERROR, "P2PNetwork login error")
gLog.e("P2PNetwork login error")
return nil
}
// gLog.Println(LvINFO, "waiting for connection...")
// gLog.i("waiting for connection...")
return GNetwork
}
@@ -99,11 +118,11 @@ func RunCmd(cmd string) {
setFirewall()
err := setRLimit()
if err != nil {
gLog.Println(LvINFO, "setRLimit error:", err)
gLog.i("setRLimit error:%s", err)
}
GNetwork = P2PNetworkInstance()
P2PNetworkInstance()
if ok := GNetwork.Connect(30000); !ok {
gLog.Println(LvERROR, "P2PNetwork login error")
gLog.e("P2PNetwork login error")
return
}
forever := make(chan bool)
+7 -5
View File
@@ -10,14 +10,16 @@ import (
)
const (
tunIfaceName = "utun"
PIHeaderSize = 4 // utun has no IFF_NO_PI
tunIfaceName = "utun"
PIHeaderSize = 4 // utun has no IFF_NO_PI
ReadTunBuffSize = 2048
ReadTunBuffNum = 16
)
func (t *optun) Start(localAddr string, detail *SDWANInfo) error {
var err error
t.tunName = tunIfaceName
t.dev, err = tun.CreateTUN(t.tunName, 1420)
t.dev, err = tun.CreateTUN(t.tunName, int(detail.Mtu))
if err != nil {
return err
}
@@ -72,10 +74,10 @@ func delRoutesByGateway(gateway string) error {
cmd := exec.Command("route", "delete", fields[0], gateway)
err := cmd.Run()
if err != nil {
gLog.Printf(LvERROR, "Delete route %s error:%s", fields[0], err)
gLog.e("Delete route %s error:%s", fields[0], err)
continue
}
gLog.Printf(LvINFO, "Delete route ok: %s %s\n", fields[0], gateway)
gLog.i("Delete route ok: %s %s\n", fields[0], gateway)
}
}
return nil
+13 -5
View File
@@ -7,6 +7,7 @@ package openp2p
import (
"fmt"
"net"
"os"
"os/exec"
"strings"
@@ -17,6 +18,9 @@ import (
const (
tunIfaceName = "optun"
PIHeaderSize = 0
// sdwan
ReadTunBuffSize = 2048
ReadTunBuffNum = 16
)
var previousIP = ""
@@ -24,10 +28,14 @@ var previousIP = ""
func (t *optun) Start(localAddr string, detail *SDWANInfo) error {
var err error
t.tunName = tunIfaceName
t.dev, err = tun.CreateTUN(t.tunName, 1420)
t.dev, err = tun.CreateTUN(t.tunName, int(detail.Mtu))
if err != nil {
return err
}
err = os.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte("1"), 0644)
if err != nil {
gLog.e("write ip_forward error:%s", err)
}
return nil
}
@@ -44,8 +52,8 @@ func setTunAddr(ifname, localAddr, remoteAddr string, wintun interface{}) error
if err != nil {
return err
}
netlink.LinkSetMTU(ifce, 1375)
netlink.LinkSetTxQLen(ifce, 100)
netlink.LinkSetMTU(ifce, int(gConf.getSDWAN().Mtu))
netlink.LinkSetTxQLen(ifce, 1000)
netlink.LinkSetUp(ifce)
ln, err := netlink.ParseIPNet(localAddr)
@@ -124,10 +132,10 @@ func delRoutesByGateway(gateway string) error {
delCmd := exec.Command("route", "del", "-net", fields[0], "gw", gateway)
err := delCmd.Run()
if err != nil {
gLog.Printf(LvERROR, "Delete route %s error:%s", fields[0], err)
gLog.e("Delete route %s error:%s", fields[0], err)
continue
}
gLog.Printf(LvINFO, "Delete route ok: %s %s %s\n", fields[0], fields[1], gateway)
gLog.i("Delete route ok: %s %s %s\n", fields[0], fields[1], gateway)
}
}
return nil
+9 -7
View File
@@ -14,8 +14,10 @@ import (
)
const (
tunIfaceName = "optun"
PIHeaderSize = 0
tunIfaceName = "optun"
PIHeaderSize = 0
ReadTunBuffSize = 2048
ReadTunBuffNum = 16
)
var previousIP = ""
@@ -23,7 +25,7 @@ var previousIP = ""
func (t *optun) Start(localAddr string, detail *SDWANInfo) error {
var err error
t.tunName = tunIfaceName
t.dev, err = tun.CreateTUN(t.tunName, 1420)
t.dev, err = tun.CreateTUN(t.tunName, int(detail.Mtu))
if err != nil {
return err
}
@@ -43,8 +45,8 @@ func setTunAddr(ifname, localAddr, remoteAddr string, wintun interface{}) error
if err != nil {
return err
}
netlink.LinkSetMTU(ifce, 1375)
netlink.LinkSetTxQLen(ifce, 100)
netlink.LinkSetMTU(ifce, int(gConf.getSDWAN().Mtu))
netlink.LinkSetTxQLen(ifce, 1000)
netlink.LinkSetUp(ifce)
ln, err := netlink.ParseIPNet(localAddr)
@@ -123,10 +125,10 @@ func delRoutesByGateway(gateway string) error {
delCmd := exec.Command("route", "del", "-net", fields[0], "gw", gateway)
err := delCmd.Run()
if err != nil {
gLog.Printf(LvERROR, "Delete route %s error:%s", fields[0], err)
gLog.e("Delete route %s error:%s", fields[0], err)
continue
}
gLog.Printf(LvINFO, "Delete route ok: %s %s %s\n", fields[0], fields[1], gateway)
gLog.i("Delete route ok: %s %s %s\n", fields[0], fields[1], gateway)
}
}
return nil
+9 -6
View File
@@ -19,6 +19,9 @@ import (
const (
tunIfaceName = "optun"
PIHeaderSize = 0
// sdwan
ReadTunBuffSize = 1024 * 64 // wintun will read date len > mtu, default 64k
ReadTunBuffNum = 4
)
func (t *optun) Start(localAddr string, detail *SDWANInfo) error {
@@ -42,9 +45,9 @@ func (t *optun) Start(localAddr string, detail *SDWANInfo) error {
Data3: 0x4567,
Data4: [8]byte{0x80, 0x42, 0x83, 0x7e, 0xf4, 0x56, 0xce, 0x13},
}
t.dev, err = tun.CreateTUNWithRequestedGUID(t.tunName, uuid, 1420)
t.dev, err = tun.CreateTUNWithRequestedGUID(t.tunName, uuid, int(detail.Mtu))
if err != nil { // retry
t.dev, err = tun.CreateTUNWithRequestedGUID(t.tunName, uuid, 1420)
t.dev, err = tun.CreateTUNWithRequestedGUID(t.tunName, uuid, int(detail.Mtu))
}
if err != nil {
@@ -67,12 +70,12 @@ func setTunAddr(ifname, localAddr, remoteAddr string, wintun interface{}) error
link := winipcfg.LUID(nativeTunDevice.LUID())
ip, err := netip.ParsePrefix(localAddr)
if err != nil {
gLog.Printf(LvERROR, "ParsePrefix error:%s, luid:%d,localAddr:%s", err, nativeTunDevice.LUID(), localAddr)
gLog.e("ParsePrefix error:%s, luid:%d,localAddr:%s", err, nativeTunDevice.LUID(), localAddr)
return err
}
err = link.SetIPAddresses([]netip.Prefix{ip})
if err != nil {
gLog.Printf(LvERROR, "SetIPAddresses error:%s, netip.Prefix:%+v", err, []netip.Prefix{ip})
gLog.e("SetIPAddresses error:%s, netip.Prefix:%+v", err, []netip.Prefix{ip})
return err
}
return nil
@@ -133,10 +136,10 @@ func delRoutesByGateway(gateway string) error {
cmd := exec.Command("route", "delete", fields[0], "mask", fields[1], gateway)
err := cmd.Run()
if err != nil {
gLog.Printf(LvERROR, "Delete route %s error:%s", fields[0], err)
gLog.e("Delete route %s error:%s", fields[0], err)
continue
}
gLog.Printf(LvINFO, "Delete route ok: %s %s %s\n", fields[0], fields[1], gateway)
gLog.i("Delete route ok: %s %s %s\n", fields[0], fields[1], gateway)
}
}
return nil
+30 -34
View File
@@ -5,6 +5,7 @@ import (
"encoding/binary"
"errors"
"net"
"sync"
"time"
)
@@ -21,18 +22,24 @@ func (e *DeadlineExceededError) Error() string { return "i/o timeout" }
func (e *DeadlineExceededError) Timeout() bool { return true }
func (e *DeadlineExceededError) Temporary() bool { return true }
var overlayConns sync.Map // both TCP and UDP
func closeOverlayConns(appID uint64) {
overlayConns.Range(func(_, i interface{}) bool {
oConn := i.(*overlayConn)
if oConn.app.id == appID {
oConn.Close()
}
return true
})
}
// implement io.Writer
type overlayConn struct {
tunnel *P2PTunnel // TODO: del
app *p2pApp
connTCP net.Conn
id uint64
rtid uint64
running bool
isClient bool
appID uint64 // TODO: del
appKey uint64 // TODO: del
appKeyBytes []byte // TODO: del
app *p2pApp
connTCP net.Conn
id uint64
running bool
isClient bool
// for udp
connUDP *net.UDPConn
remoteAddr net.Addr
@@ -41,42 +48,31 @@ type overlayConn struct {
}
func (oConn *overlayConn) run() {
gLog.Printf(LvDEBUG, "oid:%d overlayConn run start", oConn.id)
defer gLog.Printf(LvDEBUG, "oid:%d overlayConn run end", oConn.id)
gLog.d("oid:%d overlayConn run start", oConn.id)
defer gLog.d("oid:%d overlayConn run end", oConn.id)
oConn.lastReadUDPTs = time.Now()
buffer := make([]byte, ReadBuffLen+PaddingSize) // 16 bytes for padding
reuseBuff := buffer[:ReadBuffLen]
encryptData := make([]byte, ReadBuffLen+PaddingSize) // 16 bytes for padding
tunnelHead := new(bytes.Buffer)
relayHead := new(bytes.Buffer)
binary.Write(relayHead, binary.LittleEndian, oConn.rtid)
binary.Write(tunnelHead, binary.LittleEndian, oConn.id)
for oConn.running && oConn.tunnel.isRuning() {
overlayHead := new(bytes.Buffer)
binary.Write(overlayHead, binary.LittleEndian, oConn.id)
for oConn.running && oConn.app.running {
readBuff, dataLen, err := oConn.Read(reuseBuff)
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Timeout() {
continue
}
// overlay tcp connection normal close, debug log
gLog.Printf(LvDEBUG, "oid:%d overlayConn read error:%s,close it", oConn.id, err)
gLog.d("oid:%d overlayConn read error:%s,close it", oConn.id, err)
break
}
payload := readBuff[:dataLen]
if oConn.appKey != 0 {
payload, _ = encryptBytes(oConn.appKeyBytes, encryptData, readBuff[:dataLen], dataLen)
}
writeBytes := append(tunnelHead.Bytes(), payload...)
// TODO: app.write
if oConn.rtid == 0 {
oConn.tunnel.conn.WriteBytes(MsgP2P, MsgOverlayData, writeBytes)
gLog.Printf(LvDev, "oid:%d write overlay data to tid:%d bodylen=%d", oConn.id, oConn.tunnel.id, oConn.id, len(writeBytes))
} else {
// write raley data
all := append(relayHead.Bytes(), encodeHeader(MsgP2P, MsgOverlayData, uint32(len(writeBytes)))...)
all = append(all, writeBytes...)
oConn.tunnel.conn.WriteBytes(MsgP2P, MsgRelayData, all)
gLog.Printf(LvDev, "oid:%d write relay data to tid:%d,rtid:%d bodylen=%d", oConn.id, oConn.tunnel.id, oConn.rtid, len(writeBytes))
if oConn.app.key != 0 {
payload, _ = encryptBytes(oConn.app.appKeyBytes, encryptData, readBuff[:dataLen], dataLen)
}
writeBytes := append(overlayHead.Bytes(), payload...)
oConn.app.WriteBytes(writeBytes)
}
if oConn.connTCP != nil {
oConn.connTCP.Close()
@@ -84,10 +80,10 @@ func (oConn *overlayConn) run() {
if oConn.connUDP != nil {
oConn.connUDP.Close()
}
oConn.tunnel.overlayConns.Delete(oConn.id)
overlayConns.Delete(oConn.id)
// notify peer disconnect
req := OverlayDisconnectReq{ID: oConn.id}
oConn.tunnel.WriteMessage(oConn.rtid, MsgP2P, MsgOverlayDisconnectReq, &req)
oConn.app.WriteMessage(MsgP2P, MsgOverlayDisconnectReq, &req)
}
func (oConn *overlayConn) Read(reuseBuff []byte) (buff []byte, dataLen int, err error) {
+457 -224
View File
@@ -4,162 +4,215 @@ import (
"bytes"
"encoding/binary"
"fmt"
"math"
"math/rand"
"net"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
const DefaultRtt int32 = 1000
const MaxWindowSize = 1024 * 128 // max 32k packets in flight
const MergeAckDelay = 40 // 40ms linux kernel tcp
const RetransmissonTime = MergeAckDelay + 2000 // ms
type appMsgCtx struct {
head *openP2PHeader
body []byte
ts time.Time
}
type p2pApp struct {
config AppConfig
listener net.Listener
listenerUDP *net.UDPConn
directTunnel *P2PTunnel
relayTunnel *P2PTunnel
tunnelMtx sync.Mutex
iptree *IPTree // for whitelist
rtid uint64 // relay tunnelID
relayNode string
relayMode string // public/private
hbTimeRelay time.Time
hbMtx sync.Mutex
running bool
id uint64
key uint64 // aes
wg sync.WaitGroup
relayHead *bytes.Buffer
once sync.Once
// for relayTunnel
retryRelayNum int
retryRelayTime time.Time
nextRetryRelayTime time.Time
errMsg string
connectTime time.Time
config AppConfig
listener net.Listener
listenerUDP *net.UDPConn
tunnelMtx sync.Mutex
iptree *IPTree // for whitelist
hbMtx sync.Mutex
running bool
id uint64
key uint64 // aes
appKeyBytes []byte // pre-calc
wg sync.WaitGroup
msgChan chan appMsgCtx
once sync.Once
tunnelNum int
allTunnels []*P2PTunnel
retryNum []int
retryTime []time.Time
nextRetryTime []time.Time
rtt []atomic.Int32
relayHead []*bytes.Buffer
rtid []uint64 // peer relay tunnelID
relayNode []string
relayMode []string // public/private
hbTime []time.Time
whbTime []time.Time // calc each tunnel rtt by hb
unAckSeqStart []atomic.Uint64 // record unack packet for retransmission
unAckSeqEnd []atomic.Uint64
errMsg string
connectTime time.Time
// asyncWriteChan chan []byte
maxWindowSize uint64
unAckTs []atomic.Int64
writeTs []atomic.Int64
readCacheTs atomic.Int64
seqW uint64
seqR uint64
seqRMtx sync.Mutex
handleAckMtx sync.Mutex
mergeAckSeq []atomic.Uint64
mergeAckTs []atomic.Int64
preDirectSuccessIP string
}
func (app *p2pApp) Tunnel() *P2PTunnel {
app.tunnelMtx.Lock()
defer app.tunnelMtx.Unlock()
if app.directTunnel != nil {
return app.directTunnel
func (app *p2pApp) Tunnel(idx int) *P2PTunnel {
if idx > app.tunnelNum-1 {
return nil
}
return app.relayTunnel
}
func (app *p2pApp) DirectTunnel() *P2PTunnel {
app.tunnelMtx.Lock()
defer app.tunnelMtx.Unlock()
return app.directTunnel
return app.allTunnels[idx]
}
func (app *p2pApp) setDirectTunnel(t *P2PTunnel) {
func (app *p2pApp) SetTunnel(t *P2PTunnel, idx int) {
app.tunnelMtx.Lock()
defer app.tunnelMtx.Unlock()
app.directTunnel = t
}
app.allTunnels[idx] = t
func (app *p2pApp) RelayTunnel() *P2PTunnel {
app.tunnelMtx.Lock()
defer app.tunnelMtx.Unlock()
return app.relayTunnel
}
func (app *p2pApp) setRelayTunnel(t *P2PTunnel) {
app.tunnelMtx.Lock()
defer app.tunnelMtx.Unlock()
app.relayTunnel = t
}
func (app *p2pApp) isDirect() bool {
return app.directTunnel != nil
}
func (app *p2pApp) RelayTunnelID() uint64 {
if app.isDirect() {
return 0
}
return app.rtid
app.rtt[idx].Store(DefaultRtt)
app.unAckTs[idx].Store(0)
app.writeTs[idx].Store(0)
}
func (app *p2pApp) ConnectTime() time.Time {
if app.isDirect() {
if app.allTunnels[0] != nil {
return app.config.connectTime
}
return app.connectTime
}
func (app *p2pApp) RetryTime() time.Time {
if app.isDirect() {
if app.allTunnels[0] != nil {
return app.config.retryTime
}
return app.retryRelayTime
return app.retryTime[1]
}
func (app *p2pApp) checkP2PTunnel() error {
func (app *p2pApp) Init(tunnelNum int) {
app.tunnelNum = tunnelNum
app.allTunnels = make([]*P2PTunnel, tunnelNum)
app.retryNum = make([]int, tunnelNum)
app.retryTime = make([]time.Time, tunnelNum)
app.nextRetryTime = make([]time.Time, tunnelNum)
app.rtt = make([]atomic.Int32, tunnelNum)
app.relayHead = make([]*bytes.Buffer, tunnelNum)
app.rtid = make([]uint64, tunnelNum)
app.relayNode = make([]string, tunnelNum)
app.relayMode = make([]string, tunnelNum)
app.hbTime = make([]time.Time, tunnelNum)
app.whbTime = make([]time.Time, tunnelNum)
app.unAckSeqEnd = make([]atomic.Uint64, tunnelNum)
app.unAckTs = make([]atomic.Int64, tunnelNum)
app.writeTs = make([]atomic.Int64, tunnelNum)
app.unAckSeqStart = make([]atomic.Uint64, tunnelNum)
app.mergeAckSeq = make([]atomic.Uint64, tunnelNum)
app.mergeAckTs = make([]atomic.Int64, tunnelNum)
app.msgChan = make(chan appMsgCtx, 50)
for i := 0; i < tunnelNum; i++ {
app.hbTime[i] = time.Now()
}
// app.unAckSeqStart.Store(0)
// app.mergeAckTs.Store(0)
// for i := 0; i < relayNum; i++ {
// app.mergeAckTsRelay[i].Store(0)
// }
}
func (app *p2pApp) Start(isClient bool) {
app.maxWindowSize = MaxWindowSize
app.PreCalcKeyBytes()
if isClient {
go app.daemonP2PTunnel()
}
}
func (app *p2pApp) daemonP2PTunnel() error {
for app.running {
app.checkDirectTunnel()
app.checkRelayTunnel()
app.daemonDirectTunnel()
if app.config.peerIP == gConf.Network.publicIP {
time.Sleep(time.Second * 10) // if peerIP is local IP, delay relay tunnel
}
for i := 1; i < app.tunnelNum; i++ {
app.daemonRelayTunnel(i)
}
time.Sleep(time.Second * 3)
}
return nil
}
func (app *p2pApp) directRetryLimit() int {
if app.config.peerIP == gConf.Network.publicIP && compareVersion(app.config.peerVersion, SupportIntranetVersion) >= 0 {
return retryLimit
func (app *p2pApp) daemonDirectTunnel() error {
if !GNetwork.online {
return nil
}
if IsIPv6(app.config.peerIPv6) && IsIPv6(gConf.IPv6()) {
return retryLimit
}
if app.config.hasIPv4 == 1 || gConf.Network.hasIPv4 == 1 || app.config.hasUPNPorNATPMP == 1 || gConf.Network.hasUPNPorNATPMP == 1 {
return retryLimit
}
if gConf.Network.natType == NATCone && app.config.peerNatType == NATCone {
return retryLimit
}
if app.config.peerNatType == NATSymmetric && gConf.Network.natType == NATSymmetric {
return 0
}
return retryLimit / 10 // c2s or s2c
}
func (app *p2pApp) checkDirectTunnel() error {
if app.config.ForceRelay == 1 && app.config.RelayNode != app.config.PeerNode {
return nil
}
if app.DirectTunnel() != nil && app.DirectTunnel().isActive() {
if app.Tunnel(0) != nil && app.Tunnel(0).isActive() {
return nil
}
if app.config.nextRetryTime.After(time.Now()) || app.config.Enabled == 0 || app.config.retryNum >= app.directRetryLimit() {
if app.config.nextRetryTime.After(time.Now()) || app.config.Enabled == 0 {
return nil
}
if time.Now().Add(-time.Minute * 15).After(app.config.retryTime) { // run normally 15min, reset retrynum
app.config.retryNum = 1
}
if app.config.retryNum > 0 { // first time not show reconnect log
gLog.Printf(LvINFO, "appid:%d checkDirectTunnel detect peer %s disconnect, reconnecting the %d times...", app.id, app.config.LogPeerNode(), app.config.retryNum)
gLog.i("appid:%d checkDirectTunnel detect peer %s disconnect, reconnecting the %d times...", app.id, app.config.LogPeerNode(), app.config.retryNum)
}
app.config.retryNum++
app.config.retryTime = time.Now()
app.config.nextRetryTime = time.Now().Add(retryInterval)
app.config.connectTime = time.Now()
err := app.buildDirectTunnel()
if err != nil {
app.config.errMsg = err.Error()
if err == ErrPeerOffline && app.config.retryNum > 2 { // stop retry, waiting for online
app.config.retryNum = retryLimit
gLog.Printf(LvINFO, "appid:%d checkDirectTunnel %s offline, it will auto reconnect when peer node online", app.id, app.config.LogPeerNode())
gLog.i("appid:%d checkDirectTunnel %s offline, it will auto reconnect when peer node online", app.id, app.config.LogPeerNode())
}
if err == ErrBuildTunnelBusy {
app.config.retryNum--
}
}
if app.Tunnel() != nil {
interval := calcRetryTimeRelay(float64(app.config.retryNum))
if app.preDirectSuccessIP == app.config.peerIP {
interval = math.Min(interval, 1800) // if peerIP has been direct link succeed, retry 30min max
}
app.config.nextRetryTime = time.Now().Add(time.Duration(interval) * time.Second)
if app.Tunnel(0) != nil {
app.preDirectSuccessIP = app.config.peerIP
app.once.Do(func() {
go app.listen()
// memapp also need
go app.relayHeartbeatLoop()
for i := 1; i < app.tunnelNum; i++ {
go app.relayHeartbeatLoop(i)
}
})
}
return nil
@@ -174,7 +227,7 @@ func (app *p2pApp) buildDirectTunnel() error {
pn := GNetwork
initErr := pn.requestPeerInfo(&app.config)
if initErr != nil {
gLog.Printf(LvERROR, "appid:%d buildDirectTunnel %s requestPeerInfo error:%s", app.id, app.config.LogPeerNode(), initErr)
gLog.w("appid:%d buildDirectTunnel %s requestPeerInfo error:%s", app.id, app.config.LogPeerNode(), initErr)
return initErr
}
t, err = pn.addDirectTunnel(app.config, 0)
@@ -212,64 +265,77 @@ func (app *p2pApp) buildDirectTunnel() error {
AppID: app.id,
AppKey: app.key,
}
gLog.Printf(LvDEBUG, "appid:%d buildDirectTunnel sync appkey to %s", app.id, app.config.LogPeerNode())
gLog.d("appid:%d buildDirectTunnel sync appkey to %s", app.id, app.config.LogPeerNode())
pn.push(app.config.PeerNode, MsgPushAPPKey, &syncKeyReq)
app.setDirectTunnel(t)
app.SetTunnel(t, 0)
// if memapp notify peer addmemapp
if app.config.SrcPort == 0 {
req := ServerSideSaveMemApp{From: gConf.Network.Node, Node: gConf.Network.Node, TunnelID: t.id, RelayTunnelID: 0, AppID: app.id}
pn.push(app.config.PeerNode, MsgPushServerSideSaveMemApp, &req)
gLog.Printf(LvDEBUG, "appid:%d buildDirectTunnel push %s ServerSideSaveMemApp: %s", app.id, app.config.LogPeerNode(), prettyJson(req))
}
gLog.Printf(LvDEBUG, "appid:%d buildDirectTunnel ok. %s use tid %d", app.id, app.config.AppName, t.id)
// if app.config.SrcPort == 0 {
req2 := ServerSideSaveMemApp{From: gConf.Network.Node, Node: gConf.Network.Node, TunnelID: t.id, RelayTunnelID: 0, TunnelNum: uint32(app.tunnelNum), AppID: app.id, AppKey: app.key, SrcPort: uint32(app.config.SrcPort)}
pn.push(app.config.PeerNode, MsgPushServerSideSaveMemApp, &req2)
gLog.d("appid:%d buildDirectTunnel push %s ServerSideSaveMemApp: %s", app.id, app.config.LogPeerNode(), prettyJson(req2))
// }
gLog.d("appid:%d buildDirectTunnel ok. %s use tid %d", app.id, app.config.AppName, t.id)
return nil
}
func (app *p2pApp) checkRelayTunnel() error {
func (app *p2pApp) daemonRelayTunnel(idx int) error {
if !GNetwork.online {
return nil
}
if app.Tunnel(0) != nil && app.Tunnel(0).linkModeWeb == LinkModeIntranet { // in the same Lan, no relay
return nil
}
// if app.config.ForceRelay == 1 && (gConf.sdwan.CentralNode == app.config.PeerNode && compareVersion(app.config.peerVersion, SupportDualTunnelVersion) < 0) {
if app.config.SrcPort == 0 && (gConf.sdwan.CentralNode == app.config.PeerNode || gConf.sdwan.CentralNode == gConf.Network.Node) { // memapp central node not build relay tunnel
return nil
}
if gConf.sdwan.CentralNode != "" && idx > 1 { // if central node exist only need one relayTunnel
return nil
}
app.hbMtx.Lock()
if app.RelayTunnel() != nil && time.Now().Before(app.hbTimeRelay.Add(TunnelHeartbeatTime*2)) { // must check app.hbtime instead of relayTunnel
if app.Tunnel(idx) != nil && time.Now().Before(app.hbTime[idx].Add(TunnelHeartbeatTime*2)) { // must check app.hbtime instead of relayTunnel
app.hbMtx.Unlock()
return nil
}
app.hbMtx.Unlock()
if app.nextRetryRelayTime.After(time.Now()) || app.config.Enabled == 0 || app.retryRelayNum >= retryLimit {
if app.nextRetryTime[idx].After(time.Now()) || app.config.Enabled == 0 {
return nil
}
if time.Now().Add(-time.Minute * 15).After(app.retryRelayTime) { // run normally 15min, reset retrynum
app.retryRelayNum = 1
if time.Now().Add(-time.Minute * 15).After(app.retryTime[idx]) { // run normally 15min, reset retrynum
app.retryNum[idx] = 1
}
if app.retryRelayNum > 0 { // first time not show reconnect log
gLog.Printf(LvINFO, "appid:%d checkRelayTunnel detect peer %s relay disconnect, reconnecting the %d times...", app.id, app.config.LogPeerNode(), app.retryRelayNum)
if app.retryNum[idx] > 0 { // first time not show reconnect log
gLog.i("appid:%d checkRelayTunnel detect peer %s relay disconnect, reconnecting the %d times...", app.id, app.config.LogPeerNode(), app.retryNum[idx])
}
app.setRelayTunnel(nil) // reset relayTunnel
app.retryRelayNum++
app.retryRelayTime = time.Now()
app.nextRetryRelayTime = time.Now().Add(retryInterval)
app.SetTunnel(nil, idx) // reset relayTunnel
app.retryNum[idx]++
app.retryTime[idx] = time.Now()
app.connectTime = time.Now()
err := app.buildRelayTunnel()
err := app.buildRelayTunnel(idx)
if err != nil {
app.errMsg = err.Error()
if err == ErrPeerOffline && app.retryRelayNum > 2 { // stop retry, waiting for online
app.retryRelayNum = retryLimit
gLog.Printf(LvINFO, "appid:%d checkRelayTunnel %s offline, it will auto reconnect when peer node online", app.id, app.config.LogPeerNode())
if err == ErrPeerOffline && app.retryNum[idx] > 2 { // stop retry, waiting for online
app.retryNum[idx] = retryLimit
gLog.i("appid:%d checkRelayTunnel %s offline, it will auto reconnect when peer node online", app.id, app.config.LogPeerNode())
}
}
if app.Tunnel() != nil {
interval := calcRetryTimeRelay(float64(app.retryNum[idx]))
app.nextRetryTime[idx] = time.Now().Add(time.Duration(interval) * time.Second)
if app.Tunnel(idx) != nil {
app.once.Do(func() {
go app.listen()
// memapp also need
go app.relayHeartbeatLoop()
for i := 1; i < app.tunnelNum; i++ {
go app.relayHeartbeatLoop(i)
}
})
}
return nil
}
func (app *p2pApp) buildRelayTunnel() error {
func (app *p2pApp) buildRelayTunnel(idx int) error {
var rtid uint64
relayNode := ""
relayMode := ""
@@ -282,11 +348,15 @@ func (app *p2pApp) buildRelayTunnel() error {
config := app.config
initErr := pn.requestPeerInfo(&config)
if initErr != nil {
gLog.Printf(LvERROR, "appid:%d buildRelayTunnel %s init error:%s", app.id, config.LogPeerNode(), initErr)
gLog.w("appid:%d buildRelayTunnel %s init error:%s", app.id, config.LogPeerNode(), initErr)
return initErr
}
t, rtid, relayMode, err = pn.addRelayTunnel(config)
ExcludeNodes := ""
kk := 1 + ((idx - 1) ^ 1)
if app.tunnelNum > 2 && app.allTunnels[kk] != nil {
ExcludeNodes = app.allTunnels[1+((idx-1)^1)].config.PeerNode
}
t, rtid, relayMode, err = pn.addRelayTunnel(config, ExcludeNodes)
if t != nil {
relayNode = t.config.PeerNode
}
@@ -318,21 +388,21 @@ func (app *p2pApp) buildRelayTunnel() error {
AppID: app.id,
AppKey: app.key,
}
gLog.Printf(LvDEBUG, "appid:%d buildRelayTunnel sync appkey relay to %s", app.id, config.LogPeerNode())
gLog.d("appid:%d buildRelayTunnel sync appkey relay to %s", app.id, config.LogPeerNode())
pn.push(config.PeerNode, MsgPushAPPKey, &syncKeyReq)
app.setRelayTunnelID(rtid)
app.setRelayTunnel(t)
app.relayNode = relayNode
app.relayMode = relayMode
app.hbTimeRelay = time.Now()
app.SetRelayTunnelID(rtid, idx)
app.SetTunnel(t, idx)
app.relayNode[idx] = relayNode
app.relayMode[idx] = relayMode
app.hbTime[idx] = time.Now()
// if memapp notify peer addmemapp
if config.SrcPort == 0 {
req := ServerSideSaveMemApp{From: gConf.Network.Node, Node: relayNode, TunnelID: rtid, RelayTunnelID: t.id, AppID: app.id, RelayMode: relayMode}
pn.push(config.PeerNode, MsgPushServerSideSaveMemApp, &req)
gLog.Printf(LvDEBUG, "appid:%d buildRelayTunnel push %s relay ServerSideSaveMemApp: %s", app.id, config.LogPeerNode(), prettyJson(req))
}
gLog.Printf(LvDEBUG, "appid:%d buildRelayTunnel %s use tunnel %d", app.id, app.config.AppName, t.id)
// if config.SrcPort == 0 {
req2 := ServerSideSaveMemApp{From: gConf.Network.Node, Node: relayNode, TunnelID: rtid, RelayTunnelID: t.id, AppID: app.id, AppKey: app.key, RelayMode: relayMode, RelayIndex: uint32(idx), TunnelNum: uint32(app.tunnelNum), SrcPort: uint32(app.config.SrcPort)}
pn.push(config.PeerNode, MsgPushServerSideSaveMemApp, &req2)
gLog.d("appid:%d buildRelayTunnel push %s relay ServerSideSaveMemApp: %s", app.id, config.LogPeerNode(), prettyJson(req2))
// }
gLog.d("appid:%d buildRelayTunnel %s use tunnel %d", app.id, app.config.AppName, t.id)
return nil
}
@@ -341,47 +411,71 @@ func (app *p2pApp) buildOfficialTunnel() error {
}
// cache relayHead, refresh when rtid change
func (app *p2pApp) RelayHead() *bytes.Buffer {
if app.relayHead == nil {
app.relayHead = new(bytes.Buffer)
binary.Write(app.relayHead, binary.LittleEndian, app.rtid)
func (app *p2pApp) RelayHead(idx int) *bytes.Buffer {
if app.relayHead[idx] == nil {
app.relayHead[idx] = new(bytes.Buffer)
binary.Write(app.relayHead[idx], binary.LittleEndian, app.rtid[idx])
}
return app.relayHead
return app.relayHead[idx]
}
func (app *p2pApp) setRelayTunnelID(rtid uint64) {
app.rtid = rtid
app.relayHead = new(bytes.Buffer)
binary.Write(app.relayHead, binary.LittleEndian, app.rtid)
func (app *p2pApp) SetRelayTunnelID(rtid uint64, idx int) {
app.rtid[idx] = rtid
app.relayHead[idx] = new(bytes.Buffer)
binary.Write(app.relayHead[idx], binary.LittleEndian, app.rtid[idx])
}
func (app *p2pApp) isActive() bool {
if app.Tunnel() == nil {
// gLog.Printf(LvDEBUG, "isActive app.tunnel==nil")
func (app *p2pApp) IsActive() bool {
if t, _ := app.AvailableTunnel(); t == nil {
// gLog.d("isActive app.tunnel==nil")
return false
}
if app.isDirect() { // direct mode app heartbeat equals to tunnel heartbeat
return app.Tunnel().isActive()
if app.Tunnel(0) != nil { // direct mode app heartbeat equals to tunnel heartbeat
return app.Tunnel(0).isActive()
}
// relay mode calc app heartbeat
app.hbMtx.Lock()
defer app.hbMtx.Unlock()
res := time.Now().Before(app.hbTimeRelay.Add(TunnelHeartbeatTime * 2))
if app.Tunnel(1) != nil {
return time.Now().Before(app.hbTime[1].Add(TunnelHeartbeatTime * 2))
}
res := time.Now().Before(app.hbTime[2].Add(TunnelHeartbeatTime * 2))
// if !res {
// gLog.Printf(LvDEBUG, "%d app isActive false. peer=%s", app.id, app.config.PeerNode)
// gLog.d("%d app isActive false. peer=%s", app.id, app.config.PeerNode)
// }
return res
}
func (app *p2pApp) updateHeartbeat() {
func (app *p2pApp) UpdateHeartbeat(rtid uint64) {
app.hbMtx.Lock()
defer app.hbMtx.Unlock()
app.hbTimeRelay = time.Now()
tidx := 1
if app.tunnelNum > 2 && rtid == app.rtid[2] || (app.Tunnel(2) != nil && app.Tunnel(2).id == rtid) { // ack return rtid!=
tidx = 2
}
app.hbTime[tidx] = time.Now()
rtt := int32(time.Since(app.whbTime[tidx]) / time.Millisecond)
preRtt := app.rtt[tidx].Load()
if preRtt != DefaultRtt {
rtt = int32(float64(preRtt)*(1-ma20) + float64(rtt)*ma20)
}
app.rtt[tidx].Store(rtt)
gLog.dev("appid:%d relay heartbeat %d store rtt %d", app.id, tidx, rtt)
}
// UpdateRelayHeartbeatTs records the local heartbeat-write timestamp (whbTime)
// for the relay slot matching rtid. One side never writes a relay heartbeat of
// its own, so its send time is captured here instead, for RTT bookkeeping.
func (app *p2pApp) UpdateRelayHeartbeatTs(rtid uint64) {
	app.hbMtx.Lock()
	defer app.hbMtx.Unlock()
	idx := 1
	// Select the second relay slot when rtid matches it, either by the stored
	// relay-tunnel id or by the underlying tunnel id (an ack may carry a
	// different rtid). Precedence mirrors UpdateHeartbeat.
	matchByRtid := app.tunnelNum > 2 && rtid == app.rtid[2]
	matchByTunnelID := app.Tunnel(2) != nil && app.Tunnel(2).id == rtid
	if matchByRtid || matchByTunnelID {
		idx = 2
	}
	app.whbTime[idx] = time.Now()
}
func (app *p2pApp) listenTCP() error {
gLog.Printf(LvDEBUG, "appid:%d tcp accept on port %d start", app.id, app.config.SrcPort)
defer gLog.Printf(LvDEBUG, "appid:%d tcp accept on port %d end", app.id, app.config.SrcPort)
gLog.d("appid:%d tcp accept on port %d start", app.id, app.config.SrcPort)
defer gLog.d("appid:%d tcp accept on port %d end", app.id, app.config.SrcPort)
var err error
listenAddr := ""
if IsLocalhost(app.config.Whitelist) { // not expose port
@@ -389,7 +483,7 @@ func (app *p2pApp) listenTCP() error {
}
app.listener, err = net.Listen("tcp", fmt.Sprintf("%s:%d", listenAddr, app.config.SrcPort))
if err != nil {
gLog.Printf(LvERROR, "appid:%d listen error:%s", app.id, err)
gLog.e("appid:%d listen tcp error:%s", app.id, err)
return err
}
defer app.listener.Close()
@@ -397,12 +491,13 @@ func (app *p2pApp) listenTCP() error {
conn, err := app.listener.Accept()
if err != nil {
if app.running {
gLog.Printf(LvERROR, "appid:%d accept error:%s", app.id, err)
gLog.e("appid:%d accept error:%s", app.id, err)
}
break
}
if app.Tunnel() == nil {
gLog.Printf(LvDEBUG, "appid:%d srcPort=%d, app.Tunnel()==nil, not ready", app.id, app.config.SrcPort)
t, tidx := app.AvailableTunnel()
if t == nil {
gLog.d("appid:%d srcPort=%d, app.Tunnel()==nil, not ready", app.id, app.config.SrcPort)
time.Sleep(time.Second)
continue
}
@@ -411,32 +506,20 @@ func (app *p2pApp) listenTCP() error {
remoteIP := conn.RemoteAddr().(*net.TCPAddr).IP.String()
if !app.iptree.Contains(remoteIP) && !IsLocalhost(remoteIP) {
conn.Close()
gLog.Printf(LvERROR, "appid:%d %s not in whitelist, access denied", app.id, remoteIP)
gLog.e("appid:%d %s not in whitelist, access denied", app.id, remoteIP)
continue
}
}
oConn := overlayConn{
tunnel: app.Tunnel(),
app: app,
connTCP: conn,
id: rand.Uint64(),
isClient: true,
appID: app.id,
appKey: app.key,
running: true,
}
if !app.isDirect() {
oConn.rtid = app.rtid
}
// pre-calc key bytes for encrypt
if oConn.appKey != 0 {
encryptKey := make([]byte, AESKeySize)
binary.LittleEndian.PutUint64(encryptKey, oConn.appKey)
binary.LittleEndian.PutUint64(encryptKey[8:], oConn.appKey)
oConn.appKeyBytes = encryptKey
}
app.Tunnel().overlayConns.Store(oConn.id, &oConn)
gLog.Printf(LvDEBUG, "appid:%d Accept TCP overlayID:%d, %s", app.id, oConn.id, oConn.connTCP.RemoteAddr())
overlayConns.Store(oConn.id, &oConn)
gLog.d("appid:%d Accept TCP overlayID:%d, %s", app.id, oConn.id, oConn.connTCP.RemoteAddr())
// tell peer connect
req := OverlayConnectReq{ID: oConn.id,
Token: gConf.Network.Token,
@@ -445,48 +528,52 @@ func (app *p2pApp) listenTCP() error {
Protocol: app.config.Protocol,
AppID: app.id,
}
if !app.isDirect() {
req.RelayTunnelID = app.Tunnel().id
if tidx != 0 {
req.RelayTunnelID = t.id
}
app.WriteMessage(MsgP2P, MsgOverlayConnectReq, &req)
head, _ := app.ReadMessage(MsgP2P, MsgOverlayConnectRsp, time.Second*3)
if head == nil {
gLog.w("appid:%d read MsgOverlayConnectRsp error", app.id)
}
app.Tunnel().WriteMessage(app.RelayTunnelID(), MsgP2P, MsgOverlayConnectReq, &req)
// TODO: wait OverlayConnectRsp instead of sleep
time.Sleep(time.Second) // waiting remote node connection ok
go oConn.run()
}
return nil
}
func (app *p2pApp) listenUDP() error {
gLog.Printf(LvDEBUG, "appid:%d udp accept on port %d start", app.id, app.config.SrcPort)
defer gLog.Printf(LvDEBUG, "appid:%d udp accept on port %d end", app.id, app.config.SrcPort)
gLog.d("appid:%d udp accept on port %d start", app.id, app.config.SrcPort)
defer gLog.d("appid:%d udp accept on port %d end", app.id, app.config.SrcPort)
var err error
app.listenerUDP, err = net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: app.config.SrcPort})
if err != nil {
gLog.Printf(LvERROR, "appid:%d listen error:%s", app.id, err)
gLog.e("appid:%d listen udp error:%s", app.id, err)
return err
}
defer app.listenerUDP.Close()
buffer := make([]byte, 64*1024+PaddingSize)
udpID := make([]byte, 8)
for {
for app.running {
app.listenerUDP.SetReadDeadline(time.Now().Add(UDPReadTimeout))
len, remoteAddr, err := app.listenerUDP.ReadFrom(buffer)
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Timeout() {
continue
} else {
gLog.Printf(LvERROR, "appid:%d udp read failed:%s", app.id, err)
gLog.e("appid:%d udp read failed:%s", app.id, err)
break
}
} else {
if app.Tunnel() == nil {
gLog.Printf(LvDEBUG, "appid:%d srcPort=%d, app.Tunnel()==nil, not ready", app.id, app.config.SrcPort)
t, tidx := app.AvailableTunnel()
if t == nil {
gLog.d("appid:%d srcPort=%d, app.Tunnel()==nil, not ready", app.id, app.config.SrcPort)
time.Sleep(time.Second)
continue
}
dupData := bytes.Buffer{} // should uses memory pool
dupData.Write(buffer[:len+PaddingSize])
// load from app.tunnel.overlayConns by remoteAddr error, new udp connection
// load from app.overlayConns by remoteAddr error, new udp connection
remoteIP := strings.Split(remoteAddr.String(), ":")[0]
port, _ := strconv.Atoi(strings.Split(remoteAddr.String(), ":")[1])
a := net.ParseIP(remoteIP)
@@ -497,31 +584,19 @@ func (app *p2pApp) listenUDP() error {
udpID[4] = byte(port)
udpID[5] = byte(port >> 8)
id := binary.LittleEndian.Uint64(udpID) // convert remoteIP:port to uint64
s, ok := app.Tunnel().overlayConns.Load(id)
s, ok := overlayConns.Load(id)
if !ok {
oConn := overlayConn{
tunnel: app.Tunnel(),
app: app,
connUDP: app.listenerUDP,
remoteAddr: remoteAddr,
udpData: make(chan []byte, 1000),
id: id,
isClient: true,
appID: app.id,
appKey: app.key,
running: true,
}
if !app.isDirect() {
oConn.rtid = app.rtid
}
// calc key bytes for encrypt
if oConn.appKey != 0 {
encryptKey := make([]byte, AESKeySize)
binary.LittleEndian.PutUint64(encryptKey, oConn.appKey)
binary.LittleEndian.PutUint64(encryptKey[8:], oConn.appKey)
oConn.appKeyBytes = encryptKey
}
app.Tunnel().overlayConns.Store(oConn.id, &oConn)
gLog.Printf(LvDEBUG, "appid:%d Accept UDP overlayID:%d", app.id, oConn.id)
overlayConns.Store(oConn.id, &oConn)
gLog.d("appid:%d Accept UDP overlayID:%d", app.id, oConn.id)
// tell peer connect
req := OverlayConnectReq{ID: oConn.id,
Token: gConf.Network.Token,
@@ -530,17 +605,19 @@ func (app *p2pApp) listenUDP() error {
Protocol: app.config.Protocol,
AppID: app.id,
}
if !app.isDirect() {
req.RelayTunnelID = app.Tunnel().id
if tidx != 0 {
req.RelayTunnelID = t.id
}
app.WriteMessage(MsgP2P, MsgOverlayConnectReq, &req)
head, _ := app.ReadMessage(MsgP2P, MsgOverlayConnectRsp, time.Second*3)
if head == nil {
gLog.w("appid:%d read MsgOverlayConnectRsp error", app.id)
}
app.Tunnel().WriteMessage(app.RelayTunnelID(), MsgP2P, MsgOverlayConnectReq, &req)
// TODO: wait OverlayConnectRsp instead of sleep
time.Sleep(time.Second) // waiting remote node connection ok
go oConn.run()
oConn.udpData <- dupData.Bytes()
}
// load from app.tunnel.overlayConns by remoteAddr ok, write relay data
// load from overlayConns by remoteAddr ok, write relay data
overlayConn, ok := s.(*overlayConn)
if !ok {
continue
@@ -555,8 +632,8 @@ func (app *p2pApp) listen() error {
if app.config.SrcPort == 0 {
return nil
}
gLog.Printf(LvINFO, "appid:%d LISTEN ON PORT %s:%d START", app.id, app.config.Protocol, app.config.SrcPort)
defer gLog.Printf(LvINFO, "appid:%d LISTEN ON PORT %s:%d END", app.id, app.config.Protocol, app.config.SrcPort)
gLog.i("appid:%d LISTEN ON PORT %s:%d START", app.id, app.config.Protocol, app.config.SrcPort)
defer gLog.i("appid:%d LISTEN ON PORT %s:%d END", app.id, app.config.Protocol, app.config.SrcPort)
app.wg.Add(1)
defer app.wg.Done()
for app.running {
@@ -573,7 +650,7 @@ func (app *p2pApp) listen() error {
return nil
}
func (app *p2pApp) close() {
func (app *p2pApp) Close() {
app.running = false
if app.listener != nil {
app.listener.Close()
@@ -581,36 +658,192 @@ func (app *p2pApp) close() {
if app.listenerUDP != nil {
app.listenerUDP.Close()
}
if app.DirectTunnel() != nil {
app.DirectTunnel().closeOverlayConns(app.id)
}
if app.RelayTunnel() != nil {
app.RelayTunnel().closeOverlayConns(app.id)
}
closeOverlayConns(app.id)
app.wg.Wait()
}
// TODO: many relay app on the same P2PTunnel will send a lot of relay heartbeat
func (app *p2pApp) relayHeartbeatLoop() {
func (app *p2pApp) relayHeartbeatLoop(idx int) {
app.wg.Add(1)
defer app.wg.Done()
gLog.Printf(LvDEBUG, "appid:%d %s relayHeartbeat to rtid:%d start", app.id, app.config.LogPeerNode(), app.rtid)
defer gLog.Printf(LvDEBUG, "appid:%d %s relayHeartbeat to rtid%d end", app.id, app.config.LogPeerNode(), app.rtid)
gLog.d("appid:%d %s relayHeartbeat to rtid:%d start", app.id, app.config.LogPeerNode(), app.rtid[idx])
defer gLog.d("appid:%d %s relayHeartbeat to rtid%d end", app.id, app.config.LogPeerNode(), app.rtid[idx])
for app.running {
if app.RelayTunnel() == nil || !app.RelayTunnel().isRuning() {
if app.Tunnel(idx) == nil || !app.Tunnel(idx).isRuning() {
time.Sleep(TunnelHeartbeatTime)
continue
}
req := RelayHeartbeat{From: gConf.Network.Node, RelayTunnelID: app.RelayTunnel().id,
req := RelayHeartbeat{From: gConf.Network.Node, RelayTunnelID: app.Tunnel(idx).id, RelayTunnelID2: app.rtid[idx],
AppID: app.id}
err := app.RelayTunnel().WriteMessage(app.rtid, MsgP2P, MsgRelayHeartbeat, &req)
err := app.Tunnel(idx).WriteMessage(app.rtid[idx], MsgP2P, MsgRelayHeartbeat, &req)
if err != nil {
gLog.Printf(LvERROR, "appid:%d %s rtid:%d write relay tunnel heartbeat error %s", app.id, app.config.LogPeerNode(), app.rtid, err)
return
gLog.e("appid:%d %s rtid:%d write relay tunnel heartbeat error %s", app.id, app.config.LogPeerNode(), app.rtid[idx], err)
app.SetTunnel(nil, idx)
continue
}
app.whbTime[idx] = time.Now()
// TODO: debug relay heartbeat
gLog.Printf(LvDEBUG, "appid:%d %s rtid:%d write relay tunnel heartbeat ok", app.id, app.config.LogPeerNode(), app.rtid)
gLog.dev("appid:%d %s rtid:%d write relay tunnel heartbeat ok", app.id, app.config.LogPeerNode(), app.rtid[idx])
time.Sleep(TunnelHeartbeatTime)
}
}
// WriteMessage sends a control message through the first available tunnel
// slot, using the relay-tunnel id recorded for that slot. Returns
// ErrAppWithoutTunnel when no tunnel exists.
func (app *p2pApp) WriteMessage(mainType uint16, subType uint16, req interface{}) error {
	tun, idx := app.AvailableTunnel()
	if tun == nil {
		return ErrAppWithoutTunnel
	}
	relayID := app.rtid[idx]
	return tun.WriteMessage(relayID, mainType, subType, req)
}
// WriteMessageWithAppID sends a control message tagged with an explicit app
// id. Memapps (SrcPort == 0) address the peer by node-derived id instead of
// the local app id. Returns ErrAppWithoutTunnel when no tunnel exists.
func (app *p2pApp) WriteMessageWithAppID(mainType uint16, subType uint16, req interface{}) error {
	tun, idx := app.AvailableTunnel()
	if tun == nil {
		return ErrAppWithoutTunnel
	}
	id := app.id
	if app.config.SrcPort == 0 { // memapp: derive id from the peer node name
		id = NodeNameToID(app.config.PeerNode)
	}
	return tun.WriteMessageWithAppID(id, app.rtid[idx], mainType, subType, req)
}
// WriteBytes sends overlay payload bytes through the first available tunnel.
// On the direct tunnel (slot 0) data is framed as MsgOverlayData; on a relay
// slot the cached relay-tunnel-id head plus the overlay frame are wrapped in
// a MsgRelayData frame. Returns ErrAppWithoutTunnel when no tunnel exists, or
// the underlay write error.
func (app *p2pApp) WriteBytes(data []byte) error {
	t, tidx := app.AvailableTunnel()
	if t == nil {
		return ErrAppWithoutTunnel
	}
	if tidx == 0 {
		return t.conn.WriteBytes(MsgP2P, MsgOverlayData, data)
	}
	head := app.relayHead[tidx].Bytes()
	hdr := encodeHeader(MsgP2P, MsgOverlayData, uint32(len(data)))
	// Assemble the relay frame in a fresh slice. Appending directly onto the
	// cached head's backing array (the previous code) could write into the
	// bytes.Buffer's spare capacity, racing concurrent writers on the same
	// relayHead.
	all := make([]byte, 0, len(head)+len(hdr)+len(data))
	all = append(all, head...)
	all = append(all, hdr...)
	all = append(all, data...)
	// Propagate the relay write error; it was silently dropped before.
	return t.conn.WriteBytes(MsgP2P, MsgRelayData, all)
}
// PreCalcKeyBytes caches the AES key material derived from app.key in
// app.appKeyBytes so encryption does not rebuild it per packet. A zero key
// leaves the cache untouched.
func (app *p2pApp) PreCalcKeyBytes() {
	if app.key == 0 {
		return // no app key configured: nothing to pre-compute
	}
	kb := make([]byte, AESKeySize)
	binary.LittleEndian.PutUint64(kb[:8], app.key)
	binary.LittleEndian.PutUint64(kb[8:], app.key)
	app.appKeyBytes = kb
}
// WriteNodeDataMP forwards one IP packet over the multipath-selected tunnel,
// tagging the async write with this node's id and the per-app write sequence
// number. Returns ErrAppWithoutTunnel when no tunnel exists.
func (app *p2pApp) WriteNodeDataMP(IPPacket []byte) (err error) {
	t, tidx := app.fastestTunnel()
	if t == nil {
		return ErrAppWithoutTunnel
	}
	// The previous version built a dataWithSeq buffer (nodeID + seqW +
	// packet) here on every call but never used it anywhere — two heap
	// allocations plus copies per packet on the hot data path, removed.
	if tidx == 0 {
		t.asyncWriteNodeData(gConf.nodeID(), app.seqW, IPPacket, nil)
		gLog.dev("appid:%d asyncWriteDirect IPPacket len=%d", app.id, len(IPPacket))
	} else {
		t.asyncWriteNodeData(gConf.nodeID(), app.seqW, IPPacket, app.RelayHead(tidx).Bytes())
		gLog.dev("appid:%d asyncWriteRelay%d IPPacket len=%d", app.id, tidx, len(IPPacket))
	}
	app.seqW++ // NOTE(review): unsynchronized increment; assumes a single writer goroutine — confirm
	return err
}
// handleNodeDataMP hands a received node-data payload to the network-wide
// channel. seq and t are currently unused here — no per-sequence reordering
// happens at this layer. Blocks if GNetwork.nodeData is full.
func (app *p2pApp) handleNodeDataMP(seq uint64, data []byte, t *P2PTunnel) {
	GNetwork.nodeData <- data
}
// isReliable reports whether this app's overlay traffic requires reliable
// delivery. Currently hard-coded to true; the SrcPort-based variant is kept
// commented out for reference.
func (app *p2pApp) isReliable() bool {
	// return app.config.SrcPort != 0
	return true
}
// AvailableTunnel returns the first non-nil tunnel slot and its index,
// scanning from slot 0 (the direct tunnel) upward. Returns (nil, 0) when no
// slot is populated.
func (app *p2pApp) AvailableTunnel() (*P2PTunnel, int) {
	for idx, tun := range app.allTunnels[:app.tunnelNum] {
		if tun != nil {
			return tun, idx
		}
	}
	return nil, 0
}
// fastestTunnel picks the tunnel used for node data. A user-pinned slot
// (gConf.Network.specTunnel > 0) wins when that slot is populated; otherwise
// slot 1 is preferred over slot 0 when present.
// NOTE(review): despite the name, no RTT comparison happens here — confirm
// whether favoring slot 1 over the direct tunnel is intentional.
func (app *p2pApp) fastestTunnel() (*P2PTunnel, int) {
	if spec := gConf.Network.specTunnel; spec > 0 {
		if pinned := app.Tunnel(spec); pinned != nil {
			return pinned, spec
		}
	}
	if relay := app.Tunnel(1); relay != nil {
		return relay, 1
	}
	return app.Tunnel(0), 0
}
// ResetWindow zeroes the per-app read/write sequence counters and the
// per-tunnel unacked/write timestamps, discarding in-flight window state.
func (app *p2pApp) ResetWindow() {
	app.seqW, app.seqR = 0, 0
	for idx := 0; idx < app.tunnelNum; idx++ {
		app.unAckSeqEnd[idx].Store(0)
		app.unAckTs[idx].Store(0)
		app.writeTs[idx].Store(0)
	}
}
// Retry re-arms connection retries for every tunnel slot so the daemon loops
// reconnect immediately. With all=true it additionally expires the slot-0
// (direct) heartbeat, resets the direct-retry counters kept in the config,
// and discards the data window — forcing a full rebuild.
func (app *p2pApp) Retry(all bool) {
	gLog.d("appid:%d retry app %s", app.id, app.config.LogPeerNode())
	for idx := 0; idx < app.tunnelNum; idx++ {
		app.retryNum[idx] = 0
		app.nextRetryTime[idx] = time.Now()
	}
	if !all {
		return
	}
	// Backdate the heartbeat so the direct tunnel reads as disconnected.
	app.hbMtx.Lock()
	app.hbTime[0] = time.Now().Add(-TunnelHeartbeatTime * 3)
	app.hbMtx.Unlock()
	app.config.retryNum = 0
	app.config.nextRetryTime = time.Now()
	app.ResetWindow()
}
// StoreMessage queues an incoming message (header plus raw body) for a later
// ReadMessage call, stamped with its arrival time so stale entries can be
// discarded. Blocks if msgChan is full.
func (app *p2pApp) StoreMessage(head *openP2PHeader, body []byte) {
	app.msgChan <- appMsgCtx{head, body, time.Now()}
}
// ReadMessage waits up to timeout for a queued message whose header matches
// mainType/subType. Messages older than ReadMsgTimeout are dropped; messages
// of another type are requeued with a pacing sleep. Returns (nil, nil) on
// timeout. The returned body skips the first 8 bytes of the raw payload.
func (app *p2pApp) ReadMessage(mainType uint16, subType uint16, timeout time.Duration) (head *openP2PHeader, body []byte) {
	// One deadline for the whole call. The previous per-iteration
	// time.After(timeout) restarted the clock each time a message was
	// requeued or dropped, so the call could block far beyond timeout.
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			gLog.e("appid:%d app.ReadMessage error %d:%d timeout", app.id, mainType, subType)
			return
		case msg := <-app.msgChan:
			if time.Since(msg.ts) > ReadMsgTimeout {
				gLog.d("appid:%d app.ReadMessage error expired %d:%d", app.id, mainType, subType)
				continue
			}
			if msg.head.MainType != mainType || msg.head.SubType != subType {
				gLog.d("appid:%d app.ReadMessage error type %d:%d, requeue it", app.id, msg.head.MainType, msg.head.SubType)
				app.msgChan <- msg   // may block if the queue is full
				time.Sleep(time.Second) // pace the requeue loop
				continue
			}
			head = msg.head
			body = msg.body[8:] // skip 8-byte prefix; NOTE(review): assumes len(body) >= 8 — confirm
			return
		}
	}
}
+411 -203
View File
File diff suppressed because it is too large Load Diff
+363 -186
View File
@@ -2,6 +2,7 @@ package openp2p
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"errors"
@@ -14,22 +15,27 @@ import (
"time"
)
const WriteDataChanSize int = 3000
const WriteDataChanSize int = 8192
var buildTunnelMtx sync.Mutex
const (
StatusIdle = 0
StatusWriting = 1
)
type P2PTunnel struct {
conn underlay
hbTime time.Time
hbMtx sync.Mutex
whbTime time.Time
config AppConfig
localHoleAddr *net.UDPAddr // local hole address
remoteHoleAddr *net.UDPAddr // remote hole address
overlayConns sync.Map // both TCP and UDP
id uint64 // client side alloc rand.uint64 = server side
running bool
runMtx sync.Mutex
tunnelServer bool // different from underlayServer
coneLocalPort int
coneNatPort int
linkModeWeb string // use config.linkmode
@@ -40,30 +46,33 @@ type P2PTunnel struct {
func (t *P2PTunnel) initPort() {
t.running = true
localPort := int(rand.Uint32()%15000 + 50000) // if the process has bug, will add many upnp port. use specify p2p port by param
if t.config.linkMode == LinkModeTCP6 || t.config.linkMode == LinkModeTCP4 || t.config.linkMode == LinkModeIntranet {
localPort := int(rand.Uint32()%8192 + 1025) // if the process has bug, will add many upnp port. use specify p2p port by param
if t.config.linkMode == LinkModeTCP6 || t.config.linkMode == LinkModeTCP4 || t.config.linkMode == LinkModeUDP4 || t.config.linkMode == LinkModeIntranet {
t.coneLocalPort = gConf.Network.PublicIPPort
t.coneNatPort = gConf.Network.PublicIPPort // symmetric doesn't need coneNatPort
}
if t.config.linkMode == LinkModeUDPPunch {
// prepare one random cone hole manually
_, natPort, _ := natDetectUDP(gConf.Network.ServerHost, NATDetectPort1, localPort)
_, natPort, _ := natDetectUDP(gConf.Network.ServerIP, NATDetectPort1, localPort)
t.coneLocalPort = localPort
t.coneNatPort = natPort
}
if t.config.linkMode == LinkModeTCPPunch {
// prepare one random cone hole by system automatically
_, natPort, localPort2, _ := natDetectTCP(gConf.Network.ServerHost, NATDetectPort1, 0)
_, natPort, localPort2, _ := natDetectTCP(gConf.Network.ServerIP, NATDetectPort1, 0)
t.coneLocalPort = localPort2
t.coneNatPort = natPort
}
if t.config.linkMode == LinkModeTCP6 && compareVersion(t.config.peerVersion, IPv6PunchVersion) >= 0 {
t.coneLocalPort = localPort
t.coneNatPort = localPort
}
t.localHoleAddr = &net.UDPAddr{IP: net.ParseIP(gConf.Network.localIP), Port: t.coneLocalPort}
gLog.Printf(LvDEBUG, "prepare punching port %d:%d", t.coneLocalPort, t.coneNatPort)
gLog.d("prepare punching port %d:%d", t.coneLocalPort, t.coneNatPort)
}
func (t *P2PTunnel) connect() error {
gLog.Printf(LvDEBUG, "start p2pTunnel to %s ", t.config.LogPeerNode())
t.tunnelServer = false
gLog.d("start p2pTunnel to %s ", t.config.LogPeerNode())
appKey := uint64(0)
req := PushConnectReq{
Token: t.config.peerToken,
@@ -91,7 +100,7 @@ func (t *P2PTunnel) connect() error {
}
rsp := PushConnectRsp{}
if err := json.Unmarshal(body, &rsp); err != nil {
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(rsp), err)
gLog.e("wrong %v:%s", reflect.TypeOf(rsp), err)
return err
}
// gLog.Println(LevelINFO, rsp)
@@ -108,7 +117,7 @@ func (t *P2PTunnel) connect() error {
t.punchTs = rsp.PunchTs
err := t.start()
if err != nil {
gLog.Println(LvERROR, "handshake error:", err)
gLog.d("handshake error:%s", err)
}
return err
}
@@ -133,7 +142,7 @@ func (t *P2PTunnel) isActive() bool {
defer t.hbMtx.Unlock()
res := time.Now().Before(t.hbTime.Add(TunnelHeartbeatTime * 2))
if !res {
gLog.Printf(LvDEBUG, "%d tunnel isActive false", t.id)
gLog.d("%d tunnel isActive false", t.id)
}
return res
}
@@ -154,7 +163,7 @@ func (t *P2PTunnel) checkActive() bool {
t.hbMtx.Unlock()
time.Sleep(time.Millisecond * 100)
}
gLog.Printf(LvINFO, "checkActive %t. hbtime=%d", isActive, t.hbTime)
gLog.d("checkActive %t. hbtime=%d", isActive, t.hbTime)
return isActive
}
@@ -169,7 +178,7 @@ func (t *P2PTunnel) close() {
t.conn.Close()
}
GNetwork.allTunnels.Delete(t.id)
gLog.Printf(LvINFO, "%d p2ptunnel close %s ", t.id, t.config.LogPeerNode())
gLog.i("%d p2ptunnel close %s ", t.id, t.config.LogPeerNode())
}
func (t *P2PTunnel) start() error {
@@ -180,7 +189,7 @@ func (t *P2PTunnel) start() error {
}
err := t.connectUnderlay()
if err != nil {
gLog.Println(LvERROR, err)
gLog.d("connectUnderlay error:%s", err)
return err
}
return nil
@@ -195,16 +204,16 @@ func (t *P2PTunnel) handshake() error {
}
}
if compareVersion(t.config.peerVersion, SyncServerTimeVersion) < 0 {
gLog.Printf(LvDEBUG, "peer version %s less than %s", t.config.peerVersion, SyncServerTimeVersion)
gLog.d("peer version %s less than %s", t.config.peerVersion, SyncServerTimeVersion)
} else {
ts := time.Duration(int64(t.punchTs) + GNetwork.dt + GNetwork.ddtma*int64(time.Since(GNetwork.hbTime)+PunchTsDelay)/int64(NetworkHeartbeatTime) - time.Now().UnixNano())
if ts > PunchTsDelay || ts < 0 {
ts = PunchTsDelay
}
gLog.Printf(LvDEBUG, "sleep %d ms", ts/time.Millisecond)
gLog.d("sleep %d ms", ts/time.Millisecond)
time.Sleep(ts)
}
gLog.Println(LvDEBUG, "handshake to ", t.config.LogPeerNode())
gLog.d("handshake to %s", t.config.LogPeerNode())
var err error
if gConf.Network.natType == NATCone && t.config.peerNatType == NATCone {
err = handshakeC2C(t)
@@ -219,19 +228,25 @@ func (t *P2PTunnel) handshake() error {
return errors.New("unknown error")
}
if err != nil {
gLog.Println(LvERROR, "punch handshake error:", err)
gLog.d("punch handshake error:%s", err)
return err
}
gLog.Printf(LvDEBUG, "handshake to %s ok", t.config.LogPeerNode())
gLog.d("handshake to %s ok", t.config.LogPeerNode())
return nil
}
func (t *P2PTunnel) connectUnderlay() (err error) {
switch t.config.linkMode {
case LinkModeTCP6:
t.conn, err = t.connectUnderlayTCP6()
if compareVersion(t.config.peerVersion, IPv6PunchVersion) >= 0 {
t.conn, err = t.connectUnderlayTCP()
} else {
t.conn, err = t.connectUnderlayTCP6()
}
case LinkModeTCP4:
t.conn, err = t.connectUnderlayTCP()
case LinkModeUDP4:
t.conn, err = t.connectUnderlayUDP()
case LinkModeTCPPunch:
if gConf.Network.natType == NATSymmetric || t.config.peerNatType == NATSymmetric {
t.conn, err = t.connectUnderlayTCPSymmetric()
@@ -257,24 +272,35 @@ func (t *P2PTunnel) connectUnderlay() (err error) {
}
func (t *P2PTunnel) connectUnderlayUDP() (c underlay, err error) {
gLog.Printf(LvDEBUG, "connectUnderlayUDP %s start ", t.config.LogPeerNode())
defer gLog.Printf(LvDEBUG, "connectUnderlayUDP %s end ", t.config.LogPeerNode())
gLog.d("connectUnderlayUDP %s start ", t.config.LogPeerNode())
defer gLog.d("connectUnderlayUDP %s end ", t.config.LogPeerNode())
var ul underlay
underlayProtocol := t.config.UnderlayProtocol
if underlayProtocol == "" {
underlayProtocol = "quic"
}
if t.config.isUnderlayServer == 1 {
// TODO: move to a func
time.Sleep(time.Millisecond * 10) // punching udp port will need some times in some env
go GNetwork.push(t.config.PeerNode, MsgPushUnderlayConnect, nil)
if t.config.UnderlayProtocol == "kcp" {
ul, err = listenKCP(t.localHoleAddr.String(), TunnelIdleTimeout)
if t.config.linkMode == LinkModeUDP4 {
if v4l != nil {
ul = v4l.getUnderlay(t.id)
}
if ul == nil {
return nil, fmt.Errorf("listen UDP4 error")
}
gLog.d("UDP4 connection ok")
} else {
ul, err = listenQuic(t.localHoleAddr.String(), TunnelIdleTimeout)
if t.config.UnderlayProtocol == "kcp" {
ul, err = listenKCP(t.localHoleAddr.String(), TunnelIdleTimeout)
} else {
ul, err = listenQuic(t.localHoleAddr.String(), TunnelIdleTimeout)
}
}
if err != nil {
gLog.Printf(LvINFO, "listen %s error:%s", underlayProtocol, err)
gLog.i("listen %s error:%s", underlayProtocol, err)
return nil, err
}
@@ -284,53 +310,63 @@ func (t *P2PTunnel) connectUnderlayUDP() (c underlay, err error) {
return nil, fmt.Errorf("read start msg error:%s", err)
}
if buff != nil {
gLog.Println(LvDEBUG, string(buff))
gLog.d("handshake flag:%s", string(buff))
}
ul.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, []byte("OpenP2P,hello2"))
gLog.Printf(LvDEBUG, "%s connection ok", underlayProtocol)
gLog.d("%s connection ok", underlayProtocol)
return ul, nil
}
//else
conn, errL := net.ListenUDP("udp", t.localHoleAddr)
//client side
listenAddr := t.localHoleAddr
if t.config.linkMode == LinkModeUDP4 {
listenAddr = &net.UDPAddr{IP: net.ParseIP(gConf.Network.localIP), Port: 0}
t.remoteHoleAddr, err = net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", t.config.peerIP, t.config.peerConeNatPort))
if err != nil {
return nil, err
}
}
conn, errL := net.ListenUDP("udp", listenAddr)
if errL != nil {
time.Sleep(time.Millisecond * 10)
conn, errL = net.ListenUDP("udp", t.localHoleAddr)
conn, errL = net.ListenUDP("udp", listenAddr)
if errL != nil {
return nil, fmt.Errorf("%s listen error:%s", underlayProtocol, errL)
}
}
GNetwork.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, ReadMsgTimeout)
gLog.Printf(LvDEBUG, "%s dial to %s", underlayProtocol, t.remoteHoleAddr.String())
gLog.d("%s dial to %s", underlayProtocol, t.remoteHoleAddr.String())
if t.config.UnderlayProtocol == "kcp" {
ul, errL = dialKCP(conn, t.remoteHoleAddr, TunnelIdleTimeout)
ul, errL = dialKCP(conn, t.remoteHoleAddr, UnderlayConnectTimeout)
} else {
ul, errL = dialQuic(conn, t.remoteHoleAddr, TunnelIdleTimeout)
ul, errL = dialQuic(conn, t.remoteHoleAddr, UnderlayConnectTimeout)
}
if errL != nil {
return nil, fmt.Errorf("%s dial to %s error:%s", underlayProtocol, t.remoteHoleAddr.String(), errL)
}
handshakeBegin := time.Now()
ul.WriteBytes(MsgP2P, MsgTunnelHandshake, []byte("OpenP2P,hello"))
tidBuff := new(bytes.Buffer)
binary.Write(tidBuff, binary.LittleEndian, t.id)
ul.WriteBytes(MsgP2P, MsgTunnelHandshake, tidBuff.Bytes())
_, buff, err := ul.ReadBuffer() // TODO: kcp need timeout
if err != nil {
ul.Close()
return nil, fmt.Errorf("read MsgTunnelHandshake error:%s", err)
}
if buff != nil {
gLog.Println(LvDEBUG, string(buff))
gLog.d("handshake flag:%s", string(buff))
}
gLog.Println(LvINFO, "rtt=", time.Since(handshakeBegin))
gLog.Printf(LvINFO, "%s connection ok", underlayProtocol)
gLog.i("rtt=%dms", time.Since(handshakeBegin)/time.Millisecond)
gLog.i("%s connection ok", underlayProtocol)
t.linkModeWeb = LinkModeUDPPunch
return ul, nil
}
func (t *P2PTunnel) connectUnderlayTCP() (c underlay, err error) {
gLog.Printf(LvDEBUG, "connectUnderlayTCP %s start ", t.config.LogPeerNode())
defer gLog.Printf(LvDEBUG, "connectUnderlayTCP %s end ", t.config.LogPeerNode())
gLog.d("connectUnderlayTCP %s start ", t.config.LogPeerNode())
defer gLog.d("connectUnderlayTCP %s end ", t.config.LogPeerNode())
var ul underlay
peerIP := t.config.peerIP
if t.config.linkMode == LinkModeIntranet {
@@ -342,11 +378,15 @@ func (t *P2PTunnel) connectUnderlayTCP() (c underlay, err error) {
if err != nil {
return nil, fmt.Errorf("listen TCP error:%s", err)
}
gLog.Println(LvINFO, "TCP connection ok")
t.linkModeWeb = LinkModeIPv4
if t.config.linkMode == LinkModeIntranet {
t.linkModeWeb = LinkModeIntranet
}
if t.config.linkMode == LinkModeTCP6 {
t.linkModeWeb = LinkModeIPv6
}
gLog.i("%s TCP connection ok", t.linkModeWeb)
return ul, nil
}
@@ -355,49 +395,60 @@ func (t *P2PTunnel) connectUnderlayTCP() (c underlay, err error) {
GNetwork.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, ReadMsgTimeout)
} else { //tcp punch should sleep for punch the same time
if compareVersion(t.config.peerVersion, SyncServerTimeVersion) < 0 {
gLog.Printf(LvDEBUG, "peer version %s less than %s", t.config.peerVersion, SyncServerTimeVersion)
gLog.d("peer version %s less than %s", t.config.peerVersion, SyncServerTimeVersion)
} else {
ts := time.Duration(int64(t.punchTs) + GNetwork.dt + GNetwork.ddtma*int64(time.Since(GNetwork.hbTime)+PunchTsDelay)/int64(NetworkHeartbeatTime) - time.Now().UnixNano())
if ts > PunchTsDelay || ts < 0 {
ts = PunchTsDelay
}
gLog.Printf(LvDEBUG, "sleep %d ms", ts/time.Millisecond)
gLog.d("sleep %d ms", ts/time.Millisecond)
time.Sleep(ts)
}
}
ul, err = dialTCP(peerIP, t.config.peerConeNatPort, t.coneLocalPort, t.config.linkMode)
host := peerIP
if t.config.linkMode == LinkModeTCP6 {
host = t.config.peerIPv6
}
ul, err = dialTCP(host, t.config.peerConeNatPort, t.coneLocalPort, t.config.linkMode)
if err != nil {
return nil, fmt.Errorf("TCP dial to %s:%d error:%s", t.config.peerIP, t.config.peerConeNatPort, err)
return nil, fmt.Errorf("TCP dial to %s:%d error:%s", host, t.config.peerConeNatPort, err)
}
handshakeBegin := time.Now()
tidBuff := new(bytes.Buffer)
binary.Write(tidBuff, binary.LittleEndian, t.id)
// fake_http_hostname := "speedtest.cn"
// user_agent := "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
// ul.WriteMessage(MsgP2P, 100, fmt.Sprintf("GET / HTTP/1.1\r\nHost: %s\r\nUser-Agent: %s\r\nAccept: */*\r\n\r\n",
// fake_http_hostname, user_agent))
ul.WriteBytes(MsgP2P, MsgTunnelHandshake, tidBuff.Bytes()) // tunnelID
_, buff, err := ul.ReadBuffer()
if err != nil {
return nil, fmt.Errorf("read MsgTunnelHandshake error:%s", err)
}
if buff != nil {
gLog.Println(LvDEBUG, "hello ", string(buff))
gLog.d("hello %s", string(buff))
}
gLog.Println(LvINFO, "rtt=", time.Since(handshakeBegin))
gLog.Println(LvINFO, "TCP connection ok")
gLog.i("rtt=%dms", time.Since(handshakeBegin)/time.Millisecond)
t.linkModeWeb = LinkModeIPv4
if t.config.linkMode == LinkModeIntranet {
t.linkModeWeb = LinkModeIntranet
}
if t.config.linkMode == LinkModeTCP6 {
t.linkModeWeb = LinkModeIPv6
}
gLog.i("%s TCP connection ok", t.linkModeWeb)
return ul, nil
}
func (t *P2PTunnel) connectUnderlayTCPSymmetric() (c underlay, err error) {
gLog.Printf(LvDEBUG, "connectUnderlayTCPSymmetric %s start ", t.config.LogPeerNode())
defer gLog.Printf(LvDEBUG, "connectUnderlayTCPSymmetric %s end ", t.config.LogPeerNode())
gLog.d("connectUnderlayTCPSymmetric %s start ", t.config.LogPeerNode())
defer gLog.d("connectUnderlayTCPSymmetric %s end ", t.config.LogPeerNode())
ts := time.Duration(int64(t.punchTs) + GNetwork.dt + GNetwork.ddtma*int64(time.Since(GNetwork.hbTime)+PunchTsDelay)/int64(NetworkHeartbeatTime) - time.Now().UnixNano())
if ts > PunchTsDelay || ts < 0 {
ts = PunchTsDelay
}
gLog.Printf(LvDEBUG, "sleep %d ms", ts/time.Millisecond)
gLog.d("sleep %d ms", ts/time.Millisecond)
time.Sleep(ts)
startTime := time.Now()
t.linkModeWeb = LinkModeTCPPunch
@@ -424,8 +475,8 @@ func (t *P2PTunnel) connectUnderlayTCPSymmetric() (c underlay, err error) {
return
}
_, buff, err := ul.ReadBuffer()
if err != nil {
gLog.Println(LvDEBUG, "c2s ul.ReadBuffer error:", err)
if err != nil || buff == nil {
gLog.d("c2s ul.ReadBuffer error:%s", err)
return
}
req := P2PHandshakeReq{}
@@ -435,7 +486,7 @@ func (t *P2PTunnel) connectUnderlayTCPSymmetric() (c underlay, err error) {
if req.ID != t.id {
return
}
gLog.Printf(LvINFO, "handshakeS2C TCP ok. cost %dms", time.Since(startTime)/time.Millisecond)
gLog.i("handshakeS2C TCP ok. cost %dms", time.Since(startTime)/time.Millisecond)
gotCh <- ul
close(gotCh)
@@ -453,8 +504,8 @@ func (t *P2PTunnel) connectUnderlayTCPSymmetric() (c underlay, err error) {
}
_, buff, err := ul.ReadBuffer()
if err != nil {
gLog.Println(LvDEBUG, "s2c ul.ReadBuffer error:", err)
if err != nil || buff == nil {
gLog.d("s2c ul.ReadBuffer error:%s", err)
return
}
req := P2PHandshakeReq{}
@@ -485,91 +536,116 @@ func (t *P2PTunnel) connectUnderlayTCPSymmetric() (c underlay, err error) {
}
func (t *P2PTunnel) connectUnderlayTCP6() (c underlay, err error) {
gLog.Printf(LvDEBUG, "connectUnderlayTCP6 %s start ", t.config.LogPeerNode())
defer gLog.Printf(LvDEBUG, "connectUnderlayTCP6 %s end ", t.config.LogPeerNode())
var ul *underlayTCP6
gLog.d("connectUnderlayTCP6 %s start ", t.config.LogPeerNode())
defer gLog.d("connectUnderlayTCP6 %s end ", t.config.LogPeerNode())
tidBuff := new(bytes.Buffer)
binary.Write(tidBuff, binary.LittleEndian, t.id)
if t.config.isUnderlayServer == 1 {
GNetwork.push(t.config.PeerNode, MsgPushUnderlayConnect, nil)
ul, err = listenTCP6(t.coneNatPort, UnderlayConnectTimeout)
if err != nil {
// ul, err = listenTCP6(t.coneNatPort, UnderlayConnectTimeout)
tid := t.id
if compareVersion(t.config.peerVersion, PublicIPVersion) < 0 { // old version
ipBytes := net.ParseIP(t.config.peerIP).To4()
tid = uint64(binary.BigEndian.Uint32(ipBytes))
gLog.d("compatible with old client, use ip as key:%d", tid)
}
if v4l != nil {
c = v4l.getUnderlay(tid)
}
if c == nil {
return nil, fmt.Errorf("listen TCP6 error:%s", err)
}
_, buff, err := ul.ReadBuffer()
_, buff, err := c.ReadBuffer()
if err != nil {
return nil, fmt.Errorf("read start msg error:%s", err)
}
if buff != nil {
gLog.Println(LvDEBUG, string(buff))
gLog.d("handshake flag:%s", string(buff))
}
ul.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, []byte("OpenP2P,hello2"))
gLog.Println(LvDEBUG, "TCP6 connection ok")
c.WriteBytes(MsgP2P, MsgTunnelHandshake, tidBuff.Bytes()) // tunnelID
// ul.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, []byte("OpenP2P,hello2"))
gLog.d("TCP6 connection ok")
t.linkModeWeb = LinkModeIPv6
return ul, nil
return c, nil
}
//else
GNetwork.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, ReadMsgTimeout)
gLog.Println(LvDEBUG, "TCP6 dial to ", t.config.peerIPv6)
ul, err = dialTCP6(t.config.peerIPv6, t.config.peerConeNatPort)
gLog.d("TCP6 dial to %s", t.config.peerIPv6)
ul, err := dialTCP(fmt.Sprintf("[%s]", t.config.peerIPv6), t.config.peerConeNatPort, 0, LinkModeTCP6)
if err != nil || ul == nil {
return nil, fmt.Errorf("TCP6 dial to %s:%d error:%s", t.config.peerIPv6, t.config.peerConeNatPort, err)
}
handshakeBegin := time.Now()
ul.WriteBytes(MsgP2P, MsgTunnelHandshake, []byte("OpenP2P,hello"))
ul.WriteBytes(MsgP2P, MsgTunnelHandshake, tidBuff.Bytes()) // tunnelID
// ul.WriteBytes(MsgP2P, MsgTunnelHandshake, []byte("OpenP2P,hello"))
_, buff, errR := ul.ReadBuffer()
if errR != nil {
return nil, fmt.Errorf("read MsgTunnelHandshake error:%s", errR)
}
if buff != nil {
gLog.Println(LvDEBUG, string(buff))
gLog.d("handshake flag:%s", string(buff))
}
gLog.Println(LvINFO, "rtt=", time.Since(handshakeBegin))
gLog.Println(LvINFO, "TCP6 connection ok")
gLog.i("rtt=%dms", time.Since(handshakeBegin))
gLog.i("TCP6 connection ok")
t.linkModeWeb = LinkModeIPv6
return ul, nil
}
func (t *P2PTunnel) readLoop() {
decryptData := make([]byte, ReadBuffLen+PaddingSize) // 16 bytes for padding
gLog.Printf(LvDEBUG, "%d tunnel readloop start", t.id)
gLog.d("%d tunnel readloop start", t.id)
for t.isRuning() {
t.conn.SetReadDeadline(time.Now().Add(TunnelHeartbeatTime * 2))
head, body, err := t.conn.ReadBuffer()
if err != nil {
if err != nil || head == nil {
if t.isRuning() {
gLog.Printf(LvERROR, "%d tunnel read error:%s", t.id, err)
gLog.w("%d tunnel read error:%s", t.id, err)
}
break
}
if head.MainType != MsgP2P {
gLog.Printf(LvWARN, "%d head.MainType != MsgP2P", t.id)
gLog.w("%d head.MainType(%d) != MsgP2P", head.MainType, t.id)
continue
}
// gLog.d("%d tunnel read %d:%d len=%d", t.id, head.MainType, head.SubType, head.DataLen)
// TODO: replace some case implement to functions
switch head.SubType {
case MsgTunnelHeartbeat:
t.hbMtx.Lock()
t.hbTime = time.Now()
t.hbMtx.Unlock()
t.conn.WriteBytes(MsgP2P, MsgTunnelHeartbeatAck, nil)
gLog.Printf(LvDev, "%d read tunnel heartbeat", t.id)
memAppPeerID := new(bytes.Buffer)
binary.Write(memAppPeerID, binary.LittleEndian, gConf.Network.nodeID)
t.conn.WriteBytes(MsgP2P, MsgTunnelHeartbeatAck, memAppPeerID.Bytes())
gLog.dev("%d read tunnel heartbeat", t.id)
case MsgTunnelHeartbeatAck:
t.hbMtx.Lock()
t.hbTime = time.Now()
t.hbMtx.Unlock()
gLog.Printf(LvDev, "%d read tunnel heartbeat ack", t.id)
if head.DataLen >= 8 {
memAppPeerID := binary.LittleEndian.Uint64(body[:8])
existApp, appok := GNetwork.apps.Load(memAppPeerID)
if appok {
app := existApp.(*p2pApp)
app.rtt[0].Store(int32(time.Since(t.whbTime) / time.Millisecond))
}
}
gLog.dev("%d read tunnel heartbeat ack, rtt=%dms", t.id, time.Since(t.whbTime)/time.Millisecond)
case MsgOverlayData:
if len(body) < overlayHeaderSize {
gLog.Printf(LvWARN, "%d len(body) < overlayHeaderSize", t.id)
gLog.w("%d len(body) < overlayHeaderSize", t.id)
continue
}
overlayID := binary.LittleEndian.Uint64(body[:8])
gLog.Printf(LvDev, "%d tunnel read overlay data %d bodylen=%d", t.id, overlayID, head.DataLen)
s, ok := t.overlayConns.Load(overlayID)
gLog.dev("%d tunnel read overlay data %d bodylen=%d", t.id, overlayID, head.DataLen)
s, ok := overlayConns.Load(overlayID)
if !ok {
// debug level, when overlay connection closed, always has some packet not found tunnel
gLog.Printf(LvDEBUG, "%d tunnel not found overlay connection %d", t.id, overlayID)
gLog.d("%d tunnel not found overlay connection %d", t.id, overlayID)
continue
}
overlayConn, ok := s.(*overlayConn)
@@ -578,101 +654,86 @@ func (t *P2PTunnel) readLoop() {
}
payload := body[overlayHeaderSize:]
var err error
if overlayConn.appKey != 0 {
payload, _ = decryptBytes(overlayConn.appKeyBytes, decryptData, body[overlayHeaderSize:], int(head.DataLen-uint32(overlayHeaderSize)))
if overlayConn.app.key != 0 {
payload, _ = decryptBytes(overlayConn.app.appKeyBytes, decryptData, body[overlayHeaderSize:], int(head.DataLen-uint32(overlayHeaderSize)))
}
_, err = overlayConn.Write(payload)
if err != nil {
gLog.Println(LvERROR, "overlay write error:", err)
gLog.e("overlay write error:%s", err)
}
case MsgNodeData:
case MsgNodeDataMP:
t.handleNodeDataMP(head, body)
case MsgNodeDataMPAck:
t.handleNodeDataMPAck(head, body)
case MsgNodeData: // unused
t.handleNodeData(head, body, false)
case MsgRelayNodeData:
case MsgRelayNodeData: // unused
t.handleNodeData(head, body, true)
case MsgRelayData:
if len(body) < 8 {
continue
}
tunnelID := binary.LittleEndian.Uint64(body[:8])
gLog.Printf(LvDev, "relay data to %d, len=%d", tunnelID, head.DataLen-RelayHeaderSize)
gLog.dev("relay data to %d, len=%d", tunnelID, head.DataLen-RelayHeaderSize)
if err := GNetwork.relay(tunnelID, body[RelayHeaderSize:]); err != nil {
gLog.Printf(LvERROR, "%s:%d relay to %d len=%d error:%s", t.config.LogPeerNode(), t.id, tunnelID, len(body), ErrRelayTunnelNotFound)
gLog.d("%s:%d relay to %d len=%d error:%s", t.config.LogPeerNode(), t.id, tunnelID, len(body), ErrRelayTunnelNotFound)
}
case MsgRelayHeartbeat:
case MsgRelayHeartbeat: // only client side will write relay heartbeat, different with tunnel heartbeat
req := RelayHeartbeat{}
if err := json.Unmarshal(body, &req); err != nil {
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
gLog.e("wrong %v:%s", reflect.TypeOf(req), err)
continue
}
// TODO: debug relay heartbeat
gLog.Printf(LvDEBUG, "read MsgRelayHeartbeat from rtid:%d,appid:%d", req.RelayTunnelID, req.AppID)
gLog.dev("read MsgRelayHeartbeat from rtid:%d,appid:%d", req.RelayTunnelID, req.AppID)
// update app hbtime
GNetwork.updateAppHeartbeat(req.AppID)
GNetwork.updateAppHeartbeat(req.AppID, req.RelayTunnelID, true)
req.From = gConf.Network.Node
t.WriteMessage(req.RelayTunnelID, MsgP2P, MsgRelayHeartbeatAck, &req)
case MsgRelayHeartbeatAck:
req := RelayHeartbeat{}
err := json.Unmarshal(body, &req)
if err != nil {
gLog.Printf(LvERROR, "wrong RelayHeartbeat:%s", err)
gLog.e("wrong RelayHeartbeat:%s", err)
continue
}
// TODO: debug relay heartbeat
gLog.Printf(LvDEBUG, "read MsgRelayHeartbeatAck to appid:%d", req.AppID)
GNetwork.updateAppHeartbeat(req.AppID)
case MsgOverlayConnectReq:
req := OverlayConnectReq{}
if err := json.Unmarshal(body, &req); err != nil {
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
continue
}
// app connect only accept token(not relay totp token), avoid someone using the share relay node's token
if req.Token != gConf.Network.Token {
gLog.Println(LvERROR, "Access Denied:", req.Token)
continue
}
overlayID := req.ID
gLog.Printf(LvDEBUG, "App:%d overlayID:%d connect %s:%d", req.AppID, overlayID, req.DstIP, req.DstPort)
oConn := overlayConn{
tunnel: t,
id: overlayID,
isClient: false,
rtid: req.RelayTunnelID,
appID: req.AppID,
appKey: GetKey(req.AppID),
running: true,
}
if req.Protocol == "udp" {
oConn.connUDP, err = net.DialUDP("udp", nil, &net.UDPAddr{IP: net.ParseIP(req.DstIP), Port: req.DstPort})
} else {
oConn.connTCP, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", req.DstIP, req.DstPort), ReadMsgTimeout)
}
gLog.dev("read MsgRelayHeartbeatAck to appid:%d", req.AppID)
GNetwork.updateAppHeartbeat(req.AppID, req.RelayTunnelID, false)
req.From = gConf.Network.Node
t.WriteMessage(req.RelayTunnelID2, MsgP2P, MsgRelayHeartbeatAck2, &req)
case MsgRelayHeartbeatAck2:
req := RelayHeartbeat{}
err := json.Unmarshal(body, &req)
if err != nil {
gLog.Println(LvERROR, err)
gLog.e("wrong RelayHeartbeat:%s", err)
continue
}
// calc key bytes for encrypt
if oConn.appKey != 0 {
encryptKey := make([]byte, AESKeySize)
binary.LittleEndian.PutUint64(encryptKey, oConn.appKey)
binary.LittleEndian.PutUint64(encryptKey[8:], oConn.appKey)
oConn.appKeyBytes = encryptKey
gLog.dev("read MsgRelayHeartbeatAck2 to appid:%d", req.AppID)
GNetwork.updateAppHeartbeat(req.AppID, req.RelayTunnelID, false)
case MsgOverlayConnectReq: // TODO: send this msg withAppID, and app handle it
// app connect only accept token(not relay totp token), avoid someone using the share relay node's token
// targetApp := GNetwork.GetAPPByID(req.AppID)
t.handleOverlayConnectReq(body, err)
case MsgOverlayConnectRsp:
appID := binary.LittleEndian.Uint64(body[:8])
i, ok := GNetwork.apps.Load(appID)
if !ok {
gLog.e("MsgOverlayConnectRsp app not found %d", appID)
return
}
t.overlayConns.Store(oConn.id, &oConn)
go oConn.run()
app := i.(*p2pApp)
// ndmp := NodeDataMPHeader{fromNodeID: gConf.Network.nodeID, seq: seq}
app.StoreMessage(head, body)
case MsgOverlayDisconnectReq:
req := OverlayDisconnectReq{}
if err := json.Unmarshal(body, &req); err != nil {
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
gLog.e("wrong %v:%s", reflect.TypeOf(req), err)
continue
}
overlayID := req.ID
gLog.Printf(LvDEBUG, "%d disconnect overlay connection %d", t.id, overlayID)
i, ok := t.overlayConns.Load(overlayID)
gLog.d("%d disconnect overlay connection %d", t.id, overlayID)
i, ok := overlayConns.Load(overlayID)
if ok {
oConn := i.(*overlayConn)
oConn.Close()
@@ -681,7 +742,55 @@ func (t *P2PTunnel) readLoop() {
}
}
t.close()
gLog.Printf(LvDEBUG, "%d tunnel readloop end", t.id)
gLog.d("%d tunnel readloop end", t.id)
}
func (*P2PTunnel) handleOverlayConnectReq(body []byte, err error) {
req := OverlayConnectReq{}
if err := json.Unmarshal(body, &req); err != nil {
gLog.e("wrong %v:%s", reflect.TypeOf(req), err)
return
}
if req.Token != gConf.Network.Token {
gLog.e("Access Denied,token=%d", req.Token)
return
}
overlayID := req.ID
gLog.d("App:%d overlayID:%d connect %s:%d", req.AppID, overlayID, req.DstIP, req.DstPort)
i, ok := GNetwork.apps.Load(req.AppID)
if !ok {
return
}
targetApp := i.(*p2pApp)
oConn := overlayConn{
app: targetApp,
id: overlayID,
isClient: false,
running: true,
}
// connect local service should use sys dns
sysResolver := &net.Resolver{}
ips, err := sysResolver.LookupIP(context.Background(), "ip4", req.DstIP)
if err != nil {
gLog.e("handleOverlayConnectReq dial error:%s", err)
return
}
if req.Protocol == "udp" {
oConn.connUDP, err = net.DialUDP("udp", nil, &net.UDPAddr{IP: ips[0], Port: req.DstPort})
} else {
oConn.connTCP, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ips[0].String(), req.DstPort), ReadMsgTimeout)
}
if err != nil {
gLog.e("handleOverlayConnectReq dial error:%s", err)
return
}
overlayConns.Store(oConn.id, &oConn)
go oConn.run()
targetApp.WriteMessageWithAppID(MsgP2P, MsgOverlayConnectRsp, nil)
}
func (t *P2PTunnel) writeLoop() {
@@ -690,29 +799,39 @@ func (t *P2PTunnel) writeLoop() {
t.hbMtx.Unlock()
tc := time.NewTicker(TunnelHeartbeatTime)
defer tc.Stop()
gLog.Printf(LvDEBUG, "%s:%d tunnel writeLoop start", t.config.LogPeerNode(), t.id)
defer gLog.Printf(LvDEBUG, "%s:%d tunnel writeLoop end", t.config.LogPeerNode(), t.id)
gLog.d("%s:%d tunnel writeLoop start", t.config.LogPeerNode(), t.id)
defer gLog.d("%s:%d tunnel writeLoop end", t.config.LogPeerNode(), t.id)
writeHb := func() {
// tunnel send
t.whbTime = time.Now()
err := t.conn.WriteBytes(MsgP2P, MsgTunnelHeartbeat, nil)
if err != nil {
gLog.w("%d write tunnel heartbeat error %s", t.id, err)
t.close()
return
}
gLog.dev("%d write tunnel heartbeat ok", t.id)
}
writeHb()
for t.isRuning() {
select {
case buff := <-t.writeDataSmall:
t.conn.WriteBuffer(buff)
// gLog.Printf(LvDEBUG, "write icmp %d", time.Now().Unix())
// gLog.d("write icmp %d", time.Now().Unix())
default:
select {
case buff := <-t.writeDataSmall:
t.conn.WriteBuffer(buff)
// gLog.Printf(LvDEBUG, "write icmp %d", time.Now().Unix())
// gLog.d("write icmp %d", time.Now().Unix())
case buff := <-t.writeData:
t.conn.WriteBuffer(buff)
case <-tc.C:
// tunnel send
err := t.conn.WriteBytes(MsgP2P, MsgTunnelHeartbeat, nil)
err := t.conn.WriteBuffer(buff)
if err != nil {
gLog.Printf(LvERROR, "%d write tunnel heartbeat error %s", t.id, err)
gLog.e("%d write tunnel error %s", t.id, err)
t.close()
return
}
gLog.Printf(LvDev, "%d write tunnel heartbeat ok", t.id)
case <-tc.C:
writeHb()
}
}
}
@@ -742,54 +861,74 @@ func (t *P2PTunnel) listen() error {
}
GNetwork.push(t.config.PeerNode, MsgPushConnectRsp, rsp)
gLog.Printf(LvDEBUG, "p2ptunnel wait for connecting")
t.tunnelServer = true
gLog.d("p2ptunnel wait for connecting")
return t.start()
}
func (t *P2PTunnel) closeOverlayConns(appID uint64) {
t.overlayConns.Range(func(_, i interface{}) bool {
oConn := i.(*overlayConn)
if oConn.appID == appID {
oConn.Close()
}
return true
})
}
func (t *P2PTunnel) handleNodeData(head *openP2PHeader, body []byte, isRelay bool) {
gLog.Printf(LvDev, "%d tunnel read node data bodylen=%d, relay=%t", t.id, head.DataLen, isRelay)
gLog.dev("%d tunnel read node data bodylen=%d, relay=%t", t.id, head.DataLen, isRelay)
ch := GNetwork.nodeData
// if body[9] == 1 { // TODO: deal relay
// ch = GNetwork.nodeDataSmall
// gLog.Printf(LvDEBUG, "read icmp %d", time.Now().Unix())
// gLog.d("read icmp %d", time.Now().Unix())
// }
if isRelay {
fromPeerID := binary.LittleEndian.Uint64(body[:8])
ch <- &NodeData{fromPeerID, body[8:]} // TODO: cache peerNodeID; encrypt/decrypt
// fromPeerID := binary.LittleEndian.Uint64(body[:8]) // unused
ch <- body[8:] // TODO: cache peerNodeID; encrypt/decrypt
} else {
ch <- &NodeData{NodeNameToID(t.config.PeerNode), body} // TODO: cache peerNodeID; encrypt/decrypt
ch <- body // TODO: cache peerNodeID; encrypt/decrypt
}
}
func (t *P2PTunnel) asyncWriteNodeData(mainType, subType uint16, data []byte) {
writeBytes := append(encodeHeader(mainType, subType, uint32(len(data))), data...)
// if len(data) < 192 {
if data[9] == 1 { // icmp
select {
case t.writeDataSmall <- writeBytes:
// gLog.Printf(LvWARN, "%s:%d t.writeDataSmall write %d", t.config.PeerNode, t.id, len(t.writeDataSmall))
default:
gLog.Printf(LvWARN, "%s:%d t.writeDataSmall is full, drop it", t.config.LogPeerNode(), t.id)
}
} else {
select {
case t.writeData <- writeBytes:
default:
gLog.Printf(LvWARN, "%s:%d t.writeData is full, drop it", t.config.LogPeerNode(), t.id)
}
func (t *P2PTunnel) handleNodeDataMP(head *openP2PHeader, body []byte) {
gLog.dev("%s tid:%d tunnel read node data mp bodylen=%d", t.config.LogPeerNode(), t.id, head.DataLen) // Debug
if head.DataLen < 16 {
return
}
// TODO: reorder write tun
fromNodeID := binary.LittleEndian.Uint64(body[:8])
seq := binary.LittleEndian.Uint64(body[8:16])
i, ok := GNetwork.apps.Load(fromNodeID)
if !ok {
gLog.e("handleNodeDataMP peer not found,from=%s nodeID=%d, seq=%d", t.config.LogPeerNode(), fromNodeID, seq)
return
}
app := i.(*p2pApp)
// ndmp := NodeDataMPHeader{fromNodeID: gConf.Network.nodeID, seq: seq}
app.handleNodeDataMP(seq, body[16:], t)
}
func (t *P2PTunnel) handleNodeDataMPAck(head *openP2PHeader, body []byte) {
}
func (t *P2PTunnel) asyncWriteNodeData(id uint64, seq uint64, IPPacket []byte, relayHead []byte) {
all := new(bytes.Buffer)
if relayHead != nil {
all.Write(encodeHeader(MsgP2P, MsgRelayData, uint32(openP2PHeaderSize+len(relayHead)+16+len(IPPacket))))
all.Write(relayHead)
}
all.Write(encodeHeader(MsgP2P, MsgNodeDataMP, 16+uint32(len(IPPacket)))) // id+seq=16 bytes
binary.Write(all, binary.LittleEndian, id)
binary.Write(all, binary.LittleEndian, seq)
all.Write(IPPacket)
// if len(data) < 192 {
if IPPacket[9] == 1 { // icmp
select {
case t.writeDataSmall <- all.Bytes():
// gLog.w("%s:%d t.writeDataSmall write %d", t.config.PeerNode, t.id, len(t.writeDataSmall))
default:
gLog.w("%s:%d t.writeDataSmall is full, drop it", t.config.LogPeerNode(), t.id)
}
} else {
t.writeData <- all.Bytes()
// select {
// case t.writeData <- writeBytes:
// default:
// gLog.w("%s:%d t.writeData is full, drop it", t.config.LogPeerNode(), t.id)
// }
}
}
func (t *P2PTunnel) WriteMessage(rtid uint64, mainType uint16, subType uint16, req interface{}) error {
@@ -803,3 +942,41 @@ func (t *P2PTunnel) WriteMessage(rtid uint64, mainType uint16, subType uint16, r
return t.conn.WriteBytes(mainType, MsgRelayData, msgWithHead)
}
func (t *P2PTunnel) WriteMessageWithAppID(appID uint64, rtid uint64, mainType uint16, subType uint16, req interface{}) error {
data, err := json.Marshal(req)
if err != nil {
return err
}
head := new(bytes.Buffer)
binary.Write(head, binary.LittleEndian, appID)
msgWithAppID := append(head.Bytes(), data...)
if rtid == 0 {
return t.conn.WriteBytes(mainType, subType, msgWithAppID)
}
relayHead := new(bytes.Buffer)
binary.Write(relayHead, binary.LittleEndian, rtid)
msg, _ := newMessageWithBuff(mainType, subType, msgWithAppID)
msgWithHead := append(relayHead.Bytes(), msg...)
return t.conn.WriteBytes(mainType, MsgRelayData, msgWithHead)
}
func (t *P2PTunnel) WriteBytes(rtid uint64, mainType uint16, subType uint16, data []byte) error {
if rtid == 0 {
return t.conn.WriteBytes(mainType, subType, data)
}
all := new(bytes.Buffer)
binary.Write(all, binary.LittleEndian, rtid)
all.Write(encodeHeader(mainType, subType, uint32(len(data))))
all.Write(data)
return t.conn.WriteBytes(mainType, MsgRelayData, all.Bytes())
}
// func (t *P2PTunnel) RTT() int {
// if t.isWriting.Load() && t.rtt.Load() < int32(time.Now().Add(time.Duration(-t.writingTs.Load())).Unix()/int64(time.Millisecond)) {
// return int(time.Now().Add(time.Duration(-t.writingTs.Load())).Unix() / int64(time.Millisecond))
// }
// return int(t.rtt.Load())
// }
+17 -12
View File
@@ -10,7 +10,7 @@ import (
"time"
)
const OpenP2PVersion = "3.21.12"
const OpenP2PVersion = "3.24.28"
const ProductName string = "openp2p"
const LeastSupportVersion = "3.0.0"
const SyncServerTimeVersion = "3.9.0"
@@ -18,13 +18,14 @@ const SymmetricSimultaneouslySendVersion = "3.10.7"
const PublicIPVersion = "3.11.2"
const SupportIntranetVersion = "3.14.5"
const SupportDualTunnelVersion = "3.15.5"
const IPv6PunchVersion = "3.24.9"
const SupportUDP4DirectVersion = "3.24.16"
const (
IfconfigPort1 = 27180
IfconfigPort2 = 27181
NATDetectPort1 = 27180
NATDetectPort2 = 27181
WsPort = 27183
NATDetectPort1 = 27182
NATDetectPort2 = 27183
UDPPort1 = 27182
UDPPort2 = 27183
)
type openP2PHeader struct {
@@ -48,6 +49,12 @@ type overlayHeader struct {
id uint64
}
type NodeDataMPAck struct {
FromNodeID uint64
Seq uint64
Delay uint32 // delay write mergeack ms
}
var overlayHeaderSize = binary.Size(overlayHeader{})
func decodeHeader(data []byte) (*openP2PHeader, error) {
@@ -111,6 +118,7 @@ const (
MsgPushCheckRemoteService = 19
MsgPushSpecTunnel = 20
MsgPushReportHeap = 21
MsgPushSDWanRefresh = 22
)
// MsgP2P sub type message
@@ -172,16 +180,12 @@ const (
MaxRetry = 10
Cone2ConeTCPPunchMaxRetry = 1
Cone2ConeUDPPunchMaxRetry = 1
PublicIPEchoTimeout = time.Second * 3
PublicIPEchoTimeout = time.Second * 5
NatDetectTimeout = time.Second * 5
UDPReadTimeout = time.Second * 5
ClientAPITimeout = time.Second * 10
UnderlayConnectTimeout = time.Second * 10
MaxDirectTry = 3
// sdwan
ReadTunBuffSize = 1600
ReadTunBuffNum = 10
)
// NATNone has public ip
@@ -349,7 +353,8 @@ type TunnelMsg struct {
}
type RelayNodeReq struct {
PeerNode string `json:"peerNode,omitempty"`
PeerNode string `json:"peerNode,omitempty"`
ExcludeNodes string `json:"excludeNodes,omitempty"` //TODO: add exclude ip
}
type RelayNodeRsp struct {
+95 -49
View File
@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"net"
"reflect"
"runtime"
"strings"
"sync"
@@ -43,8 +44,8 @@ type sdwanNode struct {
}
type p2pSDWAN struct {
nodeName string
tun *optun
tunErr string
sysRoute sync.Map // ip:sdwanNode
subnet *net.IPNet
gateway net.IP
@@ -53,36 +54,37 @@ type p2pSDWAN struct {
}
func (s *p2pSDWAN) reset() {
gLog.Println(LvINFO, "reset sdwan when network disconnected")
gLog.i("reset sdwan when network disconnected")
// clear sysroute
delRoutesByGateway(s.gateway.String())
// clear internel route
s.internalRoute = NewIPTree("")
// clear p2papp
for _, node := range gConf.getAddNodes() {
for _, node := range gConf.getSDWAN().Nodes {
gConf.delete(AppConfig{SrcPort: 0, PeerNode: node.Name})
GNetwork.DeleteApp(AppConfig{SrcPort: 0, PeerNode: node.Name})
}
gConf.resetSDWAN()
}
func (s *p2pSDWAN) init(name string) error {
func (s *p2pSDWAN) init() error {
gConf.Network.previousIP = gConf.Network.publicIP
if gConf.getSDWAN().Gateway == "" {
gLog.Println(LvDEBUG, "sdwan init: not in sdwan clear all ")
gLog.d("sdwan init: not in sdwan clear all ")
}
if s.internalRoute == nil {
s.internalRoute = NewIPTree("")
}
s.nodeName = name
if gw, sn, err := net.ParseCIDR(gConf.getSDWAN().Gateway); err == nil { // preserve old gateway
s.gateway = gw
s.subnet = sn
}
for _, node := range gConf.getDelNodes() {
gLog.Println(LvDEBUG, "sdwan init: deal deleted node: ", node.Name)
gLog.Printf(LvDEBUG, "sdwan init: delRoute: %s, %s ", node.IP, s.gateway.String())
delRoute(node.IP, s.gateway.String())
gLog.d("sdwan init: deal deleted node: %s", node.Name)
gLog.d("sdwan init: delRoute: %s, %s ", node.IP, s.gateway.String())
// delRoute(node.IP, s.gateway.String()) // TODO: seems no need delelte each node
s.internalRoute.Del(node.IP, node.IP)
ipNum, _ := inetAtoN(node.IP)
s.sysRoute.Delete(ipNum)
@@ -106,26 +108,26 @@ func (s *p2pSDWAN) init(name string) error {
}
s.internalRoute.Del(minIP.String(), maxIP.String())
delRoute(ipnet.String(), s.gateway.String())
gLog.Printf(LvDEBUG, "sdwan init: resource delRoute: %s, %s ", ipnet.String(), s.gateway.String())
gLog.d("sdwan init: resource delRoute: %s, %s ", ipnet.String(), s.gateway.String())
}
}
for _, node := range gConf.getAddNodes() {
gLog.Println(LvDEBUG, "sdwan init: deal add node: ", node.Name)
gLog.d("sdwan init: deal add node: %s", node.Name)
ipNet := &net.IPNet{
IP: net.ParseIP(node.IP),
Mask: s.subnet.Mask,
}
if node.Name == s.nodeName {
if node.Name == gConf.Network.Node {
s.virtualIP = ipNet
gLog.Println(LvINFO, "sdwan init: start tun ", ipNet.String())
gLog.i("sdwan init: start tun %s", ipNet.String())
err := s.StartTun()
if err != nil {
gLog.Println(LvERROR, "sdwan init: start tun error:", err)
gLog.e("sdwan init: start tun error:%s", err)
return err
}
gLog.Println(LvINFO, "sdwan init: start tun ok")
gLog.i("sdwan init: start tun ok")
allowTunForward()
gLog.Printf(LvDEBUG, "sdwan init: addRoute %s %s %s", s.subnet.String(), s.gateway.String(), s.tun.tunName)
gLog.d("sdwan init: addRoute %s %s %s", s.subnet.String(), s.gateway.String(), s.tun.tunName)
addRoute(s.subnet.String(), s.gateway.String(), s.tun.tunName)
// addRoute("255.255.255.255/32", s.gateway.String(), s.tun.tunName) // for broadcast
// addRoute("224.0.0.0/4", s.gateway.String(), s.tun.tunName) // for multicast
@@ -140,11 +142,11 @@ func (s *p2pSDWAN) init(name string) error {
s.internalRoute.AddIntIP(ip, ip, &sdwanNode{name: node.Name, id: NodeNameToID(node.Name)})
}
for _, node := range gConf.getAddNodes() {
if node.Name == s.nodeName { // not deal resource itself
if node.Name == gConf.Network.Node { // not deal resource itself
continue
}
if len(node.Resource) > 0 {
gLog.Printf(LvINFO, "sdwan init: deal add node: %s resource: %s", node.Name, node.Resource)
gLog.i("sdwan init: deal add node: %s resource: %s", node.Name, node.Resource)
arr := strings.Split(node.Resource, ",")
for _, r := range arr {
// add internal route
@@ -154,17 +156,17 @@ func (s *p2pSDWAN) init(name string) error {
continue
}
if ipnet.Contains(net.ParseIP(gConf.Network.localIP)) { // local ip and resource in the same lan
gLog.Printf(LvDEBUG, "sdwan init: local ip %s in this resource %s, ignore", gConf.Network.localIP, ipnet.IP.String())
gLog.d("sdwan init: local ip %s in this resource %s, ignore", gConf.Network.localIP, ipnet.IP.String())
continue
}
// local net could access this single ip
if ipnet.Mask[0] == 255 && ipnet.Mask[1] == 255 && ipnet.Mask[2] == 255 && ipnet.Mask[3] == 255 {
gLog.Printf(LvDEBUG, "sdwan init: ping %s start", ipnet.IP.String())
gLog.d("sdwan init: ping %s start", ipnet.IP.String())
if _, err := Ping(ipnet.IP.String()); err == nil {
gLog.Printf(LvDEBUG, "sdwan init: ping %s ok, ignore this resource", ipnet.IP.String())
gLog.d("sdwan init: ping %s ok, ignore this resource", ipnet.IP.String())
continue
}
gLog.Printf(LvDEBUG, "sdwan init: ping %s failed", ipnet.IP.String())
gLog.d("sdwan init: ping %s failed", ipnet.IP.String())
}
minIP := ipnet.IP
maxIP := make(net.IP, len(minIP))
@@ -174,13 +176,13 @@ func (s *p2pSDWAN) init(name string) error {
}
s.internalRoute.Add(minIP.String(), maxIP.String(), &sdwanNode{name: node.Name, id: NodeNameToID(node.Name)})
// add sys route
gLog.Printf(LvDEBUG, "sdwan init: addRoute %s %s %s", ipnet.String(), s.gateway.String(), s.tun.tunName)
gLog.d("sdwan init: addRoute %s %s %s", ipnet.String(), s.gateway.String(), s.tun.tunName)
addRoute(ipnet.String(), s.gateway.String(), s.tun.tunName)
}
}
}
gConf.retryAllMemApp()
gLog.Printf(LvINFO, "sdwan init ok")
gLog.i("sdwan init ok")
return nil
}
@@ -193,28 +195,28 @@ func (s *p2pSDWAN) run() {
}
func (s *p2pSDWAN) readNodeLoop() {
gLog.Printf(LvDEBUG, "sdwan readNodeLoop start")
defer gLog.Printf(LvDEBUG, "sdwan readNodeLoop end")
gLog.d("sdwan readNodeLoop start")
defer gLog.d("sdwan readNodeLoop end")
writeBuff := make([][]byte, 1)
for {
nd := GNetwork.ReadNode(time.Second * 10) // TODO: read multi packet
if nd == nil {
gLog.Printf(LvDev, "waiting for node data")
gLog.dev("waiting for node data")
continue
}
head := PacketHeader{}
parseHeader(nd.Data, &head)
gLog.Printf(LvDev, "write tun dst ip=%s,len=%d", net.IP{byte(head.dst >> 24), byte(head.dst >> 16), byte(head.dst >> 8), byte(head.dst)}.String(), len(nd.Data))
parseHeader(nd, &head)
gLog.dev("write tun dst ip=%s,len=%d", net.IP{byte(head.dst >> 24), byte(head.dst >> 16), byte(head.dst >> 8), byte(head.dst)}.String(), len(nd))
if PIHeaderSize == 0 {
writeBuff[0] = nd.Data
writeBuff[0] = nd
} else {
writeBuff[0] = make([]byte, PIHeaderSize+len(nd.Data))
copy(writeBuff[0][PIHeaderSize:], nd.Data)
writeBuff[0] = make([]byte, PIHeaderSize+len(nd))
copy(writeBuff[0][PIHeaderSize:], nd)
}
len, err := s.tun.Write(writeBuff, PIHeaderSize)
if err != nil {
gLog.Printf(LvDEBUG, "write tun dst ip=%s,len=%d,error:%s", net.IP{byte(head.dst >> 24), byte(head.dst >> 16), byte(head.dst >> 8), byte(head.dst)}.String(), len, err)
gLog.d("write tun dst ip=%s,len=%d,error:%s", net.IP{byte(head.dst >> 24), byte(head.dst >> 16), byte(head.dst >> 8), byte(head.dst)}.String(), len, err)
}
}
}
@@ -230,9 +232,11 @@ func (s *p2pSDWAN) routeTunPacket(p []byte, head *PacketHeader) {
v, ok := s.internalRoute.Load(head.dst)
if !ok || v == nil {
if isBroadcastOrMulticast(head.dst, s.subnet) {
gLog.Printf(LvDev, "multicast ip=%s", net.IP{byte(head.dst >> 24), byte(head.dst >> 16), byte(head.dst >> 8), byte(head.dst)}.String())
gLog.dev("multicast ip=%s", net.IP{byte(head.dst >> 24), byte(head.dst >> 16), byte(head.dst >> 8), byte(head.dst)}.String())
GNetwork.WriteBroadcast(p)
return
}
gLog.dev("internalRoute not found ip:%s", net.IP{byte(head.dst >> 24), byte(head.dst >> 16), byte(head.dst >> 8), byte(head.dst)}.String())
return
} else {
node = v.(*sdwanNode)
@@ -240,13 +244,13 @@ func (s *p2pSDWAN) routeTunPacket(p []byte, head *PacketHeader) {
err := GNetwork.WriteNode(node.id, p)
if err != nil {
gLog.Printf(LvDev, "write packet to %s fail: %s", node.name, err)
gLog.dev("write packet to %s fail: %s", node.name, err)
}
}
func (s *p2pSDWAN) readTunLoop() {
gLog.Printf(LvDEBUG, "sdwan readTunLoop start")
defer gLog.Printf(LvDEBUG, "sdwan readTunLoop end")
gLog.d("sdwan readTunLoop start")
defer gLog.d("sdwan readTunLoop end")
readBuff := make([][]byte, ReadTunBuffNum)
for i := 0; i < ReadTunBuffNum; i++ {
readBuff[i] = make([]byte, ReadTunBuffSize+PIHeaderSize)
@@ -256,16 +260,16 @@ func (s *p2pSDWAN) readTunLoop() {
for {
n, err := s.tun.Read(readBuff, readBuffSize, PIHeaderSize)
if err != nil {
gLog.Printf(LvERROR, "read tun fail: ", err)
gLog.e("read tun fail: %s", err)
return
}
for i := 0; i < n; i++ {
if readBuffSize[i] > ReadTunBuffSize {
gLog.Printf(LvERROR, "read tun overflow: len=", readBuffSize[i])
gLog.e("read tun overflow: len=%d", readBuffSize[i])
continue
}
parseHeader(readBuff[i][PIHeaderSize:readBuffSize[i]+PIHeaderSize], &ih)
gLog.Printf(LvDev, "read tun dst ip=%s,len=%d", net.IP{byte(ih.dst >> 24), byte(ih.dst >> 16), byte(ih.dst >> 8), byte(ih.dst)}.String(), readBuffSize[0])
gLog.dev("read tun dst ip=%s,len=%d", net.IP{byte(ih.dst >> 24), byte(ih.dst >> 16), byte(ih.dst >> 8), byte(ih.dst)}.String(), readBuffSize[0])
s.routeTunPacket(readBuff[i][PIHeaderSize:readBuffSize[i]+PIHeaderSize], &ih)
}
}
@@ -277,23 +281,25 @@ func (s *p2pSDWAN) StartTun() error {
tun := &optun{}
err := tun.Start(s.virtualIP.String(), &sdwan)
if err != nil {
gLog.Println(LvERROR, "open tun fail:", err)
gLog.e("open tun fail:%v", err)
s.tunErr = err.Error()
return err
}
s.tun = tun
s.tunErr = ""
go s.readTunLoop()
go s.readNodeLoop() // multi-thread read will cause packets out of order, resulting in slower speeds
}
err := setTunAddr(s.tun.tunName, s.virtualIP.String(), sdwan.Gateway, s.tun.dev)
if err != nil {
gLog.Printf(LvERROR, "setTunAddr error:%s,%s,%s,%s", err, s.tun.tunName, s.virtualIP.String(), sdwan.Gateway)
gLog.e("setTunAddr error:%s,%s,%s,%s", err, s.tun.tunName, s.virtualIP.String(), sdwan.Gateway)
return err
}
return nil
}
func handleSDWAN(subType uint16, msg []byte) error {
gLog.Printf(LvDEBUG, "handle sdwan msg type:%d", subType)
gLog.d("handle sdwan msg type:%d", subType)
var err error
switch subType {
case MsgSDWANInfoRsp:
@@ -301,15 +307,25 @@ func handleSDWAN(subType uint16, msg []byte) error {
if err = json.Unmarshal(msg[openP2PHeaderSize:], &rsp); err != nil {
return ErrMsgFormat
}
gLog.Println(LvINFO, "sdwan init:", prettyJson(rsp))
if runtime.GOOS == "android" {
AndroidSDWANConfig <- msg[openP2PHeaderSize:]
}
gLog.i("sdwan init:%s", prettyJson(rsp))
// GNetwork.sdwan.detail = &rsp
if gConf.Network.previousIP != gConf.Network.publicIP || gConf.getSDWAN().CentralNode != rsp.CentralNode {
GNetwork.sdwan.reset()
preAndroidSDWANConfig = "" // let android app reset vpnservice
}
gConf.setSDWAN(rsp)
err = GNetwork.sdwan.init(gConf.Network.Node)
if runtime.GOOS == "android" {
if !compareResources(preAndroidSDWANConfig, string(msg[openP2PHeaderSize:])) { // when config change, notify android app
select {
case AndroidSDWANConfig <- msg[openP2PHeaderSize:]:
default:
}
preAndroidSDWANConfig = string(msg[openP2PHeaderSize:])
}
}
err = GNetwork.sdwan.init()
if err != nil {
gLog.Println(LvERROR, "sdwan init fail: ", err)
gLog.e("sdwan init fail: %s", err)
if GNetwork.sdwan.tun != nil {
GNetwork.sdwan.tun.Stop()
GNetwork.sdwan.tun = nil
@@ -321,3 +337,33 @@ func handleSDWAN(subType uint16, msg []byte) error {
}
return err
}
// for android vpnservice
// compareResources reports whether two SDWANInfo JSON documents describe the
// same set of per-node resources. It is used to decide whether a changed
// SDWAN config needs to be pushed to the Android app (VpnService restart).
// Returns false when either document fails to parse.
func compareResources(json1, json2 string) bool {
	var net1, net2 SDWANInfo
	if err := json.Unmarshal([]byte(json1), &net1); err != nil {
		fmt.Println("Error parsing json1:", err)
		fmt.Println("Error parsing json1:", json1)
		return false
	}
	if err := json.Unmarshal([]byte(json2), &net2); err != nil {
		// fixed: this branch previously mislabeled the failing document as json1
		fmt.Println("Error parsing json2:", err)
		fmt.Println("Error parsing json2:", json2)
		return false
	}
	// Collect all resources from both configs and compare them.
	// NOTE(review): reflect.DeepEqual is order-sensitive, so this assumes the
	// server returns nodes in a stable order — confirm against the caller.
	resources1 := getResources(net1)
	resources2 := getResources(net2)
	return reflect.DeepEqual(resources1, resources2)
}
// getResources collects the non-empty Resource field of every node in the
// given SDWAN network configuration, in node order.
func getResources(network SDWANInfo) []string {
	var resources []string
	for i := range network.Nodes {
		r := network.Nodes[i].Resource
		if r == "" {
			continue
		}
		resources = append(resources, r)
	}
	return resources
}
+7 -7
View File
@@ -2,12 +2,12 @@ package main
import (
"fmt"
op "openp2p/core"
op2p "openp2p/core"
"time"
)
func main() {
op.Run()
op2p.Run()
for i := 0; i < 10; i++ {
go echoClient("5800-debug")
}
@@ -15,28 +15,28 @@ func main() {
}
func echoClient(peerNode string) {
sendDatalen := op.ReadBuffLen
sendDatalen := op2p.ReadBuffLen
sendBuff := make([]byte, sendDatalen)
for i := 0; i < len(sendBuff); i++ {
sendBuff[i] = byte('A' + i/100)
}
// peerNode = "YOUR-PEER-NODE-NAME"
if err := op.GNetwork.ConnectNode(peerNode); err != nil {
if err := op2p.GNetwork.ConnectNode(peerNode); err != nil {
fmt.Println("connect error:", err)
return
}
for i := 0; ; i++ {
sendBuff[1] = 'A' + byte(i%26)
if err := op.GNetwork.WriteNode(op.NodeNameToID(peerNode), sendBuff[:sendDatalen]); err != nil {
if err := op2p.GNetwork.WriteNode(op2p.NodeNameToID(peerNode), sendBuff[:sendDatalen]); err != nil {
fmt.Println("write error:", err)
break
}
nd := op.GNetwork.ReadNode(time.Second * 10)
nd := op2p.GNetwork.ReadNode(time.Second * 10)
if nd == nil {
fmt.Printf("waiting for node data\n")
time.Sleep(time.Second * 10)
continue
}
fmt.Printf("read %d len=%d data=%s\n", nd.NodeID, len(nd.Data), nd.Data[:16]) // only print 16 bytes
fmt.Printf("read len=%d data=%s\n", len(nd), nd[:16]) // only print 16 bytes
}
}
+5 -5
View File
@@ -2,12 +2,12 @@ package main
import (
"fmt"
op "openp2p/core"
op2p "openp2p/core"
"time"
)
func main() {
op.Run()
op2p.Run()
echoServer()
forever := make(chan bool)
<-forever
@@ -16,15 +16,15 @@ func main() {
func echoServer() {
// peerID := fmt.Sprintf("%d", core.NodeNameToID(peerNode))
for {
nd := op.GNetwork.ReadNode(time.Second * 10)
nd := op2p.GNetwork.ReadNode(time.Second * 10)
if nd == nil {
fmt.Printf("waiting for node data\n")
// time.Sleep(time.Second * 10)
continue
}
// fmt.Printf("read %s len=%d data=%s\n", nd.Node, len(nd.Data), nd.Data[:16])
nd.Data[0] = 'R' // echo server mark as replied
if err := op.GNetwork.WriteNode(nd.NodeID, nd.Data); err != nil {
nd[0] = 'R' // echo server mark as replied
if err := op2p.GNetwork.WriteNode(0, nd); err != nil {
fmt.Println("write error:", err)
break
}
+209
View File
@@ -0,0 +1,209 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U=
github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/reedsolomon v1.9.9/go.mod h1:O7yFFHiQwDR6b2t63KPUpccPtNdp5ADgh1gg4fd12wo=
github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY=
github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A=
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls=
github.com/onsi/ginkgo/v2 v2.2.0 h1:3ZNA3L1c5FYDFTTxbFeVGGD8jYvjYauHD30YgLxVsNI=
github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/openp2p-cn/go-reuseport v0.3.2 h1:TO78WsyJ1F6g7rLp3hpTKOBxtZTU5Lz+Y4Mj+fVUfZc=
github.com/openp2p-cn/go-reuseport v0.3.2/go.mod h1:+EwCusXz50jaYkPNZcCrK4cLoA9tr2jEiJC+bjzpWc8=
github.com/openp2p-cn/service v1.0.0 h1:1++FroLvW4Mc/PStFIAF0mzudVW6E8EAeqWyIESTGZA=
github.com/openp2p-cn/service v1.0.0/go.mod h1:U4VHekhSJldZ332W6bLviB1fipDrS4omY4dHVc/kgts=
github.com/openp2p-cn/totp v0.0.0-20230421034602-0f3320ffb25e h1:QqP3Va/nPj45wq0C8OmGiyZ4HhbTcV6yGuhcYCMgbjg=
github.com/openp2p-cn/totp v0.0.0-20230421034602-0f3320ffb25e/go.mod h1:RYVP3CTIvHD9IwQe2M3zy5iLKNjusRVDz/4gQuKcc/o=
github.com/openp2p-cn/wireguard-go v0.0.20241020 h1:cNgG8o2ctYT9YanqalfMQo+jVju7MrdJFI6WLZZRr7M=
github.com/openp2p-cn/wireguard-go v0.0.20241020/go.mod h1:ka26SCScyLEd+uFrnq6w4n65Sxq1W/xIJfXEXLLvJEc=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/quic-go/qtls-go1-19 v0.3.2 h1:tFxjCFcTQzK+oMxG6Zcvp4Dq8dx4yD3dDiIiyc86Z5U=
github.com/quic-go/qtls-go1-19 v0.3.2/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8GEa3E=
github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
github.com/quic-go/quic-go v0.34.0 h1:OvOJ9LFjTySgwOTYUZmNoq0FzVicP8YujpV0kB7m2lU=
github.com/quic-go/quic-go v0.34.0/go.mod h1:+4CVgVppm0FNjpG3UcX8Joi/frKOH7/ciD5yGcwOO1g=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/cpu v0.0.7/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/cpu v0.1.0 h1:wVM+WIJP2nYaxVxqgHPD4wGA2aJ9rvrQRV8CvFzNb40=
github.com/templexxx/cpu v0.1.0/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xorsimd v0.4.1/go.mod h1:W+ffZz8jJMH2SXwuKu9WhygqBMbFnp14G2fqEr8qaNo=
github.com/templexxx/xorsimd v0.4.2 h1:ocZZ+Nvu65LGHmCLZ7OoCtg8Fx8jnHKK37SjvngUoVI=
github.com/templexxx/xorsimd v0.4.2/go.mod h1:HgwaPoDREdi6OnULpSfxhzaiiSUY4Fi3JPn1wpt28NI=
github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w=
github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54 h1:8mhqcHPqTMhSPoslhGYihEgSfc77+7La1P6kiB6+9So=
github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/xtaci/kcp-go/v5 v5.5.17 h1:bkdaqtER0PMlP05BBHfu6W+71kt/NwbAk93KH7F78Ck=
github.com/xtaci/kcp-go/v5 v5.5.17/go.mod h1:pVx3jb4LT5edTmPayc77tIU9nRsjGck8wep5ZV/RBO0=
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200808120158-1030fc2bf1d9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200425043458-8463f397d07c/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200808161706-5bf02b21f123/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173 h1:/jFs0duh4rdb8uIfPMv78iAJGcPKDeqAFnaLBropIC4=
golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173/go.mod h1:tkCQ4FQXmpAgYVh++1cq16/dH4QJtmvpRv19DWGAHSA=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gvisor.dev/gvisor v0.0.0-20241128011400-745828301c93 h1:QyA/pFgC67EZ5+0oRfiNFhfEGd3NqZM1A2HQEuPKC3c=
gvisor.dev/gvisor v0.0.0-20241128011400-745828301c93/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=