mirror of https://github.com/kubenetworks/kubevpn.git
synced 2026-04-22 23:17:23 +08:00
237 lines · 8.1 KiB · Go
package cmds
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
"os"
|
|
|
|
log "github.com/sirupsen/logrus"
|
|
"github.com/spf13/cobra"
|
|
"google.golang.org/grpc"
|
|
"google.golang.org/grpc/codes"
|
|
"google.golang.org/grpc/status"
|
|
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
|
"k8s.io/kubectl/pkg/util/i18n"
|
|
"k8s.io/kubectl/pkg/util/templates"
|
|
"k8s.io/utils/ptr"
|
|
|
|
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
|
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
|
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
|
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
|
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
|
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
|
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
|
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
|
|
)
|
|
|
|
func CmdConnect(f cmdutil.Factory) *cobra.Command {
|
|
var extraRoute = &handler.ExtraRouteInfo{}
|
|
var sshConf = &pkgssh.SshConfig{}
|
|
var transferImage, foreground bool
|
|
var enableSocks bool
|
|
var socksListen string
|
|
var imagePullSecretName string
|
|
var managerNamespace string
|
|
cmd := &cobra.Command{
|
|
Use: "connect",
|
|
Short: i18n.T("Connect to kubernetes cluster network"),
|
|
Long: templates.LongDesc(i18n.T(`
|
|
Connect to kubernetes cluster network
|
|
|
|
Upon establishing a connection to the Kubernetes cluster network, you can directly access Pod IPs and Service IPs from your local machine.
|
|
This includes capabilities such as ping Pod IPs or curl Service IPs.
|
|
Full Kubernetes DNS resolution is supported, enabling access via standard naming conventions,
|
|
This allows you to run applications locally while seamlessly connecting to all resources within the Kubernetes cluster, as if operating inside the cluster itself.
|
|
`)),
|
|
Example: templates.Examples(i18n.T(`
|
|
# Connect to k8s cluster network
|
|
kubevpn connect
|
|
|
|
# Connect and start a managed local SOCKS5 proxy for nested VPN cases
|
|
kubevpn connect --socks
|
|
curl --proxy socks5h://127.0.0.1:1080 http://productpage.default.svc.cluster.local:9080
|
|
|
|
# Connect to api-server behind of bastion host or ssh jump host
|
|
kubevpn connect --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
|
|
|
# It also supports ProxyJump, like
|
|
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
|
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
|
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
|
kubevpn connect --ssh-alias <alias>
|
|
|
|
# Support ssh auth GSSAPI
|
|
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
|
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
|
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
|
|
|
# Support ssh jump inline
|
|
kubevpn connect --ssh-jump "--ssh-addr jump.naison.org --ssh-username naison --gssapi-password xxx" --ssh-username root --ssh-addr 127.0.0.1:22 --ssh-keyfile ~/.ssh/dst.pem
|
|
`)),
|
|
PreRunE: func(cmd *cobra.Command, args []string) error {
|
|
plog.InitLoggerForClient()
|
|
// startup daemon process and sudo process
|
|
err := daemon.StartupDaemon(cmd.Context())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if transferImage {
|
|
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
|
|
}
|
|
return err
|
|
},
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if !sshConf.IsEmpty() && !sshConf.IsLoopback() {
|
|
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
|
|
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
|
|
}
|
|
}
|
|
req := &rpc.ConnectRequest{
|
|
KubeconfigBytes: string(bytes),
|
|
Namespace: ns,
|
|
ExtraRoute: extraRoute.ToRPC(),
|
|
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
|
|
|
SshJump: sshConf.ToRPC(),
|
|
TransferImage: transferImage,
|
|
Image: config.Image,
|
|
ImagePullSecretName: imagePullSecretName,
|
|
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
|
|
ManagerNamespace: managerNamespace,
|
|
}
|
|
// if is foreground, send to sudo daemon server
|
|
cli, err := daemon.GetClient(false)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
var resp grpc.BidiStreamingClient[rpc.ConnectRequest, rpc.ConnectResponse]
|
|
resp, err = cli.Connect(context.Background())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
err = resp.Send(req)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
connectionID, err := printConnectGRPCStream(cmd.Context(), resp, os.Stdout)
|
|
if err != nil {
|
|
if status.Code(err) == codes.Canceled {
|
|
return nil
|
|
}
|
|
return err
|
|
}
|
|
if enableSocks {
|
|
if connectionID == "" {
|
|
return fmt.Errorf("connected but missing connection ID for socks proxy startup")
|
|
}
|
|
if err := startManagedSocksProxy(connectionID, bytes, ns, socksListen); err != nil {
|
|
return err
|
|
}
|
|
printManagedSocksStarted(os.Stdout, connectionID, socksListen)
|
|
}
|
|
if !foreground {
|
|
_, _ = fmt.Fprintln(os.Stdout, config.Slogan)
|
|
} else {
|
|
<-cmd.Context().Done()
|
|
err = disconnect(cli, bytes, ns, sshConf)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
|
|
}
|
|
return nil
|
|
},
|
|
}
|
|
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
|
|
cmd.Flags().BoolVar(&foreground, "foreground", false, "Hang up")
|
|
cmd.Flags().BoolVar(&enableSocks, "socks", false, "Start a managed local SOCKS5 proxy after connect for nested VPN or route-conflict cases")
|
|
cmd.Flags().StringVar(&socksListen, "socks-listen", "127.0.0.1:1080", "Listen address for the managed local SOCKS5 proxy")
|
|
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
|
|
|
|
handler.AddExtraRoute(cmd.Flags(), extraRoute)
|
|
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
|
return cmd
|
|
}
|
|
|
|
func printConnectGRPCStream(ctx context.Context, clientStream grpc.ClientStream, out io.Writer) (string, error) {
|
|
go func() {
|
|
if ctx != nil {
|
|
<-ctx.Done()
|
|
_ = clientStream.SendMsg(&rpc.Cancel{})
|
|
}
|
|
}()
|
|
|
|
var connectionID string
|
|
for {
|
|
resp := new(rpc.ConnectResponse)
|
|
err := clientStream.RecvMsg(resp)
|
|
if errors.Is(err, io.EOF) {
|
|
return connectionID, nil
|
|
}
|
|
if err != nil {
|
|
return connectionID, err
|
|
}
|
|
if resp.ConnectionID != "" {
|
|
connectionID = resp.ConnectionID
|
|
}
|
|
if out != nil && resp.GetMessage() != "" {
|
|
_, _ = fmt.Fprint(out, resp.GetMessage())
|
|
}
|
|
}
|
|
}
|
|
|
|
func disconnect(cli rpc.DaemonClient, bytes []byte, ns string, sshConf *pkgssh.SshConfig) error {
|
|
connectionID, _ := connectionIDFromKubeconfigBytes(bytes, ns)
|
|
resp, err := cli.Disconnect(context.Background())
|
|
if err != nil {
|
|
plog.G(context.Background()).Errorf("Disconnect error: %v", err)
|
|
return err
|
|
}
|
|
err = resp.Send(&rpc.DisconnectRequest{
|
|
KubeconfigBytes: ptr.To(string(bytes)),
|
|
Namespace: ptr.To(ns),
|
|
SshJump: sshConf.ToRPC(),
|
|
})
|
|
if err != nil {
|
|
plog.G(context.Background()).Errorf("Disconnect error: %v", err)
|
|
return err
|
|
}
|
|
err = util.PrintGRPCStream[rpc.DisconnectResponse](nil, resp)
|
|
if err != nil {
|
|
if status.Code(err) == codes.Canceled {
|
|
return nil
|
|
}
|
|
return err
|
|
}
|
|
if connectionID != "" {
|
|
_ = stopManagedProxy(connectionID)
|
|
printManagedSocksStopped(os.Stdout, connectionID)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func connectionIDFromKubeconfigBytes(bytes []byte, ns string) (string, error) {
|
|
file, err := util.ConvertToTempKubeconfigFile(bytes, "")
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
defer os.Remove(file)
|
|
|
|
factory := util.InitFactoryByPath(file, ns)
|
|
connect := &handler.ConnectOptions{}
|
|
if err := connect.InitClient(factory); err != nil {
|
|
return "", err
|
|
}
|
|
return util.GetConnectionID(context.Background(), connect.GetClientset().CoreV1().Namespaces(), connect.Namespace)
|
|
}
|