mirror of
https://github.com/gravitl/netmaker.git
synced 2026-04-22 16:07:11 +08:00
292af315dd
* feat(go): add user schema; * feat(go): migrate to user schema; * feat(go): add audit fields; * feat(go): remove unused fields from the network model; * feat(go): add network schema; * feat(go): migrate to network schema; * refactor(go): add comment to clarify migration logic; * fix(go): test failures; * fix(go): test failures; * feat(go): change membership table to store memberships at all scopes; * feat(go): add schema for access grants; * feat(go): remove nameservers from new networks table; ensure db passed for schema functions; * feat(go): set max conns for sqlite to 1; * fix(go): issues updating user account status; * NM-236: streamline operations in HA mode * NM-236: only master pod should subscribe to updates from clients * refactor(go): remove converters and access grants; * refactor(go): add json tags in schema models; * refactor(go): rename file to migrate_v1_6_0.go; * refactor(go): add user groups and user roles tables; use schema tables; * refactor(go): inline get and list from schema package; * refactor(go): inline get network and list users from schema package; * fix(go): staticcheck issues; * fix(go): remove test not in use; fix test case; * fix(go): validate network; * fix(go): resolve static checks; * fix(go): new models errors; * fix(go): test errors; * fix(go): handle no records; * fix(go): add validations for user object; * fix(go): set correct extclient status; * fix(go): test error; * feat(go): make schema the base package; * feat(go): add host schema; * feat(go): use schema host everywhere; * feat(go): inline get host, list hosts and delete host; * feat(go): use non-ptr value; * feat(go): use save to upsert all fields; * feat(go): use save to upsert all fields; * feat(go): save turn endpoint as string; * feat(go): check for gorm error record not found; * fix(go): test failures; * fix(go): update all network fields; * fix(go): update all network fields; * feat(go): add paginated list networks api; * feat(go): add paginated list users api; * 
feat(go): add paginated list hosts api; * feat(go): add pagination to list groups api; * fix(go): comment; * fix(go): implement marshal and unmarshal text for custom types; * fix(go): implement marshal and unmarshal json for custom types; * fix(go): just use the old model for unmarshalling; * fix(go): implement marshal and unmarshal json for custom types; * NM-271:Import swap: compress/gzip replaced with github.com/klauspost/compress/gzip (2-4x faster, wire-compatible output). Added sync import. Two sync.Pool variables (gzipWriterPool, bufferPool): reuse gzip.Writer and bytes.Buffer across calls instead of allocating fresh ones per publish. compressPayload rewritten: pulls writer + buffer from pools, resets them, compresses at gzip.BestSpeed (level 1), copies the result out of the pooled buffer, and returns both objects to the pools. * feat(go): remove paginated list networks api; * feat(go): use custom paginated response object; * NM-271: Improve server scalability under high host count - Replace stdlib compress/gzip with klauspost/compress at BestSpeed and pool gzip writers and buffers via sync.Pool to eliminate compression as the dominant CPU hotspot. - Debounce peer update broadcasts with a 500ms resettable window capped at 3s max-wait, coalescing rapid-fire PublishPeerUpdate calls into a single broadcast cycle. - Cache HostPeerInfo (batch-refreshed by debounce worker) and HostPeerUpdate (stored as side-effect of each publish) so the pull API and peer_info API serve from pre-computed maps instead of triggering expensive per-host computations under thundering herd conditions. - Warm both caches synchronously at startup before the first publish cycle so early pull requests are served instantly. - Bound concurrent MQTT publishes to 5 via semaphore to prevent broker TCP buffer overflows that caused broken pipe disconnects. 
- Remove manual Disconnect+SetupMQTT from ConnectionLostHandler and rely on the paho client's built-in AutoReconnect; add a 5s retry wait in publish() to ride out brief reconnection windows. * NM-271: Reduce server CPU contention under high concurrent load - Cache ServerSettings with atomic.Value to eliminate repeated DB reads on every pull request (was 32+ goroutines blocked on read lock) - Batch UpdateNodeCheckin writes in memory, flush every 30s to reduce per-checkin write lock contention (was 88+ goroutines blocked) - Enable SQLite WAL mode + busy_timeout and remove global dbMutex; let SQLite handle concurrency natively (reads no longer block writes) - Move ResetFailedOverPeer/ResetAutoRelayedPeer to async in pull() handler since results don't affect the cached response - Skip no-op UpsertNode writes in failover/relay reset functions (early return when node has no failover/relay state) - Remove CheckHostPorts from hostUpdateFallback hot path - Switch to pure-Go SQLite driver (glebarez/sqlite), set CGO_ENABLED=0 * fix(go): ensure default values for page and per_page are used when not passed; * fix(go): rename v1.6.0 to v1.5.1; * fix(go): check for gorm.ErrRecordNotFound instead of database.IsEmptyRecord; * fix(go): use host id, not pending host id; * NM-271: Revert pure-Go SQLite and FIPS disable to verify impact Revert to CGO-based mattn/go-sqlite3 driver and re-enable FIPS to isolate whether these changes are still needed now that the global dbMutex has been removed and WAL mode is enabled. Keep WAL mode pragma with mattn-compatible DSN format. 
* feat(go): add filters to paginated apis; * feat(go): add filters to paginated apis; * feat(go): remove check for max username length; * feat(go): add filters to count as well; * feat(go): use library to check email address validity; * feat(go): ignore pagination if params not passed; * fix(go): pagination issues; * fix(go): check exists before using; * fix(go): remove debug log; * NM-271: rm debug logs * NM-271: check if caching is enabled * NM-271: add server sync mq topic for HA mode * NM-271: fix build * NM-271: push metrics in batch to exproter over api * NM-271: use basic auth for exporter metrics api * fix(go): use gorm err record not found; * NM-271: Add monitoring stack on demand * NM-271: -m arg for install script should only add monitoring stack * fix(go): use gorm err record not found; * NM-271: update docker compose file for prometheus * NM-271: update docker compose file for prometheus * fix(go): use user principal name when creating pending user; * fix(go): use schema package for consts; * NM-236: rm duplicate network hook * NM-271: add server topic to reset idp hooks on master node * fix(go): prevent disabling superadmin user; Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * fix(go): swap is admin and is superadmin; Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * fix(go): remove dead code block; https://github.com/gravitl/netmaker/pull/3910#discussion_r2928837937 * fix(go): incorrect message when trying to disable self; https://github.com/gravitl/netmaker/pull/3910#discussion_r2928837934 * NM-271: fix stale peers on reset_failovered pull and add HTTP timeout to metrics exporter Run the failover/relay reset synchronously in the pull handler so the response reflects post-reset topology instead of serving stale cached peers. Add a 30s timeout to the metrics exporter HTTP client to prevent PushAllMetricsToExporter from blocking the Keepalive loop. 
* NM-271: fix gzip pool corruption, MQTT topic mismatch, stale settings cache, and reduce redundant DB fetches - Only return gzip.Writer to pool after successful Close to prevent silently malformed MQTT payloads from a previously errored writer. - Fix serversync subscription to exact topic match since syncType is now in the message payload, not the topic path. - Prevent zero-value ServerSettings from being cached indefinitely when the DB record is missing or unmarshal fails on startup. - Return fetched hosts/nodes from RefreshHostPeerInfoCache so warmPeerCaches reuses them instead of querying the DB twice. - Compute fresh HostPeerUpdate on reset_failovered pull instead of serving stale cache, and store result back for subsequent requests. * NM-271: fix gzip writer pool leak, log checkin flush errors, and fix master pod ordinal parsing - Reset gzip.Writer to io.Discard before returning to pool so errored writers are never leaked or silently reused with corrupt state. - Track and log failed DB inserts in FlushNodeCheckins so operators have visibility when check-in timestamps are lost. - Parse StatefulSet pod ordinal as integer instead of using HasSuffix to prevent netmaker-10 from being misidentified as master pod. 
* NM-271: simplify masterpod logic * fix(go): use correct header; Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * fix(go): return after error response; Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * fix(go): use correct order of params; https://github.com/gravitl/netmaker/pull/3910#discussion_r2929593036 * fix(go): set default values for page and page size; use v2 instead of /list; * NM-271: use host name * Update mq/serversync.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * NM-271: fix duplicate serversynce case * NM-271: streamline gw updates * Update logic/auth.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * Update schema/user_roles.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * fix(go): syntax error; * fix(go): set default values when page and per_page are not passed or 0; * fix(go): use uuid.parse instead of uuid.must parse; * fix(go): review errors; * fix(go): review errors; * Update controllers/user.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * Update controllers/user.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * NM-163: fix errors: * Update db/types/options.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * fix(go): persist return user in event; * Update db/types/options.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com> * NM-271: signal pull on ip changes * NM-163: duplicate lines of code * NM-163: fix(go): fix missing return and filter parsing in user controller - Add missing return after error response in updateUserAccountStatus to prevent double-response and spurious ext-client side-effects - Use switch statements in 
listUsers to skip unrecognized account_status and mfa_status filter values * NM-271: signal pull req on node ip change * fix(go): check for both min and max page size; * NM-271: refresh node object before update * fix(go): enclose transfer superadmin in transaction; * fix(go): review errors; * fix(go): remove free tier checks; * fix(go): review fixes; * NM-271: streamline ip pool ops * NM-271: fix tests, set max idle conns * NM-271: fix(go): fix data races in settings cache and peer update worker - Use pointer type in atomic.Value for serverSettingsCache to avoid replacing the variable non-atomically in InvalidateServerSettingsCache - Swap peerUpdateReplace flag before draining the channel to prevent a concurrent replacePeers=true from being consumed by the wrong cycle --------- Co-authored-by: VishalDalwadi <dalwadivishal26@gmail.com> Co-authored-by: Vishal Dalwadi <51291657+VishalDalwadi@users.noreply.github.com> Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
809 lines
22 KiB
Go
809 lines
22 KiB
Go
package logic
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"fmt"
|
|
"net"
|
|
"sort"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/c-robinson/iplib"
|
|
"github.com/google/uuid"
|
|
"github.com/gravitl/netmaker/database"
|
|
"github.com/gravitl/netmaker/db"
|
|
"github.com/gravitl/netmaker/logger"
|
|
"github.com/gravitl/netmaker/logic/acls/nodeacls"
|
|
"github.com/gravitl/netmaker/models"
|
|
"github.com/gravitl/netmaker/schema"
|
|
"github.com/gravitl/netmaker/servercfg"
|
|
"golang.org/x/exp/slog"
|
|
"gorm.io/gorm"
|
|
)
|
|
|
|
var (
	// networkCacheMutex guards all reads and writes of allocatedIpMap.
	networkCacheMutex = &sync.RWMutex{}
	// allocatedIpMap caches the in-use IPs per network:
	// network name -> (ip string -> net.IP).
	allocatedIpMap = make(map[string]map[string]net.IP)
)
|
|
|
|
// SetAllocatedIpMap - set allocated ip map for networks
|
|
func SetAllocatedIpMap() error {
|
|
if !servercfg.CacheEnabled() {
|
|
return nil
|
|
}
|
|
logger.Log(0, "start setting up allocated ip map")
|
|
if allocatedIpMap == nil {
|
|
allocatedIpMap = map[string]map[string]net.IP{}
|
|
}
|
|
|
|
currentNetworks, err := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
for _, v := range currentNetworks {
|
|
pMap := map[string]net.IP{}
|
|
netName := v.Name
|
|
|
|
//nodes
|
|
nodes, err := GetNetworkNodes(netName)
|
|
if err != nil {
|
|
slog.Error("could not load node for network", netName, "error", err.Error())
|
|
} else {
|
|
for _, n := range nodes {
|
|
|
|
if n.Address.IP != nil {
|
|
pMap[n.Address.IP.String()] = n.Address.IP
|
|
}
|
|
if n.Address6.IP != nil {
|
|
pMap[n.Address6.IP.String()] = n.Address6.IP
|
|
}
|
|
}
|
|
|
|
}
|
|
|
|
//extClients
|
|
extClients, err := GetNetworkExtClients(netName)
|
|
if err != nil {
|
|
slog.Error("could not load extClient for network", netName, "error", err.Error())
|
|
} else {
|
|
for _, extClient := range extClients {
|
|
if extClient.Address != "" {
|
|
pMap[extClient.Address] = net.ParseIP(extClient.Address)
|
|
}
|
|
if extClient.Address6 != "" {
|
|
pMap[extClient.Address6] = net.ParseIP(extClient.Address6)
|
|
}
|
|
}
|
|
}
|
|
|
|
allocatedIpMap[netName] = pMap
|
|
}
|
|
logger.Log(0, "setting up allocated ip map done")
|
|
return nil
|
|
}
|
|
|
|
// ClearAllocatedIpMap - set allocatedIpMap to nil
|
|
func ClearAllocatedIpMap() {
|
|
if !servercfg.CacheEnabled() {
|
|
return
|
|
}
|
|
allocatedIpMap = nil
|
|
}
|
|
|
|
func AddIpToAllocatedIpMap(networkName string, ip net.IP) {
|
|
if !servercfg.CacheEnabled() {
|
|
return
|
|
}
|
|
networkCacheMutex.Lock()
|
|
if m, ok := allocatedIpMap[networkName]; ok {
|
|
m[ip.String()] = ip
|
|
}
|
|
networkCacheMutex.Unlock()
|
|
}
|
|
|
|
func RemoveIpFromAllocatedIpMap(networkName string, ip string) {
|
|
if !servercfg.CacheEnabled() {
|
|
return
|
|
}
|
|
networkCacheMutex.Lock()
|
|
if m, ok := allocatedIpMap[networkName]; ok {
|
|
delete(m, ip)
|
|
}
|
|
networkCacheMutex.Unlock()
|
|
}
|
|
|
|
// AddNetworkToAllocatedIpMap - add network to allocated ip map when network is added
|
|
func AddNetworkToAllocatedIpMap(networkName string) {
|
|
//add new network to allocated ip map
|
|
if !servercfg.CacheEnabled() {
|
|
return
|
|
}
|
|
networkCacheMutex.Lock()
|
|
allocatedIpMap[networkName] = make(map[string]net.IP)
|
|
networkCacheMutex.Unlock()
|
|
}
|
|
|
|
// RemoveNetworkFromAllocatedIpMap - remove network from allocated ip map when network is deleted
|
|
func RemoveNetworkFromAllocatedIpMap(networkName string) {
|
|
if !servercfg.CacheEnabled() {
|
|
return
|
|
}
|
|
networkCacheMutex.Lock()
|
|
delete(allocatedIpMap, networkName)
|
|
networkCacheMutex.Unlock()
|
|
}
|
|
|
|
// DeleteNetwork - deletes a network
|
|
func DeleteNetwork(network string, force bool, done chan struct{}) error {
|
|
|
|
nodeCount, err := GetNetworkNonServerNodeCount(network)
|
|
if nodeCount == 0 || database.IsEmptyRecord(err) {
|
|
_network := &schema.Network{
|
|
Name: network,
|
|
}
|
|
// delete server nodes first then db records
|
|
return _network.Delete(db.WithContext(context.TODO()))
|
|
}
|
|
|
|
// Remove All Nodes
|
|
go func() {
|
|
nodes, err := GetNetworkNodes(network)
|
|
if err == nil {
|
|
for _, node := range nodes {
|
|
node := node
|
|
host := &schema.Host{ID: node.HostID}
|
|
if err := host.Get(db.WithContext(context.TODO())); err != nil {
|
|
continue
|
|
}
|
|
if node.IsGw {
|
|
// delete ext clients belonging to gateway
|
|
DeleteGatewayExtClients(node.ID.String(), node.Network)
|
|
}
|
|
DissasociateNodeFromHost(&node, host)
|
|
}
|
|
}
|
|
// remove ACL for network
|
|
err = nodeacls.DeleteACLContainer(nodeacls.NetworkID(network))
|
|
if err != nil {
|
|
logger.Log(1, "failed to remove the node acls during network delete for network,", network)
|
|
}
|
|
// delete server nodes first then db records
|
|
_network := &schema.Network{
|
|
Name: network,
|
|
}
|
|
err = _network.Delete(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
return
|
|
}
|
|
done <- struct{}{}
|
|
close(done)
|
|
}()
|
|
|
|
// Delete default network enrollment key
|
|
keys, _ := GetAllEnrollmentKeys()
|
|
for _, key := range keys {
|
|
if key.Tags[0] == network {
|
|
if key.Default {
|
|
DeleteEnrollmentKey(key.Value, true)
|
|
break
|
|
}
|
|
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// AssignVirtualNATDefaults determines safe defaults based on VPN CIDR
|
|
func AssignVirtualNATDefaults(network *schema.Network, vpnCIDR string) {
|
|
const (
|
|
cgnatCIDR = "100.64.0.0/10"
|
|
fallbackIPv4Pool = "198.18.0.0/15"
|
|
|
|
defaultIPv4SitePrefix = 24
|
|
)
|
|
|
|
// Parse CGNAT CIDR (should always succeed, but check for safety)
|
|
_, cgnatNet, err := net.ParseCIDR(cgnatCIDR)
|
|
if err != nil {
|
|
// Fallback to default pool if CGNAT parsing fails (shouldn't happen)
|
|
network.VirtualNATPoolIPv4 = fallbackIPv4Pool
|
|
network.VirtualNATSitePrefixLenIPv4 = defaultIPv4SitePrefix
|
|
return
|
|
}
|
|
|
|
var virtualIPv4Pool string
|
|
// Parse VPN CIDR - if it fails or is empty, use fallback
|
|
if vpnCIDR == "" {
|
|
virtualIPv4Pool = fallbackIPv4Pool
|
|
} else {
|
|
_, vpnNet, err := net.ParseCIDR(vpnCIDR)
|
|
if err != nil || vpnNet == nil {
|
|
// Invalid VPN CIDR, use fallback
|
|
virtualIPv4Pool = fallbackIPv4Pool
|
|
} else if !cidrOverlaps(vpnNet, cgnatNet) {
|
|
// Safe to reuse VPN CIDR for Virtual NAT
|
|
virtualIPv4Pool = vpnCIDR
|
|
} else {
|
|
// VPN is CGNAT — must not reuse
|
|
virtualIPv4Pool = fallbackIPv4Pool
|
|
}
|
|
}
|
|
|
|
network.VirtualNATPoolIPv4 = virtualIPv4Pool
|
|
network.VirtualNATSitePrefixLenIPv4 = defaultIPv4SitePrefix
|
|
}
|
|
|
|
// cidrOverlaps checks if two CIDR blocks overlap
|
|
func cidrOverlaps(a, b *net.IPNet) bool {
|
|
return a.Contains(b.IP) || b.Contains(a.IP)
|
|
}
|
|
|
|
// CreateNetwork - creates a network in database
|
|
func CreateNetwork(_network *schema.Network) error {
|
|
if _network.AddressRange != "" {
|
|
normalizedRange, err := NormalizeCIDR(_network.AddressRange)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_network.AddressRange = normalizedRange
|
|
}
|
|
if _network.AddressRange6 != "" {
|
|
normalizedRange, err := NormalizeCIDR(_network.AddressRange6)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_network.AddressRange6 = normalizedRange
|
|
}
|
|
if !IsNetworkCIDRUnique(GetNetworkNetworkCIDR4(_network), GetNetworkNetworkCIDR6(_network)) {
|
|
return errors.New("network cidr already in use")
|
|
}
|
|
|
|
_network.NodesUpdatedAt = time.Now().UTC()
|
|
|
|
err := ValidateNetwork(_network, false)
|
|
if err != nil {
|
|
//logic.ReturnErrorResponse(w, r, logic.FormatError(err, "badrequest"))
|
|
return err
|
|
}
|
|
|
|
err = _network.Create(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
_, _ = CreateEnrollmentKey(
|
|
0,
|
|
time.Time{},
|
|
[]string{_network.Name},
|
|
[]string{_network.Name},
|
|
[]models.TagID{},
|
|
true,
|
|
uuid.Nil,
|
|
true,
|
|
false,
|
|
false,
|
|
)
|
|
|
|
return nil
|
|
}
|
|
|
|
func GetNetworkNetworkCIDR4(network *schema.Network) *net.IPNet {
|
|
if network.AddressRange == "" {
|
|
return nil
|
|
}
|
|
_, netCidr, _ := net.ParseCIDR(network.AddressRange)
|
|
return netCidr
|
|
}
|
|
func GetNetworkNetworkCIDR6(network *schema.Network) *net.IPNet {
|
|
if network.AddressRange6 == "" {
|
|
return nil
|
|
}
|
|
_, netCidr, _ := net.ParseCIDR(network.AddressRange6)
|
|
return netCidr
|
|
}
|
|
|
|
// GetNetworkNonServerNodeCount - get number of network non server nodes
|
|
func GetNetworkNonServerNodeCount(networkName string) (int, error) {
|
|
nodes, err := GetNetworkNodes(networkName)
|
|
return len(nodes), err
|
|
}
|
|
|
|
func IsNetworkCIDRUnique(cidr4 *net.IPNet, cidr6 *net.IPNet) bool {
|
|
networks, err := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
return errors.Is(err, gorm.ErrRecordNotFound)
|
|
}
|
|
for _, network := range networks {
|
|
if intersect(GetNetworkNetworkCIDR4(&network), cidr4) ||
|
|
intersect(GetNetworkNetworkCIDR6(&network), cidr6) {
|
|
return false
|
|
}
|
|
}
|
|
return true
|
|
}
|
|
|
|
func intersect(n1, n2 *net.IPNet) bool {
|
|
if n1 == nil || n2 == nil {
|
|
return false
|
|
}
|
|
return n2.Contains(n1.IP) || n1.Contains(n2.IP)
|
|
}
|
|
|
|
// UniqueAddress - get a unique ipv4 address
|
|
func UniqueAddressCache(networkName string, reverse bool) (net.IP, error) {
|
|
add := net.IP{}
|
|
network := &schema.Network{Name: networkName}
|
|
err := network.Get(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
logger.Log(0, "UniqueAddressServer encountered an error")
|
|
return add, err
|
|
}
|
|
|
|
if network.AddressRange == "" {
|
|
return add, fmt.Errorf("IPv4 not active on network %s", networkName)
|
|
}
|
|
//ensure AddressRange is valid
|
|
if _, _, err := net.ParseCIDR(network.AddressRange); err != nil {
|
|
logger.Log(0, "UniqueAddress encountered an error")
|
|
return add, err
|
|
}
|
|
net4 := iplib.Net4FromStr(network.AddressRange)
|
|
newAddrs := net4.FirstAddress()
|
|
|
|
if reverse {
|
|
newAddrs = net4.LastAddress()
|
|
}
|
|
|
|
networkCacheMutex.RLock()
|
|
ipAllocated := allocatedIpMap[networkName]
|
|
for {
|
|
if _, ok := ipAllocated[newAddrs.String()]; !ok {
|
|
networkCacheMutex.RUnlock()
|
|
return newAddrs, nil
|
|
}
|
|
if reverse {
|
|
newAddrs, err = net4.PreviousIP(newAddrs)
|
|
} else {
|
|
newAddrs, err = net4.NextIP(newAddrs)
|
|
}
|
|
if err != nil {
|
|
break
|
|
}
|
|
}
|
|
networkCacheMutex.RUnlock()
|
|
|
|
return add, errors.New("ERROR: No unique addresses available. Check network subnet")
|
|
}
|
|
|
|
// UniqueAddress - get a unique ipv4 address
|
|
func UniqueAddressDB(networkName string, reverse bool) (net.IP, error) {
|
|
add := net.IP{}
|
|
network := &schema.Network{Name: networkName}
|
|
err := network.Get(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
logger.Log(0, "UniqueAddressServer encountered an error")
|
|
return add, err
|
|
}
|
|
|
|
if network.AddressRange == "" {
|
|
return add, fmt.Errorf("IPv4 not active on network %s", networkName)
|
|
}
|
|
//ensure AddressRange is valid
|
|
if _, _, err := net.ParseCIDR(network.AddressRange); err != nil {
|
|
logger.Log(0, "UniqueAddress encountered an error")
|
|
return add, err
|
|
}
|
|
net4 := iplib.Net4FromStr(network.AddressRange)
|
|
newAddrs := net4.FirstAddress()
|
|
|
|
if reverse {
|
|
newAddrs = net4.LastAddress()
|
|
}
|
|
|
|
for {
|
|
if IsIPUnique(networkName, newAddrs.String(), database.NODES_TABLE_NAME, false) &&
|
|
IsIPUnique(networkName, newAddrs.String(), database.EXT_CLIENT_TABLE_NAME, false) {
|
|
return newAddrs, nil
|
|
}
|
|
if reverse {
|
|
newAddrs, err = net4.PreviousIP(newAddrs)
|
|
} else {
|
|
newAddrs, err = net4.NextIP(newAddrs)
|
|
}
|
|
if err != nil {
|
|
break
|
|
}
|
|
}
|
|
|
|
return add, errors.New("ERROR: No unique addresses available. Check network subnet")
|
|
}
|
|
|
|
// IsIPUnique - checks if an IP is unique
|
|
func IsIPUnique(network string, ip string, tableName string, isIpv6 bool) bool {
|
|
|
|
isunique := true
|
|
if tableName == database.NODES_TABLE_NAME {
|
|
nodes, err := GetNetworkNodes(network)
|
|
if err != nil {
|
|
return isunique
|
|
}
|
|
for _, node := range nodes {
|
|
if isIpv6 {
|
|
if node.Address6.IP.String() == ip && node.Network == network {
|
|
return false
|
|
}
|
|
} else {
|
|
if node.Address.IP.String() == ip && node.Network == network {
|
|
return false
|
|
}
|
|
}
|
|
}
|
|
|
|
} else if tableName == database.EXT_CLIENT_TABLE_NAME {
|
|
|
|
extClients, err := GetNetworkExtClients(network)
|
|
if err != nil {
|
|
return isunique
|
|
}
|
|
for _, extClient := range extClients { // filter
|
|
if isIpv6 {
|
|
if (extClient.Address6 == ip) && extClient.Network == network {
|
|
return false
|
|
}
|
|
|
|
} else {
|
|
if (extClient.Address == ip) && extClient.Network == network {
|
|
return false
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
return isunique
|
|
}
|
|
func UniqueAddress(networkName string, reverse bool) (net.IP, error) {
|
|
if servercfg.CacheEnabled() {
|
|
return UniqueAddressCache(networkName, reverse)
|
|
}
|
|
return UniqueAddressDB(networkName, reverse)
|
|
}
|
|
|
|
func UniqueAddress6(networkName string, reverse bool) (net.IP, error) {
|
|
if servercfg.CacheEnabled() {
|
|
return UniqueAddress6Cache(networkName, reverse)
|
|
}
|
|
return UniqueAddress6DB(networkName, reverse)
|
|
}
|
|
|
|
// UniqueAddress6DB - see if ipv6 address is unique
|
|
func UniqueAddress6DB(networkName string, reverse bool) (net.IP, error) {
|
|
add := net.IP{}
|
|
network := &schema.Network{Name: networkName}
|
|
err := network.Get(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
return add, err
|
|
}
|
|
if network.AddressRange6 == "" {
|
|
return add, fmt.Errorf("IPv6 not active on network %s", networkName)
|
|
}
|
|
|
|
//ensure AddressRange is valid
|
|
if _, _, err := net.ParseCIDR(network.AddressRange6); err != nil {
|
|
return add, err
|
|
}
|
|
net6 := iplib.Net6FromStr(network.AddressRange6)
|
|
|
|
newAddrs, err := net6.NextIP(net6.FirstAddress())
|
|
if reverse {
|
|
newAddrs, err = net6.PreviousIP(net6.LastAddress())
|
|
}
|
|
if err != nil {
|
|
return add, err
|
|
}
|
|
|
|
for {
|
|
if IsIPUnique(networkName, newAddrs.String(), database.NODES_TABLE_NAME, true) &&
|
|
IsIPUnique(networkName, newAddrs.String(), database.EXT_CLIENT_TABLE_NAME, true) {
|
|
return newAddrs, nil
|
|
}
|
|
if reverse {
|
|
newAddrs, err = net6.PreviousIP(newAddrs)
|
|
} else {
|
|
newAddrs, err = net6.NextIP(newAddrs)
|
|
}
|
|
if err != nil {
|
|
break
|
|
}
|
|
}
|
|
|
|
return add, errors.New("ERROR: No unique IPv6 addresses available. Check network subnet")
|
|
}
|
|
|
|
// UniqueAddress6Cache - see if ipv6 address is unique using cache
|
|
func UniqueAddress6Cache(networkName string, reverse bool) (net.IP, error) {
|
|
add := net.IP{}
|
|
network := &schema.Network{Name: networkName}
|
|
err := network.Get(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
return add, err
|
|
}
|
|
if network.AddressRange6 == "" {
|
|
return add, fmt.Errorf("IPv6 not active on network %s", networkName)
|
|
}
|
|
|
|
//ensure AddressRange is valid
|
|
if _, _, err := net.ParseCIDR(network.AddressRange6); err != nil {
|
|
return add, err
|
|
}
|
|
net6 := iplib.Net6FromStr(network.AddressRange6)
|
|
|
|
newAddrs, err := net6.NextIP(net6.FirstAddress())
|
|
if reverse {
|
|
newAddrs, err = net6.PreviousIP(net6.LastAddress())
|
|
}
|
|
if err != nil {
|
|
return add, err
|
|
}
|
|
|
|
networkCacheMutex.RLock()
|
|
ipAllocated := allocatedIpMap[networkName]
|
|
for {
|
|
if _, ok := ipAllocated[newAddrs.String()]; !ok {
|
|
networkCacheMutex.RUnlock()
|
|
return newAddrs, nil
|
|
}
|
|
if reverse {
|
|
newAddrs, err = net6.PreviousIP(newAddrs)
|
|
} else {
|
|
newAddrs, err = net6.NextIP(newAddrs)
|
|
}
|
|
if err != nil {
|
|
break
|
|
}
|
|
}
|
|
networkCacheMutex.RUnlock()
|
|
|
|
return add, errors.New("ERROR: No unique IPv6 addresses available. Check network subnet")
|
|
}
|
|
|
|
// IsNetworkNameUnique - checks to see if any other networks have the same name (id)
|
|
func IsNetworkNameUnique(network *schema.Network) (bool, error) {
|
|
_network := &schema.Network{
|
|
Name: network.Name,
|
|
}
|
|
err := _network.Get(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
if errors.Is(err, gorm.ErrRecordNotFound) {
|
|
return true, nil
|
|
}
|
|
|
|
return false, err
|
|
}
|
|
|
|
return false, nil
|
|
}
|
|
|
|
func UpsertNetwork(_network *schema.Network) error {
|
|
return _network.Update(db.WithContext(context.TODO()))
|
|
}
|
|
|
|
// UpdateNetwork - updates a network with another network's fields.
// Validates newNetwork, copies the mutable fields onto currentNetwork
// (the name/netid may never change), validates and applies the Virtual NAT
// IPv4 pool/site-prefix settings, then persists currentNetwork.
func UpdateNetwork(currentNetwork, newNetwork *schema.Network) error {
	if err := ValidateNetwork(newNetwork, true); err != nil {
		return err
	}
	// The network name doubles as its id, so it can never change on update.
	if newNetwork.Name != currentNetwork.Name {
		return errors.New("failed to update network " + newNetwork.Name + ", cannot change netid.")
	}
	featureFlags := GetFeatureFlags()
	if featureFlags.EnableDeviceApproval {
		currentNetwork.AutoJoin = newNetwork.AutoJoin
	} else {
		// Without the device-approval feature, joins are always automatic.
		currentNetwork.AutoJoin = true
	}
	currentNetwork.AutoRemove = newNetwork.AutoRemove
	currentNetwork.AutoRemoveThreshold = newNetwork.AutoRemoveThreshold
	currentNetwork.AutoRemoveTags = newNetwork.AutoRemoveTags
	currentNetwork.DefaultACL = newNetwork.DefaultACL

	// Validate and update Virtual NAT IPv4 settings
	if newNetwork.VirtualNATPoolIPv4 != "" {
		// Pool provided: it must parse, and the site prefix must be in
		// (0, 32] and at least as specific as the pool prefix.
		_, poolNet, err := net.ParseCIDR(newNetwork.VirtualNATPoolIPv4)
		if err != nil {
			return fmt.Errorf("invalid Virtual NAT IPv4 pool CIDR: %w", err)
		}
		poolPrefixLen, _ := poolNet.Mask.Size()

		if newNetwork.VirtualNATSitePrefixLenIPv4 <= 0 || newNetwork.VirtualNATSitePrefixLenIPv4 > 32 {
			return fmt.Errorf("invalid Virtual NAT IPv4 site prefix length: must be between 1 and 32, got %d", newNetwork.VirtualNATSitePrefixLenIPv4)
		}
		// Validate that site prefix length is not larger (less specific) than pool prefix length
		// e.g., pool /24 and site /8 is invalid because /8 is less specific (larger CIDR) than /24
		// Site prefix must be >= pool prefix (more specific or equal)
		if newNetwork.VirtualNATSitePrefixLenIPv4 < poolPrefixLen {
			return fmt.Errorf("invalid Virtual NAT IPv4 site prefix length: site prefix length /%d cannot be larger (less specific) than pool prefix length /%d. Site prefix must be >= pool prefix (more specific or equal)", newNetwork.VirtualNATSitePrefixLenIPv4, poolPrefixLen)
		}
		currentNetwork.VirtualNATPoolIPv4 = newNetwork.VirtualNATPoolIPv4
		currentNetwork.VirtualNATSitePrefixLenIPv4 = newNetwork.VirtualNATSitePrefixLenIPv4
	} else if newNetwork.VirtualNATSitePrefixLenIPv4 > 0 {
		// If pool is empty but site prefix is provided, validate against existing pool
		if currentNetwork.VirtualNATPoolIPv4 != "" {
			_, poolNet, err := net.ParseCIDR(currentNetwork.VirtualNATPoolIPv4)
			// NOTE(review): if the stored pool fails to parse, validation is
			// skipped and the new site prefix is still applied below —
			// confirm this is intended.
			if err == nil {
				poolPrefixLen, _ := poolNet.Mask.Size()
				if newNetwork.VirtualNATSitePrefixLenIPv4 > 32 {
					return fmt.Errorf("invalid Virtual NAT IPv4 site prefix length: must be between 1 and 32, got %d", newNetwork.VirtualNATSitePrefixLenIPv4)
				}
				// Validate that site prefix length is not larger (less specific) than pool prefix length
				if newNetwork.VirtualNATSitePrefixLenIPv4 < poolPrefixLen {
					return fmt.Errorf("invalid Virtual NAT IPv4 site prefix length: site prefix length /%d cannot be larger (less specific) than pool prefix length /%d. Site prefix must be >= pool prefix (more specific or equal)", newNetwork.VirtualNATSitePrefixLenIPv4, poolPrefixLen)
				}
			}
		}
		currentNetwork.VirtualNATSitePrefixLenIPv4 = newNetwork.VirtualNATSitePrefixLenIPv4
	} else {
		// If both are empty, clear the settings
		currentNetwork.VirtualNATPoolIPv4 = newNetwork.VirtualNATPoolIPv4
		currentNetwork.VirtualNATSitePrefixLenIPv4 = newNetwork.VirtualNATSitePrefixLenIPv4
	}
	return currentNetwork.Update(db.WithContext(context.TODO()))
}
|
|
|
|
// validateNetName - checks if a netid of a network uses valid characters
|
|
func validateNetName(network *schema.Network) error {
|
|
var validationErr error
|
|
|
|
if len(network.Name) == 0 {
|
|
validationErr = errors.Join(validationErr, errors.New("network name cannot be empty"))
|
|
}
|
|
|
|
if len(network.Name) > 32 {
|
|
validationErr = errors.Join(validationErr, errors.New("network name cannot be longer than 32 characters"))
|
|
}
|
|
|
|
charset := "abcdefghijklmnopqrstuvwxyz1234567890-_"
|
|
for _, char := range network.Name {
|
|
if !strings.Contains(charset, string(char)) {
|
|
validationErr = errors.Join(validationErr, errors.New("invalid character(s) in network name"))
|
|
break
|
|
}
|
|
}
|
|
|
|
return validationErr
|
|
}
|
|
|
|
// Validate - validates fields of an network struct
|
|
func ValidateNetwork(network *schema.Network, isUpdate bool) error {
|
|
var validationErr error
|
|
err := validateNetName(network)
|
|
if err != nil {
|
|
validationErr = errors.Join(validationErr, err)
|
|
}
|
|
|
|
if !isUpdate {
|
|
nameUnique, _ := IsNetworkNameUnique(network)
|
|
if !nameUnique {
|
|
validationErr = errors.Join(validationErr, errors.New("invalid network name"))
|
|
}
|
|
}
|
|
|
|
if network.AddressRange != "" {
|
|
_, _, err = net.ParseCIDR(network.AddressRange)
|
|
if err != nil {
|
|
validationErr = errors.Join(validationErr, err)
|
|
}
|
|
}
|
|
|
|
if network.AddressRange6 != "" {
|
|
_, _, err = net.ParseCIDR(network.AddressRange6)
|
|
if err != nil {
|
|
validationErr = errors.Join(validationErr, err)
|
|
}
|
|
}
|
|
|
|
if network.DefaultKeepAlive > 1000 {
|
|
validationErr = errors.Join(validationErr, errors.New("default keep alive must be less than 1000"))
|
|
}
|
|
|
|
return validationErr
|
|
}
|
|
|
|
// SaveNetwork - save network struct to database
|
|
func SaveNetwork(_network *schema.Network) error {
|
|
_existingNetwork := schema.Network{Name: _network.Name}
|
|
// Check if network exists to preserve ID
|
|
err := _existingNetwork.Get(db.WithContext(context.TODO()))
|
|
if err == nil {
|
|
_network.ID = _existingNetwork.ID
|
|
return _network.Update(db.WithContext(context.TODO()))
|
|
}
|
|
|
|
return _network.Create(db.WithContext(context.TODO()))
|
|
}
|
|
|
|
// NetworkExists - check if network exists
|
|
func NetworkExists(name string) (bool, error) {
|
|
err := (&schema.Network{Name: name}).Get(db.WithContext(context.TODO()))
|
|
if err != nil {
|
|
if errors.Is(err, gorm.ErrRecordNotFound) {
|
|
return false, nil
|
|
}
|
|
|
|
return false, err
|
|
}
|
|
|
|
return true, nil
|
|
}
|
|
|
|
// SortNetworks - Sorts slice of Networks by their NetID alphabetically with numbers first
|
|
func SortNetworks(unsortedNetworks []schema.Network) {
|
|
sort.Slice(unsortedNetworks, func(i, j int) bool {
|
|
return unsortedNetworks[i].Name < unsortedNetworks[j].Name
|
|
})
|
|
}
|
|
|
|
// NetworkHook scans every network that has auto-removal enabled and
// deletes connected nodes that have not checked in within the network's
// AutoRemoveThreshold (minutes). Only nodes carrying one of the
// network's AutoRemoveTags are eligible; the wildcard tag "*" matches
// every node. When a removed node's host is left with no nodes, the
// host record is deleted as well. Errors on individual nodes are
// skipped so the sweep continues (best-effort cleanup).
var NetworkHook models.HookFunc = func(params ...interface{}) error {
	networks, err := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
	if err != nil {
		return err
	}
	allNodes, err := GetAllNodes()
	if err != nil {
		return err
	}
	for _, network := range networks {
		// Skip networks without auto-removal configured.
		if !network.AutoRemove || network.AutoRemoveThreshold == 0 {
			continue
		}
		nodes := GetNetworkNodesMemory(allNodes, network.Name)
		for _, node := range nodes {
			// Disconnected nodes are never auto-removed.
			if !node.Connected {
				continue
			}
			// A node is eligible when it carries one of the configured
			// tags, or when the wildcard "*" tag is configured.
			exists := false
			for _, tagI := range network.AutoRemoveTags {
				if tagI == "*" {
					exists = true
					break
				}
				if _, ok := node.Tags[models.TagID(tagI)]; ok {
					exists = true
					break
				}
			}
			if !exists {
				continue
			}
			// Remove the node once its last check-in is older than the
			// threshold (expressed in minutes).
			if time.Since(node.LastCheckIn) > time.Duration(network.AutoRemoveThreshold)*time.Minute {
				if err := DeleteNode(&node, true); err != nil {
					// Best-effort: keep processing the remaining nodes.
					continue
				}
				node.PendingDelete = true
				node.Action = models.NODE_DELETE
				// Publish the deletion so it can be propagated further.
				// NOTE(review): &node is a loop-variable address; safe on
				// Go 1.22+ (per-iteration variable) — confirm go.mod version.
				DeleteNodesCh <- &node
				// Also delete the host when it no longer owns any nodes.
				host := &schema.Host{ID: node.HostID}
				if err := host.Get(db.WithContext(context.TODO())); err == nil && len(host.Nodes) == 0 {
					// NOTE(review): delete error is intentionally ignored
					// here (best-effort host cleanup).
					(&schema.Host{ID: host.ID}).Delete(db.WithContext(context.TODO()))
				}
			}
		}
	}
	return nil
}
|
|
|
|
func InitNetworkHooks() {
|
|
HookManagerCh <- models.HookDetails{
|
|
ID: "network-hook",
|
|
Hook: NetworkHook,
|
|
Interval: time.Duration(GetServerSettings().CleanUpInterval) * time.Minute,
|
|
}
|
|
}
|
|
|
|
// == Private ==

// addressLock serializes address operations — presumably held by the
// address-assignment helpers defined elsewhere in this file; its call
// sites are not visible in this section, so confirm usage before
// changing locking semantics.
var addressLock = &sync.Mutex{}