Mirror of https://github.com/gravitl/netmaker.git
Commit edda2868fc
* feat(go): add user schema;
* feat(go): migrate to user schema;
* feat(go): add audit fields;
* feat(go): remove unused fields from the network model;
* feat(go): add network schema;
* feat(go): migrate to network schema;
* refactor(go): add comment to clarify migration logic;
* fix(go): test failures;
* fix(go): test failures;
* feat(go): change membership table to store memberships at all scopes;
* feat(go): add schema for access grants;
* feat(go): remove nameservers from new networks table; ensure db passed for schema functions;
* feat(go): set max conns for sqlite to 1;
* fix(go): issues updating user account status;
* refactor(go): remove converters and access grants;
* refactor(go): add json tags in schema models;
* refactor(go): rename file to migrate_v1_6_0.go;
* refactor(go): add user groups and user roles tables; use schema tables;
* refactor(go): inline get and list from schema package;
* refactor(go): inline get network and list users from schema package;
* fix(go): staticcheck issues;
* fix(go): remove test not in use; fix test case;
* fix(go): validate network;
* fix(go): resolve static checks;
* fix(go): new models errors;
* fix(go): test errors;
* fix(go): handle no records;
* fix(go): add validations for user object;
* fix(go): set correct extclient status;
* fix(go): test error;
* feat(go): make schema the base package;
* feat(go): add host schema;
* feat(go): use schema host everywhere;
* feat(go): inline get host, list hosts and delete host;
* feat(go): use non-ptr value;
* feat(go): use save to upsert all fields;
* feat(go): use save to upsert all fields;
* feat(go): save turn endpoint as string;
* feat(go): check for gorm error record not found;
* fix(go): test failures;
* fix(go): update all network fields;
* fix(go): update all network fields;
* feat(go): add paginated list networks api;
* feat(go): add paginated list users api;
* feat(go): add paginated list hosts api;
* feat(go): add pagination to list groups api;
* fix(go): comment;
* fix(go): implement marshal and unmarshal text for custom types;
* fix(go): implement marshal and unmarshal json for custom types;
* fix(go): just use the old model for unmarshalling;
* fix(go): implement marshal and unmarshal json for custom types;
* feat(go): remove paginated list networks api;
* feat(go): use custom paginated response object;
* fix(go): ensure default values for page and per_page are used when not passed;
* fix(go): rename v1.6.0 to v1.5.1;
* fix(go): check for gorm.ErrRecordNotFound instead of database.IsEmptyRecord;
* fix(go): use host id, not pending host id;
* feat(go): add filters to paginated apis;
* feat(go): add filters to paginated apis;
* feat(go): remove check for max username length;
* feat(go): add filters to count as well;
* feat(go): use library to check email address validity;
* feat(go): ignore pagination if params not passed;
* fix(go): pagination issues;
* fix(go): check exists before using;
* fix(go): remove debug log;
* fix(go): use gorm err record not found;
* fix(go): use gorm err record not found;
* fix(go): use user principal name when creating pending user;
* fix(go): use schema package for consts;
* fix(go): prevent disabling superadmin user; Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* fix(go): swap is admin and is superadmin; Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* fix(go): remove dead code block; https://github.com/gravitl/netmaker/pull/3910#discussion_r2928837937
* fix(go): incorrect message when trying to disable self; https://github.com/gravitl/netmaker/pull/3910#discussion_r2928837934
* fix(go): use correct header; Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* fix(go): return after error response; Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* fix(go): use correct order of params; https://github.com/gravitl/netmaker/pull/3910#discussion_r2929593036
* fix(go): set default values for page and page size; use v2 instead of /list;
* Update logic/auth.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* Update schema/user_roles.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* fix(go): syntax error;
* fix(go): set default values when page and per_page are not passed or 0;
* fix(go): use uuid.parse instead of uuid.must parse;
* fix(go): review errors;
* fix(go): review errors;
* Update controllers/user.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* Update controllers/user.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* NM-163: fix errors:
* Update db/types/options.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* fix(go): persist return user in event;
* Update db/types/options.go Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
* NM-163: duplicate lines of code
* NM-163: fix(go): fix missing return and filter parsing in user controller
  - Add missing return after error response in updateUserAccountStatus to prevent double-response and spurious ext-client side-effects
  - Use switch statements in listUsers to skip unrecognized account_status and mfa_status filter values
* fix(go): check for both min and max page size;
* fix(go): enclose transfer superadmin in transaction;
* fix(go): review errors;
* fix(go): remove free tier checks;
* fix(go): review fixes;

---------

Co-authored-by: VishalDalwadi <dalwadivishal26@gmail.com>
Co-authored-by: Vishal Dalwadi <51291657+VishalDalwadi@users.noreply.github.com>
Co-authored-by: tenki-reviewer[bot] <262613592+tenki-reviewer[bot]@users.noreply.github.com>
913 lines
24 KiB
Go
package migrate

import (
	"context"
	"crypto/sha1"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"log"
	"math/big"
	"net"
	"time"

	"golang.org/x/exp/slog"
	"gorm.io/datatypes"

	"github.com/google/uuid"
	"github.com/gravitl/netmaker/database"
	"github.com/gravitl/netmaker/db"
	"github.com/gravitl/netmaker/logger"
	"github.com/gravitl/netmaker/logic"
	"github.com/gravitl/netmaker/logic/acls"
	"github.com/gravitl/netmaker/logic/acls/nodeacls"
	"github.com/gravitl/netmaker/models"
	"github.com/gravitl/netmaker/mq"
	"github.com/gravitl/netmaker/schema"
	"github.com/gravitl/netmaker/servercfg"
)

// Run - runs all migrations
func Run() {
	migrateSettings()
	updateEnrollmentKeys()
	assignSuperAdmin()
	createDefaultTagsAndPolicies()
	syncUsers()
	updateNodes()
	updateAcls()
	updateNewAcls()
	logic.MigrateToGws()
	migrateToEgressV1()
	updateNetworks()
	resync()
	deleteOldExtclients()
	checkAndDeprecateOldAcls()
}

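// checkAndDeprecateOldAcls disables old ACL support once every entry in
// every network's old ACLs is set to allowed, marking those networks with
// the default "yes" ACL along the way.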
func checkAndDeprecateOldAcls() {
	// if everything is allowed on the old acls, the old acls can be disabled
	nets, _ := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
	disableOldAcls := true
	for _, netI := range nets {
		networkACL, err := nodeacls.FetchAllACLs(nodeacls.NetworkID(netI.Name))
		if err != nil {
			continue
		}
		for _, aclNode := range networkACL {
			for _, allowed := range aclNode {
				if allowed != acls.Allowed {
					disableOldAcls = false
					break
				}
			}
		}
		if disableOldAcls {
			netI.DefaultACL = "yes"
			logic.UpsertNetwork(&netI)
		}
	}
	if disableOldAcls {
		settings := logic.GetServerSettings()
		settings.OldAClsSupport = false
		logic.UpsertServerSettings(settings)
	}
}

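// updateNetworks applies network-level migrations; currently this is only
// the Virtual NAT initialization below.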
func updateNetworks() {
	initializeVirtualNATSettings()
}

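// initializeVirtualNATSettings assigns a Virtual NAT IPv4 pool to every
// network that lacks one (pro only, behind the overlapping-egress-ranges
// feature flag). Networks whose CIDR is missing, unparsable, or overlaps
// the CGNAT range 100.64.0.0/10 get a unique /22 carved out of 198.18.0.0/15.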
func initializeVirtualNATSettings() {
	if !servercfg.IsPro {
		return
	}
	if !logic.GetFeatureFlags().EnableOverlappingEgressRanges {
		return
	}
	logger.Log(1, "Initializing Virtual NAT settings for existing networks")
	defer logger.Log(1, "Completed initializing Virtual NAT settings for existing networks")

	networks, err := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
	if err != nil {
		logger.Log(0, "failed to get networks for Virtual NAT migration:", err.Error())
		return
	}

	// Track allocated pools to ensure uniqueness
	allocatedPools := make(map[string]struct{})

	// First pass: collect already-allocated pools
	for _, network := range networks {
		if network.VirtualNATPoolIPv4 != "" && network.VirtualNATSitePrefixLenIPv4 > 0 {
			allocatedPools[network.VirtualNATPoolIPv4] = struct{}{}
		}
	}

	// Allocate unique pools from fallback pool for networks that need them
	const fallbackPool = "198.18.0.0/15"
	const poolPrefixLen = 22 // /22 gives 1024 addresses per network, enough for virtual NAT
	_, fallbackNet, err := net.ParseCIDR(fallbackPool)
	if err != nil || fallbackNet == nil {
		logger.Log(0, "failed to parse fallback pool for Virtual NAT migration:", err.Error())
		return
	}
	_, cgnatNet, err := net.ParseCIDR("100.64.0.0/10")
	if err != nil || cgnatNet == nil {
		logger.Log(0, "failed to parse CGNAT CIDR for Virtual NAT migration:", err.Error())
		return
	}

	// Second pass: initialize networks
	for _, network := range networks {
		// Skip if already initialized
		if network.VirtualNATPoolIPv4 != "" && network.VirtualNATSitePrefixLenIPv4 > 0 {
			continue
		}

		vpnCIDR := network.AddressRange
		needsUniquePool := false

		if vpnCIDR == "" {
			needsUniquePool = true
			vpnCIDR = fallbackPool
		} else {
			// Check if overlaps with CGNAT
			_, vpnNet, err := net.ParseCIDR(vpnCIDR)
			if err != nil || vpnNet == nil {
				needsUniquePool = true
				vpnCIDR = fallbackPool
			} else {
				if cidrOverlaps(vpnNet, cgnatNet) {
					needsUniquePool = true
					vpnCIDR = fallbackPool
				}
			}
		}

		// If this network needs a unique pool, allocate one
		if needsUniquePool {
			uniquePool := allocateUniquePoolFromFallback(fallbackNet, poolPrefixLen, allocatedPools, network.Name)
			if uniquePool != "" {
				vpnCIDR = uniquePool
				allocatedPools[uniquePool] = struct{}{}
			}
		}

		// Initialize virtual NAT defaults
		logic.AssignVirtualNATDefaults(&network, vpnCIDR)

		// Save the updated network
		if err := logic.UpsertNetwork(&network); err != nil {
			logger.Log(0, "failed to update network", network.Name, "with Virtual NAT settings:", err.Error())
			continue
		}
		logger.Log(1, "initialized Virtual NAT settings for network", network.Name, "pool:", network.VirtualNATPoolIPv4)
	}
}

// allocateUniquePoolFromFallback allocates a unique /22 subnet from the fallback pool
func allocateUniquePoolFromFallback(pool *net.IPNet, newPrefixLen int, allocated map[string]struct{}, seed string) string {
	if pool == nil {
		return ""
	}

	poolPrefixLen, bits := pool.Mask.Size()
	if newPrefixLen < poolPrefixLen || newPrefixLen > bits {
		return ""
	}

	total := 1 << uint(newPrefixLen-poolPrefixLen)
	start := hashIndex(seed, total)

	for i := 0; i < total; i++ {
		idx := (start + i) % total
		cand := nthSubnet(pool, newPrefixLen, idx)
		if cand == nil {
			continue
		}
		cs := cand.String()
		if _, used := allocated[cs]; !used {
			return cs
		}
	}

	return ""
}

// nthSubnet calculates the nth subnet of a given prefix length within a pool
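// For example, with pool 198.18.0.0/15 and newPrefixLen 22, n=1 yields 198.18.4.0/22.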
func nthSubnet(pool *net.IPNet, newPrefixLen int, n int) *net.IPNet {
	if pool == nil {
		return nil
	}

	poolPrefixLen, bits := pool.Mask.Size()
	if newPrefixLen < poolPrefixLen || newPrefixLen > bits || n < 0 {
		return nil
	}

	base := ipToBigInt(pool.IP)
	size := new(big.Int).Lsh(big.NewInt(1), uint(bits-newPrefixLen))
	offset := new(big.Int).Mul(big.NewInt(int64(n)), size)
	ipInt := new(big.Int).Add(base, offset)
	ip := bigIntToIP(ipInt, bits)

	mask := net.CIDRMask(newPrefixLen, bits)
	return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
}

// ipToBigInt converts an IP address to a big.Int
func ipToBigInt(ip net.IP) *big.Int {
	ip = ip.To16()
	if ip == nil {
		return big.NewInt(0)
	}
	return new(big.Int).SetBytes(ip)
}

// bigIntToIP converts a big.Int back to an IP address
func bigIntToIP(i *big.Int, bits int) net.IP {
	b := i.Bytes()
	byteLen := bits / 8
	if len(b) < byteLen {
		pad := make([]byte, byteLen-len(b))
		b = append(pad, b...)
	}
	ip := net.IP(b)
	if bits == 32 {
		return ip.To4()
	}
	return ip
}

// hashIndex generates a deterministic index from a seed string
func hashIndex(seed string, mod int) int {
	if mod <= 1 {
		return 0
	}
	sum := sha1.Sum([]byte(seed))
	v := binary.BigEndian.Uint32(sum[:4])
	return int(v % uint32(mod))
}

// cidrOverlaps checks if two CIDR blocks overlap
func cidrOverlaps(a, b *net.IPNet) bool {
	return a.Contains(b.IP) || b.Contains(a.IP)
}

// resync removes any stale configuration left over from a previous run.
func resync() {
	nodes, _ := logic.GetAllNodes()
	for _, node := range nodes {
		if !node.IsGw {
			if len(node.RelayedNodes) > 0 {
				logic.DeleteRelay(node.Network, node.ID.String())
			}
			if node.IsIngressGateway {
				logic.DeleteIngressGateway(node.ID.String())
			}
			if len(node.InetNodeReq.InetNodeClientIDs) > 0 || node.IsInternetGateway {
				logic.UnsetInternetGw(&node)
				logic.UpsertNode(&node)
			}
		}
		if node.IsRelayed {
			if node.RelayedBy == "" {
				node.IsRelayed = false
				node.InternetGwID = ""
				logic.UpsertNode(&node)
			}
			if node.RelayedBy != "" {
				// check that the relay node still exists
				_, err := logic.GetNodeByID(node.RelayedBy)
				if err != nil {
					node.RelayedBy = ""
					node.InternetGwID = ""
					logic.UpsertNode(&node)
				}
			}
		}
		if node.InternetGwID != "" {
			_, err := logic.GetNodeByID(node.InternetGwID)
			if err != nil {
				node.InternetGwID = ""
				logic.UpsertNode(&node)
			}
		}
	}
}

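// assignSuperAdmin promotes a user to the superadmin platform role when no
// superadmin exists yet: the configured owner email if set, otherwise the
// first admin user that can be loaded.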
func assignSuperAdmin() {
	users, err := logic.GetUsers()
	if err != nil || len(users) == 0 {
		return
	}

	if ok, _ := logic.HasSuperAdmin(); ok {
		return
	}
	createdSuperAdmin := false
	owner := servercfg.GetOwnerEmail()
	if owner != "" {
		user := &schema.User{Username: owner}
		err = user.Get(db.WithContext(context.TODO()))
		if err != nil {
			log.Fatal("error getting user", "user", owner, "error", err.Error())
		}
		user.PlatformRoleID = schema.SuperAdminRole
		err = logic.UpsertUser(*user)
		if err != nil {
			log.Fatal(
				"error updating user to superadmin",
				"user",
				user.Username,
				"error",
				err.Error(),
			)
		}
		return
	}
	for _, u := range users {
		var isAdmin bool
		if u.PlatformRoleID == schema.AdminRole {
			isAdmin = true
		}
		if u.PlatformRoleID == "" && u.IsAdmin {
			isAdmin = true
		}

		if isAdmin {
			user := &schema.User{Username: u.UserName}
			err = user.Get(db.WithContext(context.TODO()))
			if err != nil {
				slog.Error("error getting user", "user", u.UserName, "error", err.Error())
				continue
			}
			user.PlatformRoleID = schema.SuperAdminRole
			err = logic.UpsertUser(*user)
			if err != nil {
				slog.Error(
					"error updating user to superadmin",
					"user",
					user.Username,
					"error",
					err.Error(),
				)
				continue
			} else {
				createdSuperAdmin = true
			}
			break
		}
	}

	if !createdSuperAdmin {
		slog.Error("failed to create superadmin!!")
	}
}

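// updateEnrollmentKeys backfills the key type on legacy enrollment keys and
// creates a default unlimited key for any network that has no key tagged
// with its name.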
func updateEnrollmentKeys() {
	rows, err := database.FetchRecords(database.ENROLLMENT_KEYS_TABLE_NAME)
	if err != nil {
		return
	}
	for _, row := range rows {
		var key models.EnrollmentKey
		if err = json.Unmarshal([]byte(row), &key); err != nil {
			continue
		}
		if key.Type != models.Undefined {
			logger.Log(2, "migration: enrollment key type already set")
			continue
		} else {
			logger.Log(2, "migration: updating enrollment key type")
			if key.Unlimited {
				key.Type = models.Unlimited
			} else if key.UsesRemaining > 0 {
				key.Type = models.Uses
			} else if !key.Expiration.IsZero() {
				key.Type = models.TimeExpiration
			}
		}
		data, err := json.Marshal(key)
		if err != nil {
			logger.Log(0, "migration: marshalling enrollment key: "+err.Error())
			continue
		}
		if err = database.Insert(key.Value, string(data), database.ENROLLMENT_KEYS_TABLE_NAME); err != nil {
			logger.Log(0, "migration: inserting enrollment key: "+err.Error())
			continue
		}
	}

	existingKeys, err := logic.GetAllEnrollmentKeys()
	if err != nil {
		return
	}
	// collect tags already in use so networks with a key are not given a duplicate
	existingTags := make(map[string]struct{})
	for _, existingKey := range existingKeys {
		for _, t := range existingKey.Tags {
			existingTags[t] = struct{}{}
		}
	}
	networks, _ := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
	for _, network := range networks {
		if _, ok := existingTags[network.Name]; ok {
			continue
		}
		_, _ = logic.CreateEnrollmentKey(
			0,
			time.Time{},
			[]string{network.Name},
			[]string{network.Name},
			[]models.TagID{},
			true,
			uuid.Nil,
			true,
			false,
			false,
		)
	}
}

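// updateNodes backfills empty tag maps on nodes and ext clients, removes
// stale remote-access-gateway roles for ingress gateways, strips default
// routes from egress ranges, and assigns a route metric of 256 to egress
// ranges that have none.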
func updateNodes() {
	nodes, err := logic.GetAllNodes()
	if err != nil {
		slog.Error("migration failed for nodes", "error", err)
		return
	}
	for _, node := range nodes {
		node := node
		if node.Tags == nil {
			node.Tags = make(map[models.TagID]struct{})
			logic.UpsertNode(&node)
		}
		if node.IsIngressGateway {
			host := &schema.Host{
				ID: node.HostID,
			}
			err = host.Get(db.WithContext(context.TODO()))
			if err == nil {
				go logic.DeleteRole(models.GetRAGRoleID(node.Network, host.ID.String()), true)
			}
		}
		if node.IsEgressGateway {
			egressRanges, update := removeInterGw(node.EgressGatewayRanges)
			if update {
				node.EgressGatewayRequest.Ranges = egressRanges
				node.EgressGatewayRanges = egressRanges
				logic.UpsertNode(&node)
			}
			if len(node.EgressGatewayRequest.Ranges) > 0 && len(node.EgressGatewayRequest.RangesWithMetric) == 0 {
				for _, egressRangeI := range node.EgressGatewayRequest.Ranges {
					node.EgressGatewayRequest.RangesWithMetric = append(node.EgressGatewayRequest.RangesWithMetric, models.EgressRangeMetric{
						Network:     egressRangeI,
						RouteMetric: 256,
					})
				}
				logic.UpsertNode(&node)
			}
		}
	}
	extclients, _ := logic.GetAllExtClients()
	for _, extclient := range extclients {
		if extclient.Tags == nil {
			extclient.Tags = make(map[models.TagID]struct{})
			logic.SaveExtClient(&extclient)
		}
	}
}

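// removeInterGw drops the default routes 0.0.0.0/0 and ::/0 from the given
// egress ranges and reports whether anything was removed.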
func removeInterGw(egressRanges []string) ([]string, bool) {
	update := false
	for i := len(egressRanges) - 1; i >= 0; i-- {
		if egressRanges[i] == "0.0.0.0/0" || egressRanges[i] == "::/0" {
			update = true
			egressRanges = append(egressRanges[:i], egressRanges[i+1:]...)
		}
	}
	return egressRanges, update
}

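// updateAcls migrates the old per-network node ACLs by adding entries for
// ext clients and pruning entries for nodes and clients that no longer
// exist. It only runs while old ACL support is enabled.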
func updateAcls() {
	if !logic.GetServerSettings().OldAClsSupport {
		return
	}
	// get all networks
	networks, err := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
	if err != nil {
		slog.Error("acls migration failed. error getting networks", "error", err)
		return
	}

	// get current acls per network
	for _, network := range networks {
		var networkAcl acls.ACLContainer
		networkAcl, err := networkAcl.Get(acls.ContainerID(network.Name))
		if err != nil {
			if database.IsEmptyRecord(err) {
				continue
			}
			slog.Error(fmt.Sprintf("error during acls migration. error getting acls for network: %s", network.Name), "error", err)
			continue
		}
		// convert old acls to new acls with clients
		// TODO: optimise O(n^2) operation
		clients, err := logic.GetNetworkExtClients(network.Name)
		if err != nil {
			slog.Error(fmt.Sprintf("error during acls migration. error getting clients for network: %s", network.Name), "error", err)
			continue
		}
		clientsIdMap := make(map[string]struct{})
		for _, client := range clients {
			clientsIdMap[client.ClientID] = struct{}{}
		}
		nodeIdsMap := make(map[string]struct{})
		for nodeId := range networkAcl {
			nodeIdsMap[string(nodeId)] = struct{}{}
		}
		/*
			initially, networkACL has only node acls so we add client acls to it
			final shape:
			{
				"node1": {
					"node2": 2,
					"client1": 2,
					"client2": 1,
				},
				"node2": {
					"node1": 2,
					"client1": 2,
					"client2": 1,
				},
				"client1": {
					"node1": 2,
					"node2": 2,
					"client2": 1,
				},
				"client2": {
					"node1": 1,
					"node2": 1,
					"client1": 1,
				},
			}
		*/
		for _, client := range clients {
			networkAcl[acls.AclID(client.ClientID)] = acls.ACL{}
			// add client values to node acls and create client acls with node values
			for id, nodeAcl := range networkAcl {
				// skip if not a node
				if _, ok := nodeIdsMap[string(id)]; !ok {
					continue
				}
				if nodeAcl == nil {
					slog.Warn("acls migration bad data: nil node acl", "node", id, "network", network.Name)
					continue
				}
				nodeAcl[acls.AclID(client.ClientID)] = acls.Allowed
				networkAcl[acls.AclID(client.ClientID)][id] = acls.Allowed
				if client.DeniedACLs == nil {
					continue
				} else if _, ok := client.DeniedACLs[string(id)]; ok {
					nodeAcl[acls.AclID(client.ClientID)] = acls.NotAllowed
					networkAcl[acls.AclID(client.ClientID)][id] = acls.NotAllowed
				}
			}
			// add clients to client acls response
			for _, c := range clients {
				if c.ClientID == client.ClientID {
					continue
				}
				networkAcl[acls.AclID(client.ClientID)][acls.AclID(c.ClientID)] = acls.Allowed
				if client.DeniedACLs == nil {
					continue
				} else if _, ok := client.DeniedACLs[c.ClientID]; ok {
					networkAcl[acls.AclID(client.ClientID)][acls.AclID(c.ClientID)] = acls.NotAllowed
				}
			}
			// delete the client from its own acl
			delete(networkAcl[acls.AclID(client.ClientID)], acls.AclID(client.ClientID))
		}

		// remove non-existent client and node acls
		for objId := range networkAcl {
			if _, ok := nodeIdsMap[string(objId)]; ok {
				continue
			}
			if _, ok := clientsIdMap[string(objId)]; ok {
				continue
			}
			// remove all occurrences of objId from all acls
			for objId2 := range networkAcl {
				delete(networkAcl[objId2], objId)
			}
			delete(networkAcl, objId)
		}

		// save new acls
		slog.Debug(fmt.Sprintf("(migration) saving new acls for network: %s", network.Name), "networkAcl", networkAcl)
		if _, err := networkAcl.Save(acls.ContainerID(network.Name)); err != nil {
			slog.Error(fmt.Sprintf("error during acls migration. error saving new acls for network: %s", network.Name), "error", err)
			continue
		}
		slog.Info(fmt.Sprintf("(migration) successfully saved new acls for network: %s", network.Name))
	}
}

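// updateNewAcls (pro only) drops user-group ACL sources that no longer
// exist or have no access to the ACL's network, deleting any ACL left with
// no sources.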
func updateNewAcls() {
	if servercfg.IsPro {
		userGroups, _ := (&schema.UserGroup{}).ListAll(db.WithContext(context.TODO()))
		userGroupMap := make(map[schema.UserGroupID]schema.UserGroup)
		for _, userGroup := range userGroups {
			userGroupMap[userGroup.ID] = userGroup
		}

		acls := logic.ListAcls()
		for _, acl := range acls {
			aclSrc := make([]models.AclPolicyTag, 0)
			for _, src := range acl.Src {
				if src.ID == models.UserGroupAclID {
					userGroup, ok := userGroupMap[schema.UserGroupID(src.Value)]
					if !ok {
						// if the group doesn't exist, don't add it to the acl's src.
						continue
					} else {
						_, allNetworkAccess := userGroup.NetworkRoles.Data()[schema.AllNetworks]
						if !allNetworkAccess {
							_, ok := userGroup.NetworkRoles.Data()[acl.NetworkID]
							if !ok {
								// if the group doesn't have permissions for the acl's
								// network, don't add it to the acl's src.
								continue
							}
						}
					}
				}
				aclSrc = append(aclSrc, src)
			}

			if len(aclSrc) == 0 {
				// if there are no acl sources, delete the acl.
				_ = logic.DeleteAcl(acl)
			} else if len(aclSrc) != len(acl.Src) {
				// if some user groups were removed from the acl source,
				// update the acl.
				acl.Src = aclSrc
				_ = logic.UpsertAcl(acl)
			}
		}
	}
}

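// MigrateEmqx asks connected clients to pull fresh configs, waits briefly,
// then kicks them off the broker so they reconnect to emqx.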
func MigrateEmqx() {
	err := mq.SendPullSYN()
	if err != nil {
		logger.Log(0, "failed to send pull syn to clients", "error", err.Error())
	}
	time.Sleep(time.Second * 3)
	slog.Info("proceeding to kick out clients from emqx")
	err = mq.KickOutClients()
	if err != nil {
		logger.Log(2, "failed to migrate emqx: ", "kickout-error", err.Error())
	}
}

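// syncUsers creates default network roles and groups for existing networks
// (pro only) and normalizes each user's auth type, user groups, and global
// network roles.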
func syncUsers() {
	logger.Log(1, "Migrating Users (SyncUsers)")
	defer logger.Log(1, "Completed migrating Users (SyncUsers)")
	// create default network user roles for existing networks
	if servercfg.IsPro {
		networks, _ := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
		for _, netI := range networks {
			logic.CreateDefaultNetworkRolesAndGroups(schema.NetworkID(netI.Name))
		}
	}

	users, err := (&schema.User{}).ListAll(db.WithContext(context.TODO()))
	if err == nil {
		for _, user := range users {
			user := user
			user.AuthType = schema.BasicAuth
			if logic.IsOauthUser(&user) == nil {
				user.AuthType = schema.OAuth
			}
			if len(user.UserGroups.Data()) == 0 {
				user.UserGroups = datatypes.NewJSONType(make(map[schema.UserGroupID]struct{}))
			}

			logic.AddGlobalNetRolesToAdmins(&user)
			logic.UpsertUser(user)
		}
	}
}

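// createDefaultTagsAndPolicies seeds default tags and ACL policies for each
// network, removes the legacy all-remote-access-gws policy, and on CE
// re-tags gateway nodes with the default gateway tag.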
func createDefaultTagsAndPolicies() {
	networks, err := (&schema.Network{}).ListAll(db.WithContext(context.TODO()))
	if err != nil {
		return
	}
	for _, network := range networks {
		logic.CreateDefaultTags(schema.NetworkID(network.Name))
		logic.CreateDefaultAclNetworkPolicies(schema.NetworkID(network.Name))
		// delete old remote access gws policy
		logic.DeleteAcl(models.Acl{ID: fmt.Sprintf("%s.%s", network.Name, "all-remote-access-gws")})
	}
	logic.MigrateAclPolicies()
	if !servercfg.IsPro {
		nodes, _ := logic.GetAllNodes()
		for _, node := range nodes {
			if node.IsGw {
				node.Tags = make(map[models.TagID]struct{})
				node.Tags[models.TagID(fmt.Sprintf("%s.%s", node.Network, models.GwTagName))] = struct{}{}
				logic.UpsertNode(&node)
			}
		}
	}
}

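// migrateToEgressV1 converts legacy egress gateway nodes into egress
// resources with matching device and user policies, then clears the legacy
// egress fields on each node.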
func migrateToEgressV1() {
	nodes, _ := logic.GetAllNodes()
	user, err := logic.GetSuperAdmin()
	if err != nil {
		return
	}
	for _, node := range nodes {
		if node.IsEgressGateway {
			host := &schema.Host{
				ID: node.HostID,
			}
			err := host.Get(db.WithContext(context.TODO()))
			if err != nil {
				continue
			}
			for _, rangeMetric := range node.EgressGatewayRequest.RangesWithMetric {
				e := &schema.Egress{Range: rangeMetric.Network}
				if err := e.DoesEgressRouteExists(db.WithContext(context.TODO())); err == nil {
					e.Nodes[node.ID.String()] = rangeMetric.RouteMetric
					e.Update(db.WithContext(context.TODO()))
					continue
				}
				e = &schema.Egress{
					ID:          uuid.New().String(),
					Name:        fmt.Sprintf("%s egress", rangeMetric.Network),
					Description: "",
					Network:     node.Network,
					Nodes: datatypes.JSONMap{
						node.ID.String(): rangeMetric.RouteMetric,
					},
					Tags:      make(datatypes.JSONMap),
					Range:     rangeMetric.Network,
					Nat:       node.EgressGatewayRequest.NatEnabled == "yes",
					Status:    true,
					CreatedBy: user.UserName,
					CreatedAt: time.Now().UTC(),
				}
				err = e.Create(db.WithContext(context.TODO()))
				if err == nil {
					acl := models.Acl{
						ID:          uuid.New().String(),
						Name:        "egress node policy",
						MetaData:    "",
						Default:     false,
						ServiceType: models.Any,
						NetworkID:   schema.NetworkID(node.Network),
						Proto:       models.ALL,
						RuleType:    models.DevicePolicy,
						Src: []models.AclPolicyTag{
							{
								ID:    models.NodeTagID,
								Value: "*",
							},
						},
						Dst: []models.AclPolicyTag{
							{
								ID:    models.EgressID,
								Value: e.ID,
							},
						},
						AllowedDirection: models.TrafficDirectionBi,
						Enabled:          true,
						CreatedBy:        "auto",
						CreatedAt:        time.Now().UTC(),
					}
					logic.InsertAcl(acl)
					acl = models.Acl{
						ID:          uuid.New().String(),
						Name:        "egress node policy",
						MetaData:    "",
						Default:     false,
						ServiceType: models.Any,
						NetworkID:   schema.NetworkID(node.Network),
						Proto:       models.ALL,
						RuleType:    models.UserPolicy,
						Src: []models.AclPolicyTag{
							{
								ID:    models.UserAclID,
								Value: "*",
							},
						},
						Dst: []models.AclPolicyTag{
							{
								ID:    models.EgressID,
								Value: e.ID,
							},
						},
						AllowedDirection: models.TrafficDirectionBi,
						Enabled:          true,
						CreatedBy:        "auto",
						CreatedAt:        time.Now().UTC(),
					}
					logic.InsertAcl(acl)
				}
			}
			node.IsEgressGateway = false
			node.EgressGatewayRequest = models.EgressGatewayRequest{}
			node.EgressGatewayNatEnabled = false
			node.EgressGatewayRanges = []string{}
			logic.UpsertNode(&node)
		}
	}
}

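// migrateSettings seeds server settings from the environment on first run
// and fills in defaults for settings introduced since.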
func migrateSettings() {
	settingsD := make(map[string]interface{})
	data, err := database.FetchRecord(database.SERVER_SETTINGS, logic.ServerSettingsDBKey)
	if database.IsEmptyRecord(err) {
		logic.UpsertServerSettings(logic.GetServerSettingsFromEnv())
	} else if err == nil {
		json.Unmarshal([]byte(data), &settingsD)
	}
	settings := logic.GetServerSettings()
	if _, ok := settingsD["old_acl_support"]; !ok {
		settings.OldAClsSupport = servercfg.IsOldAclEnabled()
	}
	if settings.PeerConnectionCheckInterval == "" {
		settings.PeerConnectionCheckInterval = "15"
	}
	if settings.PostureCheckInterval == "" {
		settings.PostureCheckInterval = "30"
	}
	if settings.CleanUpInterval == 0 {
		settings.CleanUpInterval = 10
	}
	if settings.IPDetectionInterval == 0 {
		settings.IPDetectionInterval = 15
	}
	if settings.AuditLogsRetentionPeriodInDays == 0 {
		settings.AuditLogsRetentionPeriodInDays = 7
	}
	if settings.DefaultDomain == "" {
		settings.DefaultDomain = servercfg.GetDefaultDomain()
	}
	if settings.JwtValidityDurationClients == 0 {
		settings.JwtValidityDurationClients = servercfg.GetJwtValidityDurationFromEnv() / 60
	}
	if settings.StunServers == "" {
		settings.StunServers = servercfg.GetStunServers()
	}
	logic.UpsertServerSettings(settings)
}

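// deleteOldExtclients keeps at most one disabled remote-access ext client
// per user and deletes the rest.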
func deleteOldExtclients() {
	extclients, _ := logic.GetAllExtClients()
	userExtclientMap := make(map[string][]models.ExtClient)
	for _, extclient := range extclients {
		if extclient.RemoteAccessClientID == "" {
			continue
		}

		if extclient.Enabled {
			continue
		}

		if _, ok := userExtclientMap[extclient.OwnerID]; !ok {
			userExtclientMap[extclient.OwnerID] = make([]models.ExtClient, 0)
		}

		userExtclientMap[extclient.OwnerID] = append(userExtclientMap[extclient.OwnerID], extclient)
	}

	for _, userExtclients := range userExtclientMap {
		if len(userExtclients) > 1 {
			for _, extclient := range userExtclients[1:] {
				_ = logic.DeleteExtClient(extclient.Network, extclient.ClientID, false)
			}
		}
	}
}