merge conflicts resolved

This commit is contained in:
Abhishek Kondur
2023-04-03 22:43:33 +04:00
33 changed files with 699 additions and 246 deletions
+1
View File
@@ -31,6 +31,7 @@ body:
label: Version
description: What version are you running?
options:
- v0.18.5
- v0.18.4
- v0.18.3
- v0.18.2
+58
View File
@@ -0,0 +1,58 @@
name: Deploy and Test Branch

on:
  workflow_dispatch:
  pull_request:
    types: [opened, synchronize, reopened]
    branches: [develop]

jobs:
  skip-check:
    runs-on: ubuntu-latest
    outputs:
      # BUG FIX: this previously read `steps.check.outputs.skip`, but the step id
      # below is `skip` and fkirc/skip-duplicate-actions publishes its verdict as
      # `should_skip` — so the output was always empty and nothing ever skipped.
      skip: ${{ steps.skip.outputs.should_skip }}
    steps:
      - id: skip
        uses: fkirc/skip-duplicate-actions@v5
        with:
          concurrent_skipping: 'always'

  # Decide which netclient branch to test against: a branch matching the PR's
  # head ref if one exists in gravitl/netclient, otherwise develop.
  getbranch:
    runs-on: ubuntu-latest
    needs: skip-check
    if: ${{ needs.skip-check.outputs.skip != 'true' }}
    outputs:
      netclientbranch: ${{ steps.checkbranch.outputs.netclientbranch }}
    steps:
      - name: checkout
        uses: actions/checkout@v3
        with:
          repository: gravitl/netclient
          ref: develop
      - name: check if branch exists
        id: checkbranch
        # NOTE(review): github.head_ref is only set for pull_request events; on
        # workflow_dispatch it is empty and this falls through to develop.
        run: |
          if git show-ref ${{ github.head_ref }}; then
            echo branch exists
            echo "netclientbranch=${{ github.head_ref }}" >> $GITHUB_OUTPUT
          else
            echo branch does not exist
            echo "netclientbranch=develop" >> $GITHUB_OUTPUT
          fi

  terraform:
    needs: getbranch
    uses: gravitl/devops/.github/workflows/terraform.yml@master
    with:
      netmakerbranch: ${{ github.head_ref }}
      netclientbranch: ${{ needs.getbranch.outputs.netclientbranch }}
    secrets: inherit

  testbranch:
    needs: [getbranch, terraform]
    uses: gravitl/devops/.github/workflows/branchtest.yml@master
    with:
      tag: ${{ github.run_id }}-${{ github.run_attempt }}
      network: terraform
    secrets: inherit
+38
View File
@@ -0,0 +1,38 @@
name: Delete Droplets

on:
  workflow_run:
    workflows: [Deploy and Test Branch]
    types:
      - completed

jobs:
  on-success:
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    steps:
      - name: delete droplets
        # BUG FIX: this used `-X GET`, which only LISTS droplets matching the
        # tag; the DigitalOcean API requires DELETE on this endpoint to destroy
        # all droplets carrying the tag.
        run: |
          sleep 15m
          curl -X DELETE \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" \
            "https://api.digitalocean.com/v2/droplets?tag_name=$TAG"
        env:
          DIGITALOCEAN_TOKEN: ${{ secrets.DIGITALOCEAN_TOKEN }}
          TAG: ${{ github.event.workflow_run.run_id }}-${{ github.event.workflow_run.run_attempt }}

  on-failure:
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'failure' }}
    steps:
      - name: delete droplets
        # Keep failed-run droplets around longer for debugging before cleanup.
        # NOTE(review): GitHub-hosted jobs are capped at 6 hours, so a 6h sleep
        # plus the curl call risks being cut off at the limit — confirm and
        # consider a shorter delay.
        run: |
          sleep 6h
          curl -X DELETE \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" \
            "https://api.digitalocean.com/v2/droplets?tag_name=$TAG"
        env:
          DIGITALOCEAN_TOKEN: ${{ secrets.DIGITALOCEAN_TOKEN }}
          TAG: ${{ github.event.workflow_run.run_id }}-${{ github.event.workflow_run.run_attempt }}
+1 -1
View File
@@ -17,7 +17,7 @@
<p align="center">
<a href="https://github.com/gravitl/netmaker/releases">
<img src="https://img.shields.io/badge/Version-0.18.4-informational?style=flat-square" />
<img src="https://img.shields.io/badge/Version-0.18.5-informational?style=flat-square" />
</a>
<a href="https://hub.docker.com/r/gravitl/netmaker/tags">
<img src="https://img.shields.io/docker/pulls/gravitl/netmaker?label=downloads" />
+1 -1
View File
@@ -3,7 +3,7 @@ version: "3.4"
services:
netmaker:
container_name: netmaker
image: gravitl/netmaker:v0.18.4
image: gravitl/netmaker:v0.18.5
restart: always
volumes:
- dnsconfig:/root/config/dnsconfig
+1 -1
View File
@@ -3,7 +3,7 @@ version: "3.4"
services:
netclient:
container_name: netclient
image: 'gravitl/netclient:v0.18.4'
image: 'gravitl/netclient:v0.18.5'
hostname: netmaker-1
network_mode: host
restart: always
+1 -1
View File
@@ -10,7 +10,7 @@
//
// Schemes: https
// BasePath: /
// Version: 0.18.4
// Version: 0.18.5
// Host: netmaker.io
//
// Consumes:
+17
View File
@@ -182,9 +182,21 @@ func handleHostRegister(w http.ResponseWriter, r *http.Request) {
logic.ReturnErrorResponse(w, r, logic.FormatError(fmt.Errorf("invalid enrollment key"), "badrequest"))
return
}
hostPass := newHost.HostPass
if !hostExists {
// register host
logic.CheckHostPorts(&newHost)
// create EMQX credentials and ACLs for host
if servercfg.GetBrokerType() == servercfg.EmqxBrokerType {
if err := mq.CreateEmqxUser(newHost.ID.String(), newHost.HostPass, false); err != nil {
logger.Log(0, "failed to create host credentials for EMQX: ", err.Error())
return
}
if err := mq.CreateHostACL(newHost.ID.String(), servercfg.GetServerInfo().Server); err != nil {
logger.Log(0, "failed to add host ACL rules to EMQX: ", err.Error())
return
}
}
if err = logic.CreateHost(&newHost); err != nil {
logger.Log(0, "host", newHost.ID.String(), newHost.Name, "failed registration -", err.Error())
logic.ReturnErrorResponse(w, r, logic.FormatError(err, "internal"))
@@ -205,6 +217,11 @@ func handleHostRegister(w http.ResponseWriter, r *http.Request) {
// ready the response
server := servercfg.GetServerInfo()
server.TrafficKey = key
if servercfg.GetBrokerType() == servercfg.EmqxBrokerType {
// set MQ username and password for EMQX clients
server.MQUserName = newHost.ID.String()
server.MQPassword = hostPass
}
response := models.RegisterResponse{
ServerConf: server,
RequestedHost: newHost,
+1 -1
View File
@@ -215,7 +215,7 @@ func getExtClientConf(w http.ResponseWriter, r *http.Request) {
if network.DefaultKeepalive != 0 {
keepalive = "PersistentKeepalive = " + strconv.Itoa(int(network.DefaultKeepalive))
}
gwendpoint := host.EndpointIP.String() + ":" + strconv.Itoa(logic.GetPeerListenPort(host))
gwendpoint := host.EndpointIP.String() + ":" + strconv.Itoa(host.ListenPort)
newAllowedIPs := network.AddressRange
if newAllowedIPs != "" && network.AddressRange6 != "" {
newAllowedIPs += ","
+49
View File
@@ -1,6 +1,7 @@
package controller
import (
"context"
"encoding/json"
"errors"
"fmt"
@@ -26,6 +27,7 @@ func hostHandlers(r *mux.Router) {
r.HandleFunc("/api/hosts/{hostid}/relay", logic.SecurityCheck(false, http.HandlerFunc(createHostRelay))).Methods(http.MethodPost)
r.HandleFunc("/api/hosts/{hostid}/relay", logic.SecurityCheck(false, http.HandlerFunc(deleteHostRelay))).Methods(http.MethodDelete)
r.HandleFunc("/api/hosts/adm/authenticate", authenticateHost).Methods(http.MethodPost)
r.HandleFunc("/api/v1/host", authorize(true, false, "host", http.HandlerFunc(pull))).Methods(http.MethodGet)
}
// swagger:route GET /api/hosts hosts getHosts
@@ -53,6 +55,53 @@ func getHosts(w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(apiHosts)
}
// swagger:route GET /api/v1/host pull pullHost
//
// Used by clients for "pull" command
//
// Schemes: https
//
// Security:
// oauth
//
// Responses:
// 200: pull
// pull serves the netclient "pull" command: it returns the authorized host's
// record, its current peer list, and the server configuration in one payload.
// The host ID is taken from the header set by the authorize middleware.
func pull(w http.ResponseWriter, r *http.Request) {
	hostID := r.Header.Get(hostIDHeader) // return JSON/API formatted keys
	if hostID == "" {
		logger.Log(0, "no host authorized to pull")
		logic.ReturnErrorResponse(w, r, logic.FormatError(fmt.Errorf("no host authorized to pull"), "internal"))
		return
	}

	host, err := logic.GetHost(hostID)
	if err != nil {
		logger.Log(0, "no host found during pull", hostID)
		logic.ReturnErrorResponse(w, r, logic.FormatError(err, "internal"))
		return
	}

	peerUpdate, err := logic.GetPeerUpdateForHost(context.Background(), "", host, nil, nil)
	if err != nil {
		logger.Log(0, "could not pull peers for host", hostID)
		logic.ReturnErrorResponse(w, r, logic.FormatError(err, "internal"))
		return
	}

	serverConf := servercfg.GetServerInfo()
	if servercfg.GetBrokerType() == servercfg.EmqxBrokerType {
		// EMQX clients authenticate against the broker using their host ID.
		serverConf.MQUserName = hostID
	}

	pullResponse := models.HostPull{
		Host:         *host,
		ServerConfig: serverConf,
		Peers:        peerUpdate.Peers,
		PeerIDs:      peerUpdate.PeerIDs,
	}
	logger.Log(1, hostID, "completed a pull")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(&pullResponse)
}
// swagger:route PUT /api/hosts/{hostid} hosts updateHost
//
// Updates a Netclient host on Netmaker server.
+1
View File
@@ -61,6 +61,7 @@ func migrate(w http.ResponseWriter, r *http.Request) {
return
}
if !logic.HostExists(&data.NewHost) {
logic.CheckHostPorts(&data.NewHost)
if err = logic.CreateHost(&data.NewHost); err != nil {
logic.ReturnErrorResponse(w, r, logic.FormatError(err, "badrequest"))
return
+12 -5
View File
@@ -19,6 +19,8 @@ import (
"golang.org/x/crypto/bcrypt"
)
var hostIDHeader = "host-id"
func nodeHandlers(r *mux.Router) {
r.HandleFunc("/api/nodes", authorize(false, false, "user", http.HandlerFunc(getAllNodes))).Methods(http.MethodGet)
@@ -152,7 +154,7 @@ func authenticate(response http.ResponseWriter, request *http.Request) {
// even if it's technically ok
// This is kind of a poor man's RBAC. There's probably a better/smarter way.
// TODO: Consider better RBAC implementations
func authorize(nodesAllowed, networkCheck bool, authNetwork string, next http.Handler) http.HandlerFunc {
func authorize(hostAllowed, networkCheck bool, authNetwork string, next http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var errorResponse = models.ErrorResponse{
Code: http.StatusUnauthorized, Message: logic.Unauthorized_Msg,
@@ -184,11 +186,11 @@ func authorize(nodesAllowed, networkCheck bool, authNetwork string, next http.Ha
logic.ReturnErrorResponse(w, r, errorResponse)
return
}
//check if node instead of user
if nodesAllowed {
// check if host instead of user
if hostAllowed {
// TODO --- should ensure that node is only operating on itself
if _, _, _, err := logic.VerifyToken(authToken); err == nil {
if hostID, _, _, err := logic.VerifyHostToken(authToken); err == nil {
r.Header.Set(hostIDHeader, hostID)
// this indicates request is from a node
// used for failover - if a getNode comes from node, this will trigger a metrics wipe
next.ServeHTTP(w, r)
@@ -244,6 +246,7 @@ func authorize(nodesAllowed, networkCheck bool, authNetwork string, next http.Ha
} else {
isAuthorized = (nodeID == params["netid"])
}
case "host":
case "user":
isAuthorized = true
default:
@@ -394,6 +397,10 @@ func getNode(w http.ResponseWriter, r *http.Request) {
return
}
server := servercfg.GetServerInfo()
if servercfg.GetBrokerType() == servercfg.EmqxBrokerType {
// set MQ username for EMQX clients
server.MQUserName = host.ID.String()
}
response := models.NodeGet{
Node: node,
Host: *host,
+6 -7
View File
@@ -115,7 +115,7 @@ func createHostRelay(w http.ResponseWriter, r *http.Request) {
return
}
relay.HostID = params["hostid"]
relayHost, _, err := logic.CreateHostRelay(relay)
relayHost, relayedHosts, err := logic.CreateHostRelay(relay)
if err != nil {
logger.Log(0, r.Header.Get("user"),
fmt.Sprintf("failed to create relay on host [%s]: %v", relay.HostID, err))
@@ -131,15 +131,14 @@ func createHostRelay(w http.ResponseWriter, r *http.Request) {
}
logger.Log(1, r.Header.Get("user"), "created relay on host", relay.HostID)
go func(relayHostID string) {
relatedhosts := logic.GetRelatedHosts(relayHostID)
for _, relatedHost := range relatedhosts {
relatedHost.ProxyEnabled = true
logic.UpsertHost(&relatedHost)
for _, relayedHost := range relayedHosts {
relayedHost.ProxyEnabled = true
logic.UpsertHost(&relayedHost)
if err := mq.HostUpdate(&models.HostUpdate{
Action: models.UpdateHost,
Host: relatedHost,
Host: relayedHost,
}); err != nil {
logger.Log(0, "failed to send host update: ", relatedHost.ID.String(), err.Error())
logger.Log(0, "failed to send host update: ", relayedHost.ID.String(), err.Error())
}
}
if err := mq.PublishPeerUpdate(); err != nil {
+1 -1
View File
@@ -1,6 +1,6 @@
{
# ZeroSSL account
acme_ca https://acme.zerossl.com/v2/DV90
# acme_ca https://acme.zerossl.com/v2/DV90
email YOUR_EMAIL
}
+1 -1
View File
@@ -16,7 +16,7 @@ spec:
hostNetwork: true
containers:
- name: netclient
image: gravitl/netclient:v0.18.4
image: gravitl/netclient:v0.18.5
env:
- name: TOKEN
value: "TOKEN_VALUE"
+1 -1
View File
@@ -28,7 +28,7 @@ spec:
# - "<node label value>"
containers:
- name: netclient
image: gravitl/netclient:v0.18.4
image: gravitl/netclient:v0.18.5
env:
- name: TOKEN
value: "TOKEN_VALUE"
+1 -1
View File
@@ -79,7 +79,7 @@ spec:
value: "Kubernetes"
- name: VERBOSITY
value: "3"
image: gravitl/netmaker:v0.18.4
image: gravitl/netmaker:v0.18.5
imagePullPolicy: Always
name: netmaker
ports:
+1 -1
View File
@@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: netmaker-ui
image: gravitl/netmaker-ui:v0.18.4
image: gravitl/netmaker-ui:v0.18.5
ports:
- containerPort: 443
env:
+17
View File
@@ -194,6 +194,23 @@ func UpdateExtClient(newclientid string, network string, enabled bool, client *m
if err != nil {
return client, err
}
if newclientid != client.ClientID {
//name change only
client.ClientID = newclientid
client.LastModified = time.Now().Unix()
data, err := json.Marshal(&client)
if err != nil {
return nil, err
}
key, err := GetRecordKey(client.ClientID, client.Network)
if err != nil {
return nil, err
}
if err = database.Insert(key, string(data), database.EXT_CLIENT_TABLE_NAME); err != nil {
return client, err
}
return client, nil
}
client.ClientID = newclientid
client.Enabled = enabled
SetClientACLs(client, newACLs)
+2 -2
View File
@@ -129,8 +129,8 @@ func VerifyUserToken(tokenString string) (username string, networks []string, is
return "", nil, false, err
}
// VerifyToken - [nodes] Only
func VerifyToken(tokenString string) (hostID string, mac string, network string, err error) {
// VerifyHostToken - [hosts] Only
func VerifyHostToken(tokenString string) (hostID string, mac string, network string, err error) {
claims := &models.Claims{}
// this may be a stupid way of serving up a master key
+10
View File
@@ -52,6 +52,16 @@ func GetNetworkNodesMemory(allNodes []models.Node, network string) []models.Node
return nodes
}
// UpdateNodeCheckin - updates the checkin time of a node
// and persists the refreshed record back to the nodes table.
func UpdateNodeCheckin(node *models.Node) error {
	node.SetLastCheckIn()
	raw, marshalErr := json.Marshal(node)
	if marshalErr != nil {
		return marshalErr
	}
	return database.Insert(node.ID.String(), string(raw), database.NODES_TABLE_NAME)
}
// UpdateNode - takes a node and updates another node with it's values
func UpdateNode(currentNode *models.Node, newNode *models.Node) error {
if newNode.Address.IP.String() != currentNode.Address.IP.String() {
+1 -1
View File
@@ -221,7 +221,7 @@ func GetPeerUpdateForHost(ctx context.Context, network string, host *models.Host
}
peerConfig.Endpoint = &net.UDPAddr{
IP: peerHost.EndpointIP,
Port: GetPeerListenPort(peerHost),
Port: peerHost.ListenPort,
}
if uselocal {
+1 -1
View File
@@ -28,7 +28,7 @@ import (
"github.com/gravitl/netmaker/turnserver"
)
var version = "v0.18.4"
var version = "v0.18.5"
// Start DB Connection and start API Request Handler
func main() {
+2
View File
@@ -96,6 +96,8 @@ const (
Acknowledgement = "ACK"
// RequestAck - request an ACK
RequestAck = "REQ_ACK"
// CheckIn - update last check in times and public address and interfaces
CheckIn = "CHECK_IN"
)
// HostUpdate - struct for host update
+8
View File
@@ -198,6 +198,14 @@ type TrafficKeys struct {
Server []byte `json:"server" bson:"server" yaml:"server"`
}
// HostPull - response of a host's pull
type HostPull struct {
// Host - the requesting host's current server-side record
Host Host `json:"host" yaml:"host"`
// Peers - WireGuard peer configurations for the host
Peers []wgtypes.PeerConfig `json:"peers" yaml:"peers"`
// ServerConfig - server connection info (MQ username is populated for EMQX brokers)
ServerConfig ServerConfig `json:"server_config" yaml:"server_config"`
// PeerIDs - per-peer ID/name lookup map; omitted from JSON/YAML when empty
PeerIDs PeerMap `json:"peer_ids,omitempty" yaml:"peer_ids,omitempty"`
}
// NodeGet - struct for a single node get response
type NodeGet struct {
Node Node `json:"node" bson:"node" yaml:"node"`
+237
View File
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"net/http"
"sync"
"github.com/gravitl/netmaker/servercfg"
)
@@ -29,6 +30,17 @@ type (
Token string `json:"token"`
Version string `json:"version"`
}
aclRule struct {
Topic string `json:"topic"`
Permission string `json:"permission"`
Action string `json:"action"`
}
aclObject struct {
Rules []aclRule `json:"rules"`
Username string `json:"username,omitempty"`
}
)
func getEmqxAuthToken() (string, error) {
@@ -152,3 +164,228 @@ func CreateEmqxDefaultAuthenticator() error {
}
return nil
}
// CreateEmqxDefaultAuthorizer - creates a default ACL authorization mechanism based on the built in database
// by POSTing an enabled built_in_database source to the EMQX management API.
// NOTE(review): the HTTP client has no timeout; an unresponsive broker blocks here.
func CreateEmqxDefaultAuthorizer() error {
	authToken, err := getEmqxAuthToken()
	if err != nil {
		return err
	}
	source := struct {
		Enable bool   `json:"enable"`
		Type   string `json:"type"`
	}{Enable: true, Type: "built_in_database"}
	encoded, err := json.Marshal(&source)
	if err != nil {
		return err
	}
	request, err := http.NewRequest(http.MethodPost, servercfg.GetEmqxRestEndpoint()+"/api/v5/authorization/sources", bytes.NewReader(encoded))
	if err != nil {
		return err
	}
	request.Header.Add("content-type", "application/json")
	request.Header.Add("authorization", "Bearer "+authToken)
	response, err := (&http.Client{}).Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	// EMQX replies 204 No Content on success.
	if response.StatusCode == http.StatusNoContent {
		return nil
	}
	msg, readErr := io.ReadAll(response.Body)
	if readErr != nil {
		return readErr
	}
	return fmt.Errorf("error creating default EMQX ACL authorization mechanism %v", string(msg))
}
// GetUserACL - returns ACL rules by username
// from the EMQX built-in-database authorization source.
func GetUserACL(username string) (*aclObject, error) {
	authToken, err := getEmqxAuthToken()
	if err != nil {
		return nil, err
	}
	endpoint := servercfg.GetEmqxRestEndpoint() + "/api/v5/authorization/sources/built_in_database/rules/users/" + username
	request, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	request.Header.Add("content-type", "application/json")
	request.Header.Add("authorization", "Bearer "+authToken)
	response, err := (&http.Client{}).Do(request)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	raw, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}
	if response.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("error fetching ACL rules %v", string(raw))
	}
	acl := &aclObject{}
	if err := json.Unmarshal(raw, acl); err != nil {
		return nil, err
	}
	return acl, nil
}
// CreateDefaultDenyRule - creates a rule to deny access to all topics for all users by default
// to allow user access to topics use the `mq.CreateUserAccessRule` function
func CreateDefaultDenyRule() error {
	authToken, err := getEmqxAuthToken()
	if err != nil {
		return err
	}
	// "#" is the MQTT multi-level wildcard: this single rule covers every topic.
	denyAll := aclObject{Rules: []aclRule{{Topic: "#", Permission: "deny", Action: "all"}}}
	encoded, err := json.Marshal(&denyAll)
	if err != nil {
		return err
	}
	request, err := http.NewRequest(http.MethodPost, servercfg.GetEmqxRestEndpoint()+"/api/v5/authorization/sources/built_in_database/rules/all", bytes.NewReader(encoded))
	if err != nil {
		return err
	}
	request.Header.Add("content-type", "application/json")
	request.Header.Add("authorization", "Bearer "+authToken)
	response, err := (&http.Client{}).Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	if response.StatusCode == http.StatusNoContent {
		return nil
	}
	msg, readErr := io.ReadAll(response.Body)
	if readErr != nil {
		return readErr
	}
	return fmt.Errorf("error creating default ACL rules %v", string(msg))
}
// CreateHostACL - create host ACL rules
// granting the host full access to every host-scoped topic it exchanges with
// this server. The PUT replaces the host's rule set wholesale.
func CreateHostACL(hostID, serverName string) error {
	authToken, err := getEmqxAuthToken()
	if err != nil {
		return err
	}
	// Host-scoped topics; order is preserved in the serialized rule list.
	topics := []string{
		fmt.Sprintf("peers/host/%s/%s", hostID, serverName),
		fmt.Sprintf("host/update/%s/%s", hostID, serverName),
		fmt.Sprintf("dns/all/%s/%s", hostID, serverName),
		fmt.Sprintf("dns/update/%s/%s", hostID, serverName),
		fmt.Sprintf("host/serverupdate/%s/%s", serverName, hostID),
	}
	rules := make([]aclRule, 0, len(topics))
	for _, topic := range topics {
		rules = append(rules, aclRule{Topic: topic, Permission: "allow", Action: "all"})
	}
	encoded, err := json.Marshal(&aclObject{Username: hostID, Rules: rules})
	if err != nil {
		return err
	}
	request, err := http.NewRequest(http.MethodPut, servercfg.GetEmqxRestEndpoint()+"/api/v5/authorization/sources/built_in_database/rules/users/"+hostID, bytes.NewReader(encoded))
	if err != nil {
		return err
	}
	request.Header.Add("content-type", "application/json")
	request.Header.Add("authorization", "Bearer "+authToken)
	response, err := (&http.Client{}).Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	if response.StatusCode == http.StatusNoContent {
		return nil
	}
	msg, readErr := io.ReadAll(response.Body)
	if readErr != nil {
		return readErr
	}
	return fmt.Errorf("error adding ACL Rules for user %s Error: %v", hostID, string(msg))
}
// a lock required for preventing simultaneous updates to the same ACL object leading to overwriting each other
// might occur when multiple nodes belonging to the same host are created at the same time
var nodeAclMux sync.Mutex
// AppendNodeUpdateACL - adds ACL rule for subscribing to node updates for a node ID
// This is a read-modify-write cycle against the EMQX built-in database: fetch the
// host's current ACL object, append the per-node rules, then PUT the whole object
// back — hence the mutex above guards the entire sequence.
func AppendNodeUpdateACL(hostID, nodeNetwork, nodeID, serverName string) error {
nodeAclMux.Lock()
defer nodeAclMux.Unlock()
// bearer token for the EMQX management REST API
token, err := getEmqxAuthToken()
if err != nil {
return err
}
// current rules for this host; the PUT below replaces the whole set, so the
// existing rules must be carried over, not dropped
aclObject, err := GetUserACL(hostID)
if err != nil {
return err
}
aclObject.Rules = append(aclObject.Rules, []aclRule{
{
// node-update messages addressed to this node on its network
Topic: fmt.Sprintf("node/update/%s/%s", nodeNetwork, nodeID),
Permission: "allow",
Action: "subscribe",
},
{
Topic: fmt.Sprintf("ping/%s/%s", serverName, nodeID),
Permission: "allow",
Action: "all",
},
{
Topic: fmt.Sprintf("update/%s/%s", serverName, nodeID),
Permission: "allow",
Action: "all",
},
{
Topic: fmt.Sprintf("signal/%s/%s", serverName, nodeID),
Permission: "allow",
Action: "all",
},
{
Topic: fmt.Sprintf("metrics/%s/%s", serverName, nodeID),
Permission: "allow",
Action: "all",
},
}...)
payload, err := json.Marshal(aclObject)
if err != nil {
return err
}
// PUT replaces the user's rule list with the merged set
req, err := http.NewRequest(http.MethodPut, servercfg.GetEmqxRestEndpoint()+"/api/v5/authorization/sources/built_in_database/rules/users/"+hostID, bytes.NewReader(payload))
if err != nil {
return err
}
req.Header.Add("content-type", "application/json")
req.Header.Add("authorization", "Bearer "+token)
resp, err := (&http.Client{}).Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
// EMQX replies 204 No Content on success; anything else is surfaced with the body
if resp.StatusCode != http.StatusNoContent {
msg, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
return fmt.Errorf("error adding ACL Rules for user %s Error: %v", hostID, string(msg))
}
return nil
}
+50 -88
View File
@@ -23,94 +23,6 @@ func DefaultHandler(client mqtt.Client, msg mqtt.Message) {
logger.Log(0, "MQTT Message: Topic: ", string(msg.Topic()), " Message: ", string(msg.Payload()))
}
// Ping message Handler -- handles ping topic from client nodes
func Ping(client mqtt.Client, msg mqtt.Message) {
id, err := getID(msg.Topic())
if err != nil {
logger.Log(0, "error getting node.ID sent on ping topic ")
return
}
node, err := logic.GetNodeByID(id)
if err != nil {
logger.Log(3, "mq-ping error getting node: ", err.Error())
node, err := logic.GetNodeByID(id)
if err != nil {
logger.Log(3, "mq-ping error getting node: ", err.Error())
if database.IsEmptyRecord(err) {
h := logic.GetHostByNodeID(id) // check if a host is still associated
if h != nil { // inform host that node should be removed
fakeNode := models.Node{}
fakeNode.ID, _ = uuid.Parse(id)
fakeNode.Action = models.NODE_DELETE
fakeNode.PendingDelete = true
if err := NodeUpdate(&fakeNode); err != nil {
logger.Log(0, "failed to inform host", h.Name, h.ID.String(), "to remove node", id, err.Error())
}
}
}
return
}
decrypted, decryptErr := decryptMsg(&node, msg.Payload())
if decryptErr != nil {
logger.Log(0, "error decrypting when updating node ", node.ID.String(), decryptErr.Error())
return
}
var checkin models.NodeCheckin
if err := json.Unmarshal(decrypted, &checkin); err != nil {
logger.Log(1, "error unmarshaling payload ", err.Error())
return
}
host, err := logic.GetHost(node.HostID.String())
if err != nil {
logger.Log(0, "error retrieving host for node ", node.ID.String(), err.Error())
return
}
node.SetLastCheckIn()
host.Version = checkin.Version
node.Connected = checkin.Connected
host.Interfaces = checkin.Ifaces
for i := range host.Interfaces {
host.Interfaces[i].AddressString = host.Interfaces[i].Address.String()
}
if err := logic.UpdateNode(&node, &node); err != nil {
logger.Log(0, "error updating node", node.ID.String(), " on checkin", err.Error())
return
}
return
}
decrypted, decryptErr := decryptMsg(&node, msg.Payload())
if decryptErr != nil {
logger.Log(0, "error decrypting when updating node ", node.ID.String(), decryptErr.Error())
return
}
var checkin models.NodeCheckin
if err := json.Unmarshal(decrypted, &checkin); err != nil {
logger.Log(1, "error unmarshaling payload ", err.Error())
return
}
host, err := logic.GetHost(node.HostID.String())
if err != nil {
logger.Log(0, "error retrieving host for node ", node.ID.String(), err.Error())
return
}
node.SetLastCheckIn()
host.Version = checkin.Version
node.Connected = checkin.Connected
host.Interfaces = checkin.Ifaces
for i := range host.Interfaces {
host.Interfaces[i].AddressString = host.Interfaces[i].Address.String()
}
if err := logic.UpdateNode(&node, &node); err != nil {
logger.Log(0, "error updating node", node.ID.String(), " on checkin", err.Error())
return
}
logger.Log(3, "ping processed for node", node.ID.String())
// --TODO --set client version once feature is implemented.
//node.SetClientVersion(msg.Payload())
}
// UpdateNode message Handler -- handles updates from client nodes
func UpdateNode(client mqtt.Client, msg mqtt.Message) {
id, err := getID(msg.Topic())
@@ -179,6 +91,8 @@ func UpdateHost(client mqtt.Client, msg mqtt.Message) {
logger.Log(3, fmt.Sprintf("recieved host update: %s\n", hostUpdate.Host.ID.String()))
var sendPeerUpdate bool
switch hostUpdate.Action {
case models.CheckIn:
sendPeerUpdate = handleHostCheckin(&hostUpdate.Host, currentHost)
case models.Acknowledgement:
hu := hostactions.GetAction(currentHost.ID.String())
if hu != nil {
@@ -186,6 +100,12 @@ func UpdateHost(client mqtt.Client, msg mqtt.Message) {
logger.Log(0, "failed to send new node to host", hostUpdate.Host.Name, currentHost.ID.String(), err.Error())
return
} else {
if servercfg.GetBrokerType() == servercfg.EmqxBrokerType {
if err = AppendNodeUpdateACL(hu.Host.ID.String(), hu.Node.Network, hu.Node.ID.String(), servercfg.GetServer()); err != nil {
logger.Log(0, "failed to add ACLs for EMQX node", err.Error())
return
}
}
if err = PublishSingleHostPeerUpdate(context.Background(), currentHost, nil, nil); err != nil {
logger.Log(0, "failed peers publish after join acknowledged", hostUpdate.Host.Name, currentHost.ID.String(), err.Error())
return
@@ -447,3 +367,45 @@ func handleNewNodeDNS(host *models.Host, node *models.Node) error {
}
return nil
}
// handleHostCheckin processes a host CHECK_IN message: it refreshes the
// last-check-in time of every node attached to the host, asks the host to
// delete nodes whose DB records are gone, and persists the host's latest
// endpoint/interface data. It returns true when the endpoint IP or the
// interface count changed (the caller uses this to decide whether a peer
// update should be published).
func handleHostCheckin(h, currentHost *models.Host) bool {
if h == nil {
return false
}
// refresh check-in time for each of the host's nodes
for i := range currentHost.Nodes {
currNodeID := currentHost.Nodes[i]
node, err := logic.GetNodeByID(currNodeID)
if err != nil {
if database.IsEmptyRecord(err) {
// node record no longer exists server-side — send a synthetic
// delete so the host removes it locally
fakeNode := models.Node{}
fakeNode.ID, _ = uuid.Parse(currNodeID)
fakeNode.Action = models.NODE_DELETE
fakeNode.PendingDelete = true
if err := NodeUpdate(&fakeNode); err != nil {
logger.Log(0, "failed to inform host", currentHost.Name, currentHost.ID.String(), "to remove node", currNodeID, err.Error())
}
}
continue
}
// failure to persist a check-in is logged but does not abort the loop
if err := logic.UpdateNodeCheckin(&node); err != nil {
logger.Log(0, "error updating node", node.ID.String(), " on checkin", err.Error())
}
}
// cache the string form of each interface address for serialization
for i := range h.Interfaces {
h.Interfaces[i].AddressString = h.Interfaces[i].Address.String()
}
// compute the delta BEFORE overwriting currentHost's fields below
ifaceDelta := len(h.Interfaces) != len(currentHost.Interfaces) || !h.EndpointIP.Equal(currentHost.EndpointIP)
currentHost.EndpointIP = h.EndpointIP
currentHost.Interfaces = h.Interfaces
currentHost.DefaultInterface = h.DefaultInterface
if err := logic.UpsertHost(currentHost); err != nil {
logger.Log(0, "failed to update host after check-in", h.Name, h.ID.String(), err.Error())
return false
}
logger.Log(0, "ping processed for host", h.Name, h.ID.String())
return ifaceDelta
}
+8 -4
View File
@@ -50,15 +50,19 @@ func SetupMQTT() {
if err := CreateEmqxUser(servercfg.GetMqUserName(), servercfg.GetMqPassword(), true); err != nil {
log.Fatal(err)
}
// create an ACL authorization source for the built in EMQX MNESIA database
if err := CreateEmqxDefaultAuthorizer(); err != nil {
logger.Log(0, err.Error())
}
// create a default deny ACL to all topics for all users
if err := CreateDefaultDenyRule(); err != nil {
log.Fatal(err)
}
}
opts := mqtt.NewClientOptions()
setMqOptions(servercfg.GetMqUserName(), servercfg.GetMqPassword(), opts)
opts.SetOnConnectHandler(func(client mqtt.Client) {
serverName := servercfg.GetServer()
if token := client.Subscribe(fmt.Sprintf("ping/%s/#", serverName), 2, mqtt.MessageHandler(Ping)); token.WaitTimeout(MQ_TIMEOUT*time.Second) && token.Error() != nil {
client.Disconnect(240)
logger.Log(0, "ping subscription failed")
}
if token := client.Subscribe(fmt.Sprintf("update/%s/#", serverName), 0, mqtt.MessageHandler(UpdateNode)); token.WaitTimeout(MQ_TIMEOUT*time.Second) && token.Error() != nil {
client.Disconnect(240)
logger.Log(0, "node update subscription failed")
+11 -7
View File
@@ -88,14 +88,18 @@ func PublishSingleHostPeerUpdate(ctx context.Context, host *models.Host, deleted
if len(peerUpdate.Peers) == 0 { // no peers to send
return nil
}
if host.ProxyEnabled || host.ShouldUseTurn {
proxyUpdate, err := logic.GetProxyUpdateForHost(ctx, host)
if err != nil {
return err
}
proxyUpdate.Action = models.ProxyUpdate
peerUpdate.ProxyUpdate = proxyUpdate
proxyUpdate, err := logic.GetProxyUpdateForHost(ctx, host)
if err != nil {
return err
}
proxyUpdate.Server = servercfg.GetServer()
if host.ProxyEnabled {
proxyUpdate.Action = models.ProxyUpdate
} else {
proxyUpdate.Action = models.NoProxy
}
peerUpdate.ProxyUpdate = proxyUpdate
data, err := json.Marshal(&peerUpdate)
if err != nil {
+1 -1
View File
@@ -1,4 +1,4 @@
# Netmaker v0.18.4
# Netmaker v0.18.5
## **Wait till out of pre-release to fully upgrade**
+14 -33
View File
@@ -1,6 +1,6 @@
#!/bin/bash
LATEST="v0.18.4"
LATEST="v0.18.5"
print_logo() {(
cat << "EOF"
@@ -43,7 +43,7 @@ usage () {
echo " -t tag of build; if buildtype=version, tag=version. If builtype=branch or builtype=local, tag=branch"
echo " -a auto-build; skip prompts and use defaults, if none provided"
echo "examples:"
echo " nm-quick.sh -e -b version -t v0.18.4"
echo " nm-quick.sh -e -b version -t $LATEST"
echo " nm-quick.sh -e -b local -t feature_v0.17.2_newfeature"
echo " nm-quick.sh -e -b branch -t develop"
exit 1
@@ -178,21 +178,6 @@ install_yq() {
# setup_netclient - adds netclient to docker-compose
setup_netclient() {
# yq ".services.netclient += {\"container_name\": \"netclient\"}" -i /root/docker-compose.yml
# yq ".services.netclient += {\"image\": \"gravitl/netclient:$IMAGE_TAG\"}" -i /root/docker-compose.yml
# yq ".services.netclient += {\"hostname\": \"netmaker-1\"}" -i /root/docker-compose.yml
# yq ".services.netclient += {\"network_mode\": \"host\"}" -i /root/docker-compose.yml
# yq ".services.netclient.depends_on += [\"netmaker\"]" -i /root/docker-compose.yml
# yq ".services.netclient += {\"restart\": \"always\"}" -i /root/docker-compose.yml
# yq ".services.netclient.environment += {\"TOKEN\": \"$TOKEN\"}" -i /root/docker-compose.yml
# yq ".services.netclient.volumes += [\"/etc/netclient:/etc/netclient\"]" -i /root/docker-compose.yml
# yq ".services.netclient.cap_add += [\"NET_ADMIN\"]" -i /root/docker-compose.yml
# yq ".services.netclient.cap_add += [\"NET_RAW\"]" -i /root/docker-compose.yml
# yq ".services.netclient.cap_add += [\"SYS_MODULE\"]" -i /root/docker-compose.yml
# docker-compose up -d
set +e
netclient uninstall
set -e
@@ -200,7 +185,7 @@ setup_netclient() {
wget -O netclient https://github.com/gravitl/netclient/releases/download/$LATEST/netclient_linux_amd64
chmod +x netclient
./netclient install
netclient join -t $TOKEN
netclient register -t $TOKEN
echo "waiting for client to become available"
wait_seconds 10
@@ -210,9 +195,9 @@ setup_netclient() {
configure_netclient() {
NODE_ID=$(sudo cat /etc/netclient/nodes.yml | yq -r .netmaker.commonnode.id)
echo "join complete. New node ID: $NODE_ID"
echo "register complete. New node ID: $NODE_ID"
HOST_ID=$(sudo cat /etc/netclient/netclient.yml | yq -r .host.id)
echo "For first join, making host a default"
echo "making host a default"
echo "Host ID: $HOST_ID"
# set as a default host
set +e
@@ -225,11 +210,8 @@ configure_netclient() {
# setup_nmctl - pulls nmctl and makes it executable
setup_nmctl() {
# DEV_TEMP - Temporary instructions for testing
wget -O /usr/bin/nmctl https://fileserver.netmaker.org/testing/nmctl
wget -O /usr/bin/nmctl https://github.com/gravitl/netmaker/releases/download/$LATEST/nmctl_linux_amd64
# RELEASE_REPLACE - Use this once release is ready
# wget https://github.com/gravitl/netmaker/releases/download/v0.17.1/nmctl
chmod +x /usr/bin/nmctl
echo "using server api.$NETMAKER_BASE_DOMAIN"
echo "using master key $MASTER_KEY"
@@ -295,7 +277,7 @@ install_dependencies() {
OS=$(uname)
if [ -f /etc/debian_version ]; then
dependencies="git wireguard wireguard-tools jq docker.io docker-compose"
dependencies="git wireguard wireguard-tools dnsutils jq docker.io docker-compose"
update_cmd='apt update'
install_cmd='apt-get install -y'
elif [ -f /etc/alpine-release ]; then
@@ -303,19 +285,19 @@ install_dependencies() {
update_cmd='apk update'
install_cmd='apk --update add'
elif [ -f /etc/centos-release ]; then
dependencies="git wireguard jq docker.io docker-compose"
dependencies="git wireguard jq bind-utils docker.io docker-compose"
update_cmd='yum update'
install_cmd='yum install -y'
elif [ -f /etc/fedora-release ]; then
dependencies="git wireguard jq docker.io docker-compose"
dependencies="git wireguard bind-utils jq docker.io docker-compose"
update_cmd='dnf update'
install_cmd='dnf install -y'
elif [ -f /etc/redhat-release ]; then
dependencies="git wireguard jq docker.io docker-compose"
dependencies="git wireguard jq docker.io bind-utils docker-compose"
update_cmd='yum update'
install_cmd='yum install -y'
elif [ -f /etc/arch-release ]; then
dependecies="git wireguard-tools jq docker.io docker-compose"
# bug fix: "dependecies" (typo) left the real $dependencies variable unset on Arch
dependencies="git wireguard-tools dnsutils jq docker.io docker-compose"
update_cmd='pacman -Sy'
install_cmd='pacman -S --noconfirm'
elif [ "${OS}" = "FreeBSD" ]; then
@@ -660,11 +642,10 @@ setup_mesh() {
wait_seconds 5
echo "Creating netmaker access key"
echo "Creating netmaker enrollment key"
nmctl keys create test1 99999 --name netmaker-key
tokenJson=$(nmctl keys create netmaker 2)
TOKEN=$(jq -r '.accessstring' <<< ${tokenJson})
tokenJson=$(nmctl enrollment_key create --unlimited --networks netmaker)
TOKEN=$(jq -r '.token' <<< ${tokenJson})
wait_seconds 3
+144 -86
View File
@@ -1,6 +1,9 @@
#!/bin/bash
LATEST="testing"
LATEST="v0.18.5"
INSTALL_PATH="/root"
# Print restore instructions if the upgrade fails. A bare
# "trap restore_old_netmaker_instructions" is invalid: with no signal spec,
# bash parses the handler name as a signal specification and errors out.
# NOTE(review): ERR matches the failure-recovery intent — confirm ERR vs EXIT.
trap restore_old_netmaker_instructions ERR
# check_version - make sure current version is 0.17.1 before continuing
check_version() {
@@ -15,6 +18,51 @@ check_version() {
fi
}
# backup_v17_files - copy the v0.17.1 config files into
# $INSTALL_PATH/netmaker_0.17.1_backup so they can be restored on failure.
backup_v17_files() {
  # -p: don't fail if the backup directory already exists (e.g. re-run)
  mkdir -p $INSTALL_PATH/netmaker_0.17.1_backup
  cp $INSTALL_PATH/docker-compose.yml $INSTALL_PATH/netmaker_0.17.1_backup/docker-compose.yml
  cp $INSTALL_PATH/Caddyfile $INSTALL_PATH/netmaker_0.17.1_backup/Caddyfile
  # bug fix: "%INSTALL_PATH" was a typo for "$INSTALL_PATH", so the
  # mosquitto.conf backup was written to a bogus relative path
  cp $INSTALL_PATH/mosquitto.conf $INSTALL_PATH/netmaker_0.17.1_backup/mosquitto.conf
  cp $INSTALL_PATH/wait.sh $INSTALL_PATH/netmaker_0.17.1_backup/wait.sh
}
# backup_volumes - snapshot every netmaker docker volume to a sibling
# directory with a "-backup" suffix, for rollback to v0.17.1.
backup_volumes() {
  local vol
  for vol in root_caddy_conf root_caddy_data root_dnsconfig root_mosquitto_data root_mosquitto_logs root_sqldata; do
    cp -r /var/lib/docker/volumes/${vol}/ /var/lib/docker/volumes/${vol}-backup/
  done
}
# restore_old_netmaker_instructions - tell the user how to roll back to
# v0.17.1 from the volume and config backups after a failed upgrade.
restore_old_netmaker_instructions() {
  echo "There was a problem with the installation. Your config files and volumes have been backed up."
  echo "To restore Netmaker back to v0.17.1, copy all the netmaker volume backups (caddy_conf-backup, caddy_data-backup, dnsconfig-backup, mosquitto_data-backup, mosquitto_logs-backup, and sqldata-backup) back to their regular names with out the -backup."
  # bug fix: "${INSALL_PATH}" (typo, twice) expanded to an empty string,
  # printing a wrong backup location to the user
  echo "Your config files should be located in ${INSTALL_PATH}/netmaker_0.17.1_backup. Simply run cp ${INSTALL_PATH}/netmaker_0.17.1_backup/* . (include the .) and run docker-compose up -d."
  echo "Your netmaker should be back to v0.17.1"
}
# get_install_path - confirm (or prompt for) the directory containing
# docker-compose.yml and the other config files. The result is stored in
# the INSTALL_PATH global used by the rest of the script.
get_install_path() {
  echo "-----------------------------------------------------"
  echo "Is your docker-compose located in $INSTALL_PATH ?"
  echo "-----------------------------------------------------"
  select install_option in "yes" "no (enter manually)"; do
    case $REPLY in
      1)
        echo "using $INSTALL_PATH for an installation path."
        break
        ;;
      2)
        read -p "Enter path where your docker-compose is located: " install_path
        # bug fix: the entered path was assigned to the unrelated
        # SERVER_HTTP_HOST variable, silently discarding the user's input;
        # it must update INSTALL_PATH (also makes the echo below correct)
        INSTALL_PATH=$install_path
        echo "using $INSTALL_PATH"
        break
        ;;
      *) echo "invalid option $REPLY";;
    esac
  done
}
# wait_seconds - wait a number of seconds, print a log
wait_seconds() {
for ((a=1; a <= $1; a++))
@@ -40,23 +88,23 @@ confirm() {
install_dependencies() {
OS=$(uname)
if [ -f /etc/debian_version ]; then
dependencies="jq wireguard jq docker.io docker-compose"
# bug fix: "jq" was listed twice in the debian dependency list
dependencies="jq wireguard dnsutils docker-compose"
update_cmd='apt update'
install_cmd='apt install -y'
elif [ -f /etc/centos-release ]; then
dependencies="wireguard jq docker.io docker-compose"
dependencies="wireguard jq bind-utils docker-compose"
update_cmd='yum update'
install_cmd='yum install -y'
elif [ -f /etc/fedora-release ]; then
dependencies="wireguard jq docker.io docker-compose"
dependencies="wireguard jq bind-utils docker-compose"
update_cmd='dnf update'
install_cmd='dnf install -y'
elif [ -f /etc/redhat-release ]; then
dependencies="wireguard jq docker.io docker-compose"
dependencies="wireguard jq bind-utils docker-compose"
update_cmd='yum update'
install_cmd='yum install -y'
elif [ -f /etc/arch-release ]; then
dependecies="wireguard-tools jq docker.io docker-compose netclient"
dependencies="wireguard-tools jq dnsutils docker-compose netclient"
update_cmd='pacman -Sy'
install_cmd='pacman -S --noconfirm'
else
@@ -66,6 +114,14 @@ install_dependencies() {
set -- $dependencies
# If docker is already installed, skip it; otherwise append docker.io to
# the package list so the distro package manager installs it below.
if command -v docker >/dev/null 2>&1 ; then
  echo "Docker found"
  echo "version: $(docker version)"
else
  echo "Docker not found. adding to dependencies"
  # bug fix: '$dependencies += " docker.io"' is not an assignment in bash —
  # it expands $dependencies and tries to EXECUTE it as a command
  dependencies="$dependencies docker.io"
fi
${update_cmd}
set +e
@@ -264,17 +320,17 @@ collect_node_settings() {
# setup_caddy - updates Caddy with new info
setup_caddy() {
echo "backing up Caddyfile to /root/Caddyfile.backup"
cp /root/Caddyfile /root/Caddyfile.backup
echo "backing up Caddyfile to ${INSTALL_PATH}/Caddyfile.backup"
cp $INSTALL_PATH/Caddyfile $INSTALL_PATH/Caddyfile.backup
if grep -wq "acme.zerossl.com/v2/DV90" Caddyfile; then
echo "zerossl already set, continuing"
else
echo "editing Caddyfile"
sed -i '0,/email/{s~email~acme_ca https://acme.zerossl.com/v2/DV90\n\t&~}' /root/Caddyfile
sed -i '0,/email/{s~email~acme_ca https://acme.zerossl.com/v2/DV90\n\t&~}' $INSTALL_PATH/Caddyfile
fi
cat <<EOT >> /root/Caddyfile
cat <<EOT >> $INSTALL_PATH/Caddyfile
# STUN
https://$STUN_DOMAIN {
@@ -334,69 +390,63 @@ set_compose() {
set_mq_credentials
echo "retrieving updated wait script and mosquitto conf"
rm /root/wait.sh
rm /root/mosquitto.conf
rm $INSTALL_PATH/wait.sh
rm $INSTALL_PATH/mosquitto.conf
wget -O $INSTALL_PATH/wait.sh https://raw.githubusercontent.com/gravitl/netmaker/master/docker/wait.sh
chmod +x $INSTALL_PATH/wait.sh
wget -O $INSTALL_PATH/mosquitto.conf https://raw.githubusercontent.com/gravitl/netmaker/master/docker/mosquitto.conf
chmod +x $INSTALL_PATH/mosquitto.conf
# DEV_TEMP
wget -O /root/wait.sh https://raw.githubusercontent.com/gravitl/netmaker/develop/docker/wait.sh
# RELEASE_REPLACE - Use this once release is ready
# wget -O /root/wait.sh https://raw.githubusercontent.com/gravitl/netmaker/master/docker/wait.sh
chmod +x /root/wait.sh
# DEV_TEMP
wget -O /root/mosquitto.conf https://raw.githubusercontent.com/gravitl/netmaker/develop/docker/mosquitto.conf
# RELEASE_REPLACE - Use this once release is ready
# wget -O /root/mosquitto.conf https://raw.githubusercontent.com/gravitl/netmaker/master/docker/mosquitto.conf
chmod +x /root/mosquitto.conf
# DEV_TEMP
sed -i "s/v0.17.1/$LATEST/g" /root/docker-compose.yml
sed -i "s/v0.17.1/$LATEST/g" $INSTALL_PATH/docker-compose.yml
STUN_PORT=3478
# RELEASE_REPLACE - Use this once release is ready
#sed -i "s/v0.17.1/v0.18.4/g" /root/docker-compose.yml
yq ".services.netmaker.environment.SERVER_NAME = \"$SERVER_NAME\"" -i /root/docker-compose.yml
yq ".services.netmaker.environment += {\"BROKER_ENDPOINT\": \"wss://$BROKER_NAME\"}" -i /root/docker-compose.yml
yq ".services.netmaker.environment += {\"SERVER_BROKER_ENDPOINT\": \"ws://mq:1883\"}" -i /root/docker-compose.yml
yq ".services.netmaker.environment += {\"STUN_LIST\": \"$STUN_DOMAIN:$STUN_PORT,stun1.netmaker.io:3478,stun2.netmaker.io:3478,stun1.l.google.com:19302,stun2.l.google.com:19302\"}" -i /root/docker-compose.yml
yq ".services.netmaker.environment += {\"MQ_PASSWORD\": \"$MQ_PASSWORD\"}" -i /root/docker-compose.yml
yq ".services.netmaker.environment += {\"MQ_USERNAME\": \"$MQ_USERNAME\"}" -i /root/docker-compose.yml
yq ".services.netmaker.environment += {\"STUN_PORT\": \"$STUN_PORT\"}" -i /root/docker-compose.yml
yq ".services.netmaker.ports += \"3478:3478/udp\"" -i /root/docker-compose.yml
#sed -i "s/v0.17.1/v0.18.5/g" $INSTALL_PATH/docker-compose.yml
yq ".services.netmaker.environment.SERVER_NAME = \"$SERVER_NAME\"" -i $INSTALL_PATH/docker-compose.yml
yq ".services.netmaker.environment += {\"BROKER_ENDPOINT\": \"wss://$BROKER_NAME\"}" -i $INSTALL_PATH/docker-compose.yml
yq ".services.netmaker.environment += {\"SERVER_BROKER_ENDPOINT\": \"ws://mq:1883\"}" -i $INSTALL_PATH/docker-compose.yml
yq ".services.netmaker.environment += {\"STUN_LIST\": \"$STUN_DOMAIN:$STUN_PORT,stun1.netmaker.io:3478,stun2.netmaker.io:3478,stun1.l.google.com:19302,stun2.l.google.com:19302\"}" -i $INSTALL_PATH/docker-compose.yml
yq ".services.netmaker.environment += {\"MQ_PASSWORD\": \"$MQ_PASSWORD\"}" -i $INSTALL_PATH/docker-compose.yml
yq ".services.netmaker.environment += {\"MQ_USERNAME\": \"$MQ_USERNAME\"}" -i $INSTALL_PATH/docker-compose.yml
yq ".services.netmaker.environment += {\"STUN_PORT\": \"$STUN_PORT\"}" -i $INSTALL_PATH/docker-compose.yml
yq ".services.netmaker.ports += \"3478:3478/udp\"" -i $INSTALL_PATH/docker-compose.yml
yq ".services.mq.environment += {\"MQ_PASSWORD\": \"$MQ_PASSWORD\"}" -i /root/docker-compose.yml
yq ".services.mq.environment += {\"MQ_USERNAME\": \"$MQ_USERNAME\"}" -i /root/docker-compose.yml
yq ".services.mq.environment += {\"MQ_PASSWORD\": \"$MQ_PASSWORD\"}" -i $INSTALL_PATH/docker-compose.yml
yq ".services.mq.environment += {\"MQ_USERNAME\": \"$MQ_USERNAME\"}" -i $INSTALL_PATH/docker-compose.yml
#remove unnecessary ports
yq eval 'del( .services.netmaker.ports[] | select(. == "51821*") )' -i /root/docker-compose.yml
yq eval 'del( .services.mq.ports[] | select(. == "8883*") )' -i /root/docker-compose.yml
yq eval 'del( .services.mq.ports[] | select(. == "1883*") )' -i /root/docker-compose.yml
yq eval 'del( .services.mq.expose[] | select(. == "8883*") )' -i /root/docker-compose.yml
yq eval 'del( .services.mq.expose[] | select(. == "1883*") )' -i /root/docker-compose.yml
yq eval 'del( .services.netmaker.ports[] | select(. == "51821*") )' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del( .services.mq.ports[] | select(. == "8883*") )' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del( .services.mq.ports[] | select(. == "1883*") )' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del( .services.mq.expose[] | select(. == "8883*") )' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del( .services.mq.expose[] | select(. == "1883*") )' -i $INSTALL_PATH/docker-compose.yml
# delete unnecessary compose sections
yq eval 'del(.services.netmaker.cap_add)' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.sysctls)' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.environment.MQ_ADMIN_PASSWORD)' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.environment.MQ_HOST)' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.environment.MQ_PORT)' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.environment.MQ_SERVER_PORT)' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.environment.PORT_FORWARD_SERVICES)' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.environment.CLIENT_MODE)' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.environment.HOST_NETWORK)' -i /root/docker-compose.yml
yq eval 'del(.services.mq.environment.NETMAKER_SERVER_HOST)' -i /root/docker-compose.yml
yq eval 'del( .services.netmaker.volumes[] | select(. == "mosquitto_data*") )' -i /root/docker-compose.yml
yq eval 'del( .services.mq.volumes[] | select(. == "mosquitto_data*") )' -i /root/docker-compose.yml
yq eval 'del( .volumes.mosquitto_data )' -i /root/docker-compose.yml
yq eval 'del(.services.netmaker.cap_add)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.netmaker.sysctls)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.netmaker.environment.MQ_ADMIN_PASSWORD)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.netmaker.environment.MQ_HOST)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.netmaker.environment.MQ_PORT)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.netmaker.environment.MQ_SERVER_PORT)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.netmaker.environment.PORT_FORWARD_SERVICES)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.netmaker.environment.CLIENT_MODE)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.netmaker.environment.HOST_NETWORK)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del(.services.mq.environment.NETMAKER_SERVER_HOST)' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del( .services.netmaker.volumes[] | select(. == "mosquitto_data*") )' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del( .services.mq.volumes[] | select(. == "mosquitto_data*") )' -i $INSTALL_PATH/docker-compose.yml
yq eval 'del( .volumes.mosquitto_data )' -i $INSTALL_PATH/docker-compose.yml
}
# start_containers - run docker-compose up -d
start_containers() {
docker-compose -f /root/docker-compose.yml up -d
docker-compose -f $INSTALL_PATH/docker-compose.yml up -d
}
# test_caddy - make sure caddy is working
@@ -427,29 +477,28 @@ test_caddy() {
# setup_netclient - adds netclient to docker-compose
setup_netclient() {
# yq ".services.netclient += {\"container_name\": \"netclient\"}" -i /root/docker-compose.yml
# yq ".services.netclient += {\"image\": \"gravitl/netclient:testing\"}" -i /root/docker-compose.yml
# yq ".services.netclient += {\"hostname\": \"netmaker-1\"}" -i /root/docker-compose.yml
# yq ".services.netclient += {\"network_mode\": \"host\"}" -i /root/docker-compose.yml
# yq ".services.netclient.depends_on += [\"netmaker\"]" -i /root/docker-compose.yml
# yq ".services.netclient += {\"restart\": \"always\"}" -i /root/docker-compose.yml
# yq ".services.netclient.environment += {\"TOKEN\": \"$KEY\"}" -i /root/docker-compose.yml
# yq ".services.netclient.volumes += [\"/etc/netclient:/etc/netclient\"]" -i /root/docker-compose.yml
# yq ".services.netclient.cap_add += [\"NET_ADMIN\"]" -i /root/docker-compose.yml
# yq ".services.netclient.cap_add += [\"NET_RAW\"]" -i /root/docker-compose.yml
# yq ".services.netclient.cap_add += [\"SYS_MODULE\"]" -i /root/docker-compose.yml
# docker-compose up -d
set +e
netclient uninstall
set -e
HAS_APT=false
set -e
if command -v apt >/dev/null; then
HAS_APT=true
fi
set +e
wget -O /tmp/netclient https://fileserver.netmaker.org/$LATEST/netclient
if [ "$HAS_APT" = "true" ]; then
curl -sL 'https://apt.netmaker.org/gpg.key' | sudo tee /etc/apt/trusted.gpg.d/netclient.asc
curl -sL 'https://apt.netmaker.org/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/netclient.list
sudo apt update
sudo apt install netclient
else
wget -O /tmp/netclient https://github.com/gravitl/netclient/releases/download/$LATEST/netclient_linux_amd64
chmod +x /tmp/netclient
/tmp/netclient install
netclient join -t $KEY
chmod +x /tmp/netclient
/tmp/netclient install
fi
netclient register -t $KEY
echo "waiting for client to become available"
wait_seconds 10
@@ -459,11 +508,8 @@ setup_netclient() {
# setup_nmctl - pulls nmctl and makes it executable
setup_nmctl() {
# DEV_TEMP - Temporary instructions for testing
wget https://fileserver.netmaker.org/testing/nmctl
# RELEASE_REPLACE - Use this once release is ready
# wget https://github.com/gravitl/netmaker/releases/download/v0.17.1/nmctl
wget -O nmctl https://github.com/gravitl/netmaker/releases/download/$LATEST/nmctl_linux_amd64
chmod +x nmctl
echo "using server $SERVER_HTTP_HOST"
echo "using master key $MASTER_KEY"
@@ -500,6 +546,11 @@ join_networks() {
HAS_EGRESS="yes"
echo " egress ranges: $(jq -r ".[$NUM].egressgatewayranges" ./nodejson.tmp | tr -d '[]\n"[:space:]')"
EGRESS_RANGES=$(jq -r ".[$NUM].egressgatewayranges" ./nodejson.tmp | tr -d '[]\n"[:space:]')
EGRESS_RANGES=${EGRESS_RANGES//0.0.0.0\/0/0.0.0.0\/5,8.0.0.0\/7,11.0.0.0\/8,12.0.0.0\/6,16.0.0.0\/4,32.0.0.0\/3,64.0.0.0\/2,128.0.0.0\/3,160.0.0.0\/5,168.0.0.0\/6,172.0.0.0\/12,172.32.0.0\/11,172.64.0.0\/10,172.128.0.0\/9,173.0.0.0\/8,174.0.0.0\/7,176.0.0.0\/4,192.0.0.0\/9,192.128.0.0\/11,192.160.0.0\/13,192.169.0.0\/16,192.170.0.0\/15,192.172.0.0\/14,192.176.0.0\/12,192.192.0.0\/10,193.0.0.0\/8,194.0.0.0\/7,196.0.0.0\/6,200.0.0.0\/5,208.0.0.0\/4}
EGRESS_RANGES=${EGRESS_RANGES//0::\/0/}
EGRESS_RANGES=${EGRESS_RANGES//,,/,}
EGRESS_RANGES=`echo $EGRESS_RANGES | sed 's/,*$//g'`
EGRESS_RANGES=`echo $EGRESS_RANGES | sed 's/^,*//g'`
fi
echo " is ingress: $(jq -r ".[$NUM].isingressgateway" ./nodejson.tmp)"
@@ -520,11 +571,11 @@ join_networks() {
confirm
if [[ $NUM -eq 0 ]]; then
echo "running command: ./nmctl keys create $NETWORK 1"
KEY_JSON=$(./nmctl keys create $NETWORK 1)
KEY=$(echo $KEY_JSON | jq -r .accessstring)
echo "running command: ./nmctl enrollment_key create --uses 1 --networks $NETWORK"
KEY_JSON=$(./nmctl enrollment_key create --uses 1 --networks $NETWORK)
KEY=$(jq -r '.token' <<< ${KEY_JSON})
echo "join key created: $KEY"
echo "enrollment key created: $KEY"
setup_netclient
else
@@ -603,6 +654,13 @@ fi
set +e
#backup volumes and v0.17.1 configs in case of failure.
backup_volumes
backup_v17_files
# get the installation path for docker-compose.yml and other config files
get_install_path
echo "...installing dependencies for script"
install_dependencies
@@ -624,7 +682,7 @@ echo "...retrieving current server node settings"
collect_node_settings
echo "...backing up docker compose to docker-compose.yml.backup"
cp /root/docker-compose.yml /root/docker-compose.yml.backup
cp $INSTALL_PATH/docker-compose.yml $INSTALL_PATH/docker-compose.yml.backup
echo "...setting Caddyfile values"
setup_caddy
+1 -1
View File
@@ -704,7 +704,7 @@ info:
API calls must be authenticated via a header of the format -H “Authorization: Bearer <YOUR_SECRET_KEY>” There are two methods to obtain YOUR_SECRET_KEY: 1. Using the masterkey. By default, this value is “secret key,” but you should change this on your instance and keep it secure. This value can be set via env var at startup or in a config file (config/environments/< env >.yaml). See the [Netmaker](https://docs.netmaker.org/index.html) documentation for more details. 2. Using a JWT received for a node. This can be retrieved by calling the /api/nodes/<network>/authenticate endpoint, as documented below.
title: Netmaker
version: 0.18.4
version: 0.18.5
paths:
/api/dns:
get: