Compare commits

5 Commits: 9be70e5900 ... c28620324a

| Author | SHA1 | Date |
|---|---|---|
| | c28620324a | |
| | d5fea5e7b8 | |
| | 79eecdf211 | |
| | f83a26ff6b | |
| | 612f2a159f | |
app/app.go (79 changed lines)

```diff
@@ -25,7 +25,7 @@ func Run() {
     flag.Parse()

     config := GetConfig(*configPath)
-    log.Printf("Initialized config from %s", *configPath)
+    log.Printf("[INFO] initialized config from %s", *configPath)

     token := fmt.Sprintf(`%s@%s!%s`, config.PVE.Token.USER, config.PVE.Token.REALM, config.PVE.Token.ID)
     client = NewClient(config.PVE.URL, token, config.PVE.Token.Secret)
@@ -35,13 +35,13 @@ func Run() {
     cluster := Cluster{}
     cluster.Init(client)
     start := time.Now()
-    log.Printf("Starting cluster sync\n")
+    log.Printf("[INFO] starting cluster sync\n")
     cluster.Sync()
-    log.Printf("Synced cluster in %fs\n", time.Since(start).Seconds())
+    log.Printf("[INFO] synced cluster in %fs\n", time.Since(start).Seconds())

     // set repeating update for full rebuilds
     ticker := time.NewTicker(time.Duration(config.ReloadInterval) * time.Second)
-    log.Printf("Initialized cluster sync interval of %ds", config.ReloadInterval)
+    log.Printf("[INFO] initialized cluster sync interval of %ds", config.ReloadInterval)
     channel := make(chan bool)
     go func() {
         for {
@@ -50,9 +50,9 @@ func Run() {
                 return
             case <-ticker.C:
                 start := time.Now()
-                log.Printf("Starting cluster sync\n")
+                log.Printf("[INFO] starting cluster sync\n")
                 cluster.Sync()
-                log.Printf("Synced cluster in %fs\n", time.Since(start).Seconds())
+                log.Printf("[INFO] synced cluster in %fs\n", time.Since(start).Seconds())
             }
         }
     }()
@@ -66,13 +66,24 @@ func Run() {
         }
     })

+    router.GET("/", func(c *gin.Context) {
+        v, err := cluster.Get()
+        if err != nil {
+            c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+            return
+        } else {
+            c.JSON(http.StatusOK, gin.H{"cluster": v})
+            return
+        }
+    })
+
     router.GET("/nodes/:node", func(c *gin.Context) {
         nodeid := c.Param("node")

         node, err := cluster.GetNode(nodeid)

         if err != nil {
-            c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+            c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
             return
         } else {
             c.JSON(http.StatusOK, gin.H{"node": node})
@@ -80,25 +91,11 @@ func Run() {
         }
     })

-    router.GET("/nodes/:node/devices", func(c *gin.Context) {
-        nodeid := c.Param("node")
-
-        node, err := cluster.GetNode(nodeid)
-
-        if err != nil {
-            c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
-            return
-        } else {
-            c.JSON(http.StatusOK, gin.H{"devices": node.Devices})
-            return
-        }
-    })
-
     router.GET("/nodes/:node/instances/:vmid", func(c *gin.Context) {
         nodeid := c.Param("node")
         vmid, err := strconv.ParseUint(c.Param("vmid"), 10, 64)
         if err != nil {
-            c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s could not be converted to vmid (uint)", c.Param("instance"))})
+            c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s could not be converted to vmid (uint)", c.Param("instance"))})
             return
         }

@@ -110,7 +107,7 @@ func Run() {
         } else {
             instance, err := node.GetInstance(uint(vmid))
             if err != nil {
-                c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+                c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
                 return
             } else {
                 c.JSON(http.StatusOK, gin.H{"instance": instance})
@@ -122,9 +119,16 @@ func Run() {
     router.POST("/sync", func(c *gin.Context) {
         //go func() {
         start := time.Now()
-        log.Printf("Starting cluster sync\n")
-        cluster.Sync()
-        log.Printf("Synced cluster in %fs\n", time.Since(start).Seconds())
+        log.Printf("[INFO] starting cluster sync\n")
+        err := cluster.Sync()
+        if err != nil {
+            log.Printf("[ERR ] failed to sync cluster: %s", err)
+            c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+            return
+        } else {
+            log.Printf("[INFO] synced cluster in %fs\n", time.Since(start).Seconds())
+            return
+        }
         //}()
     })

@@ -132,13 +136,14 @@ func Run() {
         nodeid := c.Param("node")
        //go func() {
         start := time.Now()
-        log.Printf("Starting %s sync\n", nodeid)
-        err := cluster.RebuildHost(nodeid)
+        log.Printf("[INFO] starting %s sync\n", nodeid)
+        err := cluster.RebuildNode(nodeid)
         if err != nil {
-            log.Printf("Failed to sync %s: %s", nodeid, err.Error())
+            log.Printf("[ERR ] failed to sync %s: %s", nodeid, err.Error())
+            c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
             return
         } else {
-            log.Printf("Synced %s in %fs\n", nodeid, time.Since(start).Seconds())
+            log.Printf("[INFO] synced %s in %fs\n", nodeid, time.Since(start).Seconds())
             return
         }
         //}()
@@ -154,30 +159,34 @@ func Run() {

         //go func() {
         start := time.Now()
-        log.Printf("Starting %s.%d sync\n", nodeid, vmid)
+        log.Printf("[INFO] starting %s.%d sync\n", nodeid, vmid)

         node, err := cluster.GetNode(nodeid)
         if err != nil {
-            log.Printf("Failed to sync %s.%d: %s", nodeid, vmid, err.Error())
+            log.Printf("[ERR ] failed to sync %s.%d: %s", nodeid, vmid, err.Error())
+            c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
             return
         }

         instance, err := node.GetInstance(uint(vmid))
         if err != nil {
-            log.Printf("Failed to sync %s.%d: %s", nodeid, vmid, err.Error())
+            log.Printf("[ERR ] failed to sync %s.%d: %s", nodeid, vmid, err.Error())
+            c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
             return
         }

         err = node.RebuildInstance(instance.Type, uint(vmid))
         if err != nil {
-            log.Printf("Failed to sync %s.%d: %s", nodeid, vmid, err.Error())
+            log.Printf("[ERR ] failed to sync %s.%d: %s", nodeid, vmid, err.Error())
+            c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
             return
         } else {
-            log.Printf("Synced %s.%d in %fs\n", nodeid, vmid, time.Since(start).Seconds())
+            log.Printf("[INFO] synced %s.%d in %fs\n", nodeid, vmid, time.Since(start).Seconds())
             return
         }
         //}()
     })

+    log.Printf("[INFO] starting API listening on 0.0.0.0:%d", config.ListenPort)
     router.Run("0.0.0.0:" + strconv.Itoa(config.ListenPort))
 }
```
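Taken together, the app/app.go changes do three things: switch log output to leveled prefixes (`[INFO]`, `[ERR ]`, `[WARN]`), return more accurate HTTP status codes (404 when a node or instance is missing, 400 for an unparseable vmid, 500 only for actual sync failures), and add a `GET /` route that returns the whole cluster model. A minimal client sketch of the new behavior follows, assuming the service listens on localhost:8080 (the real address comes from `config.ListenPort`; the base URL and node names are made up for illustration):

```go
// Hypothetical client exercising the endpoints as they behave after
// this change. Not part of the diff; base URL and paths are assumed.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	base := "http://localhost:8080" // assumed listen address

	// GET / now returns the whole cluster model (new in this diff).
	resp, err := http.Get(base + "/")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("GET / ->", resp.StatusCode) // 200 on success, 500 on cluster error

	// Missing node: previously 500, now 404.
	resp, err = http.Get(base + "/nodes/no-such-node")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("GET /nodes/no-such-node ->", resp.StatusCode) // expect 404

	// Non-numeric vmid: previously 500, now 400.
	resp, err = http.Get(base + "/nodes/node1/instances/not-a-number")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("GET .../instances/not-a-number ->", resp.StatusCode) // expect 400
}
```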
app/model.go (122 changed lines)

```diff
@@ -10,7 +10,26 @@ func (cluster *Cluster) Init(pve ProxmoxClient) {
     cluster.pve = pve
 }

+func (cluster *Cluster) Get() (*Cluster, error) {
+    cluster_ch := make(chan *Cluster)
+    err_ch := make(chan error)
+
+    go func() {
+        // aquire cluster lock
+        cluster.lock.Lock()
+        defer cluster.lock.Unlock()
+        cluster_ch <- cluster
+        err_ch <- nil
+    }()
+
+    return <-cluster_ch, <-err_ch
+}
+
+// hard sync cluster
 func (cluster *Cluster) Sync() error {
+    err_ch := make(chan error)
+
+    go func() {
         // aquire lock on cluster, release on return
         cluster.lock.Lock()
         defer cluster.lock.Unlock()
@@ -20,19 +39,23 @@ func (cluster *Cluster) Sync() error {
         // get all nodes
         nodes, err := cluster.pve.Nodes()
         if err != nil {
-            return err
+            err_ch <- err
+            return
         }
         // for each node:
         for _, hostName := range nodes {
             // rebuild node
-            err := cluster.RebuildHost(hostName)
+            err := cluster.RebuildNode(hostName)
             if err != nil { // if an error was encountered, continue and log the error
-                log.Print(err.Error())
-                continue
+                log.Printf("[ERR ] %s", err)
+            } else { // otherwise log success
+                log.Printf("[INFO] successfully synced node %s", hostName)
             }
         }
-    return nil
+        err_ch <- nil
+    }()
+
+    return <-err_ch
 }
@@ -59,47 +82,59 @@ func (cluster *Cluster) GetNode(hostName string) (*Node, error) {
         }
     }()

-    host := <-host_ch
-    err := <-err_ch
-
-    return host, err
+    return <-host_ch, <-err_ch
 }

-func (cluster *Cluster) RebuildHost(hostName string) error {
+// hard sync node
+// returns error if the node could not be reached
+func (cluster *Cluster) RebuildNode(hostName string) error {
+    err_ch := make(chan error)
+
+    go func() {
         host, err := cluster.pve.Node(hostName)
-        if err != nil { // host is probably down or otherwise unreachable
-            return fmt.Errorf("error retrieving %s: %s, possibly down?", hostName, err.Error())
+        if err != nil && cluster.Nodes[hostName] == nil { // host is unreachable and did not exist previously
+            // return an error because we requested to sync a node that was not already in the cluster
+            err_ch <- fmt.Errorf("error retrieving %s: %s", hostName, err.Error())
         }

         // aquire lock on host, release on return
         host.lock.Lock()
         defer host.lock.Unlock()

+        if err != nil && cluster.Nodes[hostName] != nil { // host is unreachable and did exist previously
+            // assume the node is down or gone and delete from cluster
+            delete(cluster.Nodes, hostName)
+            err_ch <- nil
+        }
+
         cluster.Nodes[hostName] = host

         // get node's VMs
         vms, err := host.VirtualMachines()
         if err != nil {
-            return err
+            err_ch <- err
         }
         for _, vmid := range vms {
             err := host.RebuildInstance(VM, vmid)
             if err != nil { // if an error was encountered, continue and log the error
-                log.Print(err.Error())
-                continue
+                log.Printf("[ERR ] %s", err)
+            } else {
+                log.Printf("[INFO] successfully synced vm %s.%d", hostName, vmid)
             }
         }

         // get node's CTs
         cts, err := host.Containers()
         if err != nil {
-            return err
+            err_ch <- err
         }
         for _, vmid := range cts {
             err := host.RebuildInstance(CT, vmid)
-            if err != nil {
-                return err
+            if err != nil { // if an error was encountered, continue and log the error
+                log.Printf("[ERR ] %s", err)
+            } else {
+                log.Printf("[INFO] successfully synced ct %s.%d", hostName, vmid)
             }
         }

@@ -112,7 +147,9 @@ func (cluster *Cluster) RebuildHost(hostName string) error {
             device.Reserved = reserved
         }

-    return nil
+        err_ch <- nil
+    }()
+    return <-err_ch
 }

 func (host *Node) GetInstance(vmid uint) (*Instance, error) {
@@ -138,33 +175,41 @@ func (host *Node) GetInstance(vmid uint) (*Instance, error) {
         }
     }()

-    instance := <-instance_ch
-    err := <-err_ch
-    return instance, err
+    return <-instance_ch, <-err_ch
 }

+// hard sync instance
+// returns error if the instance could not be reached
 func (host *Node) RebuildInstance(instancetype InstanceType, vmid uint) error {
+    err_ch := make(chan error)
+
+    go func() {
+        instanceID := InstanceID(vmid)
         var instance *Instance
+        var err error
         if instancetype == VM {
-            var err error
             instance, err = host.VirtualMachine(vmid)
-            if err != nil {
-                return fmt.Errorf("error retrieving %d: %s, possibly down?", vmid, err.Error())
-            }
         } else if instancetype == CT {
-            var err error
             instance, err = host.Container(vmid)
-            if err != nil {
-                return fmt.Errorf("error retrieving %d: %s, possibly down?", vmid, err.Error())
-            }
         }

+        if err != nil && host.Instances[instanceID] == nil { // instance is unreachable and did not exist previously
+            // return an error because we requested to sync an instance that was not already in the cluster
+            err_ch <- fmt.Errorf("error retrieving %s.%d: %s", host.Name, instanceID, err.Error())
+        }
+
         // aquire lock on instance, release on return
         instance.lock.Lock()
         defer instance.lock.Unlock()

-    host.Instances[InstanceID(vmid)] = instance
+        if err != nil && host.Instances[instanceID] != nil { // host is unreachable and did exist previously
+            // assume the instance is gone and delete from cluster
+            delete(host.Instances, instanceID)
+            err_ch <- nil
+        }
+
+        host.Instances[instanceID] = instance
+
         for volid := range instance.configDisks {
             instance.RebuildVolume(host, volid)
@@ -182,7 +227,10 @@ func (host *Node) RebuildInstance(instancetype InstanceType, vmid uint) error {
             instance.RebuildBoot()
         }

-    return nil
+        err_ch <- nil
+    }()
+
+    return <-err_ch
 }

 func (instance *Instance) RebuildVolume(host *Node, volid string) error {
@@ -215,10 +263,11 @@ func (instance *Instance) RebuildNet(netid string) error {
     return nil
 }

-func (instance *Instance) RebuildDevice(host *Node, deviceid string) error {
+func (instance *Instance) RebuildDevice(host *Node, deviceid string) {
     instanceDevice, ok := instance.configHostPCIs[deviceid]
     if !ok { // if device does not exist
-        return fmt.Errorf("%s not found in devices", deviceid)
+        log.Printf("[WARN] %s not found in devices", deviceid)
+        return
     }

     hostDeviceBusID := DeviceID(strings.Split(instanceDevice, ",")[0])
@@ -234,9 +283,6 @@ func (instance *Instance) RebuildDevice(host *Node, deviceid string) error {
     }

     instance.Devices[DeviceID(instanceDeviceBusID)].Device_ID = DeviceID(deviceid)
-    instance.Devices[DeviceID(instanceDeviceBusID)].Value = instanceDevice
-
-    return nil
 }

 func (instance *Instance) RebuildBoot() {
@@ -265,7 +311,7 @@ func (instance *Instance) RebuildBoot() {
             instance.Boot.Enabled = append(instance.Boot.Enabled, val)
             delete(eligibleBoot, bootTarget)
         } else { // item is not eligible for boot but is included in the boot order
-            log.Printf("Encountered enabled but non-eligible boot target %s in instance %s\n", bootTarget, instance.Name)
+            log.Printf("[WARN] encountered enabled but non-eligible boot target %s in instance %s\n", bootTarget, instance.Name)
             delete(eligibleBoot, bootTarget)
         }
     }
@@ -277,7 +323,7 @@ func (instance *Instance) RebuildBoot() {
         } else if val, ok := instance.Nets[NetID(bootTarget)]; ok && isEligible { // if the item is eligible and is in nets
             instance.Boot.Disabled = append(instance.Boot.Disabled, val)
         } else { // item is not eligible and is not already in the boot order, skip adding to model
-            log.Printf("Encountered disabled and non-eligible boot target %s in instance %s\n", bootTarget, instance.Name)
+            log.Printf("[WARN] encountered disabled and non-eligible boot target %s in instance %s\n", bootTarget, instance.Name)
         }
     }
 }
```

```diff
@@ -9,7 +9,7 @@ import (
 type Cluster struct {
     lock sync.Mutex
     pve  ProxmoxClient
-    Nodes map[string]*Node
+    Nodes map[string]*Node `json:"nodes"`
 }

 type Node struct {
@@ -88,7 +88,6 @@ type Device struct {
     Vendor_Name string                   `json:"vendor_name"`
     Functions   map[FunctionID]*Function `json:"functions"`
     Reserved    bool                     `json:"reserved"`
-    Value string
 }

 type FunctionID string
```
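The recurring change in app/model.go is a concurrency pattern: each accessor or rebuild function now runs its mutex-guarded body in a goroutine and hands results back over unbuffered channels, so the caller blocks until the lock has been acquired and the work finished. A minimal sketch of that pattern, using hypothetical names (`resource`, `get`) that are not from the repo:

```go
// Minimal sketch of the goroutine-plus-channels pattern this diff
// applies to Get, Sync, RebuildNode and RebuildInstance. Names are
// illustrative, not from the repository.
package main

import (
	"fmt"
	"sync"
)

type resource struct {
	lock  sync.Mutex
	value int
}

// get returns the value through channels, mirroring Cluster.Get.
func (r *resource) get() (int, error) {
	val_ch := make(chan int)
	err_ch := make(chan error)

	go func() {
		// acquire lock, release on return
		r.lock.Lock()
		defer r.lock.Unlock()
		val_ch <- r.value
		err_ch <- nil
	}()

	// receives happen in send order: value first, then the error
	return <-val_ch, <-err_ch
}

func main() {
	r := &resource{value: 42}
	v, err := r.get()
	fmt.Println(v, err)
}
```

One property of this pattern worth noting: each send on an unbuffered channel must be matched by exactly one receive. An early-error branch that sends on `err_ch` therefore needs to return immediately afterwards; if the goroutine runs on to a second send that nobody receives, it blocks forever.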
go.mod (37 changed lines)

```diff
@@ -1,32 +1,33 @@
 module proxmoxaas-fabric

-go 1.25.1
+go 1.25.5

 require (
-    github.com/gin-gonic/gin v1.10.1
+    github.com/gin-gonic/gin v1.11.0
     github.com/luthermonson/go-proxmox v0.2.3
 )

 require (
     github.com/buger/goterm v1.0.4 // indirect
     github.com/bytedance/gopkg v0.1.3 // indirect
-    github.com/bytedance/sonic v1.14.1 // indirect
-    github.com/bytedance/sonic/loader v0.3.0 // indirect
+    github.com/bytedance/sonic v1.14.2 // indirect
+    github.com/bytedance/sonic/loader v0.4.0 // indirect
     github.com/cloudwego/base64x v0.1.6 // indirect
     github.com/diskfs/go-diskfs v1.7.0 // indirect
     github.com/djherbis/times v1.6.0 // indirect
-    github.com/elliotwutingfeng/asciiset v0.0.0-20250812055617-fb43ac3ba420 // indirect
-    github.com/gabriel-vasile/mimetype v1.4.10 // indirect
+    github.com/elliotwutingfeng/asciiset v0.0.0-20250912055424-93680c478db2 // indirect
+    github.com/gabriel-vasile/mimetype v1.4.11 // indirect
     github.com/gin-contrib/sse v1.1.0 // indirect
     github.com/go-playground/locales v0.14.1 // indirect
     github.com/go-playground/universal-translator v0.18.1 // indirect
-    github.com/go-playground/validator/v10 v10.27.0 // indirect
+    github.com/go-playground/validator/v10 v10.28.0 // indirect
     github.com/goccy/go-json v0.10.5 // indirect
+    github.com/goccy/go-yaml v1.19.0 // indirect
     github.com/google/uuid v1.6.0 // indirect
     github.com/gorilla/websocket v1.5.3 // indirect
     github.com/jinzhu/copier v0.4.0 // indirect
     github.com/json-iterator/go v1.1.12 // indirect
-    github.com/klauspost/compress v1.18.0 // indirect
+    github.com/klauspost/compress v1.18.2 // indirect
     github.com/klauspost/cpuid/v2 v2.3.0 // indirect
     github.com/leodido/go-urn v1.4.0 // indirect
     github.com/magefile/mage v1.15.0 // indirect
@@ -36,14 +37,16 @@ require (
     github.com/pelletier/go-toml/v2 v2.2.4 // indirect
     github.com/pierrec/lz4/v4 v4.1.22 // indirect
     github.com/pkg/xattr v0.4.12 // indirect
+    github.com/quic-go/qpack v0.6.0 // indirect
+    github.com/quic-go/quic-go v0.57.1 // indirect
     github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
-    github.com/ugorji/go/codec v1.3.0 // indirect
-    github.com/ulikunitz/xz v0.5.12 // indirect
-    golang.org/x/arch v0.21.0 // indirect
-    golang.org/x/crypto v0.42.0 // indirect
-    golang.org/x/net v0.44.0 // indirect
-    golang.org/x/sys v0.36.0 // indirect
-    golang.org/x/text v0.29.0 // indirect
-    google.golang.org/protobuf v1.36.9 // indirect
-    gopkg.in/yaml.v3 v3.0.1 // indirect
+    github.com/ugorji/go/codec v1.3.1 // indirect
+    github.com/ulikunitz/xz v0.5.15 // indirect
+    go.uber.org/mock v0.6.0 // indirect
+    golang.org/x/arch v0.23.0 // indirect
+    golang.org/x/crypto v0.46.0 // indirect
+    golang.org/x/net v0.48.0 // indirect
+    golang.org/x/sys v0.39.0 // indirect
+    golang.org/x/text v0.32.0 // indirect
+    google.golang.org/protobuf v1.36.10 // indirect
 )
```