add sync endpoints,

refactor hosts to nodes,
change json return values to lowercase for instances and nodes,
combine rebuildVM and rebuildCT to rebuildInstance
This commit is contained in:
Arthur Lu 2025-02-11 07:11:05 +00:00
parent 3cd9526c8b
commit 9bd8c351d9
4 changed files with 152 additions and 100 deletions

@ -67,50 +67,50 @@ func Run() {
}) })
router.GET("/nodes/:node", func(c *gin.Context) { router.GET("/nodes/:node", func(c *gin.Context) {
node := c.Param("node") nodeid := c.Param("node")
host, err := cluster.GetHost(node) node, err := cluster.GetNode(nodeid)
if err != nil { if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err}) c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return return
} else { } else {
c.JSON(http.StatusOK, gin.H{"node": host}) c.JSON(http.StatusOK, gin.H{"node": node})
return return
} }
}) })
router.GET("/nodes/:node/devices", func(c *gin.Context) { router.GET("/nodes/:node/devices", func(c *gin.Context) {
node := c.Param("node") nodeid := c.Param("node")
host, err := cluster.GetHost(node) node, err := cluster.GetNode(nodeid)
if err != nil { if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err}) c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return return
} else { } else {
c.JSON(http.StatusOK, gin.H{"devices": host.Devices}) c.JSON(http.StatusOK, gin.H{"devices": node.Devices})
return return
} }
}) })
router.GET("/nodes/:node/instances/:instance", func(c *gin.Context) { router.GET("/nodes/:node/instances/:vmid", func(c *gin.Context) {
node := c.Param("node") nodeid := c.Param("node")
vmid, err := strconv.ParseUint(c.Param("instance"), 10, 64) vmid, err := strconv.ParseUint(c.Param("vmid"), 10, 64)
if err != nil { if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s could not be converted to vmid (uint)", c.Param("instance"))}) c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s could not be converted to vmid (uint)", c.Param("instance"))})
return return
} }
host, err := cluster.GetHost(node) node, err := cluster.GetNode(nodeid)
if err != nil { if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("vmid %s not found in cluster", node)}) c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return return
} else { } else {
instance, err := host.GetInstance(uint(vmid)) instance, err := node.GetInstance(uint(vmid))
if err != nil { if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%d not found in %s", vmid, node)}) c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return return
} else { } else {
c.JSON(http.StatusOK, gin.H{"instance": instance}) c.JSON(http.StatusOK, gin.H{"instance": instance})
@ -119,5 +119,65 @@ func Run() {
} }
}) })
router.POST("/sync", func(c *gin.Context) {
go func() {
start := time.Now()
log.Printf("Starting cluster sync\n")
cluster.Sync()
log.Printf("Synced cluster in %fs\n", time.Since(start).Seconds())
}()
})
router.POST("/nodes/:node/sync", func(c *gin.Context) {
nodeid := c.Param("node")
go func() {
start := time.Now()
log.Printf("Starting %s sync\n", nodeid)
err := cluster.RebuildHost(nodeid)
if err != nil {
log.Printf("Failed to sync %s: %s", nodeid, err.Error())
return
} else {
log.Printf("Synced %s in %fs\n", nodeid, time.Since(start).Seconds())
return
}
}()
})
router.POST("/nodes/:node/instances/:vmid/sync", func(c *gin.Context) {
nodeid := c.Param("node")
vmid, err := strconv.ParseUint(c.Param("vmid"), 10, 64)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s could not be converted to vmid (uint)", c.Param("instance"))})
return
}
go func() {
start := time.Now()
log.Printf("Starting %s.%d sync\n", nodeid, vmid)
node, err := cluster.GetNode(nodeid)
if err != nil {
log.Printf("Failed to sync %s.%d: %s", nodeid, vmid, err.Error())
return
}
instance, err := node.GetInstance(uint(vmid))
if err != nil {
log.Printf("Failed to sync %s.%d: %s", nodeid, vmid, err.Error())
return
}
err = node.RebuildInstance(instance.Type, uint(vmid))
if err != nil {
log.Printf("Failed to sync %s.%d: %s", nodeid, vmid, err.Error())
return
} else {
log.Printf("Synced %s.%d in %fs\n", nodeid, vmid, time.Since(start).Seconds())
return
}
}()
})
router.Run("0.0.0.0:" + strconv.Itoa(config.ListenPort)) router.Run("0.0.0.0:" + strconv.Itoa(config.ListenPort))
} }

@ -15,7 +15,7 @@ func (cluster *Cluster) Sync() error {
cluster.lock.Lock() cluster.lock.Lock()
defer cluster.lock.Unlock() defer cluster.lock.Unlock()
cluster.Hosts = make(map[string]*Host) cluster.Nodes = make(map[string]*Node)
// get all nodes // get all nodes
nodes, err := cluster.pve.Nodes() nodes, err := cluster.pve.Nodes()
@ -35,8 +35,8 @@ func (cluster *Cluster) Sync() error {
} }
// get a node in the cluster // get a node in the cluster
func (cluster *Cluster) GetHost(hostName string) (*Host, error) { func (cluster *Cluster) GetNode(hostName string) (*Node, error) {
host_ch := make(chan *Host) host_ch := make(chan *Node)
err_ch := make(chan error) err_ch := make(chan error)
go func() { go func() {
@ -44,21 +44,23 @@ func (cluster *Cluster) GetHost(hostName string) (*Host, error) {
cluster.lock.Lock() cluster.lock.Lock()
defer cluster.lock.Unlock() defer cluster.lock.Unlock()
// get host // get host
host, ok := cluster.Hosts[hostName] host, ok := cluster.Nodes[hostName]
if !ok { if !ok {
host_ch <- nil host_ch <- nil
err_ch <- fmt.Errorf("%s not in cluster", hostName) err_ch <- fmt.Errorf("%s not in cluster", hostName)
} } else {
// acquire host lock to wait in case of a concurrent write // acquire host lock to wait in case of a concurrent write
host.lock.Lock() host.lock.Lock()
defer host.lock.Unlock() defer host.lock.Unlock()
host_ch <- host host_ch <- host
err_ch <- nil err_ch <- nil
}
}() }()
host := <-host_ch host := <-host_ch
err := <-err_ch err := <-err_ch
return host, err return host, err
} }
@ -72,7 +74,7 @@ func (cluster *Cluster) RebuildHost(hostName string) error {
host.lock.Lock() host.lock.Lock()
defer host.lock.Unlock() defer host.lock.Unlock()
cluster.Hosts[hostName] = host cluster.Nodes[hostName] = host
// get node's VMs // get node's VMs
vms, err := host.VirtualMachines() vms, err := host.VirtualMachines()
@ -81,7 +83,7 @@ func (cluster *Cluster) RebuildHost(hostName string) error {
} }
for _, vmid := range vms { for _, vmid := range vms {
err := host.RebuildVM(vmid) err := host.RebuildInstance(VM, vmid)
if err != nil { if err != nil {
return err return err
} }
@ -93,7 +95,7 @@ func (cluster *Cluster) RebuildHost(hostName string) error {
return err return err
} }
for _, vmid := range cts { for _, vmid := range cts {
err := host.RebuildCT(vmid) err := host.RebuildInstance(CT, vmid)
if err != nil { if err != nil {
return err return err
} }
@ -102,7 +104,7 @@ func (cluster *Cluster) RebuildHost(hostName string) error {
return nil return nil
} }
func (host *Host) GetInstance(vmid uint) (*Instance, error) { func (host *Node) GetInstance(vmid uint) (*Instance, error) {
instance_ch := make(chan *Instance) instance_ch := make(chan *Instance)
err_ch := make(chan error) err_ch := make(chan error)
@ -115,13 +117,14 @@ func (host *Host) GetInstance(vmid uint) (*Instance, error) {
if !ok { if !ok {
instance_ch <- nil instance_ch <- nil
err_ch <- fmt.Errorf("vmid %d not in host %s", vmid, host.Name) err_ch <- fmt.Errorf("vmid %d not in host %s", vmid, host.Name)
} } else {
// acquire instance lock to wait in case of a concurrent write // acquire instance lock to wait in case of a concurrent write
instance.lock.Lock() instance.lock.Lock()
defer instance.lock.Unlock() defer instance.lock.Unlock()
instance_ch <- instance instance_ch <- instance
err_ch <- nil err_ch <- nil
}
}() }()
instance := <-instance_ch instance := <-instance_ch
@ -129,10 +132,21 @@ func (host *Host) GetInstance(vmid uint) (*Instance, error) {
return instance, err return instance, err
} }
func (host *Host) RebuildVM(vmid uint) error { func (host *Node) RebuildInstance(instancetype InstanceType, vmid uint) error {
instance, err := host.VirtualMachine(vmid) var instance *Instance
if err != nil { if instancetype == VM {
return err var err error
instance, err = host.VirtualMachine(vmid)
if err != nil {
return err
}
} else if instancetype == CT {
var err error
instance, err = host.Container(vmid)
if err != nil {
return err
}
} }
// acquire lock on instance, release on return // acquire lock on instance, release on return
@ -156,33 +170,10 @@ func (host *Host) RebuildVM(vmid uint) error {
return nil return nil
} }
func (host *Host) RebuildCT(vmid uint) error { func (instance *Instance) RebuildVolume(host *Node, volid string) error {
instance, err := host.Container(vmid)
if err != nil {
return err
}
// acquire lock on instance, release on return
instance.lock.Lock()
defer instance.lock.Unlock()
host.Instances[vmid] = instance
for volid := range instance.configDisks {
instance.RebuildVolume(host, volid)
}
for netid := range instance.configNets {
instance.RebuildNet(netid)
}
return nil
}
func (instance *Instance) RebuildVolume(host *Host, volid string) error {
volumeDataString := instance.configDisks[volid] volumeDataString := instance.configDisks[volid]
volume, _, _, err := GetVolumeInfo(host, volumeDataString) volume, err := GetVolumeInfo(host, volumeDataString)
if err != nil { if err != nil {
return err return err
} }
@ -209,7 +200,7 @@ func (instance *Instance) RebuildNet(netid string) error {
return nil return nil
} }
func (instance *Instance) RebuildDevice(host *Host, deviceid string) error { func (instance *Instance) RebuildDevice(host *Node, deviceid string) error {
instanceDevice, ok := instance.configHostPCIs[deviceid] instanceDevice, ok := instance.configHostPCIs[deviceid]
if !ok { // if device does not exist if !ok { // if device does not exist
return fmt.Errorf("%s not found in devices", deviceid) return fmt.Errorf("%s not found in devices", deviceid)

@ -57,8 +57,8 @@ func (pve ProxmoxClient) Nodes() ([]string, error) {
} }
// Gets a Node's resources but does not recursively expand instances // Gets a Node's resources but does not recursively expand instances
func (pve ProxmoxClient) Node(nodeName string) (*Host, error) { func (pve ProxmoxClient) Node(nodeName string) (*Node, error) {
host := Host{} host := Node{}
host.Devices = make(map[string]*Device) host.Devices = make(map[string]*Device)
host.Instances = make(map[uint]*Instance) host.Instances = make(map[uint]*Instance)
@ -87,7 +87,7 @@ func (pve ProxmoxClient) Node(nodeName string) (*Host, error) {
} }
// Get all VM IDs on specified host // Get all VM IDs on specified host
func (host *Host) VirtualMachines() ([]uint, error) { func (host *Node) VirtualMachines() ([]uint, error) {
vms, err := host.pvenode.VirtualMachines(context.Background()) vms, err := host.pvenode.VirtualMachines(context.Background())
if err != nil { if err != nil {
return nil, err return nil, err
@ -100,7 +100,7 @@ func (host *Host) VirtualMachines() ([]uint, error) {
} }
// Get a VM's CPU, Memory but does not recursively link Devices, Disks, Drives, Nets // Get a VM's CPU, Memory but does not recursively link Devices, Disks, Drives, Nets
func (host *Host) VirtualMachine(VMID uint) (*Instance, error) { func (host *Node) VirtualMachine(VMID uint) (*Instance, error) {
instance := Instance{} instance := Instance{}
vm, err := host.pvenode.VirtualMachine(context.Background(), int(VMID)) vm, err := host.pvenode.VirtualMachine(context.Background(), int(VMID))
if err != nil { if err != nil {
@ -135,7 +135,7 @@ func MergeVMDisksAndUnused(vmc *proxmox.VirtualMachineConfig) map[string]string
} }
// Get all CT IDs on specified host // Get all CT IDs on specified host
func (host *Host) Containers() ([]uint, error) { func (host *Node) Containers() ([]uint, error) {
cts, err := host.pvenode.Containers(context.Background()) cts, err := host.pvenode.Containers(context.Background())
if err != nil { if err != nil {
return nil, err return nil, err
@ -148,7 +148,7 @@ func (host *Host) Containers() ([]uint, error) {
} }
// Get a CT's CPU, Memory, Swap but does not recursively link Devices, Disks, Drives, Nets // Get a CT's CPU, Memory, Swap but does not recursively link Devices, Disks, Drives, Nets
func (host *Host) Container(VMID uint) (*Instance, error) { func (host *Node) Container(VMID uint) (*Instance, error) {
instance := Instance{} instance := Instance{}
ct, err := host.pvenode.Container(context.Background(), int(VMID)) ct, err := host.pvenode.Container(context.Background(), int(VMID))
if err != nil { if err != nil {
@ -185,31 +185,32 @@ func MergeCTDisksAndUnused(cc *proxmox.ContainerConfig) map[string]string {
return mergedDisks return mergedDisks
} }
// get volume fornmat, size, volumeid, and storageid from instance volume data string (eg: local:100/vm-100-disk-0.raw ... ) // get volume format, size, volumeid, and storageid from instance volume data string (eg: local:100/vm-100-disk-0.raw ... )
func GetVolumeInfo(host *Host, volume string) (*Volume, string, string, error) { func GetVolumeInfo(host *Node, volume string) (*Volume, error) {
volumeData := Volume{} volumeData := Volume{}
storageID := strings.Split(volume, ":")[0] storageID := strings.Split(volume, ":")[0]
volumeID := strings.Split(volume, ",")[0] volumeID := strings.Split(volume, ",")[0]
storage, err := host.pvenode.Storage(context.Background(), storageID) storage, err := host.pvenode.Storage(context.Background(), storageID)
if err != nil { if err != nil {
return &volumeData, volumeID, storageID, nil return &volumeData, nil
} }
content, err := storage.GetContent(context.Background()) content, err := storage.GetContent(context.Background())
if err != nil { if err != nil {
return &volumeData, volumeID, storageID, nil return &volumeData, nil
} }
for _, c := range content { for _, c := range content {
if c.Volid == volumeID { if c.Volid == volumeID {
volumeData.Storage = storageID
volumeData.Format = c.Format volumeData.Format = c.Format
volumeData.Size = uint64(c.Size) volumeData.Size = uint64(c.Size)
volumeData.Volid = volumeID volumeData.Volid = volumeID
} }
} }
return &volumeData, volumeID, storageID, nil return &volumeData, nil
} }
func GetNetInfo(net string) (*Net, error) { func GetNetInfo(net string) (*Net, error) {

@ -9,17 +9,17 @@ import (
type Cluster struct { type Cluster struct {
lock sync.Mutex lock sync.Mutex
pve ProxmoxClient pve ProxmoxClient
Hosts map[string]*Host Nodes map[string]*Node
} }
type Host struct { type Node struct {
lock sync.Mutex lock sync.Mutex
Name string Name string `json:"name"`
Cores uint64 Cores uint64 `json:"cores"`
Memory uint64 Memory uint64 `json:"memory"`
Swap uint64 Swap uint64 `json:"swap"`
Devices map[string]*Device Devices map[string]*Device `json:"devices"`
Instances map[uint]*Instance Instances map[uint]*Instance `json:"instances"`
pvenode *proxmox.Node pvenode *proxmox.Node
} }
@ -32,15 +32,15 @@ const (
type Instance struct { type Instance struct {
lock sync.Mutex lock sync.Mutex
Type InstanceType Type InstanceType `json:"type"`
Name string Name string `json:"name"`
Proctype string Proctype string `json:"cpu"`
Cores uint64 Cores uint64 `json:"cores"`
Memory uint64 Memory uint64 `json:"memory"`
Swap uint64 Swap uint64 `json:"swap"`
Volumes map[string]*Volume Volumes map[string]*Volume `json:"volumes"`
Nets map[uint]*Net Nets map[uint]*Net `json:"nets"`
Devices map[uint][]*Device Devices map[uint][]*Device `json:"devices"`
pveconfig interface{} pveconfig interface{}
configDisks map[string]string configDisks map[string]string
configNets map[string]string configNets map[string]string
@ -48,15 +48,15 @@ type Instance struct {
} }
type Volume struct { type Volume struct {
Path string Storage string `json:"storage"`
Format string Format string `json:"format"`
Size uint64 Size uint64 `json:"size"`
Volid string Volid string `json:"volid"`
} }
type Net struct { type Net struct {
Rate uint64 Rate uint64 `json:"rate"`
VLAN uint64 VLAN uint64 `json:"vlan"`
} }
type Device struct { type Device struct {
@ -65,5 +65,5 @@ type Device struct {
VendorName string `json:"vendor_name"` VendorName string `json:"vendor_name"`
SubsystemDeviceName string `json:"subsystem_device_name"` SubsystemDeviceName string `json:"subsystem_device_name"`
SubsystemVendorName string `json:"subsystem_vendor_name"` SubsystemVendorName string `json:"subsystem_vendor_name"`
Reserved bool Reserved bool `json:"reserved"`
} }