add basic synchronization using mutex
app/model.go | 100

@@ -6,16 +6,15 @@ import (
 	"strings"
 )
 
 type Cluster struct {
 	pve ProxmoxClient
 	Hosts map[string]*Host
 }
 
 func (cluster *Cluster) Init(pve ProxmoxClient) {
 	cluster.pve = pve
 }
 
-func (cluster *Cluster) Rebuild() error {
+func (cluster *Cluster) Sync() error {
+	// acquire lock on cluster, release on return
+	cluster.lock.Lock()
+	defer cluster.lock.Unlock()
+
 	cluster.Hosts = make(map[string]*Host)
 
 	// get all nodes
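The locking calls above refer to a lock field on Cluster (and, later in the diff, on Host and Instance) that is declared outside the hunks shown here. A minimal sketch of what the new code appears to assume; the field name lock comes from the calls, while sync.Mutex and the exact map value types are assumptions:

import "sync"

type Cluster struct {
	lock  sync.Mutex // assumed; guards Hosts during Sync and GetHost
	pve   ProxmoxClient
	Hosts map[string]*Host
}

type Host struct {
	lock      sync.Mutex // assumed; guards Instances
	Name      string
	Instances map[uint]*Instance // value type inferred from GetInstance
	// ... remaining fields unchanged by this commit
}

type Instance struct {
	lock    sync.Mutex         // assumed; guards Volumes and Nets
	Volumes map[string]*Volume // assumed value type
	Nets    map[uint]*Net      // assumed value type
	// configDisks, configHostPCIs, etc. unchanged by this commit
}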
@@ -26,7 +25,7 @@ func (cluster *Cluster) Rebuild() error {
 	// for each node:
 	for _, hostName := range nodes {
 		// rebuild node
-		err := cluster.RebuildNode(hostName)
+		err := cluster.RebuildHost(hostName)
 		if err != nil {
 			return err
 		}
@@ -35,17 +34,51 @@ func (cluster *Cluster) Rebuild() error {
 	return nil
 }
 
-func (cluster *Cluster) RebuildNode(hostName string) error {
+// get a node in the cluster
+func (cluster *Cluster) GetHost(hostName string) (*Host, error) {
+	host_ch := make(chan *Host)
+	err_ch := make(chan error)
+
+	go func() {
+		// acquire cluster lock
+		cluster.lock.Lock()
+		defer cluster.lock.Unlock()
+		// get host
+		host, ok := cluster.Hosts[hostName]
+		if !ok {
+			host_ch <- nil
+			err_ch <- fmt.Errorf("%s not in cluster", hostName)
+			return
+		}
+		// acquire host lock to wait in case of a concurrent write
+		host.lock.Lock()
+		defer host.lock.Unlock()
+
+		host_ch <- host
+		err_ch <- nil
+	}()
+
+	host := <-host_ch
+	err := <-err_ch
+	return host, err
+}
+
+func (cluster *Cluster) RebuildHost(hostName string) error {
 	host, err := cluster.pve.Node(hostName)
 	if err != nil {
 		return err
 	}
-	cluster.Hosts[hostName] = &host
+
+	// acquire lock on host, release on return
+	host.lock.Lock()
+	defer host.lock.Unlock()
+
+	cluster.Hosts[hostName] = host
 
 	// get node's VMs
 	vms, err := host.VirtualMachines()
 	if err != nil {
 		return err
 	}
 	for _, vmid := range vms {
 		err := host.RebuildVM(vmid)
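GetHost gives callers a read path that waits for any in-flight rebuild of the cluster or the host before returning. A hedged usage sketch (imports fmt and log assumed); the surrounding function and the node name "pve1" are placeholders, only GetHost and its error text come from the diff:

// hypothetical caller
func printHostName(cluster *Cluster) {
	host, err := cluster.GetHost("pve1") // placeholder node name
	if err != nil {
		log.Printf("lookup failed: %v", err) // e.g. "pve1 not in cluster"
		return
	}
	fmt.Println(host.Name)
}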
@@ -69,13 +102,44 @@ func (cluster *Cluster) RebuildNode(hostName string) error {
 	return nil
 }
 
+func (host *Host) GetInstance(vmid uint) (*Instance, error) {
+	instance_ch := make(chan *Instance)
+	err_ch := make(chan error)
+
+	go func() {
+		// acquire host lock
+		host.lock.Lock()
+		defer host.lock.Unlock()
+		// get instance
+		instance, ok := host.Instances[vmid]
+		if !ok {
+			instance_ch <- nil
+			err_ch <- fmt.Errorf("vmid %d not in host %s", vmid, host.Name)
+			return
+		}
+		// acquire instance lock to wait in case of a concurrent write
+		instance.lock.Lock()
+		defer instance.lock.Unlock()
+
+		instance_ch <- instance
+		err_ch <- nil
+	}()
+
+	instance := <-instance_ch
+	err := <-err_ch
+	return instance, err
+}
+
 func (host *Host) RebuildVM(vmid uint) error {
 	instance, err := host.VirtualMachine(vmid)
 	if err != nil {
 		return err
 	}
 
-	host.Instances[vmid] = &instance
+	// acquire lock on instance, release on return
+	instance.lock.Lock()
+	defer instance.lock.Unlock()
+
+	host.Instances[vmid] = instance
 
 	for volid := range instance.configDisks {
 		instance.RebuildVolume(host, volid)
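GetHost and GetInstance run the lookup inside a goroutine and hand the result back over unbuffered channels, which the caller immediately drains, so the observable behaviour matches taking the locks inline. For comparison only (this is not what the commit does), a direct version of the instance lookup under the same assumed types:

func (host *Host) getInstanceDirect(vmid uint) (*Instance, error) {
	host.lock.Lock()
	defer host.lock.Unlock()

	instance, ok := host.Instances[vmid]
	if !ok {
		return nil, fmt.Errorf("vmid %d not in host %s", vmid, host.Name)
	}

	// take and release the instance lock so a concurrent rebuild finishes first
	instance.lock.Lock()
	defer instance.lock.Unlock()
	return instance, nil
}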
@@ -86,7 +150,7 @@ func (host *Host) RebuildVM(vmid uint) error {
 	}
 
 	for deviceid := range instance.configHostPCIs {
-		instance.RebuildDevice(*host, deviceid)
+		instance.RebuildDevice(host, deviceid)
 	}
 
 	return nil
@@ -98,7 +162,11 @@ func (host *Host) RebuildCT(vmid uint) error {
 		return err
 	}
 
-	host.Instances[vmid] = &instance
+	// acquire lock on instance, release on return
+	instance.lock.Lock()
+	defer instance.lock.Unlock()
+
+	host.Instances[vmid] = instance
 
 	for volid := range instance.configDisks {
 		instance.RebuildVolume(host, volid)
@@ -114,12 +182,12 @@ func (host *Host) RebuildCT(vmid uint) error {
 func (instance *Instance) RebuildVolume(host *Host, volid string) error {
 	volumeDataString := instance.configDisks[volid]
 
-	volume, _, _, err := GetVolumeInfo(*host, volumeDataString)
+	volume, _, _, err := GetVolumeInfo(host, volumeDataString)
 	if err != nil {
 		return err
 	}
 
-	instance.Volumes[volid] = &volume
+	instance.Volumes[volid] = volume
 
 	return nil
 }
@@ -136,12 +204,12 @@ func (instance *Instance) RebuildNet(netid string) error {
 		return nil
 	}
 
-	instance.Nets[uint(idnum)] = &netinfo
+	instance.Nets[uint(idnum)] = netinfo
 
 	return nil
 }
 
-func (instance *Instance) RebuildDevice(host Host, deviceid string) error {
+func (instance *Instance) RebuildDevice(host *Host, deviceid string) error {
 	instanceDevice, ok := instance.configHostPCIs[deviceid]
 	if !ok { // if device does not exist
 		return fmt.Errorf("%s not found in devices", deviceid)
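Taken together, Sync is the writer and GetHost/GetInstance are the readers, and the mutexes make it safe to call them from different goroutines. A small illustrative sketch (imports sync and log assumed; the ProxmoxClient value, node name "pve1", and vmid 100 are placeholders, while Init, Sync, GetHost, and GetInstance come from the diff):

func exampleConcurrentUse(pve ProxmoxClient) {
	var cluster Cluster
	cluster.Init(pve)

	var wg sync.WaitGroup
	wg.Add(2)

	// writer: rebuild the cached cluster state
	go func() {
		defer wg.Done()
		if err := cluster.Sync(); err != nil {
			log.Printf("sync failed: %v", err)
		}
	}()

	// reader: look up a host and one of its instances
	go func() {
		defer wg.Done()
		host, err := cluster.GetHost("pve1")
		if err != nil {
			log.Printf("host lookup: %v", err)
			return
		}
		if _, err := host.GetInstance(100); err != nil {
			log.Printf("instance lookup: %v", err)
		}
	}()

	wg.Wait()
}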