add default export to db and package module,
start grouping routes by type, use dynamic imports and express routers
This commit is contained in:
parent
fcb2151019
commit
76ccfabf59
@ -1,257 +0,0 @@
|
||||
import { WebSocketServer } from "ws";
|
||||
import * as cookie from "cookie";
|
||||
|
||||
import { requestPVE } from "./pve.js";
|
||||
import { checkAuth, getObjectHash, getTimeLeft } from "./utils.js";
|
||||
import { db, pveAPIToken } from "./db.js";
|
||||
|
||||
// maps usernames to socket object(s); a user may hold several sockets at once
const userSocketMap = {};
// maps pool ids to the set of users belonging to that pool
const poolUserMap = {};
// maps sockets (keyed by socket.rateIndex) to their requested sync rates in ms
const requestedRates = {};
// stores the next queued interrupt handler timeout (null when no sync is scheduled)
let timer = null;
// previous cluster state snapshot used by the interrupt handler to detect changes
let prevState = {};
// fractional-second offset captured when the timer starts
// NOTE(review): used by handleInterruptSync to compensate timer drift — confirm intent
let targetMSTime = null;
|
||||
|
||||
/**
 * Register client synchronization endpoints on the express app and, when the
 * interrupt scheme is enabled, a websocket endpoint on the http server.
 * Supported schemes (per options.schemes): always, hash, interrupt.
 * @param {Object} app - express application for HTTP routes
 * @param {Object} server - http server receiving websocket upgrade requests
 * @param {Object} options - {schemes: Object, resourcetypes: Array}
 */
export function setupClientSync (app, server, options) {
	const schemes = options.schemes;
	const resourceTypes = options.resourcetypes;
	/**
	 * GET - get list of supported synchronization schemes
	 * responses:
	 * - 200 : {always: boolean, hash: boolean, interrupt: boolean}
	 */
	app.get("/api/sync/schemes", async (req, res) => {
		res.send(schemes);
	});
	if (schemes.always.enabled) {
		console.log("clientsync: enabled always sync");
	}
	// setup hash scheme
	if (schemes.hash.enabled) {
		/**
		 * GET - get hash of current cluster resources states
		 * Client can use this endpoint to check for cluster state changes to avoid costly data transfers to the client.
		 * responses:
		 * - 401: {auth: false}
		 * - 200: string
		 */
		app.get("/api/sync/hash", async (req, res) => {
			// check auth
			const auth = await checkAuth(req.cookies, res);
			if (!auth) {
				return;
			}
			// get current cluster resources
			const status = (await requestPVE("/cluster/resources", "GET", req.cookies)).data.data;
			// filter out just state information of resources that are needed
			const state = extractClusterState(status, resourceTypes);
			res.status(200).send(getObjectHash(state));
		});
		console.log("clientsync: enabled hash sync");
	}
	// setup interrupt scheme
	if (schemes.interrupt.enabled) {
		const wsServer = new WebSocketServer({ noServer: true, path: "/api/sync/interrupt" });
		wsServer.on("connection", (socket, username, pool) => {
			// add new socket to userSocketMap
			if (userSocketMap[username]) {
				// NOTE(review): length-based index can collide with a live socket after
				// deletions (keys {0,1} -> delete 0 -> length 1 overwrites key 1);
				// consider a monotonically increasing counter instead
				const index = Object.keys(userSocketMap[username]).length;
				userSocketMap[username][index] = socket;
				socket.userIndex = index;
			}
			else {
				userSocketMap[username] = { 0: socket };
				socket.userIndex = 0;
			}
			// add user to associated pool in poolUserMap
			if (poolUserMap[pool]) {
				poolUserMap[pool][username] = true;
			}
			else {
				poolUserMap[pool] = {};
				poolUserMap[pool][username] = true;
			}
			// add socket entry into requestedRates
			// NOTE(review): same length-based collision risk as userSocketMap above
			const index = Object.keys(requestedRates).length;
			requestedRates[index] = Infinity;
			socket.rateIndex = index;
			// handle socket error
			socket.on("error", console.error);
			// handle socket close
			socket.on("close", () => {
				// remove closed socket from userSocketMap user entry
				delete userSocketMap[username][socket.userIndex];
				// if there are no more sockets for this user, delete the userSocketMap user entry and the poolUserMap user entry
				if (Object.keys(userSocketMap[username]).length === 0) {
					// delete the user entry
					delete userSocketMap[username];
					// remove user from poolUserMap pool entry
					delete poolUserMap[pool][username];
					// if the poolUserMap pool entry is empty, delete the entry
					if (Object.keys(poolUserMap[pool]).length === 0) {
						delete poolUserMap[pool];
					}
				}
				// remove socket entry from requestedRates
				delete requestedRates[socket.rateIndex];
				if (Object.keys(requestedRates).length === 0) { // if there are no requested rates left, clear the timer
					clearTimeout(timer);
					timer = null;
					targetMSTime = null;
				}
				// terminate socket
				socket.terminate();
			});
			// handle socket incoming message
			socket.on("message", (message) => {
				// messages are space-delimited: "<command> <value>"
				const parsed = message.toString().split(" ");
				const cmd = parsed[0];
				// command is rate and the value is valid
				// NOTE(review): parsed[1] is a string; range check relies on JS coercion
				if (cmd === "rate" && parsed[1] >= schemes.interrupt["min-rate"] && parsed[1] <= schemes.interrupt["max-rate"]) {
					// get requested rate in ms
					const rate = Number(parsed[1]) * 1000;
					// if timer has not started, start it with requested rate
					if (!timer) {
						timer = setTimeout(handleInterruptSync, rate);
						const time = global.process.uptime();
						targetMSTime = time - Math.floor(time);
					}
					// otherwise, if the timer has started but the rate is lower than the current minimum
					// AND if the next event trigger is more than the new rate in the future,
					// restart the timer with the new rate
					// avoids a large requested rate preventing a faster rate from being fulfilled
					else if (rate < Math.min.apply(null, Object.values(requestedRates)) && getTimeLeft(timer) > rate) {
						clearTimeout(timer);
						timer = setTimeout(handleInterruptSync, rate);
						const time = global.process.uptime();
						targetMSTime = time - Math.floor(time);
					}
					// otherwise just add the rate to the list, when the next even trigger happens it will be requeued with the new requested rates
					requestedRates[socket.rateIndex] = rate;
				}
				// command is rate but the requested value is out of bounds, terminate socket
				else if (cmd === "rate") {
					socket.send(`error: rate must be in range [${schemes.interrupt["min-rate"]}, ${schemes.interrupt["max-rate"]}].`);
					socket.terminate();
				}
				// otherwise, command is invalid, terminate socket
				else {
					socket.send(`error: ${cmd} command not found.`);
					socket.terminate();
				}
			});
		});
		// handle the wss upgrade request
		server.on("upgrade", async (req, socket, head) => {
			// authenticate the upgrade by probing the PVE API with the client's cookies
			const cookies = cookie.parse(req.headers.cookie || "");
			const auth = (await requestPVE("/version", "GET", cookies)).status === 200;
			if (!auth) {
				socket.destroy();
			}
			else {
				wsServer.handleUpgrade(req, socket, head, (socket) => {
					const pool = db.getUserConfig(cookies.username).cluster.pool;
					wsServer.emit("connection", socket, cookies.username, pool);
				});
			}
		});
		// periodic interrupt tick: diffs cluster state against prevState and
		// notifies affected users over their registered sockets
		const handleInterruptSync = async () => {
			// queue timeout for next iteration with delay of minimum rate
			const minRequestedRate = Math.min.apply(null, Object.values(requestedRates));
			// if the minimum rate is not Infinity, schedule the next timer
			if (minRequestedRate < Infinity) {
				const time = global.process.uptime() - targetMSTime;
				const delay = (time - Math.round(time)) * 1000;
				timer = setTimeout(handleInterruptSync, minRequestedRate - delay);
			}
			// if the minimum rate is Infinity, then don't schedule anything and set timer to null
			else {
				timer = null;
				targetMSTime = null;
				return;
			}
			// get current cluster resources
			const status = (await requestPVE("/cluster/resources", "GET", null, null, pveAPIToken)).data.data;
			// filter out just state information of resources that are needed, and hash each one
			const currState = extractClusterState(status, resourceTypes, true);
			// get a map of users to send sync notifications
			const syncUsers = {};
			// for each current resource in the cluster, check for state changes
			Object.keys(currState).forEach((resource) => {
				// if the resource's current state has changed, add all relevant users to syncUsers
				const resourceCurrState = currState[resource];
				const resourcePrevState = prevState[resource];
				// if the previous state did not exist, or the status or pool have changed, then a resource state was added or modified
				if (!resourcePrevState || resourceCurrState.hash !== resourcePrevState.hash) {
					// if the resource is a node, send sync to all users
					if (resourceCurrState.type === "node") {
						Object.keys(userSocketMap).forEach((user) => {
							syncUsers[user] = true;
						});
					}
					// if the resource is qemu or lxc, send sync to users in the same pool if there is a pool and if the pool has users
					else if (resourceCurrState.pool && poolUserMap[resourceCurrState.pool]) {
						Object.keys(poolUserMap[resourceCurrState.pool]).forEach((user) => {
							syncUsers[user] = true;
						});
					}
				}
			});
			// for each previous resource in the cluster, check for state changes
			Object.keys(prevState).forEach((resource) => {
				const resourceCurrState = currState[resource];
				const resourcePrevState = prevState[resource];
				// if the resource no longer exists in the current state, then it is lost or deleted
				if (!resourceCurrState) {
					// if the resource is a node, send sync to all users
					if (resourcePrevState.type === "node") {
						Object.keys(userSocketMap).forEach((user) => {
							syncUsers[user] = true;
						});
					}
					// if the resource is qemu or lxc, send sync to users in the same pool if there is a pool and if the pool has users
					else if (resourcePrevState.pool && poolUserMap[resourcePrevState.pool]) {
						Object.keys(poolUserMap[resourcePrevState.pool]).forEach((user) => {
							syncUsers[user] = true;
						});
					}
				}
			});
			// for each user in syncUsers, send a sync message over their registered sockets
			for (const user of Object.keys(syncUsers)) {
				for (const socket of Object.keys(userSocketMap[user])) {
					userSocketMap[user][socket].send("sync");
				}
			}
			// set prevState for next iteration
			prevState = currState;
		};
		console.log("clientsync: enabled interrupt sync");
	}
}
|
||||
|
||||
/**
 * Reduce raw PVE cluster resource records to the minimal per-resource state,
 * keyed by resource id, keeping only the requested resource types.
 * @param {Array} status - raw records from /cluster/resources
 * @param {Array} resourceTypes - resource type strings to keep (e.g. node, qemu, lxc)
 * @param {boolean} hashIndividual - when true, attach a hash of each entry's state
 * @returns {Object} map of resource id to {name, type, status, node, pool[, hash]}
 */
function extractClusterState (status, resourceTypes, hashIndividual = false) {
	const state = {};
	for (const resource of status) {
		// skip resource types the caller did not ask for
		if (!resourceTypes.includes(resource.type)) {
			continue;
		}
		const entry = {
			name: resource.name || null,
			type: resource.type,
			status: resource.status,
			node: resource.node,
			pool: resource.pool || null
		};
		// hash is computed over the entry before the hash field itself is added
		if (hashIndividual) {
			entry.hash = getObjectHash(entry);
		}
		state[resource.id] = entry;
	}
	return state;
}
|
12
src/db.js
12
src/db.js
@ -7,6 +7,11 @@ class LocalDB {
|
||||
constructor () {
|
||||
try {
|
||||
this.load();
|
||||
this.pveAPI = this.getGlobalConfig().application.pveAPI;
|
||||
this.pveAPIToken = this.getGlobalConfig().application.pveAPIToken;
|
||||
this.listenPort = this.getGlobalConfig().application.listenPort;
|
||||
this.hostname = this.getGlobalConfig().application.hostname;
|
||||
this.domain = this.getGlobalConfig().application.domain;
|
||||
}
|
||||
catch {
|
||||
console.log("Error: localdb.json was not found. Please follow the directions in the README to initialize localdb.json.");
|
||||
@ -46,9 +51,4 @@ class LocalDB {
|
||||
}
|
||||
}
|
||||
|
||||
export const db = new LocalDB();
|
||||
export const pveAPI = db.getGlobalConfig().application.pveAPI;
|
||||
export const pveAPIToken = db.getGlobalConfig().application.pveAPIToken;
|
||||
export const listenPort = db.getGlobalConfig().application.listenPort;
|
||||
export const hostname = db.getGlobalConfig().application.hostname;
|
||||
export const domain = db.getGlobalConfig().application.domain;
|
||||
export default new LocalDB();
|
||||
|
971
src/main.js
971
src/main.js
File diff suppressed because it is too large
Load Diff
@ -1,2 +1,2 @@
|
||||
import { readFileSync } from "fs";
|
||||
export const api = JSON.parse(readFileSync("package.json"));
|
||||
export default JSON.parse(readFileSync("package.json"));
|
||||
|
@ -1,5 +1,7 @@
|
||||
import axios from "axios";
|
||||
import { pveAPI, pveAPIToken } from "./db.js";
|
||||
import db from "./db.js";
|
||||
const pveAPI = db.pveAPI;
|
||||
const pveAPIToken = db.pveAPIToken;
|
||||
|
||||
/**
|
||||
* Send HTTP request to proxmox API. Allows requests to be made with user cookie credentials or an API token for controlled priviledge elevation.
|
||||
|
61
src/routes/auth.js
Normal file
61
src/routes/auth.js
Normal file
@ -0,0 +1,61 @@
|
||||
import { Router } from "express";
export const router = Router();

// shared services injected onto global by the application entry point
const domain = global.db.domain; // cookie domain used for all auth cookies below
const checkAuth = global.utils.checkAuth;
const requestPVE = global.pve.requestPVE;
|
||||
|
||||
/**
 * GET - check authentication
 * responses:
 * - 200: {auth: true}
 * - 401: {auth: false}
 */
router.get("/", async (req, res) => {
	// checkAuth writes the 401 response itself when validation fails
	const authenticated = await checkAuth(req.cookies, res);
	if (authenticated) {
		res.status(200).send({ auth: true });
	}
});
|
||||
|
||||
/**
 * POST - safer ticket generation using proxmox authentication but adding HttpOnly
 * request:
 * - username: string
 * - password: string
 * responses:
 * - 200: {auth: true}
 * - 401: {auth: false}
 */
router.post("/ticket", async (req, res) => {
	// forward the credentials to PVE and let it validate them
	const response = await requestPVE("/access/ticket", "POST", null, JSON.stringify(req.body));
	if (response.status !== 200) {
		res.status(response.status).send({ auth: false });
		res.end();
		return;
	}
	const { ticket, CSRFPreventionToken: csrftoken, username } = response.data.data;
	// cookies live for 2 hours
	const expire = new Date(Date.now() + (2 * 60 * 60 * 1000));
	// credentials are HttpOnly; username/auth flags are readable by the client
	res.cookie("PVEAuthCookie", ticket, { domain, path: "/", httpOnly: true, secure: true, expires: expire });
	res.cookie("CSRFPreventionToken", csrftoken, { domain, path: "/", httpOnly: true, secure: true, expires: expire });
	res.cookie("username", username, { domain, path: "/", secure: true, expires: expire });
	res.cookie("auth", 1, { domain, path: "/", secure: true, expires: expire });
	res.status(200).send({ auth: true });
});
|
||||
|
||||
/**
 * DELETE - request to destroy ticket
 * responses:
 * - 200: {auth: false}
 */
router.delete("/ticket", async (req, res) => {
	// expire immediately; cookie attributes mirror those used in POST /ticket so
	// the browser matches (and removes) the same cookies that were set there
	const expire = new Date(0);
	res.cookie("PVEAuthCookie", "", { domain, path: "/", httpOnly: true, secure: true, expires: expire });
	res.cookie("CSRFPreventionToken", "", { domain, path: "/", httpOnly: true, secure: true, expires: expire });
	res.cookie("username", "", { domain, path: "/", secure: true, expires: expire });
	res.cookie("auth", 0, { domain, path: "/", secure: true, expires: expire });
	res.status(200).send({ auth: false });
});
|
29
src/routes/cluster.js
Normal file
29
src/routes/cluster.js
Normal file
@ -0,0 +1,29 @@
|
||||
import { Router } from "express";
export const router = Router();

// path-segment patterns constraining the route params below
const nodeRegexP = "[\\w-]+"; // node names: word characters and dashes
const typeRegexP = "qemu|lxc"; // supported instance types
const vmidRegexP = "\\d+"; // numeric vm id

// common prefix for all instance-scoped sub-routes: /:node/:type/:vmid
const basePath = `/:node(${nodeRegexP})/:type(${typeRegexP})/:vmid(${vmidRegexP})`;
||||
|
||||
// Mount each instance sub-router lazily via dynamic import. The small middleware
// copies the matched :node/:type/:vmid params into req.routeparams because nested
// routers replace req.params with their own matches.
// The import promises were previously left floating with no rejection handler,
// which would raise an unhandled rejection if a module failed to load.
for (const route of ["disk", "net", "pci"]) {
	import(`./cluster/${route}.js`).then((module) => {
		router.use(`${basePath}/${route}`, (req, res, next) => {
			req.routeparams = Object.assign({}, req.routeparams, req.params);
			next();
		}, module.router);
	}).catch((err) => {
		console.error(`cluster: failed to load route module ${route}:`, err);
	});
}
|
358
src/routes/cluster/disk.js
Normal file
358
src/routes/cluster/disk.js
Normal file
@ -0,0 +1,358 @@
|
||||
import { Router } from "express";
export const router = Router();

// shared helpers injected onto global by the application entry point
const requestPVE = global.pve.requestPVE;
const handleResponse = global.pve.handleResponse;
const getDiskInfo = global.pve.getDiskInfo;
const checkAuth = global.utils.checkAuth;
const approveResources = global.utils.approveResources;
const pveAPIToken = global.db.pveAPIToken;
|
||||
/**
 * POST - detach mounted disk from instance
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - disk: string - disk id (sata0, NOT unused)
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {error: string}
 * - 500: PVE Task Object
 */
router.post("/:disk/detach", async (req, res) => {
	// merge params captured by the parent router with this route's params
	req.params = Object.assign({}, req.routeparams, req.params);
	const { node, type, vmid, disk } = req.params;
	// only users authorized for this specific instance may modify it
	const vmpath = `/nodes/${node}/${type}/${vmid}`;
	if (!await checkAuth(req.cookies, res, vmpath)) {
		return;
	}
	// fetch the instance's current config
	const config = (await requestPVE(`${vmpath}/config`, "GET", req.cookies, null, null)).data.data;
	// the disk must currently be present on the instance
	if (!config[disk]) {
		res.status(500).send({ error: `Disk ${disk} does not exist.` });
		res.end();
		return;
	}
	// unused disks cannot be detached, only deleted
	if (disk.includes("unused")) {
		res.status(500).send({ error: `Requested disk ${disk} cannot be unused. Use /disk/delete to permanently delete unused disks.` });
		res.end();
		return;
	}
	// qemu and lxc use different HTTP methods for config updates
	const method = type === "qemu" ? "POST" : "PUT";
	const action = JSON.stringify({ delete: disk });
	const result = await requestPVE(`${vmpath}/config`, method, req.cookies, action, pveAPIToken);
	await handleResponse(node, result, res);
});
|
||||
|
||||
/**
 * POST - attach unused disk image to instance
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - disk: string - disk id (sata0 NOT unused)
 * - source: number - source unused disk number (0 => unused0)
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 403: {error: string}
 * - 500: {error: string}
 * - 500: PVE Task Object
 */
router.post(`/:disk/attach`, async (req, res) => {
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		disk: req.params.disk,
		source: req.body.source
	};
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// get current config
	const config = (await requestPVE(`${vmpath}/config`, "GET", req.cookies, null, null)).data.data;
	// source (unused) disk must exist
	if (!config[`unused${params.source}`]) {
		res.status(403).send({ error: `Requested disk unused${params.source} does not exist.` });
		res.end();
		return;
	}
	// target disk must be allowed according to source disk's storage options
	const diskConfig = await getDiskInfo(params.node, params.type, params.vmid, `unused${params.source}`); // get source disk info
	// FIX: `db` was referenced without being imported (ReferenceError at runtime);
	// this file accesses the shared db instance through global, like pveAPIToken above
	const resourceConfig = global.db.getGlobalConfig().resources;
	if (!resourceConfig[diskConfig.storage].disks.some(diskPrefix => params.disk.startsWith(diskPrefix))) {
		res.status(500).send({ error: `Requested target ${params.disk} is not in allowed list [${resourceConfig[diskConfig.storage].disks}].` });
		res.end();
		return;
	}
	// setup action using source disk info from vm config
	let action = {};
	action[params.disk] = config[`unused${params.source}`];
	action = JSON.stringify(action);
	// qemu and lxc use different HTTP methods for config updates
	const method = params.type === "qemu" ? "POST" : "PUT";
	// commit action
	const result = await requestPVE(`${vmpath}/config`, method, req.cookies, action, pveAPIToken);
	await handleResponse(params.node, result, res);
});
|
||||
|
||||
/**
 * POST - increase size of mounted disk
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - disk: string - disk id (sata0 NOT unused)
 * - size: number - increase size in GiB
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {error: string}
 * - 500: {request: Object, error: string}
 * - 500: PVE Task Object
 */
router.post("/:disk/resize", async (req, res) => {
	// merge params captured by the parent router with this route's params
	req.params = Object.assign({}, req.routeparams, req.params);
	const { node, type, vmid, disk } = req.params;
	const size = req.body.size;
	// only users authorized for this specific instance may resize its disks
	const vmpath = `/nodes/${node}/${type}/${vmid}`;
	if (!await checkAuth(req.cookies, res, vmpath)) {
		return;
	}
	// the target disk must exist on the instance
	const diskConfig = await getDiskInfo(node, type, vmid, disk);
	if (!diskConfig) {
		res.status(500).send({ error: `requested disk ${disk} does not exist.` });
		res.end();
		return;
	}
	// request the additional space (GiB converted to bytes) from the backing storage
	const storage = diskConfig.storage;
	const request = { [storage]: Number(size * 1024 ** 3) };
	if (!await approveResources(req, req.cookies.username, request)) {
		res.status(500).send({ request, error: `Storage ${storage} could not fulfill request of size ${size}G.` });
		res.end();
		return;
	}
	// approved; ask PVE to grow the disk
	const action = JSON.stringify({ disk, size: `+${size}G` });
	const result = await requestPVE(`${vmpath}/resize`, "PUT", req.cookies, action, pveAPIToken);
	await handleResponse(node, result, res);
});
|
||||
|
||||
/**
 * POST - move mounted disk from one storage to another
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - disk: string - disk id (sata0 NOT unused)
 * - storage: string - target storage to move disk
 * - delete: number - delete original disk (0, 1)
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {error: string}
 * - 500: {request: Object, error: string}
 * - 500: PVE Task Object
 */
router.post(`/:disk/move`, async (req, res) => {
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		disk: req.params.disk,
		storage: req.body.storage,
		delete: req.body.delete
	};
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// check disk existence
	const diskConfig = await getDiskInfo(params.node, params.type, params.vmid, params.disk); // get source disk
	if (!diskConfig) { // exit if disk does not exist
		res.status(500).send({ error: `requested disk ${params.disk} does not exist.` });
		res.end();
		return;
	}
	// setup request
	const size = parseInt(diskConfig.size); // get source disk size
	const dstStorage = params.storage; // get destination storage
	const request = {};
	if (!params.delete) { // if not delete, then request storage, otherwise it is net 0
		request[dstStorage] = Number(size); // always decrease destination storage by size
	}
	// check request approval
	// FIX: error message previously interpolated params.size, which is never set
	// for this route (request body has no size field) and always printed "undefined"
	if (!await approveResources(req, req.cookies.username, request)) {
		res.status(500).send({ request, error: `Storage ${params.storage} could not fulfill request of size ${size}G.` });
		res.end();
		return;
	}
	// create action; qemu and lxc name the disk field differently
	let action = { storage: params.storage, delete: params.delete };
	if (params.type === "qemu") {
		action.disk = params.disk;
	}
	else {
		action.volume = params.disk;
	}
	action = JSON.stringify(action);
	const route = params.type === "qemu" ? "move_disk" : "move_volume";
	// commit action
	const result = await requestPVE(`${vmpath}/${route}`, "POST", req.cookies, action, pveAPIToken);
	await handleResponse(params.node, result, res);
});
|
||||
|
||||
/**
 * DELETE - delete unused disk permanently
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - disk: string - disk id (unused0 or ide0)
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 403: {error: string}
 * - 500: {error: string}
 * - 500: PVE Task Object
 */
router.delete(`/:disk/delete`, async (req, res) => {
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		disk: req.params.disk
	};
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// get current config
	const config = (await requestPVE(`${vmpath}/config`, "GET", req.cookies, null, null)).data.data;
	// disk must exist
	// FIX: message previously read `unused${params.source}`, but params.source is
	// never set on this route, so the error always reported "unusedundefined"
	if (!config[params.disk]) {
		res.status(403).send({ error: `Requested disk ${params.disk} does not exist.` });
		res.end();
		return;
	}
	// only ide or unused are allowed to be deleted
	if (!params.disk.includes("unused") && !params.disk.includes("ide")) { // must be ide or unused
		res.status(500).send({ error: `Requested disk ${params.disk} must be unused or ide. Use /disk/detach to detach disks in use.` });
		res.end();
		return;
	}
	// create action
	const action = JSON.stringify({ delete: params.disk });
	// qemu and lxc use different HTTP methods for config updates
	const method = params.type === "qemu" ? "POST" : "PUT";
	// commit action
	const result = await requestPVE(`${vmpath}/config`, method, req.cookies, action, pveAPIToken);
	await handleResponse(params.node, result, res);
});
|
||||
|
||||
/**
 * POST - create a new disk in storage of specified size
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - disk: string - disk id (sata0, ide0, NOT unused)
 * - storage: string - storage to hold disk
 * - size: number - size of disk in GiB
 * - iso: string (optional) - file name to mount as cdrom
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 403: {error: string}
 * - 500: {request: Object, error: string}
 * - 500: PVE Task Object
 */
router.post(`/:disk/create`, async (req, res) => {
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		disk: req.params.disk,
		storage: req.body.storage,
		size: req.body.size,
		iso: req.body.iso
	};
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// get current config
	const config = (await requestPVE(`${vmpath}/config`, "GET", req.cookies, null, null)).data.data;
	// disk must not exist
	if (config[params.disk]) {
		res.status(403).send({ error: `Requested disk ${params.disk} already exists.` });
		res.end();
		return;
	}
	// setup request; ide (cdrom) disks consume no user storage quota
	const request = {};
	if (!params.disk.includes("ide")) {
		// request the space (GiB converted to bytes) from the target storage
		request[params.storage] = Number(params.size * 1024 ** 3);
		// check request approval
		if (!await approveResources(req, req.cookies.username, request)) {
			res.status(500).send({ request, error: `Storage ${params.storage} could not fulfill request of size ${params.size}G.` });
			res.end();
			return;
		}
		// target disk must be allowed according to storage options
		// FIX: `db` was referenced without being imported (ReferenceError at runtime);
		// this file accesses the shared db instance through global, like pveAPIToken above
		const resourceConfig = global.db.getGlobalConfig().resources;
		if (!resourceConfig[params.storage].disks.some(diskPrefix => params.disk.startsWith(diskPrefix))) {
			res.status(500).send({ error: `Requested target ${params.disk} is not in allowed list [${resourceConfig[params.storage].disks}].` });
			res.end();
			return;
		}
	}
	// setup action
	let action = {};
	if (params.disk.includes("ide") && params.iso) {
		action[params.disk] = `${params.iso},media=cdrom`;
	}
	else if (params.type === "qemu") { // type is qemu, use sata
		action[params.disk] = `${params.storage}:${params.size}`;
	}
	else { // type is lxc, use mp and add mp and backup values
		action[params.disk] = `${params.storage}:${params.size},mp=/${params.disk}/,backup=1`;
	}
	action = JSON.stringify(action);
	// qemu and lxc use different HTTP methods for config updates
	const method = params.type === "qemu" ? "POST" : "PUT";
	// commit action
	const result = await requestPVE(`${vmpath}/config`, method, req.cookies, action, pveAPIToken);
	await handleResponse(params.node, result, res);
});
|
180
src/routes/cluster/net.js
Normal file
180
src/routes/cluster/net.js
Normal file
@ -0,0 +1,180 @@
|
||||
import { Router } from "express";
export const router = Router();

// shared helpers injected onto global by the application entry point
const requestPVE = global.pve.requestPVE;
const handleResponse = global.pve.handleResponse;
const checkAuth = global.utils.checkAuth;
const approveResources = global.utils.approveResources;
const pveAPIToken = global.db.pveAPIToken;
|
||||
|
||||
/**
 * POST - create new virtual network interface
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - netid: number - network interface id number (0 => net0)
 * - rate: number - new bandwidth rate for interface in MB/s
 * - name: string, optional - required interface name for lxc only
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {error: string}
 * - 500: {request: Object, error: string}
 * - 500: PVE Task Object
 */
router.post("/:netid/create", async (req, res) => {
	// merge in the params forwarded by the parent router (node/type/vmid)
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		netid: req.params.netid.replace("net", ""),
		rate: req.body.rate,
		name: req.body.name
	};
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// get current config
	const currentConfig = await requestPVE(`/nodes/${params.node}/${params.type}/${params.vmid}/config`, "GET", null, null, pveAPIToken);
	// net interface must not exist
	if (currentConfig.data.data[`net${params.netid}`]) {
		res.status(500).send({ error: `Network interface net${params.netid} already exists.` });
		res.end();
		return;
	}
	// lxc interfaces require an explicit name
	if (params.type === "lxc" && !params.name) {
		res.status(500).send({ error: "Network interface must have name parameter." });
		res.end();
		return;
	}
	const request = {
		network: Number(params.rate)
	};
	// check resource approval
	if (!await approveResources(req, req.cookies.username, request)) {
		res.status(500).send({ request, error: `Could not fulfil network request of ${params.rate}MB/s.` });
		res.end();
		return;
	}
	// setup action using the user's network template defaults
	// FIX: `db` was referenced without being bound in this module (only pveAPIToken
	// is pulled from global.db above), which threw a ReferenceError; use global.db
	const nc = global.db.getUserConfig(req.cookies.username).templates.network[params.type];
	let action = {};
	if (params.type === "lxc") {
		action[`net${params.netid}`] = `name=${params.name},bridge=${nc.bridge},ip=${nc.ip},ip6=${nc.ip6},tag=${nc.vlan},type=${nc.type},rate=${params.rate}`;
	}
	else {
		action[`net${params.netid}`] = `${nc.type},bridge=${nc.bridge},tag=${nc.vlan},rate=${params.rate}`;
	}
	action = JSON.stringify(action);
	// qemu config updates use POST, lxc uses PUT
	const method = params.type === "qemu" ? "POST" : "PUT";
	// commit action
	const result = await requestPVE(`${vmpath}/config`, method, req.cookies, action, pveAPIToken);
	await handleResponse(params.node, result, res);
});
|
||||
|
||||
/**
 * POST - modify virtual network interface
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - netid: number - network interface id number (0 => net0)
 * - rate: number - new bandwidth rate for interface in MB/s
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {error: string}
 * - 500: {request: Object, error: string}
 * - 500: PVE Task Object
 */
router.post("/:netid/modify", async (req, res) => {
	// merge in the params forwarded by the parent router (node/type/vmid)
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		netid: req.params.netid.replace("net", ""),
		rate: req.body.rate
	};
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// get current config
	const currentConfig = await requestPVE(`/nodes/${params.node}/${params.type}/${params.vmid}/config`, "GET", null, null, pveAPIToken);
	// net interface must already exist
	if (!currentConfig.data.data[`net${params.netid}`]) {
		res.status(500).send({ error: `Network interface net${params.netid} does not exist.` });
		res.end();
		return;
	}
	const currentNetworkConfig = currentConfig.data.data[`net${params.netid}`];
	// FIX: the old code did split("rate=")[1].split(",")[0], which throws a TypeError
	// when the interface has no rate limit configured; treat a missing rate as 0 instead
	const rateMatch = currentNetworkConfig.match(/rate=([^,]+)/);
	const currentNetworkRate = rateMatch ? rateMatch[1] : "0";
	// the approval request covers only the delta over the current rate
	const request = {
		network: Number(params.rate) - Number(currentNetworkRate)
	};
	// check resource approval
	if (!await approveResources(req, req.cookies.username, request)) {
		res.status(500).send({ request, error: `Could not fulfil network request of ${params.rate}MB/s.` });
		res.end();
		return;
	}
	// setup action: replace the existing rate, or append one if the config had none
	let action = {};
	action[`net${params.netid}`] = rateMatch
		? currentNetworkConfig.replace(`rate=${currentNetworkRate}`, `rate=${params.rate}`)
		: `${currentNetworkConfig},rate=${params.rate}`;
	action = JSON.stringify(action);
	// qemu config updates use POST, lxc uses PUT
	const method = params.type === "qemu" ? "POST" : "PUT";
	// commit action
	const result = await requestPVE(`${vmpath}/config`, method, req.cookies, action, pveAPIToken);
	await handleResponse(params.node, result, res);
});
|
||||
|
||||
/**
 * DELETE - delete virtual network interface
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - netid: number - network interface id number (0 => net0)
 * responses:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {error: string}
 * - 500: PVE Task Object
 */
router.delete("/:netid/delete", async (req, res) => {
	// merge in the params forwarded by the parent router (node/type/vmid)
	req.params = Object.assign({}, req.routeparams, req.params);
	const node = req.params.node;
	const type = req.params.type;
	const vmid = req.params.vmid;
	const netid = req.params.netid.replace("net", "");
	// the caller must be authorized for this specific instance
	const vmpath = `/nodes/${node}/${type}/${vmid}`;
	if (!await checkAuth(req.cookies, res, vmpath)) {
		return;
	}
	// the interface must exist in the current config before it can be removed
	const currentConfig = await requestPVE(`/nodes/${node}/${type}/${vmid}/config`, "GET", null, null, pveAPIToken);
	if (!currentConfig.data.data[`net${netid}`]) {
		res.status(500).send({ error: `Network interface net${netid} does not exist.` });
		res.end();
		return;
	}
	// ask PVE to drop the interface from the instance config
	const action = JSON.stringify({ delete: `net${netid}` });
	// qemu config updates use POST, lxc uses PUT
	const method = type === "qemu" ? "POST" : "PUT";
	const result = await requestPVE(`${vmpath}/config`, method, req.cookies, action, pveAPIToken);
	await handleResponse(node, result, res);
});
|
278
src/routes/cluster/pci.js
Normal file
278
src/routes/cluster/pci.js
Normal file
@ -0,0 +1,278 @@
|
||||
import { Router } from "express";
|
||||
export const router = Router();
|
||||
|
||||
const requestPVE = global.pve.requestPVE;
|
||||
const handleResponse = global.pve.handleResponse;
|
||||
const getDeviceInfo = global.pve.getDeviceInfo;
|
||||
const getNodeAvailDevices = global.pve.getNodeAvailDevices;
|
||||
const checkAuth = global.utils.checkAuth;
|
||||
const approveResources = global.utils.approveResources;
|
||||
const getUserResources = global.utils.getUserResources;
|
||||
const pveAPIToken = global.db.pveAPIToken;
|
||||
|
||||
/**
 * GET - get instance pcie device data
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - hostpci: string - hostpci number
 * responses:
 * - 200: PVE PCI Device Object
 * - 401: {auth: false, path: string}
 * - 500: {error: string}
 */
router.get("/:hostpci", async (req, res) => {
	// merge in the params forwarded by the parent router (node/type/vmid)
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		hostpci: req.params.hostpci.replace("hostpci", "")
	};
	// the caller must be authorized for this specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	if (!await checkAuth(req.cookies, res, vmpath)) {
		return;
	}
	// the requested hostpci slot must be present in the instance config
	const config = (await requestPVE(`${vmpath}/config`, "GET", req.cookies)).data.data;
	const slotValue = config[`hostpci${params.hostpci}`];
	if (!slotValue) {
		res.status(500).send({ error: `Could not find hostpci${params.hostpci} in ${params.vmid}.` });
		res.end();
		return;
	}
	// first comma-separated field of the slot value is the pci device id
	const device = slotValue.split(",")[0];
	// look the device up among the node's pci devices
	const deviceData = await getDeviceInfo(params.node, params.type, params.vmid, device);
	if (!deviceData) {
		res.status(500).send({ error: `Could not find hostpci${params.hostpci}=${device} in ${params.node}.` });
		res.end();
		return;
	}
	res.status(200).send(deviceData);
	res.end();
});
|
||||
|
||||
/**
 * POST - modify existing instance pci device
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - hostpci: string - hostpci number
 * - device: string - new device id
 * - pcie: Boolean - whether to use pci express or pci
 * response:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {request: Object, error: string}
 * - 500: PVE Task Object
 */
router.post("/:hostpci/modify", async (req, res) => {
	// merge in the params forwarded by the parent router (node/type/vmid)
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		hostpci: req.params.hostpci.replace("hostpci", ""),
		device: req.body.device,
		pcie: req.body.pcie
	};
	// pci passthrough is only supported for qemu VMs
	if (params.type !== "qemu") {
		res.status(500).send({ error: "Type must be qemu (vm)." });
		res.end();
		return;
	}
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// force all functions (drop any .function suffix from the device id)
	params.device = params.device.split(".")[0];
	// get instance config to check if device has not changed
	// FIX: was `params.cookies`, which is always undefined since params never has a
	// cookies key; use req.cookies
	const config = (await requestPVE(`/nodes/${params.node}/${params.type}/${params.vmid}/config`, "GET", req.cookies, null, pveAPIToken)).data.data;
	const currentDeviceData = await getDeviceInfo(params.node, params.type, params.vmid, config[`hostpci${params.hostpci}`].split(",")[0]);
	if (!currentDeviceData) {
		res.status(500).send({ error: `No device in hostpci${params.hostpci}.` });
		res.end();
		return;
	}
	// only check user and node availability if base id is different
	if (currentDeviceData.id.split(".")[0] !== params.device) {
		// setup request
		const deviceData = await getDeviceInfo(params.node, params.type, params.vmid, params.device);
		const request = { pci: deviceData.device_name };
		// check resource approval
		if (!await approveResources(req, req.cookies.username, request)) {
			res.status(500).send({ request, error: `Could not fulfil request for ${deviceData.device_name}.` });
			res.end();
			return;
		}
		// check node availability
		const nodeAvailPci = await getNodeAvailDevices(params.node, req.cookies);
		if (!nodeAvailPci.some(element => element.id.split(".")[0] === params.device)) {
			res.status(500).send({ error: `Device ${params.device} is already in use on ${params.node}.` });
			res.end();
			return;
		}
	}
	// setup action
	let action = {};
	action[`hostpci${params.hostpci}`] = `${params.device},pcie=${params.pcie}`;
	action = JSON.stringify(action);
	// commit action as root because proxmox only allows root to modify hostpci entries
	// FIX: `db` was referenced without being bound in this module; use global.db
	const rootauth = await requestPVE("/access/ticket", "POST", null, JSON.stringify(global.db.getGlobalConfig().application.pveroot), null);
	if (!(rootauth.status === 200)) {
		res.status(rootauth.status).send({ auth: false, error: "API could not authenticate as root user." });
		res.end();
		return;
	}
	const rootcookies = {
		PVEAuthCookie: rootauth.data.data.ticket,
		CSRFPreventionToken: rootauth.data.data.CSRFPreventionToken
	};
	const result = await requestPVE(`${vmpath}/config`, "POST", rootcookies, action, null);
	await handleResponse(params.node, result, res);
});
|
||||
|
||||
/**
 * POST - add new instance pci device
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - device: string - new device id
 * - pcie: Boolean - whether to use pci express or pci
 * response:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {request: Object, error: string}
 * - 500: PVE Task Object
 */
router.post("/create", async (req, res) => {
	// merge in the params forwarded by the parent router (node/type/vmid)
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		device: req.body.device,
		pcie: req.body.pcie
	};
	// pci passthrough is only supported for qemu VMs
	if (params.type !== "qemu") {
		res.status(500).send({ error: "Type must be qemu (vm)." });
		res.end();
		return;
	}
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// force all functions (drop any .function suffix from the device id)
	params.device = params.device.split(".")[0];
	// get instance config to find next available hostpci slot
	// FIX: the old code neither awaited requestPVE nor unwrapped .data.data, so
	// `config` was a Promise and the slot scan below always chose hostpci0; it also
	// passed the undefined `params.cookies` with no token
	const config = (await requestPVE(`/nodes/${params.node}/${params.type}/${params.vmid}/config`, "GET", req.cookies, null, pveAPIToken)).data.data;
	let hostpci = 0;
	while (config[`hostpci${hostpci}`]) {
		hostpci++;
	}
	// setup request
	const deviceData = await getDeviceInfo(params.node, params.type, params.vmid, params.device);
	const request = {
		pci: deviceData.device_name
	};
	// check resource approval
	if (!await approveResources(req, req.cookies.username, request)) {
		res.status(500).send({ request, error: `Could not fulfil request for ${deviceData.device_name}.` });
		res.end();
		return;
	}
	// check node availability
	const nodeAvailPci = await getNodeAvailDevices(params.node, req.cookies);
	if (!nodeAvailPci.some(element => element.id.split(".")[0] === params.device)) {
		res.status(500).send({ error: `Device ${params.device} is already in use on ${params.node}.` });
		res.end();
		return;
	}
	// setup action
	let action = {};
	action[`hostpci${hostpci}`] = `${params.device},pcie=${params.pcie}`;
	action = JSON.stringify(action);
	// commit action as root because proxmox only allows root to modify hostpci entries
	// FIX: `db` was referenced without being bound in this module; use global.db
	const rootauth = await requestPVE("/access/ticket", "POST", null, JSON.stringify(global.db.getGlobalConfig().application.pveroot), null);
	if (!(rootauth.status === 200)) {
		res.status(rootauth.status).send({ auth: false, error: "API could not authenticate as root user." });
		res.end();
		return;
	}
	const rootcookies = {
		PVEAuthCookie: rootauth.data.data.ticket,
		CSRFPreventionToken: rootauth.data.data.CSRFPreventionToken
	};
	const result = await requestPVE(`${vmpath}/config`, "POST", rootcookies, action, null);
	await handleResponse(params.node, result, res);
});
|
||||
|
||||
/**
 * DELETE - delete instance pci device
 * request:
 * - node: string - vm host node id
 * - type: string - vm type (lxc, qemu)
 * - vmid: number - vm id number
 * - hostpci: string - hostpci number
 * response:
 * - 200: PVE Task Object
 * - 401: {auth: false, path: string}
 * - 500: {request: Object, error: string}
 * - 500: PVE Task Object
 */
router.delete("/:hostpci/delete", async (req, res) => {
	// merge in the params forwarded by the parent router (node/type/vmid)
	req.params = Object.assign({}, req.routeparams, req.params);
	const params = {
		node: req.params.node,
		type: req.params.type,
		vmid: req.params.vmid,
		hostpci: req.params.hostpci.replace("hostpci", "")
	};
	// pci passthrough is only supported for qemu VMs
	if (params.type !== "qemu") {
		res.status(500).send({ error: "Type must be qemu (vm)." });
		res.end();
		return;
	}
	// check auth for specific instance
	const vmpath = `/nodes/${params.node}/${params.type}/${params.vmid}`;
	const auth = await checkAuth(req.cookies, res, vmpath);
	if (!auth) {
		return;
	}
	// check device is in instance config
	const config = (await requestPVE(`${vmpath}/config`, "GET", req.cookies)).data.data;
	if (!config[`hostpci${params.hostpci}`]) {
		res.status(500).send({ error: `Could not find hostpci${params.hostpci} in ${params.vmid}.` });
		res.end();
		return;
	}
	// setup action
	const action = JSON.stringify({ delete: `hostpci${params.hostpci}` });
	// commit action, need to use root user here because proxmox api only allows root to modify hostpci for whatever reason
	// FIX: `db` was referenced without being bound in this module; use global.db
	const rootauth = await requestPVE("/access/ticket", "POST", null, JSON.stringify(global.db.getGlobalConfig().application.pveroot), null);
	if (!(rootauth.status === 200)) {
		res.status(rootauth.status).send({ auth: false, error: "API could not authenticate as root user." });
		res.end();
		return;
	}
	const rootcookies = {
		PVEAuthCookie: rootauth.data.data.ticket,
		CSRFPreventionToken: rootauth.data.data.CSRFPreventionToken
	};
	const result = await requestPVE(`${vmpath}/config`, "POST", rootcookies, action, null);
	await handleResponse(params.node, result, res);
});
|
24
src/routes/proxmox.js
Normal file
24
src/routes/proxmox.js
Normal file
@ -0,0 +1,24 @@
|
||||
import { Router } from "express";
|
||||
export const router = Router();
|
||||
|
||||
const requestPVE = global.pve.requestPVE;
|
||||
|
||||
/**
 * GET - proxy proxmox api without privilege elevation
 * request and responses passed through to/from proxmox
 */
router.get("/*", async (req, res) => {
	// strip the local API prefix; the remainder maps directly onto a PVE API path
	const pvePath = req.url.replace("/api/proxmox", "");
	// forward with the caller's own cookies, so no privilege is added
	const response = await requestPVE(pvePath, "GET", req.cookies);
	res.status(response.status).send(response.data);
});
|
||||
|
||||
/**
 * POST - proxy proxmox api without privilege elevation
 * request and responses passed through to/from proxmox
 */
router.post("/*", async (req, res) => {
	// strip the local API prefix; the remainder maps directly onto a PVE API path
	const pvePath = req.url.replace("/api/proxmox", "");
	// body must be re-stringified before forwarding; forward the caller's own cookies
	const response = await requestPVE(pvePath, "POST", req.cookies, JSON.stringify(req.body));
	res.status(response.status).send(response.data);
});
|
264
src/routes/sync.js
Normal file
264
src/routes/sync.js
Normal file
@ -0,0 +1,264 @@
|
||||
import { WebSocketServer } from "ws";
|
||||
import * as cookie from "cookie";
|
||||
|
||||
import { Router } from "express";
|
||||
export const router = Router();
|
||||
|
||||
const requestPVE = global.pve.requestPVE;
|
||||
const checkAuth = global.utils.checkAuth;
|
||||
const db = global.db;
|
||||
const pveAPIToken = global.db.pveAPIToken;
|
||||
const getObjectHash = global.utils.getObjectHash;
|
||||
const getTimeLeft = global.utils.getTimeLeft;
|
||||
|
||||
// maps usernames to socket object(s); one user may hold several live sockets
const userSocketMap = {};
// maps pool ids to users
const poolUserMap = {};
// maps sockets to their requested rates (ms); Infinity means no rate requested yet
const requestedRates = {};
// stores the next queued interrupt handler
let timer = null;
// previous cluster state for interrupt handler
let prevState = {};
// target ms value
let targetMSTime = null;

// synchronization scheme toggles and the resource types to track, from global config
const schemes = db.getGlobalConfig().clientsync.schemes;
const resourceTypes = db.getGlobalConfig().clientsync.resourcetypes;
|
||||
/**
 * GET - get list of supported synchronization schemes
 * responses:
 * - 200 : {always: boolean, hash: boolean, interrupt: boolean}
 */
router.get("/schemes", async (_req, res) => {
	// no auth required; this only reveals which sync mechanisms are configured
	res.send(schemes);
});
|
||||
|
||||
// the "always" scheme needs no server-side state; just log that it is enabled
if (schemes.always.enabled) {
	console.log("clientsync: enabled always sync");
}
|
||||
|
||||
// setup hash scheme
if (schemes.hash.enabled) {
	/**
	 * GET - get hash of current cluster resources states
	 * Client can use this endpoint to check for cluster state changes to avoid costly data transfers to the client.
	 * responses:
	 * - 401: {auth: false}
	 * - 200: string
	 */
	router.get("/hash", async (req, res) => {
		// reject unauthenticated callers
		if (!await checkAuth(req.cookies, res)) {
			return;
		}
		// fetch current cluster resources with the caller's credentials
		const status = (await requestPVE("/cluster/resources", "GET", req.cookies)).data.data;
		// reduce to just the tracked state fields, then hash the result
		const clusterState = extractClusterState(status, resourceTypes);
		res.status(200).send(getObjectHash(clusterState));
	});
	console.log("clientsync: enabled hash sync");
}
|
||||
|
||||
// setup interrupt scheme: clients open a websocket and request a polling rate;
// the server polls the cluster at the fastest requested rate and pushes "sync"
// messages to users whose visible resources changed
if (schemes.interrupt.enabled) {
	const wsServer = new WebSocketServer({ noServer: true, path: "/api/sync/interrupt" });
	wsServer.on("connection", (socket, username, pool) => {
		// add new socket to userSocketmap, keyed by a per-user index stored on the socket
		if (userSocketMap[username]) {
			const index = Object.keys(userSocketMap[username]).length;
			userSocketMap[username][index] = socket;
			socket.userIndex = index;
		}
		else {
			userSocketMap[username] = { 0: socket };
			socket.userIndex = 0;
		}
		// add user to associated pool in poolUserMap
		if (poolUserMap[pool]) {
			poolUserMap[pool][username] = true;
		}
		else {
			poolUserMap[pool] = {};
			poolUserMap[pool][username] = true;
		}
		// add socket entry into requestedRates; Infinity = no rate requested yet
		const index = Object.keys(requestedRates).length;
		requestedRates[index] = Infinity;
		socket.rateIndex = index;
		// handle socket error
		socket.on("error", console.error);
		// handle socket close: unwind every registration made above
		socket.on("close", () => {
			// remove closed socket from userSocketMap user entry
			delete userSocketMap[username][socket.userIndex];
			// if there are no more sockets for this user, delete the userSocketMap user entry and the poolUserMap user entry
			if (Object.keys(userSocketMap[username]).length === 0) {
				// delete the user entry
				delete userSocketMap[username];
				// remove user from poolUserMap pool entry
				delete poolUserMap[pool][username];
				// if the poolUserMap pool entry is empty, delete the entry
				if (Object.keys(poolUserMap[pool]).length === 0) {
					delete poolUserMap[pool];
				}
			}
			// remove socket entry from requestedRates
			delete requestedRates[socket.rateIndex];
			if (Object.keys(requestedRates).length === 0) { // if there are no requested rates left, clear the timer
				clearTimeout(timer);
				timer = null;
				targetMSTime = null;
			}
			// terminate socket
			socket.terminate();
		});
		// handle socket incoming message; protocol is a space-separated "<cmd> <arg>" string
		socket.on("message", (message) => {
			const parsed = message.toString().split(" ");
			const cmd = parsed[0];
			// command is rate and the value is valid (within configured min/max, in seconds)
			if (cmd === "rate" && parsed[1] >= schemes.interrupt["min-rate"] && parsed[1] <= schemes.interrupt["max-rate"]) {
				// get requested rate in ms
				const rate = Number(parsed[1]) * 1000;
				// if timer has not started, start it with requested rate
				if (!timer) {
					timer = setTimeout(handleInterruptSync, rate);
					const time = global.process.uptime();
					targetMSTime = time - Math.floor(time);
				}
				// otherwise, if the timer has started but the rate is lower than the current minimum
				// AND if the next event trigger is more than the new rate in the future,
				// restart the timer with the new rate
				// avoids a large requested rate preventing a faster rate from being fulfilled
				else if (rate < Math.min.apply(null, Object.values(requestedRates)) && getTimeLeft(timer) > rate) {
					clearTimeout(timer);
					timer = setTimeout(handleInterruptSync, rate);
					const time = global.process.uptime();
					targetMSTime = time - Math.floor(time);
				}
				// otherwise just add the rate to the list, when the next event trigger happens it will be requeued with the new requested rates
				requestedRates[socket.rateIndex] = rate;
			}
			// command is rate but the requested value is out of bounds, terminate socket
			else if (cmd === "rate") {
				socket.send(`error: rate must be in range [${schemes.interrupt["min-rate"]}, ${schemes.interrupt["max-rate"]}].`);
				socket.terminate();
			}
			// otherwise, command is invalid, terminate socket
			else {
				socket.send(`error: ${cmd} command not found.`);
				socket.terminate();
			}
		});
	});
	// handle the wss upgrade request: authenticate the cookies against PVE before accepting
	global.server.on("upgrade", async (req, socket, head) => {
		const cookies = cookie.parse(req.headers.cookie || "");
		const auth = (await requestPVE("/version", "GET", cookies)).status === 200;
		if (!auth) {
			socket.destroy();
		}
		else {
			wsServer.handleUpgrade(req, socket, head, (socket) => {
				const pool = db.getUserConfig(cookies.username).cluster.pool;
				wsServer.emit("connection", socket, cookies.username, pool);
			});
		}
	});
	// periodic poll: diffs cluster state against the previous iteration and
	// notifies affected users, then reschedules itself at the minimum requested rate
	const handleInterruptSync = async () => {
		// queue timeout for next iteration with delay of minimum rate
		const minRequestedRate = Math.min.apply(null, Object.values(requestedRates));
		// if the minimum rate is not Infinity, schedule the next timer
		if (minRequestedRate < Infinity) {
			// subtract scheduling drift so iterations stay aligned to targetMSTime
			const time = global.process.uptime() - targetMSTime;
			const delay = (time - Math.round(time)) * 1000;
			timer = setTimeout(handleInterruptSync, minRequestedRate - delay);
		}
		// if the minimum rate is Infinity, then don't schedule anything and set timer to null
		else {
			timer = null;
			targetMSTime = null;
			return;
		}
		// get current cluster resources (via API token; no user cookies available here)
		const status = (await requestPVE("/cluster/resources", "GET", null, null, pveAPIToken)).data.data;
		// filter out just state information of resources that are needed, and hash each one
		const currState = extractClusterState(status, resourceTypes, true);
		// get a map of users to send sync notifications
		const syncUsers = {};
		// for each current resource in the cluster, check for state changes
		Object.keys(currState).forEach((resource) => {
			// if the resource's current state has changed, add all relevant users to syncUsers
			const resourceCurrState = currState[resource];
			const resourcePrevState = prevState[resource];
			// if the previous state did not exist, or the per-resource hash changed, then a resource state was added or modified
			if (!resourcePrevState || resourceCurrState.hash !== resourcePrevState.hash) {
				// if the resource is a node, send sync to all users
				if (resourceCurrState.type === "node") {
					Object.keys(userSocketMap).forEach((user) => {
						syncUsers[user] = true;
					});
				}
				// if the resource is qemu or lxc, send sync to users in the same pool if there is a pool and if the pool has users
				else if (resourceCurrState.pool && poolUserMap[resourceCurrState.pool]) {
					Object.keys(poolUserMap[resourceCurrState.pool]).forEach((user) => {
						syncUsers[user] = true;
					});
				}
			}
		});
		// for each previous resource in the cluster, check for state changes
		Object.keys(prevState).forEach((resource) => {
			const resourceCurrState = currState[resource];
			const resourcePrevState = prevState[resource];
			// if the resource no longer exists in the current state, then it is lost or deleted
			if (!resourceCurrState) {
				// if the resource is a node, send sync to all users
				if (resourcePrevState.type === "node") {
					Object.keys(userSocketMap).forEach((user) => {
						syncUsers[user] = true;
					});
				}
				// if the resource is qemu or lxc, send sync to users in the same pool if there is a pool and if the pool has users
				else if (resourcePrevState.pool && poolUserMap[resourcePrevState.pool]) {
					Object.keys(poolUserMap[resourcePrevState.pool]).forEach((user) => {
						syncUsers[user] = true;
					});
				}
			}
		});
		// for each user in syncUsers, send a sync message over their registered sockets
		for (const user of Object.keys(syncUsers)) {
			for (const socket of Object.keys(userSocketMap[user])) {
				userSocketMap[user][socket].send("sync");
			}
		}
		// set prevState for next iteration
		prevState = currState;
	};
	console.log("clientsync: enabled interrupt sync");
}
|
||||
|
||||
/**
 * Reduce a raw PVE /cluster/resources listing to just the state fields the
 * client sync logic tracks, keyed by resource id.
 * @param {Array.<Object>} status - raw resource entries from the PVE cluster API
 * @param {Array.<string>} resourceTypes - resource type names to keep
 * @param {boolean} [hashIndividual=false] - when true, attach a hash of each entry's state
 * @returns {Object} map of resource id to {name, type, status, node, pool[, hash]}
 */
function extractClusterState (status, resourceTypes, hashIndividual = false) {
	const state = {};
	for (const resource of status) {
		// skip resource types the client does not track
		if (!resourceTypes.includes(resource.type)) {
			continue;
		}
		const entry = {
			name: resource.name || null,
			type: resource.type,
			status: resource.status,
			node: resource.node,
			pool: resource.pool || null
		};
		// hash is computed over the state fields only, before being attached
		if (hashIndividual) {
			entry.hash = getObjectHash(entry);
		}
		state[resource.id] = entry;
	}
	return state;
}
|
76
src/routes/user.js
Normal file
76
src/routes/user.js
Normal file
@ -0,0 +1,76 @@
|
||||
import { Router } from "express";
|
||||
export const router = Router();
|
||||
|
||||
const requestPVE = global.pve.requestPVE;
|
||||
const checkAuth = global.utils.checkAuth;
|
||||
const getUserResources = global.utils.getUserResources;
|
||||
const pveAPIToken = global.db.pveAPIToken;
|
||||
|
||||
/**
 * GET - get db user resource information including allocated, free, and maximum resource values along with resource metadata
 * responses:
 * - 200: {avail: Object, max: Object, used: Object, resources: Object}
 * - 401: {auth: false}
 */
router.get("/dynamic/resources", async (req, res) => {
	// reject unauthenticated callers
	if (!await checkAuth(req.cookies, res)) {
		return;
	}
	// compute and return the user's current resource usage summary
	const resources = await getUserResources(req, req.cookies.username);
	res.status(200).send(resources);
});
|
||||
|
||||
/**
 * GET - get db user configuration by key
 * request:
 * - key: string - user config key
 * responses:
 * - 200: Object
 * - 401: {auth: false}
 * - 401: {auth: false, error: string}
 */
router.get("/config/:key", async (req, res) => {
	const params = {
		key: req.params.key
	};
	// check auth
	const auth = await checkAuth(req.cookies, res);
	if (!auth) {
		return;
	}
	// only these top-level user-config sections may be read through this route
	const allowKeys = ["resources", "cluster", "nodes"];
	if (allowKeys.includes(params.key)) {
		// FIX: `db` was never bound in this module (only pveAPIToken was pulled
		// off global.db), so this line threw a ReferenceError at request time.
		// NOTE(review): assumes getUserConfig is exposed on global.db — confirm
		// the db module's export shape.
		const config = global.db.getUserConfig(req.cookies.username);
		res.status(200).send(config[params.key]);
	}
	else {
		res.status(401).send({ auth: false, error: `User is not authorized to access /user/config/${params.key}.` });
	}
});
|
||||
|
||||
/**
 * GET - get user accessible iso files
 * response:
 * - 200: Array.<Object>
 * - 401: {auth: false}
 */
router.get("/iso", async (req, res) => {
	// check auth
	const auth = await checkAuth(req.cookies, res);
	if (!auth) {
		return;
	}
	// get user iso config (node + storage holding the shared iso images)
	// FIX: `db` was never bound in this module (only pveAPIToken was pulled off
	// global.db), so this line threw a ReferenceError at request time.
	// NOTE(review): assumes getGlobalConfig is exposed on global.db — confirm
	// the db module's export shape.
	const userIsoConfig = global.db.getGlobalConfig().useriso;
	// get all isos; use the API token since the user may lack direct storage access
	const isos = (await requestPVE(`/nodes/${userIsoConfig.node}/storage/${userIsoConfig.storage}/content?content=iso`, "GET", null, null, pveAPIToken)).data.data;
	// expose a friendly name with the "<storage>:iso/" volid prefix stripped
	const userIsos = isos.map((iso) => {
		iso.name = iso.volid.replace(`${userIsoConfig.storage}:iso/`, "");
		return iso;
	});
	// FIX: sort() with no comparator stringifies elements ("[object Object]"
	// for every iso), yielding no meaningful order; sort alphabetically by name.
	userIsos.sort((a, b) => a.name.localeCompare(b.name));
	res.status(200).send(userIsos);
});
|
@ -1,7 +1,7 @@
|
||||
import { createHash } from "crypto";
|
||||
|
||||
import { getUsedResources, requestPVE } from "./pve.js";
|
||||
import { db } from "./db.js";
|
||||
import db from "./db.js";
|
||||
|
||||
/**
|
||||
* Check if a user is authorized to access a specified vm, or the cluster in general.
|
||||
|
Loading…
Reference in New Issue
Block a user