refator_comm1
app.py | 182
@@ -64,7 +64,7 @@ def stop_if_running(unit: str, out: List[str]) -> None:
 def ha_node_maint(enable: bool, node: str, out: List[str]) -> None:
     cmd = ["ha-manager", "crm-command", "node-maintenance", "enable" if enable else "disable", node]
     out.append("$ " + " ".join(shlex.quote(x) for x in cmd))
-    r = run(cmd, timeout=timeout or 25)
+    r = run(cmd, timeout=25)
     if r.returncode != 0:
         out.append(f"ERR: {r.stderr.strip()}")
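Note on the change above: the removed call passed timeout=timeout or 25, yet no timeout name appears in the function's signature, so hard-coding 25 is the straightforward fix. A minimal sketch of the alternative, threading an explicit timeout through, follows; the extra parameter is an assumption and not part of this commit, and run is assumed to be app.py's existing subprocess wrapper.

import shlex
from typing import List

# Hypothetical variant (not from the commit): make the timeout configurable
# instead of hard-coding 25 seconds. `run` is assumed to return an object
# with returncode/stderr like subprocess.CompletedProcess.
def ha_node_maint(enable: bool, node: str, out: List[str], timeout: int = 25) -> None:
    cmd = ["ha-manager", "crm-command", "node-maintenance",
           "enable" if enable else "disable", node]
    out.append("$ " + " ".join(shlex.quote(x) for x in cmd))
    r = run(cmd, timeout=timeout)
    if r.returncode != 0:
        out.append(f"ERR: {r.stderr.strip()}")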
@@ -169,11 +169,11 @@ def cluster_vmct_index() -> Dict[str, str]:
     items = get_json(["pvesh", "get", "/cluster/resources"]) or []
     idx: Dict[str, str] = {}
     for it in items:
-        t = it.get("type")
-        if t not in ("qemu", "lxc"): continue
-        vmid = it.get("vmid"); node = it.get("node")
-        sid = (("vm" if t=="qemu" else "ct") + f":{vmid}") if vmid is not None else norm_sid(it.get("id"))
-        if sid and node: idx[sid] = node
+        t = it.get("type")
+        if t not in ("qemu", "lxc"): continue
+        vmid = it.get("vmid"); node = it.get("node")
+        sid = (("vm" if t=="qemu" else "ct") + f":{vmid}") if vmid is not None else norm_sid(it.get("id"))
+        if sid and node: idx[sid] = node
     return idx

 def cluster_vmct_meta() -> Dict[str, Dict[str, Any]]:
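For orientation, a sketch of the mapping this function builds from /cluster/resources; the VMIDs and node names below are invented.

# Invented sample items as /cluster/resources might return them.
items = [
    {"type": "qemu", "vmid": 100, "node": "pve1", "id": "qemu/100"},
    {"type": "lxc",  "vmid": 101, "node": "pve2", "id": "lxc/101"},
    {"type": "storage", "id": "storage/pve1/local"},  # ignored: not qemu/lxc
]
# Over data like this, cluster_vmct_index() yields a sid -> node map:
# {"vm:100": "pve1", "ct:101": "pve2"}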
@@ -195,34 +195,21 @@ def merge_resources(api_res: List[Dict[str, Any]],
                     parsed_res: List[Dict[str, Any]],
                     vmct_idx: Dict[str, str]) -> List[Dict[str, Any]]:
     by_sid: Dict[str, Dict[str, Any]] = {}

     # seed from /cluster/ha/resources
     for r in (api_res or []):
         sid = norm_sid(r.get("sid"))
-        if not sid:
-            continue
-        x = dict(r)
-        x["sid"] = sid
-        by_sid[sid] = x
+        if not sid: continue
+        x = dict(r); x["sid"] = sid; by_sid[sid] = x

     # merge runtime from ha-manager
     for r in (parsed_res or []):
         sid = norm_sid(r.get("sid"))
-        if not sid:
-            continue
+        if not sid: continue
         x = by_sid.get(sid, {"sid": sid})
         for k, v in r.items():
-            if k == "sid":
-                continue
-            if v not in (None, ""):
-                x[k] = v
+            if k == "sid": continue
+            if v not in (None, ""): x[k] = v
         by_sid[sid] = x

     # fill node from /cluster/resources
     for sid, x in by_sid.items():
         if not x.get("node") and sid in vmct_idx:
             x["node"] = vmct_idx[sid]

     return list(by_sid.values())

 # ---------------- VM details ----------------
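To make the merge order concrete (invented sample data): entries from /cluster/ha/resources seed the map, non-empty fields parsed from ha-manager output overwrite them, and /cluster/resources only fills in a node that is still missing.

# Invented inputs matching the shapes used above.
api_res = [{"sid": "vm:100", "group": "prod", "state": "started"}]
parsed_res = [{"sid": "vm:100", "state": "error", "node": ""}]  # empty node is ignored
vmct_idx = {"vm:100": "pve1"}

# merge_resources(api_res, parsed_res, vmct_idx) would return roughly:
# [{"sid": "vm:100", "group": "prod", "state": "error", "node": "pve1"}]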
@@ -265,20 +252,11 @@ def node_detail_payload(name: str) -> Dict[str, Any]:
     timeinfo = get_json(["pvesh", "get", f"/nodes/{name}/time"]) or {}
     services = get_json(["pvesh", "get", f"/nodes/{name}/services"]) or []
     network_cfg = get_json(["pvesh", "get", f"/nodes/{name}/network"]) or []
-    netstat = get_json(["pvesh", "get", f"/nodes/{name}/netstat"]) or []  # may be empty on some versions
+    netstat = get_json(["pvesh", "get", f"/nodes/{name}/netstat"]) or []
     disks = get_json(["pvesh", "get", f"/nodes/{name}/disks/list"]) or []
     subscription = get_json(["pvesh", "get", f"/nodes/{name}/subscription"]) or {}
-    return {
-        "node": name,
-        "status": status,
-        "version": version,
-        "time": timeinfo,
-        "services": services,
-        "network_cfg": network_cfg,
-        "netstat": netstat,
-        "disks": disks,
-        "subscription": subscription
-    }
+    return {"node": name,"status": status,"version": version,"time": timeinfo,"services": services,
+            "network_cfg": network_cfg,"netstat": netstat,"disks": disks,"subscription": subscription}

 def node_ha_services(node: str) -> Dict[str, str]:
     svcs = get_json(["pvesh", "get", f"/nodes/{node}/services"]) or []
@@ -294,40 +272,20 @@ def units_for_node(node: str) -> Dict[str, str]:
     wanted = {"watchdog-mux", "pve-ha-crm", "pve-ha-lrm"}
     svc = get_json(["pvesh", "get", f"/nodes/{node}/services"]) or []
     states: Dict[str, str] = {}

     def norm_state(s: dict) -> str:
-        raw_active = str(
-            s.get("active", "") or
-            s.get("active-state", "") or
-            s.get("ActiveState", "") or
-            s.get("activestate", "")
-        ).lower()
-
-        status = str(s.get("status", "")).lower()
-        substate = str(s.get("substate", "")).lower()
-        state = str(s.get("state", "")).lower()
-
-        any_active = (
-            raw_active in ("active", "running", "1", "true") or
-            status in ("active", "running") or
-            substate in ("running", "active") or
-            ("running" in state or "active" in state)
-        )
+        raw_active = str((s.get("active","") or s.get("active-state","") or s.get("ActiveState","") or s.get("activestate",""))).lower()
+        status = str(s.get("status","")).lower()
+        substate = str(s.get("substate","")).lower()
+        state = str(s.get("state","")).lower()
+        any_active = (raw_active in ("active","running","1","true") or status in ("active","running") or substate in ("running","active") or ("running" in state or "active" in state))
         return "active" if any_active else "inactive"

     for s in svc:
-        name_raw = (s.get("name") or "")
-        name = re.sub(r"\.service$", "", name_raw)
-        if name in wanted:
-            states[name] = norm_state(s)
-
+        name_raw = (s.get("name") or ""); name = re.sub(r"\.service$","",name_raw)
+        if name in wanted: states[name] = norm_state(s)
     for u in wanted:
         if states.get(u) != "active" and is_active(u):
             states[u] = "active"
-
-    for u in wanted:
-        states.setdefault(u, "inactive")
-
+    for u in wanted: states.setdefault(u, "inactive")
     return states

 # ---------------- snapshot ----------------
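The condensed norm_state above still accepts the differently named fields various PVE versions report; an invented sample of records it would classify:

# Invented service records; any of these would normalize to "active".
samples = [
    {"name": "pve-ha-crm.service", "state": "running"},
    {"name": "pve-ha-lrm", "active-state": "active"},
    {"name": "watchdog-mux", "status": "running"},
    {"name": "pve-ha-crm", "substate": "running"},
]
# A record with none of these markers, e.g. {"name": "pve-ha-lrm", "state": "dead"},
# would normalize to "inactive".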
@@ -343,16 +301,12 @@ def status_snapshot(node: str) -> Dict[str, Any]:
     if not ha_status and parsed.get("nodes"):
         ha_status = [{
             "node": n.get("node"), "state": n.get("state"),
-            "crm_state": n.get("crm", ""), "lrm_state": n.get("lrm", "")
+            "crm_state": n.get("crm",""), "lrm_state": n.get("lrm","")
         } for n in parsed["nodes"]]
     if not ha_status:
         for it in api.get("cluster_status", []):
             if it.get("type") == "node":
-                ha_status.append({
-                    "node": it.get("name"),
-                    "state": "online" if it.get("online") else "offline",
-                    "crm_state": "", "lrm_state": ""
-                })
+                ha_status.append({"node": it.get("name"),"state": "online" if it.get("online") else "offline","crm_state":"", "lrm_state":""})

     enriched = []
     for n in ha_status:
@@ -360,23 +314,17 @@ def status_snapshot(node: str) -> Dict[str, Any]:
         if node_name and (not crm or not lrm):
             try:
                 svc = node_ha_services(node_name)
-                if not crm: n["crm_state"] = svc.get("crm_state", "")
-                if not lrm: n["lrm_state"] = svc.get("lrm_state", "")
+                if not crm: n["crm_state"] = svc.get("crm_state","")
+                if not lrm: n["lrm_state"] = svc.get("lrm_state","")
             except Exception:
                 pass
         enriched.append(n)
     api["ha_status"] = enriched

     api["ha_resources"] = merge_resources(api.get("ha_resources", []), parsed.get("resources", []), vmct_ix)

     units = units_for_node(node or socket.gethostname())
-    return {
-        "node_arg": node, "hostname": socket.gethostname(),
-        "votequorum": vq, "units": units,
-        "cfgtool": get_cfgtool().strip(), "pvecm": get_pvecm_status().strip(),
-        "ha_raw": ha_raw, "replication": get_pvesr_status().strip(),
-        "api": api, "ts": int(time.time())
-    }
+    return {"node_arg": node, "hostname": socket.gethostname(), "votequorum": vq, "units": units,
+            "cfgtool": get_cfgtool().strip(), "pvecm": get_pvecm_status().strip(),
+            "ha_raw": ha_raw, "replication": get_pvesr_status().strip(), "api": api, "ts": int(time.time())}

 # ---------------- web ----------------
 @app.get("/")
@@ -384,11 +332,67 @@ def index():
     node = request.args.get("node", DEFAULT_NODE)
     return render_template("index.html", title=APP_TITLE, node=node)

+# Old aggregate snapshot (kept)
 @app.get("/api/info")
 def api_info():
     node = request.args.get("node", DEFAULT_NODE)
     return jsonify(status_snapshot(node))

+# --- NEW, lighter endpoints (faster page load) ---
+
+@app.get("/api/cluster")
+def api_cluster_brief():
+    vq = votequorum_brief()
+    api = api_cluster_data()
+    ha_raw = get_ha_status_raw().strip()
+    parsed = parse_ha_manager(ha_raw)
+    vmct_ix = cluster_vmct_index()
+    api["ha_resources"] = merge_resources(api.get("ha_resources", []), parsed.get("resources", []), vmct_ix)
+    return jsonify({
+        "votequorum": vq,
+        "cluster_status": api.get("cluster_status", []),
+        "ha_resources": api.get("ha_resources", []),
+        "pvecm": get_pvecm_status().strip(),
+        "cfgtool": get_cfgtool().strip(),
+        "hostname": socket.gethostname(),
+        "ts": int(time.time())
+    })
+
+@app.get("/api/nodes/summary")
+def api_nodes_summary():
+    nodes = enrich_nodes((api_cluster_data().get("nodes") or []))
+    return jsonify({ "nodes": nodes })
+
+@app.get("/api/units")
+def api_units():
+    node = request.args.get("node", DEFAULT_NODE)
+    return jsonify({ "units": units_for_node(node) })
+
+# Replication from all nodes
+@app.get("/api/replication/all")
+def api_replication_all():
+    jobs: List[Dict[str, Any]] = []
+    nodes = [n.get("node") for n in (api_cluster_data().get("nodes") or []) if n.get("node")]
+    for name in nodes:
+        # PVE 7/8: GET /nodes/{node}/replication or /nodes/{node}/replication/jobs
+        data = get_json(["pvesh", "get", f"/nodes/{name}/replication"]) or get_json(["pvesh","get",f"/nodes/{name}/replication/jobs"]) or []
+        # fallback: running pvesr status against a remote node can be awkward, so we parse the standard API structure
+        for it in (data or []):
+            jobs.append({
+                "node": name,
+                "job": it.get("id") or it.get("job") or "",
+                "enabled": "yes" if (it.get("enable",1) in (1, "1", True)) else "no",
+                "target": it.get("target") or it.get("target-node") or "",
+                "last": it.get("last_sync") or it.get("last_sync", ""),
+                "next": it.get("next_sync") or it.get("next_sync", ""),
+                "dur": it.get("duration") or it.get("duration", ""),
+                "fail": int(it.get("fail_count") or it.get("failcount") or 0),
+                "state": it.get("state") or it.get("status") or ""
+            })
+    return jsonify({ "jobs": jobs })
+
+# --- existing detail and list endpoints ---
+
 @app.get("/api/vm")
 def api_vm_detail():
     sid = request.args.get("sid", "")
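A hedged usage sketch for the new lightweight endpoints added in this hunk; the base URL and node name below are assumptions (the app binds wherever --bind points it), not something the commit defines.

import json
import urllib.request

BASE = "http://127.0.0.1:8080"  # assumed bind address, adjust to your --bind value

def get(path: str) -> dict:
    # Plain stdlib GET helper; all of these endpoints return JSON.
    with urllib.request.urlopen(BASE + path, timeout=10) as resp:
        return json.load(resp)

cluster = get("/api/cluster")                 # votequorum, cluster_status, merged ha_resources
nodes = get("/api/nodes/summary")["nodes"]    # enriched node list
units = get("/api/units?node=pve1")["units"]  # watchdog-mux / pve-ha-crm / pve-ha-lrm states
repl = get("/api/replication/all")["jobs"]    # replication jobs gathered from every node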
@@ -404,10 +408,7 @@ def api_list_vmct():
     meta = cluster_vmct_meta()
     ha_sids = {norm_sid(r.get("sid")) for r in (api_cluster_data().get("ha_resources") or []) if r.get("sid")}
    nonha = [v for k, v in meta.items() if k not in ha_sids]
-    return jsonify({
-        "nonha": nonha, "ha_index": list(ha_sids),
-        "count_nonha": len(nonha), "count_all_vmct": len(meta)
-    })
+    return jsonify({"nonha": nonha, "ha_index": list(ha_sids), "count_nonha": len(nonha), "count_all_vmct": len(meta)})

 @app.post("/api/enable")
 def api_enable():
@@ -431,8 +432,7 @@ def api_disable():
     ha_node_maint(False, node, log)
     return jsonify(ok=True, log=log)

-
-# --- VM/CT admin actions API ---
+# --- VM/CT admin actions API (stays as it was; nothing removed) ---

 def vm_locked(typ: str, node: str, vmid: int) -> bool:
     base = f"/nodes/{node}/{typ}/{vmid}"
@@ -499,8 +499,6 @@ def api_vm_action():
     except Exception as e:
         return jsonify(ok=False, error=str(e)), 500

-
-
 @app.get("/api/task-status")
 def api_task_status():
     upid = request.args.get("upid", "").strip()
@@ -510,7 +508,6 @@ def api_task_status():
     st = get_json(["pvesh", "get", f"/nodes/{node}/tasks/{upid}/status"]) or {}
     return jsonify(ok=True, status=st)

-
 @app.get("/api/task-log")
 def api_task_log():
     upid = request.args.get("upid", "").strip()
@@ -522,12 +519,9 @@ def api_task_log():
     start_i = 0
     if not upid or not node:
         return jsonify(ok=False, error="upid and node required"), 400
-    # Returns a list of {n: <line_no>, t: <text>}
     lines = get_json(["pvesh", "get", f"/nodes/{node}/tasks/{upid}/log", "-start", str(start_i)]) or []
-    # Compute next start
     next_start = start_i
     if isinstance(lines, list) and lines:
-        # find max n
         try:
             next_start = max((int(x.get("n", start_i)) for x in lines if isinstance(x, dict)), default=start_i) + 1
         except Exception:
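For reference, how the next_start value above advances across polls; the log lines are invented, in the {n: line_no, t: text} shape noted in the removed comment.

# Invented task-log entries as the removed comment describes them.
start_i = 0
lines = [{"n": 5, "t": "starting worker"}, {"n": 6, "t": "TASK OK"}]

next_start = start_i
if isinstance(lines, list) and lines:
    next_start = max((int(x.get("n", start_i)) for x in lines if isinstance(x, dict)),
                     default=start_i) + 1

# next_start == 7: the next request would pass "-start 7" and fetch only newer lines.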
@@ -542,4 +536,4 @@ if __name__ == "__main__":
     args = p.parse_args()
     DEFAULT_NODE = args.node
     host, port = args.bind.split(":")
-    app.run(host=host, port=int(port), debug=False, threaded=True)
+    app.run(host=host, port=int(port), debug=False, threaded=True)