Compare commits


No commits in common. "80eadbdddb3cabb001a94e50ed9578d7681b30dd" and "2083479945dcac46d1e7986abefe9b4d34413c43" have entirely different histories.

6 changed files with 41 additions and 52 deletions

flake.lock (generated)

@@ -1,13 +1,18 @@
 {
   "nodes": {
     "debBundler": {
-      "flake": false,
+      "inputs": {
+        "home-manager": "home-manager",
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
       "locked": {
-        "lastModified": 1705083181,
-        "narHash": "sha256-o1zolChrQk7UpMmFLjymjQWuoDIL1XStV56kuOprMDQ=",
+        "lastModified": 1699154900,
+        "narHash": "sha256-y+PK9ToYcAyY86EoM7Iam7gC++rCuAGndlnPTEzd3EA=",
         "owner": "illustris",
         "repo": "flake",
-        "rev": "6a9df656834b5111f7ffb0b1f6d97a0d8700de58",
+        "rev": "a56221a54571b0e4326af29cf75b4cec081b8de7",
         "type": "github"
       },
       "original": {
@@ -16,13 +21,34 @@
       "type": "github"
     }
   },
+  "home-manager": {
+    "inputs": {
+      "nixpkgs": [
+        "debBundler",
+        "nixpkgs"
+      ]
+    },
+    "locked": {
+      "lastModified": 1699025595,
+      "narHash": "sha256-e+o4PoSu2Z6Ww8y/AVUmMU200rNZoRK+p2opQ7Db8Rg=",
+      "owner": "nix-community",
+      "repo": "home-manager",
+      "rev": "8765d4e38aa0be53cdeee26f7386173e6c65618d",
+      "type": "github"
+    },
+    "original": {
+      "owner": "nix-community",
+      "repo": "home-manager",
+      "type": "github"
+    }
+  },
   "nixpkgs": {
     "locked": {
-      "lastModified": 1704722960,
-      "narHash": "sha256-mKGJ3sPsT6//s+Knglai5YflJUF2DGj7Ai6Ynopz0kI=",
+      "lastModified": 1699099776,
+      "narHash": "sha256-X09iKJ27mGsGambGfkKzqvw5esP1L/Rf8H3u3fCqIiU=",
       "owner": "nixos",
       "repo": "nixpkgs",
-      "rev": "317484b1ead87b9c1b8ac5261a8d2dd748a0492d",
+      "rev": "85f1ba3e51676fa8cc604a3d863d729026a6b8eb",
       "type": "github"
     },
     "original": {


@@ -5,7 +5,7 @@ rec {
     nixpkgs.url = github:nixos/nixpkgs/nixos-unstable;
     debBundler = {
       url = github:illustris/flake;
-      flake = false;
+      inputs.nixpkgs.follows = "nixpkgs";
     };
   };
@@ -14,7 +14,7 @@ rec {
   packages.x86_64-linux = with nixpkgs.legacyPackages.x86_64-linux; rec {
     pvemon = python3Packages.buildPythonApplication {
       pname = "pvemon";
-      version = "1.1.6";
+      version = "1.1.1";
       src = ./src;
       propagatedBuildInputs = with python3Packages; [
         pexpect
@@ -28,7 +28,7 @@ rec {
       };
     };
     default = pvemon;
-    deb = (import "${debBundler}/bundlers/deb" { inherit pkgs; }) default;
+    deb = debBundler.bundlers.x86_64-linux.deb default;
     updateRelease = writeScriptBin "update-release" (builtins.readFile ./utils/update-release.sh);
   };


@@ -30,20 +30,11 @@ def ttl_cache_with_randomness(max_ttl, randomness_factor):
             result = func(*args, **kwargs)
             cache[key] = (result, time.time())
             return result
-        def invalidate_cache(*args, **kwargs):
-            key = str(args) + str(kwargs)
-            if key in cache:
-                del cache[key]
-        # Attach the invalidation function to the wrapper
-        wrapper.invalidate_cache = invalidate_cache
         return wrapper
     return decorator
 @ttl_cache_with_randomness(qm_max_ttl, qm_rand)
-def qm_term_cmd(vm_id, cmd, timeout=global_qm_timeout): # TODO: ignore cmd timeout in cache key
+def qm_term_cmd(vm_id, cmd, timeout=global_qm_timeout):
     global deferred_closing
     child = pexpect.spawn(f'qm monitor {vm_id}')
     try:
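Note on the hunk above: ttl_cache_with_randomness is a memoizing decorator, and the removed lines are what attached an invalidate_cache helper to the wrapper it returns. The sketch below is illustrative only, not the project's code; it assumes a simplified cache keyed by stringified arguments and a fixed TTL, without the randomness factor.

import functools
import time

def ttl_cache(max_ttl):
    # Simplified stand-in for ttl_cache_with_randomness: cache results per
    # argument combination for up to max_ttl seconds.
    def decorator(func):
        cache = {}
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            key = str(args) + str(kwargs)
            if key in cache:
                result, stored_at = cache[key]
                if time.time() - stored_at < max_ttl:
                    return result
            result = func(*args, **kwargs)
            cache[key] = (result, time.time())
            return result
        def invalidate_cache(*args, **kwargs):
            # Drop the cached entry for one specific argument combination.
            cache.pop(str(args) + str(kwargs), None)
        # Attach the invalidation function to the wrapper, as the removed lines did.
        wrapper.invalidate_cache = invalidate_cache
        return wrapper
    return decorator

@ttl_cache(max_ttl=30)
def expensive(x):
    return x * 2

expensive(21)                    # computed and cached
expensive.invalidate_cache(21)   # the next expensive(21) call recomputes

On the side of the diff without invalidate_cache, callers cannot flush a stale entry; they simply wait for the TTL to expire.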


@@ -51,21 +51,6 @@ info_settings = [
 flag_to_label_value = lambda args, match: next((args[i+1] for i, x in enumerate(args[:-1]) if x == match), "unknown").split(",")[0]
-def parse_mem(cmdline):
-    ret = flag_to_label_value(cmdline, "-m")
-    # lazy way to detect NUMA
-    # the token after -m might look something like 'size=1024,slots=255,maxmem=4194304M'
-    if ret.isnumeric():
-        return int(ret)*1024
-    # probably using NUMA
-    ret = 0
-    for arg in cmdline:
-        if "memory-backend-ram" in arg:
-            assert(arg[-1]=='M')
-            ret += 1024*int(arg.split("=")[-1][:-1])
-    return ret
 def create_or_get_gauge(metric_name, labels, dynamic_gauges, gauge_lock):
     with gauge_lock:
         if metric_name not in dynamic_gauges:
@@ -147,17 +132,11 @@ def collect_kvm_metrics():
     for proc in psutil.process_iter(['pid', 'name', 'exe', 'cmdline', 'cpu_percent', 'memory_percent', 'num_threads']):
         try:
             if proc.info['exe'] == '/usr/bin/qemu-system-x86_64':
-                vmid = flag_to_label_value(proc.info['cmdline'], "-id")
-                # Check if VM definition exists. If it is missing, qm commands will fail.
-                # VM configs are typically missing when a VM is migrating in.
-                # The config file is moved after the drives and memory are synced.
-                if not os.path.exists(f'/etc/pve/qemu-server/{vmid}.conf'):
-                    continue
                 procs.append(
                     (
                         proc,
                         proc.info['cmdline'],
-                        vmid
+                        flag_to_label_value(proc.info['cmdline'], "-id")
                     )
                 )
         except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
@@ -172,7 +151,7 @@ def collect_kvm_metrics():
         d = {
             "kvm_vcores": flag_to_label_value(cmdline,"-smp"),
-            "kvm_maxmem": parse_mem(cmdline),
+            "kvm_maxmem": int(flag_to_label_value(cmdline,"-m"))*1024,
             "kvm_memory_percent": proc.info['memory_percent'],
             "kvm_threads": proc.info['num_threads'],
         }
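For context on the hunks above: flag_to_label_value pulls the token that follows a given flag out of a QEMU command line (keeping only the part before the first comma), and the removed parse_mem falls back to summing memory-backend-ram sizes when -m is not a plain number. The following is an illustrative rerun of that logic against a made-up command line; only flag_to_label_value and parse_mem come from the diff, everything else is example data.

flag_to_label_value = lambda args, match: next((args[i+1] for i, x in enumerate(args[:-1]) if x == match), "unknown").split(",")[0]

def parse_mem(cmdline):
    ret = flag_to_label_value(cmdline, "-m")
    if ret.isnumeric():
        return int(ret)*1024            # plain numeric "-m" value, scaled by 1024
    ret = 0                             # NUMA case: sum the backend object sizes
    for arg in cmdline:
        if "memory-backend-ram" in arg:
            assert(arg[-1]=='M')
            ret += 1024*int(arg.split("=")[-1][:-1])
    return ret

# Made-up QEMU command line with two 2048M NUMA nodes.
cmdline = [
    "/usr/bin/qemu-system-x86_64", "-id", "101", "-smp", "4,sockets=1",
    "-m", "size=1024,slots=255,maxmem=4194304M",
    "-object", "memory-backend-ram,id=ram-node0,size=2048M",
    "-object", "memory-backend-ram,id=ram-node1,size=2048M",
]

print(flag_to_label_value(cmdline, "-id"))    # 101
print(flag_to_label_value(cmdline, "-smp"))   # 4 (text after the first comma is dropped)
print(flag_to_label_value(cmdline, "-m"))     # size=1024 -> not numeric
print(parse_mem(cmdline))                     # 4194304 (two 2048M nodes summed)

On the side of the diff without parse_mem, kvm_maxmem is computed as int(flag_to_label_value(cmdline,"-m"))*1024, which only handles the plain numeric -m form.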


@@ -5,8 +5,6 @@ import json
 import pvecommon
-extract_disk_info_max_retries = 1
 def get_device(disk_path):
     try:
         return os.readlink(disk_path).split('/')[-1]
@@ -28,7 +26,7 @@ def handle_json_path(path):
         raise ValueError('No host_device driver found or filename is missing')
     return filename
-def extract_disk_info_from_monitor(vm_id, retries = 0):
+def extract_disk_info_from_monitor(vm_id):
     raw_output = pvecommon.qm_term_cmd(vm_id, 'info block')
     disks_map = {}
     disks = [x.strip() for x in raw_output.split("drive-")[1:]]
@@ -75,11 +73,6 @@ def extract_disk_info_from_monitor(vm_id, retries = 0):
            disks_map[disk_name]["vg_name"] = vg_name
            disks_map[disk_name]["vol_name"] = vol_name
            disks_map[disk_name]["device"] = get_device(disk_path)
-        # At this point, if disks_map[disk_name]["device"] exists and is None, the cache might be stale
-        # Flush the cache for this VMID and try again
-        if "device" in disks_map[disk_name] and disks_map[disk_name]["device"] == None and retries < extract_disk_info_max_retries:
-            pvecommon.qm_term_cmd.invalidate_cache(vm_id, 'info block')
-            return extract_disk_info_from_monitor(vm_id, retries+1)
         for line in data[1:-1]:
             if "Attached to" in line:
                 attached_to = line.split(":")[-1].strip()
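Note on the hunk above: the removed block used qm_term_cmd.invalidate_cache (see the decorator hunk earlier in this diff) to flush a possibly stale cache entry and retry once when a disk's device could not be resolved. A minimal self-contained sketch of that retry-on-stale-cache pattern follows; the names simple_cache, cached_block_info, and extract are hypothetical stand-ins, not the project's API.

import functools

def simple_cache(func):
    # Toy memoizer exposing an invalidate_cache hook, mirroring the hook
    # the removed lines rely on.
    cache = {}
    @functools.wraps(func)
    def wrapper(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    wrapper.invalidate_cache = lambda *args: cache.pop(args, None)
    return wrapper

calls = []

@simple_cache
def cached_block_info(vm_id, cmd):
    # Pretend the first reply is stale (device not yet resolvable), later ones are good.
    calls.append((vm_id, cmd))
    return {"device": None} if len(calls) == 1 else {"device": "dm-3"}

MAX_RETRIES = 1

def extract(vm_id, retries=0):
    info = cached_block_info(vm_id, "info block")
    if info["device"] is None and retries < MAX_RETRIES:
        cached_block_info.invalidate_cache(vm_id, "info block")
        return extract(vm_id, retries + 1)
    return info

print(extract(101))   # {'device': 'dm-3'} after one cache flush and retry

On the other side of the diff, extract_disk_info_from_monitor drops the retries parameter and returns whatever the cached 'info block' output says.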


@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 setup(
     name='pvemon',
-    version = "1.1.6",
+    version = "1.1.1",
     packages=find_packages(),
     entry_points={
         'console_scripts': [