Compare commits

..

15 Commits

Author SHA1 Message Date
illustris
bd955e8067 Bump version to 1.3.3 2025-05-11 05:28:45 +05:30
illustris
b1e9b1e0b5 Add host binding option 2025-05-11 05:27:37 +05:30
illustris
4ac1ba1f24 bump inputs 2025-05-11 05:22:46 +05:30
illustris
b57db23a35 Bump version to 1.3.2 2025-04-14 08:33:25 +05:30
illustris
ebcc08cc8f Use zpool command to measure ZFS pool sizes accurately
Replace statvfs with zpool list command for ZFS storage pools to get accurate
size and free space metrics. This resolves the 'mountpoint' key error for ZFS
pools and provides more accurate capacity information.
2025-04-14 08:30:44 +05:30
illustris
1d06e1c180 convert bool labels in storage info to strings 2025-04-14 08:19:05 +05:30
illustris
4cc5a1f207 Bump version to 1.3.1 2025-04-14 08:07:13 +05:30
illustris
8207792bf7 Fix storage config parsing for keys without values
Set keys without values (like 'sparse') to True when parsing storage configuration.
2025-04-14 08:06:10 +05:30
illustris
066753ebc7 Bump version to 1.3.0 2025-03-08 15:08:56 +05:30
illustris
46bd7d67d2 Add pool information to VM metrics
- Parse /etc/pve/user.cfg to extract pool membership for VMs
- Add pool-related labels to pve_kvm info metrics:
  - pool: Full hierarchical pool name
  - pool_levels: Number of pool hierarchy levels
  - pool1/pool2/pool3: Individual pool hierarchy levels
- Cache pool data based on file modification time to avoid repeated reads
2025-03-08 15:07:02 +05:30
illustris
7923d425a5 Create LICENSE 2024-09-09 01:06:49 +05:30
illustris
14db1fa68c Bump version to 1.2.0 2024-09-09 01:02:02 +05:30
illustris
9745364a72 collect storage info, size and free space 2024-09-09 00:53:59 +05:30
illustris
2eb85eed75 collect disk size 2024-09-09 00:53:41 +05:30
illustris
07c07a6b7d bump inputs 2024-09-09 00:53:41 +05:30
7 changed files with 354 additions and 21 deletions

21
LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 Harikrishnan R
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

12
flake.lock generated
View File

@@ -3,11 +3,11 @@
"debBundler": { "debBundler": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1705083181, "lastModified": 1746317543,
"narHash": "sha256-o1zolChrQk7UpMmFLjymjQWuoDIL1XStV56kuOprMDQ=", "narHash": "sha256-1Xph5g1Lazzkc9XuY1nOkG5Fn7+lmSdldAC91boDawY=",
"owner": "illustris", "owner": "illustris",
"repo": "flake", "repo": "flake",
"rev": "6a9df656834b5111f7ffb0b1f6d97a0d8700de58", "rev": "e86bd104d76d22b2ba36fede405e7bff290ef489",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -18,11 +18,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1704722960, "lastModified": 1746663147,
"narHash": "sha256-mKGJ3sPsT6//s+Knglai5YflJUF2DGj7Ai6Ynopz0kI=", "narHash": "sha256-Ua0drDHawlzNqJnclTJGf87dBmaO/tn7iZ+TCkTRpRc=",
"owner": "nixos", "owner": "nixos",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "317484b1ead87b9c1b8ac5261a8d2dd748a0492d", "rev": "dda3dcd3fe03e991015e9a74b22d35950f264a54",
"type": "github" "type": "github"
}, },
"original": { "original": {

View File

@@ -14,7 +14,7 @@ rec {
packages.x86_64-linux = with nixpkgs.legacyPackages.x86_64-linux; rec { packages.x86_64-linux = with nixpkgs.legacyPackages.x86_64-linux; rec {
pvemon = python3Packages.buildPythonApplication { pvemon = python3Packages.buildPythonApplication {
pname = "pvemon"; pname = "pvemon";
version = "1.1.6"; version = "1.3.3";
src = ./src; src = ./src;
propagatedBuildInputs = with python3Packages; [ propagatedBuildInputs = with python3Packages; [
pexpect pexpect

View File

@@ -19,11 +19,22 @@ from concurrent.futures import ThreadPoolExecutor
from threading import Lock from threading import Lock
import pvecommon import pvecommon
import pvestorage
import qmblock import qmblock
import builtins
# Cache for pool data parsed from /etc/pve/user.cfg; get_pool_info()
# refreshes it when the file's modification time changes.
pool_cache = {
    'last_mtime': 0,    # mtime of user.cfg at the last successful parse
    'vm_pool_map': {},  # VM id (str) -> full (possibly hierarchical) pool name
    'pools': {}         # pool name -> level_count / level1..level3 breakdown
}
DEFAULT_PORT = 9116 DEFAULT_PORT = 9116
DEFAULT_INTERVAL = 10 DEFAULT_INTERVAL = 10
DEFAULT_PREFIX = "pve" DEFAULT_PREFIX = "pve"
DEFAULT_HOST = "0.0.0.0"
gauge_settings = [ gauge_settings = [
('kvm_cpu', 'CPU time for VM', ['id', 'mode']), ('kvm_cpu', 'CPU time for VM', ['id', 'mode']),
@@ -41,6 +52,8 @@ gauge_settings = [
('kvm_io_write_chars', 'Number of bytes written including buffers', ['id']), ('kvm_io_write_chars', 'Number of bytes written including buffers', ['id']),
('kvm_nic_queues', 'Number of queues in multiqueue config', ['id', 'ifname']), ('kvm_nic_queues', 'Number of queues in multiqueue config', ['id', 'ifname']),
('kvm_disk_size', 'Size of virtual disk', ['id', 'disk_name']),
] ]
label_flags = [ "-id", "-name", "-cpu" ] label_flags = [ "-id", "-name", "-cpu" ]
@@ -67,12 +80,14 @@ def parse_mem(cmdline):
return ret return ret
def create_or_get_gauge(metric_name, labels, dynamic_gauges, gauge_lock): def create_or_get_gauge(metric_name, labels, dynamic_gauges, gauge_lock):
logging.debug(f"create_or_get_gauge({metric_name=}, labels={str(labels)}")
with gauge_lock: with gauge_lock:
if metric_name not in dynamic_gauges: if metric_name not in dynamic_gauges:
dynamic_gauges[metric_name] = GaugeMetricFamily(f"{prefix}_{metric_name}", f'{metric_name} for KVM process', labels=labels) dynamic_gauges[metric_name] = GaugeMetricFamily(f"{prefix}_{metric_name}", f'{metric_name} for KVM process', labels=labels)
return dynamic_gauges[metric_name] return dynamic_gauges[metric_name]
def create_or_get_info(info_name, labels, dynamic_infos, info_lock): def create_or_get_info(info_name, labels, dynamic_infos, info_lock):
logging.debug(f"create_or_get_info({info_name=}, labels={str(labels)}")
with info_lock: with info_lock:
if (info_name,str(labels)) not in dynamic_infos: if (info_name,str(labels)) not in dynamic_infos:
dynamic_infos[(info_name,str(labels))] = InfoMetricFamily(f"{prefix}_{info_name}", f'{info_name} for {str(labels)}', labels=labels) dynamic_infos[(info_name,str(labels))] = InfoMetricFamily(f"{prefix}_{info_name}", f'{info_name} for {str(labels)}', labels=labels)
@@ -128,6 +143,67 @@ def read_interface_stats(ifname):
pass pass
return stats return stats
def get_pool_info():
    """
    Read pool information from /etc/pve/user.cfg, caching based on the
    file's modification time.

    Returns a tuple (vm_to_pool_map, pool_info) where:
    - vm_to_pool_map maps VM IDs (str) to their full pool name
    - pool_info maps pool names to hierarchy details (level_count, level1-3)
    Both are empty dicts when the file is missing or unreadable.
    """
    pool_cfg_path = '/etc/pve/user.cfg'
    try:
        current_mtime = os.path.getmtime(pool_cfg_path)
        # Serve cached data while the file is unchanged (and we have data).
        if current_mtime <= pool_cache['last_mtime'] and pool_cache['vm_pool_map']:
            return pool_cache['vm_pool_map'], pool_cache['pools']
        logging.debug(f"Reading pool configuration from {pool_cfg_path}")
        vm_map = {}
        pool_map = {}
        with open(pool_cfg_path, 'r') as cfg:
            for raw in cfg:
                if not raw.startswith('pool:'):
                    continue
                # Line format: pool:<name>:<comment>:<vm list>:...
                fields = raw.strip().split(':')
                if len(fields) < 3:
                    continue
                name = fields[1]
                members = fields[3] if len(fields) > 3 else ''
                # Pool names may be hierarchical, e.g. "a/b/c".
                levels = name.split('/')
                depth = len(levels)
                entry = {'level_count': depth}
                for i in range(3):
                    entry[f'level{i + 1}'] = levels[i] if i < depth else ''
                pool_map[name] = entry
                for member in members.split(','):
                    member = member.strip()
                    if member:
                        vm_map[member] = name
        # Parse succeeded: refresh the module-level cache.
        pool_cache['last_mtime'] = current_mtime
        pool_cache['vm_pool_map'] = vm_map
        pool_cache['pools'] = pool_map
        return vm_map, pool_map
    except (FileNotFoundError, PermissionError) as e:
        logging.warning(f"Could not read pool configuration: {e}")
        return {}, {}
def collect_kvm_metrics(): def collect_kvm_metrics():
logging.debug("collect_kvm_metrics() called") logging.debug("collect_kvm_metrics() called")
gauge_dict = {} gauge_dict = {}
@@ -163,11 +239,34 @@ def collect_kvm_metrics():
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
continue continue
# Get VM to pool mapping
vm_pool_map, pools = get_pool_info()
for proc, cmdline, id in procs: for proc, cmdline, id in procs:
# Extract vm labels from cmdline # Extract vm labels from cmdline
info_label_dict = {get_label_name(l): flag_to_label_value(cmdline,l) for l in label_flags} info_label_dict = {get_label_name(l): flag_to_label_value(cmdline,l) for l in label_flags}
info_label_dict['pid'] = str(proc.pid) info_label_dict['pid'] = str(proc.pid)
logging.debug(f"got PID: {proc.pid}") logging.debug(f"got PID: {proc.pid}")
# Add pool information if available
if id in vm_pool_map:
pool_name = vm_pool_map[id]
pool_info = pools[pool_name]
info_label_dict['pool'] = pool_name
info_label_dict['pool_levels'] = str(pool_info['level_count'])
info_label_dict['pool1'] = pool_info['level1']
info_label_dict['pool2'] = pool_info['level2']
info_label_dict['pool3'] = pool_info['level3']
logging.debug(f"VM {id} belongs to pool {pool_name}")
else:
# VM not in any pool
info_label_dict['pool'] = ''
info_label_dict['pool_levels'] = '0'
info_label_dict['pool1'] = ''
info_label_dict['pool2'] = ''
info_label_dict['pool3'] = ''
info_dict["kvm"].add_metric([], info_label_dict) info_dict["kvm"].add_metric([], info_label_dict)
d = { d = {
@@ -221,6 +320,11 @@ def collect_kvm_metrics():
disk_labels = {"id": id, "disk_name": disk_name} disk_labels = {"id": id, "disk_name": disk_name}
prom_disk_info = create_or_get_info("kvm_disk", disk_labels.keys(), dynamic_infos, info_lock) prom_disk_info = create_or_get_info("kvm_disk", disk_labels.keys(), dynamic_infos, info_lock)
prom_disk_info.add_metric(disk_labels.values(), disk_info) prom_disk_info.add_metric(disk_labels.values(), disk_info)
disk_size = qmblock.get_disk_size(disk_info["disk_path"], disk_info["disk_type"])
if disk_size == None and disk_info["disk_type"] != "qcow2":
logging.debug(f"collect_kvm_metrics: failed to get disk size for {disk_info=}")
else:
gauge_dict["kvm_disk_size"].add_metric([id, disk_name], qmblock.get_disk_size(disk_info["disk_path"], disk_info["disk_type"]))
list(executor.map(map_netstat_proc, [ proc[2] for proc in procs ])) list(executor.map(map_netstat_proc, [ proc[2] for proc in procs ]))
list(executor.map(map_disk_proc, [ proc[2] for proc in procs ])) list(executor.map(map_disk_proc, [ proc[2] for proc in procs ]))
@@ -243,12 +347,17 @@ class PVECollector(object):
if cli_args.collect_running_vms.lower() == 'true': if cli_args.collect_running_vms.lower() == 'true':
for x in collect_kvm_metrics(): for x in collect_kvm_metrics():
yield x yield x
if cli_args.collect_storage.lower() == 'true':
for x in pvestorage.collect_storage_metrics():
yield x
def main(): def main():
parser = argparse.ArgumentParser(description='PVE metrics exporter for Prometheus') parser = argparse.ArgumentParser(description='PVE metrics exporter for Prometheus')
parser.add_argument('--port', type=int, default=DEFAULT_PORT, help='Port for the exporter to listen on') parser.add_argument('--port', type=int, default=DEFAULT_PORT, help='Port for the exporter to listen on')
parser.add_argument('--host', type=str, default=DEFAULT_HOST, help='Host address to bind the exporter to')
parser.add_argument('--interval', type=int, default=DEFAULT_INTERVAL, help='THIS OPTION DOES NOTHING') parser.add_argument('--interval', type=int, default=DEFAULT_INTERVAL, help='THIS OPTION DOES NOTHING')
parser.add_argument('--collect-running-vms', type=str, default='true', help='Enable or disable collecting running VMs metric (true/false)') parser.add_argument('--collect-running-vms', type=str, default='true', help='Enable or disable collecting running VMs metric (true/false)')
parser.add_argument('--collect-storage', type=str, default='true', help='Enable or disable collecting storage info (true/false)')
parser.add_argument('--metrics-prefix', type=str, default=DEFAULT_PREFIX, help='<prefix>_ will be prepended to each metric name') parser.add_argument('--metrics-prefix', type=str, default=DEFAULT_PREFIX, help='<prefix>_ will be prepended to each metric name')
parser.add_argument('--loglevel', type=str, default='INFO', help='Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)') parser.add_argument('--loglevel', type=str, default='INFO', help='Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)')
parser.add_argument('--profile', type=str, default='false', help='collect metrics once, and print profiling stats') parser.add_argument('--profile', type=str, default='false', help='collect metrics once, and print profiling stats')
@@ -257,23 +366,22 @@ def main():
parser.add_argument('--qm-rand', type=int, default=60, help='randomize qm monitor cache expiry') parser.add_argument('--qm-rand', type=int, default=60, help='randomize qm monitor cache expiry')
parser.add_argument('--qm-monitor-defer-close', type=str, default="true", help='defer and retry closing unresponsive qm monitor sessions') parser.add_argument('--qm-monitor-defer-close', type=str, default="true", help='defer and retry closing unresponsive qm monitor sessions')
args = parser.parse_args() # hack to access cli_args across modules
global cli_args builtins.cli_args = parser.parse_args()
cli_args = args
loglevel = getattr(logging, args.loglevel.upper(), None) loglevel = getattr(logging, cli_args.loglevel.upper(), None)
if not isinstance(loglevel, int): if not isinstance(loglevel, int):
raise ValueError(f'Invalid log level: {args.loglevel}') raise ValueError(f'Invalid log level: {cli_args.loglevel}')
logging.basicConfig(level=loglevel,format='%(asctime)s: %(message)s') logging.basicConfig(level=loglevel,format='%(asctime)s: %(message)s')
global prefix global prefix
prefix = args.metrics_prefix prefix = cli_args.metrics_prefix
pvecommon.global_qm_timeout = args.qm_terminal_timeout pvecommon.global_qm_timeout = cli_args.qm_terminal_timeout
pvecommon.qm_max_ttl = args.qm_max_ttl pvecommon.qm_max_ttl = cli_args.qm_max_ttl
pvecommon.qm_rand = args.qm_rand pvecommon.qm_rand = cli_args.qm_rand
pvecommon.qm_monitor_defer_close = args.qm_monitor_defer_close pvecommon.qm_monitor_defer_close = cli_args.qm_monitor_defer_close
if args.profile.lower() == 'true': if cli_args.profile.lower() == 'true':
profiler = cProfile.Profile() profiler = cProfile.Profile()
profiler.enable() profiler.enable()
collect_kvm_metrics() collect_kvm_metrics()
@@ -282,7 +390,7 @@ def main():
return return
else: else:
REGISTRY.register(PVECollector()) REGISTRY.register(PVECollector())
start_http_server(args.port) start_http_server(cli_args.port, addr=cli_args.host)
while True: while True:
time.sleep(100) time.sleep(100)

183
src/pvestorage/__init__.py Normal file
View File

@@ -0,0 +1,183 @@
import os
import re
import logging
import pprint
from prometheus_client.core import InfoMetricFamily, GaugeMetricFamily, CounterMetricFamily, REGISTRY
gauge_settings = [
    ('node_storage_size', 'Size of the storage pool. This number is inaccurate for ZFS.', ['name', 'type']),
    ('node_storage_free', 'Free space on the storage pool', ['name', 'type'])
]

info_settings = [
    ('node_storage', 'information for each PVE storage'),
]


def sanitize_key(key):
    """Sanitize a key to match Prometheus label requirements.

    Replaces any character that is not a letter, digit, or underscore
    with an underscore.
    """
    return re.sub(r"[^a-zA-Z0-9_]", "_", key)


# Cache for the parsed storage configuration, refreshed when the file's
# modification time changes.
_cached_storage_data = None
_cached_mtime = None


def parse_storage_cfg(file_path='/etc/pve/storage.cfg'):
    """
    Parse a PVE storage configuration file into a list of storage dicts.

    Each dict carries the sanitized section 'type' and 'name' plus one
    entry per option line; options without a value (like 'sparse') are
    stored as True. The result is cached and re-parsed only when the
    file's modification time changes.

    Args:
        file_path: Path to the storage configuration file.

    Returns:
        list[dict]: One dict per storage section, in file order.

    Raises:
        FileNotFoundError: If file_path does not exist.
    """
    logging.debug(f"parse_storage_cfg({file_path=}) called")
    global _cached_storage_data, _cached_mtime
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"The file {file_path} does not exist.")
    current_mtime = os.path.getmtime(file_path)
    # Serve the cached parse while the file is unchanged.
    if _cached_storage_data is not None and _cached_mtime == current_mtime:
        logging.debug("parse_storage_cfg: returning cached data")
        return _cached_storage_data
    logging.debug("parse_storage_cfg: file modified, dropping cache")
    storage_list = []
    current_storage = None
    with open(file_path, 'r') as file:
        for line in file:
            line = line.strip()
            if not line or line.startswith("#"):
                # Ignore empty lines and comments
                continue
            if ":" in line:
                # A 'type: name' header starts a new storage section;
                # flush the previous one first.
                if current_storage:
                    storage_list.append(current_storage)
                section_type, section_name = line.split(":", 1)
                current_storage = {
                    'type': sanitize_key(section_type.strip()),
                    'name': sanitize_key(section_name.strip()),
                }
            elif current_storage:
                # Option line inside the current section: 'key [value]'.
                parts = line.split(None, 1)
                sanitized_key = sanitize_key(parts[0].strip())
                # Keys without a value (like 'sparse') are flags: set True.
                current_storage[sanitized_key] = parts[1].strip() if len(parts) > 1 else True
    # Flush the final section.
    if current_storage:
        storage_list.append(current_storage)
    # Update the cache.
    _cached_storage_data = storage_list
    _cached_mtime = current_mtime
    return storage_list
def get_storage_size(storage):
    """
    Return the total and free size of a storage pool in bytes.

    Args:
        storage: One storage dict as produced by parse_storage_cfg().

    Returns:
        {'total': int, 'free': int} in bytes, or None when the storage
        type is unsupported or the size could not be determined.
    """
    try:
        if storage["type"] == "zfspool":
            if "pool" not in storage:
                logging.debug(f"ZFS pool {storage['name']} has no pool name configured")
                return None
            # statvfs under-reports ZFS capacity, so query the pool itself.
            # Only the top-level pool name matters (config may reference a
            # dataset such as rpool/data).
            pool_name = storage["pool"].split("/")[0]
            import subprocess
            try:
                # -p prints exact (parseable) byte values; the flag goes
                # before the positional pool argument.
                result = subprocess.run(
                    ["zpool", "list", "-p", pool_name],
                    capture_output=True,
                    text=True,
                    check=True
                )
                lines = result.stdout.strip().split("\n")
                if len(lines) < 2:
                    logging.warning(f"Unexpected zpool list output format for {pool_name}")
                    return None
                # Second line is the data row; columns: NAME SIZE ALLOC FREE ...
                values = lines[1].split()
                if len(values) < 4:
                    logging.warning(f"Insufficient data in zpool list output for {pool_name}")
                    return None
                return {
                    "total": int(values[1]),
                    "free": int(values[3])
                }
            except (subprocess.SubprocessError, ValueError, IndexError) as e:
                logging.warning(f"Error running zpool list for {pool_name}: {e}")
                return None
        elif storage["type"] in ["dir", "nfs", "cephfs"]:
            # Filesystem-backed storage: statvfs is accurate here.
            stats = os.statvfs(storage["path"])
            return {
                "total": stats.f_frsize * stats.f_blocks,
                "free": stats.f_frsize * stats.f_bavail
            }
        # TODO: handle lvmthin
        # could parse /etc/lvm/backup/<vg-name> to collect this data
        # TODO: handle rbd
    except Exception as e:
        # Best-effort: never let one broken storage break the whole scrape.
        logging.warning(f"get_storage_size: unknown error, {storage=}, error: {e}")
    # Return None if the case is not handled
    return None
def collect_storage_metrics():
    """
    Yield Prometheus metric families describing PVE storage pools.

    Emits one info metric per storage defined in /etc/pve/storage.cfg plus
    size/free-space gauges for the types get_storage_size() supports.
    Relies on the cross-module `cli_args` set on builtins by pvemon's main().
    """
    logging.debug("collect_storage_metrics() called")
    gauge_dict = {}
    info_dict = {}
    prefix = cli_args.metrics_prefix
    for name, description, labels in gauge_settings:
        gauge_dict[name] = GaugeMetricFamily(f"{prefix}_{name}", description, labels=labels)
    for name, description in info_settings:
        info_dict[name] = InfoMetricFamily(f"{prefix}_{name}", description)
    for storage in parse_storage_cfg():
        # InfoMetricFamily label values must be strings; booleans come from
        # value-less config keys like 'sparse'.
        storage_info = {
            key: value if isinstance(value, str) else str(value)
            for key, value in storage.items()
        }
        info_dict["node_storage"].add_metric([], storage_info)
        size = get_storage_size(storage)
        # Identity comparison with None (PEP 8), not equality.
        if size is not None:
            gauge_dict["node_storage_size"].add_metric([storage["name"], storage["type"]], size["total"])
            gauge_dict["node_storage_free"].add_metric([storage["name"], storage["type"]], size["free"])
    yield from info_dict.values()
    yield from gauge_dict.values()
    logging.debug("collect_storage_metrics() return")

View File

@@ -2,6 +2,7 @@ import pexpect
import re import re
import os import os
import json import json
import stat
import pvecommon import pvecommon
@@ -66,6 +67,8 @@ def extract_disk_info_from_monitor(vm_id, retries = 0):
disks_map[disk_name]["disk_type"] = "rbd" disks_map[disk_name]["disk_type"] = "rbd"
rbd_parts = disk_path.split('/') rbd_parts = disk_path.split('/')
disks_map[disk_name]["cluster_id"] = rbd_parts[-3] disks_map[disk_name]["cluster_id"] = rbd_parts[-3]
disks_map[disk_name]["pool"] = rbd_parts[-2]
# Keeping for backwards compatibility
disks_map[disk_name]["pool_name"] = rbd_parts[-2] disks_map[disk_name]["pool_name"] = rbd_parts[-2]
disks_map[disk_name]["vol_name"] = rbd_parts[-1] disks_map[disk_name]["vol_name"] = rbd_parts[-1]
disks_map[disk_name]["device"] = get_device(disk_path) disks_map[disk_name]["device"] = get_device(disk_path)
@@ -94,6 +97,24 @@ def extract_disk_info_from_monitor(vm_id, retries = 0):
disks_map[disk_name]["detect_zeroes"] = "on" disks_map[disk_name]["detect_zeroes"] = "on"
return disks_map return disks_map
def get_disk_size(disk_path, disk_type):
    """
    Return the size of a virtual disk in bytes, or None when it cannot be
    determined locally (e.g. the path is not stat-able on this host, as
    with rbd volume specs).

    Args:
        disk_path: Path to the disk: a block device node or an image file.
        disk_type: Disk type string from the QEMU monitor; currently unused,
            kept for interface compatibility and future type-specific logic.
    """
    try:
        mode = os.stat(disk_path).st_mode
    except OSError:
        # Callers treat None as "size unavailable" rather than an error,
        # so don't let non-local paths crash the whole collection pass.
        return None
    if stat.S_ISBLK(mode):
        disk_name = os.path.basename(os.path.realpath(disk_path))
        try:
            # The kernel sysfs ABI documents /sys/block/<dev>/size as a
            # count of 512-byte units, independent of the device's
            # hardware sector size.
            with open(f"/sys/block/{disk_name}/size", 'r') as f:
                return int(f.read().strip()) * 512
        except (OSError, ValueError):
            return None
    # Regular file (raw/qcow2 image): use its size on disk.
    return os.path.getsize(disk_path)
if __name__ == "__main__": if __name__ == "__main__":
import json import json
import sys import sys

View File

@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
setup( setup(
name='pvemon', name='pvemon',
version = "1.1.6", version = "1.3.3",
packages=find_packages(), packages=find_packages(),
entry_points={ entry_points={
'console_scripts': [ 'console_scripts': [