diff --git a/tests/unit/test_helpers.py b/tests/unit/test_helpers.py
index e196166..87c4ece 100644
--- a/tests/unit/test_helpers.py
+++ b/tests/unit/test_helpers.py
@@ -16,20 +16,26 @@ def Destroy(self):
pass
-def test_get_bool_env_with_default_value():
- value = get_bool_env('INEXISTENT_ENV', True)
-
- assert value
-
-
-def test_get_bool_env_with_a_valid_env():
- key = "TEST_BOOLEAN_VALUE"
-
- os.environ[key] = "True"
-
- value = get_bool_env(key, False)
-
- assert value
+def test_get_bool_env():
+ # Expected behaviour
+ assert get_bool_env('NON_EXISTENT_ENV', True)
+
+ # #102 'bool("False") will evaluate to True in Python'
+ os.environ['VSPHERE_COLLECT_VMS'] = "False"
+ assert not get_bool_env('VSPHERE_COLLECT_VMS', True)
+
+    # Environment variables take priority over the supplied default
+ os.environ['ENVHIGHERPRIO'] = "True"
+ assert get_bool_env('ENVHIGHERPRIO', False)
+ assert get_bool_env('ENVHIGHERPRIO', True)
+
+ os.environ['ENVHIGHERPRIO_F'] = "False"
+ assert not get_bool_env('ENVHIGHERPRIO_F', False)
+ assert not get_bool_env('ENVHIGHERPRIO_F', True)
+
+    # Accept upper and lower case in env vars
+ os.environ['ENVHIGHERPRIO_F'] = "false"
+ assert not get_bool_env('ENVHIGHERPRIO_F', True)
def test_batch_fetch_properties():
diff --git a/tests/unit/test_vmware_exporter.py b/tests/unit/test_vmware_exporter.py
index 1d99e93..081d3f0 100644
--- a/tests/unit/test_vmware_exporter.py
+++ b/tests/unit/test_vmware_exporter.py
@@ -9,8 +9,11 @@
from twisted.internet import defer
from twisted.internet.error import ReactorAlreadyRunning
from twisted.web.server import NOT_DONE_YET
+from twisted.web.resource import Resource
-from vmware_exporter.vmware_exporter import main, HealthzResource, VmwareCollector, VMWareMetricsResource
+
+from vmware_exporter.vmware_exporter import main, registerEndpoints
+from vmware_exporter.vmware_exporter import HealthzResource, VmwareCollector, VMWareMetricsResource, IndexResource
from vmware_exporter.defer import BranchingDeferred
@@ -482,7 +485,7 @@ def test_collect():
).return_value = _succeed(True)
stack.enter_context(mock.patch.object(collector, '_vmware_get_datastores')).return_value = _succeed(True)
stack.enter_context(mock.patch.object(collector, '_vmware_get_hosts')).return_value = _succeed(True)
- stack.enter_context(mock.patch.object(collector, '_vmware_disconnect')).return_value = _succeed(None)
+ stack.enter_context(mock.patch.object(collector, '_vmware_disconnect')).return_value = _succeed(True)
metrics = yield collector.collect()
assert metrics[0].name == 'vmware_vm_power_state'
@@ -726,6 +729,35 @@ def test_healthz():
assert response == b'Server is UP'
+def test_index_page():
+ request = mock.Mock()
+
+ resource = IndexResource()
+ response = resource.render_GET(request)
+
+ request.setResponseCode.assert_called_with(200)
+
+ assert response == b"""
+
VMware Exporter
+
+ VMware Exporter
+ Metrics
+
+ """
+
+
+def test_register_endpoints():
+ args = mock.Mock()
+ args.config_file = None
+
+ registered_routes = [b'', b'metrics', b'healthz']
+
+ evaluation_var = registerEndpoints(args)
+ assert isinstance(evaluation_var, Resource)
+ for route in registered_routes:
+ assert evaluation_var.getStaticEntity(route) is not None
+
+
def test_vmware_resource():
request = mock.Mock()
diff --git a/vmware_exporter/helpers.py b/vmware_exporter/helpers.py
index 5589a02..afcf205 100644
--- a/vmware_exporter/helpers.py
+++ b/vmware_exporter/helpers.py
@@ -3,8 +3,9 @@
from pyVmomi import vmodl
-def get_bool_env(key, default=None):
- return bool(os.environ.get(key, default))
+def get_bool_env(key: str, default: bool):
+ value = os.environ.get(key, default)
+ return value if type(value) == bool else value.lower() == 'true'
def batch_fetch_properties(content, obj_type, properties):
diff --git a/vmware_exporter/vmware_exporter.py b/vmware_exporter/vmware_exporter.py
index 96f7f30..acadf1a 100755
--- a/vmware_exporter/vmware_exporter.py
+++ b/vmware_exporter/vmware_exporter.py
@@ -188,7 +188,7 @@ def collect(self):
metrics = self._create_metric_containers()
- logging.info(f"Start collecting metrics from {vsphere_host}")
+ logging.info("Start collecting metrics from {vsphere_host}".format(vsphere_host=vsphere_host))
self._labels = {}
@@ -212,9 +212,9 @@ def collect(self):
yield parallelize(*tasks)
- yield self._vmware_disconnect
+ yield self._vmware_disconnect()
- logging.info(f"Finished collecting metrics from {vsphere_host}")
+ logging.info("Finished collecting metrics from {vsphere_host}".format(vsphere_host=vsphere_host))
return list(metrics.values()) # noqa: F705
@@ -243,7 +243,7 @@ def connection(self):
return vmware_connect
except vmodl.MethodFault as error:
- logging.error(f"Caught vmodl fault: {error.msg}")
+ logging.error("Caught vmodl fault: {error}".format(error=error.msg))
return None
@run_once_property
@@ -290,7 +290,8 @@ def datastore_inventory(self):
vim.Datastore,
properties
)
- logging.info(f"Fetched vim.Datastore inventory ({datetime.datetime.utcnow() - start})")
+ fetch_time = datetime.datetime.utcnow() - start
+ logging.info("Fetched vim.Datastore inventory ({fetch_time})".format(fetch_time=fetch_time))
return datastores
@run_once_property
@@ -316,7 +317,8 @@ def host_system_inventory(self):
vim.HostSystem,
properties,
)
- logging.info(f"Fetched vim.HostSystem inventory ({datetime.datetime.utcnow() - start})")
+ fetch_time = datetime.datetime.utcnow() - start
+ logging.info("Fetched vim.HostSystem inventory ({fetch_time})".format(fetch_time=fetch_time))
return host_systems
@run_once_property
@@ -353,7 +355,8 @@ def vm_inventory(self):
vim.VirtualMachine,
properties,
)
- logging.info(f"Fetched vim.VirtualMachine inventory ({datetime.datetime.utcnow() - start})")
+ fetch_time = datetime.datetime.utcnow() - start
+ logging.info("Fetched vim.VirtualMachine inventory ({fetch_time})".format(fetch_time=fetch_time))
return virtual_machines
@run_once_property
@@ -373,21 +376,21 @@ def datastore_labels(self):
def _collect(node, level=1, dc=None, storagePod=""):
inventory = {}
if isinstance(node, vim.Folder) and not isinstance(node, vim.StoragePod):
- logging.debug(f"[Folder ] {('-' * level).ljust(7)} {node.name}")
+ logging.debug("[Folder ] {level} {name}".format(name=node.name, level=('-' * level).ljust(7)))
for child in node.childEntity:
- inventory.update(_collect(child, level+1, dc))
+ inventory.update(_collect(child, level + 1, dc))
elif isinstance(node, vim.Datacenter):
- logging.debug(f"[Datacenter] {('-' * level).ljust(7)} {node.name}")
- inventory.update(_collect(node.datastoreFolder, level+1, node.name))
+ logging.debug("[Datacenter] {level} {name}".format(name=node.name, level=('-' * level).ljust(7)))
+ inventory.update(_collect(node.datastoreFolder, level + 1, node.name))
elif isinstance(node, vim.Folder) and isinstance(node, vim.StoragePod):
- logging.debug(f"[StoragePod] {('-' * level).ljust(7)} {node.name}")
+ logging.debug("[StoragePod] {level} {name}".format(name=node.name, level=('-' * level).ljust(7)))
for child in node.childEntity:
- inventory.update(_collect(child, level+1, dc, node.name))
+ inventory.update(_collect(child, level + 1, dc, node.name))
elif isinstance(node, vim.Datastore):
- logging.debug(f"[Datastore ] {('-' * level).ljust(7)} {node.name}")
+ logging.debug("[Datastore ] {level} {name}".format(name=node.name, level=('-' * level).ljust(7)))
inventory[node.name] = [node.name, dc, storagePod]
else:
- logging.debug(f"[? ] {('-' * level).ljust(7)} {node}")
+ logging.debug("[? ] {level} {node}".format(node=node, level=('-' * level).ljust(7)))
return inventory
labels = {}
@@ -404,25 +407,25 @@ def host_labels(self):
def _collect(node, level=1, dc=None, folder=None):
inventory = {}
if isinstance(node, vim.Folder) and not isinstance(node, vim.StoragePod):
- logging.debug(f"[Folder ] {('-' * level).ljust(7)} {node.name}")
+ logging.debug("[Folder ] {level} {name}".format(level=('-' * level).ljust(7), name=node.name))
for child in node.childEntity:
- inventory.update(_collect(child, level+1, dc))
+ inventory.update(_collect(child, level + 1, dc))
elif isinstance(node, vim.Datacenter):
- logging.debug(f"[Datacenter] {('-' * level).ljust(7)} {node.name}")
- inventory.update(_collect(node.hostFolder, level+1, node.name))
+ logging.debug("[Datacenter] {level} {name}".format(level=('-' * level).ljust(7), name=node.name))
+ inventory.update(_collect(node.hostFolder, level + 1, node.name))
elif isinstance(node, vim.ComputeResource):
- logging.debug(f"[ComputeRes] {('-' * level).ljust(7)} {node.name}")
+ logging.debug("[ComputeRes] {level} {name}".format(level=('-' * level).ljust(7), name=node.name))
for host in node.host:
- inventory.update(_collect(host, level+1, dc, node))
+ inventory.update(_collect(host, level + 1, dc, node))
elif isinstance(node, vim.HostSystem):
- logging.debug(f"[HostSystem] {('-' * level).ljust(7)} {node.name}")
+ logging.debug("[HostSystem] {level} {name}".format(level=('-' * level).ljust(7), name=node.name))
inventory[node._moId] = [
node.summary.config.name.rstrip('.'),
dc,
folder.name if isinstance(folder, vim.ClusterComputeResource) else ''
]
else:
- logging.debug(f"[? ] {('-' * level).ljust(7)} {node}")
+ logging.debug("[? ] {level} {node}".format(level=('-' * level).ljust(7), node=node))
return inventory
labels = {}
@@ -505,7 +508,11 @@ def _vmware_get_datastores(self, ds_metrics):
name = datastore['name']
labels = datastore_labels[name]
except KeyError as e:
- logging.info(f"Key error, unable to register datastore {e}, datastores are {datastore_labels}")
+ logging.info(
+ "Key error, unable to register datastore {error}, datastores are {datastore_labels}".format(
+ error=e, datastore_labels=datastore_labels
+ )
+ )
continue
ds_capacity = float(datastore.get('summary.capacity', 0))
@@ -522,8 +529,8 @@ def _vmware_get_datastores(self, ds_metrics):
ds_metrics['vmware_datastore_vms'].add_metric(labels, len(datastore.get('vm', [])))
ds_metrics['vmware_datastore_maintenance_mode'].add_metric(
- labels + [datastore.get('summary.maintenanceMode', 'unknown')],
- 1
+ labels + [datastore.get('summary.maintenanceMode', 'unknown')],
+ 1
)
ds_metrics['vmware_datastore_type'].add_metric(
@@ -689,7 +696,11 @@ def _vmware_get_hosts(self, host_metrics):
try:
labels = host_labels[host_id]
except KeyError as e:
- logging.info(f"Key error, unable to register host {e}, host labels are {host_labels}")
+ logging.info(
+ "Key error, unable to register host {error}, host labels are {host_labels}".format(
+ error=e, host_labels=host_labels
+ )
+ )
continue
# Power state
@@ -893,6 +904,35 @@ def render_GET(self, request):
return 'Server is UP'.encode()
+class IndexResource(Resource):
+ isLeaf = False
+
+ def getChild(self, name, request):
+ if name == b'':
+ return self
+ return Resource.getChild(self, name, request)
+
+ def render_GET(self, request):
+ output = """
+ VMware Exporter
+
+ VMware Exporter
+ Metrics
+
+ """
+ request.setHeader("Content-Type", "text/html; charset=UTF-8")
+ request.setResponseCode(200)
+ return output.encode()
+
+
+def registerEndpoints(args):
+ root = Resource()
+ root.putChild(b'', IndexResource())
+ root.putChild(b'metrics', VMWareMetricsResource(args))
+ root.putChild(b'healthz', HealthzResource())
+ return root
+
+
def main(argv=None):
""" start up twisted reactor """
parser = argparse.ArgumentParser(description='VMWare metrics exporter for Prometheus')
@@ -907,18 +947,13 @@ def main(argv=None):
numeric_level = getattr(logging, args.loglevel.upper(), None)
if not isinstance(numeric_level, int):
- raise ValueError(f"Invalid log level: {args.loglevel}")
+ raise ValueError("Invalid log level: {level}".format(level=args.loglevel))
logging.basicConfig(level=numeric_level, format='%(asctime)s %(levelname)s:%(message)s')
reactor.suggestThreadPoolSize(25)
- # Start up the server to expose the metrics.
- root = Resource()
- root.putChild(b'metrics', VMWareMetricsResource(args))
- root.putChild(b'healthz', HealthzResource())
-
- factory = Site(root)
- logging.info(f"Starting web server on port {args.port}")
+ factory = Site(registerEndpoints(args))
+ logging.info("Starting web server on port {port}".format(port=args.port))
endpoint = endpoints.TCP4ServerEndpoint(reactor, args.port)
endpoint.listen(factory)
reactor.run()