Skip to content

Commit 238722a

Browse files
odedfos and leahecole authored
#4237 BT metricscaler - Filter cpu query to get the metrics for the correct resources (#4238)
* #4237 Added a filter to the cpu & storage queries to get the metrics for the correct resources
* fix lint
* fix lint again

Co-authored-by: Leah E. Cole <[email protected]>
Co-authored-by: Leah Cole <[email protected]>
1 parent a7feb50 commit 238722a

File tree

2 files changed

+32
-15
lines changed

2 files changed

+32
-15
lines changed

bigtable/metricscaler/metricscaler.py

Lines changed: 17 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
programmatically scale a Google Cloud Bigtable cluster."""
1717

1818
import argparse
19+
import logging
1920
import os
2021
import time
2122

@@ -26,8 +27,12 @@
2627

2728
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
2829

30+
logger = logging.getLogger('bigtable.metricscaler')
31+
logger.addHandler(logging.StreamHandler())
32+
logger.setLevel(logging.INFO)
2933

30-
def get_cpu_load():
34+
35+
def get_cpu_load(bigtable_instance, bigtable_cluster):
3136
"""Returns the most recent Cloud Bigtable CPU load measurement.
3237
3338
Returns:
@@ -40,12 +45,13 @@ def get_cpu_load():
4045
metric_type='bigtable.googleapis.com/'
4146
'cluster/cpu_load',
4247
minutes=5)
48+
cpu_query = cpu_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
4349
cpu = next(cpu_query.iter())
4450
return cpu.points[0].value.double_value
4551
# [END bigtable_cpu]
4652

4753

48-
def get_storage_utilization():
54+
def get_storage_utilization(bigtable_instance, bigtable_cluster):
4955
"""Returns the most recent Cloud Bigtable storage utilization measurement.
5056
5157
Returns:
@@ -58,6 +64,7 @@ def get_storage_utilization():
5864
metric_type='bigtable.googleapis.com/'
5965
'cluster/storage_utilization',
6066
minutes=5)
67+
utilization_query = utilization_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
6168
utilization = next(utilization_query.iter())
6269
return utilization.points[0].value.double_value
6370
# [END bigtable_metric_scaler_storage_utilization]
@@ -111,15 +118,15 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
111118
current_node_count + size_change_step, max_node_count)
112119
cluster.serve_nodes = new_node_count
113120
cluster.update()
114-
print('Scaled up from {} to {} nodes.'.format(
121+
logger.info('Scaled up from {} to {} nodes.'.format(
115122
current_node_count, new_node_count))
116123
else:
117124
if current_node_count > min_node_count:
118125
new_node_count = max(
119126
current_node_count - size_change_step, min_node_count)
120127
cluster.serve_nodes = new_node_count
121128
cluster.update()
122-
print('Scaled down from {} to {} nodes.'.format(
129+
logger.info('Scaled down from {} to {} nodes.'.format(
123130
current_node_count, new_node_count))
124131
# [END bigtable_scale]
125132

@@ -145,10 +152,10 @@ def main(
145152
long_sleep (int): How long to sleep after the number of nodes is
146153
changed
147154
"""
148-
cluster_cpu = get_cpu_load()
149-
cluster_storage = get_storage_utilization()
150-
print('Detected cpu of {}'.format(cluster_cpu))
151-
print('Detected storage utilization of {}'.format(cluster_storage))
155+
cluster_cpu = get_cpu_load(bigtable_instance, bigtable_cluster)
156+
cluster_storage = get_storage_utilization(bigtable_instance, bigtable_cluster)
157+
logger.info('Detected cpu of {}'.format(cluster_cpu))
158+
logger.info('Detected storage utilization of {}'.format(cluster_storage))
152159
try:
153160
if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold:
154161
scale_bigtable(bigtable_instance, bigtable_cluster, True)
@@ -158,10 +165,10 @@ def main(
158165
scale_bigtable(bigtable_instance, bigtable_cluster, False)
159166
time.sleep(long_sleep)
160167
else:
161-
print('CPU within threshold, sleeping.')
168+
logger.info('CPU within threshold, sleeping.')
162169
time.sleep(short_sleep)
163170
except Exception as e:
164-
print("Error during scaling: %s", e)
171+
logger.error("Error during scaling: %s", e)
165172

166173

167174
if __name__ == '__main__':

bigtable/metricscaler/metricscaler_test.py

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020

2121
from google.cloud import bigtable
2222
from google.cloud.bigtable import enums
23-
from mock import patch
23+
from mock import Mock, patch
2424

2525
import pytest
2626

@@ -41,12 +41,18 @@
4141
# System tests to verify API calls succeed
4242

4343

44-
def test_get_cpu_load():
45-
assert float(get_cpu_load()) > 0.0
44+
@patch('metricscaler.query')
45+
def test_get_cpu_load(monitoring_v3_query):
46+
iter_mock = monitoring_v3_query.Query().select_resources().iter
47+
iter_mock.return_value = iter([Mock(points=[Mock(value=Mock(double_value=1.0))])])
48+
assert float(get_cpu_load(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE)) > 0.0
4649

4750

48-
def test_get_storage_utilization():
49-
assert float(get_storage_utilization()) > 0.0
51+
@patch('metricscaler.query')
52+
def test_get_storage_utilization(monitoring_v3_query):
53+
iter_mock = monitoring_v3_query.Query().select_resources().iter
54+
iter_mock.return_value = iter([Mock(points=[Mock(value=Mock(double_value=1.0))])])
55+
assert float(get_storage_utilization(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE)) > 0.0
5056

5157

5258
@pytest.fixture()
@@ -198,3 +204,7 @@ def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep):
198204
scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
199205
BIGTABLE_INSTANCE, True)
200206
scale_bigtable.reset_mock()
207+
208+
209+
if __name__ == '__main__':
210+
test_get_cpu_load()

0 commit comments

Comments
 (0)