@@ -16,6 +16,7 @@
 programmatically scale a Google Cloud Bigtable cluster."""
 
 import argparse
+import logging
 import os
 import time
 
@@ -26,8 +27,12 @@
 
 PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
 
+logger = logging.getLogger('bigtable.metricscaler')
+logger.addHandler(logging.StreamHandler())
+logger.setLevel(logging.INFO)
 
-def get_cpu_load():
+
+def get_cpu_load(bigtable_instance, bigtable_cluster):
     """Returns the most recent Cloud Bigtable CPU load measurement.
 
     Returns:
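
Note on the change above: the three added logger lines configure a module-level logger that writes INFO-and-above records to stderr, replacing the print() calls removed later in this diff. A minimal standalone sketch of the same pattern, outside the diff:

    import logging

    # Named logger so this module's output can be filtered independently
    # of other libraries' records.
    logger = logging.getLogger('bigtable.metricscaler')
    logger.addHandler(logging.StreamHandler())  # StreamHandler defaults to sys.stderr
    logger.setLevel(logging.INFO)

    logger.info('emitted: INFO is at or above the configured level')
    logger.debug('suppressed: DEBUG is below INFO')
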
@@ -40,12 +45,13 @@ def get_cpu_load():
                             metric_type='bigtable.googleapis.com/'
                                         'cluster/cpu_load',
                             minutes=5)
+    cpu_query = cpu_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
     cpu = next(cpu_query.iter())
     return cpu.points[0].value.double_value
     # [END bigtable_cpu]
 
 
-def get_storage_utilization():
+def get_storage_utilization(bigtable_instance, bigtable_cluster):
     """Returns the most recent Cloud Bigtable storage utilization measurement.
 
     Returns:
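
The new select_resources() call is what makes the metric query cluster-specific. A sketch of the full query construction, assuming this file builds its queries with the google.cloud.monitoring_v3 query helper (the instance and cluster names below are placeholders, not taken from the diff):

    from google.cloud import monitoring_v3
    from google.cloud.monitoring_v3 import query

    client = monitoring_v3.MetricServiceClient()
    cpu_query = query.Query(client,
                            project=PROJECT,  # PROJECT as read from the environment above
                            metric_type='bigtable.googleapis.com/'
                                        'cluster/cpu_load',
                            minutes=5)
    # Without this filter the query yields one time series per cluster in the
    # project; filtering on the resource labels pins it to a single cluster.
    cpu_query = cpu_query.select_resources(instance='my-instance',
                                           cluster='my-cluster')
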
@@ -58,6 +64,7 @@ def get_storage_utilization():
                                     metric_type='bigtable.googleapis.com/'
                                                 'cluster/storage_utilization',
                                     minutes=5)
+    utilization_query = utilization_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
     utilization = next(utilization_query.iter())
     return utilization.points[0].value.double_value
     # [END bigtable_metric_scaler_storage_utilization]
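
Both helpers then read the result identically. Continuing from the query sketch above, and under the assumption (implied by the docstrings) that the Monitoring API returns the newest point first:

    # iter() yields one TimeSeries per matching resource; after the
    # select_resources() filter above there should be exactly one.
    series = next(cpu_query.iter())
    # points[0] is taken as the most recent sample; both of these Bigtable
    # metrics are double values in the range 0.0-1.0.
    latest = series.points[0].value.double_value
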
@@ -111,15 +118,15 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
                 current_node_count + size_change_step, max_node_count)
             cluster.serve_nodes = new_node_count
             cluster.update()
-            print('Scaled up from {} to {} nodes.'.format(
+            logger.info('Scaled up from {} to {} nodes.'.format(
                 current_node_count, new_node_count))
     else:
         if current_node_count > min_node_count:
             new_node_count = max(
                 current_node_count - size_change_step, min_node_count)
             cluster.serve_nodes = new_node_count
             cluster.update()
-            print('Scaled down from {} to {} nodes.'.format(
+            logger.info('Scaled down from {} to {} nodes.'.format(
                 current_node_count, new_node_count))
     # [END bigtable_scale]
 
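
The min()/max() calls in this hunk clamp the new node count to the configured bounds. A worked example with illustrative values (size_change_step=3, min_node_count=3, max_node_count=30 are assumptions for the sketch, not values from this diff):

    size_change_step, min_node_count, max_node_count = 3, 3, 30

    # Scaling up from 28 nodes: capped at the maximum.
    assert min(28 + size_change_step, max_node_count) == 30
    # Scaling down from 4 nodes: floored at the minimum.
    assert max(4 - size_change_step, min_node_count) == 3
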
@@ -145,10 +152,10 @@ def main(
         long_sleep (int): How long to sleep after the number of nodes is
             changed
     """
-    cluster_cpu = get_cpu_load()
-    cluster_storage = get_storage_utilization()
-    print('Detected cpu of {}'.format(cluster_cpu))
-    print('Detected storage utilization of {}'.format(cluster_storage))
+    cluster_cpu = get_cpu_load(bigtable_instance, bigtable_cluster)
+    cluster_storage = get_storage_utilization(bigtable_instance, bigtable_cluster)
+    logger.info('Detected cpu of {}'.format(cluster_cpu))
+    logger.info('Detected storage utilization of {}'.format(cluster_storage))
     try:
         if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold:
             scale_bigtable(bigtable_instance, bigtable_cluster, True)
@@ -158,10 +165,10 @@ def main(
             scale_bigtable(bigtable_instance, bigtable_cluster, False)
             time.sleep(long_sleep)
         else:
-            print('CPU within threshold, sleeping.')
+            logger.info('CPU within threshold, sleeping.')
             time.sleep(short_sleep)
     except Exception as e:
-        print("Error during scaling: %s", e)
+        logger.error("Error during scaling: %s", e)
 
 
 if __name__ == '__main__':