Skip to content

Commit 078a266

Browse files
dpebot
authored and
Jon Wayne Parrott
committed
Auto-update dependencies. (#1005)
* Auto-update dependencies. * Fix bigtable lint * Fix IOT iam interaction
1 parent bc0924a commit 078a266

File tree

7 files changed

+25
-27
lines changed

7 files changed

+25
-27
lines changed

bigtable/hello/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
google-cloud-bigtable==0.24.0
1+
google-cloud-bigtable==0.25.0
22
google-cloud-core==0.25.0

bigtable/metricscaler/metricscaler.py

Lines changed: 18 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222
from google.cloud import monitoring
2323

2424

25-
2625
def get_cpu_load():
2726
"""Returns the most recent Cloud Bigtable CPU load measurement.
2827
@@ -51,23 +50,22 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
5150
bigtable_cluster (str): Cloud Bigtable cluster ID to scale
5251
scale_up (bool): If true, scale up, otherwise scale down
5352
"""
54-
_MIN_NODE_COUNT = 3
55-
"""
56-
The minimum number of nodes to use. The default minimum is 3. If you have a
57-
lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
58-
clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
59-
metric is useful in figuring out the minimum number of nodes.
60-
"""
6153

62-
_MAX_NODE_COUNT = 30
63-
"""
64-
The maximum number of nodes to use. The default maximum is 30 nodes per zone.
65-
If you need more quota, you can request more by following the instructions
66-
<a href="https://cloud.google.com/bigtable/quota">here</a>.
67-
"""
54+
# The minimum number of nodes to use. The default minimum is 3. If you have
55+
# a lot of data, the rule of thumb is to not go below 2.5 TB per node for
56+
# SSD clusters, and 8 TB for HDD. The
57+
# "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring
58+
# out the minimum number of nodes.
59+
min_node_count = 3
60+
61+
# The maximum number of nodes to use. The default maximum is 30 nodes per
62+
# zone. If you need more quota, you can request more by following the
63+
# instructions at https://cloud.google.com/bigtable/quota.
64+
max_node_count = 30
65+
66+
# The number of nodes to change the cluster by.
67+
size_change_step = 3
6868

69-
_SIZE_CHANGE_STEP = 3
70-
"""The number of nodes to change the cluster by."""
7169
# [START bigtable_scale]
7270
bigtable_client = bigtable.Client(admin=True)
7371
instance = bigtable_client.instance(bigtable_instance)
@@ -79,16 +77,16 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
7977
current_node_count = cluster.serve_nodes
8078

8179
if scale_up:
82-
if current_node_count < _MAX_NODE_COUNT:
83-
new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT)
80+
if current_node_count < max_node_count:
81+
new_node_count = min(current_node_count + 3, max_node_count)
8482
cluster.serve_nodes = new_node_count
8583
cluster.update()
8684
print('Scaled up from {} to {} nodes.'.format(
8785
current_node_count, new_node_count))
8886
else:
89-
if current_node_count > _MIN_NODE_COUNT:
87+
if current_node_count > min_node_count:
9088
new_node_count = max(
91-
current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
89+
current_node_count - size_change_step, min_node_count)
9290
cluster.serve_nodes = new_node_count
9391
cluster.update()
9492
print('Scaled down from {} to {} nodes.'.format(

bigtable/metricscaler/metricscaler_test.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,13 @@
2020
from google.cloud import bigtable
2121
from mock import patch
2222

23-
from metricscaler import _SIZE_CHANGE_STEP
2423
from metricscaler import get_cpu_load
2524
from metricscaler import main
2625
from metricscaler import scale_bigtable
2726

2827
# tests assume instance and cluster have the same ID
2928
BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER']
29+
SIZE_CHANGE_STEP = 3
3030

3131
# System tests to verify API calls succeed
3232

@@ -50,7 +50,7 @@ def test_scale_bigtable():
5050
cluster.reload()
5151

5252
new_node_count = cluster.serve_nodes
53-
assert (new_node_count == (original_node_count + _SIZE_CHANGE_STEP))
53+
assert (new_node_count == (original_node_count + SIZE_CHANGE_STEP))
5454

5555
scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
5656
time.sleep(3)
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
google-cloud-bigtable==0.24.0
1+
google-cloud-bigtable==0.25.0
22
google-cloud-monitoring==0.25.0

dataproc/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
google-api-python-client==1.6.2
2-
google-cloud==0.25.0
2+
google-cloud==0.26.0

iot/api-client/manager/manager.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def create_iot_topic(topic_name):
5151
topic = pubsub_client.topic(topic_name)
5252
policy = topic.get_iam_policy()
5353
publishers = policy.get('roles/pubsub.publisher', [])
54-
publishers.append(policy.service_account(
54+
publishers.add(policy.service_account(
5555
5656
policy['roles/pubsub.publisher'] = publishers
5757
topic.set_iam_policy(policy)
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
google-api-python-client==1.6.2
22
google-auth-httplib2==0.0.2
33
google-auth==1.0.1
4-
google-cloud==0.25.0
4+
google-cloud==0.26.0

0 commit comments

Comments (0)