diff --git a/bigtable/hello/requirements.txt b/bigtable/hello/requirements.txt
index 8a8e837c14a..298e582fd8d 100644
--- a/bigtable/hello/requirements.txt
+++ b/bigtable/hello/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==0.24.0
+google-cloud-bigtable==0.25.0
 google-cloud-core==0.25.0
diff --git a/bigtable/metricscaler/metricscaler.py b/bigtable/metricscaler/metricscaler.py
index 8a61ca3eb23..486795ce609 100644
--- a/bigtable/metricscaler/metricscaler.py
+++ b/bigtable/metricscaler/metricscaler.py
@@ -22,7 +22,6 @@
 from google.cloud import monitoring
 
 
-
 def get_cpu_load():
     """Returns the most recent Cloud Bigtable CPU load measurement.
 
@@ -51,23 +50,22 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
         bigtable_cluster (str): Cloud Bigtable cluster ID to scale
         scale_up (bool): If true, scale up, otherwise scale down
     """
-    _MIN_NODE_COUNT = 3
-    """
-    The minimum number of nodes to use. The default minimum is 3. If you have a
-    lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
-    clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
-    metric is useful in figuring out the minimum number of nodes.
-    """
-    _MAX_NODE_COUNT = 30
-    """
-    The maximum number of nodes to use. The default maximum is 30 nodes per zone.
-    If you need more quota, you can request more by following the instructions
-    here.
-    """
+    # The minimum number of nodes to use. The default minimum is 3. If you have
+    # a lot of data, the rule of thumb is to not go below 2.5 TB per node for
+    # SSD clusters, and 8 TB for HDD. The
+    # "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring
+    # out the minimum number of nodes.
+    min_node_count = 3
+
+    # The maximum number of nodes to use. The default maximum is 30 nodes per
+    # zone. If you need more quota, you can request more by following the
+    # instructions at https://cloud.google.com/bigtable/quota.
+    max_node_count = 30
+
+    # The number of nodes to change the cluster by.
+    size_change_step = 3
 
-    _SIZE_CHANGE_STEP = 3
-    """The number of nodes to change the cluster by."""
     # [START bigtable_scale]
     bigtable_client = bigtable.Client(admin=True)
     instance = bigtable_client.instance(bigtable_instance)
     instance.reload()
@@ -79,16 +77,16 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
     current_node_count = cluster.serve_nodes
 
     if scale_up:
-        if current_node_count < _MAX_NODE_COUNT:
-            new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT)
+        if current_node_count < max_node_count:
+            new_node_count = min(current_node_count + 3, max_node_count)
             cluster.serve_nodes = new_node_count
             cluster.update()
             print('Scaled up from {} to {} nodes.'.format(
                 current_node_count, new_node_count))
     else:
-        if current_node_count > _MIN_NODE_COUNT:
+        if current_node_count > min_node_count:
             new_node_count = max(
-                current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
+                current_node_count - size_change_step, min_node_count)
             cluster.serve_nodes = new_node_count
             cluster.update()
             print('Scaled down from {} to {} nodes.'.format(
diff --git a/bigtable/metricscaler/metricscaler_test.py b/bigtable/metricscaler/metricscaler_test.py
index 7a151a0efd2..76561ca6537 100644
--- a/bigtable/metricscaler/metricscaler_test.py
+++ b/bigtable/metricscaler/metricscaler_test.py
@@ -20,13 +20,13 @@
 from google.cloud import bigtable
 from mock import patch
 
-from metricscaler import _SIZE_CHANGE_STEP
 from metricscaler import get_cpu_load
 from metricscaler import main
 from metricscaler import scale_bigtable
 
 # tests assume instance and cluster have the same ID
 BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER']
+SIZE_CHANGE_STEP = 3
 
 # System tests to verify API calls succeed
 
@@ -50,7 +50,7 @@ def test_scale_bigtable():
     cluster.reload()
 
     new_node_count = cluster.serve_nodes
-    assert (new_node_count == (original_node_count + _SIZE_CHANGE_STEP))
+    assert (new_node_count == (original_node_count + SIZE_CHANGE_STEP))
     scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
 
     time.sleep(3)
diff --git a/bigtable/metricscaler/requirements.txt b/bigtable/metricscaler/requirements.txt
index 480c67b2259..8153a027252 100644
--- a/bigtable/metricscaler/requirements.txt
+++ b/bigtable/metricscaler/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==0.24.0
+google-cloud-bigtable==0.25.0
 google-cloud-monitoring==0.25.0
diff --git a/dataproc/requirements.txt b/dataproc/requirements.txt
index 6ae3a1ebee0..1219a36251c 100644
--- a/dataproc/requirements.txt
+++ b/dataproc/requirements.txt
@@ -1,2 +1,2 @@
 google-api-python-client==1.6.2
-google-cloud==0.25.0
+google-cloud==0.26.0
diff --git a/iot/api-client/manager/manager.py b/iot/api-client/manager/manager.py
index e8a755a5415..1b8a0439d67 100644
--- a/iot/api-client/manager/manager.py
+++ b/iot/api-client/manager/manager.py
@@ -51,7 +51,7 @@ def create_iot_topic(topic_name):
     topic = pubsub_client.topic(topic_name)
     policy = topic.get_iam_policy()
     publishers = policy.get('roles/pubsub.publisher', [])
-    publishers.append(policy.service_account(
+    publishers.add(policy.service_account(
         'cloud-iot@system.gserviceaccount.com'))
     policy['roles/pubsub.publisher'] = publishers
     topic.set_iam_policy(policy)
diff --git a/iot/api-client/manager/requirements.txt b/iot/api-client/manager/requirements.txt
index b54e248f0a1..28fa5657ad3 100644
--- a/iot/api-client/manager/requirements.txt
+++ b/iot/api-client/manager/requirements.txt
@@ -1,4 +1,4 @@
 google-api-python-client==1.6.2
 google-auth-httplib2==0.0.2
 google-auth==1.0.1
-google-cloud==0.25.0
+google-cloud==0.26.0