@@ -20,19 +20,22 @@
 # [START export_table]
 def export_table(service, cloud_storage_path,
                  projectId, datasetId, tableId,
+                 export_format="CSV",
                  num_retries=5):
     """
     Starts an export job

     Args:
         service: initialized and authorized bigquery
-            google-api-client object,
+            google-api-client object.
         cloud_storage_path: fully qualified
-            path to a Google Cloud Storage location,
-            e.g. gs://mybucket/myfolder/
+            path to a Google Cloud Storage location.
+            e.g. gs://mybucket/myfolder/
+        export_format: format to export in;
+            "CSV", "NEWLINE_DELIMITED_JSON", or "AVRO".

     Returns: an extract job resource representing the
-        job, see https://cloud.google.com/bigquery/docs/reference/v2/jobs
+        job, see https://cloud.google.com/bigquery/docs/reference/v2/jobs
     """
     # Generate a unique job_id so retries
     # don't accidentally duplicate export
@@ -49,6 +52,7 @@ def export_table(service, cloud_storage_path,
                     'tableId': tableId,
                 },
                 'destinationUris': [cloud_storage_path],
+                'destinationFormat': export_format
             }
         }
     }
@@ -61,11 +65,13 @@ def export_table(service, cloud_storage_path,
 # [START run]
 def run(cloud_storage_path,
         projectId, datasetId, tableId,
-        num_retries, interval):
+        num_retries, interval, export_format="CSV"):

     bigquery = get_service()
     resource = export_table(bigquery, cloud_storage_path,
-                            projectId, datasetId, tableId, num_retries)
+                            projectId, datasetId, tableId,
+                            num_retries=num_retries,
+                            export_format=export_format)
     poll_job(bigquery,
              resource['jobReference']['projectId'],
              resource['jobReference']['jobId'],
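
For context, here is a minimal sketch of the extract-job flow this commit modifies, with the new 'destinationFormat' field in place. It is an illustration, not the sample itself: the function name export_table_sketch is hypothetical, the unique-jobId generation hinted at by the comment in the first hunk is simplified to a uuid, and the service argument is assumed to be the authorized client produced by the sample's get_service() helper.

# Sketch only: assumes an authorized BigQuery `service` object from the
# sample's get_service() helper; jobId generation is simplified here.
import uuid


def export_table_sketch(service, cloud_storage_path,
                        projectId, datasetId, tableId,
                        export_format="CSV",
                        num_retries=5):
    # Unique job id so a retried insert cannot start a duplicate export.
    job_id = 'export-{}-{}-{}'.format(datasetId, tableId, uuid.uuid4().hex)
    job_data = {
        'jobReference': {
            'projectId': projectId,
            'jobId': job_id
        },
        'configuration': {
            'extract': {
                'sourceTable': {
                    'projectId': projectId,
                    'datasetId': datasetId,
                    'tableId': tableId,
                },
                'destinationUris': [cloud_storage_path],
                # The field this commit adds; one of "CSV",
                # "NEWLINE_DELIMITED_JSON", or "AVRO".
                'destinationFormat': export_format
            }
        }
    }
    # Standard google-api-python-client pattern for the BigQuery v2 API.
    return service.jobs().insert(
        projectId=projectId,
        body=job_data).execute(num_retries=num_retries)

With the change to run() above, callers can pass e.g. export_format="AVRO" and the value flows through to the job configuration unchanged; omitting it preserves the previous CSV behavior.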