Skip to content

Commit 768c811

Browse files
alixhami authored and busunkim96 committed
Language region tag update [(#1643)](#1643)
1 parent b3b2fdf commit 768c811

File tree

4 files changed

+60
-56
lines changed

4 files changed

+60
-56
lines changed

language/snippets/classify_text/classify_text_tutorial.py

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -13,15 +13,15 @@
1313
# See the License for the specific language governing permissions and
1414
# limitations under the License.
1515

16-
# [START classify_text_tutorial]
16+
# [START language_classify_text_tutorial]
1717
"""Using the classify_text method to find content categories of text files,
1818
Then use the content category labels to compare text similarity.
1919
2020
For more information, see the tutorial page at
2121
https://cloud.google.com/natural-language/docs/classify-text-tutorial.
2222
"""
2323

24-
# [START classify_text_tutorial_import]
24+
# [START language_classify_text_tutorial_imports]
2525
import argparse
2626
import io
2727
import json
@@ -30,10 +30,10 @@
3030
from google.cloud import language
3131
import numpy
3232
import six
33-
# [END classify_text_tutorial_import]
33+
# [END language_classify_text_tutorial_imports]
3434

3535

36-
# [START def_classify]
36+
# [START language_classify_text_tutorial_classify]
3737
def classify(text, verbose=True):
3838
"""Classify the input text into categories. """
3939

@@ -61,10 +61,10 @@ def classify(text, verbose=True):
6161
print(u'{:<16}: {}'.format('confidence', category.confidence))
6262

6363
return result
64-
# [END def_classify]
64+
# [END language_classify_text_tutorial_classify]
6565

6666

67-
# [START def_index]
67+
# [START language_classify_text_tutorial_index]
6868
def index(path, index_file):
6969
"""Classify each text file in a directory and write
7070
the results to the index_file.
@@ -91,10 +91,10 @@ def index(path, index_file):
9191

9292
print('Texts indexed in file: {}'.format(index_file))
9393
return result
94-
# [END def_index]
94+
# [END language_classify_text_tutorial_index]
9595

9696

97-
# [START def_split_labels]
97+
# [START language_classify_text_tutorial_split_labels]
9898
def split_labels(categories):
9999
"""The category labels are of the form "/a/b/c" up to three levels,
100100
for example "/Computers & Electronics/Software", and these labels
@@ -121,10 +121,10 @@ def split_labels(categories):
121121
_categories[label] = confidence
122122

123123
return _categories
124-
# [END def_split_labels]
124+
# [END language_classify_text_tutorial_split_labels]
125125

126126

127-
# [START def_similarity]
127+
# [START language_classify_text_tutorial_similarity]
128128
def similarity(categories1, categories2):
129129
"""Cosine similarity of the categories treated as sparse vectors."""
130130
categories1 = split_labels(categories1)
@@ -143,10 +143,10 @@ def similarity(categories1, categories2):
143143
dot += confidence * categories2.get(label, 0.0)
144144

145145
return dot / (norm1 * norm2)
146-
# [END def_similarity]
146+
# [END language_classify_text_tutorial_similarity]
147147

148148

149-
# [START def_query]
149+
# [START language_classify_text_tutorial_query]
150150
def query(index_file, text, n_top=3):
151151
"""Find the indexed files that are the most similar to
152152
the query text.
@@ -176,10 +176,10 @@ def query(index_file, text, n_top=3):
176176
print('\n')
177177

178178
return similarities
179-
# [END def_query]
179+
# [END language_classify_text_tutorial_query]
180180

181181

182-
# [START def_query_category]
182+
# [START language_classify_text_tutorial_query_category]
183183
def query_category(index_file, category_string, n_top=3):
184184
"""Find the indexed files that are the most similar to
185185
the query label.
@@ -211,7 +211,7 @@ def query_category(index_file, category_string, n_top=3):
211211
print('\n')
212212

213213
return similarities
214-
# [END def_query_category]
214+
# [END language_classify_text_tutorial_query_category]
215215

216216

217217
if __name__ == '__main__':
@@ -255,4 +255,4 @@ def query_category(index_file, category_string, n_top=3):
255255
query(args.index_file, args.text)
256256
if args.command == 'query-category':
257257
query_category(args.index_file, args.category)
258-
# [END classify_text_tutorial]
258+
# [END language_classify_text_tutorial]

language/snippets/cloud-client/v1/quickstart.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -18,16 +18,16 @@
1818
def run_quickstart():
1919
# [START language_quickstart]
2020
# Imports the Google Cloud client library
21-
# [START migration_import]
21+
# [START language_python_migration_imports]
2222
from google.cloud import language
2323
from google.cloud.language import enums
2424
from google.cloud.language import types
25-
# [END migration_import]
25+
# [END language_python_migration_imports]
2626

2727
# Instantiates a client
28-
# [START migration_client]
28+
# [START language_python_migration_client]
2929
client = language.LanguageServiceClient()
30-
# [END migration_client]
30+
# [END language_python_migration_client]
3131

3232
# The text to analyze
3333
text = u'Hello, world!'

language/snippets/cloud-client/v1/snippets.py

Lines changed: 30 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
import six
3131

3232

33-
# [START def_sentiment_text]
33+
# [START language_sentiment_text]
3434
def sentiment_text(text):
3535
"""Detects sentiment in the text."""
3636
client = language.LanguageServiceClient()
@@ -39,45 +39,45 @@ def sentiment_text(text):
3939
text = text.decode('utf-8')
4040

4141
# Instantiates a plain text document.
42-
# [START migration_document_text]
43-
# [START migration_analyze_sentiment]
42+
# [START language_python_migration_document_text]
43+
# [START language_python_migration_sentiment_text]
4444
document = types.Document(
4545
content=text,
4646
type=enums.Document.Type.PLAIN_TEXT)
47-
# [END migration_document_text]
47+
# [END language_python_migration_document_text]
4848

4949
# Detects sentiment in the document. You can also analyze HTML with:
5050
# document.type == enums.Document.Type.HTML
5151
sentiment = client.analyze_sentiment(document).document_sentiment
5252

5353
print('Score: {}'.format(sentiment.score))
5454
print('Magnitude: {}'.format(sentiment.magnitude))
55-
# [END migration_analyze_sentiment]
56-
# [END def_sentiment_text]
55+
# [END language_python_migration_sentiment_text]
56+
# [END language_sentiment_text]
5757

5858

59-
# [START def_sentiment_file]
59+
# [START language_sentiment_gcs]
6060
def sentiment_file(gcs_uri):
6161
"""Detects sentiment in the file located in Google Cloud Storage."""
6262
client = language.LanguageServiceClient()
6363

6464
# Instantiates a plain text document.
65-
# [START migration_document_gcs_uri]
65+
# [START language_python_migration_document_gcs]
6666
document = types.Document(
6767
gcs_content_uri=gcs_uri,
6868
type=enums.Document.Type.PLAIN_TEXT)
69-
# [END migration_document_gcs_uri]
69+
# [END language_python_migration_document_gcs]
7070

7171
# Detects sentiment in the document. You can also analyze HTML with:
7272
# document.type == enums.Document.Type.HTML
7373
sentiment = client.analyze_sentiment(document).document_sentiment
7474

7575
print('Score: {}'.format(sentiment.score))
7676
print('Magnitude: {}'.format(sentiment.magnitude))
77-
# [END def_sentiment_file]
77+
# [END language_sentiment_gcs]
7878

7979

80-
# [START def_entities_text]
80+
# [START language_entities_text]
8181
def entities_text(text):
8282
"""Detects entities in the text."""
8383
client = language.LanguageServiceClient()
@@ -86,7 +86,7 @@ def entities_text(text):
8686
text = text.decode('utf-8')
8787

8888
# Instantiates a plain text document.
89-
# [START migration_analyze_entities]
89+
# [START language_python_migration_entities_text]
9090
document = types.Document(
9191
content=text,
9292
type=enums.Document.Type.PLAIN_TEXT)
@@ -107,11 +107,11 @@ def entities_text(text):
107107
print(u'{:<16}: {}'.format('salience', entity.salience))
108108
print(u'{:<16}: {}'.format('wikipedia_url',
109109
entity.metadata.get('wikipedia_url', '-')))
110-
# [END migration_analyze_entities]
111-
# [END def_entities_text]
110+
# [END language_python_migration_entities_text]
111+
# [END language_entities_text]
112112

113113

114-
# [START def_entities_file]
114+
# [START language_entities_gcs]
115115
def entities_file(gcs_uri):
116116
"""Detects entities in the file located in Google Cloud Storage."""
117117
client = language.LanguageServiceClient()
@@ -137,10 +137,10 @@ def entities_file(gcs_uri):
137137
print(u'{:<16}: {}'.format('salience', entity.salience))
138138
print(u'{:<16}: {}'.format('wikipedia_url',
139139
entity.metadata.get('wikipedia_url', '-')))
140-
# [END def_entities_file]
140+
# [END language_entities_gcs]
141141

142142

143-
# [START def_syntax_text]
143+
# [START language_syntax_text]
144144
def syntax_text(text):
145145
"""Detects syntax in the text."""
146146
client = language.LanguageServiceClient()
@@ -149,7 +149,7 @@ def syntax_text(text):
149149
text = text.decode('utf-8')
150150

151151
# Instantiates a plain text document.
152-
# [START migration_analyze_syntax]
152+
# [START language_python_migration_syntax_text]
153153
document = types.Document(
154154
content=text,
155155
type=enums.Document.Type.PLAIN_TEXT)
@@ -165,11 +165,11 @@ def syntax_text(text):
165165
for token in tokens:
166166
print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
167167
token.text.content))
168-
# [END migration_analyze_syntax]
169-
# [END def_syntax_text]
168+
# [END language_python_migration_syntax_text]
169+
# [END language_syntax_text]
170170

171171

172-
# [START def_syntax_file]
172+
# [START language_syntax_gcs]
173173
def syntax_file(gcs_uri):
174174
"""Detects syntax in the file located in Google Cloud Storage."""
175175
client = language.LanguageServiceClient()
@@ -190,10 +190,10 @@ def syntax_file(gcs_uri):
190190
for token in tokens:
191191
print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
192192
token.text.content))
193-
# [END def_syntax_file]
193+
# [END language_syntax_gcs]
194194

195195

196-
# [START def_entity_sentiment_text]
196+
# [START language_entity_sentiment_text]
197197
def entity_sentiment_text(text):
198198
"""Detects entity sentiment in the provided text."""
199199
client = language.LanguageServiceClient()
@@ -223,9 +223,10 @@ def entity_sentiment_text(text):
223223
print(u' Type : {}'.format(mention.type))
224224
print(u'Salience: {}'.format(entity.salience))
225225
print(u'Sentiment: {}\n'.format(entity.sentiment))
226-
# [END def_entity_sentiment_text]
226+
# [END language_entity_sentiment_text]
227227

228228

229+
# [START language_entity_sentiment_gcs]
229230
def entity_sentiment_file(gcs_uri):
230231
"""Detects entity sentiment in a Google Cloud Storage file."""
231232
client = language.LanguageServiceClient()
@@ -251,9 +252,10 @@ def entity_sentiment_file(gcs_uri):
251252
print(u' Type : {}'.format(mention.type))
252253
print(u'Salience: {}'.format(entity.salience))
253254
print(u'Sentiment: {}\n'.format(entity.sentiment))
255+
# [END language_entity_sentiment_gcs]
254256

255257

256-
# [START def_classify_text]
258+
# [START language_classify_text]
257259
def classify_text(text):
258260
"""Classifies content categories of the provided text."""
259261
client = language.LanguageServiceClient()
@@ -271,10 +273,10 @@ def classify_text(text):
271273
print(u'=' * 20)
272274
print(u'{:<16}: {}'.format('name', category.name))
273275
print(u'{:<16}: {}'.format('confidence', category.confidence))
274-
# [END def_classify_text]
276+
# [END language_classify_text]
275277

276278

277-
# [START def_classify_file]
279+
# [START language_classify_gcs]
278280
def classify_file(gcs_uri):
279281
"""Classifies content categories of the text in a Google Cloud Storage
280282
file.
@@ -291,7 +293,7 @@ def classify_file(gcs_uri):
291293
print(u'=' * 20)
292294
print(u'{:<16}: {}'.format('name', category.name))
293295
print(u'{:<16}: {}'.format('confidence', category.confidence))
294-
# [END def_classify_file]
296+
# [END language_classify_gcs]
295297

296298

297299
if __name__ == '__main__':

language/snippets/sentiment/sentiment_analysis.py

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -11,19 +11,19 @@
1111
# See the License for the specific language governing permissions and
1212
# limitations under the License.
1313

14-
# [START sentiment_tutorial]
14+
# [START language_sentiment_tutorial]
1515
"""Demonstrates how to make a simple call to the Natural Language API."""
1616

17-
# [START sentiment_tutorial_import]
17+
# [START language_sentiment_tutorial_imports]
1818
import argparse
1919

2020
from google.cloud import language
2121
from google.cloud.language import enums
2222
from google.cloud.language import types
23-
# [END sentiment_tutorial_import]
23+
# [END language_sentiment_tutorial_imports]
2424

2525

26-
# [START def_print_result]
26+
# [START language_sentiment_tutorial_print_result]
2727
def print_result(annotations):
2828
score = annotations.document_sentiment.score
2929
magnitude = annotations.document_sentiment.magnitude
@@ -36,10 +36,10 @@ def print_result(annotations):
3636
print('Overall Sentiment: score of {} with magnitude of {}'.format(
3737
score, magnitude))
3838
return 0
39-
# [END def_print_result]
39+
# [END language_sentiment_tutorial_print_result]
4040

4141

42-
# [START def_analyze]
42+
# [START language_sentiment_tutorial_analyze_sentiment]
4343
def analyze(movie_review_filename):
4444
"""Run a sentiment analysis request on text within a passed filename."""
4545
client = language.LanguageServiceClient()
@@ -55,9 +55,10 @@ def analyze(movie_review_filename):
5555

5656
# Print the results
5757
print_result(annotations)
58-
# [END def_analyze]
58+
# [END language_sentiment_tutorial_analyze_sentiment]
5959

6060

61+
# [START language_sentiment_tutorial_run_application]
6162
if __name__ == '__main__':
6263
parser = argparse.ArgumentParser(
6364
description=__doc__,
@@ -68,4 +69,5 @@ def analyze(movie_review_filename):
6869
args = parser.parse_args()
6970

7071
analyze(args.movie_review_filename)
71-
# [END sentiment_tutorial]
72+
# [END language_sentiment_tutorial_run_application]
73+
# [END language_sentiment_tutorial]

0 commit comments

Comments
 (0)