Commit d90d9df

docs(samples): added sample and tests for annotate assessment API (#155)
Fixes #154 🦕
1 parent 07ed100 commit d90d9df

3 files changed: +78 -12 lines changed
recaptcha_enterprise/snippets/annotate_assessment.py

Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
+# Copyright 2021 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START recaptcha_enterprise_annotate_assessment]
+from google.cloud import recaptchaenterprise_v1
+
+
+def annotate_assessment(project_id: str, assessment_id: str) -> None:
+    """ Pre-requisite: Create an assessment before annotating.
+    Annotate an assessment to provide feedback on the correctness of the reCAPTCHA prediction.
+    Args:
+        project_id: Google Cloud Project ID
+        assessment_id: Value of the 'name' field returned from the create_assessment() call.
+    """
+
+    client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceClient()
+
+    assessment_name = f"projects/{project_id}/assessments/{assessment_id}"
+    # Build the annotation request.
+    # For more info on when/how to annotate, see:
+    # https://cloud.google.com/recaptcha-enterprise/docs/annotate-assessment#when_to_annotate
+    request = recaptchaenterprise_v1.AnnotateAssessmentRequest()
+    request.name = assessment_name
+    request.annotation = request.Annotation.FRAUDULENT
+    request.reasons = [request.Reason.FAILED_TWO_FACTOR]
+
+    # The API returns an empty response.
+    client.annotate_assessment(request)
+    print("Annotated response sent successfully!")
+
+
+# [END recaptcha_enterprise_annotate_assessment]
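
The sample hard-codes a FRAUDULENT annotation with a FAILED_TWO_FACTOR reason. As a rough sketch only (annotate_as_legitimate is a hypothetical helper that is not part of this commit, and the LEGITIMATE value is assumed from the annotate-assessment docs linked in the snippet), the same request shape can carry other labels:

from google.cloud import recaptchaenterprise_v1


def annotate_as_legitimate(project_id: str, assessment_id: str) -> None:
    # Hypothetical variant: mark an assessment as legitimate instead of fraudulent.
    client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceClient()
    request = recaptchaenterprise_v1.AnnotateAssessmentRequest()
    request.name = f"projects/{project_id}/assessments/{assessment_id}"
    # LEGITIMATE is assumed here; FRAUDULENT (used in the sample) is the value confirmed above.
    request.annotation = request.Annotation.LEGITIMATE
    client.annotate_assessment(request)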

recaptcha_enterprise/snippets/create_assessment.py

Lines changed: 7 additions & 1 deletion
@@ -13,12 +13,14 @@
 # limitations under the License.

 # [START recaptcha_enterprise_create_assessment]
+
 from google.cloud import recaptchaenterprise_v1
+from google.cloud.recaptchaenterprise_v1 import Assessment


 def create_assessment(
     project_id: str, recaptcha_site_key: str, token: str, recaptcha_action: str
-) -> None:
+) -> Assessment:
     """ Create an assessment to analyze the risk of a UI action.
     Args:
         project_id: GCloud Project ID
@@ -72,6 +74,10 @@ def create_assessment(
         "The reCAPTCHA score for this token is: "
         + str(response.risk_analysis.score)
     )
+    # Get the assessment name (id). Use this to annotate the assessment.
+    assessment_name = client.parse_assessment_path(response.name).get("assessment")
+    print(f"Assessment name: {assessment_name}")
+    return response


 # [END recaptcha_enterprise_create_assessment]
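
Since create_assessment() now returns the full Assessment, callers have to turn response.name (a full resource path) into the bare ID that annotate_assessment() expects. A minimal sketch of that round trip, with placeholder IDs:

from google.cloud import recaptchaenterprise_v1

client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceClient()
# The API returns a resource path like this one (placeholder project and assessment IDs).
full_name = "projects/my-project-id/assessments/assessment-id"
parsed = client.parse_assessment_path(full_name)
# parsed -> {"project": "my-project-id", "assessment": "assessment-id"}
print(parsed.get("assessment"))  # assessment-id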

recaptcha_enterprise/snippets/test_create_assessment.py

Lines changed: 28 additions & 11 deletions
@@ -11,26 +11,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import multiprocessing
 import os
 import re
 import time
 import typing

 from _pytest.capture import CaptureFixture
 from flask import Flask, render_template, url_for
+from google.cloud import recaptchaenterprise_v1
+from google.cloud.recaptchaenterprise_v1 import Assessment
 import pytest
+
 from selenium import webdriver
 from selenium.webdriver.chrome.webdriver import WebDriver

-import create_assessment
-
-# TODO(developer): Replace these variables before running the sample.
+from annotate_assessment import annotate_assessment
+from create_assessment import create_assessment
 from create_site_key import create_site_key
 from delete_site_key import delete_site_key

+
 GOOGLE_CLOUD_PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
 DOMAIN_NAME = "localhost"
+# Switch the multiprocessing start method for Python > 3.7: https://github.com/pytest-dev/pytest-flask/issues/104
+multiprocessing.set_start_method("fork")


 @pytest.fixture(scope="session")
@@ -72,16 +77,28 @@ def recaptcha_site_key() -> str:


 @pytest.mark.usefixtures("live_server")
-def test_create_assessment(
+def test_assessment(
     capsys: CaptureFixture, recaptcha_site_key: str, browser: WebDriver
 ) -> None:
+    # Get token.
     token, action = get_token(recaptcha_site_key, browser)
-    assess_token(recaptcha_site_key, token=token, action=action)
-    out, _ = capsys.readouterr()
-    assert re.search("The reCAPTCHA score for this token is: ", out)
-    score = out.rsplit(":", maxsplit=1)[1].strip()
+    # Create assessment.
+    assessment_response = assess_token(recaptcha_site_key, token=token, action=action)
+    score = str(assessment_response.risk_analysis.score)
+    client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceClient()
+    # Parse the assessment_response.name, which is of the format:
+    # {'project': 'my-project-id', 'assessment': 'assessment-id'}
+    assessment_name = client.parse_assessment_path(assessment_response.name).get(
+        "assessment"
+    )
+    assert assessment_name != ""
     set_score(browser, score)

+    # Annotate assessment.
+    annotate_assessment(project_id=GOOGLE_CLOUD_PROJECT, assessment_id=assessment_name)
+    out, _ = capsys.readouterr()
+    assert re.search("Annotated response sent successfully!", out)
+

 def get_token(recaptcha_site_key: str, browser: WebDriver) -> typing.Tuple:
     browser.get(url_for("assess", site_key=recaptcha_site_key, _external=True))
@@ -100,8 +117,8 @@ def get_token(recaptcha_site_key: str, browser: WebDriver) -> typing.Tuple:
     return token, action


-def assess_token(recaptcha_site_key: str, token: str, action: str) -> None:
-    create_assessment.create_assessment(
+def assess_token(recaptcha_site_key: str, token: str, action: str) -> Assessment:
+    return create_assessment(
         project_id=GOOGLE_CLOUD_PROJECT,
         recaptcha_site_key=recaptcha_site_key,
         token=token,
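
Outside of pytest, the flow exercised by test_assessment() can be driven directly. A sketch with placeholder values (the site key must be a real reCAPTCHA Enterprise key and the token must come from a client-side grecaptcha call):

from google.cloud import recaptchaenterprise_v1

from annotate_assessment import annotate_assessment
from create_assessment import create_assessment

project_id = "my-project-id"   # placeholder Google Cloud project
site_key = "my-site-key"       # placeholder reCAPTCHA Enterprise site key
token = "client-side-token"    # placeholder token generated in the browser
action = "homepage"            # placeholder action name

# Create the assessment, then derive the assessment ID from its resource name.
response = create_assessment(project_id, site_key, token, action)
client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceClient()
assessment_id = client.parse_assessment_path(response.name).get("assessment")

# Send feedback on the prediction for that assessment.
annotate_assessment(project_id=project_id, assessment_id=assessment_id)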
