Skip to content

Commit d1bdadf

Browse files
w-javed
authored and l0lawrence committed
Multi modal docstring improvements (Azure#38193)
* docstring-update
* doc string updates
* doc string updates
1 parent d32e70f commit d1bdadf

File tree

6 files changed

+21
-16
lines changed

6 files changed

+21
-16
lines changed

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@ class ContentSafetyMultimodalEvaluator:
3232
:type kwargs: Any
3333
3434
:return: A function that evaluates multimodal chat messages and generates content safety metrics.
35-
:rtype: Callable
3635
3736
**Usage Example**
3837
@@ -44,7 +43,7 @@ class ContentSafetyMultimodalEvaluator:
4443
"project_name": "<project_name>",
4544
}
4645
eval_fn = ContentSafetyMultimodalEvaluator(azure_ai_project)
47-
result = eval_fn(
46+
result = eval_fn(conversation=
4847
{
4948
"messages": [
5049
{
@@ -103,8 +102,9 @@ def __init__(self, credential, azure_ai_project, **kwargs):
103102
def __call__(self, *, conversation, **kwargs):
104103
"""
105104
Evaluates content-safety metrics for list of messages.
105+
106106
:keyword conversation: The conversation contains list of messages to be evaluated.
107-
Each message should have "role" and "content" keys.
107+
Each message should have "role" and "content" keys. It supports single turn only.
108108
:paramtype conversation: ~azure.ai.evaluation.Conversation
109109
:return: The evaluation score based on the Content Safety Metrics.
110110
:rtype: Dict[str, Union[float, str]]

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ class HateUnfairnessMultimodalEvaluator:
2828
"project_name": "<project_name>",
2929
}
3030
eval_fn = HateUnfairnessMultimodalEvaluator(azure_ai_project)
31-
result = eval_fn(
31+
result = eval_fn(conversation=
3232
{
3333
"messages": [
3434
{
@@ -72,8 +72,9 @@ def __init__(self, credential, azure_ai_project):
7272
def __call__(self, *, conversation, **kwargs):
7373
"""
7474
Evaluates hate unfairness content.
75+
7576
:keyword conversation: The conversation contains list of messages to be evaluated.
76-
Each message should have "role" and "content" keys.
77+
Each message should have "role" and "content" keys. It supports single turn only.
7778
:paramtype conversation: ~azure.ai.evaluation.Conversation
7879
:return: The hate unfairness score.
7980
:rtype: Dict[str, Union[float, str]]

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py

+6-5
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@ class ProtectedMaterialMultimodalEvaluator:
2222
:type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
2323
2424
:return: A dictionary containing the evaluation result label and reasoning.
25-
:rtype: Dict[str, str]
2625
2726
**Usage Example**
2827
@@ -34,7 +33,7 @@ class ProtectedMaterialMultimodalEvaluator:
3433
"project_name": "<project_name>",
3534
}
3635
eval_fn = ProtectedMaterialMultimodalEvaluator(azure_ai_project)
37-
result = eval_fn(
36+
result = eval_fn(conversation=
3837
{
3938
"messages": [
4039
{
@@ -82,8 +81,9 @@ def __call__(self, *, conversation, **kwargs):
8281
"""
8382
Evaluates protected materials content.
8483
85-
:keyword messages: The messages to be evaluated. Each message should have "role" and "content" keys.
86-
:paramtype messages: ~azure.ai.evaluation.Conversation
84+
:keyword conversation: The conversation contains list of messages to be evaluated.
85+
Each message should have "role" and "content" keys. It supports single turn only.
86+
:paramtype conversation: ~azure.ai.evaluation.Conversation
8787
:return: A dictionary containing a boolean label and reasoning.
8888
:rtype: Dict[str, str]
8989
"""
@@ -101,8 +101,9 @@ def __init__(self, credential, azure_ai_project):
101101
async def __call__(self, *, conversation, **kwargs):
102102
"""
103103
Evaluates content according to this evaluator's metric.
104+
104105
:keyword conversation: The conversation contains list of messages to be evaluated.
105-
Each message should have "role" and "content" keys.
106+
Each message should have "role" and "content" keys. It supports single turn only.
106107
:paramtype conversation: ~azure.ai.evaluation.Conversation
107108
:return: The evaluation score computation based on the Content Safety metric (self.metric).
108109
:rtype: Any

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ class SelfHarmMultimodalEvaluator:
2828
"project_name": "<project_name>",
2929
}
3030
eval_fn = SelfHarmMultimodalEvaluator(azure_ai_project)
31-
result = eval_fn(
31+
result = eval_fn(conversation=
3232
{
3333
"messages": [
3434
{
@@ -72,8 +72,9 @@ def __init__(self, credential, azure_ai_project):
7272
def __call__(self, *, conversation, **kwargs):
7373
"""
7474
Evaluates self harm content.
75+
7576
:keyword conversation: The conversation contains list of messages to be evaluated.
76-
Each message should have "role" and "content" keys.
77+
Each message should have "role" and "content" keys. It supports single turn only.
7778
:paramtype conversation: ~azure.ai.evaluation.Conversation
7879
:return: The self harm score.
7980
:rtype: Dict[str, Union[float, str]]

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_multimodal/_sexual.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ class SexualMultimodalEvaluator:
2828
"project_name": "<project_name>",
2929
}
3030
eval_fn = SexualMultimodalEvaluator(azure_ai_project)
31-
result = eval_fn(
31+
result = eval_fn(conversation=
3232
{
3333
"messages": [
3434
{
@@ -72,8 +72,9 @@ def __init__(self, credential, azure_ai_project):
7272
def __call__(self, *, conversation, **kwargs):
7373
"""
7474
Evaluates sexual content.
75+
7576
:keyword conversation: The conversation contains list of messages to be evaluated.
76-
Each message should have "role" and "content" keys.
77+
Each message should have "role" and "content" keys. It supports single turn only.
7778
:paramtype conversation: ~azure.ai.evaluation.Conversation
7879
:return: The sexual score.
7980
:rtype: Dict[str, Union[float, str]]

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_multimodal/_violence.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ class ViolenceMultimodalEvaluator:
2828
"project_name": "<project_name>",
2929
}
3030
eval_fn = ViolenceMultimodalEvaluator(azure_ai_project)
31-
result = eval_fn(
31+
result = eval_fn(conversation=
3232
{
3333
"messages": [
3434
{
@@ -72,8 +72,9 @@ def __init__(self, credential, azure_ai_project):
7272
def __call__(self, *, conversation, **kwargs):
7373
"""
7474
Evaluates violence content.
75+
7576
:keyword conversation: The conversation contains list of messages to be evaluated.
76-
Each message should have "role" and "content" keys.
77+
Each message should have "role" and "content" keys. It supports single turn only.
7778
:paramtype conversation: ~azure.ai.evaluation.Conversation
7879
:return: The violence score.
7980
:rtype: Dict[str, Union[float, str]]

0 commit comments

Comments (0)