Skip to content

Remove usage of keys from prompty #37708

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 21 commits into from
Oct 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions sdk/evaluation/azure-ai-evaluation/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,26 @@ evaluate(
)
```

- Simulator now requires a model configuration to call the prompty instead of an Azure AI project scope. This enables the usage of simulator with Entra ID based auth.
Before:
```python
azure_ai_project = {
"subscription_id": os.environ.get("AZURE_SUBSCRIPTION_ID"),
"resource_group_name": os.environ.get("RESOURCE_GROUP"),
"project_name": os.environ.get("PROJECT_NAME"),
}
sim = Simulator(azure_ai_project=azure_ai_project, credential=DefaultAzureCredential())
```
After:
```python
model_config = {
"azure_endpoint": os.environ.get("AZURE_OPENAI_ENDPOINT"),
"azure_deployment": os.environ.get("AZURE_DEPLOYMENT"),
}
sim = Simulator(model_config=model_config)
```
If `api_key` is not included in the `model_config`, the prompty runtime in `promptflow-core` will pick up `DefaultAzureCredential`.

### Bugs Fixed

- Fixed issue where Entra ID authentication was not working with `AzureOpenAIModelConfiguration`
Expand Down
64 changes: 30 additions & 34 deletions sdk/evaluation/azure-ai-evaluation/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -119,11 +119,6 @@ name: ApplicationPrompty
description: Simulates an application
model:
api: chat
configuration:
type: azure_openai
azure_deployment: ${env:AZURE_DEPLOYMENT}
api_key: ${env:AZURE_OPENAI_API_KEY}
azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
parameters:
temperature: 0.0
top_p: 1.0
Expand Down Expand Up @@ -152,52 +147,55 @@ import asyncio
from typing import Any, Dict, List, Optional
from azure.ai.evaluation.simulator import Simulator
from promptflow.client import load_flow
from azure.identity import DefaultAzureCredential
import os
import wikipedia

azure_ai_project = {
"subscription_id": os.environ.get("AZURE_SUBSCRIPTION_ID"),
"resource_group_name": os.environ.get("RESOURCE_GROUP"),
"project_name": os.environ.get("PROJECT_NAME")
# Set up the model configuration without api_key, using DefaultAzureCredential
model_config = {
"azure_endpoint": os.environ.get("AZURE_OPENAI_ENDPOINT"),
"azure_deployment": os.environ.get("AZURE_DEPLOYMENT"),
# not providing key would make the SDK pick up `DefaultAzureCredential`
# use "api_key": "<your API key>"
}

import wikipedia
wiki_search_term = "Leonardo da vinci"
# Use Wikipedia to get some text for the simulation
wiki_search_term = "Leonardo da Vinci"
wiki_title = wikipedia.search(wiki_search_term)[0]
wiki_page = wikipedia.page(wiki_title)
text = wiki_page.summary[:1000]

def method_to_invoke_application_prompty(query: str):
def method_to_invoke_application_prompty(query: str, messages_list: List[Dict], context: Optional[Dict]):
try:
current_dir = os.path.dirname(__file__)
prompty_path = os.path.join(current_dir, "application.prompty")
_flow = load_flow(source=prompty_path, model={
"configuration": azure_ai_project
})
_flow = load_flow(
source=prompty_path,
model=model_config,
credential=DefaultAzureCredential()
)
response = _flow(
query=query,
context=context,
conversation_history=messages_list
)
return response
except:
print("Something went wrong invoking the prompty")
except Exception as e:
print(f"Something went wrong invoking the prompty: {e}")
return "something went wrong"

async def callback(
messages: List[Dict],
messages: Dict[str, List[Dict]],
stream: bool = False,
session_state: Any = None, # noqa: ANN401
context: Optional[Dict[str, Any]] = None,
) -> dict:
messages_list = messages["messages"]
# get last message
# Get the last message from the user
latest_message = messages_list[-1]
query = latest_message["content"]
context = None
# call your endpoint or ai application here
response = method_to_invoke_application_prompty(query)
# we are formatting the response to follow the openAI chat protocol format
# Call your endpoint or AI application here
response = method_to_invoke_application_prompty(query, messages_list, context)
# Format the response to follow the OpenAI chat protocol format
formatted_response = {
"content": response,
"role": "assistant",
Expand All @@ -208,10 +206,8 @@ async def callback(
messages["messages"].append(formatted_response)
return {"messages": messages["messages"], "stream": stream, "session_state": session_state, "context": context}



async def main():
simulator = Simulator(azure_ai_project=azure_ai_project, credential=DefaultAzureCredential())
simulator = Simulator(model_config=model_config)
outputs = await simulator(
target=callback,
text=text,
Expand All @@ -222,17 +218,17 @@ async def main():
f"I am a teacher and I want to teach my students about {wiki_search_term}"
],
)
print(json.dumps(outputs))
print(json.dumps(outputs, indent=2))

if __name__ == "__main__":
os.environ["AZURE_SUBSCRIPTION_ID"] = ""
os.environ["RESOURCE_GROUP"] = ""
os.environ["PROJECT_NAME"] = ""
os.environ["AZURE_OPENAI_API_KEY"] = ""
os.environ["AZURE_OPENAI_ENDPOINT"] = ""
os.environ["AZURE_DEPLOYMENT"] = ""
# Ensure that the following environment variables are set in your environment:
# AZURE_OPENAI_ENDPOINT and AZURE_DEPLOYMENT
# Example:
# os.environ["AZURE_OPENAI_ENDPOINT"] = "https://your-endpoint.openai.azure.com/"
# os.environ["AZURE_DEPLOYMENT"] = "your-deployment-name"
asyncio.run(main())
print("done!")

```

#### Adversarial Simulator
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,6 @@ name: TaskSimulatorQueryResponse
description: Gets queries and responses from a blob of text
model:
api: chat
configuration:
type: azure_openai
azure_deployment: ${env:AZURE_DEPLOYMENT}
azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
parameters:
temperature: 0.0
top_p: 1.0
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,6 @@ name: TaskSimulatorWithPersona
description: Simulates a user to complete a conversation
model:
api: chat
configuration:
type: azure_openai
azure_deployment: ${env:AZURE_DEPLOYMENT}
azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
parameters:
temperature: 0.0
top_p: 1.0
Expand Down
Loading
Loading