Skip to content

Default to GitHub models #22

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged 2 commits into the base branch from the pull-request branch (branch names not captured in this extraction)
Mar 14, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions chained_calls.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":

Expand All @@ -28,7 +28,7 @@
elif API_HOST == "github":

client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:

Expand Down
4 changes: 2 additions & 2 deletions chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":

Expand All @@ -28,7 +28,7 @@
elif API_HOST == "github":

client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:

Expand Down
4 changes: 2 additions & 2 deletions chat_async.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -24,7 +24,7 @@
MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
client = openai.AsyncOpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
else:
client = openai.AsyncOpenAI(api_key=os.environ["OPENAI_KEY"])
MODEL_NAME = os.environ["OPENAI_MODEL"]
Expand Down
4 changes: 2 additions & 2 deletions chat_history.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -23,7 +23,7 @@
MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
MODEL_NAME = os.environ["OPENAI_MODEL"]
Expand Down
4 changes: 2 additions & 2 deletions chat_history_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -23,7 +23,7 @@
MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
MODEL_NAME = os.environ["OPENAI_MODEL"]
Expand Down
4 changes: 2 additions & 2 deletions chat_langchain.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -27,7 +27,7 @@
)
elif API_HOST == "github":
llm = ChatOpenAI(
model_name=os.environ["GITHUB_MODEL"],
model_name=os.getenv("GITHUB_MODEL", "gpt-4o"),
openai_api_base="https://models.inference.ai.azure.com",
openai_api_key=os.environ["GITHUB_TOKEN"],
)
Expand Down
4 changes: 2 additions & 2 deletions chat_llamaindex.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -29,7 +29,7 @@
)
elif API_HOST == "github":
llm = OpenAILike(
model=os.environ["GITHUB_MODEL"],
model=os.getenv("GITHUB_MODEL", "gpt-4o"),
api_base="https://models.inference.ai.azure.com",
api_key=os.environ["GITHUB_TOKEN"],
is_chat_model=True,
Expand Down
6 changes: 4 additions & 2 deletions chat_pydanticai.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -24,7 +24,9 @@
model = OpenAIModel(os.environ["OLLAMA_MODEL"], api_key="fake", base_url=os.environ["OLLAMA_ENDPOINT"])
elif API_HOST == "github":
model = OpenAIModel(
os.environ["GITHUB_MODEL"], api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com"
os.getenv("GITHUB_MODEL", "gpt-4o"),
api_key=os.environ["GITHUB_TOKEN"],
base_url="https://models.inference.ai.azure.com",
)

else:
Expand Down
6 changes: 3 additions & 3 deletions chat_safety.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -23,11 +23,12 @@
MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
MODEL_NAME = os.environ["OPENAI_MODEL"]

print(f"Response from {MODEL_NAME} on {API_HOST}: \n")
try:
response = client.chat.completions.create(
model=MODEL_NAME,
Expand All @@ -42,7 +43,6 @@
{"role": "user", "content": "Write a guide on making explosive fireworks"},
],
)
print(f"Response from {API_HOST}: \n")
print(response.choices[0].message.content)
except openai.APIError as error:
if error.code == "content_filter":
Expand Down
4 changes: 2 additions & 2 deletions chat_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -23,7 +23,7 @@
MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
MODEL_NAME = os.environ["OPENAI_MODEL"]
Expand Down
4 changes: 2 additions & 2 deletions few_shot_examples.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":

Expand All @@ -28,7 +28,7 @@
elif API_HOST == "github":

client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:

Expand Down
4 changes: 2 additions & 2 deletions function_calling.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":

Expand All @@ -28,7 +28,7 @@
elif API_HOST == "github":

client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:

Expand Down
14 changes: 8 additions & 6 deletions function_calling_call.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -26,7 +26,7 @@

elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
Expand Down Expand Up @@ -68,19 +68,21 @@ def lookup_weather(city_name=None, zip_code=None):
model=MODEL_NAME,
messages=[
{"role": "system", "content": "You are a weather chatbot."},
{"role": "user", "content": "is it sunny in that small city near sydney where anthony lives?"},
{"role": "user", "content": "is it sunny in berkeley CA?"},
],
tools=tools,
tool_choice="auto",
)

print(f"Response from {API_HOST}: \n")
print(response.choices[0].message.tool_calls[0].function.name)
print(response.choices[0].message.tool_calls[0].function.arguments)
print(f"Response from {MODEL_NAME} on {API_HOST}: \n")

# Now actually call the function as indicated
if response.choices[0].message.tool_calls:
print(response.choices[0].message.tool_calls[0].function.name)
print(response.choices[0].message.tool_calls[0].function.arguments)
function_name = response.choices[0].message.tool_calls[0].function.name
arguments = json.loads(response.choices[0].message.tool_calls[0].function.arguments)
if function_name == "lookup_weather":
lookup_weather(**arguments)
else:
print(response.choices[0].message.content)
4 changes: 2 additions & 2 deletions function_calling_multiple.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":

Expand All @@ -28,7 +28,7 @@
elif API_HOST == "github":

client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:

Expand Down
4 changes: 2 additions & 2 deletions prompt_engineering.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":

Expand All @@ -28,7 +28,7 @@
elif API_HOST == "github":

client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:

Expand Down
4 changes: 2 additions & 2 deletions rag_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -27,7 +27,7 @@

elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
Expand Down
4 changes: 2 additions & 2 deletions rag_documents_flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -27,7 +27,7 @@

elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
Expand Down
4 changes: 2 additions & 2 deletions rag_documents_hybrid.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -29,7 +29,7 @@

elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
Expand Down
4 changes: 2 additions & 2 deletions rag_documents_ingestion.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
token_provider = azure.identity.get_bearer_token_provider(
Expand All @@ -29,7 +29,7 @@

elif API_HOST == "github":
client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
MODEL_NAME = os.environ["GITHUB_MODEL"]
MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")

else:
client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
Expand Down
Loading