Skip to content

Commit df9ff05

Browse files
committed
Allows using a custom OpenAI endpoint (for instance with vLLM)
1 parent 64bef7d commit df9ff05

File tree

1 file changed

+14
-2
lines changed

1 file changed

+14
-2
lines changed

src/lighteval/models/endpoints/openai_model.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ class OpenAIClient(LightevalModel):
8484

8585
def __init__(self, config: OpenAIModelConfig, env_config) -> None:
8686
api_key = os.environ["OPENAI_API_KEY"]
87-
self.client = OpenAI(api_key=api_key)
87+
self.client = OpenAI(api_key=api_key, base_url=os.getenv("OPENAI_BASE_URL"))
8888
self.generation_parameters = config.generation_parameters
8989
self.sampling_params = self.generation_parameters.to_vllm_openai_dict()
9090

@@ -99,7 +99,19 @@ def __init__(self, config: OpenAIModelConfig, env_config) -> None:
9999
self.API_RETRY_MULTIPLIER = 2
100100
self.CONCURENT_CALLS = 100
101101
self.model = config.model
102-
self._tokenizer = tiktoken.encoding_for_model(self.model)
102+
try:
103+
self._tokenizer = tiktoken.encoding_for_model(self.model)
104+
except KeyError:
105+
if "TOKENIZER_PATH" in os.environ:
106+
from transformers import AutoTokenizer
107+
108+
self._tokenizer = AutoTokenizer.from_pretrained(os.getenv("TOKENIZER_PATH"))
109+
elif os.path.exists(self.model) and os.path.isdir(self.model):
110+
from transformers import AutoTokenizer
111+
112+
self._tokenizer = AutoTokenizer.from_pretrained(self.model)
113+
else:
114+
raise
103115
self.pairwise_tokenization = False
104116

105117
def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, logit_bias):

0 commit comments

Comments (0)