Commit e74218f

Add llm-models function, to list available models for a service (#165)
1 parent 74b6099 commit e74218f

9 files changed: +80, -15 lines

NEWS.org

+2-1
@@ -1,7 +1,8 @@
 * Version 0.24.0
 - Add =multi-output= as an option, allowing all llm results to return, call, or stream multiple kinds of data via a plist. This allows separating out reasoning, as well as optionally returning text and tool uses at the same time.
+- Added ~llm-models~ to get a list of models from a provider.
 - Fix misnamed ~llm-capabilities~ output to refer to =tool-use= and =streaming-tool-use= (which is new).
-- Fixed Clude streaming tool use (via Paul Nelson)
+- Fixed Claude streaming tool use (via Paul Nelson)
 - Added Deepseek service
 - Add Gemini 2.0 pro experimental model, default to 2.0 flash
 - Add OpenAI's o3 mini model

README.org

+1
@@ -154,6 +154,7 @@ For all callbacks, the callback will be executed in the buffer the function was
 - ~llm-count-tokens provider string~: Count how many tokens are in ~string~. This may vary by ~provider~, because some providers implement an API for this, but the count is typically about the same. This gives an estimate if the provider has no API support.
 - ~llm-cancel-request request~: Cancels the given request, if possible. The ~request~ object is the return value of async and streaming functions.
 - ~llm-name provider~. Provides a short name of the model or provider, suitable for showing to users.
+- ~llm-models provider~. Return a list of all the available model names for the provider. This could be either embedding or chat models. You can use ~llm-models-match~ to filter on models that have a certain capability (as long as they are in ~llm-models~).
 - ~llm-chat-token-limit~. Gets the token limit for the chat model. This isn't possible for some backends like =llama.cpp=, in which the model isn't selected or known by this library.
 
 And the following helper functions:
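
To illustrate how ~llm-models~ composes with ~llm-models-match~, a minimal sketch; the Ollama provider, model name, and =image-input= filter are assumptions for illustration, and any provider whose ~llm-capabilities~ includes =model-list= works the same way:

(require 'llm)
(require 'llm-ollama)
(require 'seq)
(require 'subr-x)

;; Hypothetical setup; substitute the service you actually use.
(let ((provider (make-llm-ollama :chat-model "llama3")))
  (when (member 'model-list (llm-capabilities provider))
    ;; Keep only the names known to the built-in model table that
    ;; support image input.
    (seq-filter (lambda (name)
                  (when-let ((model (llm-models-match name)))
                    (member 'image-input (llm-model-capabilities model))))
                (llm-models provider))))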

llm-claude.el

+15-1
@@ -28,6 +28,7 @@
 (require 'llm)
 (require 'llm-provider-utils)
 (require 'plz-event-source)
+(require 'plz)
 (require 'rx)
 
 ;; Models defined at https://docs.anthropic.com/claude/docs/models-overview
@@ -226,9 +227,13 @@ DATA is a vector of lists produced by `llm-provider-streaming-media-handler'."
     (format "Error %s: '%s'" (assoc-default 'type err)
             (assoc-default 'message err))))
 
+(defun llm-claude--url (method)
+  "Return a Claude API URL for METHOD."
+  (format "https://api.anthropic.com/v1/%s" method))
+
 (cl-defmethod llm-provider-chat-url ((_ llm-claude))
   "Return the URL for the Claude API."
-  "https://api.anthropic.com/v1/messages")
+  (llm-claude--url "messages"))
 
 (cl-defmethod llm-chat-token-limit ((provider llm-claude))
   (llm-provider-utils-model-token-limit (llm-claude-chat-model provider)))
@@ -249,6 +254,15 @@ DATA is a vector of lists produced by `llm-provider-streaming-media-handler'."
       'user
     'assistant)))
 
+(cl-defmethod llm-models ((provider llm-claude))
+  (mapcar (lambda (model)
+            (plist-get model :id))
+          (append
+           (plist-get (plz 'get (llm-claude--url "models")
+                        :as (lambda () (json-parse-buffer :object-type 'plist))
+                        :headers (llm-provider-headers provider))
+                      :data)
+           nil)))
 
 (provide 'llm-claude)
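
A side note on the ~(append ... nil)~ pattern in this new method (the Gemini and OpenAI methods later in this commit use it too): ~json-parse-buffer~ returns JSON arrays as Lisp vectors, and appending ~nil~ is the standard idiom for coercing a vector into a list. For example (the ids are made up):

(append [(:id "model-a") (:id "model-b")] nil)
;; => ((:id "model-a") (:id "model-b"))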

llm-deepseek.el

+1-1
@@ -70,7 +70,7 @@ RESPONSE can be nil if the response is complete."
               (funcall receiver response))))))))))
 
 (cl-defmethod llm-capabilities ((provider llm-deepseek))
-  (append '(streaming)
+  (append '(streaming model-list)
          (when-let* ((model (llm-models-match (llm-deepseek-chat-model provider))))
            (llm-model-capabilities model))))

llm-gemini.el

+17-2
@@ -58,11 +58,14 @@ You can get this at https://makersuite.google.com/app/apikey."
 (cl-defmethod llm-provider-embedding-extract-result ((_ llm-gemini) response)
   (assoc-default 'values (assoc-default 'embedding response)))
 
+(defconst llm-gemini--base-url "https://generativelanguage.googleapis.com/v1beta/models/")
+
 ;; from https://ai.google.dev/tutorials/rest_quickstart
 (defun llm-gemini--chat-url (provider streaming-p)
   "Return the URL for the chat request, using PROVIDER.
 If STREAMING-P is non-nil, use the streaming endpoint."
-  (format "https://generativelanguage.googleapis.com/v1beta/models/%s:%s?key=%s"
+  (format "%s%s:%s?key=%s"
+          llm-gemini--base-url
           (llm-gemini-chat-model provider)
           (if streaming-p "streamGenerateContent" "generateContent")
           (if (functionp (llm-gemini-key provider))
@@ -90,13 +93,25 @@ If STREAMING-P is non-nil, use the streaming endpoint."
 
 (cl-defmethod llm-capabilities ((provider llm-gemini))
   (append
-   (list 'streaming 'embeddings)
+   '(streaming embeddings model-list)
   (when-let ((model (llm-models-match (llm-gemini-chat-model provider)))
              (capabilities (llm-model-capabilities model)))
     (append
      (when (member 'tool-use capabilities) '(tool-use streaming-tool-use))
      (seq-intersection capabilities '(image-input audio-input video-input))))))
 
+(cl-defmethod llm-models ((provider llm-gemini))
+  (mapcar (lambda (model)
+            (plist-get model :name))
+          (append
+           (plist-get (plz 'get (format "%s?key=%s" llm-gemini--base-url
+                                        (if (functionp (llm-gemini-key provider))
+                                            (funcall (llm-gemini-key provider))
+                                          (llm-gemini-key provider)))
+                        :as (lambda () (json-parse-buffer :object-type 'plist)))
+                      :models)
+           nil)))
+
 (provide 'llm-gemini)
 
 ;;; llm-gemini.el ends here
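
One caveat: Google's ListModels endpoint reports each ~:name~ as a full resource path (e.g. =models/gemini-1.5-flash=), so the names returned here keep that prefix. If bare ids are wanted, a hypothetical post-processing step, not part of this commit, could strip it:

(require 'subr-x)  ; for `string-remove-prefix'
;; `gemini-provider' is assumed to be a configured llm-gemini instance.
(mapcar (lambda (name) (string-remove-prefix "models/" name))
        (llm-models gemini-provider))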

llm-integration-test.el

+7
@@ -422,4 +422,11 @@ else. We really just want to see if it's in the right ballpark."
   (should (integerp result))
   (should (> result 0))))
 
+(llm-def-integration-test llm-models (provider)
+  (when (member 'model-list (llm-capabilities provider))
+    (let ((models (llm-models provider)))
+      (should models)
+      (should (listp models))
+      (should (> (length models) 0)))))
+
 (provide 'llm-integration-test)

llm-ollama.el

+10-1
@@ -29,6 +29,7 @@
 (require 'llm)
 (require 'llm-provider-utils)
 (require 'llm-models)
+(require 'plz)
 (require 'json)
 (require 'plz-media-type)
 
@@ -229,7 +230,7 @@ PROVIDER is the llm-ollama provider."
     2048))
 
 (cl-defmethod llm-capabilities ((provider llm-ollama))
-  (append '(streaming json-response)
+  (append '(streaming json-response model-list)
          (when (and (llm-ollama-embedding-model provider)
                     (let ((embedding-model (llm-models-match
                                             (llm-ollama-embedding-model provider))))
@@ -243,6 +244,14 @@ PROVIDER is the llm-ollama provider."
            (when (member 'tool-use capabilities) '(tool-use))
            (seq-intersection capabilities '(image-input))))))
 
+(cl-defmethod llm-models ((provider llm-ollama))
+  (mapcar (lambda (model-data)
+            (plist-get model-data :name))
+          (plist-get (plz 'get (llm-ollama--url provider "tags")
+                       :as (lambda ()
+                             (json-parse-buffer :object-type 'plist)))
+                     :models)))
+
 (provide 'llm-ollama)
 
 ;;; llm-ollama.el ends here
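
For reference, Ollama's =/api/tags= endpoint (which ~llm-ollama--url~ resolves to here) lists the locally pulled models. Parsed with ~:object-type 'plist~, the response is shaped roughly as follows (abridged; field set per Ollama's API docs):

;; (:models [(:name "llama3:latest" :modified_at "..." :size 4661224676)
;;           (:name "nomic-embed-text:latest" ...)])
;;
;; `mapcar' accepts vectors directly, so no `(append ... nil)' coercion
;; is needed in this method.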

llm-openai.el

+13-2
@@ -30,6 +30,7 @@
 (require 'llm-provider-utils)
 (require 'llm-models)
 (require 'json)
+(require 'plz)
 (require 'plz-event-source)
 
 (defgroup llm-openai nil
@@ -369,18 +370,28 @@ RESPONSE can be nil if the response is complete."
   (llm-provider-utils-model-token-limit (llm-openai-chat-model provider)))
 
 (cl-defmethod llm-capabilities ((provider llm-openai))
-  (append '(streaming embeddings tool-use streaming-tool-use json-response)
+  (append '(streaming embeddings tool-use streaming-tool-use json-response model-list)
          (when-let ((model (llm-models-match (llm-openai-chat-model provider))))
            (seq-intersection (llm-model-capabilities model)
                              '(image-input)))))
 
 (cl-defmethod llm-capabilities ((provider llm-openai-compatible))
-  (append '(streaming)
+  (append '(streaming model-list)
          (when (llm-openai-embedding-model provider)
            '(embeddings embeddings-batch))
          (when-let* ((model (llm-models-match (llm-openai-chat-model provider))))
            (llm-model-capabilities model))))
 
+(cl-defmethod llm-models ((provider llm-openai))
+  (mapcar (lambda (model)
+            (plist-get model :id))
+          (append
+           (plist-get (plz 'get (llm-openai--url provider "models")
+                        :as (lambda () (json-parse-buffer :object-type 'plist))
+                        :headers (llm-openai--headers provider))
+                      :data)
+           nil)))
+
 (provide 'llm-openai)
 
 ;;; llm-openai.el ends here

llm.el

+14-7
@@ -491,11 +491,7 @@ be passed to `llm-cancel-request'."
   "Log the input to llm-chat-async."
   (llm--log 'api-send :provider provider :prompt prompt)
   ;; We need to wrap the callbacks before we set llm-log to nil.
-  (let* ((new-partial-callback (lambda (response)
-                                 (when partial-callback
-                                   (let ((llm-log nil))
-                                     (funcall partial-callback response)))))
-         (new-response-callback (lambda (response)
+  (let* ((new-response-callback (lambda (response)
                                   (llm--log 'api-receive :provider provider :msg response)
                                   (let ((llm-log nil))
                                     (funcall response-callback response))))
@@ -505,7 +501,7 @@ be passed to `llm-cancel-request'."
                                 (let ((llm-log nil))
                                   (funcall error-callback type err))))
          (llm-log nil)
-         (result (cl-call-next-method provider prompt new-partial-callback
+         (result (cl-call-next-method provider prompt partial-callback
                                       new-response-callback
                                       new-error-callback multi-output)))
     result))
@@ -593,7 +589,9 @@ JSON format.
 
 `video-input': the LLM can accept video as input.
 
-`audio-input': the LLM can accept audio as input."
+`audio-input': the LLM can accept audio as input.
+
+`model-list': the provider can return a list of models."
   (ignore provider)
   nil)
 
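A sketch of gating on the new capability, with ~my-provider~ standing in for any configured provider (the default ~llm-models~ method, added below, signals =not-implemented=):

(when (member 'model-list (llm-capabilities my-provider))
  (llm-models my-provider))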

@@ -705,6 +703,15 @@ methods."
 (cl-defmethod llm-cancel-request ((proc process))
   (delete-process proc))
 
+(cl-defgeneric llm-models (provider)
+  "Return a list of model names for PROVIDER.
+This is not asynchronous, but should be fast.
+
+Not every provider supports this; check for `model-list' in
+`llm-capabilities' before calling."
+  (ignore provider)
+  (signal 'not-implemented nil))
+
 (cl-defgeneric llm-name (_)
   "Return the name of the model in PROVIDER.
 This is expected to be suitable for short labels. For example, if
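
Because ~llm-models~ is a ~cl-defgeneric~, third-party providers can opt in by adding a method of their own. A minimal, entirely hypothetical sketch (=llm-acme= is invented for illustration):

(require 'cl-lib)
(require 'llm)

(cl-defstruct llm-acme)  ; hypothetical provider type

(cl-defmethod llm-models ((_ llm-acme))
  ;; A real provider would query its service; a static list suffices here.
  '("acme-small" "acme-large"))

(llm-models (make-llm-acme))  ; => ("acme-small" "acme-large")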
