# frozen_string_literal: true

require 'spec_helper'

RSpec.describe RubyLLM::Chat do
  include_context 'with configured RubyLLM'

  describe '#assume_model_exists' do
    let(:real_model) { 'gpt-4.1-nano' }
    let(:custom_model) { 'my-custom-model' }
    let(:provider) { :openai }
    # Keep a reference to the original models for cleanup
    let!(:original_models) { RubyLLM::Models.instance.all.dup }

    # Clean up the model registry after each test
    after do
      RubyLLM::Models.instance.instance_variable_set(:@models, original_models)
    end

    it 'requires provider when assuming model exists' do
      expect do
        described_class.new(model: custom_model, assume_model_exists: true)
      end.to raise_error(ArgumentError, /Provider must be specified/)
    end

    it 'skips registry validation when assuming model exists' do # rubocop:disable RSpec/ExampleLength,RSpec/MultipleExpectations
      expect(RubyLLM::Models).not_to receive(:find) # rubocop:disable RSpec/MessageSpies

      chat = described_class.new(
        model: custom_model,
        provider: provider,
        assume_model_exists: true
      )

      expect(chat.model.id).to eq(custom_model)
      expect(chat.model.provider).to eq(provider)
    end

    it 'works with RubyLLM.chat convenience method' do # rubocop:disable RSpec/ExampleLength
      chat = RubyLLM.chat(
        model: custom_model,
        provider: provider,
        assume_model_exists: true
      )

      expect(chat.model.id).to eq(custom_model)
    end

    it 'works with models not in registry but available in API' do # rubocop:disable RSpec/ExampleLength,RSpec/MultipleExpectations
      # Simulate the model missing from the registry
      filtered_models = original_models.reject { |m| m.id == real_model }
      RubyLLM::Models.instance.instance_variable_set(:@models, filtered_models)

      # Should raise an error when not assuming existence
      expect do
        RubyLLM.chat(model: real_model)
      end.to raise_error(RubyLLM::ModelNotFoundError)

      # Should work when assuming existence
      chat = RubyLLM.chat(
        model: real_model,
        provider: provider,
        assume_model_exists: true
      )

      # Should be able to actually use the model (relies on VCR)
      response = chat.ask('What is 2 + 2?')
      expect(response.content).to include('4')
    end

    it 'works with with_model method' do # rubocop:disable RSpec/MultipleExpectations
      chat = RubyLLM.chat

      chat.with_model(custom_model, provider: provider, assume_exists: true)

      expect(chat.model.id).to eq(custom_model)
      expect(chat.model.provider).to eq(provider)
    end
  end
end