- Notifications
You must be signed in to change notification settings - Fork 6
Rails Integration
Nagendra Dhanakeerthi edited this page Oct 30, 2024 · 1 revision
This guide demonstrates how to integrate the ChatGPT Ruby gem into a Rails application with best practices and real-world examples.
Add to your Gemfile:
# Gemfile
gem 'chatgpt-ruby'
Run:
bundle install
Create an initializer:
# config/initializers/chatgpt.rb
#
# Configures the ChatGPT client once at application boot.
ChatGPT.configure do |config|
  if Rails.env.production?
    # Use dig so a missing `openai:` section in credentials yields nil
    # instead of raising NoMethodError and crashing the boot process.
    config.api_key = Rails.application.credentials.dig(:openai, :api_key)
  else
    config.api_key = ENV['OPENAI_API_KEY']
  end

  # Longer timeout in development to accommodate debugging sessions.
  config.request_timeout = Rails.env.development? ? 60 : 30
  config.max_retries = 3

  # Default parameters applied to every request unless overridden per call.
  config.default_parameters = {
    max_tokens: 150,
    temperature: 0.7,
    top_p: 1.0
  }
end
# Development/Test — put the key in a dotenv file:
# .env
OPENAI_API_KEY=your-api-key

# Production — store the key in encrypted Rails credentials:
rails credentials:edit
# config/credentials.yml.enc
openai:
  api_key: your-production-api-key
# app/services/chat_gpt_service.rb
#
# Thin wrapper around ChatGPT::Client that logs API errors before
# re-raising them to the caller.
class ChatGPTService
  def initialize
    @client = ChatGPT::Client.new
  end

  # Sends a single chat message, optionally preceded by prior
  # conversation context (an array of { role:, content: } hashes).
  # Returns the raw API response; re-raises ChatGPT::Error after logging.
  def chat(message, context: [])
    @client.chat(context + [{ role: "user", content: message }])
  rescue ChatGPT::Error => e
    Rails.logger.error "ChatGPT Error: #{e.message}"
    raise
  end

  # Streaming variant: yields each response chunk to the given block.
  def stream_chat(message, context: [], &block)
    @client.chat_stream(context + [{ role: "user", content: message }], &block)
  rescue ChatGPT::Error => e
    Rails.logger.error "ChatGPT Error: #{e.message}"
    raise
  end
end
# app/controllers/chats_controller.rb
class ChatsController < ApplicationController
  # Required for `response.stream` in #stream — without this module the
  # response body is buffered and `stream.write`/`stream.close` are
  # unavailable, so the SSE endpoint cannot work.
  include ActionController::Live

  # Synchronous chat: returns the assistant reply as JSON.
  def create
    response = ChatGPTService.new.chat(chat_params[:message])
    render json: { message: response.dig("choices", 0, "message", "content") }
  rescue ChatGPT::Error => e
    render json: { error: e.message }, status: :unprocessable_entity
  end

  # Server-sent-events endpoint: writes each streamed chunk as a
  # `data:` frame and always closes the stream when done.
  def stream
    response.headers['Content-Type'] = 'text/event-stream'

    ChatGPTService.new.stream_chat(chat_params[:message]) do |chunk|
      content = chunk.dig("choices", 0, "delta", "content")
      response.stream.write("data: #{content}\n\n") if content
    end
  rescue ChatGPT::Error => e
    response.stream.write("error: #{e.message}\n\n")
  ensure
    response.stream.close
  end

  private

  def chat_params
    params.require(:chat).permit(:message)
  end
end
# app/models/conversation.rb
class Conversation < ApplicationRecord
  has_many :messages, dependent: :destroy

  # Chronological message history in the shape the ChatGPT API expects.
  def chat_context
    messages.order(:created_at).map do |msg|
      { role: msg.role, content: msg.content }
    end
  end

  def add_message(role:, content:)
    messages.create!(role: role, content: content)
  end

  # Persists the user message, asks ChatGPT for a reply using the prior
  # history, persists the reply, and returns the assistant's text.
  def generate_response(user_message)
    service = ChatGPTService.new

    # Capture the context BEFORE persisting the new user message:
    # ChatGPTService#chat appends the user message itself, so including
    # it in the context would send it to the API twice.
    context = chat_context

    add_message(role: "user", content: user_message)

    response = service.chat(user_message, context: context)
    assistant_message = response.dig("choices", 0, "message", "content")

    add_message(role: "assistant", content: assistant_message)

    assistant_message
  end
end

# app/models/message.rb
class Message < ApplicationRecord
  belongs_to :conversation

  validates :role, presence: true, inclusion: { in: %w[system user assistant] }
  validates :content, presence: true
end
# app/jobs/chat_completion_job.rb
#
# Generates the assistant reply for a conversation in the background
# and pushes it to subscribed clients over ActionCable.
class ChatCompletionJob < ApplicationJob
  queue_as :chat

  # Back off exponentially on rate limits; retry transient API errors
  # on a short fixed delay.
  retry_on ChatGPT::RateLimitError, wait: :exponentially_longer, attempts: 3
  retry_on ChatGPT::APIError, wait: 5.seconds, attempts: 3

  def perform(conversation_id, user_message)
    conversation = Conversation.find(conversation_id)
    reply = conversation.generate_response(user_message)

    # Notify the client through ActionCable.
    ChatChannel.broadcast_to(conversation, { message: reply })
  end
end
# app/channels/chat_channel.rb
#
# Per-conversation ActionCable channel; clients subscribe with a
# `conversation_id` param and receive broadcast assistant replies.
class ChatChannel < ApplicationCable::Channel
  def subscribed
    stream_for Conversation.find(params[:conversation_id])
  end

  def unsubscribed
    stop_all_streams
  end
end
// app/javascript/controllers/chat_controller.js
//
// Stimulus controller: posts chat messages to the server and appends
// assistant replies received over the ActionCable ChatChannel.
import { Controller } from "@hotwired/stimulus"
import consumer from "../channels/consumer"

export default class extends Controller {
  static targets = ["messages", "input"]

  connect() {
    const conversationId = this.element.dataset.conversationId

    this.channel = consumer.subscriptions.create(
      { channel: "ChatChannel", conversation_id: conversationId },
      { received: this.receive.bind(this) }
    )
  }

  async send(event) {
    event.preventDefault()

    const message = this.inputTarget.value
    const csrfToken = document.querySelector("[name='csrf-token']").content

    try {
      const response = await fetch('/chats', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'X-CSRF-Token': csrfToken
        },
        body: JSON.stringify({ chat: { message } })
      })

      if (!response.ok) throw new Error('Network response was not ok')

      this.inputTarget.value = ''
    } catch (error) {
      console.error('Error:', error)
    }
  }

  // Called by ActionCable when the job broadcasts a reply.
  receive({ message }) {
    this.messagesTarget.insertAdjacentHTML('beforeend', `
      <div class="message assistant">
        ${message}
      </div>
    `)
    this.messagesTarget.scrollTop = this.messagesTarget.scrollHeight
  }
}
<%# app/views/conversations/show.html.erb %>
<div data-controller="chat" data-conversation-id="<%= @conversation.id %>">
  <div class="messages" data-chat-target="messages">
    <% @conversation.messages.each do |message| %>
      <div class="message <%= message.role %>">
        <%= message.content %>
      </div>
    <% end %>
  </div>

  <form data-action="submit->chat#send">
    <input type="text" data-chat-target="input" placeholder="Type your message...">
    <button type="submit">Send</button>
  </form>
</div>
# app/controllers/concerns/chat_gpt_error_handler.rb
#
# Controller concern mapping ChatGPT errors to JSON error responses.
# Auth and API failures are also reported to Bugsnag.
module ChatGPTErrorHandler
  extend ActiveSupport::Concern

  included do
    rescue_from ChatGPT::AuthenticationError do |e|
      Bugsnag.notify(e)
      render json: { error: 'Authentication failed' }, status: :unauthorized
    end

    rescue_from ChatGPT::RateLimitError do |e|
      render json: { error: 'Rate limit exceeded', retry_after: 30 },
             status: :too_many_requests
    end

    rescue_from ChatGPT::APIError do |e|
      Bugsnag.notify(e)
      render json: { error: 'Service temporarily unavailable' },
             status: :service_unavailable
    end
  end
end
# config/initializers/chatgpt.rb
#
# NOTE(review): this snippet targets the same initializer path as the
# earlier configuration example — merge them into one file in practice.
ChatGPT.configure do |config|
  config.logger = Rails.logger

  # Custom error handler: forward every client error to Rails error
  # reporting with service context attached.
  config.on_error = lambda { |error|
    Rails.error.report(
      error,
      context: { service: 'chatgpt', error_type: error.class.name }
    )
  }
end
# spec/support/chatgpt_helper.rb
#
# Test helper: builds a minimal API-shaped response hash so specs can
# stub ChatGPTService without hitting the network.
module ChatGPTHelper
  def stub_chat_response(content)
    {
      "choices" => [
        {
          "message" => {
            "role" => "assistant",
            "content" => content
          }
        }
      ]
    }
  end
end

RSpec.configure { |config| config.include ChatGPTHelper }
# spec/controllers/chats_controller_spec.rb
RSpec.describe ChatsController, type: :controller do
  describe "POST #create" do
    let(:message) { "Hello!" }
    let(:chat_service) { instance_double(ChatGPTService) }

    # Replace the real service so no API call is made.
    before { allow(ChatGPTService).to receive(:new).and_return(chat_service) }

    it "returns successful response" do
      expect(chat_service).to receive(:chat)
        .with(message)
        .and_return(stub_chat_response("Hi there!"))

      post :create, params: { chat: { message: message } }

      expect(response).to have_http_status(:success)
      expect(JSON.parse(response.body)["message"]).to eq("Hi there!")
    end
  end
end
# spec/system/chat_spec.rb
RSpec.describe "Chat", type: :system do
  let(:conversation) { create(:conversation) }

  before { driven_by(:selenium_chrome_headless) }

  it "sends and receives messages" do
    visit conversation_path(conversation)

    # NOTE(review): "Message" must match the input's label/name/placeholder
    # in the view for Capybara to locate the field — verify against markup.
    fill_in "Message", with: "Hello!"
    click_button "Send"

    expect(page).to have_content("Hello!")
    expect(page).to have_css(".message.assistant", text: /\w+/)
  end
end
# app/services/chat_gpt_service.rb
#
# Caching variant of ChatGPTService: identical prompts (message plus
# context) within the TTL are served from Rails.cache instead of
# hitting the API again.
class ChatGPTService
  def initialize
    # The original snippet omitted initialize, so @client was nil and
    # every cache miss raised NoMethodError instead of calling the API.
    @client = ChatGPT::Client.new
  end

  def chat(message, context: [])
    # MD5 is used only as a cache-key digest, not for security.
    cache_key = "chatgpt/#{Digest::MD5.hexdigest(message + context.to_s)}"

    Rails.cache.fetch(cache_key, expires_in: 1.hour) do
      @client.chat(context + [{ role: "user", content: message }])
    end
  end
end
# app/controllers/chats_controller.rb
#
# Asynchronous variant of #create: enqueues a background job and
# responds 202 Accepted immediately instead of blocking on the API.
def create
  conversation = Conversation.find(params[:conversation_id])

  ChatCompletionJob.perform_later(conversation.id, chat_params[:message])

  head :accepted
end
# app/controllers/health_controller.rb
#
# Health-check endpoint: issues a 1-token completion to verify the
# ChatGPT API is reachable with the configured credentials.
class HealthController < ApplicationController
  def chatgpt
    ChatGPT::Client.new.completions("test", max_tokens: 1)
    render json: { status: "ok" }
  rescue ChatGPT::Error => e
    render json: { status: "error", message: e.message },
           status: :service_unavailable
  end
end
# config/application.yml
# Use dig so a missing `openai:` credentials section yields nil instead
# of raising NoMethodError while this file is evaluated.
production:
  OPENAI_API_KEY: <%= Rails.application.credentials.dig(:openai, :api_key) %>
  OPENAI_REQUEST_TIMEOUT: 30
  OPENAI_MAX_RETRIES: 3