|
| 1 | +{ |
| 2 | + "cells": [ |
| 3 | + { |
| 4 | + "cell_type": "markdown", |
| 5 | + "id": "d97cd4b2-0127-48ea-8237-641c715f88c9", |
| 6 | + "metadata": {}, |
| 7 | + "source": [ |
| 8 | + "# **Model**" |
| 9 | + ] |
| 10 | + }, |
| 11 | + { |
| 12 | + "cell_type": "code", |
| 13 | + "execution_count": 1, |
| 14 | + "id": "4402b36a-ced8-4157-a393-32f1a56f0913", |
| 15 | + "metadata": {}, |
| 16 | + "outputs": [], |
| 17 | + "source": [ |
| 18 | + "# Setup API Key\n", |
| 19 | + "\n", |
| 20 | + "f = open('keys/.openai_api_key.txt')\n", |
| 21 | + "\n", |
| 22 | + "OPENAI_API_KEY = f.read()" |
| 23 | + ] |
| 24 | + }, |
| 25 | + { |
| 26 | + "cell_type": "markdown", |
| 27 | + "id": "a23aea24-ce81-4540-8b2e-bf4e09fd6a67", |
| 28 | + "metadata": {}, |
| 29 | + "source": [ |
| 30 | + "## **Models - LLM and ChatModel**\n", |
| 31 | + "\n", |
| 32 | + "- A model can be a **LLM** or a **ChatModel**.\n", |
| 33 | + "- LLMs handle various language operations such as translation, summarization, question answering, and content creation. **[Click Here](https://python.langchain.com/docs/integrations/llms/)** to check the complete list of LLMs which can be used with LangChain.\n", |
| 34 | + "- Chat Models are customized for conversational usage. **[Click Here](https://python.langchain.com/docs/integrations/chat/)** to check the complete list of LLMs which can be used with LangChain.\n", |
| 35 | + "- The output of a ChatModel (and therefore, of this chain) is a message.\n", |
| 36 | + "\n", |
| 37 | + "| Module | LLM | Chat Model |\n", |
| 38 | + "| :---: | :---: | :---: |\n", |
| 39 | + "| langchain_openai | OpenAI(api_key=key, model=`gpt-3.5-turbo-instruct`) | ChatOpenAI(api_key=key, model=`gpt-3.5-turbo`) |\n", |
| 40 | + "| langchain_google_genai | GoogleGenerativeAI(api_key=key, model=`gemini-1.5-pro-latest`) | ChatGoogleGenerativeAI(api_key=key, model=`gemini-1.5-pro-latest`) |\n", |
| 41 | + "| langchain_cohere | Cohere(api_key=key, model=`gpt-3.5-turbo`) | ChatCohere(api_key=key, model=`command`) |\n", |
| 42 | + "| langchain_anthropic | Anthropic(api_key=key, model=`claude-2.1`) | ChatAnthropic(api_key=key, model=`claude-3-opus-20240229`) |" |
| 43 | + ] |
| 44 | + }, |
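|  | + { |
|  | + "cell_type": "markdown", |
|  | + "id": "anthropic-sketch-markdown", |
|  | + "metadata": {}, |
|  | + "source": [ |
|  | + "The rows above map directly to constructor calls. As a minimal, non-executed sketch (assuming `langchain-anthropic` is installed and an Anthropic key is stored at the hypothetical path `keys/.anthropic_api_key.txt`), a ChatModel from another provider is created the same way:" |
|  | + ] |
|  | + }, |
|  | + { |
|  | + "cell_type": "code", |
|  | + "execution_count": null, |
|  | + "id": "anthropic-sketch-code", |
|  | + "metadata": {}, |
|  | + "outputs": [], |
|  | + "source": [ |
|  | + "# Sketch only (not executed): an Anthropic ChatModel, following the same pattern as the table above\n", |
|  | + "# ! pip install langchain-anthropic\n", |
|  | + "from langchain_anthropic import ChatAnthropic\n", |
|  | + "\n", |
|  | + "# Hypothetical key file path; adjust to wherever your Anthropic key is stored\n", |
|  | + "ANTHROPIC_API_KEY = open('keys/.anthropic_api_key.txt').read().strip()\n", |
|  | + "\n", |
|  | + "chat_model = ChatAnthropic(api_key=ANTHROPIC_API_KEY, model=\"claude-3-opus-20240229\", temperature=1)\n", |
|  | + "print(chat_model.invoke(\"Tell me a short joke about Data Science\"))" |
|  | + ] |
|  | + }, |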
| 45 | + { |
| 46 | + "cell_type": "markdown", |
| 47 | + "id": "806bfea3-54f1-40f4-a08e-5e951df2ddf8", |
| 48 | + "metadata": {}, |
| 49 | + "source": [ |
| 50 | + "## **OpenAI LLM and Chat Model**" |
| 51 | + ] |
| 52 | + }, |
| 53 | + { |
| 54 | + "cell_type": "code", |
| 55 | + "execution_count": 2, |
| 56 | + "id": "cb318b67-fd03-4aab-ac8a-fe15c0231d92", |
| 57 | + "metadata": {}, |
| 58 | + "outputs": [], |
| 59 | + "source": [ |
| 60 | + "# ! pip install langchain-openai" |
| 61 | + ] |
| 62 | + }, |
| 63 | + { |
| 64 | + "cell_type": "code", |
| 65 | + "execution_count": 4, |
| 66 | + "id": "1dbc9226-091d-4c73-81a4-70e4e873112d", |
| 67 | + "metadata": {}, |
| 68 | + "outputs": [ |
| 69 | + { |
| 70 | + "name": "stdout", |
| 71 | + "output_type": "stream", |
| 72 | + "text": [ |
| 73 | + "\n", |
| 74 | + "\n", |
| 75 | + "India has 29 states and 7 union territories, making a total of 36 administrative divisions.\n" |
| 76 | + ] |
| 77 | + } |
| 78 | + ], |
| 79 | + "source": [ |
| 80 | + "# Import OpenAI LLM Model\n", |
| 81 | + "from langchain_openai.llms import OpenAI\n", |
| 82 | + "\n", |
| 83 | + "# Set the OpenAI Key and initialize a LLM model\n", |
| 84 | + "llm = OpenAI(api_key=OPENAI_API_KEY, model=\"gpt-3.5-turbo-instruct\", temperature=1)\n", |
| 85 | + "\n", |
| 86 | + "# Create a prompt\n", |
| 87 | + "prompt = \"How many states are there in India?\"\n", |
| 88 | + "\n", |
| 89 | + "# Pass the prompt to llm\n", |
| 90 | + "print(llm.invoke(prompt))" |
| 91 | + ] |
| 92 | + }, |
| 93 | + { |
| 94 | + "cell_type": "code", |
| 95 | + "execution_count": 5, |
| 96 | + "id": "0745ecff-bf27-4e51-8562-1b68c3cc3d99", |
| 97 | + "metadata": {}, |
| 98 | + "outputs": [ |
| 99 | + { |
| 100 | + "name": "stdout", |
| 101 | + "output_type": "stream", |
| 102 | + "text": [ |
| 103 | + "content='Why was the data scientist always calm? Because they had good \"data-minimization\" techniques!' response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 15, 'total_tokens': 34}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None}\n" |
| 104 | + ] |
| 105 | + } |
| 106 | + ], |
| 107 | + "source": [ |
| 108 | + "# Import OpenAI ChatModel\n", |
| 109 | + "from langchain_openai import ChatOpenAI\n", |
| 110 | + "\n", |
| 111 | + "# Set the OpenAI Key and initialize a ChatModel\n", |
| 112 | + "chat_model = ChatOpenAI(api_key=OPENAI_API_KEY, model=\"gpt-3.5-turbo\", temperature=1)\n", |
| 113 | + "\n", |
| 114 | + "prompt = \"Tell me a short joke about Data Science\"\n", |
| 115 | + "\n", |
| 116 | + "print(chat_model.invoke(prompt))" |
| 117 | + ] |
| 118 | + }, |
| 119 | + { |
| 120 | + "cell_type": "code", |
| 121 | + "execution_count": 6, |
| 122 | + "id": "b9f2a7cc-9776-4952-9a68-572b5ff5dc60", |
| 123 | + "metadata": {}, |
| 124 | + "outputs": [ |
| 125 | + { |
| 126 | + "data": { |
| 127 | + "text/plain": [ |
| 128 | + "AIMessage(content='Machine learning is a subset of artificial intelligence that involves developing algorithms and statistical models that allow computers to learn from and make predictions or decisions based on data, without being explicitly programmed to do so. Machine learning algorithms use patterns in data to make informed decisions and improve their performance over time. This technology is used in a wide range of applications, including image and speech recognition, medical diagnosis, recommendation systems, and predictive analytics.', response_metadata={'token_usage': {'completion_tokens': 83, 'prompt_tokens': 12, 'total_tokens': 95}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None})" |
| 129 | + ] |
| 130 | + }, |
| 131 | + "execution_count": 6, |
| 132 | + "metadata": {}, |
| 133 | + "output_type": "execute_result" |
| 134 | + } |
| 135 | + ], |
| 136 | + "source": [ |
| 137 | + "# ChatModel\n", |
| 138 | + "from langchain_openai import ChatOpenAI\n", |
| 139 | + "\n", |
| 140 | + "model = ChatOpenAI(openai_api_key=OPENAI_API_KEY, model=\"gpt-3.5-turbo\", temperature=1)\n", |
| 141 | + "\n", |
| 142 | + "prompt = \"What is Machine Learning?\"\n", |
| 143 | + "\n", |
| 144 | + "chain = model\n", |
| 145 | + "\n", |
| 146 | + "chain.invoke(prompt)\n", |
| 147 | + "\n", |
| 148 | + "# Observe that the output is a AI Message" |
| 149 | + ] |
| 150 | + }, |
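|  | + { |
|  | + "cell_type": "markdown", |
|  | + "id": "aimessage-content-markdown", |
|  | + "metadata": {}, |
|  | + "source": [ |
|  | + "Because a ChatModel returns a message object rather than a plain string, the text itself sits on the message's `content` attribute. A small illustrative step (reusing the `chain` and `prompt` defined above):" |
|  | + ] |
|  | + }, |
|  | + { |
|  | + "cell_type": "code", |
|  | + "execution_count": null, |
|  | + "id": "aimessage-content-code", |
|  | + "metadata": {}, |
|  | + "outputs": [], |
|  | + "source": [ |
|  | + "# Invoke the ChatModel again and pull out only the text of the AIMessage\n", |
|  | + "response = chain.invoke(prompt)\n", |
|  | + "print(response.content)" |
|  | + ] |
|  | + }, |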
| 151 | + { |
| 152 | + "cell_type": "markdown", |
| 153 | + "id": "bc81099d-8426-4f8e-8e9d-0cccd4915da3", |
| 154 | + "metadata": {}, |
| 155 | + "source": [ |
| 156 | + "## **Google LLM and Chat Model**" |
| 157 | + ] |
| 158 | + }, |
| 159 | + { |
| 160 | + "cell_type": "code", |
| 161 | + "execution_count": 7, |
| 162 | + "id": "1698394c-12ae-4f5a-bf2f-f731f10a61dd", |
| 163 | + "metadata": {}, |
| 164 | + "outputs": [], |
| 165 | + "source": [ |
| 166 | + "# ! pip install langchain-google-genai" |
| 167 | + ] |
| 168 | + }, |
| 169 | + { |
| 170 | + "cell_type": "code", |
| 171 | + "execution_count": 8, |
| 172 | + "id": "e380313c-365e-4a48-b96b-9828f6ae9f10", |
| 173 | + "metadata": {}, |
| 174 | + "outputs": [], |
| 175 | + "source": [ |
| 176 | + "# Setup API Key\n", |
| 177 | + "\n", |
| 178 | + "f = open('keys/.gemini.txt')\n", |
| 179 | + "\n", |
| 180 | + "GOOGLE_API_KEY = f.read()" |
| 181 | + ] |
| 182 | + }, |
| 183 | + { |
| 184 | + "cell_type": "code", |
| 185 | + "execution_count": 11, |
| 186 | + "id": "e1cad030-0dbf-4040-88bb-fcaf125a9357", |
| 187 | + "metadata": {}, |
| 188 | + "outputs": [ |
| 189 | + { |
| 190 | + "name": "stdout", |
| 191 | + "output_type": "stream", |
| 192 | + "text": [ |
| 193 | + "As of November 2023, India has 28 states. \n", |
| 194 | + "\n" |
| 195 | + ] |
| 196 | + } |
| 197 | + ], |
| 198 | + "source": [ |
| 199 | + "# Import Google LLM Model\n", |
| 200 | + "\n", |
| 201 | + "from langchain_google_genai import GoogleGenerativeAI\n", |
| 202 | + "\n", |
| 203 | + "# Set the OpenAI Key and initialize a LLM model\n", |
| 204 | + "llm = GoogleGenerativeAI(google_api_key=GOOGLE_API_KEY, model=\"gemini-1.5-pro-latest\", temperature=1)\n", |
| 205 | + "\n", |
| 206 | + "# Create a prompt\n", |
| 207 | + "prompt = \"How many states are there in India?\"\n", |
| 208 | + "\n", |
| 209 | + "# Pass the prompt to llm\n", |
| 210 | + "print(llm.invoke(prompt))" |
| 211 | + ] |
| 212 | + }, |
| 213 | + { |
| 214 | + "cell_type": "code", |
| 215 | + "execution_count": 12, |
| 216 | + "id": "079087d1-4adc-437a-bd8e-899e8a255294", |
| 217 | + "metadata": {}, |
| 218 | + "outputs": [ |
| 219 | + { |
| 220 | + "name": "stdout", |
| 221 | + "output_type": "stream", |
| 222 | + "text": [ |
| 223 | + "content='Why did the data scientist get lost on their way to work? \\n\\nThey took a wrong turn on the decision tree! \\n' response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]}\n" |
| 224 | + ] |
| 225 | + } |
| 226 | + ], |
| 227 | + "source": [ |
| 228 | + "# Import Google ChatModel\n", |
| 229 | + "from langchain_google_genai import ChatGoogleGenerativeAI\n", |
| 230 | + "\n", |
| 231 | + "# Set the OpenAI Key and initialize a ChatModel\n", |
| 232 | + "chat_model = ChatGoogleGenerativeAI(google_api_key=GOOGLE_API_KEY, model=\"gemini-1.5-pro-latest\", temperature=1)\n", |
| 233 | + "\n", |
| 234 | + "prompt = \"Tell me a short joke about Data Science\"\n", |
| 235 | + "\n", |
| 236 | + "print(chat_model.invoke(prompt))" |
| 237 | + ] |
| 238 | + } |
| 239 | + ], |
| 240 | + "metadata": { |
| 241 | + "kernelspec": { |
| 242 | + "display_name": "Python 3 (ipykernel)", |
| 243 | + "language": "python", |
| 244 | + "name": "python3" |
| 245 | + }, |
| 246 | + "language_info": { |
| 247 | + "codemirror_mode": { |
| 248 | + "name": "ipython", |
| 249 | + "version": 3 |
| 250 | + }, |
| 251 | + "file_extension": ".py", |
| 252 | + "mimetype": "text/x-python", |
| 253 | + "name": "python", |
| 254 | + "nbconvert_exporter": "python", |
| 255 | + "pygments_lexer": "ipython3", |
| 256 | + "version": "3.9.13" |
| 257 | + } |
| 258 | + }, |
| 259 | + "nbformat": 4, |
| 260 | + "nbformat_minor": 5 |
| 261 | +} |