diff --git a/.gitignore b/.gitignore index a0974550..5ffffa79 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ __pycache__/ env/ .env .google-adc +repomix-output.* # Testing .coverage diff --git a/LICENSE b/LICENSE index 60d38246..7062cc11 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,7 @@ MIT License Copyright (c) 2024 Andrew Ng +Copyright (c) 2025 Dr. Hubertus Becker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including diff --git a/README.md b/README.md index 19af4b4d..7b63c2df 100644 --- a/README.md +++ b/README.md @@ -1,211 +1,32 @@ -# aisuite +# AISuite -[![PyPI](https://img.shields.io/pypi/v/aisuite)](https://pypi.org/project/aisuite/) -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) - -Simple, unified interface to multiple Generative AI providers. - -`aisuite` makes it easy for developers to use multiple LLM through a standardized interface. Using an interface similar to OpenAI's, `aisuite` makes it easy to interact with the most popular LLMs and compare the results. It is a thin wrapper around python client libraries, and allows creators to seamlessly swap out and test responses from different LLM providers without changing their code. Today, the library is primarily focussed on chat completions. We will expand it cover more use cases in near future. - -Currently supported providers are - -OpenAI, Anthropic, Azure, Google, AWS, Groq, Mistral, HuggingFace Ollama, Sambanova and Watsonx. -To maximize stability, `aisuite` uses either the HTTP endpoint or the SDK for making calls to the provider. +This is my fork of the original [aisuite](https://github.com/andrewyng/aisuite) repository. Everything remains the same except that a new **GitHub Provider** has been added to enable free GPT-4 access via a GitHub Copilot subscription. This addition is inspired by [freegpt](https://github.com/B00TK1D/freegpt). ## Installation -You can install just the base `aisuite` package, or install a provider's package along with `aisuite`. - -This installs just the base package without installing any provider's SDK. - -```shell -pip install aisuite -``` - -This installs aisuite along with anthropic's library. +Clone the repository and install the dependencies: -```shell -pip install 'aisuite[anthropic]' +```bash +git clone https://github.com/hubertusgbecker/aisuite.git +cd aisuite +pip install -e . ``` -This installs all the provider-specific libraries +## Usage -```shell -pip install 'aisuite[all]' -``` - -## Set up - -To get started, you will need API Keys for the providers you intend to use. You'll need to -install the provider-specific library either separately or when installing aisuite. - -The API Keys can be set as environment variables, or can be passed as config to the aisuite Client constructor. -You can use tools like [`python-dotenv`](https://pypi.org/project/python-dotenv/) or [`direnv`](https://direnv.net/) to set the environment variables manually. Please take a look at the `examples` folder to see usage. - -Here is a short example of using `aisuite` to generate chat completion responses from gpt-4o and claude-3-5-sonnet. - -Set the API keys. - -```shell -export OPENAI_API_KEY="your-openai-api-key" -export ANTHROPIC_API_KEY="your-anthropic-api-key" -``` - -Use the python client. 
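+On first use, the GitHub Provider authenticates via GitHub's device-code
+flow: it prints a verification URL and a one-time code, then caches the
+resulting OAuth token in a `.copilot_token` file in the current working
+directory (see `github_provider.py` further down in this diff). A minimal
+sketch for forcing re-authentication, assuming that default cache location:
+
+```python
+import os
+
+# The GitHub provider caches its OAuth token in ./.copilot_token after the
+# first device-code login; deleting the file forces a fresh login on the
+# next call.
+if os.path.exists(".copilot_token"):
+    os.remove(".copilot_token")
+```
+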
+Use the new GitHub Provider as follows: ```python -import aisuite as ai -client = ai.Client() - -models = ["openai:gpt-4o", "anthropic:claude-3-5-sonnet-20240620"] - -messages = [ - {"role": "system", "content": "Respond in Pirate English."}, - {"role": "user", "content": "Tell me a joke."}, -] - -for model in models: - response = client.chat.completions.create( - model=model, - messages=messages, - temperature=0.75 - ) - print(response.choices[0].message.content) - -``` - -Note that the model name in the create() call uses the format - `:`. -`aisuite` will call the appropriate provider with the right parameters based on the provider value. -For a list of provider values, you can look at the directory - `aisuite/providers/`. The list of supported providers are of the format - `_provider.py` in that directory. We welcome providers adding support to this library by adding an implementation file in this directory. Please see section below for how to contribute. - -For more examples, check out the `examples` directory where you will find several notebooks that you can run to experiment with the interface. - -## Adding support for a provider - -We have made easy for a provider or volunteer to add support for a new platform. - -### Naming Convention for Provider Modules - -We follow a convention-based approach for loading providers, which relies on strict naming conventions for both the module name and the class name. The format is based on the model identifier in the form `provider:model`. +from aisuite.providers.github_provider import GitHubProvider +from aisuite.framework.message import Message -- The provider's module file must be named in the format `_provider.py`. -- The class inside this module must follow the format: the provider name with the first letter capitalized, followed by the suffix `Provider`. - -#### Examples - -- **Hugging Face**: - The provider class should be defined as: - - ```python - class HuggingfaceProvider(BaseProvider) - ``` - - in providers/huggingface_provider.py. - -- **OpenAI**: - The provider class should be defined as: - - ```python - class OpenaiProvider(BaseProvider) - ``` - - in providers/openai_provider.py - -This convention simplifies the addition of new providers and ensures consistency across provider implementations. - -## Tool Calling - -`aisuite` provides a simple abstraction for tool/function calling that works across supported providers. This is in addition to the regular abstraction of passing JSON spec of the tool to the `tools` parameter. The tool calling abstraction makes it easy to use tools with different LLMs without changing your code. - -There are two ways to use tools with `aisuite`: - -### 1. Manual Tool Handling - -This is the default behavior when `max_turns` is not specified. -You can pass tools in the OpenAI tool format: - -```python -def will_it_rain(location: str, time_of_day: str): - """Check if it will rain in a location at a given time today. - - Args: - location (str): Name of the city - time_of_day (str): Time of the day in HH:MM format. - """ - return "YES" - -tools = [{ - "type": "function", - "function": { - "name": "will_it_rain", - "description": "Check if it will rain in a location at a given time today", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "Name of the city" - }, - "time_of_day": { - "type": "string", - "description": "Time of the day in HH:MM format." 
- } - }, - "required": ["location", "time_of_day"] - } - } -}] - -response = client.chat.completions.create( - model="openai:gpt-4o", - messages=messages, - tools=tools -) -``` - -### 2. Automatic Tool Execution - -When `max_turns` is specified, you can pass a list of callable Python functions as the `tools` parameter. `aisuite` will automatically handle the tool calling flow: - -```python -def will_it_rain(location: str, time_of_day: str): - """Check if it will rain in a location at a given time today. - - Args: - location (str): Name of the city - time_of_day (str): Time of the day in HH:MM format. - """ - return "YES" - -client = ai.Client() -messages = [{ - "role": "user", - "content": "I live in San Francisco. Can you check for weather " - "and plan an outdoor picnic for me at 2pm?" -}] - -# Automatic tool execution with max_turns -response = client.chat.completions.create( - model="openai:gpt-4o", - messages=messages, - tools=[will_it_rain], - max_turns=2 # Maximum number of back-and-forth tool calls -) +provider = GitHubProvider() +prompt = {"content": "Hello, what is the capital of Germany?", "role": "user"} +response = provider.chat_completions_create("gpt-4", [prompt]) print(response.choices[0].message.content) ``` -When `max_turns` is specified, `aisuite` will: -1. Send your message to the LLM -2. Execute any tool calls the LLM requests -3. Send the tool results back to the LLM -4. Repeat until the conversation is complete or max_turns is reached - -In addition to `response.choices[0].message`, there is an additional field `response.choices[0].intermediate_messages`: which contains the list of all messages including tool interactions used. This can be used to continue the conversation with the model. -For more detailed examples of tool calling, check out the `examples/tool_calling_abstraction.ipynb` notebook. - ## License aisuite is released under the MIT License. You are free to use, modify, and distribute the code for both commercial and non-commercial purposes. -## Contributing - -If you would like to contribute, please read our [Contributing Guide](https://github.com/andrewyng/aisuite/blob/main/CONTRIBUTING.md) and join our [Discord](https://discord.gg/T6Nvn8ExSb) server! diff --git a/aisuite/providers/github_provider.py b/aisuite/providers/github_provider.py new file mode 100644 index 00000000..b30d8ebc --- /dev/null +++ b/aisuite/providers/github_provider.py @@ -0,0 +1,159 @@ +import os +import time +import json +import requests + +from aisuite.provider import Provider, LLMError +from aisuite.framework.chat_completion_response import ChatCompletionResponse +from aisuite.framework.message import Message + +class GitHubProvider(Provider): + """ + GitHubProvider integrates GitHub Copilot's experimental GPT-4-based chat completions. + It authenticates via GitHub's device code flow and retrieves an internal session token before + sending chat messages. Access is provided solely through a GitHub Copilot subscription. + """ + + MODEL = 'gpt-4' + + def __init__(self, **config): + # Optionally override client_id via config. + self.client_id = config.get('client_id', 'Iv1.b507a08c87ecfe98') + self.token = None # Session token for API calls + self.access_token = None # GitHub OAuth access token + self.messages = [] # Conversation history + + def _setup(self): + # Request device and user codes for authentication. 
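+        # The steps below implement GitHub's OAuth device-code grant:
+        #   1. POST /login/device/code with the client_id to obtain a
+        #      device_code plus a user_code/verification_uri for the user.
+        #   2. The user opens verification_uri and enters user_code.
+        #   3. Poll /login/oauth/access_token until GitHub exchanges the
+        #      device_code for an access_token, which is then cached on disk.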
+ resp = requests.post( + 'https://github.com/login/device/code', + headers={ + 'accept': 'application/json', + 'editor-version': 'Neovim/0.6.1', + 'editor-plugin-version': 'copilot.vim/1.16.0', + 'content-type': 'application/json', + 'user-agent': 'GithubCopilot/1.155.0', + 'accept-encoding': 'gzip,deflate,br' + }, + data=json.dumps({"client_id": self.client_id, "scope": "read:user"}) + ) + resp_json = resp.json() + device_code = resp_json.get('device_code') + user_code = resp_json.get('user_code') + verification_uri = resp_json.get('verification_uri') + print(f'Please visit {verification_uri} and enter code {user_code} to authenticate.') + + # Poll for OAuth access token. + while True: + time.sleep(5) + resp = requests.post( + 'https://github.com/login/oauth/access_token', + headers={ + 'accept': 'application/json', + 'editor-version': 'Neovim/0.6.1', + 'editor-plugin-version': 'copilot.vim/1.16.0', + 'content-type': 'application/json', + 'user-agent': 'GithubCopilot/1.155.0', + 'accept-encoding': 'gzip,deflate,br' + }, + data=json.dumps({ + "client_id": self.client_id, + "device_code": device_code, + "grant_type": "urn:ietf:params:oauth:grant-type:device_code" + }) + ) + resp_json = resp.json() + access_token = resp_json.get('access_token') + if access_token: + break + + # Save the access token to a file for reuse. + with open('.copilot_token', 'w') as f: + f.write(access_token) + print('Authentication success!') + self.access_token = access_token + + def _get_token(self): + # Check if the .copilot_token file exists; if not, run the setup. + if not os.path.exists('.copilot_token'): + self._setup() + else: + with open('.copilot_token', 'r') as f: + self.access_token = f.read() + # Retrieve a session token using the stored access token. + resp = requests.get( + 'https://api.github.com/copilot_internal/v2/token', + headers={ + 'authorization': f'token {self.access_token}', + 'editor-version': 'Neovim/0.6.1', + 'editor-plugin-version': 'copilot.vim/1.16.0', + 'user-agent': 'GithubCopilot/1.155.0' + } + ) + resp_json = resp.json() + self.token = resp_json.get('token') + + def chat_completions_create(self, model, messages, **kwargs): + """ + Creates a chat completion using GitHub Copilot's experimental endpoint. + The provided messages are merged with the conversation history. + """ + # Ensure a valid session token exists. + if self.token is None: + self._get_token() + + # Merge incoming messages into the conversation history. + if messages: + # Assuming messages is a list of dicts in the format {"content": ..., "role": ...} + self.messages.extend(messages) + + try: + resp = requests.post( + 'https://api.githubcopilot.com/chat/completions', + headers={ + 'authorization': f'Bearer {self.token}', + 'Editor-Version': 'vscode/1.80.1', + }, + json={ + 'intent': False, + 'model': self.MODEL, + 'temperature': 0, + 'top_p': 1, + 'n': 1, + 'stream': True, + 'messages': self.messages + } + ) + except requests.exceptions.ConnectionError as e: + raise LLMError(f"Connection error: {e}") + + result = '' + # Process the streaming response. + for line in resp.text.split('\n'): + if line.startswith('data: {'): + try: + json_completion = json.loads(line[6:]) + delta = json_completion.get('choices')[0].get('delta', {}) + completion = delta.get('content') + if completion: + result += completion + else: + result += '\n' + except Exception: + continue + + # Append the assistant's response to the conversation history. 
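+        # self.messages persists on this provider instance, so the reply is
+        # retained and later calls continue the same conversation; create a
+        # new GitHubProvider to start from a clean history.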
+ self.messages.append({ + "content": result, + "role": "assistant" + }) + + if result == '': + print(resp.status_code) + print(resp.text) + + # Build and return a ChatCompletionResponse object. + response_obj = ChatCompletionResponse() + response_obj.choices[0].message.content = result + response_obj.choices[0].message.role = "assistant" + return response_obj diff --git a/examples/AISuiteDemo.ipynb b/examples/AISuiteDemo.ipynb index b561d944..3955c074 100644 --- a/examples/AISuiteDemo.ipynb +++ b/examples/AISuiteDemo.ipynb @@ -1,637 +1,626 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "hZq_yZRcbxdI" + }, + "source": [ + "AI Suite is a light wrapper to provide a unified interface between LLM providers." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { "colab": { - "provenance": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" + "base_uri": "https://localhost:8080/" }, - "language_info": { - "name": "python" + "id": "1mt8kgFHXMvv", + "outputId": "b56619e8-0dd8-4850-d3b2-1f1169672aab" + }, + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'aisuite'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[23], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Install AI Suite\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;66;03m#!pip install -e \"/User/hubertus/Projects/aisuite/\" # \"aisuite[all]\"\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01maisuite\u001b[39;00m \u001b[38;5;66;03m# [all]'\u001b[39;00m\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'aisuite'" + ] } + ], + "source": [ + "# Install AI Suite\n", + "#!pip install -e \"/User/hubertus/Projects/aisuite/\" # \"aisuite[all]\"\n", + "import aisuite # [all]'" + ] }, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "AI Suite is a light wrapper to provide a unified interface between LLM providers." 
- ], - "metadata": { - "id": "hZq_yZRcbxdI" - } - }, - { - "cell_type": "code", - "source": [ - "# Install AI Suite\n", - "!pip install aisuite[all]" - ], - "metadata": { - "id": "1mt8kgFHXMvv", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "collapsed": true, - "outputId": "b56619e8-0dd8-4850-d3b2-1f1169672aab" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Collecting aisuite[all]\n", - " Downloading aisuite-0.1.5-py3-none-any.whl.metadata (4.1 kB)\n", - "Collecting anthropic<0.31.0,>=0.30.1 (from aisuite[all])\n", - " Downloading anthropic-0.30.1-py3-none-any.whl.metadata (18 kB)\n", - "Collecting groq<0.10.0,>=0.9.0 (from aisuite[all])\n", - " Downloading groq-0.9.0-py3-none-any.whl.metadata (13 kB)\n", - "Requirement already satisfied: openai<2.0.0,>=1.35.8 in /usr/local/lib/python3.10/dist-packages (from aisuite[all]) (1.52.2)\n", - "Requirement already satisfied: anyio<5,>=3.5.0 in /usr/local/lib/python3.10/dist-packages (from anthropic<0.31.0,>=0.30.1->aisuite[all]) (3.7.1)\n", - "Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.10/dist-packages (from anthropic<0.31.0,>=0.30.1->aisuite[all]) (1.9.0)\n", - "Requirement already satisfied: httpx<1,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from anthropic<0.31.0,>=0.30.1->aisuite[all]) (0.27.2)\n", - "Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from anthropic<0.31.0,>=0.30.1->aisuite[all]) (0.6.1)\n", - "Requirement already satisfied: pydantic<3,>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from anthropic<0.31.0,>=0.30.1->aisuite[all]) (2.9.2)\n", - "Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from anthropic<0.31.0,>=0.30.1->aisuite[all]) (1.3.1)\n", - "Requirement already satisfied: tokenizers>=0.13.0 in /usr/local/lib/python3.10/dist-packages (from anthropic<0.31.0,>=0.30.1->aisuite[all]) (0.19.1)\n", - "Requirement already satisfied: typing-extensions<5,>=4.7 in /usr/local/lib/python3.10/dist-packages (from anthropic<0.31.0,>=0.30.1->aisuite[all]) (4.12.2)\n", - "Requirement already satisfied: tqdm>4 in /usr/local/lib/python3.10/dist-packages (from openai<2.0.0,>=1.35.8->aisuite[all]) (4.66.6)\n", - "Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.10/dist-packages (from anyio<5,>=3.5.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (3.10)\n", - "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<5,>=3.5.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (1.2.2)\n", - "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (2024.8.30)\n", - "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (1.0.6)\n", - "Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.10/dist-packages (from httpcore==1.*->httpx<1,>=0.23.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (0.14.0)\n", - "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (0.7.0)\n", - "Requirement already satisfied: pydantic-core==2.23.4 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (2.23.4)\n", - "Requirement already satisfied: 
huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from tokenizers>=0.13.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (0.24.7)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (3.16.1)\n", - "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (2024.10.0)\n", - "Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (24.1)\n", - "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (6.0.2)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (2.32.3)\n", - "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (3.4.0)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.0->anthropic<0.31.0,>=0.30.1->aisuite[all]) (2.2.3)\n", - "Downloading anthropic-0.30.1-py3-none-any.whl (863 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m863.9/863.9 kB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hDownloading groq-0.9.0-py3-none-any.whl (103 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m103.5/103.5 kB\u001b[0m \u001b[31m5.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hDownloading aisuite-0.1.5-py3-none-any.whl (19 kB)\n", - "Installing collected packages: aisuite, groq, anthropic\n", - "Successfully installed aisuite-0.1.5 anthropic-0.30.1 groq-0.9.0\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "### Custom Pretty Printing Function\n", - "In this section, we define a custom pretty-printing function that enhances the readability of data structures when printed. This function utilizes Python's built-in pprint module, allowing users to specify a custom width for output formatting." - ], - "metadata": { - "id": "KwFlLByRbWKi" - } - }, - { - "cell_type": "code", - "source": [ - "from pprint import pprint as pp\n", - "# Set a custom width for pretty-printing\n", - "def pprint(data, width=80):\n", - " \"\"\"Pretty print data with a specified width.\"\"\"\n", - " pp(data, width=width)# List of model identifiers to query\n" - ], - "metadata": { - "id": "-Wf7j6abbQmw" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "source": [ - "### Setting Up API Keys\n", - "\n", - "Here we will securely set our API keys as environment variables. This is helpful because we don’t want to hardcode sensitive information (like API keys) directly into our code. By using environment variables, we can keep our credentials secure while still allowing our program to access them. 
Normally we would use a .env file to store our passwords to our enviroments, but since we are going to be working in colab we will do things a little different." - ], - "metadata": { - "id": "Cce1aLBvctaL" - } - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "BsK7GrHyV-c4", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "35fef9dc-e226-4e9d-e6c7-a597882b74f9" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Enter your GROQ API key: ··········\n" - ] - } - ], - "source": [ - "import os\n", - "from getpass import getpass\n", - "os.environ['GROQ_API_KEY'] = getpass('Enter your GROQ API key: ')" - ] - }, - { - "cell_type": "markdown", - "source": [ - "### Creating a Simple Chat Interaction with an AI Language Model\n", - "This code initiates a chat interaction with a language model (specifically Groq’s LLaMA 3.2), where the model responds to the user's input. We use the aisuite library to communicate with the model and retrieve the response." - ], - "metadata": { - "id": "m2mhu-VbSWfF" - } - }, - { - "cell_type": "code", - "source": [ - "import aisuite as ai\n", - "\n", - "# Initialize the AI client for accessing the language model\n", - "client = ai.Client()\n", - "\n", - "# Define a conversation with a system message and a user message\n", - "messages = [\n", - " {\"role\": \"system\", \"content\": \"You are a helpful agent, who answers with brevity.\"},\n", - " {\"role\": \"user\", \"content\": 'Hi'},\n", - "]\n", - "\n", - "# Request a response from the model\n", - "response = client.chat.completions.create(model=\"groq:llama-3.2-3b-preview\", messages=messages)\n", - "\n", - "# Print the model's response\n", - "print(response.choices[0].message.content)" - ], - "metadata": { - "id": "mBEOEq99eGjR", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "446fdba3-9072-4470-b3b8-627717013604" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "How can I assist you?\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "### Defining a Function to Interact with the Language Model\n", - "\n", - "This function, ask, streamlines the process of sending a user message to a language model and retrieving a response. It encapsulates the logic required to set up the conversation and can be reused throughout the notebook for different queries. It will not perserve any history or any continuing conversation. 
\n", - "\n" - ], - "metadata": { - "id": "YJSahowjiJBE" - } - }, - { - "cell_type": "code", - "source": [ - "def ask(message, sys_message=\"You are a helpful agent.\",\n", - " model=\"groq:llama-3.2-3b-preview\"):\n", - " # Initialize the AI client for accessing the language model\n", - " client = ai.Client()\n", - "\n", - " # Construct the messages list for the chat\n", - " messages = [\n", - " {\"role\": \"system\", \"content\": sys_message},\n", - " {\"role\": \"user\", \"content\": message}\n", - " ]\n", - "\n", - " # Send the messages to the model and get the response\n", - " response = client.chat.completions.create(model=model, messages=messages)\n", - "\n", - " # Return the content of the model's response\n", - " return response.choices[0].message.content\n" - ], - "metadata": { - "id": "n8DK8_RqqXFH" - }, - "execution_count": null, - "outputs": [] + { + "cell_type": "markdown", + "metadata": { + "id": "KwFlLByRbWKi" + }, + "source": [ + "### Custom Pretty Printing Function\n", + "In this section, we define a custom pretty-printing function that enhances the readability of data structures when printed. This function utilizes Python's built-in pprint module, allowing users to specify a custom width for output formatting." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "id": "-Wf7j6abbQmw" + }, + "outputs": [], + "source": [ + "from pprint import pprint as pp\n", + "# Set a custom width for pretty-printing\n", + "def pprint(data, width=80):\n", + " \"\"\"Pretty print data with a specified width.\"\"\"\n", + " pp(data, width=width)# List of model identifiers to query" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Cce1aLBvctaL" + }, + "source": [ + "### Setting Up API Keys\n", + "\n", + "Here we will securely set our API keys as environment variables. This is helpful because we don’t want to hardcode sensitive information (like API keys) directly into our code. By using environment variables, we can keep our credentials secure while still allowing our program to access them. Normally we would use a .env file to store our passwords to our enviroments, but since we are going to be working in colab we will do things a little different." + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "BsK7GrHyV-c4", + "outputId": "35fef9dc-e226-4e9d-e6c7-a597882b74f9" + }, + "outputs": [ { - "cell_type": "code", - "source": [ - "ask(\"Hi. what is capital of Japan?\")" - ], - "metadata": { - "id": "FGcqY4lBjtFj", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "outputId": "0520933a-8f2f-4185-a8a2-c591283482a3" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "'Hello. 
The capital of Japan is Tokyo.'" - ], - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - } - }, - "metadata": {}, - "execution_count": 6 - } - ] + "name": "stdin", + "output_type": "stream", + "text": [ + "Enter your GROQ API key: ········\n" + ] + } + ], + "source": [ + "import os\n", + "from getpass import getpass\n", + "os.environ['GROQ_API_KEY'] = getpass('Enter your GROQ API key: ')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "m2mhu-VbSWfF" + }, + "source": [ + "### Creating a Simple Chat Interaction with an AI Language Model\n", + "This code initiates a chat interaction with a language model (specifically Groq’s LLaMA 3.2), where the model responds to the user's input. We use the aisuite library to communicate with the model and retrieve the response." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "mBEOEq99eGjR", + "outputId": "446fdba3-9072-4470-b3b8-627717013604" + }, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "The real value of AI Suite is the ablity to run a variety of different models. Let's first set up a collection of different API keys which we can try out." - ], - "metadata": { - "id": "wpeW6Pj6j_6H" - } + "name": "stdout", + "output_type": "stream", + "text": [ + "How can I assist you?\n" + ] + } + ], + "source": [ + "import aisuite as ai\n", + "\n", + "# Initialize the AI client for accessing the language model\n", + "client = ai.Client()\n", + "\n", + "# Define a conversation with a system message and a user message\n", + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a helpful agent, who answers with brevity.\"},\n", + " {\"role\": \"user\", \"content\": 'Hi'},\n", + "]\n", + "\n", + "# Request a response from the model\n", + "response = client.chat.completions.create(model=\"groq:llama-3.2-3b-preview\", messages=messages)\n", + "\n", + "# Print the model's response\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YJSahowjiJBE" + }, + "source": [ + "### Defining a Function to Interact with the Language Model\n", + "\n", + "This function, ask, streamlines the process of sending a user message to a language model and retrieving a response. It encapsulates the logic required to set up the conversation and can be reused throughout the notebook for different queries. It will not perserve any history or any continuing conversation. 
\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "n8DK8_RqqXFH" + }, + "outputs": [], + "source": [ + "def ask(message, sys_message=\"You are a helpful agent.\",\n", + " model=\"groq:llama-3.2-3b-preview\"):\n", + " # Initialize the AI client for accessing the language model\n", + " client = ai.Client()\n", + "\n", + " # Construct the messages list for the chat\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": sys_message},\n", + " {\"role\": \"user\", \"content\": message}\n", + " ]\n", + "\n", + " # Send the messages to the model and get the response\n", + " response = client.chat.completions.create(model=model, messages=messages)\n", + "\n", + " # Return the content of the model's response\n", + " return response.choices[0].message.content\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 }, + "id": "FGcqY4lBjtFj", + "outputId": "0520933a-8f2f-4185-a8a2-c591283482a3" + }, + "outputs": [ { - "cell_type": "code", - "source": [ - "os.environ['OPENAI_API_KEY'] = getpass('Enter your OPENAI API key: ')\n", - "os.environ['ANTHROPIC_API_KEY'] = getpass('Enter your ANTHROPIC API key: ')" - ], - "metadata": { - "id": "9_kJlkGfj_NG", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "d45074c6-bbc6-4214-df0c-6d162a176f21" + "data": { + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" }, - "execution_count": null, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Enter your OPENAI API key: ··········\n", - "Enter your ANTHROPIC API key: ··········\n" - ] - } + "text/plain": [ + "'Hello. The capital of Japan is Tokyo.'" ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ask(\"Hi. what is capital of Japan?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wpeW6Pj6j_6H" + }, + "source": [ + "The real value of AI Suite is the ablity to run a variety of different models. Let's first set up a collection of different API keys which we can try out." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "9_kJlkGfj_NG", + "outputId": "d45074c6-bbc6-4214-df0c-6d162a176f21" + }, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "###Confirm each model is using a different provider\n" - ], - "metadata": { - "id": "mfPtlJlbTY6X" - } + "ename": "NameError", + "evalue": "name 'getpass' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[5], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m os\u001b[38;5;241m.\u001b[39menviron[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mOPENAI_API_KEY\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[43mgetpass\u001b[49m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEnter your OPENAI API key: \u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 2\u001b[0m os\u001b[38;5;241m.\u001b[39menviron[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mANTHROPIC_API_KEY\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m getpass(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEnter your ANTHROPIC API key: \u001b[39m\u001b[38;5;124m'\u001b[39m)\n", + "\u001b[0;31mNameError\u001b[0m: name 'getpass' is not defined" + ] + } + ], + "source": [ + "os.environ['OPENAI_API_KEY'] = getpass('Enter your OPENAI API key: ')\n", + "os.environ['ANTHROPIC_API_KEY'] = getpass('Enter your ANTHROPIC API key: ')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mfPtlJlbTY6X" + }, + "source": [ + "###Confirm each model is using a different provider\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "iHVESCGJuWWg", + "outputId": "3102b43a-e754-4288-ec1d-9777791f25b6" + }, + "outputs": [ { - "cell_type": "code", - "source": [ - "print(ask(\"Who is your creator?\"))\n", - "print(ask('Who is your creator?', model='anthropic:claude-3-5-sonnet-20240620'))\n", - "print(ask('Who is your creator?', model='openai:gpt-4o'))\n" - ], - "metadata": { - "id": "iHVESCGJuWWg", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "3102b43a-e754-4288-ec1d-9777791f25b6" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "I was created by Meta AI, a leading artificial intelligence research organization. 
My knowledge was developed from a large corpus of text, which I use to generate human-like responses to user queries.\n", - "I was created by Anthropic.\n", - "I was developed by OpenAI, an organization that focuses on artificial intelligence research and deployment.\n" - ] - } - ] + "ename": "NameError", + "evalue": "name 'ai' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mask\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWho is your creator?\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m)\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(ask(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mWho is your creator?\u001b[39m\u001b[38;5;124m'\u001b[39m, model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124manthropic:claude-3-5-sonnet-20240620\u001b[39m\u001b[38;5;124m'\u001b[39m))\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28mprint\u001b[39m(ask(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mWho is your creator?\u001b[39m\u001b[38;5;124m'\u001b[39m, model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mopenai:gpt-4o\u001b[39m\u001b[38;5;124m'\u001b[39m))\n", + "Cell \u001b[0;32mIn[4], line 4\u001b[0m, in \u001b[0;36mask\u001b[0;34m(message, sys_message, model)\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mask\u001b[39m(message, sys_message\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are a helpful agent.\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 2\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgroq:llama-3.2-3b-preview\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# Initialize the AI client for accessing the language model\u001b[39;00m\n\u001b[0;32m----> 4\u001b[0m client \u001b[38;5;241m=\u001b[39m \u001b[43mai\u001b[49m\u001b[38;5;241m.\u001b[39mClient()\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# Construct the messages list for the chat\u001b[39;00m\n\u001b[1;32m 7\u001b[0m messages \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 8\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msystem\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: sys_message},\n\u001b[1;32m 9\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: message}\n\u001b[1;32m 10\u001b[0m ]\n", + "\u001b[0;31mNameError\u001b[0m: name 'ai' is not defined" + ] + } + ], + "source": [ + "print(ask(\"Who is your creator?\"))\n", + "print(ask('Who is your creator?', model='anthropic:claude-3-5-sonnet-20240620'))\n", + "print(ask('Who is your creator?', model='openai:gpt-4o'))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BWBL4D2H2B_9" + }, + "source": [ + "### Querying Multiple AI Models for a Common Question\n", + "In this section, we will query several different versions of the LLaMA language 
model to get varied responses to the same question. This approach allows us to compare how different models handle the same prompt, providing insights into their performance and style." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "E_gg-sgYuoOb", + "outputId": "d1c582ba-3471-4b0e-b9ca-317df8a1c1c5" + }, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "### Querying Multiple AI Models for a Common Question\n", - "In this section, we will query several different versions of the LLaMA language model to get varied responses to the same question. This approach allows us to compare how different models handle the same prompt, providing insights into their performance and style." - ], - "metadata": { - "id": "BWBL4D2H2B_9" - } + "name": "stdout", + "output_type": "stream", + "text": [ + "('llama-3.1-8b-instant: \\n'\n", + " ' The origins of Artificial Intelligence (AI) date back to the 1956 Dartmouth '\n", + " 'Summer Research Project on Artificial Intelligence, where a group of '\n", + " 'computer scientists, led by John McCarthy, Marvin Minsky, Nathaniel '\n", + " 'Rochester, and Claude Shannon, coined the term and laid the foundation for '\n", + " 'the development of AI as a distinct field of study. ')\n", + "('llama-3.2-1b-preview: \\n'\n", + " ' The origins of Artificial Intelligence (AI) date back to the mid-20th '\n", + " 'century, when the first computer programs, which mimicked human-like '\n", + " 'intelligence through algorithms and rule-based systems, were developed by '\n", + " 'renowned mathematicians and computer scientists, including Alan Turing, '\n", + " 'Marvin Minsky, and John McCarthy in the 1950s. ')\n", + "('llama-3.2-3b-preview: \\n'\n", + " ' The origins of Artificial Intelligence (AI) date back to the 1950s, with '\n", + " 'the Dartmouth Summer Research Project on Artificial Intelligence, led by '\n", + " 'computer scientists John McCarthy, Marvin Minsky, and Nathaniel Rochester, '\n", + " 'marking the birth of AI as a formal field of research. ')\n", + "('llama3-70b-8192: \\n'\n", + " ' The origins of Artificial Intelligence (AI) can be traced back to the 1950s '\n", + " 'when computer scientist Alan Turing proposed the Turing Test, a method for '\n", + " 'determining whether a machine could exhibit intelligent behavior equivalent '\n", + " 'to, or indistinguishable from, that of a human. ')\n", + "('llama3-8b-8192: \\n'\n", + " ' The origins of Artificial Intelligence (AI) can be traced back to the '\n", + " '1950s, when computer scientists DARPA funded the development of the first AI '\n", + " 'programs, such as the Logical Theorist, which aimed to simulate human '\n", + " 'problem-solving abilities and learn from experience. 
')\n" + ] + } + ], + "source": [ + "\n", + "models = [\n", + " 'llama-3.1-8b-instant',\n", + " 'llama-3.2-1b-preview',\n", + " 'llama-3.2-3b-preview',\n", + " 'llama3-70b-8192',\n", + " 'llama3-8b-8192'\n", + "]\n", + "\n", + "# Initialize a list to hold the responses from each model\n", + "ret = []\n", + "\n", + "# Loop through each model and get a response for the specified question\n", + "for x in models:\n", + " ret.append(ask('Write a short one sentence explanation of the origins of AI?', model=f'groq:{x}'))\n", + "\n", + "# Print the model's name and its corresponding response\n", + "for idx, x in enumerate(ret):\n", + " pprint(models[idx] + ': \\n ' + x + ' ')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Z8pnJPdD2NL0" + }, + "source": [ + "### Querying Different AI Providers for a Common Question\n", + "In this section, we will query multiple AI models from different providers to get varied responses to the same question regarding the origins of AI. This comparison allows us to observe how different models from different architectures respond to the same prompt." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, - { - "cell_type": "code", - "source": [ - "\n", - "models = [\n", - " 'llama-3.1-8b-instant',\n", - " 'llama-3.2-1b-preview',\n", - " 'llama-3.2-3b-preview',\n", - " 'llama3-70b-8192',\n", - " 'llama3-8b-8192'\n", - "]\n", - "\n", - "# Initialize a list to hold the responses from each model\n", - "ret = []\n", - "\n", - "# Loop through each model and get a response for the specified question\n", - "for x in models:\n", - " ret.append(ask('Write a short one sentence explanation of the origins of AI?', model=f'groq:{x}'))\n", - "\n", - "# Print the model's name and its corresponding response\n", - "for idx, x in enumerate(ret):\n", - " pprint(models[idx] + ': \\n ' + x + ' ')" - ], - "metadata": { - "id": "E_gg-sgYuoOb", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "d1c582ba-3471-4b0e-b9ca-317df8a1c1c5" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "('llama-3.1-8b-instant: \\n'\n", - " ' The origins of Artificial Intelligence (AI) date back to the 1956 Dartmouth '\n", - " 'Summer Research Project on Artificial Intelligence, where a group of '\n", - " 'computer scientists, led by John McCarthy, Marvin Minsky, Nathaniel '\n", - " 'Rochester, and Claude Shannon, coined the term and laid the foundation for '\n", - " 'the development of AI as a distinct field of study. ')\n", - "('llama-3.2-1b-preview: \\n'\n", - " ' The origins of Artificial Intelligence (AI) date back to the mid-20th '\n", - " 'century, when the first computer programs, which mimicked human-like '\n", - " 'intelligence through algorithms and rule-based systems, were developed by '\n", - " 'renowned mathematicians and computer scientists, including Alan Turing, '\n", - " 'Marvin Minsky, and John McCarthy in the 1950s. ')\n", - "('llama-3.2-3b-preview: \\n'\n", - " ' The origins of Artificial Intelligence (AI) date back to the 1950s, with '\n", - " 'the Dartmouth Summer Research Project on Artificial Intelligence, led by '\n", - " 'computer scientists John McCarthy, Marvin Minsky, and Nathaniel Rochester, '\n", - " 'marking the birth of AI as a formal field of research. 
')\n", - "('llama3-70b-8192: \\n'\n", - " ' The origins of Artificial Intelligence (AI) can be traced back to the 1950s '\n", - " 'when computer scientist Alan Turing proposed the Turing Test, a method for '\n", - " 'determining whether a machine could exhibit intelligent behavior equivalent '\n", - " 'to, or indistinguishable from, that of a human. ')\n", - "('llama3-8b-8192: \\n'\n", - " ' The origins of Artificial Intelligence (AI) can be traced back to the '\n", - " '1950s, when computer scientists DARPA funded the development of the first AI '\n", - " 'programs, such as the Logical Theorist, which aimed to simulate human '\n", - " 'problem-solving abilities and learn from experience. ')\n" - ] - } - ] + "collapsed": true, + "id": "j4TqhC5J1YIG", + "jupyter": { + "outputs_hidden": true }, + "outputId": "4a50e300-0a7a-4562-8a34-f31c4b9072d4" + }, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "### Querying Different AI Providers for a Common Question\n", - "In this section, we will query multiple AI models from different providers to get varied responses to the same question regarding the origins of AI. This comparison allows us to observe how different models from different architectures respond to the same prompt." - ], - "metadata": { - "id": "Z8pnJPdD2NL0" - } + "name": "stdout", + "output_type": "stream", + "text": [ + "('groq:llama3-70b-8192: \\n'\n", + " 'The origins of Artificial Intelligence (AI) can be traced back to the 1950s '\n", + " 'when computer scientists like Alan Turing, Marvin Minsky, and John McCarthy '\n", + " 'began exploring ways to create machines that could think and learn like '\n", + " 'humans, leading to the development of the first AI programs and '\n", + " 'algorithms. \\n'\n", + " '\\n')\n", + "('openai:gpt-4o: \\n'\n", + " 'The origins of AI trace back to the mid-20th century, when pioneers like '\n", + " 'Alan Turing and John McCarthy began exploring the possibility of creating '\n", + " 'machines that could simulate human intelligence through computational '\n", + " 'processes. \\n'\n", + " '\\n')\n", + "('anthropic:claude-3-5-sonnet-20240620: \\n'\n", + " 'The origins of AI can be traced back to the 1950s when computer scientists '\n", + " 'began exploring the concept of creating machines that could simulate human '\n", + " 'intelligence and problem-solving abilities. \\n'\n", + " '\\n')\n" + ] + } + ], + "source": [ + "# List of AI model providers to query\n", + "providers = [\n", + " 'groq:llama3-70b-8192',\n", + " 'openai:gpt-4o',\n", + " 'anthropic:claude-3-5-sonnet-20240620'\n", + "]\n", + "\n", + "# Initialize a list to hold the responses from each provider\n", + "ret = []\n", + "\n", + "# Loop through each provider and get a response for the specified question\n", + "for x in providers:\n", + " ret.append(ask('Write a short one sentence explanation of the origins of AI?', model=x))\n", + "\n", + "# Print the provider's name and its corresponding response\n", + "for idx, x in enumerate(ret):\n", + " pprint(providers[idx] + ': \\n' + x + ' \\n\\n')\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OgPCC0y_U4WG" + }, + "source": [ + "### Generating and Evaluating Questions with AI Models\n", + "In this section, we will randomly generate questions using a language model and then have two other models provide answers to those questions. The user will then evaluate which answer is better, allowing for a comparative analysis of responses from different models." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 }, - { - "cell_type": "code", - "source": [ - "# List of AI model providers to query\n", - "providers = [\n", - " 'groq:llama3-70b-8192',\n", - " 'openai:gpt-4o',\n", - " 'anthropic:claude-3-5-sonnet-20240620'\n", - "]\n", - "\n", - "# Initialize a list to hold the responses from each provider\n", - "ret = []\n", - "\n", - "# Loop through each provider and get a response for the specified question\n", - "for x in providers:\n", - " ret.append(ask('Write a short one sentence explanation of the origins of AI?', model=x))\n", - "\n", - "# Print the provider's name and its corresponding response\n", - "for idx, x in enumerate(ret):\n", - " pprint(providers[idx] + ': \\n' + x + ' \\n\\n')\n" - ], - "metadata": { - "collapsed": true, - "id": "j4TqhC5J1YIG", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "4a50e300-0a7a-4562-8a34-f31c4b9072d4" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "('groq:llama3-70b-8192: \\n'\n", - " 'The origins of Artificial Intelligence (AI) can be traced back to the 1950s '\n", - " 'when computer scientists like Alan Turing, Marvin Minsky, and John McCarthy '\n", - " 'began exploring ways to create machines that could think and learn like '\n", - " 'humans, leading to the development of the first AI programs and '\n", - " 'algorithms. \\n'\n", - " '\\n')\n", - "('openai:gpt-4o: \\n'\n", - " 'The origins of AI trace back to the mid-20th century, when pioneers like '\n", - " 'Alan Turing and John McCarthy began exploring the possibility of creating '\n", - " 'machines that could simulate human intelligence through computational '\n", - " 'processes. \\n'\n", - " '\\n')\n", - "('anthropic:claude-3-5-sonnet-20240620: \\n'\n", - " 'The origins of AI can be traced back to the 1950s when computer scientists '\n", - " 'began exploring the concept of creating machines that could simulate human '\n", - " 'intelligence and problem-solving abilities. \\n'\n", - " '\\n')\n" - ] - } - ] + "collapsed": true, + "id": "fMx-TfLk09ft", + "jupyter": { + "outputs_hidden": true }, + "outputId": "56153c03-a1e6-4b72-fd16-b36197ccb5ee" + }, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "### Generating and Evaluating Questions with AI Models\n", - "In this section, we will randomly generate questions using a language model and then have two other models provide answers to those questions. The user will then evaluate which answer is better, allowing for a comparative analysis of responses from different models." - ], - "metadata": { - "id": "OgPCC0y_U4WG" - } + "name": "stdout", + "output_type": "stream", + "text": [ + "('Original text:\\n'\n", + " \" Here's a short question suitable for asking an LLM:\\n\"\n", + " '\\n'\n", + " 'What are the potential benefits and risks of artificial intelligence in '\n", + " 'healthcare?\\n'\n", + " '\\n')\n", + "('Option 1 text:\\n'\n", + " ' **Benefits:**\\n'\n", + " '1. Improved diagnostics and personalized treatment plans.\\n'\n", + " '2. Increased efficiency in administrative tasks.\\n'\n", + " '3. Faster drug discovery and development.\\n'\n", + " '4. Enhanced patient monitoring and support.\\n'\n", + " '\\n'\n", + " '**Risks:**\\n'\n", + " '1. Privacy and data security concerns.\\n'\n", + " '2. Potential biases in AI algorithms.\\n'\n", + " '3. 
Over-reliance on AI systems by healthcare professionals.\\n'\n", + " '4. Ethical and accountability issues in decision-making.\\n'\n", + " '\\n')\n", + "('Option 2 text:\\n'\n", + " ' The potential benefits of artificial intelligence (AI) in healthcare '\n", + " 'include:\\n'\n", + " '\\n'\n", + " '* Improved diagnosis accuracy and speed\\n'\n", + " '* Enhanced patient outcomes through personalized medicine\\n'\n", + " '* Increased efficiency and reduced costs through automation\\n'\n", + " '* Better disease prevention and detection\\n'\n", + " '* Enhanced research capabilities and new treatment discoveries\\n'\n", + " '\\n'\n", + " 'However, there are also potential risks, such as:\\n'\n", + " '\\n'\n", + " '* Bias in AI decision-making due to flawed data or algorithms\\n'\n", + " '* Job displacement of healthcare professionals\\n'\n", + " '* Cybersecurity risks to patient data\\n'\n", + " '* Dependence on technology leading to deskilling of healthcare workers\\n'\n", + " '* Unintended consequences of AI-driven decision-making that may not align '\n", + " 'with human values.\\n'\n", + " '\\n'\n", + " 'These benefits and risks highlight the need for responsible development, '\n", + " 'deployment, and oversight of AI in healthcare.\\n'\n", + " '\\n')\n", + "Which is best 1 or 2. 3 if indistinguishable: 3\n", + "('Original text:\\n'\n", + " ' What are the potential applications of large language models in '\n", + " 'healthcare?\\n'\n", + " '\\n')\n", + "('Option 1 text:\\n'\n", + " ' Large language models have numerous potential applications in healthcare, '\n", + " 'including:\\n'\n", + " '\\n'\n", + " '1. **Clinical Decision Support**: Providing doctors with accurate diagnoses, '\n", + " 'treatment options, and medication recommendations.\\n'\n", + " '2. **Medical Text Analysis**: Analyzing large amounts of medical literature, '\n", + " 'patient records, and clinical notes to identify patterns and insights.\\n'\n", + " '3. **Patient Engagement**: Generating personalized health summaries, '\n", + " 'communicating medical information in simple language, and facilitating '\n", + " 'patient-provider communication.\\n'\n", + " '4. **Disease Surveillance**: Monitoring social media and online platforms '\n", + " 'for disease outbreaks and tracking epidemiological trends.\\n'\n", + " '5. **Medical Writing Assistance**: Assisting healthcare professionals in '\n", + " 'generating medical reports, discharge summaries, and other documents.\\n'\n", + " '6. **Chatbots and Virtual Assistants**: Offering patients timely support and '\n", + " 'answers to medical queries.\\n'\n", + " '7. **Research and Development**: Accelerating biomedical research by '\n", + " 'analyzing large datasets, identifying research gaps, and suggesting '\n", + " 'potential areas of investigation.\\n'\n", + " '\\n'\n", + " 'These applications have the potential to improve healthcare outcomes, reduce '\n", + " 'costs, and enhance patient experiences.\\n'\n", + " '\\n')\n", + "('Option 2 text:\\n'\n", + " ' Large language models in healthcare could potentially be used for:\\n'\n", + " '\\n'\n", + " '1. Clinical decision support\\n'\n", + " '2. Medical literature analysis and summarization\\n'\n", + " '3. Patient triage and symptom checking\\n'\n", + " '4. Medical education and training\\n'\n", + " '5. Automated medical coding and documentation\\n'\n", + " '6. Drug discovery and development\\n'\n", + " '7. Personalized treatment recommendations\\n'\n", + " '8. Health-related chatbots for patient engagement\\n'\n", + " '9. 
Medical research and hypothesis generation\\n'\n", + " '10. Natural language processing of electronic health records\\n'\n", + " '\\n'\n", + " 'These applications could help improve efficiency, accuracy, and '\n", + " 'accessibility in various aspects of healthcare.\\n'\n", + " '\\n')\n" + ] }, { - "cell_type": "code", - "source": [ - "import random\n", - "\n", - "# Initialize a list to store the best responses\n", - "best = []\n", - "\n", - "# Loop to generate and evaluate questions\n", - "for _ in range(20):\n", - " # Shuffle the providers list to randomly select models for each iteration\n", - " random.shuffle(providers)\n", - "\n", - " # Generate a question using the first provider\n", - " question = ask('Please generate a short question that is suitable for asking an LLM.', model=providers[0])\n", - "\n", - " # Get answers from the second and third providers\n", - " answer_1 = ask('Please give a short answer to this question: ' + question, model=providers[1])\n", - " answer_2 = ask('Please give a short answer to this question: ' + question, model=providers[2])\n", - "\n", - " # Print the generated question and the two answers\n", - " pprint(f\"Original text:\\n {question}\\n\\n\")\n", - " pprint(f\"Option 1 text:\\n {answer_1}\\n\\n\")\n", - " pprint(f\"Option 2 text:\\n {answer_2}\\n\\n\")\n", - "\n", - " # Store the provider names and the user's choice of the best answer\n", - " best.append(str(providers) + ', ' + input(\"Which is best 1 or 2. 3 if indistinguishable: \"))" - ], - "metadata": { - "collapsed": true, - "id": "fMx-TfLk09ft", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "outputId": "56153c03-a1e6-4b72-fd16-b36197ccb5ee" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "('Original text:\\n'\n", - " \" Here's a short question suitable for asking an LLM:\\n\"\n", - " '\\n'\n", - " 'What are the potential benefits and risks of artificial intelligence in '\n", - " 'healthcare?\\n'\n", - " '\\n')\n", - "('Option 1 text:\\n'\n", - " ' **Benefits:**\\n'\n", - " '1. Improved diagnostics and personalized treatment plans.\\n'\n", - " '2. Increased efficiency in administrative tasks.\\n'\n", - " '3. Faster drug discovery and development.\\n'\n", - " '4. Enhanced patient monitoring and support.\\n'\n", - " '\\n'\n", - " '**Risks:**\\n'\n", - " '1. Privacy and data security concerns.\\n'\n", - " '2. Potential biases in AI algorithms.\\n'\n", - " '3. Over-reliance on AI systems by healthcare professionals.\\n'\n", - " '4. 
Ethical and accountability issues in decision-making.\\n'\n", - " '\\n')\n", - "('Option 2 text:\\n'\n", - " ' The potential benefits of artificial intelligence (AI) in healthcare '\n", - " 'include:\\n'\n", - " '\\n'\n", - " '* Improved diagnosis accuracy and speed\\n'\n", - " '* Enhanced patient outcomes through personalized medicine\\n'\n", - " '* Increased efficiency and reduced costs through automation\\n'\n", - " '* Better disease prevention and detection\\n'\n", - " '* Enhanced research capabilities and new treatment discoveries\\n'\n", - " '\\n'\n", - " 'However, there are also potential risks, such as:\\n'\n", - " '\\n'\n", - " '* Bias in AI decision-making due to flawed data or algorithms\\n'\n", - " '* Job displacement of healthcare professionals\\n'\n", - " '* Cybersecurity risks to patient data\\n'\n", - " '* Dependence on technology leading to deskilling of healthcare workers\\n'\n", - " '* Unintended consequences of AI-driven decision-making that may not align '\n", - " 'with human values.\\n'\n", - " '\\n'\n", - " 'These benefits and risks highlight the need for responsible development, '\n", - " 'deployment, and oversight of AI in healthcare.\\n'\n", - " '\\n')\n", - "Which is best 1 or 2. 3 if indistinguishable: 3\n", - "('Original text:\\n'\n", - " ' What are the potential applications of large language models in '\n", - " 'healthcare?\\n'\n", - " '\\n')\n", - "('Option 1 text:\\n'\n", - " ' Large language models have numerous potential applications in healthcare, '\n", - " 'including:\\n'\n", - " '\\n'\n", - " '1. **Clinical Decision Support**: Providing doctors with accurate diagnoses, '\n", - " 'treatment options, and medication recommendations.\\n'\n", - " '2. **Medical Text Analysis**: Analyzing large amounts of medical literature, '\n", - " 'patient records, and clinical notes to identify patterns and insights.\\n'\n", - " '3. **Patient Engagement**: Generating personalized health summaries, '\n", - " 'communicating medical information in simple language, and facilitating '\n", - " 'patient-provider communication.\\n'\n", - " '4. **Disease Surveillance**: Monitoring social media and online platforms '\n", - " 'for disease outbreaks and tracking epidemiological trends.\\n'\n", - " '5. **Medical Writing Assistance**: Assisting healthcare professionals in '\n", - " 'generating medical reports, discharge summaries, and other documents.\\n'\n", - " '6. **Chatbots and Virtual Assistants**: Offering patients timely support and '\n", - " 'answers to medical queries.\\n'\n", - " '7. **Research and Development**: Accelerating biomedical research by '\n", - " 'analyzing large datasets, identifying research gaps, and suggesting '\n", - " 'potential areas of investigation.\\n'\n", - " '\\n'\n", - " 'These applications have the potential to improve healthcare outcomes, reduce '\n", - " 'costs, and enhance patient experiences.\\n'\n", - " '\\n')\n", - "('Option 2 text:\\n'\n", - " ' Large language models in healthcare could potentially be used for:\\n'\n", - " '\\n'\n", - " '1. Clinical decision support\\n'\n", - " '2. Medical literature analysis and summarization\\n'\n", - " '3. Patient triage and symptom checking\\n'\n", - " '4. Medical education and training\\n'\n", - " '5. Automated medical coding and documentation\\n'\n", - " '6. Drug discovery and development\\n'\n", - " '7. Personalized treatment recommendations\\n'\n", - " '8. Health-related chatbots for patient engagement\\n'\n", - " '9. Medical research and hypothesis generation\\n'\n", - " '10. 
Natural language processing of electronic health records\\n'\n", - " '\\n'\n", - " 'These applications could help improve efficiency, accuracy, and '\n", - " 'accessibility in various aspects of healthcare.\\n'\n", - " '\\n')\n" - ] - }, - { - "output_type": "error", - "ename": "KeyboardInterrupt", - "evalue": "Interrupted by user", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;31m# Store the provider names and the user's choice of the best answer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m \u001b[0mbest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mproviders\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m', '\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Which is best 1 or 2. 3 if indistinguishable: \"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36mraw_input\u001b[0;34m(self, prompt)\u001b[0m\n\u001b[1;32m 849\u001b[0m \u001b[0;34m\"raw_input was called, but this frontend does not support input requests.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 850\u001b[0m )\n\u001b[0;32m--> 851\u001b[0;31m return self._input_request(str(prompt),\n\u001b[0m\u001b[1;32m 852\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_ident\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 853\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_header\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36m_input_request\u001b[0;34m(self, prompt, ident, parent, password)\u001b[0m\n\u001b[1;32m 893\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 894\u001b[0m \u001b[0;31m# re-raise KeyboardInterrupt, to truncate traceback\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 895\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Interrupted by user\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 896\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 897\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlog\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwarning\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Invalid Message:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexc_info\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mKeyboardInterrupt\u001b[0m: Interrupted by user" - 
] - } - ] + "ename": "KeyboardInterrupt", + "evalue": "Interrupted by user", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;31m# Store the provider names and the user's choice of the best answer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m \u001b[0mbest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mproviders\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m', '\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Which is best 1 or 2. 3 if indistinguishable: \"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36mraw_input\u001b[0;34m(self, prompt)\u001b[0m\n\u001b[1;32m 849\u001b[0m \u001b[0;34m\"raw_input was called, but this frontend does not support input requests.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 850\u001b[0m )\n\u001b[0;32m--> 851\u001b[0;31m return self._input_request(str(prompt),\n\u001b[0m\u001b[1;32m 852\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_ident\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 853\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_header\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36m_input_request\u001b[0;34m(self, prompt, ident, parent, password)\u001b[0m\n\u001b[1;32m 893\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 894\u001b[0m \u001b[0;31m# re-raise KeyboardInterrupt, to truncate traceback\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 895\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Interrupted by user\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 896\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 897\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlog\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwarning\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Invalid Message:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexc_info\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: Interrupted by user" + ] } - ] -} \ No newline at end of file + ], + "source": [ + "import random\n", + "\n", + "# Initialize a list to store the best responses\n", + "best = []\n", + "\n", + "# Loop to generate and evaluate questions\n", + "for _ in 
range(20):\n", + " # Shuffle the providers list to randomly select models for each iteration\n", + " random.shuffle(providers)\n", + "\n", + " # Generate a question using the first provider\n", + " question = ask('Please generate a short question that is suitable for asking an LLM.', model=providers[0])\n", + "\n", + " # Get answers from the second and third providers\n", + " answer_1 = ask('Please give a short answer to this question: ' + question, model=providers[1])\n", + " answer_2 = ask('Please give a short answer to this question: ' + question, model=providers[2])\n", + "\n", + " # Print the generated question and the two answers\n", + " pprint(f\"Original text:\\n {question}\\n\\n\")\n", + " pprint(f\"Option 1 text:\\n {answer_1}\\n\\n\")\n", + " pprint(f\"Option 2 text:\\n {answer_2}\\n\\n\")\n", + "\n", + " # Store the provider names and the user's choice of the best answer\n", + " best.append(str(providers) + ', ' + input(\"Which is best 1 or 2. 3 if indistinguishable: \"))" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.16" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/github_provider_example.ipynb b/examples/github_provider_example.ipynb new file mode 100644 index 00000000..145ba5ff --- /dev/null +++ b/examples/github_provider_example.ipynb @@ -0,0 +1,84 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 5, + "id": "84991a44-c1cd-4d5e-98a5-f871314a9310", + "metadata": {}, + "outputs": [], + "source": [ + "import aisuite\n", + "from aisuite.providers.github_provider import GitHubProvider\n", + "from aisuite.framework.message import Message" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "fa318efa-dfdc-4d88-9097-666173446be7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Response from GitHubProvider:\n", + "\n", + "The capital of Germany is Berlin.\n", + "\n" + ] + } + ], + "source": [ + "def main():\n", + " # Instantiate the GitHubProvider\n", + " provider = GitHubProvider()\n", + " \n", + " # Create a prompt message (as a dictionary in the expected format)\n", + " prompt_message = {\n", + " \"content\": \"Hello, what is the capital of Germany?\",\n", + " \"role\": \"user\"\n", + " }\n", + " \n", + " # Call chat_completions_create to send the prompt and get a response\n", + " response = provider.chat_completions_create(\"gpt-4\", [prompt_message])\n", + " \n", + " # Print the assistant's response\n", + " print(\"Response from GitHubProvider:\")\n", + " print(response.choices[0].message.content)\n", + "\n", + "if __name__ == \"__main__\":\n", + " main()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac238832-1782-4c8b-916d-fd8818194a4d", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.16" + } 
+ }, + "nbformat": 4, + "nbformat_minor": 5 +}
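
The new `examples/github_provider_example.ipynb` added above reduces to the following standalone script. This is a minimal sketch based only on what the diff shows; it assumes `GitHubProvider()` picks up its GitHub Copilot credentials from the environment, since the constructor's parameters are not visible in this diff.

```python
# Minimal sketch of the new example notebook as a plain script.
# Assumption: GitHubProvider reads its GitHub Copilot credentials from the
# environment; the diff does not show how the provider is configured.
from aisuite.providers.github_provider import GitHubProvider

provider = GitHubProvider()

# Messages use the OpenAI-style dict format, exactly as in the notebook.
messages = [{"role": "user", "content": "Hello, what is the capital of Germany?"}]

# chat_completions_create(model, messages) mirrors the notebook's call.
response = provider.chat_completions_create("gpt-4", messages)
print(response.choices[0].message.content)
```

Run as-is, the notebook records the response "The capital of Germany is Berlin."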