Categories
AI

Canopy RAG Examples

  1. Open-source RAG framework.
  2. Built on Pinecone vector database.
  3. Simplifies RAG application development.
  4. Manages chat history, query optimization, and context retrieval.
  5. Configurable server for easy RAG chat application deployment.
  6. Supports custom RAG application building.
  7. CLI tool for interactive chat and RAG workflow evaluation.
from datasets import load_dataset


def _restructure(record):
    """Map one raw arXiv record onto the Canopy document schema
    (id / text / source plus a nested metadata dict)."""
    return {
        "id": record["id"],
        "text": record["content"],
        "source": record["source"],
        "metadata": {
            "title": record["title"],
            "primary_category": record["primary_category"],
            "published": record["published"],
            "updated": record["updated"],
        },
    }


# Columns that are folded into `text`/`metadata` above (or unused) and can
# therefore be dropped from the dataset.
_RAW_COLUMNS = [
    "title", "summary", "content",
    "authors", "categories", "comment",
    "journal_ref", "primary_category",
    "published", "updated", "references",
]

# Load the AI arXiv dataset, reshape each record, drop the raw columns,
# and write one JSON document per line — ready for `canopy upsert`.
data = load_dataset("jamescalam/ai-arxiv", split="train")
data = data.map(_restructure)
data = data.remove_columns(_RAW_COLUMNS)
data.to_json("ai_arxiv.jsonl", orient="records", lines=True)
# Credentials and index name required by the Canopy SDK/CLI — replace the
# <...> placeholders with real values before running.
export PINECONE_API_KEY="<PINECONE_API_KEY>"
export PINECONE_ENVIRONMENT="<PINECONE_ENVIRONMENT>"
export OPENAI_API_KEY="<OPENAI_API_KEY>"
export INDEX_NAME=<INDEX_NAME>
# Install the Canopy SDK plus the `datasets` library used to build the JSONL file.
pip install -qU canopy-sdk datasets
# Show the available Canopy commands / verify the install.
canopy
# Create a new Pinecone index (named via INDEX_NAME).
canopy new
# Upsert the prepared documents into the index.
canopy upsert ./ai_arxiv.jsonl
# Start the Canopy server (exposes an OpenAI-compatible chat endpoint).
canopy start
# Interactive chat against the server; --no-rag shows answers without retrieval
# so they can be compared with RAG-augmented ones.
canopy chat --no-rag
# Point the legacy (pre-1.0) OpenAI Python SDK at the local Canopy server
# instead of api.openai.com; replace host:port with the address that
# `canopy start` prints.
import openai
openai.api_base = "http://host:port/" 
import openai 
# Alternative: pass the base URL per request (legacy Completion API).
openai_response = openai.Completion.create(..., api_base="http://host:port/")
Categories
AI

GPT-4 Vision: Adding a Football Commentator to a Video

# Generate a football-commentator voiceover for a video using GPT-4 Vision
# and OpenAI TTS, then mux the narration back onto the original footage.
import cv2
import base64
import openai
from moviepy.editor import VideoFileClip, AudioFileClip

# Initialize OpenAI client
client = openai.OpenAI()

# Load the video
video = cv2.VideoCapture("football.m4v")

# Calculate video length (frame count / fps) so the prompt can ask for a
# script of matching duration.
length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
fps = video.get(cv2.CAP_PROP_FPS)
video_length_seconds = length / fps

print(f'Video length: {video_length_seconds:.2f} seconds')

# Read frames and encode to base64
base64Frames = []
while video.isOpened():
    success, frame = video.read()
    if not success:
        break
    _, buffer = cv2.imencode(".jpg", frame)
    base64Frames.append(base64.b64encode(buffer).decode("utf-8"))

# Release the video object
video.release()
print(len(base64Frames), "frames read.")

# Create OpenAI chat completion.
# Only every 25th frame is sent (base64Frames[0::25]) to keep the request
# within the model's input limits.
response = client.chat.completions.create(
    model="gpt-4-vision-preview",
    messages=[
        {
            "role": "user", 
            "content": [
                f"These are frames of a video. Create a short voiceover script in the style of a football commentator For {video_length_seconds:.2f} seconds. Only include the narration. Don't talk about the view",
                *map(lambda x: {"image": x, "resize": 768}, base64Frames[0::25]),
            ]
        }
    ],
    max_tokens=500,
)

# Print the response content
print(response.choices[0].message.content)
# Short pause before the TTS request — presumably rate-limit headroom; confirm.
import time
time.sleep(2)
speech_file_path = "football.mp3"
# Convert the generated script to speech. NOTE: `response` is rebound here —
# the chat response's text is consumed as TTS input first.
response = client.audio.speech.create(
  model="tts-1",
  voice="onyx",
  input=response.choices[0].message.content
)

response.stream_to_file(speech_file_path)


# Mux: attach the narration track to the original video and write the result.
video_clip = VideoFileClip("football.m4v")
audio_clip = AudioFileClip("football.mp3")
final_clip = video_clip.set_audio(audio_clip)
final_clip.write_videofile("football_with_commentator.m4v", codec='libx264', audio_codec='aac')
video_clip.close()
audio_clip.close()
final_clip.close()
Categories
AI

Pandas AI Examples

# PandasAI demo: wrap a pandas DataFrame so it can answer natural-language
# questions through an LLM.
import pandas as pd
from pandasai import SmartDataframe
from pandasai.llm import OpenAI

df = pd.read_excel('data.xlsx')
# NOTE(review): this line immediately overwrites the Excel data loaded above —
# the two loaders look like alternatives; keep only the one you need.
df = pd.read_csv('data.csv')

llm = OpenAI(model="gpt-3.5-turbo")
# Wrap the raw DataFrame; `chat` forwards the question (plus schema context)
# to the configured LLM.
df = SmartDataframe(df, config={"llm": llm})

print( df.chat('Which are the 5 happiest countries?'))
print( df.chat('What is the sum of the GDPs of the 2 happiest countries?'))
print( df.chat('Plot the histogram of countries showing for each the gdp, using different colors for each bar',))
Categories
AI

OpenAI Assistants API in Node.js

// OpenAI Assistants API walkthrough (Node.js): create an assistant, open a
// thread, post a message, start a run, then check the run once after 10 s.
import OpenAI from "openai";
const openai = new OpenAI();

// Assistant with the code-interpreter tool enabled.
const assistant = await openai.beta.assistants.create({
    name: "Math Tutor",
    instructions:
        "You are a personal math tutor. Write and run code to answer math questions.",
    tools: [{ type: "code_interpreter" }],
    model: "gpt-4-1106-preview",
});

// A thread holds the conversation state server-side.
const thread = await openai.beta.threads.create();

const message = await openai.beta.threads.messages.create(thread.id, {
    role: "user",
    content: "I need to solve the equation `3x + 11 = 14`. Can you help me?",
});

// Run-level instructions supplement the assistant's own instructions.
const run = await openai.beta.threads.runs.create(thread.id, {
    assistant_id: assistant.id,
    instructions: "Please address the user as Mervin Praison.",    
});

console.log(run)

// Retrieve the run once and, if it has completed, print every message in the
// thread with a capitalized role prefix; otherwise report it as pending.
const checkStatusAndPrintMessages = async (threadId, runId) => {
    let runStatus = await openai.beta.threads.runs.retrieve(threadId, runId);
    if(runStatus.status === "completed"){
        let messages = await openai.beta.threads.messages.list(threadId);
        messages.data.forEach((msg) => {
            const role = msg.role;
            const content = msg.content[0].text.value; 
            console.log(
                `${role.charAt(0).toUpperCase() + role.slice(1)}: ${content}`
            );
        });
    } else {
        console.log("Run is not completed yet.");
    }  
};

// Single delayed check (10 s); a production app would poll until the run
// reaches a terminal status.
setTimeout(() => {
    checkStatusAndPrintMessages(thread.id, run.id)
}, 10000 );

Categories
AI

ChatGPT Assistants API Python

# OpenAI Assistants API walkthrough (Python): create an assistant, a thread,
# and a message, start a run, then poll until the run finishes and print the
# conversation.
import time  # required by the polling loop below; missing in the original (NameError)

import openai

# Initialize the client
client = openai.OpenAI()

# Step 1: Create an Assistant with the code-interpreter tool enabled
assistant = client.beta.assistants.create(
    name="Math Tutor",
    instructions="You are a personal math tutor. Write and run code to answer math questions.",
    tools=[{"type": "code_interpreter"}],
    model="gpt-4-1106-preview"
)

# Step 2: Create a Thread (server-side conversation state)
thread = client.beta.threads.create()

# Step 3: Add a Message to a Thread
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="I need to solve the equation `3x + 11 = 14`. Can you help me?"
)

# Step 4: Run the Assistant; run-level instructions supplement the
# assistant's own instructions
run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant.id,
    instructions="Please address the user as Jane Doe. The user has a premium account."
)

print(run.model_dump_json(indent=4))

# Poll every 5 seconds until the run reaches a terminal status.
while True:
    # Wait for 5 seconds between status checks
    time.sleep(5)

    # Retrieve the run status
    run_status = client.beta.threads.runs.retrieve(
        thread_id=thread.id,
        run_id=run.id
    )
    print(run_status.model_dump_json(indent=4))

    # If run is completed, print all messages and stop
    if run_status.status == 'completed':
        messages = client.beta.threads.messages.list(
            thread_id=thread.id
        )

        # Loop through messages and print content based on role
        for msg in messages.data:
            role = msg.role
            content = msg.content[0].text.value
            print(f"{role.capitalize()}: {content}")

        break

    # Guard against an infinite loop: runs can also end unsuccessfully.
    if run_status.status in ('failed', 'cancelled', 'expired'):
        print(f"Run ended without completing: {run_status.status}")
        break
Categories
AI

ChatGPT Vision API – Video

# Send the opening frames of a video to GPT-4 Vision and print its reply.
import openai
client = openai.OpenAI()
import cv2
import base64

video = cv2.VideoCapture("video.mp4")

# Decode every frame and JPEG-encode it as base64 for the API payload.
base64Frames = []
while video.isOpened():
    success, frame = video.read()
    if not success:
        break
    _, buffer = cv2.imencode(".jpg", frame)
    base64Frames.append(base64.b64encode(buffer).decode("utf-8"))

video.release()

# Only the first 5 frames are sent (base64Frames[0:5]) to keep the request
# within input limits.
response = client.chat.completions.create(
    model="gpt-4-vision-preview",
    messages=[{"role": "user", "content": [{"image": frame} for frame in base64Frames[0:5]]}]
)
print(response.choices[0].message.content)
Categories
AI

ChatGPT Image Recognition API

# Ask GPT-4 Vision to describe an image referenced by a public URL.
import openai
client = openai.OpenAI()

response = client.chat.completions.create(
    model="gpt-4-vision-preview",
    messages = [
        {
            "role" : "user",
            "content" :
            [
                {"type" : "text", "text" : "What's in this image?"},
                {
                    "type" : "image_url",
                    # NOTE(review): newer API versions expect
                    # "image_url": {"url": ...}; the bare-string form shown
                    # here may be rejected — confirm against the SDK in use.
                    "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
                }
            ]
        }
    ],
    max_tokens = 300
)

print(response.choices[0].message.content)
Categories
AI

ChatGPT Speech to Text Whisper API

# Transcribe a local audio file with the Whisper API and print the text.
import openai

client = openai.Client()

# Use a context manager so the file handle is closed even if the request
# fails — the original opened the file and never closed it.
with open("speech.mp3", "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file
    )
print(transcript.text)
Categories
AI

ChatGPT Dalle 3 API

# Generate a single 1024x1024 image with DALL-E 3 and print its URL.
import openai

client = openai.OpenAI()

generation = client.images.generate(
    model= "dall-e-3",
    prompt="a white siamese cat",
    size="1024x1024",
    quality="standard",
    n=1
)

# The API returns a list of generated images; print the first one's URL.
first_image = generation.data[0]
print(first_image.url)
Categories
AI

ChatGPT Text to Speech API

# Synthesize speech with the OpenAI TTS API and save it as speech.mp3.
import openai

client = openai.OpenAI()

# Request the narration using the "alloy" voice.
tts_response = client.audio.speech.create(
    model="tts-1",
    voice="alloy",
    input="Hi Everyone, This is Mervin Praison"
)

# Stream the audio bytes to disk.
output_path = "speech.mp3"
tts_response.stream_to_file(output_path)