Categories: AI Agents

CrewAI Custom Tools

import os
from crewai import Agent, Task, Crew
from duckduckgo_search import DDGS
from langchain.tools import tool

@tool("Internet Search Tool")
def internet_search_tool(query: str) -> list:
    """Search Internet for relevant information based on a query."""
    ddgs = DDGS()
    results = ddgs.text(keywords=query, region='wt-wt', safesearch='moderate', max_results=5)
    return results

# Define agents
researcher = Agent(
    role='Researcher',
    goal='Gather and analyze information on specific topics',
    verbose=True,
    backstory=(
        "As a seasoned researcher, you have a keen eye for detail and a "
        "deep understanding of your field. You're adept at sifting through "
        "information to find the most relevant and accurate data."
    ),
    tools=[internet_search_tool],
    allow_delegation=True
)

writer = Agent(
    role='Writer',
    goal='Compose informative and engaging articles based on research findings',
    verbose=True,
    backstory=(
        "With a talent for storytelling, you translate complex ideas into "
        "clear, compelling narratives. Your articles are well-researched, "
        "thought-provoking, and accessible to a broad audience."
    ),
    tools=[internet_search_tool],
    allow_delegation=False
)

# Define tasks
research_task = Task(
    description=(
        "Investigate the latest trends in renewable energy technologies. "
        "Identify key advancements, challenges, and potential impacts on "
        "global energy policies."
    ),
    expected_output='A detailed report summarizing the findings.',
    tools=[internet_search_tool],
    agent=researcher,
)

writing_task = Task(
    description=(
        "Based on the research findings, write an article that highlights "
        "the importance of renewable energy innovations. The article should "
        "educate the public on how these technologies can contribute to "
        "sustainable development."
    ),
    expected_output='An engaging and informative article suitable for publication.',
    tools=[internet_search_tool],
    agent=writer,
)

# Create and kick off the crew
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, writing_task]
)

result = crew.kickoff(inputs={'topic': 'renewable energy'})
print(result)
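
Note that crew.kickoff interpolates the inputs dict into {placeholder} tokens in task descriptions and expected outputs. The tasks above spell out "renewable energy" directly, so the topic input is effectively unused; a minimal sketch of a parameterised research task that would pick it up (hypothetical wording):

# Hypothetical variant: reference {topic} so kickoff(inputs={'topic': ...}) is interpolated
research_task = Task(
    description=(
        "Investigate the latest trends in {topic}. "
        "Identify key advancements, challenges, and potential impacts "
        "on global energy policies."
    ),
    expected_output='A detailed report summarizing the findings on {topic}.',
    tools=[internet_search_tool],
    agent=researcher,
)
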
Categories: AutoGen

AutoGen LangChain Tools

import math
import os
from typing import Optional, Type

# LangChain imports needed for the custom tool and the file reader
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool
from langchain.tools.file_management.read import ReadFileTool

import autogen

class CircumferenceToolInput(BaseModel):
    radius: float = Field()


class CircumferenceTool(BaseTool):
    name = "circumference_calculator"
    description = "Use this tool when you need to calculate a circumference using the radius of a circle"
    args_schema: Type[BaseModel] = CircumferenceToolInput

    def _run(self, radius: float):
        return float(radius) * 2.0 * math.pi


def get_file_path_of_example():
    current_dir = os.path.dirname(__file__)
    file_path = os.path.join(current_dir, "radius.txt")
    return file_path

# Define a function to generate llm_config from a LangChain tool
def generate_llm_config(tool):
    # Define the function schema based on the tool's args_schema
    function_schema = {
        "name": tool.name.lower().replace(" ", "_"),
        "description": tool.description,
        "parameters": {
            "type": "object",
            "properties": {},
            "required": [],
        },
    }

    if tool.args is not None:
        function_schema["parameters"]["properties"] = tool.args

    return function_schema


# Instantiate the ReadFileTool
read_file_tool = ReadFileTool()
custom_tool = CircumferenceTool()

# Construct the llm_config
llm_config = {
    # Generate functions config for the Tool
    "functions": [
        generate_llm_config(custom_tool),
        generate_llm_config(read_file_tool),
    ],
    "config_list": [{"model": "gpt-3.5-turbo", "api_key": os.environ["OPENAI_API_KEY"]}],
    "timeout": 120,
}

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    code_execution_config={
        "work_dir": "coding",
        "use_docker": False,
    },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)

# Register the tool and start the conversation
user_proxy.register_function(
    function_map={
        custom_tool.name: custom_tool._run,
        read_file_tool.name: read_file_tool._run,
    }
)

chatbot = autogen.AssistantAgent(
    name="chatbot",
    system_message="For coding tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.",
    llm_config=llm_config,
)

user_proxy.initiate_chat(
    chatbot,
    message=f"Read the file with the path {get_file_path_of_example()}, then calculate the circumference of a circle that has a radius of that files contents.",  # 7.81mm in the file
    llm_config=llm_config,
)

radius.txt

7.81mm
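
For reference, generate_llm_config(custom_tool) should produce roughly the following function schema (the property title comes from the pydantic args_schema, so the exact output can vary with the LangChain/pydantic version; note that the helper never fills in "required"):

{
    "name": "circumference_calculator",
    "description": "Use this tool when you need to calculate a circumference using the radius of a circle",
    "parameters": {
        "type": "object",
        "properties": {"radius": {"title": "Radius", "type": "number"}},
        "required": [],
    },
}
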
Categories: Tools

Anthropic Tools: Text Classification

from anthropic import Anthropic
import json

client = Anthropic()
MODEL_NAME = "claude-3-haiku-20240307"

# 4. Text Classification
tools = [
    {
        "name": "print_classification",
        "description": "Prints the classification results.",
        "input_schema": {
            "type": "object",
            "properties": {
                "categories": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {"type": "string", "description": "The category name."},
                            "score": {"type": "number", "description": "The classification score for the category, ranging from 0.0 to 1.0."}
                        },
                        "required": ["name", "score"]
                    }
                }
            },
            "required": ["categories"]
        }
    }
]

text = "The new quantum computing breakthrough could revolutionize the tech industry."

query = f"""
<document>
{text}
</document>

Use the print_classification tool. The categories can be Politics, Sports, Technology, Entertainment, Business.
"""

response = client.beta.tools.messages.create(
    model=MODEL_NAME,
    max_tokens=4096,
    tools=tools,
    messages=[{"role": "user", "content": query}]
)

json_classification = None
for content in response.content:
    if content.type == "tool_use" and content.name == "print_classification":
        json_classification = content.input
        break

if json_classification:
    print("Text Classification (JSON):")
    print(json.dumps(json_classification, indent=2))
else:
    print("No text classification found in the response.")

Categories: Tools

Anthropic Tools: Sentiment Analysis

from anthropic import Anthropic
import json

client = Anthropic()
MODEL_NAME = "claude-3-haiku-20240307"

# 3. Sentiment Analysis
tools = [
    {
        "name": "print_sentiment_scores",
        "description": "Prints the sentiment scores of a given text.",
        "input_schema": {
            "type": "object",
            "properties": {
                "positive_score": {"type": "number", "description": "The positive sentiment score, ranging from 0.0 to 1.0."},
                "negative_score": {"type": "number", "description": "The negative sentiment score, ranging from 0.0 to 1.0."},
                "neutral_score": {"type": "number", "description": "The neutral sentiment score, ranging from 0.0 to 1.0."}
            },
            "required": ["positive_score", "negative_score", "neutral_score"]
        }
    }
]

text = "The product was okay, but the customer service was terrible. I probably won't buy from them again."

query = f"""
<text>
{text}
</text>

Use the print_sentiment_scores tool.
"""

response = client.beta.tools.messages.create(
    model=MODEL_NAME,
    max_tokens=4096,
    tools=tools,
    messages=[{"role": "user", "content": query}]
)

json_sentiment = None
for content in response.content:
    if content.type == "tool_use" and content.name == "print_sentiment_scores":
        json_sentiment = content.input
        break

if json_sentiment:
    print("Sentiment Analysis (JSON):")
    print(json.dumps(json_sentiment, indent=2))
else:
    print("No sentiment analysis found in the response.")


Categories: Tools

Anthropic Tools: Entity Extraction

from anthropic import Anthropic
import json

client = Anthropic()
MODEL_NAME = "claude-3-haiku-20240307"

# 2. Entity Extraction
tools = [
    {
        "name": "print_entities",
        "description": "Prints extract named entities.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entities": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {"type": "string", "description": "The extracted entity name."},
                            "type": {"type": "string", "description": "The entity type (e.g., PERSON, ORGANIZATION, LOCATION)."},
                            "context": {"type": "string", "description": "The context in which the entity appears in the text."}
                        },
                        "required": ["name", "type", "context"]
                    }
                }
            },
            "required": ["entities"]
        }
    }
]

text = "John works at Google in New York. He met with Sarah, the CEO of Acme Inc., last week in San Francisco."

query = f"""
<document>
{text}
</document>

Use the print_entities tool.
"""

response = client.beta.tools.messages.create(
    model=MODEL_NAME,
    max_tokens=4096,
    tools=tools,
    messages=[{"role": "user", "content": query}]
)

json_entities = None
for content in response.content:
    if content.type == "tool_use" and content.name == "print_entities":
        json_entities = content.input
        break

if json_entities:
    print("Extracted Entities (JSON):")
    print(json_entities)
else:
    print("No entities found in the response.")
Categories: Tools

Anthropic Tools: Customer Service Integration

import anthropic

client = anthropic.Anthropic()
MODEL_NAME = "claude-3-sonnet-20240229"

tools = [
    {
        "name": "get_customer_info",
        "description": "Retrieves customer information based on their customer ID. Returns the customer's name, email, and phone number.",
        "input_schema": {
            "type": "object",
            "properties": {
                "customer_id": {
                    "type": "string",
                    "description": "The unique identifier for the customer."
                }
            },
            "required": ["customer_id"]
        }
    },
    {
        "name": "get_order_details",
        "description": "Retrieves the details of a specific order based on the order ID. Returns the order ID, product name, quantity, price, and order status.",
        "input_schema": {
            "type": "object",
            "properties": {
                "order_id": {
                    "type": "string",
                    "description": "The unique identifier for the order."
                }
            },
            "required": ["order_id"]
        }
    },
    {
        "name": "cancel_order",
        "description": "Cancels an order based on the provided order ID. Returns a confirmation message if the cancellation is successful.",
        "input_schema": {
            "type": "object",
            "properties": {
                "order_id": {
                    "type": "string",
                    "description": "The unique identifier for the order to be cancelled."
                }
            },
            "required": ["order_id"]
        }
    }
]

def get_customer_info(customer_id):
    # Simulated customer data
    customers = {
        "C1": {"name": "John Doe", "email": "john@example.com", "phone": "123-456-7890"},
        "C2": {"name": "Jane Smith", "email": "jane@example.com", "phone": "987-654-3210"}
    }
    return customers.get(customer_id, "Customer not found")

def get_order_details(order_id):
    # Simulated order data
    orders = {
        "O1": {"id": "O1", "product": "Widget A", "quantity": 2, "price": 19.99, "status": "Shipped"},
        "O2": {"id": "O2", "product": "Gadget B", "quantity": 1, "price": 49.99, "status": "Processing"}
    }
    return orders.get(order_id, "Order not found")

def cancel_order(order_id):
    # Simulated order cancellation
    if order_id in ["O1", "O2"]:
        return True
    else:
        return False

def process_tool_call(tool_name, tool_input):
    if tool_name == "get_customer_info":
        return get_customer_info(tool_input["customer_id"])
    elif tool_name == "get_order_details":
        return get_order_details(tool_input["order_id"])
    elif tool_name == "cancel_order":
        return cancel_order(tool_input["order_id"])

import json

def chatbot_interaction(user_message):
    print(f"\n{'='*50}\nUser Message: {user_message}\n{'='*50}")

    messages = [
        {"role": "user", "content": user_message}
    ]

    response = client.beta.tools.messages.create(
        model=MODEL_NAME,
        max_tokens=4096,
        tools=tools,
        messages=messages
    )

    print(f"\nInitial Response:")
    print(f"Stop Reason: {response.stop_reason}")
    print(f"Content: {response.content}")

    while response.stop_reason == "tool_use":
        tool_use = next(block for block in response.content if block.type == "tool_use")
        tool_name = tool_use.name
        tool_input = tool_use.input

        print(f"\nTool Used: {tool_name}")
        print(f"Tool Input:")
        print(json.dumps(tool_input, indent=2))

        tool_result = process_tool_call(tool_name, tool_input)

        print(f"\nTool Result:")
        print(json.dumps(tool_result, indent=2))

        messages = [
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": response.content},
            {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use.id,
                        "content": str(tool_result),
                    }
                ],
            },
        ]

        response = client.beta.tools.messages.create(
            model=MODEL_NAME,
            max_tokens=4096,
            tools=tools,
            messages=messages
        )

        print(f"\nResponse:")
        print(f"Stop Reason: {response.stop_reason}")
        print(f"Content: {response.content}")

    final_response = next(
        (block.text for block in response.content if hasattr(block, "text")),
        None,
    )

    print(f"\nFinal Response: {final_response}")

    return final_response


chatbot_interaction("Can you tell me the email address for customer C1?")
chatbot_interaction("What is the status of order O2?")
chatbot_interaction("Please cancel order O1 for me.")
chatbot_interaction("Can you tell me the email address for customer C1? and What is the status of order O2?")

Output

❯ python customer_service.py

==================================================
User Message: Can you tell me the email address for customer C1?
==================================================

Initial Response:
Stop Reason: tool_use
Content: [TextBlock(text='Sure, let me retrieve the customer information by invoking the `get_customer_info` tool:', type='text'), ToolUseBlock(id='toolu_01JUPev89EzMjrJqXnVJtWyA', input={'customer_id': 'C1'}, name='get_customer_info', type='tool_use')]

Tool Used: get_customer_info
Tool Input:
{
  "customer_id": "C1"
}

Tool Result:
{
  "name": "John Doe",
  "email": "john@example.com",
  "phone": "123-456-7890"
}

Response:
Stop Reason: end_turn
Content: [TextBlock(text='The email address for customer C1 is john@example.com.', type='text')]

Final Response: The email address for customer C1 is john@example.com.

==================================================
User Message: What is the status of order O2?
==================================================

Initial Response:
Stop Reason: tool_use
Content: [TextBlock(text='To get the status of order O2, we can use the `get_order_details` tool:', type='text'), ToolUseBlock(id='toolu_01AohV1yMLyLfoDfkdntFmnD', input={'order_id': 'O2'}, name='get_order_details', type='tool_use')]

Tool Used: get_order_details
Tool Input:
{
  "order_id": "O2"
}

Tool Result:
{
  "id": "O2",
  "product": "Gadget B",
  "quantity": 1,
  "price": 49.99,
  "status": "Processing"
}

Response:
Stop Reason: end_turn
Content: [TextBlock(text='The order details show that order O2 is for 1 Gadget B at $49.99, and the current status is "Processing".', type='text')]

Final Response: The order details show that order O2 is for 1 Gadget B at $49.99, and the current status is "Processing".

==================================================
User Message: Please cancel order O1 for me.
==================================================

Initial Response:
Stop Reason: tool_use
Content: [TextBlock(text='Okay, let me cancel that order for you:', type='text'), ToolUseBlock(id='toolu_016GF5zEgmEx6Ajd4p4dgfyN', input={'order_id': 'O1'}, name='cancel_order', type='tool_use')]

Tool Used: cancel_order
Tool Input:
{
  "order_id": "O1"
}
Categories: Tools

Anthropic Tools: Structured JSON Output

from anthropic import Anthropic
import requests
from bs4 import BeautifulSoup
import json

client = Anthropic()
MODEL_NAME = "claude-3-haiku-20240307"

# 1. Article Summary Tool
tools = [
    {
        "name": "print_summary",
        "description": "Prints a summary of the article.",
        "input_schema": {
            "type": "object",
            "properties": {
                "author": {"type": "string", "description": "Name of the article author"},
                "topics": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": 'Array of topics, e.g. ["tech", "politics"]. Should be as specific as possible, and can overlap.'
                },
                "summary": {"type": "string", "description": "Summary of the article. One or two paragraphs max."},
                "coherence": {"type": "integer", "description": "Coherence of the article's key points, 0-100 (inclusive)"},
                "persuasion": {"type": "number", "description": "Article's persuasion score, 0.0-1.0 (inclusive)"}
            },
            "required": ['author', 'topics', 'summary', 'coherence', 'persuasion', 'counterpoint']
        }
    }
]

url = "https://www.anthropic.com/news/third-party-testing"
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
article = " ".join([p.text for p in soup.find_all("p")])

query = f"""
<article>
{article}
</article>

Use the `print_summary` tool.
"""

response = client.beta.tools.messages.create(
    model=MODEL_NAME,
    max_tokens=4096,
    tools=tools,
    messages=[{"role": "user", "content": query}]
)
json_summary = None
for content in response.content:
    if content.type == "tool_use" and content.name == "print_summary":
        json_summary = content.input
        break

if json_summary:
    print("JSON Summary:")
    print(json.dumps(json_summary, indent=2))
else:
    print("No JSON summary found in the response.")

# 2. Entity Extraction
tools = [
    {
        "name": "print_entities",
        "description": "Prints extract named entities.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entities": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {"type": "string", "description": "The extracted entity name."},
                            "type": {"type": "string", "description": "The entity type (e.g., PERSON, ORGANIZATION, LOCATION)."},
                            "context": {"type": "string", "description": "The context in which the entity appears in the text."}
                        },
                        "required": ["name", "type", "context"]
                    }
                }
            },
            "required": ["entities"]
        }
    }
]

text = "John works at Google in New York. He met with Sarah, the CEO of Acme Inc., last week in San Francisco."

query = f"""
<document>
{text}
</document>

Use the print_entities tool.
"""

response = client.beta.tools.messages.create(
    model=MODEL_NAME,
    max_tokens=4096,
    tools=tools,
    messages=[{"role": "user", "content": query}]
)

json_entities = None
for content in response.content:
    if content.type == "tool_use" and content.name == "print_entities":
        json_entities = content.input
        break

if json_entities:
    print("Extracted Entities (JSON):")
    print(json_entities)
else:
    print("No entities found in the response.")

# 3. Sentiment Analysis
tools = [
    {
        "name": "print_sentiment_scores",
        "description": "Prints the sentiment scores of a given text.",
        "input_schema": {
            "type": "object",
            "properties": {
                "positive_score": {"type": "number", "description": "The positive sentiment score, ranging from 0.0 to 1.0."},
                "negative_score": {"type": "number", "description": "The negative sentiment score, ranging from 0.0 to 1.0."},
                "neutral_score": {"type": "number", "description": "The neutral sentiment score, ranging from 0.0 to 1.0."}
            },
            "required": ["positive_score", "negative_score", "neutral_score"]
        }
    }
]

text = "The product was okay, but the customer service was terrible. I probably won't buy from them again."

query = f"""
<text>
{text}
</text>

Use the print_sentiment_scores tool.
"""

response = client.beta.tools.messages.create(
    model=MODEL_NAME,
    max_tokens=4096,
    tools=tools,
    messages=[{"role": "user", "content": query}]
)

json_sentiment = None
for content in response.content:
    if content.type == "tool_use" and content.name == "print_sentiment_scores":
        json_sentiment = content.input
        break

if json_sentiment:
    print("Sentiment Analysis (JSON):")
    print(json.dumps(json_sentiment, indent=2))
else:
    print("No sentiment analysis found in the response.")

# 4. Text Classification
tools = [
    {
        "name": "print_classification",
        "description": "Prints the classification results.",
        "input_schema": {
            "type": "object",
            "properties": {
                "categories": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {"type": "string", "description": "The category name."},
                            "score": {"type": "number", "description": "The classification score for the category, ranging from 0.0 to 1.0."}
                        },
                        "required": ["name", "score"]
                    }
                }
            },
            "required": ["categories"]
        }
    }
]

text = "The new quantum computing breakthrough could revolutionize the tech industry."

query = f"""
<document>
{text}
</document>

Use the print_classification tool. The categories can be Politics, Sports, Technology, Entertainment, Business.
"""

response = client.beta.tools.messages.create(
    model=MODEL_NAME,
    max_tokens=4096,
    tools=tools,
    messages=[{"role": "user", "content": query}]
)

json_classification = None
for content in response.content:
    if content.type == "tool_use" and content.name == "print_classification":
        json_classification = content.input
        break

if json_classification:
    print("Text Classification (JSON):")
    print(json.dumps(json_classification, indent=2))
else:
    print("No text classification found in the response.")

Output

❯ python structuredoutput.py
JSON Summary:
{
  "author": "Anthropic",
  "topics": [
    "AI safety",
    "AI policy",
    "AI testing",
    "AI regulation"
  ],
  "summary": "The article argues that the AI sector needs effective third-party testing for frontier AI systems to avoid societal harm, whether deliberate or accidental. It discusses what third-party testing looks like, why it's needed, and some research Anthropic has done to arrive at this policy position. The article outlines key ingredients for an effective third-party testing regime, including identifying national security risks. It envisions a diverse ecosystem of third-party testers, similar to product safety testing in other industries. The article also discusses related AI policy ideas Anthropic will advocate for, such as greater funding for AI testing in government and developing tests for specific capabilities. Overall, the article presents Anthropic's position that building a third-party testing ecosystem is one of the best ways to bring more of society into the development and oversight of AI systems.",
  "coherence": 90,
  "persuasion": 0.8
}
Extracted Entities (JSON):
{'entities': [{'name': 'John', 'type': 'PERSON', 'context': 'John works at Google in New York.'}, {'name': 'Google', 'type': 'ORGANIZATION', 'context': 'John works at Google in New York.'}, {'name': 'New York', 'type': 'LOCATION', 'context': 'John works at Google in New York.'}, {'name': 'Sarah', 'type': 'PERSON', 'context': 'He met with Sarah, the CEO of Acme Inc., last week in San Francisco.'}, {'name': 'Acme Inc.', 'type': 'ORGANIZATION', 'context': 'He met with Sarah, the CEO of Acme Inc., last week in San Francisco.'}, {'name': 'San Francisco', 'type': 'LOCATION', 'context': 'He met with Sarah, the CEO of Acme Inc., last week in San Francisco.'}]}
Sentiment Analysis (JSON):
{
  "negative_score": 0.7,
  "neutral_score": 0.2,
  "positive_score": 0.1
}
Text Classification (JSON):
{
  "categories": [
    {
      "name": "Technology",
      "score": 0.7
    },
    {
      "name": "Business",
      "score": 0.3
    }
  ]
}
Categories: Tools

Anthropic Tools: Stock Price Integration

import anthropic
import yfinance as yf

client = anthropic.Anthropic()

# 1. Get Stock Price function / Tool
def get_stock_price(ticker_symbol):
    stock = yf.Ticker(ticker_symbol)
    hist = stock.history(period="1d")
    current_price = hist['Close'].iloc[0]
    return str(current_price)

# 2. Tool Definition 
tools = [
    {
        "name": "get_stock_price",
        "description": "Get the current stock price for a given ticker symbol",
        "input_schema": {
            "type": "object",
            "properties": {
                "ticker_symbol": {
                    "type": "string",
                    "description": "The stock ticker symbol, e.g., AAPL for Apple Inc."
                }
            },
            "required": ["ticker_symbol"]
        }
    }
]

# 3. Ask Claude for the Stock Price
initial_response = client.beta.tools.messages.create(
    model="claude-3-sonnet-20240229",
    max_tokens=1024,
    tools=tools,
    messages=[{"role": "user", "content": "What is the Stock price of Apple?"}]
)

print(f"Stop Reason: {initial_response.stop_reason}")
print(f"Initial Response: {initial_response.content}")

# 4. Parse the Tool Name and Run the Get Stock Price Tool
def process_tool_call(tool_name, tool_input):
    if tool_name == "get_stock_price":
        return get_stock_price(tool_input["ticker_symbol"])

if initial_response.stop_reason == "tool_use":
    tool_use = next(block for block in initial_response.content if block.type == "tool_use")
    tool_name = tool_use.name
    tool_input = tool_use.input

    print(f"\nTool Used: {tool_name}") # get_stock_price
    print(f"Tool Input: {tool_input}") # {'ticker_symbol': 'Apple'}

    tool_result = process_tool_call(tool_name, tool_input)

    print(f"Tool Result: {tool_result}")

    # 5. Send the Tool Result back to the Assistant
    response = client.beta.tools.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=4096,
        messages=[
            {"role": "user", "content": "What is the Stock price of Apple?"},
            {"role": "assistant", "content": initial_response.content},
            {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use.id,
                        "content": tool_result,
                    }
                ],
            },
        ],
        tools=tools,
    )
else:
    response = initial_response

final_response = next(
    (block.text for block in response.content if hasattr(block, "text")),
    None,
)
print(f"\nFinal Response: {final_response}")

Output

❯ python app.py
Stop Reason: tool_use
Initial Response: [TextBlock(text="Okay, let's get the current stock price for Apple Inc. (AAPL).", type='text'), ToolUseBlock(id='toolu_012KPAkjCHG8GUYC3YdEJTTo', input={'ticker_symbol': 'AAPL'}, name='get_stock_price', type='tool_use')]

Tool Used: get_stock_price
Tool Input: {'ticker_symbol': 'AAPL'}
Tool Result: 169.5800018310547

Final Response: The current stock price of Apple Inc. (AAPL) is $169.58.

UI

import anthropic
import yfinance as yf
import gradio as gr

client = anthropic.Anthropic()

# 1. Get Stock Price function / Tool
def get_stock_price(ticker_symbol):
    stock = yf.Ticker(ticker_symbol)
    hist = stock.history(period="1d")
    current_price = hist['Close'].iloc[0]
    return str(current_price)

# 2. Tool Definition
tools = [
    {
        "name": "get_stock_price",
        "description": "Get the current stock price for a given ticker symbol",
        "input_schema": {
            "type": "object",
            "properties": {
                "ticker_symbol": {
                    "type": "string",
                    "description": "The stock ticker symbol, e.g., AAPL for Apple Inc."
                }
            },
            "required": ["ticker_symbol"]
        }
    }
]

def process_tool_call(tool_name, tool_input):
    if tool_name == "get_stock_price":
        return get_stock_price(tool_input["ticker_symbol"])


def run_tool_interaction(user_message, model="claude-3-sonnet-20240229"):
    initial_response = client.beta.tools.messages.create(
        model=model,
        max_tokens=1024,
        tools=tools,
        messages=[{"role": "user", "content": user_message}]
    )

    print(f"Stop Reason: {initial_response.stop_reason}")
    print(f"Initial Response: {initial_response.content}")

    # Process tool call if indicated by stop reason
    if initial_response.stop_reason == "tool_use":
        tool_use = next(block for block in initial_response.content if block.type == "tool_use")
        tool_name = tool_use.name
        tool_input = tool_use.input

        print(f"\nTool Used: {tool_name}")  # Example: get_stock_price
        print(f"Tool Input: {tool_input}")  # Example: {'ticker_symbol': 'AAPL'}

        tool_result = process_tool_call(tool_name, tool_input)
        print(f"Tool Result: {tool_result}")

        # Send the tool result back to the assistant
        response = client.beta.tools.messages.create(
            model=model,
            max_tokens=4096,
            messages=[
                {"role": "user", "content": user_message},
                {"role": "assistant", "content": initial_response.content},
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "tool_result",
                            "tool_use_id": tool_use.id,
                            "content": tool_result,
                        }
                    ],
                },
            ],
            tools=tools,
        )
    else:
        response = initial_response

    # Extract and return the final response
    final_response = next(
        (block.text for block in response.content if hasattr(block, "text")),
        None,
    )
    return final_response

iface = gr.Interface(
    fn=run_tool_interaction,
    inputs=[
        gr.Textbox(label="User Message")
    ],
    outputs=gr.Textbox(label="Response"),
    title="Anthropic Tools",
    description="Get Stock Price"
)

iface.launch()
Categories: AI Agents

Praison AI Tools

  • CodeDocsSearchTool
  • CSVSearchTool
  • DirectorySearchTool
  • DirectoryReadTool
  • DOCXSearchTool
  • FileReadTool
  • GithubSearchTool
  • SerperDevTool
  • TXTSearchTool
  • JSONSearchTool
  • MDXSearchTool
  • PDFSearchTool
  • PGSearchTool
  • RagTool
  • ScrapeElementFromWebsiteTool
  • ScrapeWebsiteTool
  • SeleniumScrapingTool
  • WebsiteSearchTool
  • XMLSearchTool
  • YoutubeChannelSearchTool
  • YoutubeVideoSearchTool
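
These tool names mirror the crewai_tools package, so each can be instantiated directly and passed to an agent's tools list. A minimal sketch, assuming crewai_tools is installed and using hypothetical paths:

from crewai_tools import DirectoryReadTool, FileReadTool

file_tool = FileReadTool(file_path='report.txt')    # hypothetical file
dir_tool = DirectoryReadTool(directory='./docs')    # hypothetical directory

print(file_tool.run())
print(dir_tool.run())
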
Categories: AutoGen

AutoGen Tools

from typing import Annotated, Literal
import os
from autogen import ConversableAgent

Operator = Literal["+", "-", "*", "/"]

def calculator(a: int, b: int, operator: Annotated[Operator, "operator"]) -> int:
    if operator == "+":
        return a + b
    elif operator == "-":
        return a - b
    elif operator == "*":
        return a * b
    elif operator == "/":
        return int(a / b)
    else:
        raise ValueError("Invalid operator")

assistant = ConversableAgent(
    name="Assistant",
    system_message="You are a helpful AI assistant. "
    "You can help with simple calculations. "
    "Return 'TERMINATE' when the task is done.",
    llm_config={"config_list": [{"model": "gpt-3.5-turbo", "api_key": os.environ["OPENAI_API_KEY"]}]},
)

user_proxy = ConversableAgent(
    name="User",
    llm_config=False,
    is_termination_msg=lambda msg: msg.get("content") is not None and "TERMINATE" in msg["content"],
    human_input_mode="NEVER",
)

from autogen import register_function
# Register the calculator function to the agent and user proxy to add a calculator tool.
register_function(
    calculator,
    caller=assistant,  # The assistant agent can suggest calls to the calculator.
    executor=user_proxy,  # The user proxy agent can execute the calculator calls.
    name="calculator",  # By default, the function name is used as the tool name.
    description="A simple calculator",  # A description of the tool.
)

chat_result = user_proxy.initiate_chat(assistant, message="What is (44232 + 13312 / (232 - 32)) * 5?")
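
pyautogen also supports decorator-based registration, which is equivalent to the register_function call above (a sketch; the body is the same calculator defined earlier):

# Sketch: decorator-based registration, equivalent to register_function above
@user_proxy.register_for_execution()
@assistant.register_for_llm(name="calculator", description="A simple calculator")
def calculator(a: int, b: int, operator: Annotated[Operator, "operator"]) -> int:
    ...  # same body as the calculator function above

For the prompt above, note that calculator truncates division with int(a / b): 13312 / (232 - 32) evaluates to 66 rather than 66.56, so an agent that relies on the tool for the division arrives at (44232 + 66) * 5 = 221490 instead of the exact 221492.8.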