Categories: RAG

Google LangExtract for Beginners

pip install langextract
brew install libmagic
export LANGEXTRACT_API_KEY=xxxxxxxxx

Basic

import langextract as lx
import textwrap

# 1. Define the prompt and extraction rules
prompt = textwrap.dedent("""\
    Extract characters, emotions, and relationships in order of appearance.
    Use exact text for extractions. Do not paraphrase or overlap entities.
    Provide meaningful attributes for each entity to add context.""")

# 2. Provide a high-quality example to guide the model
examples = [
    lx.data.ExampleData(
        text="ROMEO. But soft! What light through yonder window breaks? It is the east, and Juliet is the sun.",
        extractions=[
            lx.data.Extraction(
                extraction_class="character",
                extraction_text="ROMEO",
                attributes={"emotional_state": "wonder"}
            ),
            lx.data.Extraction(
                extraction_class="emotion",
                extraction_text="But soft!",
                attributes={"feeling": "gentle awe"}
            ),
            lx.data.Extraction(
                extraction_class="relationship",
                extraction_text="Juliet is the sun",
                attributes={"type": "metaphor"}
            ),
        ]
    )
]

# The input text to be processed
input_text = "Lady Juliet gazed longingly at the stars, her heart aching for Romeo"

# Run the extraction
result = lx.extract(
    text_or_documents=input_text,
    prompt_description=prompt,
    examples=examples,
    model_id="gemini-2.5-flash",
)

# Save the results to a JSONL file (written to the test_output/ directory by default)
lx.io.save_annotated_documents([result], output_name="extraction_results.jsonl")

# Generate the visualization from the file
html_content = lx.visualize("test_output/extraction_results.jsonl")
with open("test_output/visualization.html", "w") as f:
    # In notebooks, lx.visualize may return an HTML object rather than a plain string
    f.write(html_content.data if hasattr(html_content, "data") else html_content)
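
For a quick look without opening the HTML file, the extractions can also be iterated directly; a minimal sketch using the same fields (extraction_class, extraction_text, attributes) that the examples above rely on:

# Print each extraction with its class and attributes
for extraction in result.extractions:
    print(f"{extraction.extraction_class}: {extraction.extraction_text!r} "
          f"(attributes: {extraction.attributes})")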

Advanced

import langextract as lx
import textwrap
from collections import Counter, defaultdict

# Define comprehensive prompt and examples for complex literary text
prompt = textwrap.dedent("""\
    Extract characters, emotions, and relationships from the given text.

    Provide meaningful attributes for every entity to add context and depth.

    Important: Use exact text from the input for extraction_text. Do not paraphrase.
    Extract entities in order of appearance with no overlapping text spans.

    Note: In play scripts, speaker names appear in ALL-CAPS followed by a period.""")

examples = [
    lx.data.ExampleData(
        text=textwrap.dedent("""\
            ROMEO. But soft! What light through yonder window breaks?
            It is the east, and Juliet is the sun.
            JULIET. O Romeo, Romeo! Wherefore art thou Romeo?"""),
        extractions=[
            lx.data.Extraction(
                extraction_class="character",
                extraction_text="ROMEO",
                attributes={"emotional_state": "wonder"}
            ),
            lx.data.Extraction(
                extraction_class="emotion",
                extraction_text="But soft!",
                attributes={"feeling": "gentle awe", "character": "Romeo"}
            ),
            lx.data.Extraction(
                extraction_class="relationship",
                extraction_text="Juliet is the sun",
                attributes={"type": "metaphor", "character_1": "Romeo", "character_2": "Juliet"}
            ),
            lx.data.Extraction(
                extraction_class="character",
                extraction_text="JULIET",
                attributes={"emotional_state": "yearning"}
            ),
            lx.data.Extraction(
                extraction_class="emotion",
                extraction_text="Wherefore art thou Romeo?",
                attributes={"feeling": "longing question", "character": "Juliet"}
            ),
        ]
    )
]

# Process Romeo & Juliet directly from Project Gutenberg
print("Downloading and processing Romeo and Juliet from Project Gutenberg...")

result = lx.extract(
    text_or_documents="https://www.gutenberg.org/files/1513/1513-0.txt",
    prompt_description=prompt,
    examples=examples,
    model_id="gemini-2.5-flash",
    extraction_passes=3,      # Multiple passes for improved recall
    max_workers=20,           # Parallel processing for speed
    max_char_buffer=1000      # Smaller contexts for better accuracy
)

print(f"Extracted {len(result.extractions)} entities from {len(result.text):,} characters")

# Save and visualize the results
lx.io.save_annotated_documents([result], output_name="romeo_juliet_extractions.jsonl")

# Generate the interactive visualization
html_content = lx.visualize("test_output/romeo_juliet_extractions.jsonl")
with open("test_output/romeo_juliet_visualization.html", "w") as f:
    # In notebooks, lx.visualize may return an HTML object rather than a plain string
    f.write(html_content.data if hasattr(html_content, "data") else html_content)

print("Interactive visualization saved to romeo_juliet_visualization.html")

# Analyze character mentions
characters = {}
for e in result.extractions:
    if e.extraction_class == "character":
        char_name = e.extraction_text
        if char_name not in characters:
            characters[char_name] = {"count": 0, "attributes": set()}
        characters[char_name]["count"] += 1
        if e.attributes:
            for attr_key, attr_val in e.attributes.items():
                characters[char_name]["attributes"].add(f"{attr_key}: {attr_val}")

# Print character summary
print(f"\nCHARACTER SUMMARY ({len(characters)} unique characters)")
print("=" * 60)

sorted_chars = sorted(characters.items(), key=lambda x: x[1]["count"], reverse=True)
for char_name, char_data in sorted_chars[:10]:  # Top 10 characters
    attrs_preview = list(char_data["attributes"])[:3]
    attrs_str = f" ({', '.join(attrs_preview)})" if attrs_preview else ""
    print(f"{char_name}: {char_data['count']} mentions{attrs_str}")

# Entity type breakdown
entity_counts = Counter(e.extraction_class for e in result.extractions)
print(f"\nENTITY TYPE BREAKDOWN")
print("=" * 60)
for entity_type, count in entity_counts.most_common():
    percentage = (count / len(result.extractions)) * 100
    print(f"{entity_type}: {count} ({percentage:.1f}%)")
Categories: Praison AI

Context Agent

pip install "praisonai[mongodb]"
export OPENAI_API_KEY=xxxxxxxx
export GITHUB_TOKEN=xxxxxxxx  # Optional: needed when pulling context from a GitHub repo

from praisonaiagents import ContextAgent

agent = ContextAgent(llm="gpt-4o-mini", auto_analyze=False)

agent.start("https://github.com/MervinPraison/PraisonAI/ Need to add Authentication")
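
start() also returns the generated context analysis, so it is worth capturing rather than discarding; the MongoDB example later in this post feeds it into a Task as context:

analysis = agent.start("https://github.com/MervinPraison/PraisonAI/ Need to add Authentication")
print(analysis)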

Knowledge

import os
from praisonaiagents import Agent, Task, PraisonAIAgents

# Ensure OpenAI API key is set
if not os.environ.get("OPENAI_API_KEY"):
    raise ValueError("Please set the OPENAI_API_KEY environment variable")

def main():
    # MongoDB knowledge configuration
    mongodb_knowledge_config = {
        "vector_store": {
            "provider": "mongodb",
            "config": {
                "connection_string": "mongodb+srv://Username:Password@cluster2.bofm7.mywebsite.net/?retryWrites=true&w=majority&appName=Cluster2",  # Replace with your MongoDB connection string
                "database": "praisonai_knowledge",
                "collection": "knowledge_base",
                "use_vector_search": True  # Enable Atlas Vector Search
            }
        },
        "embedder": {
            "provider": "openai",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": os.getenv("OPENAI_API_KEY")
            }
        }
    }
    
    # Create a knowledge agent with MongoDB knowledge store
    knowledge_agent = Agent(
        name="MongoDB Knowledge Agent",
        role="Knowledge Specialist",
        goal="Provide accurate information from MongoDB knowledge base",
        backstory="""You are an expert knowledge specialist who can access and 
        retrieve information from a comprehensive MongoDB knowledge base. You excel 
        at finding relevant information, synthesizing knowledge from multiple sources, 
        and providing accurate, context-aware responses.""",
        knowledge_config=mongodb_knowledge_config,
        knowledge=[os.path.join(os.path.dirname(__file__), "llms.md")],
        memory=True,
        verbose=True,
        llm="gpt-4o-mini"
    )
    
    # Create a research assistant agent
    research_agent = Agent(
        name="Research Assistant",
        role="Research Assistant",
        goal="Gather information and store it in the knowledge base",
        backstory="""You are a research assistant who specializes in gathering 
        information from various sources and organizing it for storage in the 
        knowledge base. You ensure information is accurate, well-structured, 
        and properly categorized.""",
        memory=True,
        verbose=True,
        llm="gpt-4o-mini"
    )
    
    # Create tasks for knowledge management
    knowledge_tasks = [
        Task(
            description="""Research and store information about MongoDB Atlas Vector Search:
            1. Gather comprehensive information about MongoDB Atlas Vector Search
            2. Include technical specifications, use cases, and best practices
            3. Store the information in the MongoDB knowledge base
            4. Organize information by categories (features, performance, integration)
            """,
            expected_output="MongoDB Atlas Vector Search information stored in knowledge base",
            agent=research_agent
        ),
        Task(
            description="""Research and store information about AI agent frameworks:
            1. Research popular AI agent frameworks (LangChain, AutoGen, etc.)
            2. Compare their features, capabilities, and use cases
            3. Store comparative analysis in the knowledge base
            4. Include code examples and best practices
            """,
            expected_output="AI agent framework comparison stored in knowledge base",
            agent=research_agent
        ),
        Task(
            description="""Query the knowledge base for MongoDB information:
            1. Search for information about MongoDB Atlas Vector Search
            2. Extract key features and capabilities
            3. Provide a comprehensive summary
            4. Include technical recommendations
            """,
            expected_output="Comprehensive MongoDB Atlas Vector Search summary from knowledge base",
            agent=knowledge_agent
        ),
        Task(
            description="""Query the knowledge base for AI agent framework information:
            1. Search for information about AI agent frameworks
            2. Compare different frameworks based on stored knowledge
            3. Provide recommendations for different use cases
            4. Include best practices and examples
            """,
            expected_output="AI agent framework comparison and recommendations from knowledge base",
            agent=knowledge_agent
        )
    ]
    
    # Initialize the multi-agent system with MongoDB knowledge
    print("🚀 Starting MongoDB Knowledge Management System...")
    print("=" * 60)
    
    knowledge_system = PraisonAIAgents(
        agents=[research_agent, knowledge_agent],
        tasks=knowledge_tasks,
        memory=True,
        verbose=True
    )
    
    # Execute the knowledge management pipeline
    results = knowledge_system.start()
    
if __name__ == "__main__":
    main()
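
For a one-off lookup outside the task pipeline, the knowledge agent can also be queried directly inside main(); a minimal sketch (Agent.start takes a plain prompt string in praisonaiagents):

# Ask the knowledge agent directly, without Tasks
answer = knowledge_agent.start("Summarise what the knowledge base says about Atlas Vector Search")
print(answer)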

Context Agent with MongoDB Memory

from praisonaiagents import Agent, Task, PraisonAIAgents
from praisonaiagents.agent import ContextAgent

context_agent = ContextAgent(llm="gpt-4o-mini", auto_analyze=False)

context_output = context_agent.start("https://github.com/MervinPraison/PraisonAI/ Need to add Authentication")

mongodb_memory_config = {
    "provider": "mongodb",
    "config": {
        "connection_string": "mongodb+srv://Username:Password@cluster2.bofm7.mywebsite.net/?retryWrites=true&w=majority&appName=Cluster2",
        "database": "praisonai_memory",
        "use_vector_search": True,
        "max_pool_size": 50,
        "min_pool_size": 10,
        "server_selection_timeout": 5000
    }
}

implementation_agent = Agent(
    name="Implementation Agent",
    role="Authentication Implementation Specialist",
    goal="Implement authentication features based on project requirements",
    backstory="Expert software implementer specializing in authentication systems, security features, and seamless integration with existing codebases",
    memory=True,
    llm="gpt-4o-mini",
)

implementation_task = Task(
    description="Implement authentication features based on the project requirements from context analysis",
    expected_output="Authentication implementation with code, configuration, and integration details",
    agent=implementation_agent,
    context=context_output,
)

implementation_system = PraisonAIAgents(
    agents=[implementation_agent],
    tasks=[implementation_task],
    memory=True,
    memory_config=mongodb_memory_config
)

results = implementation_system.start()
print(f"Results: {results}")
Categories: Langchain Llama Index RAG

Langtrace

export LANGTRACE_API_KEY=xxxxx
export OPENAI_API_KEY=xxxxxx

Langtrace Llama Index

pip install langtrace-python-sdk llama-index openai langchain_community langchain langchain-chroma langchainhub langchain_openai
from langtrace_python_sdk import langtrace # Must precede any llm module imports
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
import os

langtrace.init(
    api_key=os.getenv("LANGTRACE_API_KEY"),
    api_host="http://localhost:3000/api/trace"  # Self-hosted Langtrace; omit to use the cloud endpoint
)

documents = SimpleDirectoryReader(input_files=["soccer_rules.pdf"]).load_data()
index = VectorStoreIndex.from_documents(documents)

query_engine = index.as_query_engine()
print(query_engine.query("What is a throw in?").response)
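
Each response also carries the retrieved chunks, which is useful when checking what the traced retrieval actually pulled in; a sketch using LlamaIndex's source_nodes:

response = query_engine.query("What is a throw in?")
print(response.response)
# Inspect which chunks were retrieved to ground the answer
for node in response.source_nodes:
    print(node.score, node.node.get_content()[:100])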

Langtrace Langchain

from langtrace_python_sdk import langtrace # Must precede any llm module imports
import os
from langchain import hub
from langchain_chroma import Chroma
from langchain_community.document_loaders import TextLoader
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import ChatOpenAI

langtrace.init(
    api_key=os.getenv("LANGTRACE_API_KEY"),
    api_host="http://localhost:3000/api/trace"  # Self-hosted Langtrace; omit to use the cloud endpoint
)

llm = ChatOpenAI(model="gpt-4o-mini")
loader = TextLoader("soccer_rules.txt")
docs = loader.load()

text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())

# Retrieve and generate using the relevant snippets of the document
retriever = vectorstore.as_retriever()
prompt = hub.pull("rlm/rag-prompt")

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)


rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

print(rag_chain.invoke("What is offside?"))
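
Because the chain is a LangChain runnable, it also supports token streaming; a small sketch:

# Stream the answer token-by-token instead of waiting for the full string
for chunk in rag_chain.stream("What is a penalty kick?"):
    print(chunk, end="", flush=True)
print()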

Langtrace CrewAI

https://github.com/Scale3-Labs/langtrace-recipes/blob/main/integrations/llm-framework/crewai/starter.ipynb

Categories: API

xAI SDK API

pip install xai-sdk
export XAI_API_KEY=xxxxxx
from xai_sdk import Client
from xai_sdk.chat import user, system

client = Client(
    api_host="api.x.ai"  # The API key is read from the XAI_API_KEY environment variable
)

chat = client.chat.create(model="grok-4-0709", temperature=0)
chat.append(system("You are a PhD-level mathematician."))
chat.append(user("What is 2 + 2?"))

response = chat.sample()
print(response.content)
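
The chat object keeps the running conversation, so a follow-up turn is just more append calls; a sketch based on the same pattern (appending the model response back into the history follows the SDK's chat interface, but treat the exact call as an assumption if your version differs):

chat.append(response)  # Keep the assistant turn in the history
chat.append(user("Now explain why, in one sentence."))
follow_up = chat.sample()
print(follow_up.content)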

Categories: DevOps

Replit Mintlify Config

entrypoint = "index.js"
modules = ["nodejs-20"]
hidden = [".config", "package-lock.json"]

[gitHubImport]
requiredFiles = [".replit", "replit.nix", "package.json", "package-lock.json"]

[nix]
channel = "stable-24_05"

[unitTest]
language = "nodejs"

[deployment]
run = [
  "sh",
  "-c",
  "cd PraisonAI/docs && mintlify dev --host 0.0.0.0 --port 3000",
]
deploymentTarget = "gce"
ignorePorts = false
build = ["sh", "-c", "cd PraisonAI/docs && npm install mintlify"]

[[ports]]
localPort = 3000
externalPort = 80
Categories: Voice

Chatterbox TTS Gradio for Mac

import random
import numpy as np
import torch
import gradio as gr
from chatterbox.tts import ChatterboxTTS

# Detect device (Mac with M1/M2/M3/M4)
device = "mps" if torch.backends.mps.is_available() else "cpu"
map_location = torch.device(device)

# Patch torch.load so checkpoints saved on CUDA machines are remapped onto MPS/CPU
torch_load_original = torch.load
def patched_torch_load(*args, **kwargs):
    if 'map_location' not in kwargs:
        kwargs['map_location'] = map_location
    return torch_load_original(*args, **kwargs)

torch.load = patched_torch_load

DEVICE = device


def set_seed(seed: int):
    torch.manual_seed(seed)
    if torch.cuda.is_available():  # No-op on Macs; keeps the demo portable
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    random.seed(seed)
    np.random.seed(seed)


def load_model():
    model = ChatterboxTTS.from_pretrained(DEVICE)
    return model


def generate(model, text, audio_prompt_path, exaggeration, temperature, seed_num, cfgw):
    if model is None:
        model = ChatterboxTTS.from_pretrained(DEVICE)

    if seed_num != 0:
        set_seed(int(seed_num))

    wav = model.generate(
        text,
        audio_prompt_path=audio_prompt_path,
        exaggeration=exaggeration,
        temperature=temperature,
        cfg_weight=cfgw,
    )
    return (model.sr, wav.squeeze(0).numpy())


with gr.Blocks() as demo:
    model_state = gr.State(None)  # Loaded once per session/user

    with gr.Row():
        with gr.Column():
            text = gr.Textbox(
                value="Now let's make my mum's favourite. So three mars bars into the pan. Then we add the tuna and just stir for a bit, just let the chocolate and fish infuse. A sprinkle of olive oil and some tomato ketchup. Now smell that. Oh boy this is going to be incredible.",
                label="Text to synthesize (max chars 300)",
                max_lines=5
            )
            ref_wav = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Reference Audio File", value=None)
            exaggeration = gr.Slider(0.25, 2, step=.05, label="Exaggeration (Neutral = 0.5, extreme values can be unstable)", value=.5)
            cfg_weight = gr.Slider(0.0, 1, step=.05, label="CFG/Pace", value=0.5)

            with gr.Accordion("More options", open=False):
                seed_num = gr.Number(value=0, label="Random seed (0 for random)")
                temp = gr.Slider(0.05, 5, step=.05, label="temperature", value=.8)

            run_btn = gr.Button("Generate", variant="primary")

        with gr.Column():
            audio_output = gr.Audio(label="Output Audio")

    demo.load(fn=load_model, inputs=[], outputs=model_state)

    run_btn.click(
        fn=generate,
        inputs=[
            model_state,
            text,
            ref_wav,
            exaggeration,
            temp,
            seed_num,
            cfg_weight,
        ],
        outputs=audio_output,
    )

if __name__ == "__main__":
    demo.queue(
        max_size=50,
        default_concurrency_limit=1,
    ).launch(share=True)
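
If you only need audio files rather than a UI, the model can also be driven headlessly with torchaudio (an extra dependency here), reusing the same generate call:

# Headless generation: synthesize straight to a WAV file
import torchaudio as ta

tts = ChatterboxTTS.from_pretrained(device)
wav = tts.generate("Hello from Chatterbox running on a Mac.")
ta.save("output.wav", wav, tts.sr)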
Categories: AI Agents

Microsoft Magentic UI Install

ollama pull qwen3
pip install "magentic-ui[ollama]"
magentic ui --port 8081

YAML Config

model_config: &client
  provider: autogen_ext.models.ollama.OllamaChatCompletionClient
  config:
    model: "qwen3" # change to your desired Ollama model
    host: "http://localhost:11434" # change to your ollama host
    model_info: # change per model you use
      vision: true
      function_calling: true # will work if false, but not fully
      json_output: false # preferred: true
      family: unknown
      structured_output: false
  max_retries: 5

# Note you can define multiple model clients and use them for different agents
# You can also use the OpenAI client instead and access Ollama models
#model_config: &client
#  provider: OpenAIChatCompletionClient
#  config:
#    model: "qwen2.5vl:32b"
#    base_url: "http://localhost:11434/v1" # change to your ollama host
#    model_info: # change per model
#       vision: true 
#    function_calling: true # required for file_surfer, but will still work if file_surfer is not needed
#       json_output: false
#       family: unknown
#       structured_output: false
#  max_retries: 5

orchestrator_client: *client
coder_client: *client
web_surfer_client: *client
file_surfer_client: *client
action_guard_client: *client

Requirements

https://ollama.com

https://docker.com

Categories: AI Agents

Google ADK Agents

└── app
    ├── __init__.py
    ├── .env
    └── agent.py

agent.py

# 1. Basic Agent
from google.adk.agents import Agent

basic_agent = Agent(
    name="basic_agent",
    model="gemini-2.0-flash",
    description="A simple agent that answers questions",
    instruction="""
    You are a helpful stock market assistant. Be concise.
    If you don't know something, just say so.
    """,
)


# 2. Basic Agent with Tool
from google.adk.agents import Agent
import yfinance as yf

def get_stock_price(ticker: str):
    stock = yf.Ticker(ticker)
    price = stock.info.get("currentPrice", "Price not available")
    return {"price": price, "ticker": ticker}

tool_agent = Agent(
    name="tool_agent",
    model="gemini-2.0-flash",
    description="A simple agent that gets stock prices",
    instruction="""
    You are a stock price assistant. Always use the get_stock_price tool.
    Include the ticker symbol in your response.
    """,
    tools=[get_stock_price],
)


# 3. Agent with State
from google.adk.agents import Agent
from google.adk.tools.tool_context import ToolContext
import yfinance as yf

def get_stock_price(ticker: str, tool_context: ToolContext):
    stock = yf.Ticker(ticker)
    price = stock.info.get("currentPrice", "Price not available")
    
    # Initialize recent_searches if it doesn't exist
    if "recent_searches" not in tool_context.state:
        tool_context.state["recent_searches"] = []
        
    recent_searches = tool_context.state["recent_searches"]
    if ticker not in recent_searches:
        recent_searches.append(ticker)
        tool_context.state["recent_searches"] = recent_searches
    
    return {"price": price, "ticker": ticker}

stateful_agent = Agent(
    name="stateful_agent",
    model="gemini-2.0-flash",
    description="An agent that remembers recent searches",
    instruction="""
    You are a stock price assistant. Use the get_stock_price tool.
    I'll remember your previous searches and can tell you about them if you ask.
    """,
    tools=[get_stock_price],
)


# 4. Multi-Tool Agent
from google.adk.agents import Agent
from google.adk.tools.tool_context import ToolContext
import yfinance as yf

def get_stock_price(ticker: str, tool_context: ToolContext):
    stock = yf.Ticker(ticker)
    price = stock.info.get("currentPrice", "Price not available")
    
    # Initialize recent_searches if it doesn't exist
    if "recent_searches" not in tool_context.state:
        tool_context.state["recent_searches"] = []
        
    recent_searches = tool_context.state["recent_searches"]
    if ticker not in recent_searches:
        recent_searches.append(ticker)
        tool_context.state["recent_searches"] = recent_searches
    
    return {"price": price, "ticker": ticker}

def get_stock_info(ticker: str):
    stock = yf.Ticker(ticker)
    company_name = stock.info.get("shortName", "Name not available")
    sector = stock.info.get("sector", "Sector not available")
    return {
        "ticker": ticker,
        "company_name": company_name,
        "sector": sector
    }

multi_tool_agent = Agent(
    name="multi_tool_agent",
    model="gemini-2.0-flash",
    description="An agent with multiple stock information tools",
    instruction="""
    You are a stock information assistant. You have two tools:
    - get_stock_price: For prices
    - get_stock_info: For company name and sector
    """,
    tools=[get_stock_price, get_stock_info],
)


# 5. Structured Output Agent
from google.adk.agents import LlmAgent
from pydantic import BaseModel, Field
import yfinance as yf

class StockAnalysis(BaseModel):
    ticker: str = Field(description="Stock symbol")
    recommendation: str = Field(description="Buy or Sell recommendation")

# Helper to fetch the data the instruction refers to (shown for reference; not wired into the agent)
def get_stock_data_for_prompt(ticker):
    stock = yf.Ticker(ticker)
    price = stock.info.get("currentPrice", 0)
    target_price = stock.info.get("targetMeanPrice", 0)
    return price, target_price

structured_agent = LlmAgent(
    name="structured_agent",
    model="gemini-2.0-flash",
    description="An agent with structured output",
    instruction="""
    You are a stock advisor. Analyze the stock ticker provided by the user.
    Return Buy or Sell recommendation in JSON format.
    
    For each ticker, look at the price and target price to make a decision.
    If target price > current price: recommend Buy
    Otherwise: recommend Sell
    """,
    output_schema=StockAnalysis,  # Note: with output_schema set, ADK agents cannot also use tools
    output_key="stock_analysis"
)


# 6. Callback Agent
from google.adk.agents import Agent
from google.adk.tools.tool_context import ToolContext
from google.adk.tools.base_tool import BaseTool
import yfinance as yf
from typing import Dict, Any, Optional

def get_stock_data(ticker: str, tool_context: ToolContext):
    stock = yf.Ticker(ticker)
    price = stock.info.get("currentPrice", 0)
    
    # Initialize tool_usage in state if it doesn't exist
    if "tool_usage" not in tool_context.state:
        tool_context.state["tool_usage"] = {}
    
    return {
        "ticker": ticker,
        "price": price
    }

def before_tool_callback(tool: BaseTool, args: Dict[str, Any], tool_context: ToolContext) -> Optional[Dict]:
    # Initialize tool_usage if it doesn't exist
    if "tool_usage" not in tool_context.state:
        tool_context.state["tool_usage"] = {}
        
    # Track tool usage count
    tool_usage = tool_context.state["tool_usage"]
    tool_name = tool.name
    tool_usage[tool_name] = tool_usage.get(tool_name, 0) + 1
    tool_context.state["tool_usage"] = tool_usage
    
    print(f"[LOG] Running tool: {tool_name}")
    return None

def after_tool_callback(tool: BaseTool, args: Dict[str, Any], tool_context: ToolContext, tool_response: Dict) -> Optional[Dict]:
    print(f"[LOG] Tool {tool.name} completed")
    return None

# Illustrative only: this dict is never passed to the agent; state is initialized inside the callbacks above
initial_state = {"tool_usage": {}}

callback_agent = Agent(
    name="callback_agent",
    model="gemini-2.0-flash",
    description="An agent with callbacks",
    instruction="""
    You are a stock assistant. Use get_stock_data tool to check stock prices.
    This agent keeps track of how many times tools have been used.
    """,
    tools=[get_stock_data],
    before_tool_callback=before_tool_callback,
    after_tool_callback=after_tool_callback,
)

# Choose which agent to run
root_agent = multi_tool_agent

.env

GOOGLE_API_KEY=xxxxxx
GOOGLE_GENAI_USE_VERTEXAI=FALSE

__init__.py

from . import agent

Commands

adk run app
adk web
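
Besides the CLI, agents can be driven programmatically through a runner; a rough sketch (the session API differs across ADK releases, so treat this as a starting point rather than a definitive recipe):

import asyncio
from google.adk.runners import InMemoryRunner
from google.genai import types

async def main():
    runner = InMemoryRunner(agent=root_agent, app_name="app")
    session = await runner.session_service.create_session(app_name="app", user_id="user1")
    message = types.Content(role="user", parts=[types.Part(text="What is the price of AAPL?")])
    async for event in runner.run_async(user_id="user1", session_id=session.id, new_message=message):
        if event.is_final_response():
            print(event.content.parts[0].text)

asyncio.run(main())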
Categories: MCP

Cursor + Playwright MCP

{
  "mcpServers": {
    "playwright": {
      "command": "npx",
      "args": [
        "-y",
        "@playwright/mcp@0.0.22",
        "--vision"
      ]
    }
  }
}
Categories: Python

FastAPI Time Function

from fastapi import FastAPI
from pydantic import BaseModel
from datetime import datetime
import pytz
import uvicorn
import threading

class TimeRequest(BaseModel):
    location: str

class MyTime:
    def __init__(self, location: str, host: str = None, port: int = None, route: str = "/time"):
        self.location = location
        self.route = route
        self.time_str = self._get_time_by_location()
        print(f"[MyTime] {self.location}: {self.time_str}")

        if host and port:
            threading.Thread(
                target=self._start_fastapi_server,
                args=(host, port, route),
                daemon=True
            ).start()

    def _get_time_by_location(self, loc: str = None) -> str:
        loc = loc or self.location
        try:
            timezone = pytz.timezone(loc)
            local_time = datetime.now(timezone)
            return local_time.strftime('%Y-%m-%d %H:%M:%S')
        except pytz.UnknownTimeZoneError:
            return f"Unknown timezone: {loc}"

    def _start_fastapi_server(self, host, port, route):
        app = FastAPI()

        @app.post(route)
        async def get_time(req: TimeRequest):
            return {"time": self._get_time_by_location(req.location)}

        uvicorn.run(app, host=host, port=port, log_level="info")

# Example usage
if __name__ == "__main__":
    MyTime("Asia/Kolkata", host="0.0.0.0", port=7860, route="/time")

curl -X POST http://localhost:7860/time \
     -H "Content-Type: application/json" \
     -d '{"location": "Europe/London"}'