Categories
AI

CrewAI RAG using Tools

pip install crewai langchain-community langchain-openai requests duckduckgo-search chromadb
export NEWSAPI_KEY=xxxxxxxx
export OPENAI_API_KEY=xxxxxxx
from crewai import Agent, Task, Crew, Process
from langchain_openai import ChatOpenAI
from langchain_core.retrievers import BaseRetriever
from langchain_openai import OpenAIEmbeddings
from langchain.tools import tool
from langchain_community.document_loaders import WebBaseLoader
import requests, os
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.tools import DuckDuckGoSearchRun

embedding_function = OpenAIEmbeddings()
llm = ChatOpenAI(model="gpt-4-turbo-preview")

# Tool 1 : Save the news articles in a database
class SearchNewsDB:
    @tool("News DB Tool")
    def news(query: str):
        """Fetch news articles and process their contents."""
        API_KEY = os.getenv('NEWSAPI_KEY')  # Fetch API key from environment variable
        base_url = "https://newsapi.org/v2/everything"
        
        params = {
            'q': query,
            'sortBy': 'publishedAt',
            'apiKey': API_KEY,
            'language': 'en',
            'pageSize': 5,
        }
        
        response = requests.get(base_url, params=params)
        if response.status_code != 200:
            return "Failed to retrieve news."
        
        articles = response.json().get('articles', [])
        all_splits = []
        for article in articles:
            # Load the full article content from its URL
            loader = WebBaseLoader(article['url'])
            docs = loader.load()

            text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
            splits = text_splitter.split_documents(docs)
            all_splits.extend(splits)  # Accumulate splits from all articles

        # Index the accumulated content splits if there are any
        if all_splits:
            vectorstore = Chroma.from_documents(all_splits, embedding=embedding_function, persist_directory="./chroma_db")
            docs = vectorstore.similarity_search(query)
            return "\n\n".join(doc.page_content for doc in docs)
        else:
            return "No content available for processing."

# Tool 2 : Get the news articles from the database
class GetNews:
    @tool("Get News Tool")
    def news(query: str) -> str:
        """Search Chroma DB for relevant news information based on a query."""
        vectorstore = Chroma(persist_directory="./chroma_db", embedding_function=embedding_function)
        docs = vectorstore.similarity_search(query)
        return "\n\n".join(doc.page_content for doc in docs)

# Tool 3 : Search for news articles on the web
search_tool = DuckDuckGoSearchRun()

# 2. Creating Agents
news_search_agent = Agent(
    role='News Searcher',
    goal='Generate key points for each news article from the latest news',
    backstory='Expert in analysing and generating key points from news content for quick updates.',
    tools=[SearchNewsDB().news],
    allow_delegation=True,
    verbose=True,
    llm=llm
)

writer_agent = Agent(
    role='Writer',
    goal='Identify all the topics received. Use the Get News Tool to verify each topic, use the Search tool for detailed exploration of each topic, and summarise the retrieved information in depth for every topic.',
    backstory='Expert in crafting engaging narratives from complex information.',
    tools=[GetNews().news, search_tool],
    allow_delegation=True,
    verbose=True,
    llm=llm
)

# 3. Creating Tasks
news_search_task = Task(
    description='Search for AI 2024 news and create key points for each news article.',
    agent=news_search_agent,
    tools=[SearchNewsDB().news]
)

writer_task = Task(
    description="""
    Go step by step.
    Step 1: Identify all the topics received.
    Step 2: Use the Get News Tool to verify each topic, going through them one by one.
    Step 3: Use the Search tool to search for information on each topic one by one. 
    Step 4: Go through every topic and write an in-depth summary of the information retrieved.
    Don't skip any topic.
    """,
    agent=writer_agent,
    context=[news_search_task],
    tools=[GetNews().news, search_tool]
)

# 4. Creating Crew
news_crew = Crew(
    agents=[news_search_agent, writer_agent],
    tasks=[news_search_task, writer_task],
    process=Process.sequential, 
    manager_llm=llm
)

# Execute the crew to see RAG in action
result = news_crew.kickoff()
print(result)
Categories
Ollama

Ollama OpenAI Compatibility Example Code

from openai import OpenAI

client = OpenAI(
    base_url = 'http://localhost:11434/v1',
    api_key='ollama',
)

response = client.chat.completions.create(
  model="mistral",
  messages=[
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who won the world series in 2020?"},
    {"role": "assistant", "content": "The LA Dodgers won in 2020."},
    {"role": "user", "content": "Where was it played?"}
  ]
)
print(response.choices[0].message.content)
Categories
AI

Prompting Types

| Prompting Type | Description | Example | When to Use |
| --- | --- | --- | --- |
| Chain of Thought | Guides the LLM to break down complex problems into smaller steps and explicitly show its reasoning process. | “What is the total cost of 5 apples at $0.80 each? Thought process: The cost of one apple is $0.80. The cost of 5 apples is 5 * $0.80 = $4.00. Answer: $4.00” | Complex tasks, emphasizing transparency of reasoning |
| Few-Shot Prompting | Provides a few examples of input/output pairs for the LLM to infer the pattern to apply to new examples. | Task: Sentiment Analysis. Ex 1: Input: “Great movie!” Output: Positive. Ex 2: Input: “Terrible acting.” Output: Negative. New Input: “This film was okay.” | When you have a few clear examples of the task |
| Zero-Shot Prompting | Only the task description is given; the LLM relies on prior knowledge and understanding. | “Summarize this article in three sentences.” | Simple tasks the LLM likely has prior familiarity with |
| Instruction Tuning | Involves training the LLM on a dataset of instructions and corresponding correct responses. | Dataset of task descriptions and ideal outputs used to further train/fine-tune the LLM. | Long-term enhancement of the LLM’s instruction-following ability |
| Self-Consistency | Generates multiple responses to a prompt; the LLM compares and evaluates the responses or creates a final synthesis. | Ask for three headlines for an article, then create a new one combining the best of each. | Improving response quality, creative solutions |
| Meta-Prompting | Prompts aim to trigger specific cognitive processes (problem breakdown, self-correction). | “Let’s think about this step-by-step. What should we do first?” | Eliciting specific reasoning strategies |
| Tree of Thoughts (ToT) | Generates multiple potential “thoughts” at each stage of problem-solving, evaluating and exploring them, often using search algorithms. | “Write a persuasive essay arguing for stricter environmental regulations.” | Complex reasoning or decision-making |
| Prompt Combination | Combines different instructions or questions into a single, multi-faceted prompt. | “Explain the difference between AI and machine learning. Can you provide real-world examples of each?” | Getting comprehensive, detailed responses |
| Retrieval Augmented Generation (RAG) | Lets the LLM retrieve relevant text passages from a knowledge base before generating a response. | “What is the capital of France? Can you also provide some historical facts about it?” | Tasks requiring external knowledge |
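
As a quick illustration of the zero-shot, few-shot, and chain-of-thought rows above, here is a minimal sketch using the OpenAI Python client that appears elsewhere in these notes; the model name and prompt strings are placeholders, not a prescribed recipe.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def ask(prompt: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4-turbo-preview",  # any chat model works for this sketch
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

# Zero-shot: only the task description is given
print(ask("Classify the sentiment of: 'This film was okay.'"))

# Few-shot: a few input/output pairs demonstrate the pattern before the new input
print(ask(
    "Classify the sentiment.\n"
    "Input: 'Great movie!' -> Positive\n"
    "Input: 'Terrible acting.' -> Negative\n"
    "Input: 'This film was okay.' ->"
))

# Chain of Thought: ask the model to show its reasoning before giving the answer
print(ask("What is the total cost of 5 apples at $0.80 each? Think step by step, then state the answer."))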
Categories
AI Agents

Job Search CrewAI

Pre-Requisite

API keys from the following websites: OpenAI (for OPENAI_API_KEY) and Adzuna (for ADZUNA_APP_ID and ADZUNA_API_KEY).

Code

pip install crewai langchain langchain-openai
export OPENAI_API_KEY=xxxxxxxxxxxxx
export ADZUNA_API_KEY=xxxxxxxxxxxxx
export ADZUNA_APP_ID=xxxxxxxxxxxxx
from crewai import Agent, Task, Crew, Process
from langchain_openai import ChatOpenAI
from langchain.tools import tool
from langchain_core.tools import ToolException
from crewai.tasks.task_output import TaskOutput
import json, requests, os

llm = ChatOpenAI(model="gpt-4-turbo-preview")

class JobSearchTools:
    @tool("Job Search Tool")
    def search_jobs(input_json: str) -> str:
        """Search for job listings using the Adzuna API."""
        # Parse input JSON string
        try:
            input_data = json.loads(input_json)
            role = input_data['role']
            location = input_data['location']
            num_results = input_data['num_results']
        except (json.JSONDecodeError, KeyError) as e:
            return """The tool accepts input in JSON format with the 
                    following schema: {'role': '<role>', 'location': '<location>', 'num_results': <number>}. 
                    Ensure to format the input accordingly."""

        app_id = os.getenv('ADZUNA_APP_ID')
        api_key = os.getenv('ADZUNA_API_KEY')
        base_url = "http://api.adzuna.com/v1/api/jobs"
        url = f"{base_url}/us/search/1?app_id={app_id}&app_key={api_key}&results_per_page={num_results}&what={role}&where={location}&content-type=application/json"
        
        try:
            response = requests.get(url)
            response.raise_for_status()  # This will raise an HTTPError if the HTTP request returned an unsuccessful status code.
            jobs_data = response.json()

            job_listings = []
            for job in jobs_data.get('results', []):
                job_details = f"Title: {job['title']}, Company: {job['company']['display_name']}, Location: {job['location']['display_name']}, Description: {job['description'][:100]}..."
                job_listings.append(job_details)
            return '\n'.join(job_listings)
        except requests.exceptions.HTTPError as err:
            raise ToolException(f"HTTP Error: {err}")
        except requests.exceptions.RequestException as e:
            raise ToolException(f"Error: {e}")

def callback_function(output: TaskOutput):
    with open("task_output.txt", "a") as file:
        file.write(f"""{output.result}\n\n""")
    print("Result saved to task_output.txt")

job_searcher_agent = Agent(
    role='Job Searcher',
    goal='Search for jobs in the field of interest, focusing on enhancing relevant skills',
    backstory="""You are actively searching for job opportunities in your field, ready to utilise and expand your skill set in a new role.""",
    verbose=True,
    llm=llm,
    allow_delegation=True,
    tools=[JobSearchTools().search_jobs]
)

skills_development_agent = Agent(
    role='Skills Development Advisor',
    goal='Identify key skills required for jobs of interest and advise on improving them',
    backstory="""As a skills development advisor, you assist job searchers in identifying crucial skills for their target roles and recommend ways to develop these skills.""",
    verbose=True,
    allow_delegation=True,
    llm=llm
)

interview_preparation_coach = Agent(
    role='Interview Preparation Coach',
    goal='Enhance interview skills, focusing on common questions, presentation, and communication',
    backstory="""Expert in coaching job searchers on successful interview techniques, including mock interviews and feedback.""",
    verbose=True,
    allow_delegation=True,
    llm=llm
)

career_advisor = Agent(
    role='Career Advisor',
    goal='Assist in resume building, LinkedIn profile optimization, and networking strategies',
    backstory="""Experienced in guiding candidates through their job search journey, offering personalized advice on career development and application processes.""",
    verbose=True,
    allow_delegation=True,
    llm=llm
)

# Define tasks for your agents
job_search_task = Task(
    description="""Search for current job openings for the Senior Data Scientist role in New York 
    using the Job Search tool. Find 5 vacant positions in total. 
    Emphasize the key skills required.
    The tool accepts input in JSON format with the 
    following schema: {'role': '<role>', 'location': '<location>', 'num_results': <number>}. 
    Ensure to format the input accordingly.""",
    agent=job_searcher_agent,
    tools=[JobSearchTools().search_jobs],
    callback=callback_function
)

skills_highlighting_task = Task(
    description="""Based on the identified job openings, list the key skills required for each position separately.
    Provide recommendations on how candidates can acquire or improve these skills through courses, self-study, or practical experience.""",
    agent=skills_development_agent,
    context=[job_search_task],
    callback=callback_function
)

interview_preparation_task = Task(
    description="""Prepare job searchers for interviews by conducting mock interviews and offering feedback on their responses, presentation, and communication skills, for each role separately.""",
    agent=interview_preparation_coach,
    context=[job_search_task],
    callback=callback_function
)

career_advisory_task = Task(
    description="""Offer guidance on resume building, optimizing LinkedIn profiles, and effective networking strategies to enhance job application success, for each role separately.""",
    agent=career_advisor,
    context=[job_search_task],
    callback=callback_function
)

# Set up your crew with a sequential process (tasks executed sequentially by default)
job_search_crew = Crew(
    agents=[job_searcher_agent, skills_development_agent, interview_preparation_coach, career_advisor],
    tasks=[job_search_task, skills_highlighting_task, interview_preparation_task, career_advisory_task],
    process=Process.hierarchical,
    manager_llm=llm,
)

# Initiate the crew to start working on its tasks
crew_result = job_search_crew.kickoff()

print(crew_result)

Output

❯ python app.py


> Entering new CrewAgentExecutor chain...
Thought: Do I need to use a tool? Yes
Action: Delegate work to co-worker
Action Input: Job Searcher|Search for current job openings|{'role': 'Senior Data Scientist', 'location': 'New York', 'num_results': 5}

> Entering new CrewAgentExecutor chain...
Thought: Do I need to use a tool? Yes
Action: Job Search Tool
Action Input: {"what": "Senior Data Scientist", "where": "New York", "max_days_old": 30, "results_per_page": 5}The tool accepts input in JSON format with the 
                    following schema: {'role': '<role>', 'location': '<location>', 'num_results': <number>}. 
                    Ensure to format the input accordingly.Do I need to use a tool? Yes
Action: Job Search Tool
Action Input: {"role": "Senior Data Scientist", "location": "New York", "num_results": 5}Title: Senior Data Scientist, Company: InVitro Cell Research, LLC, Location: Leonia, Bergen County, Description: We're hiring Senior Data Scientists with expertise in integrating and analyzing multi-omic datasets ...
Title: Expression of Interest: Senior Data Scientist, Company: Fingerprint For Success, Location: Manhattan, New York City, Description: We are inviting professionals in high-growth industries who are thinking about their next move or lo...
Title: Senior Data Scientist, Company: Curinos, Location: New York City, New York, Description: Job Description Curinos is looking for an experienced applied Senior Data Scientist to join our Data...
Title: Senior Data Scientist, Company: Chubb, Location: Hudson County, New Jersey, Description: Chubb's North America Property and Casualty Data Analytics Division is currently seeking an experien...
Title: Senior Data Scientist, Company: Informa, Location: Church Street, Manhattan, Description: Curinos is looking for an experienced applied Senior Data Scientist to join our Data Science & Machi...Do I need to use a tool? No
Final Answer: I found several relevant job openings for the position of Senior Data Scientist in and around New York:

1. **Company:** InVitro Cell Research, LLC
   - **Location:** Leonia, Bergen County
   - **Description:** Hiring Senior Data Scientists with expertise in integrating and analyzing multi-omic datasets.

2. **Company:** Fingerprint For Success
   - **Location:** Manhattan, New York City
   - **Description:** Inviting professionals in high-growth industries thinking about their next move or looking to transition into the field of data science.

3. **Company:** Curinos
   - **Location:** New York City, New York
   - **Description:** Looking for an experienced applied Senior Data Scientist to join our Data Science team.

4. **Company:** Chubb
   - **Location:** Hudson County, New Jersey
   - **Description:** Seeking an experienced Senior Data Scientist for our North America Property and Casualty Data Analytics Division.

5. **Company:** Informa
   - **Location:** Church Street, Manhattan
   - **Description:** Curinos is looking for an experienced applied Senior Data Scientist to join our Data Science & Machine Learning team.

These opportunities could be a great fit based on your interest and desired location.

> Finished chain.
I found several relevant job openings for the position of Senior Data Scientist in and around New York:

1. **Company:** InVitro Cell Research, LLC
   - **Location:** Leonia, Bergen County
   - **Description:** Hiring Senior Data Scientists with expertise in integrating and analyzing multi-omic datasets.

2. **Company:** Fingerprint For Success
   - **Location:** Manhattan, New York City
   - **Description:** Inviting professionals in high-growth industries thinking about their next move or looking to transition into the field of data science.

3. **Company:** Curinos
   - **Location:** New York City, New York
   - **Description:** Looking for an experienced applied Senior Data Scientist to join our Data Science team.

4. **Company:** Chubb
   - **Location:** Hudson County, New Jersey
   - **Description:** Seeking an experienced Senior Data Scientist for our North America Property and Casualty Data Analytics Division.

5. **Company:** Informa
   - **Location:** Church Street, Manhattan
   - **Description:** Curinos is looking for an experienced applied Senior Data Scientist to join our Data Science & Machine Learning team.

These opportunities could be a great fit based on your interest and desired location.Do I need to use a tool? No
Final Answer: The search for current job openings for the Senior Data Scientist role in New York has yielded 5 vacant positions:

1. **InVitro Cell Research, LLC** in Leonia, Bergen County is looking for Senior Data Scientists with expertise in integrating and analyzing multi-omic datasets.
   
2. **Fingerprint For Success** located in Manhattan, New York City, invites professionals in high-growth industries thinking about their next move or looking to transition into the field of data science.
   
3. **Curinos** in New York City, New York, is seeking an experienced applied Senior Data Scientist to join their Data Science team.
   
4. **Chubb**, situated in Hudson County, New Jersey, is in search of an experienced Senior Data Scientist for their North America Property and Casualty Data Analytics Division.
   
5. **Informa** on Church Street, Manhattan, is looking for an experienced applied Senior Data Scientist to join their Data Science & Machine Learning team.

These positions highlight the demand for skills in data integration and analysis, machine learning, and experience in data science applications in various industries.

> Finished chain.
Result saved to task_output.txt


> Entering new CrewAgentExecutor chain...
Thought: Do I need to use a tool? Yes
Action: Delegate work to co-worker
Action Input: Skills Development Advisor|recommend courses and self-study materials|The job openings we found require skills in integrating and analyzing multi-omic datasets, high-growth industry knowledge, applied data science experience, machine learning, and industry-specific data science applications. We need recommendations on how candidates can acquire or improve these skills through courses, self-study, or practical experience.

> Entering new CrewAgentExecutor chain...
Thought: Do I need to use a tool? No
Final Answer: To acquire or improve skills in integrating and analyzing multi-omic datasets, high-growth industry knowledge, applied data science experience, machine learning, and industry-specific data science applications, candidates can follow this structured approach:

1. **Integrating and Analyzing Multi-Omic Datasets**:
   - **Courses**: Look for bioinformatics or computational biology courses available on platforms like Coursera, edX, or Udemy. Specifically, courses that cover genomics, proteomics, and their integration methods will be highly beneficial.
   - **Self-Study Materials**: Review academic and industry publications in journals like Bioinformatics, Nature Methods, or the Journal of Proteomics & Bioinformatics to understand current trends and methodologies.
   - **Practical Experience**: Participate in open-source projects or Kaggle competitions related to bioinformatics. This hands-on experience is invaluable.

2. **High-Growth Industry Knowledge**:
   - **Courses**: Business and technology-focused courses that provide insights into emerging markets and technologies. Platforms like LinkedIn Learning and Coursera offer industry-specific insights and trends courses.
   - **Self-Study Materials**: Regularly read industry reports from McKinsey, BCG, or specific industry publications like TechCrunch for technology sectors. Podcasts and webinars by industry leaders can also provide current insights.
   - **Practical Experience**: Networking with professionals in the industry through platforms like LinkedIn, attending webinars, and participating in industry forums can provide real-world knowledge and trends.

3. **Applied Data Science Experience**:
   - **Courses**: Data science bootcamps or courses that focus on real-world applications of data science, including project-based learning. Check platforms like DataCamp, Coursera, and edX.
   - **Self-Study Materials**: Work through datasets available on platforms like Kaggle or GitHub, applying different data science techniques and documenting your findings and methodologies in a portfolio.
   - **Practical Experience**: Freelance projects or internships where you can apply data science skills in real-world scenarios.

4. **Machine Learning**:
   - **Courses**: Look for machine learning courses that offer both foundational understanding and advanced techniques. Andrew Ng’s Machine Learning course on Coursera is highly recommended.
   - **Self-Study Materials**: Books like "Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow" by Aurélien Géron provide comprehensive guides to practical machine learning.
   - **Practical Experience**: Implement machine learning models to solve problems on Kaggle. This provides both experience and a portfolio to show potential employers.

5. **Industry-Specific Data Science Applications**:
   - **Courses**: Seek out courses that focus on the application of data science in specific industries, such as healthcare, finance, or marketing.
   - **Self-Study Materials**: Industry-specific case studies and datasets can help understand how data science is applied uniquely in each sector.
   - **Practical Experience**: Try to engage in projects or competitions that are industry-specific to gain relevant experience.

Remember, the combination of courses, self-study, and practical experience not only enhances learning but also significantly improves employability by demonstrating both knowledge and practical skills to potential employers.

> Finished chain.
To acquire or improve skills in integrating and analyzing multi-omic datasets, high-growth industry knowledge, applied data science experience, machine learning, and industry-specific data science applications, candidates can follow this structured approach:

1. **Integrating and Analyzing Multi-Omic Datasets**:
   - **Courses**: Look for bioinformatics or computational biology courses available on platforms like Coursera, edX, or Udemy. Specifically, courses that cover genomics, proteomics, and their integration methods will be highly beneficial.
   - **Self-Study Materials**: Review academic and industry publications in journals like Bioinformatics, Nature Methods, or the Journal of Proteomics & Bioinformatics to understand current trends and methodologies.
   - **Practical Experience**: Participate in open-source projects or Kaggle competitions related to bioinformatics. This hands-on experience is invaluable.

2. **High-Growth Industry Knowledge**:
   - **Courses**: Business and technology-focused courses that provide insights into emerging markets and technologies. Platforms like LinkedIn Learning and Coursera offer industry-specific insights and trends courses.
   - **Self-Study Materials**: Regularly read industry reports from McKinsey, BCG, or specific industry publications like TechCrunch for technology sectors. Podcasts and webinars by industry leaders can also provide current insights.
   - **Practical Experience**: Networking with professionals in the industry through platforms like LinkedIn, attending webinars, and participating in industry forums can provide real-world knowledge and trends.

3. **Applied Data Science Experience**:
   - **Courses**: Data science bootcamps or courses that focus on real-world applications of data science, including project-based learning. Check platforms like DataCamp, Coursera, and edX.
   - **Self-Study Materials**: Work through datasets available on platforms like Kaggle or GitHub, applying different data science techniques and documenting your findings and methodologies in a portfolio.
   - **Practical Experience**: Freelance projects or internships where you can apply data science skills in real-world scenarios.

4. **Machine Learning**:
   - **Courses**: Look for machine learning courses that offer both foundational understanding and advanced techniques. Andrew Ng’s Machine Learning course on Coursera is highly recommended.
   - **Self-Study Materials**: Books like "Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow" by Aurélien Géron provide comprehensive guides to practical machine learning.
   - **Practical Experience**: Implement machine learning models to solve problems on Kaggle. This provides both experience and a portfolio to show potential employers.

5. **Industry-Specific Data Science Applications**:
   - **Courses**: Seek out courses that focus on the application of data science in specific industries, such as healthcare, finance, or marketing.
   - **Self-Study Materials**: Industry-specific case studies and datasets can help understand how data science is applied uniquely in each sector.
   - **Practical Experience**: Try to engage in projects or competitions that are industry-specific to gain relevant experience.

Remember, the combination of courses, self-study, and practical experience not only enhances learning but also significantly improves employability by demonstrating both knowledge and practical skills to potential employers.Do I need to use a tool? No
Final Answer: Based on the identified job openings for the Senior Data Scientist role in New York, here are the key skills required for each position, along with recommendations on how candidates can acquire or improve these skills:

1. **InVitro Cell Research, LLC**:
   - **Skills Required**: Expertise in integrating and analyzing multi-omic datasets.
   - **Recommendations**: Candidates can improve these skills through bioinformatics or computational biology courses, reviewing academic publications, and participating in bioinformatics projects or competitions.

2. **Fingerprint For Success**:
   - **Skills Required**: Knowledge in high-growth industries and transitioning into the field of data science.
   - **Recommendations**: Business and technology-focused courses, reading industry reports, and networking with professionals can enhance understanding of high-growth industries.

3. **Curinos**:
   - **Skills Required**: Applied data science experience.
   - **Recommendations**: Data science bootcamps or courses with a focus on real-world applications, working through datasets, and engaging in freelance projects or internships can improve applied data science skills.

4. **Chubb**:
   - **Skills Required**: Experience in machine learning and data analytics in the property and casualty insurance sector.
   - **Recommendations**: Courses in machine learning, practical books on machine learning, and implementing machine learning models in projects can sharpen these skills. Add industry-specific knowledge through finance or insurance-focused data science courses.

5. **Informa**:
   - **Skills Required**: Experience in data science and machine learning applications.
   - **Recommendations**: Similar to Chubb, focusing on machine learning courses, hands-on practice, and industry-specific applications of data science will be beneficial.

Candidates should pursue a combination of courses, self-study, and practical experience to not only learn but demonstrate their skills effectively to potential employers. Engaging in projects, whether open-source, freelance, or competitions, is crucial for practical experience.

> Finished chain.
Result saved to task_output.txt


> Entering new CrewAgentExecutor chain...
Thought: Do I need to use a tool? Yes
Action: Delegate work to co-worker
Action Input: Job Searcher|Conduct mock interviews|Prepare mock interviews for each of the 5 Senior Data Scientist roles, focusing on the key skills and experiences highlighted for each position. Ensure the interviews simulate the real interview environment as closely as possible to provide constructive feedback on responses, presentation, and communication skills.

> Entering new CrewAgentExecutor chain...
Thought: Do I need to use a tool? No
Final Answer: To conduct mock interviews for each of the 5 Senior Data Scientist roles, we'll first outline a plan based on the key skills and experiences highlighted for each position. This will ensure that the mock interviews closely simulate the real interview environment, providing constructive feedback on responses, presentation, and communication skills.

1. **Role 1: Senior Data Scientist at a FinTech Startup**
   - **Key Skills/Experiences**: Machine Learning, Financial Modeling, Python, SQL, Communication skills.
   - **Mock Interview Plan**: Prepare questions that cover machine learning applications in finance, such as predicting stock prices or identifying fraudulent transactions. Include technical questions on Python and SQL, and a scenario-based question to assess communication skills, like explaining a complex model to a non-technical stakeholder.

2. **Role 2: Senior Data Scientist in Healthcare**
   - **Key Skills/Experiences**: Bioinformatics, Python, R, Data Visualization, Ethics in AI.
   - **Mock Interview Plan**: Design questions around bioinformatics projects, and using Python and R for statistical analysis. Test data visualization skills with a task to present health data insights. Discuss ethical considerations in AI, focusing on patient data privacy and bias in medical predictions.

3. **Role 3: Senior Data Scientist at a Tech Giant**
   - **Key Skills/Experiences**: Big Data, Cloud Computing (AWS, GCP), Python, Deep Learning, Leadership.
   - **Mock Interview Plan**: Include questions on managing big data projects and utilizing cloud resources efficiently. Dive into deep learning techniques, asking for examples of previous projects. Evaluate leadership skills by discussing experiences leading a data science team or project.

4. **Role 4: Senior Data Scientist in Retail**
   - **Key Skills/Experiences**: Customer Analytics, A/B Testing, Python, SQL, Business Acumen.
   - **Mock Interview Plan**: Frame questions around analyzing customer behavior and conducting A/B tests to improve sales. Test SQL and Python proficiency through data manipulation tasks. Assess business acumen by discussing strategies to increase customer retention or average purchase value.

5. **Role 5: Senior Data Scientist in a Government Agency**
   - **Key Skills/Experiences**: Public Policy Analysis, Python, R, Data Ethics, Clear Communication.
   - **Mock Interview Plan**: Prepare questions on using data science for policy analysis and decisions, including technical questions on Python and R. Include a discussion on data ethics, especially regarding public data. Add a component to evaluate clear communication skills, perhaps through a brief presentation or explaining a policy recommendation based on data analysis.

For each mock interview, it's essential to provide a realistic and supportive environment, offering constructive feedback that focuses on areas of improvement, such as answering techniques, technical skill proficiency, and communication effectiveness. This preparation will help simulate the real interview environment and better prepare candidates for their job applications.

> Finished chain.
To conduct mock interviews for each of the 5 Senior Data Scientist roles, we'll first outline a plan based on the key skills and experiences highlighted for each position. This will ensure that the mock interviews closely simulate the real interview environment, providing constructive feedback on responses, presentation, and communication skills.

1. **Role 1: Senior Data Scientist at a FinTech Startup**
   - **Key Skills/Experiences**: Machine Learning, Financial Modeling, Python, SQL, Communication skills.
   - **Mock Interview Plan**: Prepare questions that cover machine learning applications in finance, such as predicting stock prices or identifying fraudulent transactions. Include technical questions on Python and SQL, and a scenario-based question to assess communication skills, like explaining a complex model to a non-technical stakeholder.

2. **Role 2: Senior Data Scientist in Healthcare**
   - **Key Skills/Experiences**: Bioinformatics, Python, R, Data Visualization, Ethics in AI.
   - **Mock Interview Plan**: Design questions around bioinformatics projects, and using Python and R for statistical analysis. Test data visualization skills with a task to present health data insights. Discuss ethical considerations in AI, focusing on patient data privacy and bias in medical predictions.

3. **Role 3: Senior Data Scientist at a Tech Giant**
   - **Key Skills/Experiences**: Big Data, Cloud Computing (AWS, GCP), Python, Deep Learning, Leadership.
   - **Mock Interview Plan**: Include questions on managing big data projects and utilizing cloud resources efficiently. Dive into deep learning techniques, asking for examples of previous projects. Evaluate leadership skills by discussing experiences leading a data science team or project.

4. **Role 4: Senior Data Scientist in Retail**
   - **Key Skills/Experiences**: Customer Analytics, A/B Testing, Python, SQL, Business Acumen.
   - **Mock Interview Plan**: Frame questions around analyzing customer behavior and conducting A/B tests to improve sales. Test SQL and Python proficiency through data manipulation tasks. Assess business acumen by discussing strategies to increase customer retention or average purchase value.

5. **Role 5: Senior Data Scientist in a Government Agency**
   - **Key Skills/Experiences**: Public Policy Analysis, Python, R, Data Ethics, Clear Communication.
   - **Mock Interview Plan**: Prepare questions on using data science for policy analysis and decisions, including technical questions on Python and R. Include a discussion on data ethics, especially regarding public data. Add a component to evaluate clear communication skills, perhaps through a brief presentation or explaining a policy recommendation based on data analysis.

For each mock interview, it's essential to provide a realistic and supportive environment, offering constructive feedback that focuses on areas of improvement, such as answering techniques, technical skill proficiency, and communication effectiveness. This preparation will help simulate the real interview environment and better prepare candidates for their job applications.Do I need to use a tool? No
Final Answer: The preparation for conducting mock interviews for the five Senior Data Scientist roles is well underway, with detailed plans tailored to the key skills and experiences required for each position. These plans include a variety of technical questions, scenario-based discussions, and tasks designed to assess both hard and soft skills relevant to each role. This comprehensive approach ensures a realistic and supportive interview environment, preparing candidates effectively for their respective job applications. With this strategy, candidates will receive constructive feedback focusing on areas for improvement, enhancing their interview performance and increasing their chances of success in securing a role.

> Finished chain.
Result saved to task_output.txt


> Entering new CrewAgentExecutor chain...
Thought: Do I need to use a tool? Yes
Action: Delegate work to co-worker
Action Input: Career Advisor|offer guidance on resume building, optimizing LinkedIn profiles, and effective networking strategies for each of the five Senior Data Scientist roles|The search for current job openings for the Senior Data Scientist role in New York has yielded 5 vacant positions: 1. InVitro Cell Research, LLC in Leonia, Bergen County is looking for Senior Data Scientists with expertise in integrating and analyzing multi-omic datasets. 2. Fingerprint For Success located in Manhattan, New York City, invites professionals in high-growth industries thinking about their next move or looking to transition into the field of data science. 3. Curinos in New York City, New York, is seeking an experienced applied Senior Data Scientist to join their Data Science team. 4. Chubb, situated in Hudson County, New Jersey, is in search of an experienced Senior Data Scientist for their North America Property and Casualty Data Analytics Division. 5. Informa on Church Street, Manhattan, is looking for an experienced applied Senior Data Scientist to join their Data Science & Machine Learning team. These positions highlight the demand for skills in data integration and analysis, machine learning, and experience in data science applications in various industries.

> Entering new CrewAgentExecutor chain...
Thought: Do I need to use a tool? No
Final Answer: To effectively guide candidates in their application for these Senior Data Scientist roles, here's a tailored strategy for resume building, LinkedIn profile optimization, and networking:

**Resume Building:**
1. **InVitro Cell Research, LLC**: Highlight expertise in integrating and analyzing multi-omic datasets. Include projects or roles where you've worked with genomic, transcriptomic, proteomic, or metabolomic data. Mention specific tools and techniques used, such as R, Python, bioinformatics pipelines, or statistical modeling.
   
2. **Fingerprint For Success**: Emphasize experience in high-growth industries and any transitions into data science. Showcase your ability to adapt and learn, including any self-taught data science skills or courses. Highlight leadership or mentoring roles to demonstrate your readiness for a senior position.
   
3. **Curinos**: Focus on your experience with applied data science. Include examples of real-world problems you've solved with data science, the impact of your work, and the technologies you've used (e.g., machine learning models, Python, SQL). Mention any experience in finance or related sectors if applicable.
   
4. **Chubb**: Since this is for a role in the Property and Casualty Data Analytics Division, tailor your resume to showcase experience in analytics related to insurance, risk assessment, or any relevant field. Highlight your expertise in data analysis, predictive modeling, and any specific tools (e.g., SAS, R) relevant to the insurance industry.
   
5. **Informa**: Focus on your applied experience in data science and machine learning. Include projects where you've implemented machine learning algorithms, your role in these projects, and the outcomes. Mention experience with large datasets and the ability to derive insights that drive business decisions.

**LinkedIn Profile Optimization:**
- **Headline**: Use a catchy headline that includes "Senior Data Scientist" and any key specialties (e.g., "Machine Learning," "Bioinformatics," "Risk Analysis").
- **Summary**: Craft a compelling narrative that showcases your journey, key achievements, and the unique value you bring to a senior data scientist role. Tailor this section based on the type of companies you're targeting (e.g., biotech for InVitro Cell Research, LLC, high-growth industries for Fingerprint For Success).
- **Skills & Endorsements**: Prioritize listing skills that are directly relevant to the job descriptions, such as "Data Analysis," "Machine Learning," "Python," "R," and industry-specific skills. Seek endorsements from colleagues to enhance credibility.
- **Recommendations**: Request recommendations from past colleagues or supervisors who can vouch for your expertise and contributions in relevant projects or roles.

**Networking Strategies:**
- **Industry Events and Conferences**: Attend events related to data science, biotechnology, finance, insurance, or your area of interest. These are great opportunities to meet industry professionals and learn about emerging trends.
- **Professional Groups on LinkedIn**: Join and actively participate in LinkedIn groups related to data science and your industry of interest. Share insights, engage in discussions, and connect with members.
- **Informational Interviews**: Reach out to professionals working in your target companies or industry for informational interviews. This can provide valuable insights into the company culture, what they look for in candidates, and potentially lead to referrals.
- **Alumni Networks**: Leverage your college or university alumni network to connect with fellow alums working in data science roles. Alumni are often willing to offer advice, share job leads, or introduce you to their professional contacts.

Tailoring your approach for each role and utilizing these strategies will significantly enhance your chances of success in securing a Senior Data Scientist position.

> Finished chain.
To effectively guide candidates in their application for these Senior Data Scientist roles, here's a tailored strategy for resume building, LinkedIn profile optimization, and networking:

**Resume Building:**
1. **InVitro Cell Research, LLC**: Highlight expertise in integrating and analyzing multi-omic datasets. Include projects or roles where you've worked with genomic, transcriptomic, proteomic, or metabolomic data. Mention specific tools and techniques used, such as R, Python, bioinformatics pipelines, or statistical modeling.
   
2. **Fingerprint For Success**: Emphasize experience in high-growth industries and any transitions into data science. Showcase your ability to adapt and learn, including any self-taught data science skills or courses. Highlight leadership or mentoring roles to demonstrate your readiness for a senior position.
   
3. **Curinos**: Focus on your experience with applied data science. Include examples of real-world problems you've solved with data science, the impact of your work, and the technologies you've used (e.g., machine learning models, Python, SQL). Mention any experience in finance or related sectors if applicable.
   
4. **Chubb**: Since this is for a role in the Property and Casualty Data Analytics Division, tailor your resume to showcase experience in analytics related to insurance, risk assessment, or any relevant field. Highlight your expertise in data analysis, predictive modeling, and any specific tools (e.g., SAS, R) relevant to the insurance industry.
   
5. **Informa**: Focus on your applied experience in data science and machine learning. Include projects where you've implemented machine learning algorithms, your role in these projects, and the outcomes. Mention experience with large datasets and the ability to derive insights that drive business decisions.

**LinkedIn Profile Optimization:**
- **Headline**: Use a catchy headline that includes "Senior Data Scientist" and any key specialties (e.g., "Machine Learning," "Bioinformatics," "Risk Analysis").
- **Summary**: Craft a compelling narrative that showcases your journey, key achievements, and the unique value you bring to a senior data scientist role. Tailor this section based on the type of companies you're targeting (e.g., biotech for InVitro Cell Research, LLC, high-growth industries for Fingerprint For Success).
- **Skills & Endorsements**: Prioritize listing skills that are directly relevant to the job descriptions, such as "Data Analysis," "Machine Learning," "Python," "R," and industry-specific skills. Seek endorsements from colleagues to enhance credibility.
- **Recommendations**: Request recommendations from past colleagues or supervisors who can vouch for your expertise and contributions in relevant projects or roles.

**Networking Strategies:**
- **Industry Events and Conferences**: Attend events related to data science, biotechnology, finance, insurance, or your area of interest. These are great opportunities to meet industry professionals and learn about emerging trends.
- **Professional Groups on LinkedIn**: Join and actively participate in LinkedIn groups related to data science and your industry of interest. Share insights, engage in discussions, and connect with members.
- **Informational Interviews**: Reach out to professionals working in your target companies or industry for informational interviews. This can provide valuable insights into the company culture, what they look for in candidates, and potentially lead to referrals.
- **Alumni Networks**: Leverage your college or university alumni network to connect with fellow alums working in data science roles. Alumni are often willing to offer advice, share job leads, or introduce you to their professional contacts.

Tailoring your approach for each role and utilizing these strategies will significantly enhance your chances of success in securing a Senior Data Scientist position.Do I need to use a tool? No
Final Answer: The tailored strategies for resume building, LinkedIn profile optimization, and networking provided for each of the five Senior Data Scientist roles will guide candidates effectively in their job applications. By highlighting specific skills and experiences relevant to each position, optimizing LinkedIn profiles to showcase their professional journey and key achievements, and engaging in strategic networking, candidates can enhance their visibility and attractiveness to potential employers, thereby increasing their chances of success in securing a position.

> Finished chain.
Result saved to task_output.txt
The tailored strategies for resume building, LinkedIn profile optimization, and networking provided for each of the five Senior Data Scientist roles will guide candidates effectively in their job applications. By highlighting specific skills and experiences relevant to each position, optimizing LinkedIn profiles to showcase their professional journey and key achievements, and engaging in strategic networking, candidates can enhance their visibility and attractiveness to potential employers, thereby increasing their chances of success in securing a position.
Categories
Langchain Ollama

AI Research Assistant using Ollama

ollama pull llama2:7b-chat
pip install arxiv langchain_community langchain gpt4all qdrant-client gradio
import os
import time
import arxiv
from langchain_community.vectorstores import Qdrant
from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
from langchain_community.chat_models import ChatOllama
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import GPT4AllEmbeddings

# Create directory if not exists
dirpath = "arxiv_papers"
if not os.path.exists(dirpath):
    os.makedirs(dirpath)

# Search arXiv for papers related to "LLM"
client = arxiv.Client()
search = arxiv.Search(
    query="LLM",
    max_results=10,
    sort_order=arxiv.SortOrder.Descending
)

# Download and save the papers
for result in client.results(search):
    while True:
        try:
            result.download_pdf(dirpath=dirpath)
            print(f"-> Paper id {result.get_short_id()} with title '{result.title}' is downloaded.")
            break
        except (FileNotFoundError, ConnectionResetError) as e:
            print("Error occurred:", e)
            time.sleep(5)

# Load papers from the directory
papers = []
loader = DirectoryLoader(dirpath, glob="./*.pdf", loader_cls=PyPDFLoader)
try:
    papers = loader.load()
except Exception as e:
    print(f"Error loading file: {e}")
print("Total number of pages loaded:", len(papers)) 

# Concatenate all pages' content into a single string
full_text = ''
for paper in papers:
    full_text += paper.page_content

# Remove empty lines and join lines into a single string
full_text = " ".join(line for line in full_text.splitlines() if line)
print("Total characters in the concatenated text:", len(full_text)) 

# Split the text into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
paper_chunks = text_splitter.create_documents([full_text])

# Create Qdrant vector store
qdrant = Qdrant.from_documents(
    documents=paper_chunks,
    embedding=GPT4AllEmbeddings(),
    path="./tmp/local_qdrant",
    collection_name="arxiv_papers",
)
retriever = qdrant.as_retriever()

# Define prompt template
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# Initialize Ollama LLM
ollama_llm = "llama2:7b-chat"
model = ChatOllama(model=ollama_llm)

# Define the processing chain
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)

# Add typing for input
class Question(BaseModel):
    __root__: str

# Apply input type to the chain
chain = chain.with_types(input_type=Question)
result = chain.invoke("Explain about Vision Enhancing LLMs")
print(result)

UI

import gradio as gr
import os
import time
import arxiv
from langchain_community.vectorstores import Qdrant
from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
from langchain_community.chat_models import ChatOllama
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import GPT4AllEmbeddings

def process_papers(query, question_text):
    dirpath = "arxiv_papers"
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    
    client = arxiv.Client()
    search = arxiv.Search(
        query=query,
        max_results=10,
        sort_order=arxiv.SortOrder.Descending
    )
    
    for result in client.results(search):
        while True:
            try:
                result.download_pdf(dirpath=dirpath)
                print(result)
                print(f"-> Paper id {result.get_short_id()} with title '{result.title}' is downloaded.")
                break
            except (FileNotFoundError, ConnectionResetError) as e:
                print("Error occurred:", e)
                time.sleep(5)
    
    papers = []
    loader = DirectoryLoader(dirpath, glob="./*.pdf", loader_cls=PyPDFLoader)
    try:
        papers = loader.load()
    except Exception as e:
        print(f"Error loading file: {e}")
    full_text = ''
    for paper in papers:
        full_text += paper.page_content
    
    full_text = " ".join(line for line in full_text.splitlines() if line)
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    paper_chunks = text_splitter.create_documents([full_text])
    
    qdrant = Qdrant.from_documents(
        documents=paper_chunks,
        embedding=GPT4AllEmbeddings(),
        path="./tmp/local_qdrant",
        collection_name="arxiv_papers",
    )
    retriever = qdrant.as_retriever()
    
    template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
    prompt = ChatPromptTemplate.from_template(template)
    
    ollama_llm = "llama2:7b-chat"
    model = ChatOllama(model=ollama_llm)
    
    chain = (
        RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
        | prompt
        | model
        | StrOutputParser()
    )
    
    class Question(BaseModel):
        __root__: str
    
    chain = chain.with_types(input_type=Question)
    result = chain.invoke(question_text)
    return result

iface = gr.Interface(
    fn=process_papers,
    inputs=["text", "text"],
    outputs="text",
    description="Enter a search query and a question to process arXiv papers."
)

iface.launch()
Categories
Embedding

Semantic Chunking

pip install llama-index llama-cpp-python torch sentence-transformers
import os
import logging
import sys
import numpy as np

# Set OPENAI_API_KEY environment variable (only needed if you switch to OpenAIEmbedding)
os.environ["OPENAI_API_KEY"] = os.getenv('OPENAI_API_KEY', '')

# Configure logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

# Importing required modules
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import HuggingFaceLLM, LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
from llama_index.llama_pack import download_llama_pack
from llama_index.embeddings import HuggingFaceEmbedding, OpenAIEmbedding
from llama_index.node_parser import SentenceSplitter
from llama_index.indices.postprocessor import SentenceTransformerRerank
# from llama_index.response.notebook_utils import display_source_node

# Download Semantic Chunking Package
download_llama_pack(
    "SemanticChunkingQueryEnginePack",
    "./semantic_chunking_pack",
    skip_load=True,
)

# Load documents from directory
documents = SimpleDirectoryReader(input_files=["essay.txt"]).load_data()

# Initialize LlamaCPP model
llm = LlamaCPP(
    model_url='https://huggingface.co/TheBloke/zephyr-7B-alpha-GGUF/resolve/main/zephyr-7b-alpha.Q5_K_M.gguf',
    model_path=None,
    temperature=0.1,
    max_new_tokens=256,
    context_window=3900,
    generate_kwargs={},
    model_kwargs={"n_gpu_layers": -1},
    messages_to_prompt=messages_to_prompt,
    completion_to_prompt=completion_to_prompt,
    verbose=True,
)

# Initialize HuggingFaceEmbedding model
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

# Initialize SentenceSplitter with baseline settings
base_splitter = SentenceSplitter(chunk_size=512)

# Initialize SentenceTransformerRerank for reranking
rerank = SentenceTransformerRerank(
    model="cross-encoder/ms-marco-MiniLM-L-12-v2", top_n=3
)

# Create ServiceContext with default settings
service_context = ServiceContext.from_defaults(
    chunk_size=512,
    llm=llm,
    embed_model=embed_model
)

# Get nodes from documents using baseline splitter
base_nodes = base_splitter.get_nodes_from_documents(documents)

# Initialize VectorStoreIndex and QueryEngine with baseline settings
base_vector_index = VectorStoreIndex(base_nodes, service_context=service_context)
base_query_engine = base_vector_index.as_query_engine(node_postprocessors=[rerank])

# Query using baseline query engine
response = base_query_engine.query(
    "Tell me about the author's programming journey through childhood to college"
)
print(str(response))
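
The query engine above is built from the baseline SentenceSplitter chunks; the SemanticChunkingQueryEnginePack downloaded earlier supplies the semantic splitter itself. Below is a hedged sketch of swapping it in. The semantic_chunking_pack.base module path and the SemanticChunker parameters (buffer_size, breakpoint_percentile_threshold) are assumptions based on the pack's layout at the time and may differ between llama-index versions.

# Hedged sketch: use the semantic splitter from the downloaded pack instead of the baseline splitter.
# Assumption: the pack exposes SemanticChunker in semantic_chunking_pack.base.
from semantic_chunking_pack.base import SemanticChunker

semantic_splitter = SemanticChunker(
    buffer_size=1,                       # sentences grouped together when comparing embeddings (assumed default)
    breakpoint_percentile_threshold=95,  # split where embedding distance exceeds this percentile (assumed default)
    embed_model=embed_model,
)

semantic_nodes = semantic_splitter.get_nodes_from_documents(documents)
semantic_index = VectorStoreIndex(semantic_nodes, service_context=service_context)
semantic_query_engine = semantic_index.as_query_engine(node_postprocessors=[rerank])

print(str(semantic_query_engine.query(
    "Tell me about the author's programming journey through childhood to college"
)))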
Categories
AutoGen

AutoGen LinkedIn Post Creator

import autogen
import chromadb
from autogen import AssistantAgent
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

config_list = [
    {
        "model": "gpt-4-turbo-preview",
    }
]

llm_config_proxy = {
    "temperature": 0,
    "config_list": config_list,
}

assistant = AssistantAgent(
    name="assistant",
    llm_config=llm_config_proxy,
    system_message="""You are a helpful assistant. Provide accurate answers based on the context. Respond "Unsure about answer" if uncertain.""",
)

user = RetrieveUserProxyAgent(
    name="user",
    human_input_mode="NEVER",
    system_message="Assistant who has extra content retrieval power for solving difficult problems.",
    max_consecutive_auto_reply=10,
    retrieve_config={
        "task": "code",
        "docs_path": ['autogen.pdf'],
        "chunk_token_size": 1000,
        "model": config_list[0]["model"],
        "client": chromadb.PersistentClient(path='/tmp/chromadb'),
        "collection_name": "pdfreader",
        "get_or_create": True,
    },
    code_execution_config={"work_dir": "coding"},
)

user_question = """
Compose a short LinkedIn post showcasing how AutoGen is revolutionizing the future of Generative AI 
through the collaboration of various agents. Craft an introduction, main body, and a compelling 
conclusion. Encourage readers to share the post. Keep the post under 500 words.
"""

user.initiate_chat(
    assistant,
    problem=user_question,
)
Categories
Tools

Ollama Function Calling

ollama pull llama2
pip install yfinance pydantic instructor openai
from openai import OpenAI
from pydantic import BaseModel, Field
from typing import List
import yfinance as yf

import instructor

company = "Google"

class StockInfo(BaseModel):
    company: str = Field(..., description="Name of the company")
    ticker: str = Field(..., description="Ticker symbol of the company")

# enables `response_model` in create call
client = instructor.patch(
    OpenAI(
        base_url="http://localhost:11434/v1",
        api_key="ollama",
    ),
    mode=instructor.Mode.JSON,
)

resp = client.chat.completions.create(
    model="llama2",
    messages=[
        {
            "role": "user",
            "content": f"Return the company name and the ticker symbol of the {company}."
        }
    ],
    response_model=StockInfo,
    max_retries=10
)
print(resp.model_dump_json(indent=2))
stock = yf.Ticker(resp.ticker)
hist = stock.history(period="1d")
stock_price = hist['Close'].iloc[-1]
print(f"The stock price of the {resp.company} is {stock_price}. USD")
import requests
import json
import sys
import yfinance as yf

company_name = "Google"

schema = {
    "company": {
        "type": "string",
        "description": "Name of the company"
    },
    "ticker": {
        "type": "string",
        "description": "Ticker symbol of the company"
    }
}

payload = {
    "model": "llama2",
    "messages": [
        {
            "role": "system",
            "content": f"You are a helpful AI assistant. The user will enter a company name and the assistant will return the ticker symbol and current stock price of the company. Output in JSON using the schema defined here: {json.dumps(schema)}."
        },
        {"role": "user", "content": "Apple"},
        {"role": "assistant", "content": json.dumps({"company": "Apple", "ticker": "AAPL"})},  # Example static data
        {"role": "user", "content": company_name}
    ],
    "format": "json",
    "stream": False
}

response = requests.post("http://localhost:11434/api/chat", json=payload)
company_info = json.loads(response.json()["message"]["content"])

# Fetch the current stock price using yfinance
ticker_symbol = company_info['ticker']
stock = yf.Ticker(ticker_symbol)
hist = stock.history(period="1d")
stock_price = hist['Close'].iloc[-1]

print(f"The current stock price of {company_info['company']} ({ticker_symbol}) is USD {stock_price}.")

Other Examples

pip install haversine 
import requests
import json
import sys
from haversine import haversine

country = sys.argv[1]
mylat, mylon = 47.6455, -122.8258
schema = {
    "city": {
        "type": "string",
        "description": "Name of the city"
    },
    "lat": {
        "type": "float",
        "description": "Decimal Latitude of the city"
    },
    "lon": {
        "type": "float",
        "description": "Decimal longitude of the city"
    }
}

payload = {
    "model": "llama2",
    "messages": [
        {
            "role": "system",
            "content": f"You are a helpful AI assistant. The user will enter a country name and the assistant will return the decimal latitude and decimal longitude of the capital of the country. Output in JSON using the schema defined here: {json.dumps(schema)}."
        },
        {"role": "user", "content": "France"},
        {"role": "assistant", "content": json.dumps({"city": "Paris", "lat": 48.8566, "lon": 2.3522})},
        {"role": "user", "content": country}
    ],
    "format": "json",
    "stream": False
}

response = requests.post("http://localhost:11434/api/chat", json=payload)
cityinfo = json.loads(response.json()["message"]["content"])

distance = haversine((mylat, mylon), (cityinfo['lat'], cityinfo['lon']), unit='mi')
print(f"Bainbridge Island is about {int(round(distance, -1))} miles away from {cityinfo['city']}")
python app.py UK
pip install forex-python
ollama pull llama2
import requests
import json
import sys
from forex_python.converter import CurrencyRates

currency_code = sys.argv[1]

schema = {
    "currency": {
        "type": "string",
        "description": "Currency code"
    },
    "rate": {
        "type": "float",
        "description": "Exchange rate against USD"
    },
    "date": {
        "type": "string",
        "description": "Date of the rate"
    }
}

payload = {
    "model": "llama2",
    "messages": [
        {
            "role": "system",
            "content": f"You are a helpful AI assistant. The user will enter a currency code and the assistant will return the exchange rate against USD and the date of the rate. Output in JSON using the schema defined here: {json.dumps(schema)}."
        },
        {"role": "user", "content": "EUR"},
        {"role": "assistant", "content": json.dumps({"currency": "EUR", "rate": 0.85, "date": "13-02-2024"})},
        {"role": "user", "content": currency_code}
    ],
    "format": "json",
    "stream": False
}

response = requests.post("http://localhost:11434/api/chat", json=payload)
currency_info = json.loads(response.json()["message"]["content"])

c = CurrencyRates()
rate_to_usd = c.get_rate(currency_info['currency'], 'USD')

print(f"The exchange rate for {currency_info['currency']} against USD is {rate_to_usd} as of {currency_info['date']}.")
python app.py GBP
pip install yfinance
python app.py Google

Categories
AutoGen

AutoGen RAG with Qdrant

pip install "pyautogen[retrievechat]" "flaml[automl]" "qdrant_client[fastembed]"
from qdrant_client import QdrantClient
import autogen
from autogen.agentchat.contrib.qdrant_retrieve_user_proxy_agent import QdrantRetrieveUserProxyAgent
from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
from autogen.retrieve_utils import TEXT_FORMATS

config_list = autogen.config_list_from_json(
    env_or_file="OAI_CONFIG_LIST"
)

# 1. create a RetrieveAssistantAgent instance named "assistant"
assistant = RetrieveAssistantAgent(
    name="assistant",
    system_message="You are a helpful assistant.",
    llm_config={
        "timeout": 600,
        "cache_seed": 42,
        "config_list": config_list,
    },
)

# 2. create the QdrantRetrieveUserProxyAgent instance named "ragproxyagent"
ragproxyagent = QdrantRetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    retrieve_config={
        "task": "code",
        "docs_path": "https://raw.githubusercontent.com/microsoft/autogen/main/README.md",  
        "chunk_token_size": 2000,
        "model": config_list[0]["model"],
        "client": QdrantClient(":memory:"),
        "embedding_model": "BAAI/bge-small-en-v1.5",
    },
)

assistant.reset()
qa_problem = "what is AutoGen?"
ragproxyagent.initiate_chat(assistant, problem=qa_problem)

assistant.reset()
qa_problem = "List all the benefits of AutoGen"
ragproxyagent.initiate_chat(assistant, problem=qa_problem)
Categories
AutoGen

AutoGen RAG

pip install "pyautogen[retrievechat]" "flaml[automl]"
export AUTOGEN_USE_DOCKER=False
export OPENAI_API_KEY=xxxxxxxxxxxx

OAI_CONFIG_LIST

[
    {
        "model": "gpt-4-turbo-preview"
    }
]
import json, os, chromadb, autogen
from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
# Accepted file formats that can be stored in a vector database instance
from autogen.retrieve_utils import TEXT_FORMATS
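
# Optional (sketch): TEXT_FORMATS lists every file type the retrieval agent can ingest,
# which is a quick sanity check before pointing docs_path at a folder of documents.
print("Accepted file formats for docs_path:", TEXT_FORMATS)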

config_list = autogen.config_list_from_json(
    env_or_file="OAI_CONFIG_LIST",
)

# 1. create a RetrieveAssistantAgent instance named "assistant"
assistant = RetrieveAssistantAgent(
    name="assistant",
    system_message="You are a helpful assistant.",
    llm_config={
        "timeout": 600,
        "cache_seed": 42,
        "config_list": config_list,
    },
)

# 2. create the RetrieveUserProxyAgent instance named "ragproxyagent"
ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=3,
    retrieve_config={
        "task": "code",
        "docs_path": [
            "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md",
            "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Research.md",
            os.path.join(os.path.abspath(""), "..", "website", "docs"),
        ],
        "custom_text_types": ["mdx"],
        "chunk_token_size": 2000,
        "model": config_list[0]["model"],
        "client": chromadb.PersistentClient(path="/tmp/chromadb"),
        "embedding_model": "all-mpnet-base-v2",
        "get_or_create": True,  
    },
    code_execution_config=False
)

# Example 1: Generate code based off docstrings w/o human feedback

assistant.reset()
code_problem = "How can I use FLAML to perform a classification task and use spark to do parallel training. Train 30 seconds and force cancel jobs if time limit is reached."
ragproxyagent.initiate_chat(
    assistant, problem=code_problem, search_string="spark"
)  

#  Example 2: Answer a question based off docstrings w/o human feedback

assistant.reset()
qa_problem = "Who is the author of FLAML?"
ragproxyagent.initiate_chat(assistant, problem=qa_problem)

# Example 3: Generate code based off docstrings w/ human feedback

assistant.reset()
ragproxyagent.human_input_mode = "ALWAYS"
code_problem = "how to build a time series forecasting model for stock price using FLAML?"
ragproxyagent.initiate_chat(assistant, problem=code_problem)

# Example 4: Answer a question based off docstrings w/ human feedback.

assistant.reset()
ragproxyagent.human_input_mode = "ALWAYS"
qa_problem = "Is there a function named `tune_automl` in FLAML?"
ragproxyagent.initiate_chat(assistant, problem=qa_problem)  # type "exit" to exit the conversation

# Example 5: Solve comprehensive QA problems with RetrieveChat's unique feature `Update Context`
# https://ai.google.com/research/NaturalQuestions

config_list[0]["model"] = "gpt-35-turbo"
corpus_file = "https://huggingface.co/datasets/thinkall/NaturalQuestionsQA/resolve/main/corpus.txt"

ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    retrieve_config={
        "task": "qa",
        "docs_path": corpus_file,
        "chunk_token_size": 2000,
        "model": config_list[0]["model"],
        "client": chromadb.PersistentClient(path="/tmp/chromadb"),
        "collection_name": "natural-questions",
        "chunk_mode": "one_line",
        "embedding_model": "all-MiniLM-L6-v2",
        "get_or_create": True, 
    },
)

# queries_file = "https://huggingface.co/datasets/thinkall/NaturalQuestionsQA/resolve/main/queries.jsonl"
queries = """{"_id": "ce2342e1feb4e119cb273c05356b33309d38fa132a1cbeac2368a337e38419b8", "text": "what is non controlling interest on balance sheet", "metadata": {"answer": ["the portion of a subsidiary corporation 's stock that is not owned by the parent corporation"]}}
{"_id": "3a10ff0e520530c0aa33b2c7e8d989d78a8cd5d699201fc4b13d3845010994ee", "text": "how many episodes are in chicago fire season 4", "metadata": {"answer": ["23"]}}
{"_id": "fcdb6b11969d5d3b900806f52e3d435e615c333405a1ff8247183e8db6246040", "text": "what are bulls used for on a farm", "metadata": {"answer": ["breeding", "as work oxen", "slaughtered for meat"]}}
{"_id": "26c3b53ec44533bbdeeccffa32e094cfea0cc2a78c9f6a6c7a008ada1ad0792e", "text": "has been honoured with the wisden leading cricketer in the world award for 2016", "metadata": {"answer": ["Virat Kohli"]}}
{"_id": "0868d0964c719a52cbcfb116971b0152123dad908ac4e0a01bc138f16a907ab3", "text": "who carried the usa flag in opening ceremony", "metadata": {"answer": ["Erin Hamlin"]}}
"""
queries = [json.loads(line) for line in queries.split("\n") if line]
questions = [q["text"] for q in queries]
answers = [q["metadata"]["answer"] for q in queries]
print(questions)
print(answers)

for i in range(len(questions)):
    print(f"\n\n>>>>>>>>>>>>  Below are outputs of Case {i+1}  <<<<<<<<<<<<\n\n")
    assistant.reset()
    qa_problem = questions[i]
    ragproxyagent.initiate_chat(assistant, problem=qa_problem, n_results=30)
    
# Example 6: Solve comprehensive QA problems with customized prompt and few-shot learning
# https://github.com/Alab-NII/2wikimultihop

PROMPT_MULTIHOP = """You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the context provided by the user. You must think step-by-step.
First, please learn the following examples of context and question pairs and their corresponding answers.

Context:
Kurram Garhi: Kurram Garhi is a small village located near the city of Bannu, which is the part of Khyber Pakhtunkhwa province of Pakistan. Its population is approximately 35000.
Trojkrsti: Trojkrsti is a village in Municipality of Prilep, Republic of Macedonia.
Q: Are both Kurram Garhi and Trojkrsti located in the same country?
A: Kurram Garhi is located in the country of Pakistan. Trojkrsti is located in the country of Republic of Macedonia. Thus, they are not in the same country. So the answer is: no.


Context:
Early Side of Later: Early Side of Later is the third studio album by English singer- songwriter Matt Goss. It was released on 21 June 2004 by Concept Music and reached No. 78 on the UK Albums Chart.
What's Inside: What's Inside is the fourteenth studio album by British singer- songwriter Joan Armatrading.
Q: Which album was released earlier, What'S Inside or Cassandra'S Dream (Album)?
A: What's Inside was released in the year 1995. Cassandra's Dream (album) was released in the year 2008. Thus, of the two, the album to release earlier is What's Inside. So the answer is: What's Inside.


Context:
Maria Alexandrovna (Marie of Hesse): Maria Alexandrovna , born Princess Marie of Hesse and by Rhine (8 August 1824 – 3 June 1880) was Empress of Russia as the first wife of Emperor Alexander II.
Grand Duke Alexei Alexandrovich of Russia: Grand Duke Alexei Alexandrovich of Russia,(Russian: Алексей Александрович; 14 January 1850 (2 January O.S.) in St. Petersburg – 14 November 1908 in Paris) was the fifth child and the fourth son of Alexander II of Russia and his first wife Maria Alexandrovna (Marie of Hesse).
Q: What is the cause of death of Grand Duke Alexei Alexandrovich Of Russia's mother?
A: The mother of Grand Duke Alexei Alexandrovich of Russia is Maria Alexandrovna. Maria Alexandrovna died from tuberculosis. So the answer is: tuberculosis.


Context:
Laughter in Hell: Laughter in Hell is a 1933 American Pre-Code drama film directed by Edward L. Cahn and starring Pat O'Brien. The film's title was typical of the sensationalistic titles of many Pre-Code films.
Edward L. Cahn: Edward L. Cahn (February 12, 1899 – August 25, 1963) was an American film director.
Q: When did the director of film Laughter In Hell die?
A: The film Laughter In Hell was directed by Edward L. Cahn. Edward L. Cahn died on August 25, 1963. So the answer is: August 25, 1963.

Second, please complete the answer by thinking step-by-step.

Context:
{input_context}
Q: {input_question}
A:
"""

# create the RetrieveUserProxyAgent instance named "ragproxyagent"
corpus_file = "https://huggingface.co/datasets/thinkall/2WikiMultihopQA/resolve/main/corpus.txt"

# Create a new collection for the 2WikiMultihopQA dataset
ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=3,
    retrieve_config={
        "task": "qa",
        "docs_path": corpus_file,
        "chunk_token_size": 2000,
        "model": config_list[0]["model"],
        "client": chromadb.PersistentClient(path="/tmp/chromadb"),
        "collection_name": "2wikimultihopqa",
        "chunk_mode": "one_line",
        "embedding_model": "all-MiniLM-L6-v2",
        "customized_prompt": PROMPT_MULTIHOP,
        "customized_answer_prefix": "the answer is",
        "get_or_create": True,  
    },
)

# queries_file = "https://huggingface.co/datasets/thinkall/2WikiMultihopQA/resolve/main/queries.jsonl"
queries = """{"_id": "61a46987092f11ebbdaeac1f6bf848b6", "text": "Which film came out first, Blind Shaft or The Mask Of Fu Manchu?", "metadata": {"answer": ["The Mask Of Fu Manchu"]}}
{"_id": "a7b9672009c311ebbdb0ac1f6bf848b6", "text": "Are North Marion High School (Oregon) and Seoul High School both located in the same country?", "metadata": {"answer": ["no"]}}
"""
queries = [json.loads(line) for line in queries.split("\n") if line]
questions = [q["text"] for q in queries]
answers = [q["metadata"]["answer"] for q in queries]
print(questions)
print(answers)

for i in range(len(questions)):
    print(f"\n\n>>>>>>>>>>>>  Below are outputs of Case {i+1}  <<<<<<<<<<<<\n\n")

    # reset the assistant. Always reset the assistant before starting a new conversation.
    assistant.reset()

    qa_problem = questions[i]
    ragproxyagent.initiate_chat(assistant, problem=qa_problem, n_results=10)
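
    # Optional (sketch): print the ground-truth answers parsed above for a quick manual check.
    print(f"Expected answer(s): {answers[i]}")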