Agent is not using tools

import json
from pathlib import Path
from typing import List, Optional
from textwrap import dedent

from phi.assistant import Assistant
from phi.tools import Toolkit
from phi.tools.exa import ExaTools
from phi.tools.shell import ShellTools
from phi.tools.calculator import Calculator
from phi.tools.duckduckgo import DuckDuckGo
from phi.tools.yfinance import YFinanceTools
from phi.tools.file import FileTools
from phi.knowledge import AssistantKnowledge
from phi.assistant.duckdb import DuckDbAssistant
from phi.tools.python import PythonTools
from phi.storage.assistant.postgres import PgAssistantStorage
from phi.utils.log import logger
from phi.vectordb.pgvector import PgVector2
from phi.agent import Agent, AgentMemory
from phi.memory.classifier import MemoryClassifier
from phi.memory.summarizer import MemorySummarizer
from phi.model.google import Gemini
from phi.model.groq import Groq
from phi.memory.db.sqlite import SqliteMemoryDb
from phi.storage.agent.json import JsonFileAgentStorage
from phi.workflow import Workflow, RunResponse, RunEvent

class CustomJsonFileAgentStorage(JsonFileAgentStorage):
    def serialize(self, data: dict) -> str:
        # Clean up Gemini's parts before serialization
        if data.get("agent_data", {}).get("model", {}).get("provider") == "Google":
            if "memory" in data:
                # Clean up runs' response messages
                if "runs" in data["memory"]:
                    for run in data["memory"]["runs"]:
                        if "response" in run and "messages" in run["response"]:
                            for m in run["response"]["messages"]:
                                if isinstance(m, dict):
                                    m.pop("parts", None)
                
                # Clean up top-level memory messages
                if "messages" in data["memory"]:
                    for m in data["memory"]["messages"]:
                        if isinstance(m, dict):
                            m.pop("parts", None)
        
        return super().serialize(data)

def get_llm_os(
    llm_id: str = "gemini-2.0-flash-exp",
    calculator: bool = False,
    ddg_search: bool = False,
    shell_tools: bool = False,
    python_assistant: bool = False,
    investment_assistant: bool = False,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    debug_mode: bool = True,
) -> Agent:
    logger.info(f"-*- Creating {llm_id} LLM OS -*-")

    # Add tools available to the LLM OS
    tools: List[Toolkit] = []
    extra_instructions: List[str] = []
    if calculator:
        tools.append(
            Calculator(
                add=True,
                subtract=True,
                multiply=True,
                divide=True,
                exponentiate=True,
                factorial=True,
                is_prime=True,
                square_root=True,
            )
        )
    if ddg_search:
        tools.append(DuckDuckGo(fixed_max_results=5))
    if shell_tools:
        tools.append(ShellTools())
        extra_instructions.append(
            "You can use the `run_shell_command` tool to run shell commands. For example, `run_shell_command(args='ls')`."
        )

    team: List[Agent] = []
    if python_assistant:
        _python_assistant = Agent(
            name="Python Assistant",
            tools=[PythonTools()],
            role="Write and run python code",
            model=Groq(id="llama-3.3-70b-versatile"),
            pip_install=True,
        )
        team.append(_python_assistant)
        extra_instructions.append("To write and run python code, delegate the task to the `Python Assistant`.")

    if investment_assistant:
        _investment_assistant = Agent(
            name="Investment Assistant",
            role="Write a investment report on a given company (stock) symbol",
            model=Gemini(id="gemini-2.0-flash-exp"),
            description="You are a Senior Investment Analyst for Goldman Sachs tasked with writing an investment report for a very important client.",
            instructions=[
                "For a given stock symbol, get the stock price, company information, analyst recommendations, and company news",
                "Carefully read the research and generate a final - Goldman Sachs worthy investment report in the <report_format> provided below.",
                "Provide thoughtful insights and recommendations based on the research.",
                "When you share numbers, make sure to include the units (e.g., millions/billions) and currency.",
                "REMEMBER: This report is for a very important client, so the quality of the report is important.",
            ],
            expected_output=dedent(
                """\
            <report_format>
            ## [Company Name]: Investment Report

            ### **Overview**
            {give a brief introduction of the company and why the user should read this report}
            {make this section engaging and create a hook for the reader}

            ### Core Metrics
            {provide a summary of core metrics and show the latest data}
            - Current price: {current price}
            - 52-week high: {52-week high}
            - 52-week low: {52-week low}
            - Market Cap: {Market Cap} in billions
            - P/E Ratio: {P/E Ratio}
            - Earnings per Share: {EPS}
            - 50-day average: {50-day average}
            - 200-day average: {200-day average}
            - Analyst Recommendations: {buy, hold, sell} (number of analysts)

            ### Financial Performance
            {analyze the company's financial performance}

            ### Growth Prospects
            {analyze the company's growth prospects and future potential}

            ### News and Updates
            {summarize relevant news that can impact the stock price}

            ### [Summary]
            {give a summary of the report and what are the key takeaways}

            ### [Recommendation]
            {provide a recommendation on the stock along with a thorough reasoning}

            </report_format>
            """
            ),
            tools=[YFinanceTools(stock_price=True, company_info=True, analyst_recommendations=True, company_news=True)],
            # This setting tells the LLM to format messages in markdown
            markdown=True,
            add_datetime_to_instructions=True,
            debug_mode=debug_mode,
        )
        team.append(_investment_assistant)
        extra_instructions.extend(
            [
                "To get an investment report on a stock, delegate the task to the `Investment Assistant`. "
                "Return the report in the <report_format> to the user without any additional text like 'here is the report'.",
                "Answer any questions they may have using the information in the report.",
                "Never provide investment advise without the investment report.",
            ]
        )

    llm_os = Agent(
        name="AI_OS",
        run_id=run_id,
        user_id=user_id,
        model=Gemini(id="gemini-2.0-flash-exp"),
        description=dedent(
            """\
        You are the most advanced AI system in the world called `AI-OS`.
        You have access to a set of tools and a team of AI Assistants at your disposal.
        Your goal is to assist the user in the best way possible.\
        """
        ),
        instructions=[
            "When the user sends a message, first **think** and determine if:\n"
            " - You can answer by using a tool available to you\n"
            " - You need to search the knowledge base\n"
            " - You need to search the internet\n"
            " - You need to delegate the task to a team member\n"
            " - You need to ask a clarifying question",
            "If the user asks about a topic, first ALWAYS search your knowledge base using the `search_knowledge_base` tool.",
            "If you dont find relevant information in your knowledge base, use the `duckduckgo_search` tool to search the internet.",
            "If the user asks to summarize the conversation or if you need to reference your chat history with the user, use the `get_chat_history` tool.",
            "If the users message is unclear, ask clarifying questions to get more information.",
            "Carefully read the information you have gathered and provide a clear and concise answer to the user.",
            "Do not use phrases like 'based on my knowledge' or 'depending on the information'.",
            "You can delegate tasks to an AI Assistant in your team depending of their role and the tools available to them.",
        ],
        extra_instructions=extra_instructions,
        # Store agent sessions as local JSON files using the custom storage class defined above
        storage=CustomJsonFileAgentStorage(dir_path="tmp/agent_sessions_json"),
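        # Add long-term user memories and session summaries backed by a local SQLite database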
        memory=AgentMemory(
            classifier=MemoryClassifier(model=Groq(id="llama-3.3-70b-versatile")),
            summarizer=MemorySummarizer(model=Groq(id="llama-3.3-70b-versatile")),
            db=SqliteMemoryDb(
                table_name="agent_memory",
                db_file="tmp/agent_memory.db",
            ),
            create_user_memories=True,
            update_user_memories_after_run=True,
            create_session_summary=True,
            update_session_summary_after_run=True,
        ),
        # Add selected tools to the LLM OS
        tools=tools,
        # Add selected team members to the LLM OS
        team=team,
        # Show tool calls in the chat
        show_tool_calls=True,
        # This setting gives the LLM a tool to search the knowledge base for information
        search_knowledge=True,
        # This setting gives the LLM a tool to get chat history
        read_chat_history=True,
        # This setting adds chat history to the messages
        add_chat_history_to_messages=True,
        # This setting adds 6 previous messages from chat history to the messages sent to the LLM
        num_history_messages=6,
        # This setting tells the LLM to format messages in markdown
        markdown=True,
        # This setting adds the current datetime to the instructions
        add_datetime_to_instructions=True,
        # Add an introductory Assistant message
        introduction=dedent(
            """\
        Hi, I'm your LLM OS.
        I have access to a set of tools and AI Assistants to assist you.
        Let's solve some problems together!\
        """
        ),
        debug_mode=debug_mode,
    )
    return llm_os

Above is my assistant.py, which I took from cookbook/llm_os and modified a little. Below is my app.py:

from assistant import get_llm_os
import sys
from pathlib import Path

def create_directories():
    """Create necessary directories if they don't exist"""
    Path("tmp/agent_sessions_json").mkdir(parents=True, exist_ok=True)
    Path("tmp").mkdir(parents=True, exist_ok=True)

def initialize_llm_os():
    """Initialize the LLM OS with desired configurations"""
    return get_llm_os(
        calculator=True,  # Enable calculator
        ddg_search=True,  # Enable DuckDuckGo search
        python_assistant=True,  # Enable Python Assistant
        investment_assistant=True,  # Enable Investment Assistant
        debug_mode=True,  # Enable debug mode for better visibility
    )

def run_chat_interface():
    """Run the interactive chat interface"""
    print("\n=== LLM OS Chat Interface ===")
    print("Type 'exit' or 'quit' to end the conversation")
    print("Type 'clear' to clear the screen")
    print("================================\n")

    # Create necessary directories
    create_directories()
    
    # Initialize LLM OS
    llm_os = initialize_llm_os()

    while True:
        try:
            # Get user input
            user_input = input("\nYou: ").strip()

            # Handle special commands
            if user_input.lower() in ['exit', 'quit']:
                print("\nGoodbye! Thank you for using LLM OS.")
                sys.exit(0)
            elif user_input.lower() == 'clear':
                # Clear the screen using ANSI escape codes (supported by most modern terminals, including Windows 10+)
                print('\033[2J\033[H')
                continue
            elif not user_input:
                continue

            # Get response from LLM OS
            response = llm_os.run(user_input)
            
            # Print the response
            if response and response.content:
                print("\nLLM OS:", response.content)
            else:
                print("\nLLM OS: I apologize, but I couldn't generate a response. Please try again.")

        except KeyboardInterrupt:
            print("\n\nExiting gracefully...")
            sys.exit(0)
        except Exception as e:
            print(f"\nAn error occurred: {str(e)}")
            print("Please try again.")

if __name__ == "__main__":
    run_chat_interface()

When I give it a task that involves the team, like something finance- or Python-related, it delegates to the team members and works correctly, but tasks that need the standalone tools like DuckDuckGo search and the calculator aren't using them. I think the way it's coded isn't correct. Can you see the issue and help?

Hi @Prajwal
Thank you for reaching out and using Phidata! I’ve tagged the relevant engineers to assist you with your query. We aim to respond within 24 hours.
If this is urgent, please feel free to let us know, and we’ll do our best to prioritize it.
Thanks for your patience!

I solved it. I just added

tool_choice="auto",    # Important: this allows the model to choose when to use tools

to the main Agent and it's working.
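
For anyone hitting the same issue, here is a minimal sketch of where the parameter goes, assuming the same Agent constructor as in my assistant.py above (only a few of the surrounding arguments are shown; everything else stays unchanged):

llm_os = Agent(
    name="AI_OS",
    model=Gemini(id="gemini-2.0-flash-exp"),
    tools=tools,
    team=team,
    # Without this, the model answered directly instead of calling the standalone
    # tools (DuckDuckGo, Calculator); "auto" lets it decide per message.
    tool_choice="auto",
    show_tool_calls=True,
    markdown=True,
    debug_mode=debug_mode,
    # ... remaining settings (storage, memory, instructions, etc.) as before ...
)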