Sayen VV
🚀 A Complete Guide to Rendering Agents & Workflows Using Agent Framework Dev UI

The Agent Framework Dev UI offers an interactive dashboard that helps developers visualize, test, debug, and refine multi-agent systems.

This guide walks through a full multi-agent pipeline consisting of:

  • Custom tool definitions
  • Three specialized AI agents
  • A supervisor agent
  • A multi-agent workflow
  • Rendering everything in the Dev UI

By the end, you'll have a fully interactive AI agent system running at localhost:8001.


🛠️ 1. Defining Tools (Search, Classification, Summarization)

Each agent relies on a dedicated tool to perform its task.

from agent_framework import AIFunction  # wraps a Python callable as a tool agents can invoke

from service import AISearch
from service import AIClassifier
from service import AISummarizer

from models.tool_calls import AISearchToolCall
from models.tool_calls import ClassificationToolCall
from models.tool_calls import SummarizationToolCall

SearchingTool = AIFunction(
    func=AISearch.search,
    name="AIsearch",
    description="Search for relevant answers",
    input_model=AISearchToolCall
)

ClassificationTool = AIFunction(
    func=AIClassifier.classify,
    name="AIClassifier",
    description="Classify data into relevant categories based on inputs",
    input_model=ClassificationToolCall
)

SummarizationTool = AIFunction(
    func=AISummarizer.summarize,
    name="AISummarizer",
    description="Generate concise summary from long text",
    input_model=SummarizationToolCall
)
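
The input_model classes come from the project's models/tool_calls.py and are not shown here. As a rough sketch of what one of them might look like (the class exists in the project, but the field names below are my assumptions), a Pydantic model works well:

from pydantic import BaseModel, Field

# Hypothetical shape for AISearchToolCall; the real model in
# models/tool_calls.py may define different fields.
class AISearchToolCall(BaseModel):
    query: str = Field(description="The search query to run against the index")
    top_k: int = Field(default=5, description="How many results to return")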

🤖 2. Creating the Agents & Supervisor

Now create an agent for each of the three tasks using the tools defined above, plus a supervisor agent that decides which of them should handle a request.
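
The builders below read their Azure OpenAI settings from environment variables, so set them before running anything. The values here are placeholders:

import os

# Placeholder values; replace them with your own Azure OpenAI resource details.
os.environ["OPENAI_API_KEY"] = "<your-azure-openai-api-key>"
os.environ["OPENAI_ENDPOINT"] = "https://<your-resource>.openai.azure.com/"
os.environ["GPT_MODEL_NAME"] = "gpt-4o-2"  # the name of your model deployment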

import os

from agent_framework import ChatAgent
from agent_framework.azure import AzureOpenAIChatClient

from tool_registry import SearchingTool
from tool_registry import ClassificationTool
from tool_registry import SummarizationTool

def build_search_agent():
    openai_client = AzureOpenAIChatClient(
        api_key=os.environ.get("OPENAI_API_KEY"),
        endpoint=os.environ.get("OPENAI_ENDPOINT"),
        deployment_name=os.environ.get("GPT_MODEL_NAME"),  # Example: "gpt-4o-2"
        api_version="2024-08-01-preview"
    )

    search_agent = ChatAgent(
        chat_client=openai_client,
        name="SearchAgent",
        instructions=(
            "You are a Search Agent
        ),
        tools=[SearchingTool]
    )
    return search_agent


def build_classification_agent():
    openai_client = AzureOpenAIChatClient(
        api_key=os.environ.get("OPENAI_API_KEY"),
        endpoint=os.environ.get("OPENAI_ENDPOINT"),
        deployment_name=os.environ.get("GPT_MODEL_NAME"),  # Example: "gpt-4o-2"
        api_version="2024-08-01-preview"
    )

    classification_agent = ChatAgent(
        chat_client=openai_client,
        name="ClassificationAgent",
        instructions=(
            "You are a Classification Agent. 
        ),
        tools=[ClassificationTool]
    )
    return classification_agent

def build_summary_agent():
    openai_client = AzureOpenAIChatClient(
        api_key=os.environ.get("OPENAI_API_KEY"),
        endpoint=os.environ.get("OPENAI_ENDPOINT"),
        deployment_name=os.environ.get("GPT_MODEL_NAME"),  # Example: "gpt-4o-2"
        api_version="2024-08-01-preview"
    )

    summary_agent = ChatAgent(
        chat_client=openai_client,
        name="SummaryAgent",
        instructions=(
            "You are a Summary Agent. Your responsibilities:\n"
            "1. Receive classified outputs from ClassificationAgent.\n"
            "2. Summarize long text chunks.\n"
            "3. Use the AISummarizer tool.\n"
            "4. Produce the final summarized response."
        ),
        tools=[SummarizationTool]
    )
    return summary_agent

def build_supervisor_agent():
    """
    Creates and returns the Supervisor Agent.
    This agent decides which agent should handle the user's request:
        - SearchAgent
        - ClassificationAgent
        - SummaryAgent

    It does NOT perform the work itself.
    Instead, it chooses the correct handoff tool.
    """

    # Initialize Azure OpenAI Chat Client
    openai_client = AzureOpenAIChatClient(
        api_key=os.environ.get("OPENAI_API_KEY"),
        endpoint=os.environ.get("OPENAI_ENDPOINT"),
        deployment_name=os.environ.get("GPT_MODEL_NAME"),  # Example: "gpt-4o-mini"
        api_version="2024-08-01-preview"
    )

    supervisor_agent = ChatAgent(
        chat_client=openai_client,
        name="SupervisorAgent",
        instructions=(
            "You are the Supervisor Agent.\n\n"
            "Your job is to understand the user's message and decide which agent must handle it.\n"
            "You NEVER perform the task yourself — you only choose the correct agent.\n\n"

            "Available agents and when to use them:\n"
            "1. SearchAgent\n"
            "   - When the user wants to search data, retrieve documents, fetch context, or perform RAG-style lookups.\n\n"

            "2. ClassificationAgent\n"
            "   - When the user wants categorization, labeling, grouping, or classifying the retrieved information.\n\n"

            "3. SummaryAgent\n"
            "   - When the user asks for summarization, condensation, conclusions, insights, or refined output.\n\n"

            "Available handoff tools:\n"
            "  • `handoff_to_search_agent`\n"
            "  • `handoff_to_classification_agent`\n"
            "  • `handoff_to_summary_agent`\n\n"

            "Rules:\n"
            "- ALWAYS produce a short friendly natural-language response explaining what you are doing.\n"
            "- When delegation is needed, ALWAYS call the correct handoff tool.\n"
            "- If no handoff is required, answer normally.\n"
        )
    )

    return supervisor_agent


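Before wiring everything into a workflow, it helps to sanity-check a single agent on its own. A minimal sketch, assuming the framework's async ChatAgent.run API and a made-up test query:

import asyncio

from agents import build_search_agent

async def smoke_test():
    agent = build_search_agent()
    # ChatAgent.run is async; the returned response carries the generated text.
    result = await agent.run("Find documents about the onboarding policy")
    print(result.text)

if __name__ == "__main__":
    asyncio.run(smoke_test())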

🔗 3. Building the Multi-Agent Workflow

from typing import Optional, Union

from agent_framework import HandoffBuilder, InMemoryCheckpointStorage, Workflow

from agents import build_search_agent
from agents import build_classification_agent
from agents import build_summary_agent
from agents import build_supervisor_agent

search_executor = build_search_agent()
classification_executor = build_classification_agent()
summary_executor = build_summary_agent()
supervisor_executor = build_supervisor_agent() 

def build_pipeline_workflow(payload: Optional[Union[dict, "InputRequest"]] = None) -> Workflow:
    # "InputRequest" is this project's own request model (kept as a forward
    # reference here); the payload is not required to build the workflow itself.

    checkpoint_storage = InMemoryCheckpointStorage()

    workflow = (
        HandoffBuilder(
            name="AI Multi-Agent Pipeline Workflow",
            participants=[
                supervisor_executor,
                search_executor,
                classification_executor,
                summary_executor
            ],
            description="Workflow using SearchAgent → ClassificationAgent → SummaryAgent"
        )
        .set_coordinator(supervisor_executor)
        .add_handoff(supervisor_executor, [search_executor, classification_executor, summary_executor])
        .add_handoff(search_executor, [classification_executor, summary_executor])
        .add_handoff(classification_executor, [summary_executor])
        .with_termination_condition(
            lambda conv: sum(1 for msg in conv if msg.role.value == 'user') >= 10
        )
        .with_checkpointing(checkpoint_storage)
        .enable_return_to_previous(enabled=True)
        .request_prompt("Provide the details to proceed with the multi-agent workflow.")
        .build()
    )

    return workflow

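You can also exercise the workflow outside the Dev UI. A minimal sketch, assuming the framework's streaming run API (run_stream) and a made-up user request:

import asyncio

from workflow import build_pipeline_workflow

async def run_once():
    workflow = build_pipeline_workflow(None)
    # Stream workflow events as the supervisor hands off to the specialized agents.
    async for event in workflow.run_stream("Classify and summarize the latest support tickets"):
        print(type(event).__name__)

if __name__ == "__main__":
    asyncio.run(run_once())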

🖥️ 4. Rendering Agents & the Workflow in the Dev UI

Finally, render the workflow and the individual agents in the Dev UI:

def render_in_dev_ui():

    from agent_framework.devui import serve
    from workflow import build_pipeline_workflow
    from agents import build_search_agent, build_classification_agent, build_summary_agent

    # Register the workflow plus each standalone agent so they all show up
    # as selectable entities in the Dev UI.
    serve(
        entities=[
            build_pipeline_workflow(None),
            build_search_agent(),
            build_classification_agent(),
            build_summary_agent()
        ],
        port=8001,        # Dev UI served at http://localhost:8001
        auto_open=True,   # open the browser automatically
        ui_enabled=True
    )

if __name__ == "__main__":
    render_in_dev_ui()

The Dev UI now opens at localhost:8001, and localhost:8001/docs lists all the APIs the Dev UI exposes.
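
If you want to list those routes programmatically, here is a rough sketch. It assumes the Dev UI server publishes its OpenAPI spec at /openapi.json (which the /docs page suggests); verify the exact path against your own /docs page:

import json
from urllib.request import urlopen

# Fetch the OpenAPI spec from the running Dev UI and print the available routes.
with urlopen("http://localhost:8001/openapi.json") as resp:
    spec = json.load(resp)

for path in spec.get("paths", {}):
    print(path)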
