diff --git a/samples/apps/autogen-studio/.gitignore b/samples/apps/autogen-studio/.gitignore index e1e3c9942ec1..549ce16b6db9 100644 --- a/samples/apps/autogen-studio/.gitignore +++ b/samples/apps/autogen-studio/.gitignore @@ -9,6 +9,9 @@ autogenstudio/web/workdir/* autogenstudio/web/ui/* autogenstudio/web/skills/user/* .release.sh +.nightly.sh + +notebooks/work_dir/* # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/samples/apps/autogen-studio/autogenstudio/chatmanager.py b/samples/apps/autogen-studio/autogenstudio/chatmanager.py index 84b85673f07c..a91401e6663d 100644 --- a/samples/apps/autogen-studio/autogenstudio/chatmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/chatmanager.py @@ -1,7 +1,5 @@ import asyncio -import json import os -import time from datetime import datetime from queue import Queue from typing import Any, Dict, List, Optional, Tuple, Union @@ -9,12 +7,7 @@ import websockets from fastapi import WebSocket, WebSocketDisconnect -from .datamodel import Message, SocketMessage, Workflow -from .utils import ( - extract_successful_code_blocks, - get_modified_files, - summarize_chat_history, -) +from .datamodel import Message from .workflowmanager import WorkflowManager @@ -82,76 +75,12 @@ def chat( connection_id=connection_id, ) - workflow = Workflow.model_validate(workflow) - message_text = message.content.strip() + result_message: Message = workflow_manager.run(message=f"{message_text}", clear_history=False, history=history) - start_time = time.time() - workflow_manager.run(message=f"{message_text}", clear_history=False) - end_time = time.time() - - metadata = { - "messages": workflow_manager.agent_history, - "summary_method": workflow.summary_method, - "time": end_time - start_time, - "files": get_modified_files(start_time, end_time, source_dir=work_dir), - } - - output = self._generate_output(message_text, workflow_manager, workflow) - - output_message = Message( - user_id=message.user_id, - role="assistant", - 
content=output, - meta=json.dumps(metadata), - session_id=message.session_id, - ) - - return output_message - - def _generate_output( - self, - message_text: str, - workflow_manager: WorkflowManager, - workflow: Workflow, - ) -> str: - """ - Generates the output response based on the workflow configuration and agent history. - - :param message_text: The text of the incoming message. - :param flow: An instance of `WorkflowManager`. - :param flow_config: An instance of `AgentWorkFlowConfig`. - :return: The output response as a string. - """ - - output = "" - if workflow.summary_method == "last": - successful_code_blocks = extract_successful_code_blocks(workflow_manager.agent_history) - last_message = ( - workflow_manager.agent_history[-1]["message"]["content"] if workflow_manager.agent_history else "" - ) - successful_code_blocks = "\n\n".join(successful_code_blocks) - output = (last_message + "\n" + successful_code_blocks) if successful_code_blocks else last_message - elif workflow.summary_method == "llm": - client = workflow_manager.receiver.client - status_message = SocketMessage( - type="agent_status", - data={ - "status": "summarizing", - "message": "Summarizing agent dialogue", - }, - connection_id=workflow_manager.connection_id, - ) - self.send(status_message.dict()) - output = summarize_chat_history( - task=message_text, - messages=workflow_manager.agent_history, - client=client, - ) - - elif workflow.summary_method == "none": - output = "" - return output + result_message.user_id = message.user_id + result_message.session_id = message.session_id + return result_message class WebSocketConnectionManager: diff --git a/samples/apps/autogen-studio/autogenstudio/cli.py b/samples/apps/autogen-studio/autogenstudio/cli.py index 42642bcd68af..81fee7991455 100644 --- a/samples/apps/autogen-studio/autogenstudio/cli.py +++ b/samples/apps/autogen-studio/autogenstudio/cli.py @@ -16,7 +16,7 @@ def ui( port: int = 8081, workers: int = 1, reload: Annotated[bool, 
typer.Option("--reload")] = False, - docs: bool = False, + docs: bool = True, appdir: str = None, database_uri: Optional[str] = None, ): @@ -48,6 +48,39 @@ def ui( ) +@app.command() +def serve( + workflow: str = "", + host: str = "127.0.0.1", + port: int = 8084, + workers: int = 1, + docs: bool = False, +): + """ + Serve an API Endpoint based on an AutoGen Studio workflow json file. + + Args: + workflow (str): Path to the workflow json file. + host (str, optional): Host to run the UI on. Defaults to 127.0.0.1 (localhost). + port (int, optional): Port to run the UI on. Defaults to 8081. + workers (int, optional): Number of workers to run the UI with. Defaults to 1. + reload (bool, optional): Whether to reload the UI on code changes. Defaults to False. + docs (bool, optional): Whether to generate API docs. Defaults to False. + + """ + + os.environ["AUTOGENSTUDIO_API_DOCS"] = str(docs) + os.environ["AUTOGENSTUDIO_WORKFLOW_FILE"] = workflow + + uvicorn.run( + "autogenstudio.web.serve:app", + host=host, + port=port, + workers=workers, + reload=False, + ) + + @app.command() def version(): """ diff --git a/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py b/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py index 00d3714b63fa..6a02a0a7038c 100644 --- a/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py @@ -1,3 +1,4 @@ +import threading from datetime import datetime from typing import Optional @@ -15,15 +16,23 @@ Skill, Workflow, WorkflowAgentLink, + WorkflowAgentType, ) from .utils import init_db_samples valid_link_types = ["agent_model", "agent_skill", "agent_agent", "workflow_agent"] +class WorkflowAgentMap(SQLModel): + agent: Agent + link: WorkflowAgentLink + + class DBManager: """A class to manage database operations""" + _init_lock = threading.Lock() # Class-level lock + def __init__(self, engine_uri: str): connection_args = {"check_same_thread": True} if 
"sqlite" in engine_uri else {} self.engine = create_engine(engine_uri, connect_args=connection_args) @@ -31,14 +40,15 @@ def __init__(self, engine_uri: str): def create_db_and_tables(self): """Create a new database and tables""" - try: - SQLModel.metadata.create_all(self.engine) + with self._init_lock: # Use the lock try: - init_db_samples(self) + SQLModel.metadata.create_all(self.engine) + try: + init_db_samples(self) + except Exception as e: + logger.info("Error while initializing database samples: " + str(e)) except Exception as e: - logger.info("Error while initializing database samples: " + str(e)) - except Exception as e: - logger.info("Error while creating database tables:" + str(e)) + logger.info("Error while creating database tables:" + str(e)) def upsert(self, model: SQLModel): """Create a new entity""" @@ -62,7 +72,7 @@ def upsert(self, model: SQLModel): session.refresh(model) except Exception as e: session.rollback() - logger.error("Error while upserting %s", e) + logger.error("Error while updating " + str(model_class.__name__) + ": " + str(e)) status = False response = Response( @@ -115,7 +125,7 @@ def get_items( session.rollback() status = False status_message = f"Error while fetching {model_class.__name__}" - logger.error("Error while getting %s: %s", model_class.__name__, e) + logger.error("Error while getting items: " + str(model_class.__name__) + " " + str(e)) response: Response = Response( message=status_message, @@ -157,16 +167,16 @@ def delete(self, model_class: SQLModel, filters: dict = None): status_message = f"{model_class.__name__} Deleted Successfully" else: print(f"Row with filters {filters} not found") - logger.info("Row with filters %s not found", filters) + logger.info("Row with filters + filters + not found") status_message = "Row not found" except exc.IntegrityError as e: session.rollback() - logger.error("Integrity ... Error while deleting: %s", e) + logger.error("Integrity ... 
Error while deleting: " + str(e)) status_message = f"The {model_class.__name__} is linked to another entity and cannot be deleted." status = False except Exception as e: session.rollback() - logger.error("Error while deleting: %s", e) + logger.error("Error while deleting: " + str(e)) status_message = f"Error while deleting: {e}" status = False response = Response( @@ -182,6 +192,7 @@ def get_linked_entities( primary_id: int, return_json: bool = False, agent_type: Optional[str] = None, + sequence_id: Optional[int] = None, ): """ Get all entities linked to the primary entity. @@ -217,19 +228,21 @@ def get_linked_entities( linked_entities = agent.agents elif link_type == "workflow_agent": linked_entities = session.exec( - select(Agent) - .join(WorkflowAgentLink) + select(WorkflowAgentLink, Agent) + .join(Agent, WorkflowAgentLink.agent_id == Agent.id) .where( WorkflowAgentLink.workflow_id == primary_id, - WorkflowAgentLink.agent_type == agent_type, ) ).all() + + linked_entities = [WorkflowAgentMap(agent=agent, link=link) for link, agent in linked_entities] + linked_entities = sorted(linked_entities, key=lambda x: x.link.sequence_id) # type: ignore except Exception as e: - logger.error("Error while getting linked entities: %s", e) + logger.error("Error while getting linked entities: " + str(e)) status_message = f"Error while getting linked entities: {e}" status = False if return_json: - linked_entities = [self._model_to_dict(row) for row in linked_entities] + linked_entities = [row.model_dump() for row in linked_entities] response = Response( message=status_message, @@ -245,6 +258,7 @@ def link( primary_id: int, secondary_id: int, agent_type: Optional[str] = None, + sequence_id: Optional[int] = None, ) -> Response: """ Link two entities together. 
@@ -357,6 +371,7 @@ def link( WorkflowAgentLink.workflow_id == primary_id, WorkflowAgentLink.agent_id == secondary_id, WorkflowAgentLink.agent_type == agent_type, + WorkflowAgentLink.sequence_id == sequence_id, ) ).first() if existing_link: @@ -373,6 +388,7 @@ def link( workflow_id=primary_id, agent_id=secondary_id, agent_type=agent_type, + sequence_id=sequence_id, ) session.add(workflow_agent_link) # add and commit the link @@ -385,7 +401,7 @@ def link( except Exception as e: session.rollback() - logger.error("Error while linking: %s", e) + logger.error("Error while linking: " + str(e)) status = False status_message = f"Error while linking due to an exception: {e}" @@ -402,6 +418,7 @@ def unlink( primary_id: int, secondary_id: int, agent_type: Optional[str] = None, + sequence_id: Optional[int] = 0, ) -> Response: """ Unlink two entities. @@ -417,6 +434,7 @@ def unlink( """ status = True status_message = "" + print("primary", primary_id, "secondary", secondary_id, "sequence", sequence_id, "agent_type", agent_type) if link_type not in valid_link_types: status = False @@ -452,6 +470,7 @@ def unlink( WorkflowAgentLink.workflow_id == primary_id, WorkflowAgentLink.agent_id == secondary_id, WorkflowAgentLink.agent_type == agent_type, + WorkflowAgentLink.sequence_id == sequence_id, ) ).first() @@ -465,7 +484,7 @@ def unlink( except Exception as e: session.rollback() - logger.error("Error while unlinking: %s", e) + logger.error("Error while unlinking: " + str(e)) status = False status_message = f"Error while unlinking due to an exception: {e}" diff --git a/samples/apps/autogen-studio/autogenstudio/database/utils.py b/samples/apps/autogen-studio/autogenstudio/database/utils.py index c14003b414c3..189fa1baf8d1 100644 --- a/samples/apps/autogen-studio/autogenstudio/database/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/database/utils.py @@ -23,6 +23,7 @@ Skill, Workflow, WorkflowAgentLink, + WorkFlowType, ) @@ -71,9 +72,15 @@ def get_agent(agent_id): 
agent_dict["agents"] = [get_agent(agent.id) for agent in agent.agents] return agent_dict + agents = [] for link in workflow_agent_links: agent_dict = get_agent(link.agent_id) - workflow[str(link.agent_type.value)] = agent_dict + agents.append({"agent": agent_dict, "link": link.model_dump(mode="json")}) + # workflow[str(link.agent_type.value)] = agent_dict + if workflow["type"] == WorkFlowType.sequential.value: + # sort agents by sequence_id in link + agents = sorted(agents, key=lambda x: x["link"]["sequence_id"]) + workflow["agents"] = agents return workflow @@ -141,9 +148,13 @@ def init_db_samples(dbmanager: Any): logger.info("Database already initialized with Default and Travel Planning Workflows") return logger.info("Initializing database with Default and Travel Planning Workflows") + # models - gpt_4_model = Model( - model="gpt-4-1106-preview", description="OpenAI GPT-4 model", user_id="guestuser@gmail.com", api_type="open_ai" + google_gemini_model = Model( + model="gemini-1.5-pro-latest", + description="Google's Gemini model", + user_id="guestuser@gmail.com", + api_type="google", ) azure_model = Model( model="gpt4-turbo", @@ -160,61 +171,42 @@ def init_db_samples(dbmanager: Any): api_type="open_ai", ) - google_gemini_model = Model( - model="gemini-1.5-pro-latest", - description="Google's Gemini model", - user_id="guestuser@gmail.com", - api_type="google", + gpt_4_model = Model( + model="gpt-4-1106-preview", description="OpenAI GPT-4 model", user_id="guestuser@gmail.com", api_type="open_ai" ) # skills - + generate_pdf_skill = Skill( + name="generate_and_save_pdf", + description="Generate and save a pdf file based on the provided input sections.", + user_id="guestuser@gmail.com", + libraries=["requests", "fpdf", "PIL"], + content='import uuid\nimport requests\nfrom fpdf import FPDF\nfrom typing import List, Dict, Optional\nfrom pathlib import Path\nfrom PIL import Image, ImageDraw, ImageOps\nfrom io import BytesIO\n\ndef generate_and_save_pdf(\n sections: 
List[Dict[str, Optional[str]]], \n output_file: str = "report.pdf", \n report_title: str = "PDF Report"\n) -> None:\n """\n Function to generate a beautiful PDF report in A4 paper format. \n\n :param sections: A list of sections where each section is represented by a dictionary containing:\n - title: The title of the section.\n - level: The heading level (e.g., "title", "h1", "h2").\n - content: The content or body text of the section.\n - image: (Optional) The URL or local path to the image.\n :param output_file: The name of the output PDF file. (default is "report.pdf")\n :param report_title: The title of the report. (default is "PDF Report")\n :return: None\n """\n\n def get_image(image_url_or_path):\n if image_url_or_path.startswith("http://") or image_url_or_path.startswith("https://"):\n response = requests.get(image_url_or_path)\n if response.status_code == 200:\n return BytesIO(response.content)\n elif Path(image_url_or_path).is_file():\n return open(image_url_or_path, \'rb\')\n return None\n\n def add_rounded_corners(img, radius=6):\n mask = Image.new(\'L\', img.size, 0)\n draw = ImageDraw.Draw(mask)\n draw.rounded_rectangle([(0, 0), img.size], radius, fill=255)\n img = ImageOps.fit(img, mask.size, centering=(0.5, 0.5))\n img.putalpha(mask)\n return img\n\n class PDF(FPDF):\n def header(self):\n self.set_font("Arial", "B", 12)\n self.cell(0, 10, report_title, 0, 1, "C")\n \n def chapter_title(self, txt): \n self.set_font("Arial", "B", 12)\n self.cell(0, 10, txt, 0, 1, "L")\n self.ln(2)\n \n def chapter_body(self, body):\n self.set_font("Arial", "", 12)\n self.multi_cell(0, 10, body)\n self.ln()\n\n def add_image(self, img_data):\n img = Image.open(img_data)\n img = add_rounded_corners(img)\n img_path = Path(f"temp_{uuid.uuid4().hex}.png")\n img.save(img_path, format="PNG")\n self.image(str(img_path), x=None, y=None, w=190 if img.width > 190 else img.width)\n self.ln(10)\n img_path.unlink()\n\n pdf = PDF()\n pdf.add_page()\n font_size = {"title": 16, "h1": 
14, "h2": 12, "body": 12}\n\n for section in sections:\n title, level, content, image = section.get("title", ""), section.get("level", "h1"), section.get("content", ""), section.get("image")\n pdf.set_font("Arial", "B" if level in font_size else "", font_size.get(level, font_size["body"]))\n pdf.chapter_title(title)\n\n if content: pdf.chapter_body(content)\n if image:\n img_data = get_image(image)\n if img_data:\n pdf.add_image(img_data)\n if isinstance(img_data, BytesIO):\n img_data.close()\n\n pdf.output(output_file)\n print(f"PDF report saved as {output_file}")\n\n# # Example usage\n# sections = [\n# {\n# "title": "Introduction - Early Life",\n# "level": "h1",\n# "image": "https://picsum.photos/536/354",\n# "content": ("Marie Curie was born on 7 November 1867 in Warsaw, Poland. "\n# "She was the youngest of five children. Both of her parents were teachers. "\n# "Her father was a math and physics instructor, and her mother was the head of a private school. "\n# "Marie\'s curiosity and brilliance were evident from an early age."),\n# },\n# {\n# "title": "Academic Accomplishments",\n# "level": "h2",\n# "content": ("Despite many obstacles, Marie Curie earned degrees in physics and mathematics from the University of Paris. "\n# "She conducted groundbreaking research on radioactivity, becoming the first woman to win a Nobel Prize. "\n# "Her achievements paved the way for future generations of scientists, particularly women in STEM fields."),\n# },\n# {\n# "title": "Major Discoveries",\n# "level": "h2",\n# "image": "https://picsum.photos/536/354",\n# "content": ("One of Marie Curie\'s most notable discoveries was that of radium and polonium, two radioactive elements. 
"\n# "Her meticulous work not only advanced scientific understanding but also had practical applications in medicine and industry."),\n# },\n# {\n# "title": "Conclusion - Legacy",\n# "level": "h1",\n# "content": ("Marie Curie\'s legacy lives on through her contributions to science, her role as a trailblazer for women in STEM, "\n# "and the ongoing impact of her discoveries on modern medicine and technology. "\n# "Her life and work remain an inspiration to many, demonstrating the power of perseverance and intellectual curiosity."),\n# },\n# ]\n\n# generate_and_save_pdf_report(sections, "my_report.pdf", "The Life of Marie Curie")', + ) generate_image_skill = Skill( - name="generate_images", + name="generate_and_save_images", + secrets=[{"secret": "OPENAI_API_KEY", "value": None}], + libraries=["openai"], description="Generate and save images based on a user's query.", content='\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = "1024x1024") -> List[str]:\n """\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI\'s DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is "1024x1024")\n :return: A list of filenames for the saved images.\n """\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model="dall-e-3", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + ".png" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, "wb") as img_file:\n img_file.write(img_response.content)\n print(f"Image saved to {file_path}")\n saved_files.append(str(file_path))\n else:\n print(f"Failed to download the image from {img_url}")\n else:\n print("No image data found in the response!")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images("A cute baby sea otter")\n', user_id="guestuser@gmail.com", ) # agents - user_proxy_config = AgentConfig( - name="user_proxy", - description="User Proxy Agent Configuration", - human_input_mode="NEVER", - max_consecutive_auto_reply=25, - system_message="You are a helpful assistant", - code_execution_config=CodeExecutionConfigTypes.local, - default_auto_reply="TERMINATE", - llm_config=False, - ) - user_proxy = Agent( - user_id="guestuser@gmail.com", type=AgentType.userproxy, config=user_proxy_config.model_dump(mode="json") - ) - - painter_assistant_config = AgentConfig( - name="default_assistant", - description="Assistant Agent", - human_input_mode="NEVER", - max_consecutive_auto_reply=25, - system_message=AssistantAgent.DEFAULT_SYSTEM_MESSAGE, - code_execution_config=CodeExecutionConfigTypes.none, - llm_config={}, - ) - painter_assistant = Agent( - user_id="guestuser@gmail.com", 
type=AgentType.assistant, config=painter_assistant_config.model_dump(mode="json") - ) planner_assistant_config = AgentConfig( name="planner_assistant", description="Assistant Agent", human_input_mode="NEVER", max_consecutive_auto_reply=25, - system_message="You are a helpful assistant that can suggest a travel plan for a user. You are the primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.", + system_message="You are a helpful assistant that can suggest a travel plan for a user and utilize any context information provided. You are the primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.", code_execution_config=CodeExecutionConfigTypes.none, llm_config={}, ) planner_assistant = Agent( - user_id="guestuser@gmail.com", type=AgentType.assistant, config=planner_assistant_config.model_dump(mode="json") + user_id="guestuser@gmail.com", + type=AgentType.assistant, + config=planner_assistant_config.model_dump(mode="json"), ) local_assistant_config = AgentConfig( @@ -222,7 +214,7 @@ def init_db_samples(dbmanager: Any): description="Local Assistant Agent", human_input_mode="NEVER", max_consecutive_auto_reply=25, - system_message="You are a local assistant that can suggest local activities or places to visit for a user. You can suggest local activities, places to visit, restaurants to eat at, etc. You can also provide information about the weather, local events, etc. 
You can provide information about the local area, but you cannot suggest a complete travel plan. You can only provide information about the local area.", + system_message="You are a local assistant that can suggest local activities or places to visit for a user and can utilize any context information provided. You can suggest local activities, places to visit, restaurants to eat at, etc. You can also provide information about the weather, local events, etc. You can provide information about the local area, but you cannot suggest a complete travel plan. You can only provide information about the local area.", code_execution_config=CodeExecutionConfigTypes.none, llm_config={}, ) @@ -245,7 +237,7 @@ def init_db_samples(dbmanager: Any): config=language_assistant_config.model_dump(mode="json"), ) - # group chat + # group chat agent travel_groupchat_config = AgentConfig( name="travel_groupchat", admin_name="groupchat", @@ -262,11 +254,48 @@ def init_db_samples(dbmanager: Any): user_id="guestuser@gmail.com", type=AgentType.groupchat, config=travel_groupchat_config.model_dump(mode="json") ) - # workflows - default_workflow = Workflow(name="Default Workflow", description="Default workflow", user_id="guestuser@gmail.com") + user_proxy_config = AgentConfig( + name="user_proxy", + description="User Proxy Agent Configuration", + human_input_mode="NEVER", + max_consecutive_auto_reply=25, + system_message="You are a helpful assistant", + code_execution_config=CodeExecutionConfigTypes.local, + default_auto_reply="TERMINATE", + llm_config=False, + ) + user_proxy = Agent( + user_id="guestuser@gmail.com", type=AgentType.userproxy, config=user_proxy_config.model_dump(mode="json") + ) + + default_assistant_config = AgentConfig( + name="default_assistant", + description="Assistant Agent", + human_input_mode="NEVER", + max_consecutive_auto_reply=25, + system_message=AssistantAgent.DEFAULT_SYSTEM_MESSAGE, + code_execution_config=CodeExecutionConfigTypes.none, + llm_config={}, + ) + 
default_assistant = Agent( + user_id="guestuser@gmail.com", type=AgentType.assistant, config=default_assistant_config.model_dump(mode="json") + ) + # workflows travel_workflow = Workflow( - name="Travel Planning Workflow", description="Travel workflow", user_id="guestuser@gmail.com" + name="Travel Planning Workflow", + description="Travel workflow", + user_id="guestuser@gmail.com", + sample_tasks=["Plan a 3 day trip to Hawaii Islands.", "Plan an eventful and exciting trip to Uzbeksitan."], + ) + default_workflow = Workflow( + name="Default Workflow", + description="Default workflow", + user_id="guestuser@gmail.com", + sample_tasks=[ + "paint a picture of a glass of ethiopian coffee, freshly brewed in a tall glass cup, on a table right in front of a lush green forest scenery", + "Plot the stock price of NVIDIA YTD.", + ], ) with Session(dbmanager.engine) as session: @@ -275,26 +304,27 @@ def init_db_samples(dbmanager: Any): session.add(azure_model) session.add(gpt_4_model) session.add(generate_image_skill) + session.add(generate_pdf_skill) session.add(user_proxy) - session.add(painter_assistant) + session.add(default_assistant) session.add(travel_groupchat_agent) session.add(planner_assistant) session.add(local_assistant) session.add(language_assistant) - session.add(default_workflow) session.add(travel_workflow) + session.add(default_workflow) session.commit() - dbmanager.link(link_type="agent_model", primary_id=painter_assistant.id, secondary_id=gpt_4_model.id) - dbmanager.link(link_type="agent_skill", primary_id=painter_assistant.id, secondary_id=generate_image_skill.id) + dbmanager.link(link_type="agent_model", primary_id=default_assistant.id, secondary_id=gpt_4_model.id) + dbmanager.link(link_type="agent_skill", primary_id=default_assistant.id, secondary_id=generate_image_skill.id) dbmanager.link( link_type="workflow_agent", primary_id=default_workflow.id, secondary_id=user_proxy.id, agent_type="sender" ) dbmanager.link( link_type="workflow_agent", 
primary_id=default_workflow.id, - secondary_id=painter_assistant.id, + secondary_id=default_assistant.id, agent_type="receiver", ) diff --git a/samples/apps/autogen-studio/autogenstudio/datamodel.py b/samples/apps/autogen-studio/autogenstudio/datamodel.py index 3dbd46c357ee..6c6dc567a808 100644 --- a/samples/apps/autogen-studio/autogenstudio/datamodel.py +++ b/samples/apps/autogen-studio/autogenstudio/datamodel.py @@ -20,6 +20,16 @@ # pylint: disable=protected-access +class MessageMeta(SQLModel, table=False): + task: Optional[str] = None + messages: Optional[List[Dict[str, Any]]] = None + summary_method: Optional[str] = "last" + files: Optional[List[dict]] = None + time: Optional[datetime] = None + log: Optional[List[dict]] = None + usage: Optional[List[dict]] = None + + class Message(SQLModel, table=True): __table_args__ = {"sqlite_autoincrement": True} id: Optional[int] = Field(default=None, primary_key=True) @@ -38,7 +48,7 @@ class Message(SQLModel, table=True): default=None, sa_column=Column(Integer, ForeignKey("session.id", ondelete="CASCADE")) ) connection_id: Optional[str] = None - meta: Optional[Dict] = Field(default={}, sa_column=Column(JSON)) + meta: Optional[Union[MessageMeta, dict]] = Field(default={}, sa_column=Column(JSON)) class Session(SQLModel, table=True): @@ -82,11 +92,12 @@ class Skill(SQLModel, table=True): sa_column=Column(DateTime(timezone=True), onupdate=func.now()), ) # pylint: disable=not-callable user_id: Optional[str] = None + version: Optional[str] = "0.0.1" name: str content: str description: Optional[str] = None - secrets: Optional[Dict] = Field(default={}, sa_column=Column(JSON)) - libraries: Optional[Dict] = Field(default={}, sa_column=Column(JSON)) + secrets: Optional[List[dict]] = Field(default_factory=list, sa_column=Column(JSON)) + libraries: Optional[List[str]] = Field(default_factory=list, sa_column=Column(JSON)) agents: List["Agent"] = Relationship(back_populates="skills", link_model=AgentSkillLink) @@ -97,7 +108,7 @@ class 
LLMConfig(SQLModel, table=False): temperature: float = 0 cache_seed: Optional[Union[int, None]] = None timeout: Optional[int] = None - max_tokens: Optional[int] = 1000 + max_tokens: Optional[int] = 2048 extra_body: Optional[dict] = None @@ -105,6 +116,10 @@ class ModelTypes(str, Enum): openai = "open_ai" google = "google" azure = "azure" + anthropic = "anthropic" + mistral = "mistral" + together = "together" + groq = "groq" class Model(SQLModel, table=True): @@ -119,6 +134,7 @@ class Model(SQLModel, table=True): sa_column=Column(DateTime(timezone=True), onupdate=func.now()), ) # pylint: disable=not-callable user_id: Optional[str] = None + version: Optional[str] = "0.0.1" model: str api_key: Optional[str] = None base_url: Optional[str] = None @@ -164,6 +180,7 @@ class WorkflowAgentType(str, Enum): sender = "sender" receiver = "receiver" planner = "planner" + sequential = "sequential" class WorkflowAgentLink(SQLModel, table=True): @@ -174,6 +191,7 @@ class WorkflowAgentLink(SQLModel, table=True): default=WorkflowAgentType.sender, sa_column=Column(SqlEnum(WorkflowAgentType), primary_key=True), ) + sequence_id: Optional[int] = Field(default=0, primary_key=True) class AgentLink(SQLModel, table=True): @@ -194,8 +212,9 @@ class Agent(SQLModel, table=True): sa_column=Column(DateTime(timezone=True), onupdate=func.now()), ) # pylint: disable=not-callable user_id: Optional[str] = None + version: Optional[str] = "0.0.1" type: AgentType = Field(default=AgentType.assistant, sa_column=Column(SqlEnum(AgentType))) - config: AgentConfig = Field(default_factory=AgentConfig, sa_column=Column(JSON)) + config: Union[AgentConfig, dict] = Field(default_factory=AgentConfig, sa_column=Column(JSON)) skills: List[Skill] = Relationship(back_populates="agents", link_model=AgentSkillLink) models: List[Model] = Relationship(back_populates="agents", link_model=AgentModelLink) workflows: List["Workflow"] = Relationship(link_model=WorkflowAgentLink, back_populates="agents") @@ -215,11 +234,12 @@ 
class Agent(SQLModel, table=True): secondaryjoin="Agent.id==AgentLink.agent_id", ), ) + task_instruction: Optional[str] = None class WorkFlowType(str, Enum): - twoagents = "twoagents" - groupchat = "groupchat" + autonomous = "autonomous" + sequential = "sequential" class WorkFlowSummaryMethod(str, Enum): @@ -240,14 +260,16 @@ class Workflow(SQLModel, table=True): sa_column=Column(DateTime(timezone=True), onupdate=func.now()), ) # pylint: disable=not-callable user_id: Optional[str] = None + version: Optional[str] = "0.0.1" name: str description: str agents: List[Agent] = Relationship(back_populates="workflows", link_model=WorkflowAgentLink) - type: WorkFlowType = Field(default=WorkFlowType.twoagents, sa_column=Column(SqlEnum(WorkFlowType))) + type: WorkFlowType = Field(default=WorkFlowType.autonomous, sa_column=Column(SqlEnum(WorkFlowType))) summary_method: Optional[WorkFlowSummaryMethod] = Field( default=WorkFlowSummaryMethod.last, sa_column=Column(SqlEnum(WorkFlowSummaryMethod)), ) + sample_tasks: Optional[List[str]] = Field(default_factory=list, sa_column=Column(JSON)) class Response(SQLModel): diff --git a/samples/apps/autogen-studio/autogenstudio/profiler.py b/samples/apps/autogen-studio/autogenstudio/profiler.py new file mode 100644 index 000000000000..679a56917e20 --- /dev/null +++ b/samples/apps/autogen-studio/autogenstudio/profiler.py @@ -0,0 +1,108 @@ +# metrics - agent_frequency, execution_count, tool_count, + +from typing import Dict, List, Optional + +from .datamodel import Message, MessageMeta + + +class Profiler: + """ + Profiler class to profile agent task runs and compute metrics + for performance evaluation. + """ + + def __init__(self): + self.metrics: List[Dict] = [] + + def _is_code(self, message: Message) -> bool: + """ + Check if the message contains code. + + :param message: The message instance to check. + :return: True if the message contains code, False otherwise. 
+ """ + content = message.get("message").get("content").lower() + return "```" in content + + def _is_tool(self, message: Message) -> bool: + """ + Check if the message uses a tool. + + :param message: The message instance to check. + :return: True if the message uses a tool, False otherwise. + """ + content = message.get("message").get("content").lower() + return "from skills import" in content + + def _is_code_execution(self, message: Message) -> bool: + """ + Check if the message indicates code execution. + + :param message: The message instance to check. + :return: dict with is_code and status keys. + """ + content = message.get("message").get("content").lower() + if "exitcode:" in content: + status = "exitcode: 0" in content + return {"is_code": True, "status": status} + else: + return {"is_code": False, "status": False} + + def _is_terminate(self, message: Message) -> bool: + """ + Check if the message indicates termination. + + :param message: The message instance to check. + :return: True if the message indicates termination, False otherwise. + """ + content = message.get("message").get("content").lower() + return "terminate" in content + + def profile(self, agent_message: Message): + """ + Profile the agent task run and compute metrics. + + :param agent: The agent instance that ran the task. + :param task: The task instance that was run. 
+ """ + meta = MessageMeta(**agent_message.meta) + print(meta.log) + usage = meta.usage + messages = meta.messages + profile = [] + bar = [] + stats = {} + total_code_executed = 0 + success_code_executed = 0 + agents = [] + for message in messages: + agent = message.get("sender") + is_code = self._is_code(message) + is_tool = self._is_tool(message) + is_code_execution = self._is_code_execution(message) + total_code_executed += is_code_execution["is_code"] + success_code_executed += 1 if is_code_execution["status"] else 0 + + row = { + "agent": agent, + "tool_call": is_code, + "code_execution": is_code_execution, + "terminate": self._is_terminate(message), + } + bar_row = { + "agent": agent, + "tool_call": "tool call" if is_tool else "no tool call", + "code_execution": ( + "success" + if is_code_execution["status"] + else "failure" if is_code_execution["is_code"] else "no code" + ), + "message": 1, + } + profile.append(row) + bar.append(bar_row) + agents.append(agent) + code_success_rate = (success_code_executed / total_code_executed if total_code_executed > 0 else 0) * 100 + stats["code_success_rate"] = code_success_rate + stats["total_code_executed"] = total_code_executed + return {"profile": profile, "bar": bar, "stats": stats, "agents": set(agents), "usage": usage} diff --git a/samples/apps/autogen-studio/autogenstudio/utils/utils.py b/samples/apps/autogen-studio/autogenstudio/utils/utils.py index ed533ec3883c..40cd549cb06b 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/utils/utils.py @@ -289,7 +289,7 @@ def init_app_folders(app_file_path: str) -> Dict[str, str]: return folders -def get_skills_from_prompt(skills: List[Skill], work_dir: str) -> str: +def get_skills_prompt(skills: List[Skill], work_dir: str) -> str: """ Create a prompt with the content of all skills and write the skills to a file named skills.py in the work_dir. 
@@ -306,10 +306,18 @@ def get_skills_from_prompt(skills: List[Skill], work_dir: str) -> str: """ prompt = "" # filename: skills.py + for skill in skills: + if not isinstance(skill, Skill): + skill = Skill(**skill) + if skill.secrets: + for secret in skill.secrets: + if secret.get("value") is not None: + os.environ[secret["secret"]] = secret["value"] prompt += f""" ##### Begin of {skill.name} ##### +from skills import {skill.name} # Import the function from skills.py {skill.content} @@ -317,15 +325,40 @@ def get_skills_from_prompt(skills: List[Skill], work_dir: str) -> str: """ + return instruction + prompt + + +def save_skills_to_file(skills: List[Skill], work_dir: str) -> None: + """ + Write the skills to a file named skills.py in the work_dir. + + :param skills: A dictionary skills + """ + + # TBD: Double check for duplicate skills? + # check if work_dir exists if not os.path.exists(work_dir): os.makedirs(work_dir) + skills_content = "" + for skill in skills: + if not isinstance(skill, Skill): + skill = Skill(**skill) + + skills_content += f""" + +##### Begin of {skill.name} ##### + +{skill.content} + +#### End of {skill.name} #### + + """ + # overwrite skills.py in work_dir with open(os.path.join(work_dir, "skills.py"), "w", encoding="utf-8") as f: - f.write(prompt) - - return instruction + prompt + f.write(skills_content) def delete_files_in_folder(folders: Union[str, List[str]]) -> None: @@ -405,9 +438,23 @@ def test_model(model: Model): Test the model endpoint by sending a simple message to the model and returning the response. """ + print("Testing model", model) + sanitized_model = sanitize_model(model) client = OpenAIWrapper(config_list=[sanitized_model]) - response = client.create(messages=[{"role": "user", "content": "2+2="}], cache_seed=None) + response = client.create( + messages=[ + { + "role": "system", + "content": "You are a helpful assistant that can add numbers. 
ONLY RETURN THE RESULT.", + }, + { + "role": "user", + "content": "2+2=", + }, + ], + cache_seed=None, + ) return response.choices[0].message.content @@ -426,7 +473,11 @@ def load_code_execution_config(code_execution_type: CodeExecutionConfigTypes, wo if code_execution_type == CodeExecutionConfigTypes.local: executor = LocalCommandLineCodeExecutor(work_dir=work_dir) elif code_execution_type == CodeExecutionConfigTypes.docker: - executor = DockerCommandLineCodeExecutor(work_dir=work_dir) + try: + executor = DockerCommandLineCodeExecutor(work_dir=work_dir) + except Exception as e: + logger.error(f"Error initializing Docker executor: {e}") + return False elif code_execution_type == CodeExecutionConfigTypes.none: return False else: @@ -462,3 +513,61 @@ def summarize_chat_history(task: str, messages: List[Dict[str, str]], client: Mo ] response = client.create(messages=summarization_prompt, cache_seed=None) return response.choices[0].message.content + + +def get_autogen_log(db_path="logs.db"): + """ + Fetches data the autogen logs database. + Args: + dbname (str): Name of the database file. Defaults to "logs.db". + table (str): Name of the table to query. Defaults to "chat_completions". + + Returns: + list: A list of dictionaries, where each dictionary represents a row from the table. 
+ """ + import json + import sqlite3 + + con = sqlite3.connect(db_path) + query = """ + SELECT + chat_completions.*, + agents.name AS agent_name + FROM + chat_completions + JOIN + agents ON chat_completions.wrapper_id = agents.wrapper_id + """ + cursor = con.execute(query) + rows = cursor.fetchall() + column_names = [description[0] for description in cursor.description] + data = [dict(zip(column_names, row)) for row in rows] + for row in data: + response = json.loads(row["response"]) + print(response) + total_tokens = response.get("usage", {}).get("total_tokens", 0) + row["total_tokens"] = total_tokens + con.close() + return data + + +def find_key_value(d, target_key): + """ + Recursively search for a key in a nested dictionary and return its value. + """ + if d is None: + return None + + if isinstance(d, dict): + if target_key in d: + return d[target_key] + for k in d: + item = find_key_value(d[k], target_key) + if item is not None: + return item + elif isinstance(d, list): + for i in d: + item = find_key_value(i, target_key) + if item is not None: + return item + return None diff --git a/samples/apps/autogen-studio/autogenstudio/version.py b/samples/apps/autogen-studio/autogenstudio/version.py index bafe37f75b14..3d83da06d441 100644 --- a/samples/apps/autogen-studio/autogenstudio/version.py +++ b/samples/apps/autogen-studio/autogenstudio/version.py @@ -1,3 +1,3 @@ -VERSION = "0.0.56rc9" +VERSION = "0.1.4" __version__ = VERSION APP_NAME = "autogenstudio" diff --git a/samples/apps/autogen-studio/autogenstudio/web/app.py b/samples/apps/autogen-studio/autogenstudio/web/app.py index 76ab8139ebc3..5926f6c64a14 100644 --- a/samples/apps/autogen-studio/autogenstudio/web/app.py +++ b/samples/apps/autogen-studio/autogenstudio/web/app.py @@ -4,7 +4,7 @@ import threading import traceback from contextlib import asynccontextmanager -from typing import Any +from typing import Any, Union from fastapi import FastAPI, WebSocket, WebSocketDisconnect from fastapi.middleware.cors 
import CORSMiddleware @@ -16,9 +16,11 @@ from ..database import workflow_from_id from ..database.dbmanager import DBManager from ..datamodel import Agent, Message, Model, Response, Session, Skill, Workflow +from ..profiler import Profiler from ..utils import check_and_cast_datetime_fields, init_app_folders, md5_hash, test_model from ..version import VERSION +profiler = Profiler() managers = {"chat": None} # manage calls to autogen # Create thread-safe queue for messages between api thread and autogen threads message_queue = queue.Queue() @@ -92,8 +94,15 @@ async def lifespan(app: FastAPI): allow_headers=["*"], ) - -api = FastAPI(root_path="/api") +show_docs = os.environ.get("AUTOGENSTUDIO_API_DOCS", "False").lower() == "true" +docs_url = "/docs" if show_docs else None +api = FastAPI( + root_path="/api", + title="AutoGen Studio API", + version=VERSION, + docs_url=docs_url, + description="AutoGen Studio is a low-code tool for building and testing multi-agent workflows using AutoGen.", +) # mount an api route such that the main route serves the ui and the /api app.mount("/api", api) @@ -293,6 +302,19 @@ async def get_workflow(workflow_id: int, user_id: str): return list_entity(Workflow, filters=filters) +@api.get("/workflows/export/{workflow_id}") +async def export_workflow(workflow_id: int, user_id: str): + """Export a user workflow""" + response = Response(message="Workflow exported successfully", status=True, data=None) + try: + workflow_details = workflow_from_id(workflow_id, dbmanager=dbmanager) + response.data = workflow_details + except Exception as ex_error: + response.message = "Error occurred while exporting workflow: " + str(ex_error) + response.status = False + return response.model_dump(mode="json") + + @api.post("/workflows") async def create_workflow(workflow: Workflow): """Create a new workflow""" @@ -317,6 +339,19 @@ async def link_workflow_agent(workflow_id: int, agent_id: int, agent_type: str): ) 
+@api.post("/workflows/link/agent/{workflow_id}/{agent_id}/{agent_type}/{sequence_id}") +async def link_workflow_agent_sequence(workflow_id: int, agent_id: int, agent_type: str, sequence_id: int): + """Link an agent to a workflow""" + print("Sequence ID: ", sequence_id) + return dbmanager.link( + link_type="workflow_agent", + primary_id=workflow_id, + secondary_id=agent_id, + agent_type=agent_type, + sequence_id=sequence_id, + ) + + @api.delete("/workflows/link/agent/{workflow_id}/{agent_id}/{agent_type}") async def unlink_workflow_agent(workflow_id: int, agent_id: int, agent_type: str): """Unlink an agent from a workflow""" @@ -328,17 +363,47 @@ async def unlink_workflow_agent(workflow_id: int, agent_id: int, agent_type: str ) -@api.get("/workflows/link/agent/{workflow_id}/{agent_type}") -async def get_linked_workflow_agents(workflow_id: int, agent_type: str): +@api.delete("/workflows/link/agent/{workflow_id}/{agent_id}/{agent_type}/{sequence_id}") +async def unlink_workflow_agent_sequence(workflow_id: int, agent_id: int, agent_type: str, sequence_id: int): + """Unlink an agent from a workflow sequence""" + return dbmanager.unlink( + link_type="workflow_agent", + primary_id=workflow_id, + secondary_id=agent_id, + agent_type=agent_type, + sequence_id=sequence_id, + ) + + +@api.get("/workflows/link/agent/{workflow_id}") +async def get_linked_workflow_agents(workflow_id: int): """Get all agents linked to a workflow""" return dbmanager.get_linked_entities( link_type="workflow_agent", primary_id=workflow_id, - agent_type=agent_type, return_json=True, ) +@api.get("/profiler/{message_id}") +async def profile_agent_task_run(message_id: int): + """Profile an agent task run""" + try: + agent_message = dbmanager.get(Message, filters={"id": message_id}).data[0] + + profile = profiler.profile(agent_message) + return { + "status": True, + "message": "Agent task run profiled successfully", + "data": profile, + } + except Exception as ex_error: + return { + "status": False, + 
"message": "Error occurred while profiling agent task run: " + str(ex_error), + } + + @api.get("/sessions") async def list_sessions(user_id: str): """List all sessions for a user""" @@ -395,7 +460,6 @@ async def run_session_workflow(message: Message, session_id: int, workflow_id: i response: Response = dbmanager.upsert(agent_response) return response.model_dump(mode="json") except Exception as ex_error: - print(traceback.format_exc()) return { "status": False, "message": "Error occurred while processing message: " + str(ex_error), diff --git a/samples/apps/autogen-studio/autogenstudio/web/serve.py b/samples/apps/autogen-studio/autogenstudio/web/serve.py new file mode 100644 index 000000000000..462615378b8a --- /dev/null +++ b/samples/apps/autogen-studio/autogenstudio/web/serve.py @@ -0,0 +1,30 @@ +# loads a fast api api endpoint with a single endpoint that takes text query and return a response + +import json +import os + +from fastapi import FastAPI + +from ..datamodel import Response +from ..workflowmanager import WorkflowManager + +app = FastAPI() +workflow_file_path = os.environ.get("AUTOGENSTUDIO_WORKFLOW_FILE", None) + + +if workflow_file_path: + workflow_manager = WorkflowManager(workflow=workflow_file_path) +else: + raise ValueError("Workflow file must be specified") + + +@app.get("/predict/{task}") +async def predict(task: str): + response = Response(message="Task successfully completed", status=True, data=None) + try: + result_message = workflow_manager.run(message=task, clear_history=False) + response.data = result_message + except Exception as e: + response.message = str(e) + response.status = False + return response diff --git a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py index 8b41caab4285..f5065e85e5c8 100644 --- a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py @@ -1,4 +1,6 @@ +import json 
import os +import time from datetime import datetime from typing import Any, Dict, List, Optional, Union @@ -7,20 +9,33 @@ from .datamodel import ( Agent, AgentType, + CodeExecutionConfigTypes, Message, SocketMessage, + Workflow, + WorkFlowSummaryMethod, + WorkFlowType, +) +from .utils import ( + clear_folder, + find_key_value, + get_modified_files, + get_skills_prompt, + load_code_execution_config, + sanitize_model, + save_skills_to_file, + summarize_chat_history, ) -from .utils import clear_folder, get_skills_from_prompt, load_code_execution_config, sanitize_model -class WorkflowManager: +class AutoWorkflowManager: """ - AutoGenWorkFlowManager class to load agents from a provided configuration and run a chat between them + WorkflowManager class to load agents from a provided configuration and run a chat between them. """ def __init__( self, - workflow: Dict, + workflow: Union[Dict, str], history: Optional[List[Message]] = None, work_dir: str = None, clear_work_dir: bool = True, @@ -28,27 +43,74 @@ def __init__( connection_id: Optional[str] = None, ) -> None: """ - Initializes the AutoGenFlow with agents specified in the config and optional - message history. + Initializes the WorkflowManager with agents specified in the config and optional message history. Args: - config: The configuration settings for the sender and receiver agents. - history: An optional list of previous messages to populate the agents' history. - + workflow (Union[Dict, str]): The workflow configuration. This can be a dictionary or a string which is a path to a JSON file. + history (Optional[List[Message]]): The message history. + work_dir (str): The working directory. + clear_work_dir (bool): If set to True, clears the working directory. + send_message_function (Optional[callable]): The function to send messages. + connection_id (Optional[str]): The connection identifier. 
""" + if isinstance(workflow, str): + if os.path.isfile(workflow): + with open(workflow, "r") as file: + self.workflow = json.load(file) + else: + raise FileNotFoundError(f"The file {workflow} does not exist.") + elif isinstance(workflow, dict): + self.workflow = workflow + else: + raise ValueError("The 'workflow' parameter should be either a dictionary or a valid JSON file path") + # TODO - improved typing for workflow + self.workflow_skills = [] self.send_message_function = send_message_function self.connection_id = connection_id self.work_dir = work_dir or "work_dir" + self.code_executor_pool = { + CodeExecutionConfigTypes.local: load_code_execution_config( + CodeExecutionConfigTypes.local, work_dir=self.work_dir + ), + CodeExecutionConfigTypes.docker: load_code_execution_config( + CodeExecutionConfigTypes.docker, work_dir=self.work_dir + ), + } if clear_work_dir: clear_folder(self.work_dir) - self.workflow = workflow - self.sender = self.load(workflow.get("sender")) - self.receiver = self.load(workflow.get("receiver")) self.agent_history = [] + self.history = history or [] + self.sender = None + self.receiver = None - if history: - self._populate_history(history) + def _run_workflow(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> None: + """ + Runs the workflow based on the provided configuration. + + Args: + message: The initial message to start the chat. + history: A list of messages to populate the agents' history. + clear_history: If set to True, clears the chat history before initiating. 
+ + """ + for agent in self.workflow.get("agents", []): + if agent.get("link").get("agent_type") == "sender": + self.sender = self.load(agent.get("agent")) + elif agent.get("link").get("agent_type") == "receiver": + self.receiver = self.load(agent.get("agent")) + if self.sender and self.receiver: + # save all agent skills to skills.py + save_skills_to_file(self.workflow_skills, self.work_dir) + if history: + self._populate_history(history) + self.sender.initiate_chat( + self.receiver, + message=message, + clear_history=clear_history, + ) + else: + raise ValueError("Sender and receiver agents are not defined in the workflow configuration.") def _serialize_agent( self, @@ -184,13 +246,13 @@ def get_default_system_message(agent_type: str) -> str: config_list.append(sanitized_llm) agent.config.llm_config.config_list = config_list - agent.config.code_execution_config = load_code_execution_config( - agent.config.code_execution_config, work_dir=self.work_dir - ) + agent.config.code_execution_config = self.code_executor_pool.get(agent.config.code_execution_config, False) if skills: + for skill in skills: + self.workflow_skills.append(skill) skills_prompt = "" - skills_prompt = get_skills_from_prompt(skills, self.work_dir) + skills_prompt = get_skills_prompt(skills, self.work_dir) if agent.config.system_message: agent.config.system_message = agent.config.system_message + "\n\n" + skills_prompt else: @@ -241,7 +303,263 @@ def load(self, agent: Any) -> autogen.Agent: raise ValueError(f"Unknown agent type: {agent.type}") return agent - def run(self, message: str, clear_history: bool = False) -> None: + def _generate_output( + self, + message_text: str, + summary_method: str, + ) -> str: + """ + Generates the output response based on the workflow configuration and agent history. + + :param message_text: The text of the incoming message. + :param flow: An instance of `WorkflowManager`. + :param flow_config: An instance of `AgentWorkFlowConfig`. 
+ :return: The output response as a string. + """ + + output = "" + if summary_method == WorkFlowSummaryMethod.last: + (self.agent_history) + last_message = self.agent_history[-1]["message"]["content"] if self.agent_history else "" + output = last_message + elif summary_method == WorkFlowSummaryMethod.llm: + client = self.receiver.client + if self.connection_id: + status_message = SocketMessage( + type="agent_status", + data={ + "status": "summarizing", + "message": "Summarizing agent dialogue", + }, + connection_id=self.connection_id, + ) + self.send_message_function(status_message.model_dump(mode="json")) + output = summarize_chat_history( + task=message_text, + messages=self.agent_history, + client=client, + ) + + elif summary_method == "none": + output = "" + return output + + def _get_agent_usage(self, agent: autogen.Agent): + final_usage = [] + default_usage = {"total_cost": 0, "total_tokens": 0} + agent_usage = agent.client.total_usage_summary if agent.client else default_usage + agent_usage = { + "agent": agent.name, + "total_cost": find_key_value(agent_usage, "total_cost") or 0, + "total_tokens": find_key_value(agent_usage, "total_tokens") or 0, + } + final_usage.append(agent_usage) + + if type(agent) == ExtendedGroupChatManager: + print("groupchat found, processing", len(agent.groupchat.agents)) + for agent in agent.groupchat.agents: + agent_usage = agent.client.total_usage_summary if agent.client else default_usage or default_usage + agent_usage = { + "agent": agent.name, + "total_cost": find_key_value(agent_usage, "total_cost") or 0, + "total_tokens": find_key_value(agent_usage, "total_tokens") or 0, + } + final_usage.append(agent_usage) + return final_usage + + def _get_usage_summary(self): + sender_usage = self._get_agent_usage(self.sender) + receiver_usage = self._get_agent_usage(self.receiver) + + all_usage = [] + all_usage.extend(sender_usage) + all_usage.extend(receiver_usage) + # all_usage = [sender_usage, receiver_usage] + return all_usage + + 
def run(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> Message: + """ + Initiates a chat between the sender and receiver agents with an initial message + and an option to clear the history. + + Args: + message: The initial message to start the chat. + clear_history: If set to True, clears the chat history before initiating. + """ + + start_time = time.time() + self._run_workflow(message=message, history=history, clear_history=clear_history) + end_time = time.time() + + output = self._generate_output(message, self.workflow.get("summary_method", "last")) + + usage = self._get_usage_summary() + # print("usage", usage) + + result_message = Message( + content=output, + role="assistant", + meta={ + "messages": self.agent_history, + "summary_method": self.workflow.get("summary_method", "last"), + "time": end_time - start_time, + "files": get_modified_files(start_time, end_time, source_dir=self.work_dir), + "usage": usage, + }, + ) + return result_message + + +class SequentialWorkflowManager: + """ + WorkflowManager class to load agents from a provided configuration and run a chat between them sequentially. + """ + + def __init__( + self, + workflow: Union[Dict, str], + history: Optional[List[Message]] = None, + work_dir: str = None, + clear_work_dir: bool = True, + send_message_function: Optional[callable] = None, + connection_id: Optional[str] = None, + ) -> None: + """ + Initializes the WorkflowManager with agents specified in the config and optional message history. + + Args: + workflow (Union[Dict, str]): The workflow configuration. This can be a dictionary or a string which is a path to a JSON file. + history (Optional[List[Message]]): The message history. + work_dir (str): The working directory. + clear_work_dir (bool): If set to True, clears the working directory. + send_message_function (Optional[callable]): The function to send messages. + connection_id (Optional[str]): The connection identifier. 
+ """ + if isinstance(workflow, str): + if os.path.isfile(workflow): + with open(workflow, "r") as file: + self.workflow = json.load(file) + else: + raise FileNotFoundError(f"The file {workflow} does not exist.") + elif isinstance(workflow, dict): + self.workflow = workflow + else: + raise ValueError("The 'workflow' parameter should be either a dictionary or a valid JSON file path") + + # TODO - improved typing for workflow + self.send_message_function = send_message_function + self.connection_id = connection_id + self.work_dir = work_dir or "work_dir" + if clear_work_dir: + clear_folder(self.work_dir) + self.agent_history = [] + self.history = history or [] + self.sender = None + self.receiver = None + self.model_client = None + + def _run_workflow(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> None: + """ + Runs the workflow based on the provided configuration. + + Args: + message: The initial message to start the chat. + history: A list of messages to populate the agents' history. + clear_history: If set to True, clears the chat history before initiating. 
+ + """ + user_proxy = { + "config": { + "name": "user_proxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "code_execution_config": "local", + "default_auto_reply": "TERMINATE", + "description": "User Proxy Agent Configuration", + "llm_config": False, + "type": "userproxy", + } + } + sequential_history = [] + for i, agent in enumerate(self.workflow.get("agents", [])): + workflow = Workflow( + name="agent workflow", type=WorkFlowType.autonomous, summary_method=WorkFlowSummaryMethod.llm + ) + workflow = workflow.model_dump(mode="json") + agent = agent.get("agent") + workflow["agents"] = [ + {"agent": user_proxy, "link": {"agent_type": "sender"}}, + {"agent": agent, "link": {"agent_type": "receiver"}}, + ] + + auto_workflow = AutoWorkflowManager( + workflow=workflow, + history=history, + work_dir=self.work_dir, + clear_work_dir=True, + send_message_function=self.send_message_function, + connection_id=self.connection_id, + ) + task_prompt = ( + f""" + Your primary instructions are as follows: + {agent.get("task_instruction")} + Context for addressing your task is below: + ======= + {str(sequential_history)} + ======= + Now address your task: + """ + if i > 0 + else message + ) + result = auto_workflow.run(message=task_prompt, clear_history=clear_history) + sequential_history.append(result.content) + self.model_client = auto_workflow.receiver.client + print(f"======== end of sequence === {i}============") + self.agent_history.extend(result.meta.get("messages", [])) + + def _generate_output( + self, + message_text: str, + summary_method: str, + ) -> str: + """ + Generates the output response based on the workflow configuration and agent history. + + :param message_text: The text of the incoming message. + :param flow: An instance of `WorkflowManager`. + :param flow_config: An instance of `AgentWorkFlowConfig`. + :return: The output response as a string. 
+ """ + + output = "" + if summary_method == WorkFlowSummaryMethod.last: + (self.agent_history) + last_message = self.agent_history[-1]["message"]["content"] if self.agent_history else "" + output = last_message + elif summary_method == WorkFlowSummaryMethod.llm: + if self.connection_id: + status_message = SocketMessage( + type="agent_status", + data={ + "status": "summarizing", + "message": "Summarizing agent dialogue", + }, + connection_id=self.connection_id, + ) + self.send_message_function(status_message.model_dump(mode="json")) + output = summarize_chat_history( + task=message_text, + messages=self.agent_history, + client=self.model_client, + ) + + elif summary_method == "none": + output = "" + return output + + def run(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> Message: """ Initiates a chat between the sender and receiver agents with an initial message and an option to clear the history. @@ -250,11 +568,80 @@ def run(self, message: str, clear_history: bool = False) -> None: message: The initial message to start the chat. clear_history: If set to True, clears the chat history before initiating. """ - self.sender.initiate_chat( - self.receiver, - message=message, - clear_history=clear_history, + + start_time = time.time() + self._run_workflow(message=message, history=history, clear_history=clear_history) + end_time = time.time() + output = self._generate_output(message, self.workflow.get("summary_method", "last")) + + result_message = Message( + content=output, + role="assistant", + meta={ + "messages": self.agent_history, + "summary_method": self.workflow.get("summary_method", "last"), + "time": end_time - start_time, + "files": get_modified_files(start_time, end_time, source_dir=self.work_dir), + "task": message, + }, ) + return result_message + + +class WorkflowManager: + """ + WorkflowManager class to load agents from a provided configuration and run a chat between them. 
+ """ + + def __new__( + self, + workflow: Union[Dict, str], + history: Optional[List[Message]] = None, + work_dir: str = None, + clear_work_dir: bool = True, + send_message_function: Optional[callable] = None, + connection_id: Optional[str] = None, + ) -> None: + """ + Initializes the WorkflowManager with agents specified in the config and optional message history. + + Args: + workflow (Union[Dict, str]): The workflow configuration. This can be a dictionary or a string which is a path to a JSON file. + history (Optional[List[Message]]): The message history. + work_dir (str): The working directory. + clear_work_dir (bool): If set to True, clears the working directory. + send_message_function (Optional[callable]): The function to send messages. + connection_id (Optional[str]): The connection identifier. + """ + if isinstance(workflow, str): + if os.path.isfile(workflow): + with open(workflow, "r") as file: + self.workflow = json.load(file) + else: + raise FileNotFoundError(f"The file {workflow} does not exist.") + elif isinstance(workflow, dict): + self.workflow = workflow + else: + raise ValueError("The 'workflow' parameter should be either a dictionary or a valid JSON file path") + + if self.workflow.get("type") == WorkFlowType.autonomous.value: + return AutoWorkflowManager( + workflow=workflow, + history=history, + work_dir=work_dir, + clear_work_dir=clear_work_dir, + send_message_function=send_message_function, + connection_id=connection_id, + ) + elif self.workflow.get("type") == WorkFlowType.sequential.value: + return SequentialWorkflowManager( + workflow=workflow, + history=history, + work_dir=work_dir, + clear_work_dir=clear_work_dir, + send_message_function=send_message_function, + connection_id=connection_id, + ) class ExtendedConversableAgent(autogen.ConversableAgent): diff --git a/samples/apps/autogen-studio/frontend/gatsby-config.ts b/samples/apps/autogen-studio/frontend/gatsby-config.ts index 9644cfc03898..8fbc5259ed9d 100644 --- 
a/samples/apps/autogen-studio/frontend/gatsby-config.ts +++ b/samples/apps/autogen-studio/frontend/gatsby-config.ts @@ -1,5 +1,5 @@ import type { GatsbyConfig } from "gatsby"; -import fs from 'fs'; +import fs from "fs"; const envFile = `.env.${process.env.NODE_ENV}`; diff --git a/samples/apps/autogen-studio/frontend/package.json b/samples/apps/autogen-studio/frontend/package.json index b62adb4578d1..7a06f09dac03 100644 --- a/samples/apps/autogen-studio/frontend/package.json +++ b/samples/apps/autogen-studio/frontend/package.json @@ -18,6 +18,7 @@ }, "dependencies": { "@ant-design/charts": "^1.3.6", + "@ant-design/plots": "^2.2.2", "@headlessui/react": "^1.7.16", "@heroicons/react": "^2.0.18", "@mdx-js/mdx": "^1.6.22", diff --git a/samples/apps/autogen-studio/frontend/src/components/atoms.tsx b/samples/apps/autogen-studio/frontend/src/components/atoms.tsx index c4c1368a1232..a0864153f5ac 100644 --- a/samples/apps/autogen-studio/frontend/src/components/atoms.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/atoms.tsx @@ -751,7 +751,7 @@ export const PdfViewer = ({ url }: { url: string }) => { data={url} type="application/pdf" width="100%" - height="450px" + style={{ height: "calc(90vh - 200px)" }} >

PDF cannot be displayed.

diff --git a/samples/apps/autogen-studio/frontend/src/components/types.ts b/samples/apps/autogen-studio/frontend/src/components/types.ts index eba391446028..ca51003e7ed0 100644 --- a/samples/apps/autogen-studio/frontend/src/components/types.ts +++ b/samples/apps/autogen-studio/frontend/src/components/types.ts @@ -9,6 +9,8 @@ export interface IMessage { session_id?: number; connection_id?: string; workflow_id?: number; + meta?: any; + id?: number; } export interface IStatus { @@ -21,7 +23,7 @@ export interface IChatMessage { text: string; sender: "user" | "bot"; meta?: any; - msg_id: string; + id?: number; } export interface ILLMConfig { @@ -63,9 +65,9 @@ export interface IAgent { export interface IWorkflow { name: string; description: string; - sender: IAgent; - receiver: IAgent; - type: "twoagents" | "groupchat"; + sender?: IAgent; + receiver?: IAgent; + type?: "autonomous" | "sequential"; created_at?: string; updated_at?: string; summary_method?: "none" | "last" | "llm"; @@ -78,7 +80,7 @@ export interface IModelConfig { api_key?: string; api_version?: string; base_url?: string; - api_type?: "open_ai" | "azure" | "google"; + api_type?: "open_ai" | "azure" | "google" | "anthropic" | "mistral"; user_id?: string; created_at?: string; updated_at?: string; @@ -115,6 +117,8 @@ export interface IGalleryItem { export interface ISkill { name: string; content: string; + secrets?: any[]; + libraries?: string[]; id?: number; description?: string; user_id?: string; diff --git a/samples/apps/autogen-studio/frontend/src/components/utils.ts b/samples/apps/autogen-studio/frontend/src/components/utils.ts index 2264f5c66a21..e70590153a88 100644 --- a/samples/apps/autogen-studio/frontend/src/components/utils.ts +++ b/samples/apps/autogen-studio/frontend/src/components/utils.ts @@ -266,6 +266,18 @@ export const sampleModelConfig = (modelType: string = "open_ai") => { description: "Google Gemini Model model", }; + const anthropicConfig: IModelConfig = { + model: 
"claude-3-5-sonnet-20240620", + api_type: "anthropic", + description: "Claude 3.5 Sonnet model", + }; + + const mistralConfig: IModelConfig = { + model: "mistral", + api_type: "mistral", + description: "Mistral model", + }; + switch (modelType) { case "open_ai": return openaiConfig; @@ -273,6 +285,10 @@ export const sampleModelConfig = (modelType: string = "open_ai") => { return azureConfig; case "google": return googleConfig; + case "anthropic": + return anthropicConfig; + case "mistral": + return mistralConfig; default: return openaiConfig; } @@ -286,13 +302,36 @@ export const getRandomIntFromDateAndSalt = (salt: number = 43444) => { return randomInt; }; +export const getSampleWorkflow = (workflow_type: string = "autonomous") => { + const autonomousWorkflow: IWorkflow = { + name: "Default Chat Workflow", + description: "Autonomous Workflow", + type: "autonomous", + summary_method: "llm", + }; + const sequentialWorkflow: IWorkflow = { + name: "Default Sequential Workflow", + description: "Sequential Workflow", + type: "sequential", + summary_method: "llm", + }; + + if (workflow_type === "autonomous") { + return autonomousWorkflow; + } else if (workflow_type === "sequential") { + return sequentialWorkflow; + } else { + return autonomousWorkflow; + } +}; + export const sampleAgentConfig = (agent_type: string = "assistant") => { const llm_config: ILLMConfig = { config_list: [], temperature: 0.1, timeout: 600, cache_seed: null, - max_tokens: 1000, + max_tokens: 4000, }; const userProxyConfig: IAgentConfig = { @@ -357,90 +396,6 @@ export const sampleAgentConfig = (agent_type: string = "assistant") => { } }; -export const sampleWorkflowConfig = (type = "twoagents") => { - const llm_model_config: IModelConfig[] = []; - - const llm_config: ILLMConfig = { - config_list: llm_model_config, - temperature: 0.1, - timeout: 600, - cache_seed: null, - max_tokens: 1000, - }; - - const userProxyConfig: IAgentConfig = { - name: "userproxy", - human_input_mode: "NEVER", - 
max_consecutive_auto_reply: 15, - system_message: "You are a helpful assistant.", - default_auto_reply: "TERMINATE", - llm_config: false, - code_execution_config: "local", - }; - const userProxyFlowSpec: IAgent = { - type: "userproxy", - config: userProxyConfig, - }; - - const assistantConfig: IAgentConfig = { - name: "primary_assistant", - llm_config: llm_config, - human_input_mode: "NEVER", - max_consecutive_auto_reply: 8, - code_execution_config: "none", - system_message: - "You are a helpful AI assistant. Solve tasks using your coding and language skills. In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself. 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user. If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. 
Instead, use 'print' function for the output when relevant. Check the execution result returned by the user. If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible. Reply 'TERMINATE' in the end when everything is done.", - }; - - const assistantFlowSpec: IAgent = { - type: "assistant", - config: assistantConfig, - }; - - const workFlowConfig: IWorkflow = { - name: "Default Agent Workflow", - description: "Default Agent Workflow", - sender: userProxyFlowSpec, - receiver: assistantFlowSpec, - type: "twoagents", - }; - - const groupChatAssistantConfig = Object.assign( - { - admin_name: "groupchat_assistant", - messages: [], - max_round: 10, - speaker_selection_method: "auto", - allow_repeat_speaker: false, - description: "Group Chat Assistant", - }, - assistantConfig - ); - groupChatAssistantConfig.name = "groupchat_assistant"; - groupChatAssistantConfig.system_message = - "You are a helpful assistant skilled at cordinating a group of other assistants to solve a task. 
"; - - const groupChatFlowSpec: IAgent = { - type: "groupchat", - config: groupChatAssistantConfig, - }; - - const groupChatWorkFlowConfig: IWorkflow = { - name: "Default Group Workflow", - description: "Default Group Workflow", - sender: userProxyFlowSpec, - receiver: groupChatFlowSpec, - type: "groupchat", - }; - - if (type === "twoagents") { - return workFlowConfig; - } else if (type === "groupchat") { - return groupChatWorkFlowConfig; - } - return workFlowConfig; -}; - export const getSampleSkill = () => { const content = ` from typing import List @@ -495,7 +450,7 @@ def generate_and_save_images(query: str, image_size: str = "1024x1024") -> List[ `; const skill: ISkill = { - name: "generate_images", + name: "generate_and_save_images", description: "Generate and save images based on a user's query.", content: content, }; @@ -612,7 +567,7 @@ export const fetchVersion = () => { */ export const sanitizeConfig = ( data: any, - keys: string[] = ["api_key", "id", "created_at", "updated_at"] + keys: string[] = ["api_key", "id", "created_at", "updated_at", "secrets"] ): any => { if (Array.isArray(data)) { return data.map((item) => sanitizeConfig(item, keys)); diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx index 8800ebfbdd37..6fcb505cc7e6 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx @@ -141,13 +141,9 @@ const AgentsView = ({}: any) => { icon: DocumentDuplicateIcon, onClick: (e: any) => { e.stopPropagation(); - let newAgent = { ...agent }; + let newAgent = { ...sanitizeConfig(agent) }; newAgent.config.name = `${agent.config.name}_copy`; - newAgent.user_id = user?.email; - newAgent.updated_at = new Date().toISOString(); - if (newAgent.id) { - delete newAgent.id; - } + console.log("newAgent", newAgent); setNewAgent(newAgent); 
setShowNewAgentModal(true); }, @@ -187,7 +183,7 @@ const AgentsView = ({}: any) => { aria-hidden="true" className="my-2 break-words" > - {" "} +
{agent.type}
{" "} {truncateText(agent.config.description || "", 70)}
{
{" "} - Configure an agent that can reused in your agent workflow{" "} - {selectedAgent?.config.name} + Configure an agent that can be reused in your agent workflow . +
+ Tip: You can also create a Group of Agents ( New Agent - + GroupChat) which can have multiple agents in it. +
{agents && agents.length > 0 && (
diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/models.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/models.tsx index 2a3b0506d79c..87ae739b62e7 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/models.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/models.tsx @@ -6,7 +6,7 @@ import { PlusIcon, TrashIcon, } from "@heroicons/react/24/outline"; -import { Button, Dropdown, Input, MenuProps, Modal, message } from "antd"; +import { Dropdown, MenuProps, Modal, message } from "antd"; import * as React from "react"; import { IModelConfig, IStatus } from "../../types"; import { appContext } from "../../../hooks/provider"; @@ -17,14 +17,7 @@ import { timeAgo, truncateText, } from "../../utils"; -import { - BounceLoader, - Card, - CardHoverBar, - ControlRowView, - LoadingOverlay, -} from "../../atoms"; -import TextArea from "antd/es/input/TextArea"; +import { BounceLoader, Card, CardHoverBar, LoadingOverlay } from "../../atoms"; import { ModelConfigView } from "./utils/modelconfig"; const ModelsView = ({}: any) => { @@ -175,13 +168,8 @@ const ModelsView = ({}: any) => { icon: DocumentDuplicateIcon, onClick: (e: any) => { e.stopPropagation(); - let newModel = { ...model }; - newModel.model = `${model.model} Copy`; - newModel.user_id = user?.email; - newModel.updated_at = new Date().toISOString(); - if (newModel.id) { - delete newModel.id; - } + let newModel = { ...sanitizeConfig(model) }; + newModel.model = `${model.model}_copy`; setNewModel(newModel); setShowNewModelModal(true); }, diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/skills.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/skills.tsx index 77b50588dd20..7d3dfe75611f 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/skills.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/skills.tsx @@ -1,12 +1,15 
@@ import { ArrowDownTrayIcon, ArrowUpTrayIcon, + CodeBracketIcon, + CodeBracketSquareIcon, DocumentDuplicateIcon, InformationCircleIcon, + KeyIcon, PlusIcon, TrashIcon, } from "@heroicons/react/24/outline"; -import { Button, Input, Modal, message, MenuProps, Dropdown } from "antd"; +import { Button, Input, Modal, message, MenuProps, Dropdown, Tabs } from "antd"; import * as React from "react"; import { ISkill, IStatus } from "../../types"; import { appContext } from "../../../hooks/provider"; @@ -25,6 +28,8 @@ import { LoadingOverlay, MonacoEditor, } from "../../atoms"; +import { SkillSelector } from "./utils/selectors"; +import { SkillConfigView } from "./utils/skillconfig"; const SkillsView = ({}: any) => { const [loading, setLoading] = React.useState(false); @@ -109,38 +114,6 @@ const SkillsView = ({}: any) => { fetchJSON(listSkillsUrl, payLoad, onSuccess, onError); }; - const saveSkill = (skill: ISkill) => { - setError(null); - setLoading(true); - // const fetch; - skill.user_id = user?.email; - const payLoad = { - method: "POST", - headers: { - Accept: "application/json", - "Content-Type": "application/json", - }, - body: JSON.stringify(skill), - }; - - const onSuccess = (data: any) => { - if (data && data.status) { - message.success(data.message); - const updatedSkills = [data.data].concat(skills || []); - setSkills(updatedSkills); - } else { - message.error(data.message); - } - setLoading(false); - }; - const onError = (err: any) => { - setError(err); - message.error(err.message); - setLoading(false); - }; - fetchJSON(saveSkillsUrl, payLoad, onSuccess, onError); - }; - React.useEffect(() => { if (user) { // console.log("fetching messages", messages); @@ -173,12 +146,8 @@ const SkillsView = ({}: any) => { icon: DocumentDuplicateIcon, onClick: (e: any) => { e.stopPropagation(); - let newSkill = { ...skill }; - newSkill.name = `${skill.name} Copy`; - newSkill.user_id = user?.email; - if (newSkill.id) { - delete newSkill.id; - } + let newSkill = { 
...sanitizeConfig(skill) }; + newSkill.name = `${skill.name}_copy`; setNewSkill(newSkill); setShowNewSkillModal(true); }, @@ -245,6 +214,15 @@ const SkillsView = ({}: any) => { }) => { const editorRef = React.useRef(null); const [localSkill, setLocalSkill] = React.useState(skill); + + const closeModal = () => { + setSkill(null); + setShowSkillModal(false); + if (handler) { + handler(skill); + } + }; + return ( { onCancel={() => { setShowSkillModal(false); }} - footer={[ - , - , - ]} + footer={[]} > {localSkill && ( -
-
- { - const updatedSkill = { ...localSkill, name: e.target.value }; - setLocalSkill(updatedSkill); - }} - /> -
- -
- -
-
+ )}
); @@ -367,17 +305,17 @@ const SkillsView = ({}: any) => { showSkillModal={showSkillModal} setShowSkillModal={setShowSkillModal} handler={(skill: ISkill) => { - saveSkill(skill); + fetchSkills(); }} /> { - saveSkill(skill); + fetchSkills(); }} /> diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx index 17ab037485d7..885a1e402d0d 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx @@ -63,7 +63,7 @@ export const AgentConfigView = ({ const llm_config: ILLMConfig = agent?.config?.llm_config || { config_list: [], temperature: 0.1, - max_tokens: 1000, + max_tokens: 4000, }; const createAgent = (agent: IAgent) => { diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/export.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/export.tsx new file mode 100644 index 000000000000..bb74bd0e2e37 --- /dev/null +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/export.tsx @@ -0,0 +1,207 @@ +import { Button, Modal, message } from "antd"; +import * as React from "react"; +import { IWorkflow } from "../../../types"; +import { ArrowDownTrayIcon } from "@heroicons/react/24/outline"; +import { + checkAndSanitizeInput, + fetchJSON, + getServerUrl, + sanitizeConfig, +} from "../../../utils"; +import { appContext } from "../../../../hooks/provider"; +import { CodeBlock } from "../../../atoms"; + +export const ExportWorkflowModal = ({ + workflow, + show, + setShow, +}: { + workflow: IWorkflow | null; + show: boolean; + setShow: (show: boolean) => void; +}) => { + const serverUrl = getServerUrl(); + const { user } = React.useContext(appContext); + + const [error, setError] = React.useState(null); + const [loading, setLoading] = 
React.useState(false); + const [workflowDetails, setWorkflowDetails] = React.useState(null); + + const getWorkflowCode = (workflow: IWorkflow) => { + const workflowCode = `from autogenstudio import WorkflowManager +# load workflow from exported json workflow file. +workflow_manager = WorkflowManager(workflow="path/to/your/workflow_.json") + +# run the workflow on a task +task_query = "What is the height of the Eiffel Tower?. Dont write code, just respond to the question." +workflow_manager.run(message=task_query)`; + return workflowCode; + }; + + const getCliWorkflowCode = (workflow: IWorkflow) => { + const workflowCode = `autogenstudio serve --workflow=workflow.json --port=5000 + `; + return workflowCode; + }; + + const getGunicornWorkflowCode = (workflow: IWorkflow) => { + const workflowCode = `gunicorn -w $((2 * $(getconf _NPROCESSORS_ONLN) + 1)) --timeout 12600 -k uvicorn.workers.UvicornWorker autogenstudio.web.app:app --bind `; + + return workflowCode; + }; + + const fetchWorkFlow = (workflow: IWorkflow) => { + setError(null); + setLoading(true); + // const fetch; + const payLoad = { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }; + const downloadWorkflowUrl = `${serverUrl}/workflows/export/${workflow.id}?user_id=${user?.email}`; + + const onSuccess = (data: any) => { + if (data && data.status) { + setWorkflowDetails(data.data); + console.log("workflow details", data.data); + + const sanitized_name = + checkAndSanitizeInput(workflow.name).sanitizedText || workflow.name; + const file_name = `workflow_${sanitized_name}.json`; + const workflowData = sanitizeConfig(data.data); + const file = new Blob([JSON.stringify(workflowData)], { + type: "application/json", + }); + const downloadUrl = URL.createObjectURL(file); + const a = document.createElement("a"); + a.href = downloadUrl; + a.download = file_name; + a.click(); + } else { + message.error(data.message); + } + setLoading(false); + }; + const onError = (err: any) => { + 
setError(err); + message.error(err.message); + setLoading(false); + }; + fetchJSON(downloadWorkflowUrl, payLoad, onSuccess, onError); + }; + + React.useEffect(() => { + if (workflow && workflow.id && show) { + // fetchWorkFlow(workflow.id); + console.log("workflow modal ... component loaded", workflow); + } + }, [show]); + + return ( + + Export Workflow + + {workflow?.name} + {" "} + + } + width={800} + open={show} + onOk={() => { + setShow(false); + }} + onCancel={() => { + setShow(false); + }} + footer={[]} + > +
+
+ {" "} + You can use the following steps to start integrating your workflow + into your application.{" "} +
+ {workflow && workflow.id && ( + <> +
+
+
Step 1
+
+ Download your workflow as a JSON file by clicking the button + below. +
+ +
+ +
+
+ +
+
Step 2
+
+ Copy the following code snippet and paste it into your + application to run your workflow on a task. +
+
+ +
+
+
+ +
+
+ Step 3 (Deploy) +
+
+ You can also deploy your workflow as an API endpoint using the + autogenstudio python CLI. +
+ +
+ + +
 Note: this will start an endpoint on port 5000. You can change + the port by changing the port number. You can also scale this + using multiple workers (e.g., via an application server like + gunicorn) or wrap it in a docker container and deploy on a + cloud provider like Azure.
+ + +
+
+ + )} +
+
+ ); +}; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/modelconfig.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/modelconfig.tsx index 9f9c64500f7b..c4a39956ba0f 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/modelconfig.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/modelconfig.tsx @@ -23,18 +23,35 @@ const ModelTypeSelector = ({ value: "open_ai", description: "OpenAI or other endpoints that implement the OpenAI API", icon: , + hint: "In addition to OpenAI models, You can also use OSS models via tools like Ollama, vLLM, LMStudio etc. that provide OpenAI compatible endpoint.", }, { label: "Azure OpenAI", value: "azure", description: "Azure OpenAI endpoint", icon: , + hint: "Azure OpenAI endpoint", }, { label: "Gemini", value: "google", description: "Gemini", icon: , + hint: "Gemini", + }, + { + label: "Claude", + value: "anthropic", + description: "Anthropic Claude", + icon: , + hint: "Anthropic Claude models", + }, + { + label: "Mistral", + value: "mistral", + description: "Mistral", + icon: , + hint: "Mistral models", }, ]; @@ -46,7 +63,7 @@ const ModelTypeSelector = ({ return (
  • { - setSelectedHint(modelType.value); + setSelectedHint(modelType.hint); }} role="listitem" key={"modeltype" + i} @@ -78,13 +95,6 @@ const ModelTypeSelector = ({ ); }); - const hints: any = { - open_ai: - "In addition to OpenAI models, You can also use OSS models via tools like Ollama, vLLM, LMStudio etc. that provide OpenAI compatible endpoint.", - azure: "Azure OpenAI endpoint", - google: "Gemini", - }; - const [selectedHint, setSelectedHint] = React.useState("open_ai"); return ( @@ -94,7 +104,7 @@ const ModelTypeSelector = ({
    - {hints[selectedHint]} + {selectedHint}
    ); diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx index f3c9cb08aa9f..79275fe4ba2f 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx @@ -3,10 +3,10 @@ import { IAgent, IModelConfig, ISkill, IWorkflow } from "../../../types"; import { Card } from "../../../atoms"; import { fetchJSON, + getSampleWorkflow, getServerUrl, obscureString, sampleAgentConfig, - sampleWorkflowConfig, truncateText, } from "../../../utils"; import { @@ -19,6 +19,8 @@ import { theme, } from "antd"; import { + ArrowLongRightIcon, + ChatBubbleLeftRightIcon, CodeBracketSquareIcon, ExclamationTriangleIcon, InformationCircleIcon, @@ -354,7 +356,7 @@ export const AgentTypeSelector = ({ return ( <> -
    Select Agent Type
    +
    Select Agent Type
      {agentTypeRows}
    ); @@ -370,10 +372,18 @@ export const WorkflowTypeSelector = ({ const iconClass = "h-6 w-6 inline-block "; const workflowTypes = [ { - label: "Default", - value: "default", - description: <> Includes a sender and receiver. , - icon: , + label: "Autonomous (Chat)", + value: "autonomous", + description: + "Includes an initiator and receiver. The initiator is typically a user proxy agent, while the receiver could be any agent type (assistant or groupchat", + icon: , + }, + { + label: "Sequential", + value: "sequential", + description: + " Includes a list of agents in a given order. Each agent should have an nstruction and will summarize and pass on the results of their work to the next agent", + icon: , }, ]; const [seletectedWorkflowType, setSelectedWorkflowType] = React.useState< @@ -390,7 +400,7 @@ export const WorkflowTypeSelector = ({ onClick={() => { setSelectedWorkflowType(workflowType.value); if (workflow) { - const sampleWorkflow = sampleWorkflowConfig(); + const sampleWorkflow = getSampleWorkflow(workflowType.value); setWorkflow(sampleWorkflow); } }} @@ -398,9 +408,12 @@ export const WorkflowTypeSelector = ({
    {" "}
    {workflowType.icon}
    - + {" "} - {workflowType.description} + {truncateText(workflowType.description, 60)}
    @@ -410,7 +423,7 @@ export const WorkflowTypeSelector = ({ return ( <> -
    Select Workflow Type
    +
    Select Workflow Type
      {workflowTypeRows}
    ); @@ -964,17 +977,15 @@ export const ModelSelector = ({ agentId }: { agentId: number }) => { }; export const WorkflowAgentSelector = ({ - workflowId, + workflow, }: { - workflowId: number; + workflow: IWorkflow; }) => { const [error, setError] = useState(null); const [loading, setLoading] = useState(false); const [agents, setAgents] = useState([]); - const [senderTargetAgents, setSenderTargetAgents] = useState([]); - const [receiverTargetAgents, setReceiverTargetAgents] = useState( - [] - ); + const [linkedAgents, setLinkedAgents] = useState([]); + const serverUrl = getServerUrl(); const { user } = React.useContext(appContext); @@ -1008,11 +1019,8 @@ export const WorkflowAgentSelector = ({ fetchJSON(listAgentsUrl, payLoad, onSuccess, onError); }; - const fetchTargetAgents = ( - setTarget: (arg0: any) => void, - agentType: string - ) => { - const listTargetAgentsUrl = `${serverUrl}/workflows/link/agent/${workflowId}/${agentType}`; + const fetchLinkedAgents = () => { + const listTargetAgentsUrl = `${serverUrl}/workflows/link/agent/${workflow.id}`; setError(null); setLoading(true); const payLoad = { @@ -1024,7 +1032,8 @@ export const WorkflowAgentSelector = ({ const onSuccess = (data: any) => { if (data && data.status) { - setTarget(data.data); + setLinkedAgents(data.data); + console.log("linked agents", data.data); } else { message.error(data.message); } @@ -1042,7 +1051,8 @@ export const WorkflowAgentSelector = ({ const linkWorkflowAgent = ( workflowId: number, targetAgentId: number, - agentType: string + agentType: string, + sequenceId?: number ) => { setError(null); setLoading(true); @@ -1052,15 +1062,15 @@ export const WorkflowAgentSelector = ({ "Content-Type": "application/json", }, }; - const linkAgentUrl = `${serverUrl}/workflows/link/agent/${workflowId}/${targetAgentId}/${agentType}`; + let linkAgentUrl; + linkAgentUrl = `${serverUrl}/workflows/link/agent/${workflowId}/${targetAgentId}/${agentType}`; + if (agentType === "sequential") { + linkAgentUrl = 
`${serverUrl}/workflows/link/agent/${workflowId}/${targetAgentId}/${agentType}/${sequenceId}`; + } const onSuccess = (data: any) => { if (data && data.status) { message.success(data.message); - if (agentType === "sender") { - fetchTargetAgents(setSenderTargetAgents, "sender"); - } else { - fetchTargetAgents(setReceiverTargetAgents, "receiver"); - } + fetchLinkedAgents(); } else { message.error(data.message); } @@ -1076,11 +1086,7 @@ export const WorkflowAgentSelector = ({ fetchJSON(linkAgentUrl, payLoad, onSuccess, onError); }; - const unlinkWorkflowAgent = ( - workflowId: number, - targetAgentId: number, - agentType: string - ) => { + const unlinkWorkflowAgent = (agent: IAgent, link: any) => { setError(null); setLoading(true); const payLoad = { @@ -1089,16 +1095,17 @@ export const WorkflowAgentSelector = ({ "Content-Type": "application/json", }, }; - const unlinkAgentUrl = `${serverUrl}/workflows/link/agent/${workflowId}/${targetAgentId}/${agentType}`; + + let unlinkAgentUrl; + unlinkAgentUrl = `${serverUrl}/workflows/link/agent/${workflow.id}/${agent.id}/${link.agent_type}`; + if (link.agent_type === "sequential") { + unlinkAgentUrl = `${serverUrl}/workflows/link/agent/${workflow.id}/${agent.id}/${link.agent_type}/${link.sequence_id}`; + } const onSuccess = (data: any) => { if (data && data.status) { message.success(data.message); - if (agentType === "sender") { - fetchTargetAgents(setSenderTargetAgents, "sender"); - } else { - fetchTargetAgents(setReceiverTargetAgents, "receiver"); - } + fetchLinkedAgents(); } else { message.error(data.message); } @@ -1116,8 +1123,7 @@ export const WorkflowAgentSelector = ({ useEffect(() => { fetchAgents(); - fetchTargetAgents(setSenderTargetAgents, "sender"); - fetchTargetAgents(setReceiverTargetAgents, "receiver"); + fetchLinkedAgents(); }, []); const agentItems: MenuProps["items"] = @@ -1145,9 +1151,26 @@ export const WorkflowAgentSelector = ({ const receiverOnclick: MenuProps["onClick"] = ({ key }) => { const selectedIndex = 
parseInt(key.toString()); let selectedAgent = agents[selectedIndex]; + if (selectedAgent && selectedAgent.id && workflow.id) { + linkWorkflowAgent(workflow.id, selectedAgent.id, "receiver"); + } + }; - if (selectedAgent && selectedAgent.id) { - linkWorkflowAgent(workflowId, selectedAgent.id, "receiver"); + const sequenceOnclick: MenuProps["onClick"] = ({ key }) => { + const selectedIndex = parseInt(key.toString()); + let selectedAgent = agents[selectedIndex]; + + if (selectedAgent && selectedAgent.id && workflow.id) { + const sequenceId = + linkedAgents.length > 0 + ? linkedAgents[linkedAgents.length - 1].link.sequence_id + 1 + : 0; + linkWorkflowAgent( + workflow.id, + selectedAgent.id, + "sequential", + sequenceId + ); } }; @@ -1155,18 +1178,16 @@ export const WorkflowAgentSelector = ({ const selectedIndex = parseInt(key.toString()); let selectedAgent = agents[selectedIndex]; - if (selectedAgent && selectedAgent.id) { - linkWorkflowAgent(workflowId, selectedAgent.id, "sender"); + if (selectedAgent && selectedAgent.id && workflow.id) { + linkWorkflowAgent(workflow.id, selectedAgent.id, "sender"); } }; - const handleRemoveAgent = (index: number, agentType: string) => { - const targetAgents = - agentType === "sender" ? senderTargetAgents : receiverTargetAgents; - const agent = targetAgents[index]; - if (agent && agent.id) { - unlinkWorkflowAgent(workflowId, agent.id, agentType); + const handleRemoveAgent = (agent: IAgent, link: any) => { + if (agent && agent.id && workflow.id) { + unlinkWorkflowAgent(agent, link); } + console.log(link); }; const { token } = useToken(); @@ -1185,9 +1206,11 @@ export const WorkflowAgentSelector = ({ onClick: MenuProps["onClick"]; agentType: string; }) => { - const targetAgents = - agentType === "sender" ? 
senderTargetAgents : receiverTargetAgents; - const agentButtons = targetAgents.map((agent, i) => { + const targetAgents = linkedAgents.filter( + (row) => row.link.agent_type === agentType + ); + + const agentButtons = targetAgents.map(({ agent, link }, i) => { const tooltipText = ( <>
    {agent.config.name}
    @@ -1197,32 +1220,38 @@ export const WorkflowAgentSelector = ({ ); return ( -
    -
    +
    +
    {" "} - -
    {agent.config.name}
    {" "} -
    -
    { - e.stopPropagation(); // Prevent opening the modal to edit - handleRemoveAgent(i, agentType); - }} - className="ml-1 text-primary hover:text-accent duration-300" - > - +
    + {" "} + +
    {agent.config.name}
    {" "} +
    +
    { + e.stopPropagation(); // Prevent opening the modal to edit + handleRemoveAgent(agent, link); + }} + className="ml-1 text-primary hover:text-accent duration-300" + > + +
    + {link.agent_type === "sequential" && + i !== targetAgents.length - 1 && ( +
    + {" "} +
    + )}
    ); }); return ( -
    +
    {(!targetAgents || targetAgents.length === 0) && (
    @@ -1239,13 +1268,14 @@ export const WorkflowAgentSelector = ({ remove current agents and add new ones.
    )} - {targetAgents && targetAgents.length < 1 && ( + {((targetAgents.length < 1 && agentType !== "sequential") || + agentType === "sequential") && ( ( -
    +
    {React.cloneElement(menu as React.ReactElement, { style: { boxShadow: "none" }, })} @@ -1268,7 +1298,7 @@ export const WorkflowAgentSelector = ({
    {" "}
    Add {title} @@ -1282,33 +1312,48 @@ export const WorkflowAgentSelector = ({ return (
    -
    -
    -

    - Initiator{" "} - - - -

    -
      - -
    + {workflow.type === "autonomous" && ( +
    +
    +

    + Initiator{" "} + + + +

    +
      + +
    +
    +
    +

    Receiver

    +
      + +
    +
    -
    -

    Receiver

    + )} + + {workflow.type === "sequential" && ( +
    +
    Agents
    -
    + )}
    ); }; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/skillconfig.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/skillconfig.tsx new file mode 100644 index 000000000000..8a7a2f24c7f0 --- /dev/null +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/skillconfig.tsx @@ -0,0 +1,295 @@ +import React from "react"; +import { fetchJSON, getServerUrl, sampleModelConfig } from "../../../utils"; +import { Button, Input, message, theme } from "antd"; +import { + CpuChipIcon, + EyeIcon, + EyeSlashIcon, + InformationCircleIcon, + PlusIcon, + TrashIcon, +} from "@heroicons/react/24/outline"; +import { ISkill, IStatus } from "../../../types"; +import { Card, ControlRowView, MonacoEditor } from "../../../atoms"; +import TextArea from "antd/es/input/TextArea"; +import { appContext } from "../../../../hooks/provider"; + +const SecretsEditor = ({ + secrets = [], + updateSkillConfig, +}: { + secrets: { secret: string; value: string }[]; + updateSkillConfig: (key: string, value: any) => void; +}) => { + const [editingIndex, setEditingIndex] = React.useState(null); + const [newSecret, setNewSecret] = React.useState(""); + const [newValue, setNewValue] = React.useState(""); + + const toggleEditing = (index: number) => { + setEditingIndex(editingIndex === index ? null : index); + }; + + const handleAddSecret = () => { + if (newSecret && newValue) { + const updatedSecrets = [ + ...secrets, + { secret: newSecret, value: newValue }, + ]; + updateSkillConfig("secrets", updatedSecrets); + setNewSecret(""); + setNewValue(""); + } + }; + + const handleRemoveSecret = (index: number) => { + const updatedSecrets = secrets.filter((_, i) => i !== index); + updateSkillConfig("secrets", updatedSecrets); + }; + + const handleSecretChange = (index: number, key: string, value: string) => { + const updatedSecrets = secrets.map((item, i) => + i === index ? 
{ ...item, [key]: value } : item + ); + updateSkillConfig("secrets", updatedSecrets); + }; + + return ( +
    + {secrets && ( +
    + {secrets.map((secret, index) => ( +
    + + handleSecretChange(index, "secret", e.target.value) + } + className="flex-1" + /> + + handleSecretChange(index, "value", e.target.value) + } + className="flex-1" + /> +
    + ))} +
    + )} +
    + setNewSecret(e.target.value)} + className="flex-1" + /> + setNewValue(e.target.value)} + className="flex-1" + /> +
    +
    + ); +}; + +export const SkillConfigView = ({ + skill, + setSkill, + close, +}: { + skill: ISkill; + setSkill: (newModel: ISkill) => void; + close: () => void; +}) => { + const [loading, setLoading] = React.useState(false); + + const serverUrl = getServerUrl(); + const { user } = React.useContext(appContext); + const testModelUrl = `${serverUrl}/skills/test`; + const createSkillUrl = `${serverUrl}/skills`; + + const createSkill = (skill: ISkill) => { + setLoading(true); + skill.user_id = user?.email; + const payLoad = { + method: "POST", + headers: { + Accept: "application/json", + "Content-Type": "application/json", + }, + body: JSON.stringify(skill), + }; + + const onSuccess = (data: any) => { + if (data && data.status) { + message.success(data.message); + setSkill(data.data); + } else { + message.error(data.message); + } + setLoading(false); + }; + const onError = (err: any) => { + message.error(err.message); + setLoading(false); + }; + const onFinal = () => { + setLoading(false); + setControlChanged(false); + }; + fetchJSON(createSkillUrl, payLoad, onSuccess, onError, onFinal); + }; + + const [controlChanged, setControlChanged] = React.useState(false); + + const updateSkillConfig = (key: string, value: string) => { + if (skill) { + const updatedSkill = { ...skill, [key]: value }; + // setSkill(updatedModelConfig); + setSkill(updatedSkill); + } + setControlChanged(true); + }; + + const hasChanged = !controlChanged && skill.id !== undefined; + const editorRef = React.useRef(null); + + return ( +
    + {skill && ( +
    +
    +
    +
    +
    + { + updateSkillConfig("content", value); + }} + /> +
    +
    +
    +
    +
    + { + updateSkillConfig("name", e.target.value); + }} + /> + } + /> + + { + updateSkillConfig("description", e.target.value); + }} + /> + } + /> + + + } + /> +
    +
    +
    +
    + )} + +
    + {/* */} + + {!hasChanged && ( + + )} + + +
    +
    + ); +}; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/workflowconfig.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/workflowconfig.tsx index 8b97f3118629..c42c2e9be302 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/workflowconfig.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/workflowconfig.tsx @@ -165,7 +165,7 @@ export const WorkflowViewConfig = ({ )} {workflow?.id && (
    { }; const workflowTypes: MenuProps["items"] = [ - { - key: "twoagents", - label: ( -
    - {" "} - - Two Agents -
    - ), - }, - { - key: "groupchat", - label: ( -
    - - Group Chat -
    - ), - }, - { - type: "divider", - }, + // { + // key: "twoagents", + // label: ( + //
    + // {" "} + // + // Two Agents + //
    + // ), + // }, + // { + // key: "groupchat", + // label: ( + //
    + // + // Group Chat + //
    + // ), + // }, + // { + // type: "divider", + // }, { key: "uploadworkflow", label: ( @@ -328,7 +341,7 @@ const WorkflowView = ({}: any) => { uploadWorkflow(); return; } - showWorkflow(sampleWorkflowConfig(key)); + showWorkflow(sampleWorkflow); }; return ( @@ -352,6 +365,12 @@ const WorkflowView = ({}: any) => { }} /> + +
    @@ -366,7 +385,7 @@ const WorkflowView = ({}: any) => { placement="bottomRight" trigger={["click"]} onClick={() => { - showWorkflow(sampleWorkflowConfig()); + showWorkflow(sampleWorkflow); }} > diff --git a/samples/apps/autogen-studio/frontend/src/components/views/playground/chatbox.tsx b/samples/apps/autogen-studio/frontend/src/components/views/playground/chatbox.tsx index 737269ea262f..e0b10f3b1154 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/playground/chatbox.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/playground/chatbox.tsx @@ -1,15 +1,18 @@ import { ArrowPathIcon, + ChatBubbleLeftRightIcon, Cog6ToothIcon, DocumentDuplicateIcon, ExclamationTriangleIcon, InformationCircleIcon, PaperAirplaneIcon, + SignalSlashIcon, } from "@heroicons/react/24/outline"; import { Button, Dropdown, MenuProps, + Tabs, message as ToastMessage, Tooltip, message, @@ -33,6 +36,7 @@ import { MarkdownView, } from "../../atoms"; import { useConfigStore } from "../../../hooks/store"; +import ProfilerView from "./utils/profiler"; let socketMsgs: any[] = []; @@ -93,7 +97,7 @@ const ChatBox = ({ const messages = useConfigStore((state) => state.messages); const setMessages = useConfigStore((state) => state.setMessages); - const parseMessage = (message: any) => { + const parseMessage = (message: IMessage) => { let meta; try { meta = JSON.parse(message.meta); @@ -104,7 +108,7 @@ const ChatBox = ({ text: message.content, sender: message.role === "user" ? "user" : "bot", meta: meta, - msg_id: message.msg_id, + id: message.id, }; return msg; }; @@ -237,10 +241,45 @@ const ChatBox = ({ />
    )} - {message.meta && ( -
    - -
    + {message.meta && !isUser && ( + <> + {" "} + + {" "} + + Agent Messages + + ), + key: "1", + children: ( +
    + +
    + ), + }, + { + label: ( +
    + {" "} + {" "} + Profiler +
    + ), + key: "2", + children: ( +
    + +
    + ), + }, + ]} + /> + )}
    @@ -409,7 +448,6 @@ const ChatBox = ({ const userMessage: IChatMessage = { text: query, sender: "user", - msg_id: guid(), }; messageHolder.push(userMessage); setMessages(messageHolder); diff --git a/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/bar.tsx b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/bar.tsx new file mode 100644 index 000000000000..09f6443b71ab --- /dev/null +++ b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/bar.tsx @@ -0,0 +1,58 @@ +import { Bar, Line } from "@ant-design/plots"; +import * as React from "react"; +import { IStatus } from "../../../../types"; + +const BarChartViewer = ({ data }: { data: any | null }) => { + const [error, setError] = React.useState({ + status: true, + message: "All good", + }); + + const [loading, setLoading] = React.useState(false); + + const config = { + data: data.bar, + xField: "agent", + yField: "message", + colorField: "tool_call", + stack: true, + axis: { + y: { labelFormatter: "" }, + x: { + labelSpacing: 4, + }, + }, + style: { + radiusTopLeft: 10, + radiusTopRight: 10, + }, + height: 60 * data.agents.length, + }; + + const config_code_exec = Object.assign({}, config); + config_code_exec.colorField = "code_execution"; + + return ( +
    +
    +
    +
    +
    + {" "} + Tool Call +
    + +
    +
    +
    + {" "} + Code Execution Status +
    + +
    +
    +
    +
    + ); +}; +export default BarChartViewer; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/profiler.tsx b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/profiler.tsx new file mode 100644 index 000000000000..a2c3e747e75d --- /dev/null +++ b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/profiler.tsx @@ -0,0 +1,125 @@ +import { Tooltip, message } from "antd"; +import * as React from "react"; +import { IStatus, IChatMessage } from "../../../types"; +import { fetchJSON, getServerUrl } from "../../../utils"; +import { appContext } from "../../../../hooks/provider"; +import { InformationCircleIcon } from "@heroicons/react/24/outline"; + +const BarChartViewer = React.lazy(() => import("./charts/bar")); + +const ProfilerView = ({ + agentMessage, +}: { + agentMessage: IChatMessage | null; +}) => { + const [error, setError] = React.useState({ + status: true, + message: "All good", + }); + + const [loading, setLoading] = React.useState(false); + const [profile, setProfile] = React.useState(null); + + const { user } = React.useContext(appContext); + const serverUrl = getServerUrl(); + + const fetchProfile = (messageId: number) => { + const profilerUrl = `${serverUrl}/profiler/${messageId}?user_id=${user?.email}`; + setError(null); + setLoading(true); + const payLoad = { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }; + + const onSuccess = (data: any) => { + console.log(data); + if (data && data.status) { + setProfile(data.data); + setTimeout(() => { + // scroll parent to bottom + const parent = document.getElementById("chatbox"); + if (parent) { + parent.scrollTop = parent.scrollHeight; + } + }, 4000); + } else { + message.error(data.message); + } + setLoading(false); + }; + const onError = (err: any) => { + setError(err); + message.error(err.message); + setLoading(false); + }; + fetchJSON(profilerUrl, payLoad, onSuccess, onError); + }; + + 
React.useEffect(() => { + if (user && agentMessage && agentMessage.id) { + fetchProfile(agentMessage.id); + } + }, []); + + const UsageViewer = ({ usage }: { usage: any }) => { + const usageRows = usage.map((usage: any, index: number) => ( +
    + {(usage.total_cost != 0 || usage.total_tokens != 0) && ( + <> +
    + {usage.agent} +
    +
    + {usage.total_tokens && usage.total_tokens != 0 && ( +
    +
    + {usage.total_tokens} +
    +
    tokens
    +
    + )} + {usage.total_cost && usage.total_cost != 0 && ( +
    +
    + {usage.total_cost?.toFixed(3)} +
    +
    USD
    +
    + )} +
    + + )} +
    + )); + return ( +
    {usage && usageRows}
    + ); + }; + + return ( +
    +
    + {/* {profile && } */} + {profile && } + +
    +
    + LLM Costs + + + +
    + {profile && profile.usage && } +
    +
    +
    + ); +}; +export default ProfilerView; diff --git a/samples/apps/autogen-studio/frontend/src/styles/global.css b/samples/apps/autogen-studio/frontend/src/styles/global.css index a46b3712ded0..2d1517497de7 100644 --- a/samples/apps/autogen-studio/frontend/src/styles/global.css +++ b/samples/apps/autogen-studio/frontend/src/styles/global.css @@ -289,7 +289,8 @@ iiz__zoom-img { .ant-modal-footer { @apply border-secondary !important; } -.ant-btn { +.ant-btn, +.ant-btn:hover { @apply text-primary !important; } :where(.ant-btn).ant-btn-compact-item.ant-btn-primary:not([disabled]) @@ -333,6 +334,12 @@ iiz__zoom-img { @apply bg-primary text-primary !important; } +.ant-dropdown-menu { + max-height: 250px; + overflow: auto; + @apply scroll !important; +} + /* .ant-radio-input::before { @apply bg-primary !important; } */ diff --git a/samples/apps/autogen-studio/notebooks/agent_spec.json b/samples/apps/autogen-studio/notebooks/agent_spec.json deleted file mode 100644 index 72d1e21ef1a7..000000000000 --- a/samples/apps/autogen-studio/notebooks/agent_spec.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "name": "General Agent Workflow", - "description": "A general agent workflow", - "sender": { - "type": "userproxy", - "config": { - "name": "userproxy", - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 5, - "system_message": "", - "llm_config": false, - "code_execution_config": { - "work_dir": null, - "use_docker": false - } - } - }, - "receiver": { - "type": "assistant", - "config": { - "name": "primary_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. 
IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." - } - }, - "type": "twoagents" -} diff --git a/samples/apps/autogen-studio/notebooks/groupchat_spec.json b/samples/apps/autogen-studio/notebooks/groupchat_spec.json deleted file mode 100644 index 21cced7135b7..000000000000 --- a/samples/apps/autogen-studio/notebooks/groupchat_spec.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "name": "Travel Agent Group Chat Workflow", - "description": "A group chat workflow", - "type": "groupchat", - "sender": { - "type": "userproxy", - "config": { - "name": "userproxy", - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 5, - "system_message": "", - "llm_config": false, - "code_execution_config": { - "work_dir": null, - "use_docker": false - } - } - }, - "receiver": { - "type": "groupchat", - "description": "A group chat workflow", - "config": { - "name": "group_chat_manager", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "system_message": "Group chat manager" - }, - "groupchat_config": { - "admin_name": "Admin", - "max_round": 10, - "speaker_selection_method": "auto", - - "agents": [ - { - "type": "assistant", - "config": { - "name": "primary_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - 
], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can suggest a travel itinerary for a user. You are the primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN that ends with the word TERMINATE. " - } - }, - { - "type": "assistant", - "config": { - "name": "local_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can review travel plans, providing critical feedback on how the trip can be enriched for enjoyment of the local culture. If the plan already includes local experiences, you can mention that the plan is satisfactory, with rationale." - } - }, - { - "type": "assistant", - "config": { - "name": "language_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale." 
- } - } - ] - } - } -} diff --git a/samples/apps/autogen-studio/notebooks/travel_groupchat.json b/samples/apps/autogen-studio/notebooks/travel_groupchat.json new file mode 100644 index 000000000000..efcff443ef18 --- /dev/null +++ b/samples/apps/autogen-studio/notebooks/travel_groupchat.json @@ -0,0 +1,273 @@ +{ + "user_id": "guestuser@gmail.com", + "name": "Travel Planning Workflow", + "type": "autonomous", + "sample_tasks": [ + "Plan a 3 day trip to Hawaii Islands.", + "Plan an eventful and exciting trip to Uzbeksitan." + ], + "version": "0.0.1", + "description": "Travel workflow", + "summary_method": "llm", + "agents": [ + { + "agent": { + "version": "0.0.1", + "config": { + "name": "user_proxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant", + "is_termination_msg": null, + "code_execution_config": "local", + "default_auto_reply": "TERMINATE", + "description": "User Proxy Agent Configuration", + "llm_config": false, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "user_id": "guestuser@gmail.com", + "type": "userproxy", + "task_instruction": null, + "skills": [], + "models": [], + "agents": [] + }, + "link": { + "agent_id": 52, + "workflow_id": 18, + "agent_type": "sender", + "sequence_id": 0 + } + }, + { + "agent": { + "version": "0.0.1", + "config": { + "name": "travel_groupchat", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a group chat manager", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "TERMINATE", + "description": "Group Chat Agent Configuration", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 2048, + "extra_body": null + }, + 
"admin_name": "groupchat", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "user_id": "guestuser@gmail.com", + "type": "groupchat", + "task_instruction": null, + "skills": [], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [ + { + "version": "0.0.1", + "config": { + "name": "user_proxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant", + "is_termination_msg": null, + "code_execution_config": "local", + "default_auto_reply": "TERMINATE", + "description": "User Proxy Agent Configuration", + "llm_config": false, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "user_id": "guestuser@gmail.com", + "type": "userproxy", + "task_instruction": null, + "skills": [], + "models": [], + "agents": [] + }, + { + "version": "0.0.1", + "config": { + "name": "planner_assistant", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant that can suggest a travel plan for a user and utilize any context information provided. Do not ask user for additional context. You are the primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. 
When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "", + "description": "The primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant).", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 2048, + "extra_body": null + }, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "user_id": "guestuser@gmail.com", + "type": "assistant", + "task_instruction": null, + "skills": [], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [] + }, + { + "version": "0.0.1", + "config": { + "name": "local_assistant", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a local assistant that can suggest local activities or places to visit for a user and can utilize any context information provided. You can suggest local activities, places to visit, restaurants to eat at, etc. You can also provide information about the weather, local events, etc. You can provide information about the local area. 
Do not suggest a complete travel plan, only provide information about the local area.", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "", + "description": "Local Assistant Agent", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 2048, + "extra_body": null + }, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "user_id": "guestuser@gmail.com", + "type": "assistant", + "task_instruction": null, + "skills": [], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [] + }, + { + "version": "0.0.1", + "config": { + "name": "language_assistant", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. 
If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale.", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "", + "description": "Language Assistant Agent", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 2048, + "extra_body": null + }, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "user_id": "guestuser@gmail.com", + "type": "assistant", + "task_instruction": null, + "skills": [], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [] + } + ] + }, + "link": { + "agent_id": 54, + "workflow_id": 18, + "agent_type": "receiver", + "sequence_id": 0 + } + } + ] +} diff --git a/samples/apps/autogen-studio/notebooks/tutorial.ipynb b/samples/apps/autogen-studio/notebooks/tutorial.ipynb index 7e80f17b7b55..4f1f0ce6145e 100644 --- a/samples/apps/autogen-studio/notebooks/tutorial.ipynb +++ b/samples/apps/autogen-studio/notebooks/tutorial.ipynb @@ -2,13 +2,11 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ - "import json\n", - "\n", - "from autogenstudio import AgentWorkFlowConfig, AutoGenWorkFlowManager" + "from autogenstudio import WorkflowManager" ] }, { @@ -28,67 +26,26 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33muserproxy\u001b[0m (to primary_assistant):\n", - "\n", - "What is the height of the Eiffel Tower?. 
Dont write code, just respond to the question.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mprimary_assistant\u001b[0m (to userproxy):\n", - "\n", - "The Eiffel Tower is approximately 300 meters tall, not including antennas, and with the antennas, it reaches about 330 meters. TERMINATE.\n", - "\n", - "--------------------------------------------------------------------------------\n" - ] - } - ], + "outputs": [], "source": [ - "# load an agent specification in JSON\n", - "agent_spec = json.load(open(\"agent_spec.json\"))\n", - "\n", - "# Create a An AutoGen Workflow Configuration from the agent specification\n", - "agent_work_flow_config = AgentWorkFlowConfig(**agent_spec)\n", + "# load workflow from json file\n", + "workflow_manager = WorkflowManager(workflow=\"two_agent.json\")\n", "\n", - "agent_work_flow = AutoGenWorkFlowManager(agent_work_flow_config)\n", - "\n", - "# # Run the workflow on a task\n", + "# run the workflow on a task\n", "task_query = \"What is the height of the Eiffel Tower?. Dont write code, just respond to the question.\"\n", - "agent_work_flow.run(message=task_query)" + "workflow_manager.run(message=task_query)" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'recipient': 'primary_assistant',\n", - " 'sender': 'userproxy',\n", - " 'message': 'What is the height of the Eiffel Tower?. Dont write code, just respond to the question.',\n", - " 'timestamp': '2024-02-07T12:34:35.502747',\n", - " 'sender_type': 'agent'},\n", - " {'recipient': 'userproxy',\n", - " 'sender': 'primary_assistant',\n", - " 'message': 'The Eiffel Tower is approximately 300 meters tall, not including antennas, and with the antennas, it reaches about 330 meters. 
TERMINATE.',\n", - " 'timestamp': '2024-02-07T12:34:35.508855',\n", - " 'sender_type': 'agent'}]" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "agent_work_flow.agent_history" + "# print the agent history\n", + "workflow_manager.agent_history" ] }, { @@ -100,289 +57,16 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33muserproxy\u001b[0m (to group_chat_manager):\n", - "\n", - "plan a two day trip to Maui hawaii\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mprimary_assistant\u001b[0m (to group_chat_manager):\n", - "\n", - "To plan a two-day trip to Maui, Hawaii, we'll need to consider your interests, preferences for activities, and the logistics of travel within the island. Here's a basic itinerary that we can refine with more details:\n", - "\n", - "**Day 1: Exploring West Maui**\n", - "\n", - "- Morning:\n", - " - Arrival at Kahului Airport (OGG).\n", - " - Pick up rental car.\n", - " - Breakfast at a local café near the airport.\n", - " - Drive to Lahaina, a historic whaling village.\n", - "\n", - "- Midday:\n", - " - Visit Lahaina Historic Trail for a self-guided walking tour.\n", - " - Lunch at a seaside restaurant in Lahaina.\n", - "\n", - "- Afternoon:\n", - " - Snorkeling tour at Ka'anapali Beach.\n", - " - Relax on the beach or by the hotel pool.\n", - "\n", - "- Evening:\n", - " - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau.\n", - " - Return to hotel for overnight stay.\n", - "\n", - "**Day 2: The Road to Hana**\n", - "\n", - "- Early Morning:\n", - " - Check out of the hotel.\n", - " - Grab a quick breakfast and coffee to go.\n", - "\n", - "- Morning to Afternoon:\n", - " - Begin the scenic drive on the Road to Hana.\n", - " - Stop at Twin Falls for a 
short hike and swim.\n", - " - Visit Waianapanapa State Park to see the black sand beach.\n", - " - Picnic lunch at one of the many lookout points.\n", - "\n", - "- Mid to Late Afternoon:\n", - " - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\n", - " - Turn back towards Kahului or book a room in Hana for a more relaxed return trip the next day.\n", - "\n", - "- Evening:\n", - " - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay.\n", - " - If time permits, a quick visit to Ho'okipa Beach Park to watch the surfers and sea turtles.\n", - "\n", - "- Night:\n", - " - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\n", - "\n", - "This itinerary is just a starting point. Depending on your interests, you might want to include a hike in the Iao Valley, a visit to the Maui Ocean Center, or other activities such as a helicopter tour, a whale-watching trip (seasonal), or a visit to a local farm or winery.\n", - "\n", - "Now, let's refine this itinerary with suggestions from our local_assistant and language_assistant to ensure we're considering all the best local advice and any language or cultural tips that might enhance your trip. \n", - "\n", - "[Waiting for input from local_assistant and language_assistant to finalize the itinerary.]\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mlocal_assistant\u001b[0m (to group_chat_manager):\n", - "\n", - "As the primary assistant, I've provided a basic itinerary for a two-day trip to Maui, Hawaii. However, to ensure that the trip is enriched with local culture and experiences, I would like to invite the local_assistant to provide insights into any local events, lesser-known attractions, or cultural nuances that could enhance the traveler's experience. 
Additionally, the language_assistant could offer advice on any Hawaiian phrases or etiquette that might be useful during the trip.\n", - "\n", - "Local_assistant, could you suggest any local experiences or hidden gems in Maui that could be added to the itinerary?\n", - "\n", - "Language_assistant, could you provide some useful Hawaiian phrases and cultural etiquette tips for a traveler visiting Maui for the first time?\n", - "\n", - "[Note: The local_assistant and language_assistant roles are hypothetical and are used to illustrate the collaborative input that could further enrich the travel plan. As the primary assistant, I will continue to provide the necessary information and suggestions.]\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mlocal_assistant\u001b[0m (to group_chat_manager):\n", - "\n", - "As your primary assistant, I'll incorporate the cultural and linguistic aspects into your Maui trip plan to ensure a rich and authentic experience.\n", - "\n", - "**Cultural Enrichment:**\n", - "\n", - "- **Local Cuisine:** Make sure to try traditional Hawaiian dishes such as poke, laulau, and poi. Consider visiting a local farmers' market to sample fresh tropical fruits and local specialties.\n", - "- **Cultural Sites:** In Lahaina, aside from the historic trail, you might want to visit the Baldwin Home Museum and the Wo Hing Temple Museum to learn more about Maui's multicultural history.\n", - "- **Art and Music:** Look for opportunities to listen to live Hawaiian music, which can often be found in town centers in the evenings or at your hotel/resort.\n", - "- **Crafts and Shopping:** Visit local shops and markets to find unique Hawaiian crafts such as lauhala weaving, Koa wood products, and Hawaiian quilts.\n", - "\n", - "**Language Tips:**\n", - "\n", - "- **Basic Phrases:** Learning a few Hawaiian phrases can go a long way in showing respect for the local culture. 
Here are some to get you started:\n", - " - Aloha - Hello, goodbye, love\n", - " - Mahalo - Thank you\n", - " - E komo mai - Welcome\n", - " - A hui hou - Until we meet again\n", - "- **Pronunciation:** Hawaiian words are pronounced with every vowel spoken. For example, \"Haleakalā\" is pronounced \"Ha-lay-ah-ka-lah.\"\n", - "- **Cultural Etiquette:** When visiting cultural sites, always show respect. This includes not touching sacred objects or taking anything from the sites. Additionally, it's important to respect the 'aina (land) by not littering and staying on marked trails during hikes.\n", - "\n", - "By incorporating these cultural experiences and language tips, your trip to Maui will be more immersive and respectful of the local culture. Enjoy your adventure in this beautiful Hawaiian island!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mlanguage_assistant\u001b[0m (to group_chat_manager):\n", - "\n", - "Thank you for the local insights and language tips. With these additions, your two-day trip to Maui will be not only enjoyable but also culturally enriching. 
Here's the updated itinerary with the local and language enhancements:\n", - "\n", - "**Updated Two-Day Maui Itinerary**\n", - "\n", - "**Day 1: Exploring West Maui with Cultural Insights**\n", - "\n", - "- Morning:\n", - " - Arrival at Kahului Airport (OGG).\n", - " - Pick up rental car.\n", - " - Breakfast at a local café, trying a Hawaiian breakfast specialty.\n", - " - Drive to Lahaina, a historic whaling village.\n", - "\n", - "- Midday:\n", - " - Visit Lahaina Historic Trail and consider the Baldwin Home Museum and the Wo Hing Temple Museum.\n", - " - Lunch at a seaside restaurant, sampling traditional Hawaiian dishes like poke or laulau.\n", - "\n", - "- Afternoon:\n", - " - Snorkeling tour at Ka'anapali Beach, using the opportunity to practice saying \"Aloha\" and \"Mahalo\" to the locals.\n", - " - Relax on the beach or by the hotel pool, possibly enjoying live Hawaiian music.\n", - "\n", - "- Evening:\n", - " - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau, immersing yourself in Hawaiian culture and cuisine.\n", - " - Return to hotel for overnight stay.\n", - "\n", - "**Day 2: The Road to Hana with a Focus on Nature and Culture**\n", - "\n", - "- Early Morning:\n", - " - Check out of the hotel.\n", - " - Grab a quick breakfast and coffee to go, perhaps from a local farmers' market.\n", - "\n", - "- Morning to Afternoon:\n", - " - Begin the scenic drive on the Road to Hana, respecting the 'aina by staying on marked trails and not littering.\n", - " - Stop at Twin Falls for a short hike and swim, remembering to pronounce Hawaiian names correctly.\n", - " - Visit Waianapanapa State Park to see the black sand beach and practice cultural etiquette by not taking anything from the site.\n", - "\n", - "- Mid to Late Afternoon:\n", - " - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\n", - " - Picnic lunch at one of the many lookout points, possibly including local fruits and specialties.\n", - "\n", - "- 
Evening:\n", - " - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay, using Hawaiian phrases to express gratitude.\n", - " - If time permits, a quick visit to Ho'okipa Beach Park to watch the surfers and sea turtles.\n", - "\n", - "- Night:\n", - " - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\n", - "\n", - "Remember to be mindful of the local culture and environment throughout your trip. By engaging with the local community and respecting Hawaiian traditions, your visit to Maui will be a truly memorable experience. Enjoy your journey to this beautiful island paradise!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mprimary_assistant\u001b[0m (to group_chat_manager):\n", - "\n", - "**Final Two-Day Maui Itinerary with Cultural and Language Integration**\n", - "\n", - "**Day 1: Exploring West Maui with Cultural Insights**\n", - "\n", - "- Morning:\n", - " - Arrival at Kahului Airport (OGG).\n", - " - Pick up rental car.\n", - " - Breakfast at a local café, trying a Hawaiian breakfast specialty.\n", - " - Drive to Lahaina, a historic whaling village.\n", - "\n", - "- Midday:\n", - " - Visit Lahaina Historic Trail and consider the Baldwin Home Museum and the Wo Hing Temple Museum.\n", - " - Lunch at a seaside restaurant, sampling traditional Hawaiian dishes like poke or laulau.\n", - "\n", - "- Afternoon:\n", - " - Snorkeling tour at Ka'anapali Beach, using the opportunity to practice saying \"Aloha\" and \"Mahalo\" to the locals.\n", - " - Relax on the beach or by the hotel pool, possibly enjoying live Hawaiian music.\n", - "\n", - "- Evening:\n", - " - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau, immersing yourself in Hawaiian culture and cuisine.\n", - " - Return to hotel for overnight stay.\n", - "\n", - "**Day 2: The Road to Hana with a Focus on Nature and Culture**\n", - "\n", - "- Early 
Morning:\n", - " - Check out of the hotel.\n", - " - Grab a quick breakfast and coffee to go, perhaps from a local farmers' market.\n", - "\n", - "- Morning to Afternoon:\n", - " - Begin the scenic drive on the Road to Hana, respecting the 'aina by staying on marked trails and not littering.\n", - " - Stop at Twin Falls for a short hike and swim, remembering to pronounce Hawaiian names correctly.\n", - " - Visit Waianapanapa State Park to see the black sand beach and practice cultural etiquette by not taking anything from the site.\n", - "\n", - "- Mid to Late Afternoon:\n", - " - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\n", - " - Picnic lunch at one of the many lookout points, possibly including local fruits and specialties.\n", - "\n", - "- Evening:\n", - " - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay, using Hawaiian phrases to express gratitude.\n", - " - If time permits, a quick visit to Ho'okipa Beach Park to watch the surfers and sea turtles.\n", - "\n", - "- Night:\n", - " - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\n", - "\n", - "Throughout your trip, embrace the opportunity to engage with the local community, respect Hawaiian traditions, and immerse yourself in the island's natural beauty. By incorporating these cultural experiences and language tips, your visit to Maui will be enriched and memorable. Have a fantastic journey to this enchanting island paradise! 
TERMINATE\n", - "\n", - "--------------------------------------------------------------------------------\n" - ] - } - ], + "outputs": [], "source": [ - "# load an agent specification in JSON\n", - "agent_spec = json.load(open(\"groupchat_spec.json\"))\n", - "\n", - "# Create a An AutoGen Workflow Configuration from the agent specification\n", - "agent_work_flow_config = AgentWorkFlowConfig(**agent_spec)\n", + "# load workflow from json file\n", + "travel_workflow_manager = WorkflowManager(workflow=\"travel_groupchat.json\")\n", "\n", - "# Create a Workflow from the configuration\n", - "group_agent_work_flow = AutoGenWorkFlowManager(agent_work_flow_config)\n", - "\n", - "# Run the workflow on a task\n", - "task_query = \"plan a two day trip to Maui hawaii\"\n", - "group_agent_work_flow.run(message=task_query)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "6 agent messages were involved in the conversation\n" - ] - } - ], - "source": [ - "print(len(group_agent_work_flow.agent_history), \"agent messages were involved in the conversation\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'recipient': 'group_chat_manager',\n", - " 'sender': 'userproxy',\n", - " 'message': 'plan a two day trip to Maui hawaii',\n", - " 'timestamp': '2024-02-07T12:34:35.709990',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", - " 'sender': 'primary_assistant',\n", - " 'message': \"To plan a two-day trip to Maui, Hawaii, we'll need to consider your interests, preferences for activities, and the logistics of travel within the island. 
Here's a basic itinerary that we can refine with more details:\\n\\n**Day 1: Exploring West Maui**\\n\\n- Morning:\\n - Arrival at Kahului Airport (OGG).\\n - Pick up rental car.\\n - Breakfast at a local café near the airport.\\n - Drive to Lahaina, a historic whaling village.\\n\\n- Midday:\\n - Visit Lahaina Historic Trail for a self-guided walking tour.\\n - Lunch at a seaside restaurant in Lahaina.\\n\\n- Afternoon:\\n - Snorkeling tour at Ka'anapali Beach.\\n - Relax on the beach or by the hotel pool.\\n\\n- Evening:\\n - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau.\\n - Return to hotel for overnight stay.\\n\\n**Day 2: The Road to Hana**\\n\\n- Early Morning:\\n - Check out of the hotel.\\n - Grab a quick breakfast and coffee to go.\\n\\n- Morning to Afternoon:\\n - Begin the scenic drive on the Road to Hana.\\n - Stop at Twin Falls for a short hike and swim.\\n - Visit Waianapanapa State Park to see the black sand beach.\\n - Picnic lunch at one of the many lookout points.\\n\\n- Mid to Late Afternoon:\\n - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\\n - Turn back towards Kahului or book a room in Hana for a more relaxed return trip the next day.\\n\\n- Evening:\\n - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay.\\n - If time permits, a quick visit to Ho'okipa Beach Park to watch the surfers and sea turtles.\\n\\n- Night:\\n - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\\n\\nThis itinerary is just a starting point. 
Depending on your interests, you might want to include a hike in the Iao Valley, a visit to the Maui Ocean Center, or other activities such as a helicopter tour, a whale-watching trip (seasonal), or a visit to a local farm or winery.\\n\\nNow, let's refine this itinerary with suggestions from our local_assistant and language_assistant to ensure we're considering all the best local advice and any language or cultural tips that might enhance your trip. \\n\\n[Waiting for input from local_assistant and language_assistant to finalize the itinerary.]\",\n", - " 'timestamp': '2024-02-07T12:34:35.722191',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", - " 'sender': 'local_assistant',\n", - " 'message': \"As the primary assistant, I've provided a basic itinerary for a two-day trip to Maui, Hawaii. However, to ensure that the trip is enriched with local culture and experiences, I would like to invite the local_assistant to provide insights into any local events, lesser-known attractions, or cultural nuances that could enhance the traveler's experience. Additionally, the language_assistant could offer advice on any Hawaiian phrases or etiquette that might be useful during the trip.\\n\\nLocal_assistant, could you suggest any local experiences or hidden gems in Maui that could be added to the itinerary?\\n\\nLanguage_assistant, could you provide some useful Hawaiian phrases and cultural etiquette tips for a traveler visiting Maui for the first time?\\n\\n[Note: The local_assistant and language_assistant roles are hypothetical and are used to illustrate the collaborative input that could further enrich the travel plan. 
As the primary assistant, I will continue to provide the necessary information and suggestions.]\",\n", - " 'timestamp': '2024-02-07T12:34:35.731563',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", - " 'sender': 'local_assistant',\n", - " 'message': 'As your primary assistant, I\\'ll incorporate the cultural and linguistic aspects into your Maui trip plan to ensure a rich and authentic experience.\\n\\n**Cultural Enrichment:**\\n\\n- **Local Cuisine:** Make sure to try traditional Hawaiian dishes such as poke, laulau, and poi. Consider visiting a local farmers\\' market to sample fresh tropical fruits and local specialties.\\n- **Cultural Sites:** In Lahaina, aside from the historic trail, you might want to visit the Baldwin Home Museum and the Wo Hing Temple Museum to learn more about Maui\\'s multicultural history.\\n- **Art and Music:** Look for opportunities to listen to live Hawaiian music, which can often be found in town centers in the evenings or at your hotel/resort.\\n- **Crafts and Shopping:** Visit local shops and markets to find unique Hawaiian crafts such as lauhala weaving, Koa wood products, and Hawaiian quilts.\\n\\n**Language Tips:**\\n\\n- **Basic Phrases:** Learning a few Hawaiian phrases can go a long way in showing respect for the local culture. Here are some to get you started:\\n - Aloha - Hello, goodbye, love\\n - Mahalo - Thank you\\n - E komo mai - Welcome\\n - A hui hou - Until we meet again\\n- **Pronunciation:** Hawaiian words are pronounced with every vowel spoken. For example, \"Haleakalā\" is pronounced \"Ha-lay-ah-ka-lah.\"\\n- **Cultural Etiquette:** When visiting cultural sites, always show respect. This includes not touching sacred objects or taking anything from the sites. 
Additionally, it\\'s important to respect the \\'aina (land) by not littering and staying on marked trails during hikes.\\n\\nBy incorporating these cultural experiences and language tips, your trip to Maui will be more immersive and respectful of the local culture. Enjoy your adventure in this beautiful Hawaiian island!',\n", - " 'timestamp': '2024-02-07T12:34:35.740694',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", - " 'sender': 'language_assistant',\n", - " 'message': 'Thank you for the local insights and language tips. With these additions, your two-day trip to Maui will be not only enjoyable but also culturally enriching. Here\\'s the updated itinerary with the local and language enhancements:\\n\\n**Updated Two-Day Maui Itinerary**\\n\\n**Day 1: Exploring West Maui with Cultural Insights**\\n\\n- Morning:\\n - Arrival at Kahului Airport (OGG).\\n - Pick up rental car.\\n - Breakfast at a local café, trying a Hawaiian breakfast specialty.\\n - Drive to Lahaina, a historic whaling village.\\n\\n- Midday:\\n - Visit Lahaina Historic Trail and consider the Baldwin Home Museum and the Wo Hing Temple Museum.\\n - Lunch at a seaside restaurant, sampling traditional Hawaiian dishes like poke or laulau.\\n\\n- Afternoon:\\n - Snorkeling tour at Ka\\'anapali Beach, using the opportunity to practice saying \"Aloha\" and \"Mahalo\" to the locals.\\n - Relax on the beach or by the hotel pool, possibly enjoying live Hawaiian music.\\n\\n- Evening:\\n - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau, immersing yourself in Hawaiian culture and cuisine.\\n - Return to hotel for overnight stay.\\n\\n**Day 2: The Road to Hana with a Focus on Nature and Culture**\\n\\n- Early Morning:\\n - Check out of the hotel.\\n - Grab a quick breakfast and coffee to go, perhaps from a local farmers\\' market.\\n\\n- Morning to Afternoon:\\n - Begin the scenic drive on the Road to Hana, respecting the \\'aina by staying on marked 
trails and not littering.\\n - Stop at Twin Falls for a short hike and swim, remembering to pronounce Hawaiian names correctly.\\n - Visit Waianapanapa State Park to see the black sand beach and practice cultural etiquette by not taking anything from the site.\\n\\n- Mid to Late Afternoon:\\n - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\\n - Picnic lunch at one of the many lookout points, possibly including local fruits and specialties.\\n\\n- Evening:\\n - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay, using Hawaiian phrases to express gratitude.\\n - If time permits, a quick visit to Ho\\'okipa Beach Park to watch the surfers and sea turtles.\\n\\n- Night:\\n - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\\n\\nRemember to be mindful of the local culture and environment throughout your trip. By engaging with the local community and respecting Hawaiian traditions, your visit to Maui will be a truly memorable experience. 
Enjoy your journey to this beautiful island paradise!',\n", - " 'timestamp': '2024-02-07T12:34:35.749806',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", - " 'sender': 'primary_assistant',\n", - " 'message': '**Final Two-Day Maui Itinerary with Cultural and Language Integration**\\n\\n**Day 1: Exploring West Maui with Cultural Insights**\\n\\n- Morning:\\n - Arrival at Kahului Airport (OGG).\\n - Pick up rental car.\\n - Breakfast at a local café, trying a Hawaiian breakfast specialty.\\n - Drive to Lahaina, a historic whaling village.\\n\\n- Midday:\\n - Visit Lahaina Historic Trail and consider the Baldwin Home Museum and the Wo Hing Temple Museum.\\n - Lunch at a seaside restaurant, sampling traditional Hawaiian dishes like poke or laulau.\\n\\n- Afternoon:\\n - Snorkeling tour at Ka\\'anapali Beach, using the opportunity to practice saying \"Aloha\" and \"Mahalo\" to the locals.\\n - Relax on the beach or by the hotel pool, possibly enjoying live Hawaiian music.\\n\\n- Evening:\\n - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau, immersing yourself in Hawaiian culture and cuisine.\\n - Return to hotel for overnight stay.\\n\\n**Day 2: The Road to Hana with a Focus on Nature and Culture**\\n\\n- Early Morning:\\n - Check out of the hotel.\\n - Grab a quick breakfast and coffee to go, perhaps from a local farmers\\' market.\\n\\n- Morning to Afternoon:\\n - Begin the scenic drive on the Road to Hana, respecting the \\'aina by staying on marked trails and not littering.\\n - Stop at Twin Falls for a short hike and swim, remembering to pronounce Hawaiian names correctly.\\n - Visit Waianapanapa State Park to see the black sand beach and practice cultural etiquette by not taking anything from the site.\\n\\n- Mid to Late Afternoon:\\n - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\\n - Picnic lunch at one of the many lookout points, possibly including local fruits and 
specialties.\\n\\n- Evening:\\n - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay, using Hawaiian phrases to express gratitude.\\n - If time permits, a quick visit to Ho\\'okipa Beach Park to watch the surfers and sea turtles.\\n\\n- Night:\\n - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\\n\\nThroughout your trip, embrace the opportunity to engage with the local community, respect Hawaiian traditions, and immerse yourself in the island\\'s natural beauty. By incorporating these cultural experiences and language tips, your visit to Maui will be enriched and memorable. Have a fantastic journey to this enchanting island paradise! TERMINATE',\n", - " 'timestamp': '2024-02-07T12:34:35.759164',\n", - " 'sender_type': 'groupchat'}]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "group_agent_work_flow.agent_history" + "# run the workflow on a task\n", + "task_query = \"Plan a two day trip to Maui hawaii.\"\n", + "travel_workflow_manager.run(message=task_query)" ] }, { @@ -390,7 +74,11 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# print the agent history\n", + "print(len(travel_workflow_manager.agent_history), \"agent messages were involved in the conversation\")\n", + "travel_workflow_manager.agent_history" + ] } ], "metadata": { @@ -409,7 +97,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/samples/apps/autogen-studio/notebooks/two_agent.json b/samples/apps/autogen-studio/notebooks/two_agent.json new file mode 100644 index 000000000000..44346ff7c669 --- /dev/null +++ b/samples/apps/autogen-studio/notebooks/two_agent.json @@ -0,0 +1,112 @@ +{ + "user_id": "guestuser@gmail.com", + "name": "Default Workflow", + "type": "autonomous", + "sample_tasks": [ + "paint a 
picture of a glass of ethiopian coffee, freshly brewed in a tall glass cup, on a table right in front of a lush green forest scenery", + "Plot the stock price of NVIDIA YTD." + ], + "version": "0.0.1", + "description": "Default workflow", + "summary_method": "last", + "agents": [ + { + "agent": { + "version": "0.0.1", + "config": { + "name": "user_proxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant", + "is_termination_msg": null, + "code_execution_config": "local", + "default_auto_reply": "TERMINATE", + "description": "User Proxy Agent Configuration", + "llm_config": false, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "user_id": "guestuser@gmail.com", + "type": "userproxy", + "task_instruction": null, + "skills": [], + "models": [], + "agents": [] + }, + "link": { + "agent_id": 52, + "workflow_id": 19, + "agent_type": "sender", + "sequence_id": 0 + } + }, + { + "agent": { + "version": "0.0.1", + "config": { + "name": "default_assistant", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful AI assistant.\nSolve tasks using your coding and language skills.\nIn the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.\n 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.\n 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.\nSolve the task step by step if you need to. 
If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.\nWhen using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.\nIf you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.\nIf the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.\nWhen you find an answer, verify the answer carefully. 
Include verifiable evidence in your response if possible.\nReply \"TERMINATE\" in the end when everything is done.\n ", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "", + "description": "Assistant Agent", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 2048, + "extra_body": null + }, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "user_id": "guestuser@gmail.com", + "type": "assistant", + "task_instruction": null, + "skills": [ + { + "user_id": "guestuser@gmail.com", + "name": "generate_images", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n", + "description": "Generate and save images based on a user's query.", + "secrets": {}, + "libraries": {} + } + ], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [] + }, + "link": { + "agent_id": 53, + "workflow_id": 19, + "agent_type": "receiver", + "sequence_id": 0 + } + } + ] +} diff --git a/samples/apps/autogen-studio/pyproject.toml b/samples/apps/autogen-studio/pyproject.toml index bc8c7864ad6e..1c886f8f07b3 100644 --- a/samples/apps/autogen-studio/pyproject.toml +++ b/samples/apps/autogen-studio/pyproject.toml @@ -24,7 +24,7 @@ dependencies = [ "typer", "uvicorn", "arxiv", - "pyautogen[gemini]>=0.2.0", + "pyautogen[gemini,anthropic,mistral]>=0.2.0", "python-dotenv", 
"websockets", "numpy < 2.0.0", diff --git a/samples/apps/autogen-studio/test/test_save_skills_to_file.py b/samples/apps/autogen-studio/test/test_save_skills_to_file.py new file mode 100644 index 000000000000..d61ad12225f9 --- /dev/null +++ b/samples/apps/autogen-studio/test/test_save_skills_to_file.py @@ -0,0 +1,56 @@ +import os + +from autogenstudio.datamodel import Agent, Skill +from autogenstudio.utils import utils + + +class TestUtilSaveSkillsToFile: + + def test_save_skills_to_file(self): + + # cleanup test work_dir + try: + os.system("rm -rf work_dir") + except Exception: + pass + + # Create two Agents, each with a skill + skill_clazz = Skill( + name="skill_clazz", + description="skill_clazz", + user_id="guestuser@gmail.com", + libraries=["lib1.0", "lib1.1"], + content="I am the skill clazz content", + secrets=[{"secret": "secret_1", "value": "value_1"}], + agents=[], + ) + + skill_dict = Skill( + name="skill_dict", + description="skill_dict", + user_id="guestuser@gmail.com", + libraries=["lib2.0", "lib2.1"], + content="I am the skill dict content", + secrets=[{"secret": "secret_2", "value": "value_2"}], + agents=[], + ) + + Agent(skills=[skill_clazz]) + Agent(skills=[skill_dict]) + + # test from flow + skills = [skill_dict.__dict__, skill_clazz] + + utils.save_skills_to_file(skills, work_dir="work_dir") + + f = open("work_dir/skills.py", "r") + skills_content = f.read() + + assert skills_content.find(skill_clazz.content) != -1 + assert skills_content.find(skill_dict.content) != -1 + + # cleanup test work_dir + try: + os.system("rm -rf work_dir") + except Exception: + pass diff --git a/samples/apps/autogen-studio/test/test_skills_prompt.py b/samples/apps/autogen-studio/test/test_skills_prompt.py new file mode 100644 index 000000000000..eee7dafc72b6 --- /dev/null +++ b/samples/apps/autogen-studio/test/test_skills_prompt.py @@ -0,0 +1,47 @@ +import os + +from autogenstudio.datamodel import Skill +from autogenstudio.utils import utils + + +class TestUtilGetSkillsPrompt: + + 
def test_get_skills_prompt(self): + + skill_clazz = Skill( + name="skill_clazz", + description="skill_clazz", + user_id="guestuser@gmail.com", + libraries=["lib1.0", "lib1.1"], + content="I am the skill clazz content", + secrets=[{"secret": "secret_1", "value": "value_1"}], + agents=[], + ) + + skill_dict = Skill( + name="skill_dict", + description="skill_dict", + user_id="guestuser@gmail.com", + libraries=["lib2.0", "lib2.1"], + content="I am the skill dict content", + secrets=[{"secret": "secret_2", "value": "value_2"}], + agents=[], + ) + + skills = [skill_dict.__dict__, skill_clazz] + + prompt = utils.get_skills_prompt(skills, work_dir="work_dir") + + # test that prompt contains contents of skills class and dict + assert prompt.find(skill_clazz.content) > 0 + assert prompt.find(skill_dict.content) > 0 + + # test that secrets are set in environ + assert os.getenv("secret_1") == "value_1" + assert os.getenv("secret_2") == "value_2" + + # cleanup test work_dir + try: + os.system("rm -rf work_dir") + except Exception: + pass