From 3be3127fdc2c248ebfa8acf710d43d51e7b56f5e Mon Sep 17 00:00:00 2001 From: tylerreed Date: Fri, 26 Jan 2024 13:16:25 -0500 Subject: [PATCH] updated user agents to either not execute code, or when executing, don't use Docker for update 0.2.8 --- ai_agency_01_workout/agents.py | 4 ++ ai_agency_03_video_captions/agents.py | 6 +- ai_agency_04_youtube_service/agents.py | 8 ++- autogen_agentbuilder/OAI_CONFIG_LIST | 6 ++ autogen_agentbuilder/main.py | 63 +++++++++++++++++++ autogen_agentbuilder/utility.py | 39 ++++++++++++ autogen_beginner_course/groupchat.py | 4 +- autogen_functions/autogen_function_example.py | 4 ++ .../autogen_function_example_decorators.py | 4 ++ autogen_functions/autogen_function_planner.py | 2 +- autogen_gpt_vision/autogen_vision.py | 6 +- autogen_memgpt/app.py | 19 +++--- .../autogen_memgpt_lmstudio.py | 48 ++------------ autogen_transcribe_video/transcribe.py | 2 +- autogen_webscraping/vision_crawl.py | 6 +- saas_products/password/password.py | 2 +- 16 files changed, 160 insertions(+), 63 deletions(-) create mode 100644 autogen_agentbuilder/OAI_CONFIG_LIST create mode 100644 autogen_agentbuilder/main.py create mode 100644 autogen_agentbuilder/utility.py diff --git a/ai_agency_01_workout/agents.py b/ai_agency_01_workout/agents.py index 7b31569..c5bd17f 100644 --- a/ai_agency_01_workout/agents.py +++ b/ai_agency_01_workout/agents.py @@ -7,6 +7,10 @@ system_message=system_messages.user_proxy_message, human_input_mode="NEVER", max_consecutive_auto_reply=0, + code_execution_config= { + "work_dir": "code", + "use_docker": False + } ) fitness_expert = autogen.AssistantAgent( diff --git a/ai_agency_03_video_captions/agents.py b/ai_agency_03_video_captions/agents.py index 0e4208e..dcf8f31 100644 --- a/ai_agency_03_video_captions/agents.py +++ b/ai_agency_03_video_captions/agents.py @@ -13,7 +13,11 @@ name="user_proxy", is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"), human_input_mode="NEVER", - 
max_consecutive_auto_reply=2 + max_consecutive_auto_reply=2, + code_execution_config= { + "work_dir": "code", + "use_docker": False + } ) user_proxy.register_function( diff --git a/ai_agency_04_youtube_service/agents.py b/ai_agency_04_youtube_service/agents.py index 936b01c..2a0835d 100644 --- a/ai_agency_04_youtube_service/agents.py +++ b/ai_agency_04_youtube_service/agents.py @@ -19,8 +19,12 @@ name="user_proxy", is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"), human_input_mode="NEVER", - max_consecutive_auto_reply=0 + max_consecutive_auto_reply=0, + code_execution_config={ + "work_dir": "code", + "use_docker": False + } ) groupchat = autogen.GroupChat(agents=[user_proxy, script_maker, description_maker], messages=[], max_round=5) -manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=config.llm_config) \ No newline at end of file +manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=config.llm_config) diff --git a/autogen_agentbuilder/OAI_CONFIG_LIST b/autogen_agentbuilder/OAI_CONFIG_LIST new file mode 100644 index 0000000..966cbb6 --- /dev/null +++ b/autogen_agentbuilder/OAI_CONFIG_LIST @@ -0,0 +1,6 @@ +[ + { + "model": "gpt-4-1106-preview", + "api_key": "sk-REDACTED-set-your-own-key-here-and-rotate-the-leaked-one" + } +] \ No newline at end of file diff --git a/autogen_agentbuilder/main.py b/autogen_agentbuilder/main.py new file mode 100644 index 0000000..1c4b720 --- /dev/null +++ b/autogen_agentbuilder/main.py @@ -0,0 +1,63 @@ +import json +import utility + +import autogen +from autogen.agentchat.contrib.agent_builder import AgentBuilder + +config_file_or_env = "OAI_CONFIG_LIST" +llm_config = {"temperature": 0} +config_list = autogen.config_list_from_json(config_file_or_env, filter_dict={"model": ["gpt-4-1106-preview", "gpt-4"]}) + +position_list = utility.position_list +agent_prompt = utility.agent_sys_msg_prompt +library_path_or_json = "./agent_library_example.json" +
+build_manager = autogen.OpenAIWrapper(config_list=config_list) +sys_msg_list = [] + + +def start_task(execution_task: str, agent_list: list): + group_chat = autogen.GroupChat(agents=agent_list, messages=[], max_round=12) + manager = autogen.GroupChatManager(groupchat=group_chat, llm_config={"config_list": config_list, **llm_config}) + agent_list[0].initiate_chat(manager, message=execution_task) + + +def generate_agents(): + for pos in position_list: + resp_agent_sys_msg = ( + build_manager.create( + messages=[ + { + "role": "user", + "content": agent_prompt.format( + position=pos, + default_sys_msg=autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE, + ), + } + ] + ) + .choices[0] + .message.content + ) + sys_msg_list.append({"name": pos, "profile": resp_agent_sys_msg}) + + +generate_agents() + +json.dump(sys_msg_list, open(library_path_or_json, "w"), indent=4) + +new_builder = AgentBuilder( + config_file_or_env=config_file_or_env, builder_model="gpt-4-1106-preview", agent_model="gpt-4-1106-preview" +) +agent_list, _ = new_builder.build_from_library(utility.building_task, library_path_or_json, llm_config) + +# new_builder = AgentBuilder(config_file_or_env=config_file_or_env) +# agent_list, agent_config = new_builder.load(saved_path) +saved_path = new_builder.save("./autogen_agentbuilder") +print(saved_path) +start_task( + execution_task=utility.execution_task, + agent_list=agent_list, +) + +new_builder.clear_all_agents() diff --git a/autogen_agentbuilder/utility.py b/autogen_agentbuilder/utility.py new file mode 100644 index 0000000..f4c8c85 --- /dev/null +++ b/autogen_agentbuilder/utility.py @@ -0,0 +1,39 @@ +building_task = "Be able to create a python function with documentation on how it works." + +execution_task = "Create a python function that reverses a string." + +agent_sys_msg_prompt = """Considering the following position: + +POSITION: {position} + +What requirements should this position be satisfied? + +Hint: +# Your answer should be in one sentence. 
+# Your answer should be natural, starting from "As a ...". +# People with the above position need to complete a task given by a leader or colleague. +# People will work in a group chat, solving tasks with other people with different jobs. +# The modified requirement should not contain the code interpreter skill. +# Coding skill is limited to Python. +""" + +position_list = [ + "Environmental_Scientist", + "Astronomer", + "Software_Developer", + "Data_Analyst", + "Journalist", + "Teacher", + "Lawyer", + "Programmer", + "Accountant", + "Mathematician", + "Physicist", + "Biologist", + "Chemist", + "Statistician", + "IT_Specialist", + "Cybersecurity_Expert", + "Artificial_Intelligence_Engineer", + "Financial_Analyst", +] \ No newline at end of file diff --git a/autogen_beginner_course/groupchat.py b/autogen_beginner_course/groupchat.py index a581441..4308be0 100644 --- a/autogen_beginner_course/groupchat.py +++ b/autogen_beginner_course/groupchat.py @@ -58,10 +58,10 @@ "plan includes adding verifiable info such as source URL.", llm_config=gpt4_config, ) -groupchat = autogen.GroupChat( +group_chat = autogen.GroupChat( agents=[user_proxy, engineer, scientist, planner, executor, critic], messages=[], max_round=50 ) -manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt4_config) +manager = autogen.GroupChatManager(groupchat=group_chat, llm_config=gpt4_config) user_proxy.initiate_chat( manager, diff --git a/autogen_functions/autogen_function_example.py b/autogen_functions/autogen_function_example.py index c271e2c..1343abe 100644 --- a/autogen_functions/autogen_function_example.py +++ b/autogen_functions/autogen_function_example.py @@ -71,6 +71,10 @@ def currency_calculator( is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"), human_input_mode="NEVER", max_consecutive_auto_reply=10, + code_execution_config={ + "work_dir": "code", + "use_docker": False + } ) CurrencySymbol = Literal["USD", "EUR"] diff 
--git a/autogen_functions/autogen_function_example_decorators.py b/autogen_functions/autogen_function_example_decorators.py index a10c31a..4691667 100644 --- a/autogen_functions/autogen_function_example_decorators.py +++ b/autogen_functions/autogen_function_example_decorators.py @@ -31,6 +31,10 @@ is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"), human_input_mode="NEVER", max_consecutive_auto_reply=10, + code_execution_config= { + "work_dir": "code", + "use_docker": False + } ) CurrencySymbol = Literal["USD", "EUR"] diff --git a/autogen_functions/autogen_function_planner.py b/autogen_functions/autogen_function_planner.py index ba6750a..2a60137 100644 --- a/autogen_functions/autogen_function_planner.py +++ b/autogen_functions/autogen_function_planner.py @@ -88,7 +88,7 @@ def ask_planner(message): max_consecutive_auto_reply=10, is_termination_msg=lambda x: "content" in x and x["content"] is not None and x["content"].rstrip().endswith( "TERMINATE"), - code_execution_config={"work_dir": "planning"}, + code_execution_config={"work_dir": "planning", "use_docker": False}, function_map={"ask_planner": ask_planner}, ) diff --git a/autogen_gpt_vision/autogen_vision.py b/autogen_gpt_vision/autogen_vision.py index 039539e..1938462 100644 --- a/autogen_gpt_vision/autogen_vision.py +++ b/autogen_gpt_vision/autogen_vision.py @@ -24,7 +24,11 @@ name="User_proxy", system_message="A human admin.", human_input_mode="NEVER", - max_consecutive_auto_reply=0 + max_consecutive_auto_reply=0, + code_execution_config={ + "work_dir": "code", + "use_docker": False + } ) # Example 1 diff --git a/autogen_memgpt/app.py b/autogen_memgpt/app.py index 8342d89..97bb511 100644 --- a/autogen_memgpt/app.py +++ b/autogen_memgpt/app.py @@ -1,6 +1,6 @@ import autogen import os -from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent +from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config from dotenv import 
load_dotenv load_dotenv() @@ -28,7 +28,7 @@ user_proxy = autogen.UserProxyAgent( name="User_proxy", system_message="A human admin.", - code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"}, + code_execution_config={"last_n_messages": 2, "work_dir": "groupchat", "use_docker": False}, human_input_mode="TERMINATE" ) @@ -59,14 +59,15 @@ ) # else: - coder = create_autogen_memgpt_agent( + coder = create_memgpt_autogen_agent_from_config( "MemGPT_coder", - persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber " - "(which I make sure to tell everyone I work with).", - user_description=f"You are participating in a group chat with a user ({user_proxy.name}) " - f"and a product manager ({pm.name}).", - model=os.getenv("model"), - interface_kwargs=interface_kwargs + llm_config=llm_config, + system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber " + f"(which I make sure to tell everyone I work with).\n" + f"You are participating in a group chat with a user ({user_proxy.name}) " + f"and a product manager ({pm.name}).", + interface_kwargs=interface_kwargs, + default_auto_reply="...", ) # Initialize the group chat between the user and two LLM agents (PM and coder) diff --git a/autogen_memgpt_lmstudio/autogen_memgpt_lmstudio.py b/autogen_memgpt_lmstudio/autogen_memgpt_lmstudio.py index 3952327..f534ebc 100644 --- a/autogen_memgpt_lmstudio/autogen_memgpt_lmstudio.py +++ b/autogen_memgpt_lmstudio/autogen_memgpt_lmstudio.py @@ -38,46 +38,6 @@ }, ] -elif LLM_BACKEND == "azure": - # Make sure that you have access to this deployment/model on your Azure account! 
- # If you don't have access to the model, the code will fail - model = os.getenv("model") - - azure_openai_api_key = os.getenv("AZURE_OPENAI_KEY") - azure_openai_version = os.getenv("AZURE_OPENAI_VERSION") - azure_openai_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT") - assert ( - azure_openai_api_key is not None and azure_openai_version is not None and azure_openai_endpoint is not None - ), "Set all the required OpenAI Azure variables (see: https://memgpt.readthedocs.io/en/latest/endpoints/#azure)" - - # This config is for AutoGen agents that are not powered by MemGPT - config_list = [ - { - "model": model, - "api_type": "azure", - "api_key": azure_openai_api_key, - "api_version": azure_openai_version, - # NOTE: on versions of pyautogen < 0.2.0, use "api_base" - # "api_base": azure_openai_endpoint, - "base_url": azure_openai_endpoint, - } - ] - - # This config is for AutoGen agents that powered by MemGPT - config_list_memgpt = [ - { - "model": model, - "context_window": LLM_MAX_TOKENS[model], - "preset": DEFAULT_PRESET, - "model_wrapper": None, - # Azure specific - "model_endpoint_type": "azure", - "azure_key": azure_openai_api_key, - "azure_endpoint": azure_openai_endpoint, - "azure_version": azure_openai_version, - }, - ] - elif LLM_BACKEND == "local": # Example using LM Studio on a local machine # You will have to change the parameters based on your setup @@ -90,7 +50,6 @@ # "api_base": "http://localhost:1234/v1", # "api_type": "open_ai", "base_url": "http://localhost:1234/v1", - # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio "api_key": "NULL", # not needed }, ] @@ -104,7 +63,7 @@ "context_window": 8192, # the context window of your model (for Mistral 7B-based models, it's likely 8192) "model_wrapper": "airoboros-l2-70b-2.1", # airoboros is the default wrapper and should work for most models "model_endpoint_type": "lmstudio", # can use webui, ollama, llamacpp, etc. 
- "model_endpoint": "http://localhost:1234", # the IP address of your LLM backend + "model_endpoint": "http://localhost:1234/v1", # the IP address of your LLM backend }, ] @@ -114,7 +73,7 @@ # If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo # (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb) # If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent -USE_MEMGPT = True +USE_MEMGPT = False # Set to True if you want to print MemGPT's inner workings. DEBUG = False @@ -132,7 +91,7 @@ user_proxy = autogen.UserProxyAgent( name="User_proxy", system_message="A human admin.", - code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"}, + code_execution_config={"last_n_messages": 2, "work_dir": "groupchat", "use_docker": False}, human_input_mode="TERMINATE", # needed? default_auto_reply="...", # Set a default auto-reply message here (non-empty auto-reply is required for LM Studio) ) @@ -147,6 +106,7 @@ if not USE_MEMGPT: # In the AutoGen example, we create an AssistantAgent to play the role of the coder + print("AutoGen Coder") coder = autogen.AssistantAgent( name="Coder", llm_config=llm_config, diff --git a/autogen_transcribe_video/transcribe.py b/autogen_transcribe_video/transcribe.py index 89a06f8..5e1acff 100644 --- a/autogen_transcribe_video/transcribe.py +++ b/autogen_transcribe_video/transcribe.py @@ -64,7 +64,7 @@ is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"), human_input_mode="NEVER", max_consecutive_auto_reply=10, - code_execution_config={"work_dir": "scripts"}, + code_execution_config={"work_dir": "scripts", "use_docker": False}, ) user_proxy.register_function( diff --git a/autogen_webscraping/vision_crawl.py b/autogen_webscraping/vision_crawl.py index 5da7377..10df04f 100644 --- a/autogen_webscraping/vision_crawl.py +++ b/autogen_webscraping/vision_crawl.py @@ -20,7 +20,11 @@ name="User_proxy", 
system_message="A human admin.", human_input_mode="NEVER", - max_consecutive_auto_reply=0 + max_consecutive_auto_reply=0, + code_execution_config={ + "work_dir": "code", + "use_docker": False + } ) diff --git a/saas_products/password/password.py b/saas_products/password/password.py index 4fed43f..ca6af9c 100644 --- a/saas_products/password/password.py +++ b/saas_products/password/password.py @@ -19,7 +19,7 @@ system_message="A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved " "by this admin.", human_input_mode="TERMINATE", - code_execution_config={"last_n_messages": 3, "work_dir": "programming"}, + code_execution_config={"last_n_messages": 3, "work_dir": "programming", "use_docker": False}, ) engineer = autogen.AssistantAgent(