Skip to content

Commit

Permalink
updated user agents to either not execute code, or when executing, do…
Browse files Browse the repository at this point in the history
…n't use Docker for update 0.2.8
  • Loading branch information
tylerprogramming committed Jan 26, 2024
1 parent 5f50a5c commit 3be3127
Show file tree
Hide file tree
Showing 16 changed files with 160 additions and 63 deletions.
4 changes: 4 additions & 0 deletions ai_agency_01_workout/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@
system_message=system_messages.user_proxy_message,
human_input_mode="NEVER",
max_consecutive_auto_reply=0,
code_execution_config= {
"work_dir": "code",
"use_docker": False
}
)

fitness_expert = autogen.AssistantAgent(
Expand Down
6 changes: 5 additions & 1 deletion ai_agency_03_video_captions/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,11 @@
name="user_proxy",
is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
human_input_mode="NEVER",
max_consecutive_auto_reply=2
max_consecutive_auto_reply=2,
code_execution_config= {
"work_dir": "code",
"use_docker": False
}
)

user_proxy.register_function(
Expand Down
8 changes: 6 additions & 2 deletions ai_agency_04_youtube_service/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,12 @@
name="user_proxy",
is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
human_input_mode="NEVER",
max_consecutive_auto_reply=0
max_consecutive_auto_reply=0,
code_execution_config={
"work_dir": "code",
"use_docker": False
}
)

groupchat = autogen.GroupChat(agents=[user_proxy, script_maker, description_maker], messages=[], max_round=5)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=config.llm_config)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=config.llm_config)
6 changes: 6 additions & 0 deletions autogen_agentbuilder/OAI_CONFIG_LIST
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
[
{
"model": "gpt-4-1106-preview",
"api_key": "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"
}
]
63 changes: 63 additions & 0 deletions autogen_agentbuilder/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
import json
import utility

import autogen
from autogen.agentchat.contrib.agent_builder import AgentBuilder

config_file_or_env = "OAI_CONFIG_LIST"
llm_config = {"temperature": 0}
config_list = autogen.config_list_from_json(config_file_or_env, filter_dict={"model": ["gpt-4-1106-preview", "gpt-4"]})

position_list = utility.position_list
agent_prompt = utility.agent_sys_msg_prompt
library_path_or_json = "./agent_library_example.json"

build_manager = autogen.OpenAIWrapper(config_list=config_list)
sys_msg_list = []


def start_task(execution_task: str, agent_list: list):
    """Run *execution_task* in a group chat over *agent_list*.

    The first agent in the list kicks off the conversation; the chat is
    capped at 12 rounds and managed with the module-level LLM settings.
    """
    chat = autogen.GroupChat(agents=agent_list, messages=[], max_round=12)
    manager_config = {"config_list": config_list, **llm_config}
    chat_manager = autogen.GroupChatManager(groupchat=chat, llm_config=manager_config)
    first_agent = agent_list[0]
    first_agent.initiate_chat(chat_manager, message=execution_task)


def generate_agents():
    """Draft a system-message profile for every position in ``position_list``.

    Each profile is produced by the ``build_manager`` LLM from the
    ``agent_prompt`` template and appended to the module-level
    ``sys_msg_list`` as a ``{"name": ..., "profile": ...}`` record.
    """
    for position in position_list:
        prompt_text = agent_prompt.format(
            position=position,
            default_sys_msg=autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE,
        )
        response = build_manager.create(messages=[{"role": "user", "content": prompt_text}])
        profile_text = response.choices[0].message.content
        sys_msg_list.append({"name": position, "profile": profile_text})


# Generate one system-message profile per position into sys_msg_list.
generate_agents()

# Persist the generated profiles as an agent library. Use a context manager
# so the file handle is closed deterministically and the data is flushed —
# the original open() call here leaked the handle.
with open(library_path_or_json, "w") as library_file:
    json.dump(sys_msg_list, library_file, indent=4)

# Build concrete agents from the library just written.
new_builder = AgentBuilder(
    config_file_or_env=config_file_or_env, builder_model="gpt-4-1106-preview", agent_model="gpt-4-1106-preview"
)
agent_list, _ = new_builder.build_from_library(utility.building_task, library_path_or_json, llm_config)

# Alternative: reload previously saved agents instead of rebuilding.
# new_builder = AgentBuilder(config_file_or_env=config_file_or_env)
# agent_list, agent_config = new_builder.load(saved_path)
saved_path = new_builder.save("./autogen_agentbuilder")
print(saved_path)
# Run the demo task with the freshly built agents.
start_task(
    execution_task=utility.execution_task,
    agent_list=agent_list,
)

# Tear down all agents created by the builder.
new_builder.clear_all_agents()
39 changes: 39 additions & 0 deletions autogen_agentbuilder/utility.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
building_task = "Be able to create a python function with documentation on how it works."

execution_task = "Create a python function that reverses a string."

agent_sys_msg_prompt = """Considering the following position:
POSITION: {position}
What requirements should this position be satisfied?
Hint:
# Your answer should be in one sentence.
# Your answer should be natural, starting from "As a ...".
# People with the above position need to complete a task given by a leader or colleague.
# People will work in a group chat, solving tasks with other people with different jobs.
# The modified requirement should not contain the code interpreter skill.
# Coding skill is limited to Python.
"""

position_list = [
"Environmental_Scientist",
"Astronomer",
"Software_Developer",
"Data_Analyst",
"Journalist",
"Teacher",
"Lawyer",
"Programmer",
"Accountant",
"Mathematician",
"Physicist",
"Biologist",
"Chemist",
"Statistician",
"IT_Specialist",
"Cybersecurity_Expert",
"Artificial_Intelligence_Engineer",
"Financial_Analyst",
]
4 changes: 2 additions & 2 deletions autogen_beginner_course/groupchat.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,10 +58,10 @@
"plan includes adding verifiable info such as source URL.",
llm_config=gpt4_config,
)
groupchat = autogen.GroupChat(
group_chat = autogen.GroupChat(
agents=[user_proxy, engineer, scientist, planner, executor, critic], messages=[], max_round=50
)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt4_config)
manager = autogen.GroupChatManager(groupchat=group_chat, llm_config=gpt4_config)

user_proxy.initiate_chat(
manager,
Expand Down
4 changes: 4 additions & 0 deletions autogen_functions/autogen_function_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,10 @@ def currency_calculator(
is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
human_input_mode="NEVER",
max_consecutive_auto_reply=10,
code_execution_config={
"work_dir": "code",
"use_docker": False
}
)

CurrencySymbol = Literal["USD", "EUR"]
Expand Down
4 changes: 4 additions & 0 deletions autogen_functions/autogen_function_example_decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@
is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
human_input_mode="NEVER",
max_consecutive_auto_reply=10,
code_execution_config= {
"work_dir": "code",
"use_docker": False
}
)

CurrencySymbol = Literal["USD", "EUR"]
Expand Down
2 changes: 1 addition & 1 deletion autogen_functions/autogen_function_planner.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ def ask_planner(message):
max_consecutive_auto_reply=10,
is_termination_msg=lambda x: "content" in x and x["content"] is not None and x["content"].rstrip().endswith(
"TERMINATE"),
code_execution_config={"work_dir": "planning"},
code_execution_config={"work_dir": "planning", "use_docker": False},
function_map={"ask_planner": ask_planner},
)

Expand Down
6 changes: 5 additions & 1 deletion autogen_gpt_vision/autogen_vision.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,11 @@
name="User_proxy",
system_message="A human admin.",
human_input_mode="NEVER",
max_consecutive_auto_reply=0
max_consecutive_auto_reply=0,
code_execution_config={
"work_dir": "code",
"use_docker": False
}
)

# Example 1
Expand Down
19 changes: 10 additions & 9 deletions autogen_memgpt/app.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import autogen
import os
from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent
from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config
from dotenv import load_dotenv

load_dotenv()
Expand Down Expand Up @@ -28,7 +28,7 @@
user_proxy = autogen.UserProxyAgent(
name="User_proxy",
system_message="A human admin.",
code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"},
code_execution_config={"last_n_messages": 2, "work_dir": "groupchat", "use_docker": False},
human_input_mode="TERMINATE"
)

Expand Down Expand Up @@ -59,14 +59,15 @@
)
#
else:
coder = create_autogen_memgpt_agent(
coder = create_memgpt_autogen_agent_from_config(
"MemGPT_coder",
persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber "
"(which I make sure to tell everyone I work with).",
user_description=f"You are participating in a group chat with a user ({user_proxy.name}) "
f"and a product manager ({pm.name}).",
model=os.getenv("model"),
interface_kwargs=interface_kwargs
llm_config=llm_config,
system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
f"(which I make sure to tell everyone I work with).\n"
f"You are participating in a group chat with a user ({user_proxy.name}) "
f"and a product manager ({pm.name}).",
interface_kwargs=interface_kwargs,
default_auto_reply="...",
)

# Initialize the group chat between the user and two LLM agents (PM and coder)
Expand Down
48 changes: 4 additions & 44 deletions autogen_memgpt_lmstudio/autogen_memgpt_lmstudio.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,46 +38,6 @@
},
]

elif LLM_BACKEND == "azure":
# Make sure that you have access to this deployment/model on your Azure account!
# If you don't have access to the model, the code will fail
model = os.getenv("model")

azure_openai_api_key = os.getenv("AZURE_OPENAI_KEY")
azure_openai_version = os.getenv("AZURE_OPENAI_VERSION")
azure_openai_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
assert (
azure_openai_api_key is not None and azure_openai_version is not None and azure_openai_endpoint is not None
), "Set all the required OpenAI Azure variables (see: https://memgpt.readthedocs.io/en/latest/endpoints/#azure)"

# This config is for AutoGen agents that are not powered by MemGPT
config_list = [
{
"model": model,
"api_type": "azure",
"api_key": azure_openai_api_key,
"api_version": azure_openai_version,
# NOTE: on versions of pyautogen < 0.2.0, use "api_base"
# "api_base": azure_openai_endpoint,
"base_url": azure_openai_endpoint,
}
]

# This config is for AutoGen agents that powered by MemGPT
config_list_memgpt = [
{
"model": model,
"context_window": LLM_MAX_TOKENS[model],
"preset": DEFAULT_PRESET,
"model_wrapper": None,
# Azure specific
"model_endpoint_type": "azure",
"azure_key": azure_openai_api_key,
"azure_endpoint": azure_openai_endpoint,
"azure_version": azure_openai_version,
},
]

elif LLM_BACKEND == "local":
# Example using LM Studio on a local machine
# You will have to change the parameters based on your setup
Expand All @@ -90,7 +50,6 @@
# "api_base": "http://localhost:1234/v1",
# "api_type": "open_ai",
"base_url": "http://localhost:1234/v1",
# ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio
"api_key": "NULL", # not needed
},
]
Expand All @@ -104,7 +63,7 @@
"context_window": 8192, # the context window of your model (for Mistral 7B-based models, it's likely 8192)
"model_wrapper": "airoboros-l2-70b-2.1", # airoboros is the default wrapper and should work for most models
"model_endpoint_type": "lmstudio", # can use webui, ollama, llamacpp, etc.
"model_endpoint": "http://localhost:1234", # the IP address of your LLM backend
"model_endpoint": "http://localhost:1234/v1", # the IP address of your LLM backend
},
]

Expand All @@ -114,7 +73,7 @@
# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo
# (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb)
# If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent
USE_MEMGPT = True
USE_MEMGPT = False

# Set to True if you want to print MemGPT's inner workings.
DEBUG = False
Expand All @@ -132,7 +91,7 @@
user_proxy = autogen.UserProxyAgent(
name="User_proxy",
system_message="A human admin.",
code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"},
code_execution_config={"last_n_messages": 2, "work_dir": "groupchat", "use_docker": False},
human_input_mode="TERMINATE", # needed?
default_auto_reply="...", # Set a default auto-reply message here (non-empty auto-reply is required for LM Studio)
)
Expand All @@ -147,6 +106,7 @@

if not USE_MEMGPT:
# In the AutoGen example, we create an AssistantAgent to play the role of the coder
print("AutoGen Coder")
coder = autogen.AssistantAgent(
name="Coder",
llm_config=llm_config,
Expand Down
2 changes: 1 addition & 1 deletion autogen_transcribe_video/transcribe.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@
is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
human_input_mode="NEVER",
max_consecutive_auto_reply=10,
code_execution_config={"work_dir": "scripts"},
code_execution_config={"work_dir": "scripts", "use_docker": False},
)

user_proxy.register_function(
Expand Down
6 changes: 5 additions & 1 deletion autogen_webscraping/vision_crawl.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,11 @@
name="User_proxy",
system_message="A human admin.",
human_input_mode="NEVER",
max_consecutive_auto_reply=0
max_consecutive_auto_reply=0,
code_execution_config={
"work_dir": "code",
"use_docker": False
}
)


Expand Down
2 changes: 1 addition & 1 deletion saas_products/password/password.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
system_message="A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved "
"by this admin.",
human_input_mode="TERMINATE",
code_execution_config={"last_n_messages": 3, "work_dir": "programming"},
code_execution_config={"last_n_messages": 3, "work_dir": "programming", "use_docker": False},
)

engineer = autogen.AssistantAgent(
Expand Down

0 comments on commit 3be3127

Please sign in to comment.