diff --git a/autogen_teaching/OAI_CONFIG_LIST.json b/autogen_teaching/OAI_CONFIG_LIST.json
deleted file mode 100644
index d5c274b..0000000
--- a/autogen_teaching/OAI_CONFIG_LIST.json
+++ /dev/null
@@ -1,6 +0,0 @@
-[
-    {
-        "model": "gpt-3.5-turbo",
-        "api_key": "sk-1111"
-    }
-]
\ No newline at end of file
diff --git a/autogen_teaching/main.py b/autogen_teaching/main.py
deleted file mode 100644
index 743b5a5..0000000
--- a/autogen_teaching/main.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# imports
-from typing import Annotated
-import autogen
-
-# define the tasks
-task1 = """
-    Find arxiv papers that show how are people studying trust calibration in AI based systems
-"""
-task2 = """
-    Analyze the above the results to list the application domains studied by these papers.
-"""
-task3 = """
-    Use this data to generate a bar chart of domains and number of papers in that domain and save to a file
-"""
-task4 = """
-    Reflect on the sequence and create a recipe containing all the steps
-    necessary and name for it. Suggest well-documented, generalized python function(s)
-    to perform similar tasks for coding steps in future. Make sure coding steps and
-    non-coding steps are never mixed in one function. In the docstr of the function(s),
-    clarify what non-coding steps are needed to use the language skill of the assistant.
-"""
-
-# create the llm config
-llm_config = {
-    "timeout": 120,
-    "cache_seed": 43,
-    "config_list": autogen.config_list_from_json(
-        "OAI_CONFIG_LIST.json",
-        filter_dict={"model": ["gpt-3.5-turbo"]},
-    ),
-    "temperature": 0,
-}
-
-# create the agents
-assistant = autogen.AssistantAgent(
-    name="Assistant",
-    llm_config=llm_config,
-    is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False
-)
-
-assistant_create_recipe = autogen.AssistantAgent(
-    name="Recipe Assistant",
-    llm_config=llm_config,
-    is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False
-)
-
-user = autogen.UserProxyAgent(
-    name="User",
-    human_input_mode="NEVER",
-    max_consecutive_auto_reply=10,
-    code_execution_config={
-        "work_dir": "teaching",
-        "use_docker": False
-    }
-)
-
-
-# create a simple function
-@user.register_for_execution()
-@assistant_create_recipe.register_for_llm(description="Recipe Assistant.")
-def save_recipe(recipe: Annotated[str, "Save the Recipe"]) -> str:
-    with open('new_recipe.txt', 'w') as file:
-        file.write(recipe)
-    return recipe
-
-
-# initiate the chats
-user.initiate_chat(assistant, message=task1)
-user.initiate_chat(assistant, message=task2, clear_history=False)
-user.initiate_chat(assistant, message=task3, clear_history=False)
-user.initiate_chat(assistant_create_recipe, message=task4, clear_history=False)
-
-# initiate chat with the recipe from the file
-# with open('./new_recipe.txt', 'r') as file:
-#     file_content = file.read()
-#
-# user.initiate_chat(assistant, message=file_content)
-
-
-
-
-
-
-
-
diff --git a/autogen_teaching/teaching/generate_bar_chart.py b/autogen_teaching/teaching/generate_bar_chart.py
deleted file mode 100644
index 17b2a4a..0000000
--- a/autogen_teaching/teaching/generate_bar_chart.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# filename: generate_bar_chart.py
-import matplotlib.pyplot as plt
-
-# Data for application domains and number of papers
-domains = [
-    "AI-assisted decision-making",
-    "User trust in AI",
-    "Human-AI collaboration",
-    "Human-robot teams",
-    "Explainable robots",
-    "Model calibration",
-    "Trust explanations",
-    "HCI",
-    "Team cohesion",
-    "Anthropomorphic agents"
-]
-
-num_papers = [2, 1, 2, 1, 1, 1, 1, 1, 1, 1]
-
-# Create a bar chart
-plt.figure(figsize=(12, 6))
-plt.bar(domains, num_papers, color='skyblue')
-plt.xlabel('Application Domains')
-plt.ylabel('Number of Papers')
-plt.title('Distribution of Papers Across Application Domains')
-plt.xticks(rotation=45, ha='right')
-plt.tight_layout()
-
-# Save the bar chart to a file
-plt.savefig('papers_distribution.png')
-
-plt.show()
\ No newline at end of file
diff --git a/autogen_teaching/teaching/gpt_application_domains_chart.png b/autogen_teaching/teaching/gpt_application_domains_chart.png
deleted file mode 100644
index 2b635c2..0000000
Binary files a/autogen_teaching/teaching/gpt_application_domains_chart.png and /dev/null differ
diff --git a/autogen_teaching/teaching/gpt_application_domains_chart.py b/autogen_teaching/teaching/gpt_application_domains_chart.py
deleted file mode 100644
index bafe23c..0000000
--- a/autogen_teaching/teaching/gpt_application_domains_chart.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# filename: gpt_application_domains_chart.py
-
-import matplotlib.pyplot as plt
-
-# Define the application domains for GPT models
-domains_gpt = [
-    "Natural Language Understanding",
-    "Text Generation",
-    "Language Translation",
-    "Conversational Agents",
-    "Question Answering",
-    "Summarization",
-    "Language Modeling",
-    "Sentiment Analysis",
-    "Information Retrieval",
-    "Speech Recognition"
-]
-
-num_papers_gpt = [25, 20, 15, 10, 8, 7, 6, 5, 4, 3]
-
-# Create a bar chart for GPT model application domains
-plt.figure(figsize=(12, 6))
-plt.bar(domains_gpt, num_papers_gpt, color='lightcoral')
-plt.xlabel('Application Domains')
-plt.ylabel('Number of Papers')
-plt.title('Distribution of Papers Across Application Domains for GPT Models')
-plt.xticks(rotation=45, ha='right')
-plt.tight_layout()
-
-# Save the bar chart as an image file
-plt.savefig('gpt_application_domains_chart.png')
-
-# Display the bar chart
-plt.show()
\ No newline at end of file
diff --git a/autogen_teaching/teaching/papers_distribution.png b/autogen_teaching/teaching/papers_distribution.png
deleted file mode 100644
index 0b2a9ed..0000000
Binary files a/autogen_teaching/teaching/papers_distribution.png and /dev/null differ
diff --git a/autogen_teaching/teaching/search_arxiv_papers.py b/autogen_teaching/teaching/search_arxiv_papers.py
deleted file mode 100644
index 20833af..0000000
--- a/autogen_teaching/teaching/search_arxiv_papers.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# filename: search_arxiv_papers.py
-import requests
-
-# Define the search query
-query = "trust calibration AI"
-
-# Make a request to the arXiv API
-url = f"http://export.arxiv.org/api/query?search_query=all:{query}&max_results=10"
-response = requests.get(url)
-
-# Parse the XML response to extract paper titles and links
-from xml.etree import ElementTree as ET
-root = ET.fromstring(response.content)
-
-for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):
-    title = entry.find('{http://www.w3.org/2005/Atom}title').text
-    link = entry.find('{http://www.w3.org/2005/Atom}id').text
-    print(f"Title: {title}\nLink: {link}\n")
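
Two notes on the deleted main.py, with sketches rather than drop-in replacements. First, the termination check `"TERMINATE" in x.get("content")` raises a TypeError whenever a message arrives with content=None, which tool-call messages can; a None-safe variant of the two-decorator tool-registration pattern is sketched below, assuming the pyautogen 0.2-era API. The config path and agent names match the deleted file; the tool description and the annotation text are illustrative.

# Sketch: None-safe termination check plus the register_for_execution /
# register_for_llm pairing from the deleted main.py (pyautogen 0.2-era API).
from typing import Annotated

import autogen

llm_config = {
    "config_list": autogen.config_list_from_json("OAI_CONFIG_LIST.json"),
    "temperature": 0,
}

assistant_create_recipe = autogen.AssistantAgent(
    name="Recipe Assistant",
    llm_config=llm_config,
    # Guard against content=None; the deleted lambda would raise TypeError here.
    is_termination_msg=lambda msg: "TERMINATE" in (msg.get("content") or ""),
)

user = autogen.UserProxyAgent(
    name="User",
    human_input_mode="NEVER",
    code_execution_config={"work_dir": "teaching", "use_docker": False},
)


# The user proxy executes the tool; the assistant receives its schema so the
# LLM can call it. The description below is illustrative, not from the diff.
@user.register_for_execution()
@assistant_create_recipe.register_for_llm(description="Save a recipe of reusable steps to disk.")
def save_recipe(recipe: Annotated[str, "The recipe text to save"]) -> str:
    with open("new_recipe.txt", "w") as file:
        file.write(recipe)
    return recipe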
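
Second, the deleted search_arxiv_papers.py runs, but it relies on requests' implicit URL encoding of the query and calls requests.get with no timeout or status check. A hardened sketch against the same endpoint and Atom namespace follows; the title normalization is an addition, since titles in the arXiv feed can contain internal newlines.

# Sketch: hardened variant of the deleted search_arxiv_papers.py.
from urllib.parse import quote
from xml.etree import ElementTree as ET

import requests

ATOM = "{http://www.w3.org/2005/Atom}"
query = "trust calibration AI"

# Encode the query explicitly rather than relying on implicit encoding,
# and bound the wait so a stalled connection cannot hang the run.
url = f"http://export.arxiv.org/api/query?search_query=all:{quote(query)}&max_results=10"
response = requests.get(url, timeout=30)
response.raise_for_status()

root = ET.fromstring(response.content)
for entry in root.findall(f"{ATOM}entry"):
    # Collapse any internal newlines in the title to single spaces.
    title = " ".join(entry.findtext(f"{ATOM}title", "").split())
    link = entry.findtext(f"{ATOM}id", "")
    print(f"Title: {title}\nLink: {link}\n")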