Skip to content

Commit

Permalink
Merge pull request #17 from mwolfson/aws_bedrock
Browse files Browse the repository at this point in the history
add AWS bedrock
  • Loading branch information
mwolfson authored Aug 6, 2024
2 parents 6ef4097 + a09c241 commit 2d15024
Show file tree
Hide file tree
Showing 3 changed files with 185 additions and 5 deletions.
104 changes: 102 additions & 2 deletions multi_ai_hub.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,8 @@
"\n",
"This is organized into API providers, there are helper functions for:\n",
"- [**Anthropic**](#anthropic_api) | [API Docs](https://docs.anthropic.com/claude/reference/getting-started-with-the-api)\n",
"- [**AWS Bedrock**](#aws_api) | [API Docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html)\n",
"- [**Azure**](#azure_api) | [API Docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)\n",
"- [**Google**](#google_api) | [API Docs](https://ai.google.dev/)\n",
"- [**OpenAI**](#openai_api) | [API Docs](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)\n",
"- [**Perplexity**](#pplx_api) | [API Docs](https://docs.perplexity.ai/)\n",
Expand Down Expand Up @@ -528,6 +530,91 @@
"# print(generate_text_azure(\"you are a pirate\", \"say hello and return the message in uppercase\", \"gpt-4\"))"
]
},
{
"cell_type": "markdown",
"id": "2d6825da",
"metadata": {},
"source": [
"## <a name=\"aws_api\"></a>Setup AWS Bedrock\n",
"\n",
"Check the [docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html), and get a project setup. You will need to setup a project, and request access to the models you wish to use.\n",
"\n",
"You will need 2 values with environment variables having the following names:\n",
"\n",
"- AWS_ACCESS_KEY_ID,\n",
"- AWS_SECRET_ACCESS_KEY\n",
"\n",
"### Import SDK \n",
"\n",
"`pip install boto3 requests`"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2e87248a",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import boto3\n",
"\n",
"# Fetch AWS credentials from environment variables\n",
"AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')\n",
"AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')\n",
"AWS_REGION = os.getenv('AWS_REGION', 'us-east-1') # Default to us-east-1 if not set\n",
"\n",
"# # Ensure credentials are set\n",
"# if not AWS_ACCESS_KEY_ID or not AWS_SECRET_ACCESS_KEY:\n",
"# raise ValueError(\"AWS credentials not found in environment variables\")\n",
"\n",
"# Create a Bedrock client\n",
"bedrock_client = boto3.client(\n",
" service_name='bedrock-runtime',\n",
" region_name=AWS_REGION,\n",
" aws_access_key_id=AWS_ACCESS_KEY_ID,\n",
" aws_secret_access_key=AWS_SECRET_ACCESS_KEY\n",
")\n",
"\n",
"def generate_text_aws(pre, prompt, model=\"ai21.j2-mid-v1\"):\n",
" body = json.dumps({\n",
" \"prompt\": pre + prompt,\n",
" \"maxTokens\": 2048,\n",
" \"temperature\": 0.1,\n",
" \"topP\": 1,\n",
" \"stopSequences\": [],\n",
" \"countPenalty\": {\"scale\": 0},\n",
" \"presencePenalty\": {\"scale\": 0},\n",
" \"frequencyPenalty\": {\"scale\": 0}\n",
" })\n",
" \n",
" response = bedrock_client.invoke_model(\n",
" modelId='ai21.j2-mid-v1',\n",
" body=body\n",
" )\n",
" \n",
" response_body = json.loads(response['body'].read())\n",
" return response_body['completions'][0]['data']['text']"
]
},
{
"cell_type": "markdown",
"id": "e1cc9d70",
"metadata": {},
"source": [
"### Test the AWS Endpoint directly"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e5172b33",
"metadata": {},
"outputs": [],
"source": [
"# print(generate_text_aws(\"you are a pirate\", \"say hello and return the message in uppercase\", \"ai21.j2-mid-v1\"))"
]
},
{
"cell_type": "markdown",
"id": "55e643db",
Expand Down Expand Up @@ -557,6 +644,8 @@
"ANTHROPIC_OPUS = \"claude-3-opus-20240229\"\n",
"ANTHROPIC_SONNET = \"claude-3-5-sonnet-20240620\"\n",
"AZURE_GPT4 = \"gpt-4\"\n",
"AWS_JURASSIC2_MID = \"ai21.j2-mid-v1\"\n",
"AWS_LLAMA2_70B = \"meta.llama2-70b-chat-v1\"\n",
"GEMINI_PRO = \"gemini-pro\"\n",
"GEMINI_FLASH = \"gemini-1.5-flash-latest\"\n",
"OPEN_AI_GPT35TURBO = \"gpt-3.5-turbo\"\n",
Expand Down Expand Up @@ -584,6 +673,14 @@
" response = generate_text_azure(system, user + output_style, AZURE_GPT4)\n",
" return response\n",
"\n",
"def action_aws_jurassic2mid(system, user, output_style):\n",
" response = generate_text_aws(system, user + output_style, AWS_JURASSIC2_MID)\n",
" return response\n",
"\n",
"def action_aws_llama270b(system, user, output_style):\n",
" response = generate_text_aws(system, user + output_style, AWS_LLAMA2_70B)\n",
" return response\n",
"\n",
"def action_gemini_pro(system, user, output_style,):\n",
" response = generate_text_google(system + user + output_style, GEMINI_PRO)\n",
" return response\n",
Expand Down Expand Up @@ -633,6 +730,8 @@
" ANTHROPIC_OPUS: action_anthropic_opus,\n",
" ANTHROPIC_SONNET: action_anthropic_sonnet,\n",
" AZURE_GPT4: action_azure_gpt4,\n",
" AWS_JURASSIC2_MID: action_aws_jurassic2mid,\n",
" AWS_LLAMA2_70B: action_aws_llama270b,\n",
" GEMINI_PRO: action_gemini_pro,\n",
" GEMINI_FLASH: action_gemini_flash,\n",
" OPEN_AI_GPT35TURBO: action_openai_35turbo,\n",
Expand Down Expand Up @@ -720,7 +819,8 @@
"outputs": [],
"source": [
"# models = [ \n",
"# ANTHROPIC_OPUS,\n",
"# AWS_JURASSIC2_MID,\n",
"# AWS_LLAMA2_70B,\n",
"# AZURE_GPT4\n",
"# ]\n",
"\n",
Expand All @@ -744,7 +844,7 @@
"After making changes to this notebook, run the following on the command-line to create the python script to use:\n",
"\n",
"```\n",
"jupyter nbconvert --to script .\\multi_ai_hub.ipynb\n",
"jupyter nbconvert --to script ./multi_ai_hub.ipynb\n",
"```"
]
},
Expand Down
84 changes: 82 additions & 2 deletions multi_ai_hub.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@
#
# This is organized into API providers, there are helper functions for:
# - [**Anthropic**](#anthropic_api) | [API Docs](https://docs.anthropic.com/claude/reference/getting-started-with-the-api)
# - [**AWS Bedrock**](#aws_api) | [API Docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html)
# - [**Azure**](#azure_api) | [API Docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
# - [**Google**](#google_api) | [API Docs](https://ai.google.dev/)
# - [**OpenAI**](#openai_api) | [API Docs](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
# - [**Perplexity**](#pplx_api) | [API Docs](https://docs.perplexity.ai/)
Expand Down Expand Up @@ -340,6 +342,71 @@ def generate_text_azure(pre, prompt, model="gpt-4"):
# print(generate_text_azure("you are a pirate", "say hello and return the message in uppercase", "gpt-4"))


# ## <a name="aws_api"></a>Setup AWS Bedrock
#
# Check the [docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html) and set up a project, then request access to the models you wish to use.
#
# You will need the following 2 values set as environment variables:
#
# - AWS_ACCESS_KEY_ID,
# - AWS_SECRET_ACCESS_KEY
#
# ### Import SDK
#
# `pip install boto3 requests`

# In[ ]:


# --- AWS Bedrock setup (module-level side effect: creates a client on import) ---
# NOTE(review): relies on `os` being imported earlier in this file — confirm.
import json
import boto3

# Fetch AWS credentials from environment variables
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_REGION = os.getenv('AWS_REGION', 'us-east-1') # Default to us-east-1 if not set

# # Ensure credentials are set
# if not AWS_ACCESS_KEY_ID or not AWS_SECRET_ACCESS_KEY:
# raise ValueError("AWS credentials not found in environment variables")

# Create a Bedrock client
# NOTE(review): if the env vars are unset, None is passed for the keys;
# presumably boto3 then falls back to its default credential chain — confirm
# this is the intended behavior (the explicit check above is commented out).
bedrock_client = boto3.client(
service_name='bedrock-runtime',
region_name=AWS_REGION,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY
)

def generate_text_aws(pre, prompt, model="ai21.j2-mid-v1"):
    """Send a prompt to an AWS Bedrock model and return the generated text.

    Args:
        pre: System/priming text prepended to the user prompt.
        prompt: The user prompt.
        model: Bedrock modelId to invoke (defaults to AI21 Jurassic-2 Mid).

    Returns:
        The completion text extracted from the Bedrock response.
    """
    # NOTE(review): this request body (maxTokens/topP/*Penalty) and the
    # 'completions' response parsing below follow the AI21 Jurassic-2 schema.
    # Other Bedrock model families (e.g. Meta Llama 2, exposed via
    # AWS_LLAMA2_70B) use different request/response formats — confirm
    # before routing non-AI21 models through this helper.
    body = json.dumps({
        "prompt": pre + prompt,
        "maxTokens": 2048,
        "temperature": 0.1,
        "topP": 1,
        "stopSequences": [],
        "countPenalty": {"scale": 0},
        "presencePenalty": {"scale": 0},
        "frequencyPenalty": {"scale": 0}
    })

    # Bug fix: modelId was hard-coded to 'ai21.j2-mid-v1', which silently
    # ignored the caller's model selection (e.g. AWS_LLAMA2_70B would still
    # invoke Jurassic-2). Use the parameter instead.
    response = bedrock_client.invoke_model(
        modelId=model,
        body=body
    )

    response_body = json.loads(response['body'].read())
    return response_body['completions'][0]['data']['text']


# ### Test the AWS Endpoint directly

# In[ ]:


# print(generate_text_aws("you are a pirate", "say hello and return the message in uppercase", "ai21.j2-mid-v1"))


# ## Add Actions to map to different models and AI providers

# 1. Define a function for each model you want to test
Expand All @@ -353,6 +420,8 @@ def generate_text_azure(pre, prompt, model="gpt-4"):
# Model identifier constants. Each maps to an action_* wrapper in the
# dispatch table below; callers select providers by listing these names.
ANTHROPIC_OPUS = "claude-3-opus-20240229"
ANTHROPIC_SONNET = "claude-3-5-sonnet-20240620"
AZURE_GPT4 = "gpt-4"
AWS_JURASSIC2_MID = "ai21.j2-mid-v1"
AWS_LLAMA2_70B = "meta.llama2-70b-chat-v1"
GEMINI_PRO = "gemini-pro"
GEMINI_FLASH = "gemini-1.5-flash-latest"
OPEN_AI_GPT35TURBO = "gpt-3.5-turbo"
Expand Down Expand Up @@ -380,6 +449,14 @@ def action_azure_gpt4(system, user, output_style):
response = generate_text_azure(system, user + output_style, AZURE_GPT4)
return response

def action_aws_jurassic2mid(system, user, output_style):
    """Invoke AI21 Jurassic-2 Mid on AWS Bedrock with the combined prompt."""
    return generate_text_aws(system, user + output_style, AWS_JURASSIC2_MID)

def action_aws_llama270b(system, user, output_style):
    """Invoke Meta Llama 2 70B on AWS Bedrock with the combined prompt."""
    return generate_text_aws(system, user + output_style, AWS_LLAMA2_70B)

def action_gemini_pro(system, user, output_style,):
response = generate_text_google(system + user + output_style, GEMINI_PRO)
return response
Expand Down Expand Up @@ -429,6 +506,8 @@ def action_sonar_medium_online(system, user, output_style):
ANTHROPIC_OPUS: action_anthropic_opus,
ANTHROPIC_SONNET: action_anthropic_sonnet,
AZURE_GPT4: action_azure_gpt4,
AWS_JURASSIC2_MID: action_aws_jurassic2mid,
AWS_LLAMA2_70B: action_aws_llama270b,
GEMINI_PRO: action_gemini_pro,
GEMINI_FLASH: action_gemini_flash,
OPEN_AI_GPT35TURBO: action_openai_35turbo,
Expand Down Expand Up @@ -495,7 +574,8 @@ def generate_text(models, system, user, output_style):


# models = [
# ANTHROPIC_OPUS,
# AWS_JURASSIC2_MID,
# AWS_LLAMA2_70B,
# AZURE_GPT4
# ]

Expand All @@ -514,7 +594,7 @@ def generate_text(models, system, user, output_style):
# After making changes to this notebook, run the following on the command-line to create the python script to use:
#
# ```
# jupyter nbconvert --to script .\multi_ai_hub.ipynb
# jupyter nbconvert --to script ./multi_ai_hub.ipynb
# ```

# In[ ]:
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

setup(
name='multiaihub',
version='0.1.7', # Start with a version number
version='0.1.8', # Start with a version number
description='MAH makes it easy to send the same prompt to multiple LLMs',
license='Apache License 2.0',
long_description="MAH - Multi AI Hub is a project designed to make it easy to send the same prompt to multiple LLMs to help with testing and comparison.",
Expand Down

0 comments on commit 2d15024

Please sign in to comment.