Merge pull request #436 from swarmauri/master
🚢 Shipping v0.4.4
cobycloud authored Sep 18, 2024
2 parents 3b2a6a0 + 74609f9 commit 9dc2c17
Showing 16 changed files with 705 additions and 132 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/publish_dev.yml
@@ -14,7 +14,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
-        python-version: ["3.10"]
+        python-version: ["3.11"]

    env:
      # Model Provider Keys
@@ -29,6 +29,7 @@ jobs:
      DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
      SHUTTLEAI_API_KEY: ${{ secrets.SHUTTLEAI_API_KEY }}
      DEEPINFRA_API_KEY: ${{ secrets.DEEPINFRA_API_KEY }}
+      OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}

      # Database Keys
      QDRANT_URL_KEY: ${{ secrets.QDRANT_URL_KEY }}
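These workflow edits bump the test matrix to Python 3.11 and export the new OpenRouter secret alongside the existing provider keys. Code under test would read it back from the environment like any other key; a minimal sketch of the consuming side (the variable name comes from the workflow above, the lookup itself is assumed):

import os

# OPENROUTER_API_KEY is injected by the workflow's env block in CI.
openrouter_key = os.environ.get("OPENROUTER_API_KEY")
if openrouter_key is None:
    print("OPENROUTER_API_KEY not set; OpenRouter-backed tests cannot run")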
1 change: 1 addition & 0 deletions .github/workflows/publish_stable.yml
@@ -29,6 +29,7 @@ jobs:
      DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
      SHUTTLEAI_API_KEY: ${{ secrets.SHUTTLEAI_API_KEY }}
      DEEPINFRA_API_KEY: ${{ secrets.DEEPINFRA_API_KEY }}
+      OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}

      # Database Keys
      QDRANT_URL_KEY: ${{ secrets.QDRANT_URL_KEY }}
1 change: 1 addition & 0 deletions .github/workflows/staging.yml
@@ -28,6 +28,7 @@ jobs:
      DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
      SHUTTLEAI_API_KEY: ${{ secrets.SHUTTLEAI_API_KEY }}
      DEEPINFRA_API_KEY: ${{ secrets.DEEPINFRA_API_KEY }}
+      OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}

      # Database Keys
      QDRANT_URL_KEY: ${{ secrets.QDRANT_URL_KEY }}
2 changes: 1 addition & 1 deletion setup.py
@@ -33,7 +33,7 @@
        'scikit-learn',
        'gensim',
        'textblob',
-        'spacy==3.7.4',
+        'spacy',
        'pygments',
        'gradio',
        'websockets',
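Dropping the ==3.7.4 pin lets pip resolve whichever spacy release fits the rest of the dependency set. A quick check of what actually got installed, using only the standard library:

from importlib.metadata import version

# Prints the spacy version pip resolved now that the pin is gone.
print(version("spacy"))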
2 changes: 1 addition & 1 deletion swarmauri/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.4.3"
+__version__ = "0.4.4"
__long_desc__ = """
# Swarmauri SDK
4 changes: 2 additions & 2 deletions swarmauri/standard/agents/concrete/ToolAgent.py
@@ -11,12 +11,12 @@
from swarmauri.standard.messages.concrete import HumanMessage, AgentMessage, FunctionMessage

from swarmauri.core.typing import SubclassUnion
-from swarmauri.standard.toolkits.concrete.Toolkit import Toolkit
+from swarmauri.standard.toolkits.base.ToolkitBase import ToolkitBase
from swarmauri.standard.conversations.base.ConversationBase import ConversationBase

class ToolAgent(AgentToolMixin, AgentConversationMixin, AgentBase):
    llm: SubclassUnion[LLMBase]
-    toolkit: SubclassUnion[Toolkit]
+    toolkit: SubclassUnion[ToolkitBase]
    conversation: SubclassUnion[ConversationBase]  # 🚧 Placeholder
    model_config = ConfigDict(extra='forbid', arbitrary_types_allowed=True)
    type: Literal['ToolAgent'] = 'ToolAgent'
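Typing the field against ToolkitBase rather than the concrete Toolkit means any ToolkitBase subclass now passes Pydantic validation on ToolAgent. A sketch of the effect with a hypothetical toolkit (the class name is invented for illustration):

from typing import Literal

from swarmauri.standard.toolkits.base.ToolkitBase import ToolkitBase


class WeatherToolkit(ToolkitBase):  # hypothetical subclass, not part of the SDK
    type: Literal["WeatherToolkit"] = "WeatherToolkit"


# SubclassUnion[Toolkit] admitted only the concrete Toolkit and its children;
# with SubclassUnion[ToolkitBase], ToolAgent(llm=..., toolkit=WeatherToolkit(),
# conversation=...) validates as well.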
7 changes: 5 additions & 2 deletions swarmauri/standard/llms/concrete/DeepSeekModel.py
@@ -9,10 +9,13 @@


class DeepSeekModel(LLMBase):
+    """
+    Provider resources: https://platform.deepseek.com/api-docs/quick_start/pricing
+    """
+
    api_key: str
    allowed_models: List[str] = [
-        "deepseek-chat",
+        "deepseek-coder",
+        "deepseek-chat"
    ]
    name: str = "deepseek-chat"
    type: Literal["DeepSeekModel"] = "DeepSeekModel"
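With "deepseek-coder" added to allowed_models, it can be selected by name at construction time. A minimal sketch, assuming a DEEPSEEK_API_KEY in the environment and the predict(conversation) interface the other concrete models expose:

import os

from swarmauri.standard.llms.concrete.DeepSeekModel import DeepSeekModel

llm = DeepSeekModel(api_key=os.environ["DEEPSEEK_API_KEY"], name="deepseek-coder")
# llm.predict(conversation) would append the model's reply to the conversation history.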
84 changes: 45 additions & 39 deletions swarmauri/standard/llms/concrete/GeminiProModel.py
@@ -9,81 +9,87 @@


class GeminiProModel(LLMBase):
+    """
+    Provider resources: https://deepmind.google/technologies/gemini/pro/
+    """
+
    api_key: str
-    allowed_models: List[str] = ['gemini-1.5-pro-latest']
+    allowed_models: List[str] = ["gemini-1.5-pro-latest"]
    name: str = "gemini-1.5-pro-latest"
-    type: Literal['GeminiProModel'] = 'GeminiProModel'
-
-    def _format_messages(self, messages: List[SubclassUnion[MessageBase]]) -> List[Dict[str, str]]:
+    type: Literal["GeminiProModel"] = "GeminiProModel"
+
+    def _format_messages(
+        self, messages: List[SubclassUnion[MessageBase]]
+    ) -> List[Dict[str, str]]:
        # Remove system instruction from messages
-        message_properties = ['content', 'role']
-        sanitized_messages = [message.model_dump(include=message_properties) for message in messages
-                              if message.role != 'system']
+        message_properties = ["content", "role"]
+        sanitized_messages = [
+            message.model_dump(include=message_properties)
+            for message in messages
+            if message.role != "system"
+        ]

        for message in sanitized_messages:
-            if message['role'] == 'assistant':
-                message['role'] = 'model'
+            if message["role"] == "assistant":
+                message["role"] = "model"

            # update content naming
-            message['parts'] = message.pop('content')
+            message["parts"] = message.pop("content")

        return sanitized_messages

    def _get_system_context(self, messages: List[SubclassUnion[MessageBase]]) -> str:
        system_context = None
        for message in messages:
-            if message.role == 'system':
+            if message.role == "system":
                system_context = message.content
        return system_context

-    def predict(self,
-                conversation,
-                temperature=0.7,
-                max_tokens=256):
-
+    def predict(self, conversation, temperature=0.7, max_tokens=256):
        genai.configure(api_key=self.api_key)
        generation_config = {
            "temperature": temperature,
            "top_p": 0.95,
            "top_k": 0,
            "max_output_tokens": max_tokens,
-            }
+        }

        safety_settings = [
-            {
-                "category": "HARM_CATEGORY_HARASSMENT",
-                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-            },
-            {
-                "category": "HARM_CATEGORY_HATE_SPEECH",
-                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-            },
-            {
-                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-            },
-            {
-                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-            },
+            {
+                "category": "HARM_CATEGORY_HARASSMENT",
+                "threshold": "BLOCK_MEDIUM_AND_ABOVE",
+            },
+            {
+                "category": "HARM_CATEGORY_HATE_SPEECH",
+                "threshold": "BLOCK_MEDIUM_AND_ABOVE",
+            },
+            {
+                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                "threshold": "BLOCK_MEDIUM_AND_ABOVE",
+            },
+            {
+                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                "threshold": "BLOCK_MEDIUM_AND_ABOVE",
+            },
        ]

-
        system_context = self._get_system_context(conversation.history)
        formatted_messages = self._format_messages(conversation.history)

-
        next_message = formatted_messages.pop()

-        client = genai.GenerativeModel(model_name=self.name,
+        client = genai.GenerativeModel(
+            model_name=self.name,
            safety_settings=safety_settings,
            generation_config=generation_config,
-            system_instruction=system_context)
+            system_instruction=system_context,
+        )

        convo = client.start_chat(
            history=formatted_messages,
-            )
+        )

-        convo.send_message(next_message['parts'])
+        convo.send_message(next_message["parts"])

        message_content = convo.last.text
        conversation.add_message(AgentMessage(content=message_content))
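The reformat does not change behavior: system messages are still extracted and passed as system_instruction, assistant roles are renamed to model, and the final message is sent through a fresh chat session. A hedged usage sketch (the GEMINI_API_KEY name and the Conversation import path are assumptions based on the patterns used elsewhere in the SDK):

import os

from swarmauri.standard.conversations.concrete.Conversation import Conversation
from swarmauri.standard.llms.concrete.GeminiProModel import GeminiProModel
from swarmauri.standard.messages.concrete import HumanMessage

llm = GeminiProModel(api_key=os.environ["GEMINI_API_KEY"])
conversation = Conversation()
conversation.add_message(HumanMessage(content="Summarize the v0.4.4 release."))
llm.predict(conversation, temperature=0.5, max_tokens=128)
print(conversation.history[-1].content)  # AgentMessage appended by predict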
56 changes: 36 additions & 20 deletions swarmauri/standard/llms/concrete/GroqModel.py
@@ -7,33 +7,49 @@
from swarmauri.standard.messages.concrete.AgentMessage import AgentMessage
from swarmauri.standard.llms.base.LLMBase import LLMBase

+
class GroqModel(LLMBase):
+    """Provider resources: https://console.groq.com/docs/models"""
+
    api_key: str
-    allowed_models: List[str] = ['llama3-8b-8192',
-                                 'llama3-70b-8192',
-                                 'mixtral-8x7b-32768',
-                                 'gemma-7b-it']
+    allowed_models: List[str] = [
+        "llama3-8b-8192",
+        "llama3-70b-8192",
+        "mixtral-8x7b-32768",
+        "gemma-7b-it",
+        "gemma-2-9b-it",
+        "llama-3.1-70b-versatile",
+        "llama-3.1-8b-instant",
+        "llava-v1.5-7b-4096-preview",
+    ]
    name: str = "gemma-7b-it"
-    type: Literal['GroqModel'] = 'GroqModel'
+    type: Literal["GroqModel"] = "GroqModel"

-    def _format_messages(self, messages: List[SubclassUnion[MessageBase]]) -> List[Dict[str, str]]:
-        message_properties = ['content', 'role', 'name']
-        formatted_messages = [message.model_dump(include=message_properties, exclude_none=True) for message in messages]
+    def _format_messages(
+        self, messages: List[SubclassUnion[MessageBase]]
+    ) -> List[Dict[str, str]]:
+        message_properties = ["content", "role", "name"]
+        formatted_messages = [
+            message.model_dump(include=message_properties, exclude_none=True)
+            for message in messages
+        ]
        return formatted_messages

-    def predict(self,
-                conversation,
-                temperature: float = 0.7,
-                max_tokens: int = 256,
-                top_p: float = 1.0,
-                enable_json: bool = False,
-                stop: Optional[List[str]] = None) -> str:
+    def predict(
+        self,
+        conversation,
+        temperature: float = 0.7,
+        max_tokens: int = 256,
+        top_p: float = 1.0,
+        enable_json: bool = False,
+        stop: Optional[List[str]] = None,
+    ) -> str:

        formatted_messages = self._format_messages(conversation.history)

        client = Groq(api_key=self.api_key)
        stop = stop or []

        response_format = {"type": "json_object"} if enable_json else None
        response = client.chat.completions.create(
            model=self.name,
@@ -44,10 +60,10 @@ def predict(self,
            top_p=top_p,
            frequency_penalty=0,
            presence_penalty=0,
-            stop=stop
+            stop=stop,
        )

        result = json.loads(response.json())
-        message_content = result['choices'][0]['message']['content']
+        message_content = result["choices"][0]["message"]["content"]
        conversation.add_message(AgentMessage(content=message_content))
-        return conversation
+        return conversation
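Four Groq-hosted models join the allowed list, and enable_json maps to response_format={"type": "json_object"} (None when off, leaving the completion free-form). A usage sketch under the same assumptions as above (key name and conversation classes assumed):

import os

from swarmauri.standard.conversations.concrete.Conversation import Conversation
from swarmauri.standard.llms.concrete.GroqModel import GroqModel
from swarmauri.standard.messages.concrete import HumanMessage

llm = GroqModel(api_key=os.environ["GROQ_API_KEY"], name="llama-3.1-8b-instant")
conversation = Conversation()
conversation.add_message(HumanMessage(content='Reply in JSON with a "status" key.'))
conversation = llm.predict(conversation, enable_json=True)
print(conversation.history[-1].content)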
70 changes: 40 additions & 30 deletions swarmauri/standard/llms/concrete/MistralModel.py
@@ -1,66 +1,76 @@
import json
from typing import List, Literal, Dict
-from mistralai import Mistral
+from mistralai import Mistral
from swarmauri.core.typing import SubclassUnion

from swarmauri.standard.messages.base.MessageBase import MessageBase
from swarmauri.standard.messages.concrete.AgentMessage import AgentMessage
from swarmauri.standard.llms.base.LLMBase import LLMBase

+
class MistralModel(LLMBase):
+    """Provider resources: https://docs.mistral.ai/getting-started/models/"""
+
    api_key: str
-    allowed_models: List[str] = ['open-mistral-7b',
-                                 'open-mixtral-8x7b',
-                                 'open-mixtral-8x22b',
-                                 'mistral-small-latest',
-                                 'mistral-medium-latest',
-                                 'mistral-large-latest',
-                                 'codestral',
-                                 'open-mistral-nemo',
-                                 'codestral-latest',
-                                 'open-codestral-mamba',
+    allowed_models: List[str] = [
+        "open-mistral-7b",
+        "open-mixtral-8x7b",
+        "open-mixtral-8x22b",
+        "mistral-small-latest",
+        "mistral-medium-latest",
+        "mistral-large-latest",
+        "open-mistral-nemo",
+        "codestral-latest",
+        "open-codestral-mamba",
    ]
    name: str = "open-mixtral-8x7b"
-    type: Literal['MistralModel'] = 'MistralModel'
+    type: Literal["MistralModel"] = "MistralModel"

-    def _format_messages(self, messages: List[SubclassUnion[MessageBase]]) -> List[Dict[str, str]]:
-        message_properties = ['content', 'role']
-        formatted_messages = [message.model_dump(include=message_properties, exclude_none=True) for message in messages]
+    def _format_messages(
+        self, messages: List[SubclassUnion[MessageBase]]
+    ) -> List[Dict[str, str]]:
+        message_properties = ["content", "role"]
+        formatted_messages = [
+            message.model_dump(include=message_properties, exclude_none=True)
+            for message in messages
+        ]
        return formatted_messages

-    def predict(self,
-                conversation,
-                temperature: int = 0.7,
-                max_tokens: int = 256,
+    def predict(
+        self,
+        conversation,
+        temperature: int = 0.7,
+        max_tokens: int = 256,
        top_p: int = 1,
-        enable_json: bool=False,
-        safe_prompt: bool=False):
-
+        enable_json: bool = False,
+        safe_prompt: bool = False,
+    ):

        formatted_messages = self._format_messages(conversation.history)

-        client = Mistral(api_key=self.api_key)
+        client = Mistral(api_key=self.api_key)
        if enable_json:
            response = client.chat.complete(
                model=self.name,
                messages=formatted_messages,
                temperature=temperature,
-                response_format={ "type": "json_object" },
+                response_format={"type": "json_object"},
                max_tokens=max_tokens,
                top_p=top_p,
-                safe_prompt=safe_prompt
+                safe_prompt=safe_prompt,
            )
        else:
            response = client.chat.complete(
                model=self.name,
                messages=formatted_messages,
                temperature=temperature,
                max_tokens=max_tokens,
-                top_p=top_p,
-                safe_prompt=safe_prompt
+                top_p=top_p,
+                safe_prompt=safe_prompt,
            )

        result = json.loads(response.json())
-        message_content = result['choices'][0]['message']['content']
+        message_content = result["choices"][0]["message"]["content"]
        conversation.add_message(AgentMessage(content=message_content))

-        return conversation
+        return conversation
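Beyond the black reformat, the list drops 'codestral' in favor of the 'codestral-latest' alias. enable_json requests a JSON-object response and safe_prompt toggles Mistral's injected guardrail preamble; a sketch mirroring the ones above (key name and conversation setup assumed):

import os

from swarmauri.standard.conversations.concrete.Conversation import Conversation
from swarmauri.standard.llms.concrete.MistralModel import MistralModel
from swarmauri.standard.messages.concrete import HumanMessage

llm = MistralModel(api_key=os.environ["MISTRAL_API_KEY"], name="open-mistral-nemo")
conversation = Conversation()
conversation.add_message(HumanMessage(content="Return a JSON object for v0.4.4."))
conversation = llm.predict(conversation, enable_json=True, safe_prompt=True)
print(conversation.history[-1].content)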