Commit

Merge pull request #2532 from kqlio67/main
Enhance Provider Performance
hlohaus authored Jan 3, 2025
2 parents 63a81fd + f028acf commit 04624f2
Showing 13 changed files with 362 additions and 396 deletions.
54 changes: 31 additions & 23 deletions docs/providers-and-models.md

Large diffs are not rendered by default.

1 change: 0 additions & 1 deletion g4f/Provider/Airforce.py
@@ -46,7 +46,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]
model_aliases = {
# Alias mappings for models
"gpt-4": "gpt-4o",
"openchat-3.5": "openchat-3.5-0106",
"deepseek-coder": "deepseek-coder-6.7b-instruct",
"hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
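The Airforce hunk above drops the "gpt-4": "gpt-4o" alias, so requests for "gpt-4" are no longer silently rewritten to "gpt-4o". A minimal sketch of how this style of alias lookup behaves (an illustrative helper, not the repository's actual implementation):

# Hypothetical helper mirroring ProviderModelMixin-style alias lookup;
# names and the default are illustrative, not g4f's exact code.
model_aliases = {
    "openchat-3.5": "openchat-3.5-0106",
    "deepseek-coder": "deepseek-coder-6.7b-instruct",
    "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
}

def resolve_model(model: str, default: str = "gpt-4o") -> str:
    """Map a user-facing alias to the provider's concrete model id."""
    if not model:
        return default
    return model_aliases.get(model, model)

print(resolve_model("deepseek-coder"))  # deepseek-coder-6.7b-instruct
print(resolve_model("gpt-4"))           # gpt-4: no longer rewritten to gpt-4o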
422 changes: 189 additions & 233 deletions g4f/Provider/Blackbox.py

Large diffs are not rendered by default.

7 changes: 3 additions & 4 deletions g4f/Provider/ChatGptEs.py
@@ -54,10 +54,8 @@ async def create_async_generator(
post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0]

formatted_prompt = format_prompt(messages)

conversation_history = [
"Human: You are a helpful AI assistant. Please respond in the same language that the user uses in their message. Provide accurate, relevant and helpful information while maintaining a friendly and professional tone. If you're not sure about something, please acknowledge that and provide the best information you can while noting any uncertainties. Focus on being helpful while respecting the user's choice of language."
]

conversation_history = []

for message in messages[:-1]:
if message['role'] == "user":
@@ -66,6 +64,7 @@ async def create_async_generator(
conversation_history.append(f"AI: {message['content']}")

payload = {
'wpaicg_user_agree': '1',
'_wpnonce': nonce_,
'post_id': post_id,
'url': cls.url,
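Taken together, the two ChatGptEs hunks stop injecting a hard-coded English system prompt (the history now starts empty) and add a user-agreement flag to the form payload. A sketch of the resulting request flow; the admin-ajax endpoint path and the message/history field names are assumptions for illustration:

# Sketch of the request ChatGptEs assembles after this change. Field names
# marked as assumed are guesses; the rest come from the hunks above.
import aiohttp

async def send_message(messages: list, nonce: str, post_id: str,
                       url: str = "https://chatgpt.es") -> str:
    # Replay prior turns verbatim; no system prompt is prepended anymore.
    conversation_history = []
    for message in messages[:-1]:
        role = "Human" if message["role"] == "user" else "AI"
        conversation_history.append(f"{role}: {message['content']}")

    payload = {
        "wpaicg_user_agree": "1",    # newly added agreement flag
        "_wpnonce": nonce,           # scraped from the chat page
        "post_id": post_id,          # scraped via data-post-id="..."
        "url": url,
        "message": messages[-1]["content"],               # assumed field name
        "chat_history": "\n".join(conversation_history),  # assumed field name
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{url}/wp-admin/admin-ajax.php",
                                data=payload) as response:
            response.raise_for_status()
            return await response.text()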
136 changes: 58 additions & 78 deletions g4f/Provider/DDG.py
@@ -1,25 +1,10 @@
from __future__ import annotations

from aiohttp import ClientSession, ClientTimeout, ClientError
import json
import aiohttp
from aiohttp import ClientSession, BaseConnector

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
from .helper import format_prompt
from ..requests.aiohttp import get_connector
from ..requests.raise_for_status import raise_for_status

MODELS = [
{"model":"gpt-4o","modelName":"GPT-4o","modelVariant":None,"modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"4"},
{"model":"gpt-4o-mini","modelName":"GPT-4o","modelVariant":"mini","modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"3"},
{"model":"claude-3-5-sonnet-20240620","modelName":"Claude 3.5","modelVariant":"Sonnet","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"7"},
{"model":"claude-3-opus-20240229","modelName":"Claude 3","modelVariant":"Opus","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"2"},
{"model":"claude-3-haiku-20240307","modelName":"Claude 3","modelVariant":"Haiku","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"1"},
{"model":"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo","modelName":"Llama 3.1","modelVariant":"70B","modelStyleId":"llama-3","createdBy":"Meta","moderationLevel":"MEDIUM","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"5"},
{"model":"mistralai/Mixtral-8x7B-Instruct-v0.1","modelName":"Mixtral","modelVariant":"8x7B","modelStyleId":"mixtral","createdBy":"Mistral AI","moderationLevel":"LOW","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"6"},
{"model":"Qwen/Qwen2.5-Coder-32B-Instruct","modelName":"Qwen 2.5 Coder","modelVariant":"32B","modelStyleId":"qwen","createdBy":"Alibaba Cloud","moderationLevel":"LOW","isAvailable":0,"isOpenSource":1,"inputCharLimit":16e3,"settingId":"90"}
]

class Conversation(BaseConversation):
vqd: str = None
@@ -32,32 +17,45 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
label = "DuckDuckGo AI Chat"
url = "https://duckduckgo.com/aichat"
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"

working = True
needs_auth = False
supports_stream = True
supports_system_message = True
supports_message_history = True

default_model = "gpt-4o-mini"
models = [model.get("model") for model in MODELS]
models = [default_model, "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]

model_aliases = {
"gpt-4": "gpt-4o-mini",
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"gpt-4": "gpt-4o-mini",
}

@classmethod
async def get_vqd(cls, proxy: str, connector: BaseConnector = None):
status_url = "https://duckduckgo.com/duckchat/v1/status"
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
'Accept': 'text/event-stream',
'x-vqd-accept': '1'
}
async with aiohttp.ClientSession(connector=get_connector(connector, proxy)) as session:
async with session.get(status_url, headers=headers) as response:
await raise_for_status(response)
return response.headers.get("x-vqd-4")
async def fetch_vqd(cls, session: ClientSession) -> str:
"""
Fetches the required VQD token for the chat session.
Args:
session (ClientSession): The active HTTP session.
Returns:
str: The VQD token.
Raises:
Exception: If the token cannot be fetched.
"""
async with session.get("https://duckduckgo.com/duckchat/v1/status", headers={"x-vqd-accept": "1"}) as response:
if response.status == 200:
vqd = response.headers.get("x-vqd-4", "")
if not vqd:
raise Exception("Failed to fetch VQD token: Empty token.")
return vqd
else:
raise Exception(f"Failed to fetch VQD token: {response.status} {await response.text()}")

@classmethod
async def create_async_generator(
@@ -67,66 +65,48 @@ async def create_async_generator(
conversation: Conversation = None,
return_conversation: bool = False,
proxy: str = None,
connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)

is_new_conversation = False
if conversation is None:
conversation = Conversation(model)
is_new_conversation = True

if conversation.vqd is None:
conversation.vqd = await cls.get_vqd(proxy, connector)
if not conversation.vqd:
raise Exception("Failed to obtain VQD token")

headers = {
'accept': 'text/event-stream',
'content-type': 'application/json',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
'x-vqd-4': conversation.vqd,
"Content-Type": "application/json",
}
async with ClientSession(headers=headers, timeout=ClientTimeout(total=30)) as session:
# Fetch VQD token
if conversation is None:
conversation = Conversation(model)

async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
if is_new_conversation:
conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
else:
if len(messages) >= 2:
conversation.message_history = [
*conversation.message_history,
messages[-2],
messages[-1]
]
elif len(messages) == 1:
conversation.message_history = [
*conversation.message_history,
messages[-1]
]
if conversation.vqd is None:
conversation.vqd = await cls.fetch_vqd(session)

headers["x-vqd-4"] = conversation.vqd

if return_conversation:
yield conversation

data = {
if len(messages) >= 2:
conversation.message_history.extend([messages[-2], messages[-1]])
elif len(messages) == 1:
conversation.message_history.append(messages[-1])

payload = {
"model": conversation.model,
"messages": conversation.message_history
"messages": conversation.message_history,
}

async with session.post(cls.api_endpoint, json=data) as response:
conversation.vqd = response.headers.get("x-vqd-4")
await raise_for_status(response)

async for line in response.content:
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith('data: '):
json_str = decoded_line[6:]
if json_str == '[DONE]':
break
try:
async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
conversation.vqd = response.headers.get("x-vqd-4")
response.raise_for_status()
async for line in response.content:
line = line.decode("utf-8").strip()
if line.startswith("data:"):
try:
json_data = json.loads(json_str)
if 'message' in json_data:
yield json_data['message']
message = json.loads(line[5:].strip())
if "message" in message:
yield message["message"]
except json.JSONDecodeError:
pass
continue
except ClientError as e:
raise Exception(f"HTTP ClientError occurred: {e}")
except asyncio.TimeoutError:
raise Exception("Request timed out.")
2 changes: 1 addition & 1 deletion g4f/Provider/FreeGpt.py
@@ -26,7 +26,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
supports_system_message = True

default_model = 'gemini-pro'
default_model = 'gemini-1.5-pro'

@classmethod
async def create_async_generator(
2 changes: 1 addition & 1 deletion g4f/Provider/GizAI.py
@@ -18,7 +18,7 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):

default_model = 'chat-gemini-flash'
models = [default_model]
model_aliases = {"gemini-flash": "chat-gemini-flash",}
model_aliases = {"gemini-1.5-flash": "chat-gemini-flash",}

@classmethod
def get_model(cls, model: str) -> str:
49 changes: 34 additions & 15 deletions g4f/Provider/Liaobots.py
@@ -18,8 +18,8 @@
"tokenLimit": 7800,
"context": "8K",
},
"gpt-4o-2024-08-06": {
"id": "gpt-4o-2024-08-06",
"gpt-4o-2024-11-20": {
"id": "gpt-4o-2024-11-20",
"name": "GPT-4o",
"model": "ChatGPT",
"provider": "OpenAI",
@@ -54,9 +54,9 @@
"tokenLimit": 100000,
"context": "128K",
},
"grok-beta": {
"id": "grok-beta",
"name": "Grok-Beta",
"grok-2": {
"id": "grok-2",
"name": "Grok-2",
"model": "Grok",
"provider": "x.ai",
"maxLength": 400000,
@@ -99,7 +99,7 @@
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-opus-20240229-t": {
"claude-3-opus-20240229-t": {
"id": "claude-3-opus-20240229-t",
"name": "Claude-3-Opus-T",
"model": "Claude",
@@ -109,14 +109,32 @@
"context": "200K",
},
"claude-3-5-sonnet-20241022-t": {
"id": "claude-3-5-sonnet-20241022-t",
"id": "claude-3-5-sonnet-20241022-t",
"name": "Claude-3.5-Sonnet-V2-T",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"gemini-2.0-flash-exp": {
"id": "gemini-2.0-flash-exp",
"name": "Gemini-2.0-Flash-Exp",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
},
"gemini-2.0-flash-thinking-exp": {
"id": "gemini-2.0-flash-thinking-exp",
"name": "Gemini-2.0-Flash-Thinking-Exp",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
},
"gemini-1.5-flash-002": {
"id": "gemini-1.5-flash-002",
"name": "Gemini-1.5-Flash-1M",
@@ -128,29 +146,28 @@
},
"gemini-1.5-pro-002": {
"id": "gemini-1.5-pro-002",
"name": "Gemini-1.5-Pro-1M",
"name": "Gemini-1.5-Pro-1M",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
}
},
}


class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site"
working = True
supports_message_history = True
supports_system_message = True

default_model = "gpt-4o-2024-08-06"
default_model = "gpt-4o-2024-11-20"
models = list(models.keys())
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-2024-08-06",
"gpt-4o": default_model,
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gpt-4": "gpt-4o-2024-08-06",
"gpt-4": default_model,

"o1-preview": "o1-preview-2024-09-12",
"o1-mini": "o1-mini-2024-09-12",
@@ -162,8 +179,10 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"claude-3-opus": "claude-3-opus-20240229-t",
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",

"gemini-flash": "gemini-1.5-flash-002",
"gemini-pro": "gemini-1.5-pro-002"
"gemini-2.0-flash": "gemini-2.0-flash-exp",
"gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
"gemini-1.5-flash": "gemini-1.5-flash-002",
"gemini-1.5-pro": "gemini-1.5-pro-002"
}

_auth_code = ""
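Liaobots keys every request off the registry above, with model_aliases mapping friendly names onto registry ids and default_model as the fallback. A rough sketch of that resolution, excerpting one entry from the hunks (the helper itself is a simplification, not the provider's actual code):

# Illustrative alias -> registry lookup; the entry and alias are excerpted
# from the hunks above, the helper is a simplified stand-in.
models = {
    "gemini-2.0-flash-exp": {
        "id": "gemini-2.0-flash-exp",
        "name": "Gemini-2.0-Flash-Exp",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    },
}
model_aliases = {"gemini-2.0-flash": "gemini-2.0-flash-exp"}
default_model = "gpt-4o-2024-11-20"

def get_model_entry(model: str) -> dict:
    """Resolve alias -> concrete id -> registry entry with its metadata."""
    model = model_aliases.get(model, model) or default_model
    if model not in models:
        raise ValueError(f"Unknown Liaobots model: {model}")
    return models[model]

entry = get_model_entry("gemini-2.0-flash")
print(entry["name"], entry["context"])  # Gemini-2.0-Flash-Exp 1024K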
6 changes: 4 additions & 2 deletions g4f/Provider/needs_auth/GeminiPro.py
@@ -27,8 +27,10 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
default_vision_model = default_model
fallback_models = [default_model, "gemini-2.0-flash-exp", "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"gemini-flash": "gemini-1.5-flash-8b",
"gemini-1.5-flash": "gemini-1.5-flash",
"gemini-1.5-flash": "gemini-1.5-flash-8b",
"gemini-1.5-pro": "gemini-pro",
"gemini-2.0-flash": "gemini-2.0-flash-exp",
}

@classmethod
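One detail worth flagging in the GeminiPro hunk: both the removed and the added alias lines repeat a key, and a Python dict literal silently keeps only the last value for a duplicated key, so just one mapping from each pair actually takes effect:

# Duplicate keys in a Python dict literal are legal, but only the last
# assignment survives; the earlier pair is silently discarded.
model_aliases = {
    "gemini-1.5-flash": "gemini-1.5-flash",
    "gemini-1.5-flash": "gemini-1.5-flash-8b",  # this value wins
}
print(model_aliases)       # {'gemini-1.5-flash': 'gemini-1.5-flash-8b'}
print(len(model_aliases))  # 1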
@@ -2,10 +2,9 @@

from .OpenaiAPI import OpenaiAPI

class glhfChat(OpenaiAPI):
label = "glhf.chat"
class GlhfChat(OpenaiAPI):
label = "GlhfChat"
url = "https://glhf.chat"
login_url = "https://glhf.chat/users/settings/api"
api_base = "https://glhf.chat/api/openai/v1"
working = True
model_aliases = {
@@ -27,4 +26,4 @@ class glhfChat(OpenaiAPI):
'Qwen2.5-7B-Instruct': 'hf:Qwen/Qwen2.5-7B-Instruct',
'SOLAR-10.7B-Instruct-v1.0': 'hf:upstage/SOLAR-10.7B-Instruct-v1.0',
'Llama-3.1-Nemotron-70B-Instruct-HF': 'hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF'
}
}
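Since GlhfChat extends OpenaiAPI, the provider speaks the standard OpenAI wire format against the api_base shown in the diff. A hedged sketch of calling that endpoint directly; the /chat/completions route, bearer-token auth, and response shape follow the usual OpenAI convention and are assumptions here:

# Hedged sketch against the OpenAI-compatible api_base from the diff.
# Route, auth header, and response shape are assumed OpenAI conventions.
import aiohttp

async def glhf_chat(prompt: str, api_key: str,
                    model: str = "hf:Qwen/Qwen2.5-7B-Instruct") -> str:
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",  # key from the login_url settings page
    }
    payload = {
        "model": model,  # aliases above map e.g. 'Qwen2.5-7B-Instruct' to this id
        "messages": [{"role": "user", "content": prompt}],
    }
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.post(
            "https://glhf.chat/api/openai/v1/chat/completions", json=payload
        ) as response:
            response.raise_for_status()
            data = await response.json()
            return data["choices"][0]["message"]["content"]

# Usage (requires a real key): asyncio.run(glhf_chat("Hello", api_key="..."))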
