Skip to content

Commit

Permalink
Update DeepInfraChat.py needs_auth/ PollinationsAI.py + fixes xtekky#…
Browse files Browse the repository at this point in the history
  • Loading branch information
kqlio67 committed Dec 20, 2024
1 parent 3d17c3a commit 341bf95
Show file tree
Hide file tree
Showing 5 changed files with 133 additions and 131 deletions.
67 changes: 33 additions & 34 deletions g4f/Provider/DeepInfraChat.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,20 @@
from __future__ import annotations

from aiohttp import ClientSession, ClientResponseError
import json
from ..typing import AsyncResult, Messages
from .needs_auth import OpenaiAPI

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

class DeepInfraChat(OpenaiAPI):
url = "https://deepinfra.com/chat"
class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com"
api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"

api_base = "https://api.deepinfra.com/v1/openai"

working = True
needs_auth = False
supports_stream = True
supports_system_message = True
supports_message_history = True

default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
models = [
'meta-llama/Llama-3.3-70B-Instruct',
Expand Down Expand Up @@ -50,39 +47,41 @@ async def create_async_generator(
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)

headers = {
'Accept-Language': 'en-US,en;q=0.9',
'Content-Type': 'application/json',
'Origin': 'https://deepinfra.com',
'Referer': 'https://deepinfra.com/',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
'X-Deepinfra-Source': 'web-page',
'accept': 'text/event-stream',
}

data = {
'model': model,
'messages': messages,
'stream': True
}

async with ClientSession(headers=headers) as session:
data = {
"model": model,
"messages": messages,
"stream": True
}

async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line:
decoded_line = line.decode('utf-8').strip()
if decoded_line.startswith('data:'):
json_part = decoded_line[5:].strip()
if json_part == '[DONE]':
break
try:
data = json.loads(json_part)
choices = data.get('choices', [])
if choices:
delta = choices[0].get('delta', {})
content = delta.get('content', '')
if content:
yield content
except json.JSONDecodeError:
print(f"JSON decode error: {json_part}")
async for chunk in response.content:
if chunk:
chunk_text = chunk.decode()
try:
# Handle streaming response
if chunk_text.startswith("data: "):
if chunk_text.strip() == "data: [DONE]":
continue
chunk_data = json.loads(chunk_text[6:])
if content := chunk_data["choices"][0]["delta"].get("content"):
yield content
# Handle non-streaming response
else:
chunk_data = json.loads(chunk_text)
if content := chunk_data["choices"][0]["message"].get("content"):
yield content
except (json.JSONDecodeError, KeyError):
continue
3 changes: 3 additions & 0 deletions g4f/Provider/PollinationsAI.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ class PollinationsAI(OpenaiAPI):
default_model = "openai"
default_image_model = "flux"

image_models = []
models = []

additional_models_image = ["midjourney", "dall-e-3"]
additional_models_text = ["sur", "sur-mistral", "claude"]
model_aliases = {
Expand Down
110 changes: 96 additions & 14 deletions g4f/Provider/needs_auth/DeepInfra.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,34 +2,40 @@

import requests
from ...typing import AsyncResult, Messages
from .OpenaiAPI import OpenaiAPI
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...requests import StreamSession, raise_for_status
from ...image import ImageResponse

class DeepInfra(OpenaiAPI):
label = "DeepInfra"
class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com"
working = True
api_base = "https://api.deepinfra.com/v1/openai",
needs_auth = True
supports_stream = True
supports_message_history = True
default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"

default_image_model = ''
image_models = []
models = []

@classmethod
def get_models(cls, **kwargs):
    """Fetch and cache the featured DeepInfra model lists.

    On first call, queries the public featured-models endpoint and
    populates ``cls.text_models`` (entries with ``type ==
    "text-generation"``) and ``cls.image_models`` (entries with
    ``reported_type == "text-to-image"``); ``cls.models`` is their
    concatenation and is returned on every call.

    Returns:
        list[str]: cached union of text and image model names.
    """
    if not cls.models:
        url = 'https://api.deepinfra.com/models/featured'
        # Fail loudly on an HTTP error instead of crashing later with a
        # JSON decode error on an HTML error page.
        response = requests.get(url)
        response.raise_for_status()
        models = response.json()
        # NOTE: the old pre-split assignment of cls.models (text-only) was
        # removed; cls.models must be built from both sublists below.
        cls.text_models = [model['model_name'] for model in models if model["type"] == "text-generation"]
        cls.image_models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"]
        cls.models = cls.text_models + cls.image_models
    return cls.models

@classmethod
def create_async_generator(
async def create_text_completion(
cls,
model: str,
messages: Messages,
stream: bool,
temperature: float = 0.7,
max_tokens: int = 1028,
api_base: str = "https://api.deepinfra.com/v1/openai",
**kwargs
) -> AsyncResult:
headers = {
Expand All @@ -47,11 +53,87 @@ def create_async_generator(
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
return super().create_async_generator(
model, messages,
stream=stream,
temperature=temperature,
max_tokens=max_tokens,
headers=headers,

data = {
"model": model,
"messages": messages,
"stream": stream,
"temperature": temperature,
"max_tokens": max_tokens,
**kwargs
)
}

async with StreamSession(headers=headers) as session:
async with session.post(f"{api_base}/chat/completions", json=data) as response:
await raise_for_status(response)
yield response

@classmethod
async def create_image(
    cls,
    prompt: str,
    model: str,
    api_key: str = None,
    api_base: str = "https://api.deepinfra.com/v1/inference",
    proxy: str = None,
    timeout: int = 180,
    extra_data: dict = None,
    **kwargs
) -> ImageResponse:
    """Generate image(s) for *prompt* with a DeepInfra image model.

    Args:
        prompt: Text prompt sent to the model.
        model: Model name or alias; resolved via ``cls.get_model``.
        api_key: Optional bearer token; sent as an Authorization header.
        api_base: Inference endpoint root (trailing slash tolerated).
        proxy: Optional proxy URL applied to all requests.
        timeout: Request timeout in seconds.
        extra_data: Extra keys merged into the request payload.

    Returns:
        ImageResponse wrapping the returned image URL(s) and the prompt.

    Raises:
        RuntimeError: if the response contains no image data.
    """
    # Fix: a mutable `{}` default is shared across calls and could be
    # mutated by one caller and observed by the next — use None sentinel.
    extra_data = {} if extra_data is None else extra_data

    headers = {
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US',
        'Connection': 'keep-alive',
        'Origin': 'https://deepinfra.com',
        'Referer': 'https://deepinfra.com/',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-site',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
        'X-Deepinfra-Source': 'web-embed',
        'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
    }

    if api_key is not None:
        headers["Authorization"] = f"Bearer {api_key}"

    # Resolve aliases before building the URL (hoisted out of the session
    # context — it does not depend on the connection).
    model = cls.get_model(model)

    async with StreamSession(
        proxies={"all": proxy},
        headers=headers,
        timeout=timeout
    ) as session:
        data = {"prompt": prompt, **extra_data}
        # The default image model expects the payload wrapped in "input"
        # — presumably a provider-specific schema; TODO confirm.
        data = {"input": data} if model == cls.default_image_model else data

        async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
            await raise_for_status(response)
            data = await response.json()
            # Providers vary in which key carries the result.
            images = data.get("output", data.get("images", data.get("image_url")))
            if not images:
                raise RuntimeError(f"Response: {data}")
            # Unwrap a single-element list to a bare URL for convenience.
            images = images[0] if len(images) == 1 else images
            return ImageResponse(images, prompt)

@classmethod
async def create_async_generator(
    cls,
    model: str,
    messages: Messages,
    stream: bool = True,
    prompt: str = None,
    **kwargs
) -> AsyncResult:
    """Dispatch a request to image generation or chat completion.

    If *model* is one of ``cls.image_models``, a single ImageResponse is
    yielded; otherwise the text-completion stream is forwarded as-is.
    """
    if model not in cls.image_models:
        # Text path: relay every chunk from the completion stream.
        completion = cls.create_text_completion(
            model=model,
            messages=messages,
            stream=stream,
            **kwargs
        )
        async for chunk in completion:
            yield chunk
        return

    # Image path: default the prompt to the latest message's content.
    if prompt is None:
        prompt = messages[-1]["content"]
    yield await cls.create_image(prompt=prompt, model=model, **kwargs)
81 changes: 0 additions & 81 deletions g4f/Provider/needs_auth/DeepInfraImage.py

This file was deleted.

3 changes: 1 addition & 2 deletions g4f/Provider/needs_auth/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
from .Cerebras import Cerebras
from .CopilotAccount import CopilotAccount
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .Gemini import Gemini
from .GeminiPro import GeminiPro
from .GithubCopilot import GithubCopilot
Expand All @@ -26,4 +25,4 @@
from .Theb import Theb
from .ThebApi import ThebApi
from .WhiteRabbitNeo import WhiteRabbitNeo
from .xAI import xAI
from .xAI import xAI

0 comments on commit 341bf95

Please sign in to comment.