Improve slim docker image example, clean up OpenaiChat provider (#2397)
* Improve slim docker image example, clean up OpenaiChat provider

* Enhance event loop management for asynchronous generators

* Fix attribute "shutdown_default_executor" not found in older Python versions

* Add asyncio file with all async helpers
hlohaus authored Nov 21, 2024
1 parent 4be8e69 commit e4bfd9d
Showing 9 changed files with 104 additions and 336 deletions.
9 changes: 6 additions & 3 deletions README.md
@@ -105,15 +105,18 @@ docker run \
hlohaus789/g4f:latest
```

- Or run this command to start the gui without a browser and in the debug mode:
+ Start the GUI without a browser requirement and in debug mode.
+ There's no need to update the Docker image every time.
+ Simply remove the g4f package from the image and install the Python package:
```bash
docker pull hlohaus789/g4f:latest-slim
docker run \
-p 8080:8080 \
-v ${PWD}/har_and_cookies:/app/har_and_cookies \
-v ${PWD}/generated_images:/app/generated_images \
hlohaus789/g4f:latest-slim \
-   python -m g4f.cli gui -debug
+   rm -r -f /app/g4f/ \
+   && pip install -U g4f[slim] \
+   && python -m g4f.cli gui -d
```
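Note: because the `rm`/`pip install` step runs as the container command, the bundled g4f package is replaced with the latest PyPI release each time the container starts, so the image itself rarely needs re-pulling; the two `-v` mounts keep HAR files, cookies, and generated images on the host across runs.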

3. **Access the Client:**
224 changes: 8 additions & 216 deletions g4f/Provider/needs_auth/OpenaiChat.py
@@ -15,18 +15,14 @@
has_nodriver = True
except ImportError:
has_nodriver = False
- try:
- from platformdirs import user_config_dir
- has_platformdirs = True
- except ImportError:
- has_platformdirs = False

from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
from ...requests.raise_for_status import raise_for_status
- from ...requests.aiohttp import StreamSession
+ from ...requests import StreamSession
+ from ...requests import get_nodriver
from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
- from ...errors import MissingAuthError, ResponseError
+ from ...errors import MissingAuthError
from ...providers.response import BaseConversation, FinishReason, SynthesizeData
from ..helper import format_cookies
from ..openai.har_file import get_request_config, NoValidHarFileError
@@ -62,7 +58,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
default_model = "auto"
default_vision_model = "gpt-4o"
fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
vision_models = fallback_models
image_models = fallback_models

@@ -83,51 +79,6 @@ def get_models(cls):
cls.models = cls.fallback_models
return cls.models

- @classmethod
- async def create(
- cls,
- prompt: str = None,
- model: str = "",
- messages: Messages = [],
- action: str = "next",
- **kwargs
- ) -> Response:
- """
- Create a new conversation or continue an existing one
- Args:
- prompt: The user input to start or continue the conversation
- model: The name of the model to use for generating responses
- messages: The list of previous messages in the conversation
- history_disabled: A flag indicating if the history and training should be disabled
- action: The type of action to perform, either "next", "continue", or "variant"
- conversation_id: The ID of the existing conversation, if any
- parent_id: The ID of the parent message, if any
- image: The image to include in the user input, if any
- **kwargs: Additional keyword arguments to pass to the generator
- Returns:
- A Response object that contains the generator, action, messages, and options
- """
- # Add the user input to the messages list
- if prompt is not None:
- messages.append({
- "role": "user",
- "content": prompt
- })
- generator = cls.create_async_generator(
- model,
- messages,
- return_conversation=True,
- **kwargs
- )
- return Response(
- generator,
- action,
- messages,
- kwargs
- )

@classmethod
async def upload_image(
cls,
@@ -189,32 +140,6 @@ async def upload_image
image_data["download_url"] = (await response.json())["download_url"]
return ImageRequest(image_data)

- @classmethod
- async def get_default_model(cls, session: StreamSession, headers: dict):
- """
- Get the default model name from the service
- Args:
- session: The StreamSession object to use for requests
- headers: The headers to include in the requests
- Returns:
- The default model name as a string
- """
- if not cls.default_model:
- url = f"{cls.url}/backend-anon/models" if cls._api_key is None else f"{cls.url}/backend-api/models"
- async with session.get(url, headers=headers) as response:
- cls._update_request_args(session)
- if response.status == 401:
- raise MissingAuthError('Add a .har file for OpenaiChat' if cls._api_key is None else "Invalid api key")
- await raise_for_status(response)
- data = await response.json()
- if "categories" in data:
- cls.default_model = data["categories"][-1]["default_model"]
- return cls.default_model
- raise ResponseError(data)
- return cls.default_model

@classmethod
def create_messages(cls, messages: Messages, image_request: ImageRequest = None):
"""
@@ -296,38 +221,13 @@ async def get_generated_image(cls, session: StreamSession, headers: dict, elemen
except Exception as e:
raise RuntimeError(f"Error in downloading image: {e}")

- @classmethod
- async def delete_conversation(cls, session: StreamSession, headers: dict, conversation_id: str):
- """
- Deletes a conversation by setting its visibility to False.
- This method sends an HTTP PATCH request to update the visibility of a conversation.
- It's used to effectively delete a conversation from being accessed or displayed in the future.
- Args:
- session (StreamSession): The StreamSession object used for making HTTP requests.
- headers (dict): HTTP headers to be used for the request.
- conversation_id (str): The unique identifier of the conversation to be deleted.
- Raises:
- HTTPError: If the HTTP request fails or returns an unsuccessful status code.
- """
- async with session.patch(
- f"{cls.url}/backend-api/conversation/{conversation_id}",
- json={"is_visible": False},
- headers=headers
- ) as response:
- cls._update_request_args(session)
- ...

@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 180,
api_key: str = None,
cookies: Cookies = None,
auto_continue: bool = False,
history_disabled: bool = False,
@@ -465,7 +365,6 @@ async def create_async_generator
continue
await raise_for_status(response)
if return_conversation:
- history_disabled = False
yield conversation
async for line in response.iter_lines():
async for chunk in cls.iter_messages_line(session, line, conversation):
@@ -483,19 +382,6 @@
else:
break
yield FinishReason(conversation.finish_reason)
- if history_disabled and auto_continue:
- await cls.delete_conversation(session, cls._headers, conversation.conversation_id)

- @classmethod
- async def iter_messages_chunk(
- cls,
- messages: AsyncIterator,
- session: StreamSession,
- fields: Conversation,
- ) -> AsyncIterator:
- async for message in messages:
- async for chunk in cls.iter_messages_line(session, message, fields):
- yield chunk

@classmethod
async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: Conversation) -> AsyncIterator:
@@ -575,15 +461,7 @@ async def login(cls, proxy: str = None):

@classmethod
async def nodriver_auth(cls, proxy: str = None):
- if has_platformdirs:
- user_data_dir = user_config_dir("g4f-nodriver")
- else:
- user_data_dir = None
- debug.log(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await nodriver.start(
- user_data_dir=user_data_dir,
- browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
- )
+ browser = await get_nodriver(proxy=proxy)
page = browser.main_tab
def on_request(event: nodriver.cdp.network.RequestWillBeSent):
if event.request.url == start_url or event.request.url.startswith(conversation_url):
@@ -622,14 +500,14 @@ def on_request(event: nodriver.cdp.network.RequestWillBeSent):
pass
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
RequestConfig.cookies[c.name] = c.value
- RequestConfig.user_agent = await page.evaluate("window.navigator.userAgent")
+ user_agent = await page.evaluate("window.navigator.userAgent")
await page.select("#prompt-textarea", 240)
while True:
if RequestConfig.proof_token:
break
await asyncio.sleep(1)
await page.close()
- cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=RequestConfig.user_agent)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent)
cls._set_api_key(RequestConfig.access_token)

@staticmethod
@@ -672,90 +550,4 @@ def __init__(self, conversation_id: str = None, message_id: str = None, finish_r
self.conversation_id = conversation_id
self.message_id = message_id
self.finish_reason = finish_reason
- self.is_recipient = False
-
- class Response():
- """
- Class to encapsulate a response from the chat service.
- """
- def __init__(
- self,
- generator: AsyncResult,
- action: str,
- messages: Messages,
- options: dict
- ):
- self._generator = generator
- self.action = action
- self.is_end = False
- self._message = None
- self._messages = messages
- self._options = options
- self._fields = None
-
- async def generator(self) -> AsyncIterator:
- if self._generator is not None:
- self._generator = None
- chunks = []
- async for chunk in self._generator:
- if isinstance(chunk, Conversation):
- self._fields = chunk
- else:
- yield chunk
- chunks.append(str(chunk))
- self._message = "".join(chunks)
- if self._fields is None:
- raise RuntimeError("Missing response fields")
- self.is_end = self._fields.finish_reason == "stop"
-
- def __aiter__(self):
- return self.generator()
-
- async def get_message(self) -> str:
- await self.generator()
- return self._message
-
- async def get_fields(self) -> dict:
- await self.generator()
- return {
- "conversation_id": self._fields.conversation_id,
- "parent_id": self._fields.message_id
- }
-
- async def create_next(self, prompt: str, **kwargs) -> Response:
- return await OpenaiChat.create(
- **self._options,
- prompt=prompt,
- messages=await self.get_messages(),
- action="next",
- **await self.get_fields(),
- **kwargs
- )
-
- async def do_continue(self, **kwargs) -> Response:
- fields = await self.get_fields()
- if self.is_end:
- raise RuntimeError("Can't continue message. Message already finished.")
- return await OpenaiChat.create(
- **self._options,
- messages=await self.get_messages(),
- action="continue",
- **fields,
- **kwargs
- )
-
- async def create_variant(self, **kwargs) -> Response:
- if self.action != "next":
- raise RuntimeError("Can't create variant from continue or variant request.")
- return await OpenaiChat.create(
- **self._options,
- messages=self._messages,
- action="variant",
- **await self.get_fields(),
- **kwargs
- )
-
- async def get_messages(self) -> list:
- messages = self._messages
- messages.append({"role": "assistant", "content": await self.message()})
- return messages
+ self.is_recipient = False
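Note: the removed platformdirs/nodriver boilerplate above is now provided by a shared `get_nodriver` helper imported from `g4f.requests`. A minimal sketch of what that helper might look like, reconstructed from the deleted lines (names and defaults are assumptions, not the actual implementation):

```python
# Hypothetical reconstruction of the get_nodriver helper, based on the inline
# code this commit removes from nodriver_auth; the real g4f.requests version
# may differ.
import nodriver

try:
    from platformdirs import user_config_dir
    has_platformdirs = True
except ImportError:
    has_platformdirs = False

async def get_nodriver(proxy: str = None):
    # Reuse a persistent profile directory so cookies and tokens collected
    # during login survive across browser sessions.
    user_data_dir = user_config_dir("g4f-nodriver") if has_platformdirs else None
    return await nodriver.start(
        user_data_dir=user_data_dir,
        browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
    )
```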
1 change: 0 additions & 1 deletion g4f/Provider/openai/har_file.py
@@ -25,7 +25,6 @@ class NoValidHarFileError(Exception):
pass

class RequestConfig:
- user_agent: str = None
cookies: dict = None
headers: dict = None
access_request_id: str = None
12 changes: 5 additions & 7 deletions g4f/client/__init__.py
@@ -6,7 +6,6 @@
import string
import asyncio
import base64
- import logging
from typing import Union, AsyncIterator, Iterator, Coroutine, Optional

from ..providers.base_provider import AsyncGeneratorProvider
@@ -16,13 +15,13 @@
from ..providers.response import ResponseType, FinishReason, BaseConversation, SynthesizeData
from ..errors import NoImageResponseError, ModelNotFoundError
from ..providers.retry_provider import IterListProvider
- from ..providers.base_provider import get_running_loop
+ from ..providers.asyncio import get_running_loop, to_sync_generator, async_generator_to_list
from ..Provider.needs_auth.BingCreateImages import BingCreateImages
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .image_models import ImageModels
from .types import IterResponse, ImageProvider, Client as BaseClient
from .service import get_model_and_provider, get_last_provider, convert_to_provider
- from .helper import find_stop, filter_json, filter_none, safe_aclose, to_sync_iter, to_async_iterator
+ from .helper import find_stop, filter_json, filter_none, safe_aclose, to_async_iterator

ChatCompletionResponseType = Iterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
AsyncChatCompletionResponseType = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
@@ -50,8 +49,7 @@ def iter_response(
idx = 0

if hasattr(response, '__aiter__'):
- # It's an async iterator, wrap it into a sync iterator
- response = to_sync_iter(response)
+ response = to_sync_generator(response)

for chunk in response:
if isinstance(chunk, FinishReason):
@@ -231,10 +229,10 @@ def create(
response = asyncio.run(response)
if stream and hasattr(response, '__aiter__'):
# It's an async generator, wrap it into a sync iterator
- response = to_sync_iter(response)
+ response = to_sync_generator(response)
elif hasattr(response, '__aiter__'):
# If response is an async generator, collect it into a list
- response = list(to_sync_iter(response))
+ response = asyncio.run(async_generator_to_list(response))
response = iter_response(response, stream, response_format, max_tokens, stop)
response = iter_append_model_and_provider(response)
if stream:
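Note: `to_sync_generator` and `async_generator_to_list` come from the new `g4f.providers.asyncio` module the commit message mentions. A rough sketch of what they could look like, assuming they behave like the `to_sync_iter` helper they replace (illustrative only, not the module's actual code):

```python
# Hedged sketch of the helpers imported from g4f.providers.asyncio; the names
# match the diff above, but these bodies are assumptions.
import asyncio
from typing import AsyncIterator, Iterator, List

async def async_generator_to_list(generator: AsyncIterator) -> List:
    # Drain an async generator into a plain list in a single loop run.
    return [item async for item in generator]

def to_sync_generator(generator: AsyncIterator) -> Iterator:
    # Pump an async generator from synchronous code via a private event loop.
    loop = asyncio.new_event_loop()
    gen = generator.__aiter__()
    try:
        while True:
            try:
                yield loop.run_until_complete(gen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        # shutdown_default_executor() only exists on newer Pythons (3.9+),
        # hence the attribute guard referenced in the commit message.
        if hasattr(loop, "shutdown_default_executor"):
            loop.run_until_complete(loop.shutdown_default_executor())
        loop.close()
```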