Add Path and PathLike support when uploading images (#2514)
* Add Path and PathLike support when uploading images
Improve raise_for_status in special cases
Move ImageResponse to providers.response module
Improve OpenaiChat and OpenaiAccount providers
Add Sources for web_search in OpenaiChat
Add JsonConversation for importing and exporting conversations to JS
Add RequestLogin response type
Add TitleGeneration support in OpenaiChat and the GUI
* Improve Docker Container Guide in README.md
* Add tool calls API support and search tool support
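
For context on the headline change, here is a minimal usage sketch. It assumes the `g4f` client's `image` keyword and the model name, neither of which is shown in this diff:

```python
from pathlib import Path
from g4f.client import Client

client = Client()
# With this commit, a Path (or any os.PathLike) can be passed where image
# bytes or an open file object were previously required (assumption based
# on the commit title; the exact accepted types are not shown in this diff).
response = client.chat.completions.create(
    model="gpt-4o",  # assumed model name
    messages=[{"role": "user", "content": "Describe this image."}],
    image=Path("screenshot.png"),
)
print(response.choices[0].message.content)
```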
hlohaus authored Dec 28, 2024
1 parent 9918df9 commit 86e36ef
Showing 34 changed files with 931 additions and 1,329 deletions.
22 changes: 16 additions & 6 deletions README.md
@@ -97,10 +97,19 @@ To access the space, please use the following login credentials:

 1. **Install Docker:** Begin by [downloading and installing Docker](https://docs.docker.com/get-docker/).
 
-2. **Set Up the Container:**
+2. **Check Directories:**
+
+   Before running the container, make sure the necessary data directories exist or can be created. For example, you can create and set ownership on these directories by running:
+
+   ```bash
+   mkdir -p ${PWD}/har_and_cookies ${PWD}/generated_images
+   chown -R 1000:1000 ${PWD}/har_and_cookies ${PWD}/generated_images
+   ```
+
+3. **Set Up the Container:**
    Use the following commands to pull the latest image and start the container:
 
-```sh
+```bash
 docker pull hlohaus789/g4f
 docker run \
   -p 8080:8080 -p 1337:1337 -p 7900:7900 \
@@ -110,7 +119,9 @@ docker run \
   hlohaus789/g4f:latest
 ```
 
-To run the slim docker image. Use this command:
+##### Running the Slim Docker Image
+
+Use the following command to run the Slim Docker image. This command also updates the `g4f` package at startup and installs any additional dependencies:
 
 ```bash
 docker run \
@@ -122,14 +133,13 @@ docker run \
   && pip install -U g4f[slim] \
   && python -m g4f --debug
 ```
-It also updates the `g4f` package at startup and installs any new required dependencies.
 
-3. **Access the Client:**
+4. **Access the Client:**
 
 - To use the included client, navigate to: [http://localhost:8080/chat/](http://localhost:8080/chat/) or [http://localhost:1337/chat/](http://localhost:1337/chat/)
 - Or set the API base for your client to: [http://localhost:1337/v1](http://localhost:1337/v1)
 
-4. **(Optional) Provider Login:**
+5. **(Optional) Provider Login:**
 If required, you can access the container's desktop here: http://localhost:7900/?autoconnect=1&resize=scale&password=secret for provider login purposes.
 
 #### Installation Guide for Windows (.exe)
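The "Access the Client" step above says to set the API base for an external client to http://localhost:1337/v1. A minimal sketch of that setup, using the official `openai` Python package (the package, model name, and key handling are assumptions, not part of this commit):

```python
from openai import OpenAI

# Point any OpenAI-compatible client at the local g4f API.
client = OpenAI(
    base_url="http://localhost:1337/v1",
    api_key="secret",  # placeholder; assumed that the local API accepts any key
)

completion = client.chat.completions.create(
    model="gpt-4o-mini",  # assumed model name
    messages=[{"role": "user", "content": "Hello from the local API!"}],
)
print(completion.choices[0].message.content)
```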
1 change: 1 addition & 0 deletions etc/unittest/__main__.py
@@ -8,6 +8,7 @@
 from .image_client import *
 from .include import *
 from .retry_provider import *
+from .web_search import *
 from .models import *
 
 unittest.main()
20 changes: 13 additions & 7 deletions etc/unittest/mocks.py
@@ -5,40 +5,45 @@
 class ProviderMock(AbstractProvider):
     working = True
 
+    @classmethod
     def create_completion(
-        model, messages, stream, **kwargs
+        cls, model, messages, stream, **kwargs
     ):
         yield "Mock"
 
 class AsyncProviderMock(AsyncProvider):
     working = True
 
+    @classmethod
     async def create_async(
-        model, messages, **kwargs
+        cls, model, messages, **kwargs
     ):
         return "Mock"
 
 class AsyncGeneratorProviderMock(AsyncGeneratorProvider):
     working = True
 
+    @classmethod
     async def create_async_generator(
-        model, messages, stream, **kwargs
+        cls, model, messages, stream, **kwargs
     ):
         yield "Mock"
 
 class ModelProviderMock(AbstractProvider):
     working = True
 
+    @classmethod
     def create_completion(
-        model, messages, stream, **kwargs
+        cls, model, messages, stream, **kwargs
     ):
         yield model
 
 class YieldProviderMock(AsyncGeneratorProvider):
     working = True
 
+    @classmethod
     async def create_async_generator(
-        model, messages, stream, **kwargs
+        cls, model, messages, stream, **kwargs
     ):
         for message in messages:
             yield message["content"]
@@ -84,8 +89,9 @@ async def create_async_generator(

 class YieldNoneProviderMock(AsyncGeneratorProvider):
     working = True
 
+    @classmethod
     async def create_async_generator(
-        model, messages, stream, **kwargs
+        cls, model, messages, stream, **kwargs
     ):
         yield None
89 changes: 89 additions & 0 deletions etc/unittest/web_search.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+import json
+import unittest
+
+try:
+    from duckduckgo_search import DDGS
+    from duckduckgo_search.exceptions import DuckDuckGoSearchException
+    from bs4 import BeautifulSoup
+    has_requirements = True
+except ImportError:
+    has_requirements = False
+
+from g4f.client import AsyncClient
+from .mocks import YieldProviderMock
+
+DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
+
+class TestIterListProvider(unittest.IsolatedAsyncioTestCase):
+    def setUp(self) -> None:
+        if not has_requirements:
+            self.skipTest('web search requirements not installed')
+
+    async def test_search(self):
+        client = AsyncClient(provider=YieldProviderMock)
+        tool_calls = [
+            {
+                "function": {
+                    "arguments": {
+                        "query": "search query", # content of the last message: messages[-1]["content"]
+                        "max_results": 5, # maximum number of search results
+                        "max_words": 500, # maximum number of words from the search results used to generate the response
+                        "backend": "html", # or "lite", "api": change it to bypass rate limits
+                        "add_text": True, # whether to scrape the found websites
+                        "timeout": 5, # in seconds for scraping websites
+                        "region": "wt-wt",
+                        "instructions": "Using the provided web search results, to write a comprehensive reply to the user request.\n"
+                                        "Make sure to add the sources of cites using [[Number]](Url) notation after the reference. Example: [[0]](http://google.com)",
+                    },
+                    "name": "search_tool"
+                },
+                "type": "function"
+            }
+        ]
+        try:
+            response = await client.chat.completions.create([{"content": "", "role": "user"}], "", tool_calls=tool_calls)
+            self.assertIn("Using the provided web search results", response.choices[0].message.content)
+        except DuckDuckGoSearchException as e:
+            self.skipTest(f'DuckDuckGoSearchException: {e}')
+
+    async def test_search2(self):
+        client = AsyncClient(provider=YieldProviderMock)
+        tool_calls = [
+            {
+                "function": {
+                    "arguments": {
+                        "query": "search query",
+                    },
+                    "name": "search_tool"
+                },
+                "type": "function"
+            }
+        ]
+        try:
+            response = await client.chat.completions.create([{"content": "", "role": "user"}], "", tool_calls=tool_calls)
+            self.assertIn("Using the provided web search results", response.choices[0].message.content)
+        except DuckDuckGoSearchException as e:
+            self.skipTest(f'DuckDuckGoSearchException: {e}')
+
+    async def test_search3(self):
+        client = AsyncClient(provider=YieldProviderMock)
+        tool_calls = [
+            {
+                "function": {
+                    "arguments": json.dumps({
+                        "query": "search query", # content of the last message: messages[-1]["content"]
+                        "max_results": 5, # maximum number of search results
+                        "max_words": 500, # maximum number of words from the search results used to generate the response
+                    }),
+                    "name": "search_tool"
+                },
+                "type": "function"
+            }
+        ]
+        try:
+            response = await client.chat.completions.create([{"content": "", "role": "user"}], "", tool_calls=tool_calls)
+            self.assertIn("Using the provided web search results", response.choices[0].message.content)
+        except DuckDuckGoSearchException as e:
+            self.skipTest(f'DuckDuckGoSearchException: {e}')
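
Outside the test harness, the same tool-call shape should drive a real search-backed completion. A sketch based on the tests above (the query text and default provider selection are illustrative, not from this commit):

```python
import asyncio
from g4f.client import AsyncClient

async def main():
    client = AsyncClient()  # no mock provider: assumed default provider selection
    tool_calls = [{
        "type": "function",
        "function": {
            "name": "search_tool",
            # Arguments may be a dict or a JSON string; the tests above exercise both.
            "arguments": {"query": "latest g4f release", "max_results": 5, "max_words": 500},
        },
    }]
    response = await client.chat.completions.create(
        [{"role": "user", "content": "What is the latest g4f release?"}],
        "",  # empty model string, as in the tests above
        tool_calls=tool_calls,
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```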
9 changes: 5 additions & 4 deletions g4f/Provider/Copilot.py
@@ -18,18 +18,19 @@
 except ImportError:
     has_nodriver = False
 
-from .base_provider import AbstractProvider, ProviderModelMixin, BaseConversation
+from .base_provider import AbstractProvider, ProviderModelMixin
 from .helper import format_prompt_max_length
-from .openai.har_file import get_headers, get_har_files
 from ..typing import CreateResult, Messages, ImagesType
 from ..errors import MissingRequirementsError, NoValidHarFileError
 from ..requests.raise_for_status import raise_for_status
+from ..providers.response import JsonConversation, RequestLogin
 from ..providers.asyncio import get_running_loop
+from .openai.har_file import get_headers, get_har_files
 from ..requests import get_nodriver
 from ..image import ImageResponse, to_bytes, is_accepted_format
 from .. import debug
 
-class Conversation(BaseConversation):
+class Conversation(JsonConversation):
     conversation_id: str
 
     def __init__(self, conversation_id: str):
@@ -80,7 +81,7 @@ def create_completion(
         if has_nodriver:
             login_url = os.environ.get("G4F_LOGIN_URL")
             if login_url:
-                yield f"[Login to {cls.label}]({login_url})\n\n"
+                yield RequestLogin(cls.label, login_url)
             get_running_loop(check_nested=True)
             cls._access_token, cls._cookies = asyncio.run(get_access_token_and_cookies(cls.url, proxy))
         else:
58 changes: 16 additions & 42 deletions g4f/Provider/Mhystical.py
@@ -1,12 +1,7 @@
 from __future__ import annotations
 
-import json
-import logging
-from aiohttp import ClientSession
 from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from .needs_auth.OpenaiAPI import OpenaiAPI
 
 """
 Mhystical.cc
@@ -19,39 +14,31 @@
"""

logger = logging.getLogger(__name__)

class Mhystical(AsyncGeneratorProvider, ProviderModelMixin):
class Mhystical(OpenaiAPI):
url = "https://api.mhystical.cc"
api_endpoint = "https://api.mhystical.cc/v1/completions"
working = True
needs_auth = False
supports_stream = False # Set to False, as streaming is not specified in ChatifyAI
supports_system_message = False
supports_message_history = True

default_model = 'gpt-4'
models = [default_model]
model_aliases = {}

@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases.get(model, cls.default_model)
else:
return cls.default_model
def get_model(cls, model: str, **kwargs) -> str:
cls.last_model = cls.default_model
return cls.default_model

@classmethod
async def create_async_generator(
def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
stream: bool = False,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)

headers = {
"x-api-key": "mhystical",
"Content-Type": "application/json",
@@ -61,24 +48,11 @@ async def create_async_generator(
"referer": f"{cls.url}/",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
}

async with ClientSession(headers=headers) as session:
data = {
"model": model,
"messages": [{"role": "user", "content": format_prompt(messages)}]
}
async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
await raise_for_status(response)
response_text = await response.text()
filtered_response = cls.filter_response(response_text)
yield filtered_response

@staticmethod
def filter_response(response_text: str) -> str:
try:
json_response = json.loads(response_text)
message_content = json_response["choices"][0]["message"]["content"]
return message_content
except (KeyError, IndexError, json.JSONDecodeError) as e:
logger.error("Error parsing response: %s", e)
return "Error: Failed to parse response from API."
return super().create_async_generator(
model=model,
messages=messages,
stream=cls.supports_stream,
api_endpoint=cls.api_endpoint,
headers=headers,
**kwargs
)
2 changes: 1 addition & 1 deletion g4f/Provider/PollinationsAI.py
@@ -72,7 +72,7 @@ async def create_async_generator(
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
-        if model in cls.image_models:
+        if cls.get_models() and model in cls.image_models:
             async for response in cls._generate_image(model, messages, prompt, proxy, seed, width, height):
                 yield response
         elif model in cls.models: