diff --git a/README.md b/README.md index 2b2c9795dd9..52ccc2b8e8b 100644 --- a/README.md +++ b/README.md @@ -212,7 +212,7 @@ client = Client() response = client.chat.completions.create( model="gpt-4o-mini", messages=[{"role": "user", "content": "Hello"}], - # Add any other necessary parameters + web_search=False ) print(response.choices[0].message.content) ``` @@ -230,7 +230,6 @@ response = client.images.generate( model="flux", prompt="a white siamese cat", response_format="url" - # Add any other necessary parameters ) image_url = response.data[0].url diff --git a/docs/async_client.md b/docs/async_client.md index 4b6c9bc6324..ca8b39a3fc8 100644 --- a/docs/async_client.md +++ b/docs/async_client.md @@ -12,6 +12,7 @@ The G4F AsyncClient API is designed to be compatible with the OpenAI API, making - [Initializing the Client](#initializing-the-client) - [Creating Chat Completions](#creating-chat-completions) - [Configuration](#configuration) + - [Explanation of Parameters](#explanation-of-parameters) - [Usage Examples](#usage-examples) - [Text Completions](#text-completions) - [Streaming Completions](#streaming-completions) @@ -80,6 +81,51 @@ client = AsyncClient( ) ``` +## Explanation of Parameters +**When using G4F to create chat completions or perform related tasks, you can configure the following parameters:** +- **`model`**: + Specifies the AI model to be used for the task. Examples include `"gpt-4o"` for GPT-4 Omni or `"gpt-4o-mini"` for a lightweight version. The choice of model determines the quality and speed of the response. Always ensure the selected model is supported by the provider. + +- **`messages`**: + **A list of dictionaries representing the conversation context. Each dictionary contains two keys:** + - `role`: Defines the role of the message sender, such as `"user"` (input from the user) or `"system"` (instructions to the AI). + - `content`: The actual text of the message. + **Example:** + ```python + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What day is it today?"} + ] + ``` + +- **`web_search`**: + (Optional) A Boolean flag indicating whether to enable internet-based search capabilities for the task. If `True`, the system performs a web search using the DuckDuckGo search engine to retrieve up-to-date information. This is particularly useful for obtaining real-time or specific details not contained in the model's training data. + +- **`provider`**: + Specifies the backend provider for the API. Examples include `g4f.Provider.Blackbox` or `g4f.Provider.OpenaiChat`. Each provider may support a different subset of models and features, so select one that matches your requirements.
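+
+**Putting it all together, a minimal sketch that sets all four parameters in one call (the provider and `web_search` values below are purely illustrative; any provider that supports the chosen model will do):**
+```python
+import asyncio
+import g4f
+from g4f.client import AsyncClient
+
+async def main():
+    # provider and web_search are optional; omit them to use the defaults
+    client = AsyncClient(provider=g4f.Provider.Blackbox)
+    response = await client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "What day is it today?"}
+        ],
+        web_search=False  # set True to augment the prompt with DuckDuckGo results
+    )
+    print(response.choices[0].message.content)
+
+asyncio.run(main())
+```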
+ ## Usage Examples ### Text Completions **Generate text completions using the ChatCompletions endpoint:** @@ -97,7 +143,8 @@ async def main(): "role": "user", "content": "Say this is a test" } - ] + ], + web_search=False ) print(response.choices[0].message.content) @@ -139,13 +186,15 @@ import g4f import requests import asyncio from g4f.client import AsyncClient +from g4f.Provider.CopilotAccount import CopilotAccount async def main(): client = AsyncClient( - provider=g4f.Provider.CopilotAccount + provider=CopilotAccount ) image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/images/cat.jpeg", stream=True).raw + # Or: image = open("docs/images/cat.jpeg", "rb") response = await client.chat.completions.create( model=g4f.models.default, @@ -374,4 +423,4 @@ Remember to handle errors gracefully, implement rate limiting, and monitor your --- -[Return to Home](/) \ No newline at end of file +[Return to Home](/) diff --git a/docs/client.md b/docs/client.md index 3eef1e834d8..c9fb52b3b6c 100644 --- a/docs/client.md +++ b/docs/client.md @@ -8,6 +8,7 @@ - [Initializing the Client](#initializing-the-client) - [Creating Chat Completions](#creating-chat-completions) - [Configuration](#configuration) + - [Explanation of Parameters](#explanation-of-parameters) - [Usage Examples](#usage-examples) - [Text Completions](#text-completions) - [Streaming Completions](#streaming-completions) @@ -84,6 +85,48 @@ client = Client( ) ``` +## Explanation of Parameters +**When using G4F to create chat completions or perform related tasks, you can configure the following parameters:** +- **`model`**: + Specifies the AI model to be used for the task. Examples include `"gpt-4o"` for GPT-4 Omni or `"gpt-4o-mini"` for a lightweight version. The choice of model determines the quality and speed of the response. Always ensure the selected model is supported by the provider. + +- **`messages`**: + **A list of dictionaries representing the conversation context. Each dictionary contains two keys:** + - `role`: Defines the role of the message sender, such as `"user"` (input from the user) or `"system"` (instructions to the AI). + - `content`: The actual text of the message. + **Example:** + ```python + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What day is it today?"} + ] + ``` + +- **`web_search`**: + (Optional) A Boolean flag indicating whether to enable internet-based search capabilities for the task. If `True`, the system performs a web search using the DuckDuckGo search engine to retrieve up-to-date information. This is particularly useful for obtaining real-time or specific details not contained in the model's training data. + +- **`provider`**: + Specifies the backend provider for the API. Examples include `g4f.Provider.Blackbox` or `g4f.Provider.OpenaiChat`. Each provider may support a different subset of models and features, so select one that matches your requirements.
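+
+**The same parameters apply to the synchronous client. A minimal sketch (the provider here is only an example; `web_search=True` requires internet access):**
+```python
+from g4f.client import Client
+from g4f.Provider import Blackbox
+
+# provider and web_search are optional; omit them to use the defaults
+client = Client(provider=Blackbox)
+response = client.chat.completions.create(
+    model="gpt-4o",
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "What day is it today?"}
+    ],
+    web_search=True  # fetch up-to-date context from DuckDuckGo before answering
+)
+print(response.choices[0].message.content)
+```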
+ + ## Usage Examples ### Text Completions **Generate text completions using the `ChatCompletions` endpoint:** @@ -99,7 +142,8 @@ response = client.chat.completions.create( "role": "user", "content": "Say this is a test" } - ] + ], + web_search=False # Add any other necessary parameters ) @@ -234,7 +278,7 @@ client = Client( provider=GeminiPro ) -image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw +image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/images/cat.jpeg", stream=True).raw # Or: image = open("docs/images/cat.jpeg", "rb") response = client.chat.completions.create( @@ -242,7 +286,7 @@ response = client.chat.completions.create( messages=[ { "role": "user", - "content": "What are on this image?" + "content": "What's in this image?" } ], image=image diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md index 1684cc8770f..2f4b5fbf79c 100644 --- a/docs/providers-and-models.md +++ b/docs/providers-and-models.md @@ -20,12 +20,13 @@ This document provides an overview of various AI providers and models, including ### Providers Free | Website | Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth | |----------|-------------|--------------|---------------|--------|--------|------|------| -|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`phi-2, gpt-4, gpt-4o-mini, gpt-4o, gpt-4-turbo, o1-mini, openchat-3.5, deepseek-coder, hermes-2-dpo, hermes-2-pro, openhermes-2.5, lfm-40b, german-7b, llama-2-7b, llama-3.1-70b, neural-7b, zephyr-7b, evil,`|`sdxl, flux-pro, flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌+✔| +|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`phi-2, gpt-4, gpt-4o-mini, gpt-4o, gpt-4-turbo, o1-mini, openchat-3.5, deepseek-coder, hermes-2-dpo, hermes-2-pro, openhermes-2.5, lfm-40b, german-7b, llama-2-7b, llama-3.1-8b, llama-3.1-70b, neural-7b, zephyr-7b, evil`|`sdxl, flux-pro, flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌+✔| |[amigochat.io](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|✔|✔|❌|✔|![Error](https://img.shields.io/badge/RateLimit-f48d37)|❌| |[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, gemini-pro, claude-3.5-sonnet, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, llama-3.3-70b, mixtral-7b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo`|`flux`|`blackboxai, gpt-4o, gemini-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| -|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox2`|`llama-3.1-70b`|`flux`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| +|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.BlackboxCreateAgent`|`llama-3.1-70b`|`flux`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|✔|❌|❌|✔|![Error](https://img.shields.io/badge/HTTPError-f48d37)|❌| |[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[claudeson.net](https://claudeson.net)|`g4f.Provider.ClaudeSon`|`claude-3.5-sonnet`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, qwen-1.5-7b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[copilot.microsoft.com](https://copilot.microsoft.com)|`g4f.Provider.Copilot`|`gpt-4`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[darkai.foundation](https://darkai.foundation)|`g4f.Provider.DarkAI`|`gpt-3.5-turbo, gpt-4o, llama-3.1-70b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| @@ -47,6 +48,13 @@ This document provides an overview of various AI providers and models, including |[teach-anything.com](https://www.teach-anything.com)|`g4f.Provider.TeachAnything`|`llama-3.1-70b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[you.com](https://you.com)|`g4f.Provider.You`|✔|✔|✔|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| +--- +### Providers Free [HuggingSpace](https://hf.space) +| Website | Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth | +|----------|-------------|--------------|---------------|--------|--------|------|------| +|[black-forest-labs-flux-1-dev.hf.space](https://black-forest-labs-flux-1-dev.hf.space)|`g4f.Provider.BlackForestLabsFlux1Dev`|❌|`flux-dev`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| +|[black-forest-labs-flux-1-schnell.hf.space](https://black-forest-labs-flux-1-schnell.hf.space)|`g4f.Provider.BlackForestLabsFlux1Schnell`|❌|`flux-schnell`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| +|[voodoohop-flux-1-schnell.hf.space](https://voodoohop-flux-1-schnell.hf.space)|`g4f.Provider.VoodoohopFlux1Schnell`|❌|`flux-schnell`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| --- ### Providers Needs Auth @@ -165,7 +173,7 @@ This document provides an overview of various AI providers and models, including |flux-disney|Flux AI|1+ Providers|[]( )| |flux-pixel|Flux AI|1+ Providers|[]( )| |flux-4o|Flux AI|1+ Providers|[]( )| -|flux-schnell|Black Forest Labs|2+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-schnell)| +|flux-schnell|Black Forest Labs|1+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-schnell)| |dall-e-3|OpenAI|5+ Providers|[openai.com](https://openai.com/index/dall-e/)| |midjourney|Midjourney|2+ Providers|[docs.midjourney.com](https://docs.midjourney.com/docs/model-versions)| |any-dark||2+ Providers|[]( )| diff --git a/docs/requests.md b/docs/requests.md index bdabfa174eb..e185cec54b2 100644 --- a/docs/requests.md +++ b/docs/requests.md @@ -389,3 +389,7 @@ Feel free to customize and expand upon these examples to suit your specific need 6. **Logging:** - Implement logging to monitor the behavior of your applications, which is crucial for debugging and maintaining your systems. + +--- + +[Return to Home](/) diff --git a/etc/examples/api.py b/etc/examples/api_completions_copilot.py old mode 100644 new mode 100755 similarity index 76% rename from etc/examples/api.py rename to etc/examples/api_completions_copilot.py index 2485baded6c..6c77f8e9ec1 --- a/etc/examples/api.py +++ b/etc/examples/api_completions_copilot.py @@ -6,7 +6,7 @@ conversation_id = str(uuid.uuid4()) body = { "model": "", - "provider": "Copilot", + "provider": "Copilot", "stream": True, "messages": [ {"role": "user", "content": "Hello, i am Heiner.
How are you?"} @@ -22,7 +22,9 @@ if json_data.get("error"): print(json_data) break - print(json_data.get("choices", [{"delta": {}}])[0]["delta"].get("content", ""), end="") + content = json_data.get("choices", [{"delta": {}}])[0]["delta"].get("content", "") + if content: + print(content, end="") except json.JSONDecodeError: pass print() @@ -31,7 +33,7 @@ body = { "model": "", "provider": "Copilot", - "stream": True, + "stream": True, "messages": [ {"role": "user", "content": "Tell me somethings about my name"} ], @@ -46,6 +48,8 @@ if json_data.get("error"): print(json_data) break - print(json_data.get("choices", [{"delta": {}}])[0]["delta"].get("content", ""), end="") + content = json_data.get("choices", [{"delta": {}}])[0]["delta"].get("content", "") + if content: + print(content, end="") except json.JSONDecodeError: - pass \ No newline at end of file + pass diff --git a/etc/examples/image_api.py b/etc/examples/api_generations_image.py old mode 100644 new mode 100755 similarity index 68% rename from etc/examples/image_api.py rename to etc/examples/api_generations_image.py index 9a438f9b8ac..368d5acc038 --- a/etc/examples/image_api.py +++ b/etc/examples/api_generations_image.py @@ -1,9 +1,11 @@ import requests url = "http://localhost:1337/v1/images/generations" body = { - "model": "dall-e", + "model": "flux", "prompt": "hello world user", + "response_format": None, + #"response_format": "url", #"response_format": "b64_json", } data = requests.post(url, json=body, stream=True).json() -print(data) \ No newline at end of file +print(data) diff --git a/etc/examples/messages.py b/etc/examples/messages.py new file mode 100755 index 00000000000..32720c7c0d6 --- /dev/null +++ b/etc/examples/messages.py @@ -0,0 +1,33 @@ +from g4f.client import Client + +class ConversationHandler: + def __init__(self, model="gpt-4"): + self.client = Client() + self.model = model + self.conversation_history = [] + + def add_user_message(self, content): + self.conversation_history.append({ + "role": "user", + "content": content + }) + + def get_response(self): + response = self.client.chat.completions.create( + model=self.model, + messages=self.conversation_history + ) + assistant_message = { + "role": response.choices[0].message.role, + "content": response.choices[0].message.content + } + self.conversation_history.append(assistant_message) + return assistant_message["content"] + +# Usage example +conversation = ConversationHandler() +conversation.add_user_message("Hello!") +print("Assistant:", conversation.get_response()) + +conversation.add_user_message("How are you?") +print("Assistant:", conversation.get_response()) diff --git a/etc/examples/messages_stream.py b/etc/examples/messages_stream.py new file mode 100755 index 00000000000..237797985cb --- /dev/null +++ b/etc/examples/messages_stream.py @@ -0,0 +1,25 @@ +import asyncio +from g4f.client import AsyncClient + +async def main(): + client = AsyncClient() + + stream = client.chat.completions.create( + model="gpt-4", + messages=[{"role": "user", "content": "Say hello there!"}], + stream=True, + ) + + accumulated_text = "" + try: + async for chunk in stream: + if chunk.choices and chunk.choices[0].delta.content: + content = chunk.choices[0].delta.content + accumulated_text += content + print(content, end="", flush=True) + except Exception as e: + print(f"\nError occurred: {e}") + finally: + print("\n\nFinal accumulated text:", accumulated_text) + +asyncio.run(main()) diff --git a/etc/examples/openaichat.py b/etc/examples/openaichat.py old mode 100644 new mode 
100755 diff --git a/etc/examples/text_completions_demo_async.py b/etc/examples/text_completions_demo_async.py new file mode 100755 index 00000000000..098185907e7 --- /dev/null +++ b/etc/examples/text_completions_demo_async.py @@ -0,0 +1,17 @@ +import asyncio +from g4f.client import AsyncClient + +async def main(): + client = AsyncClient() + + response = await client.chat.completions.create( + model="gpt-4o", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "how does a court case get to the Supreme Court?"} + ] + ) + + print(response.choices[0].message.content) + +asyncio.run(main()) diff --git a/etc/examples/text_completions_demo_sync.py b/etc/examples/text_completions_demo_sync.py new file mode 100755 index 00000000000..2ed41b32e47 --- /dev/null +++ b/etc/examples/text_completions_demo_sync.py @@ -0,0 +1,13 @@ +from g4f.client import Client + +client = Client() + +response = client.chat.completions.create( + model="gpt-4o", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "how does a court case get to the Supreme Court?"} + ], +) + +print(response.choices[0].message.content) diff --git a/etc/examples/text_completions_streaming.py b/etc/examples/text_completions_streaming.py new file mode 100755 index 00000000000..fff27d77c13 --- /dev/null +++ b/etc/examples/text_completions_streaming.py @@ -0,0 +1,49 @@ +import asyncio +from g4f.client import Client, AsyncClient + +question = """ +Hey! How can I recursively list all files in a directory in Python? +""" + +# Synchronous streaming function +def sync_stream(): + client = Client() + stream = client.chat.completions.create( + model="gpt-4", + messages=[ + {"role": "user", "content": question} + ], + stream=True, + ) + + for chunk in stream: + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content or "", end="") + +# Asynchronous streaming function +async def async_stream(): + client = AsyncClient() + stream = client.chat.completions.create( + model="gpt-4", + messages=[ + {"role": "user", "content": question} + ], + stream=True, + ) + + async for chunk in stream: + if chunk.choices and chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="") + +# Main function to run both streams +def main(): + print("Synchronous Stream:") + sync_stream() + print("\n\nAsynchronous Stream:") + asyncio.run(async_stream()) + +if __name__ == "__main__": + try: + main() + except Exception as e: + print(f"An error occurred: {str(e)}") diff --git a/etc/examples/vision_images.py b/etc/examples/vision_images.py new file mode 100644 index 00000000000..99780b6f803 --- /dev/null +++ b/etc/examples/vision_images.py @@ -0,0 +1,36 @@ +import g4f +import requests + +from g4f.client import Client +from g4f.Provider.Blackbox import Blackbox + +client = Client( + provider=Blackbox +) + +# Processing remote image +remote_image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/images/cat.jpeg", stream=True).content +response_remote = client.chat.completions.create( + model=g4f.models.default, + messages=[ + {"role": "user", "content": "What are on this image?"} + ], + image=remote_image +) +print("Response for remote image:") +print(response_remote.choices[0].message.content) + +print("\n" + "-"*50 + "\n") # Separator + +# Processing local image +local_image = open("docs/images/cat.jpeg", "rb") +response_local = client.chat.completions.create( + model=g4f.models.default, + messages=[ + 
{"role": "user", "content": "What are on this image?"} + ], + image=local_image ) +print("Response for local image:") +print(response_local.choices[0].message.content) +local_image.close() # Close file after use diff --git a/etc/examples/image_chat_reka.py b/etc/examples/vision_images_reka.py old mode 100644 new mode 100755 similarity index 79% rename from etc/examples/image_chat_reka.py rename to etc/examples/vision_images_reka.py index 954960db6a1..6b3a88abcb2 --- a/etc/examples/image_chat_reka.py +++ b/etc/examples/vision_images_reka.py @@ -18,10 +18,10 @@ } ], stream = True, - image = open("test.png", "rb") # open("path", "rb"), do not use .read(), etc. it must be a file object + image = open("docs/images/cat.jpeg", "rb") # open("path", "rb"), do not use .read(), etc. it must be a file object ) for message in completion: print(message.choices[0].delta.content or "") - # >>> In the image there is ... \ No newline at end of file + # >>> In the image there is ... diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py index 6f55834c838..3862b10bc14 100644 --- a/g4f/Provider/Airforce.py +++ b/g4f/Provider/Airforce.py @@ -4,7 +4,6 @@ import requests from aiohttp import ClientSession from typing import List -from requests.packages.urllib3.exceptions import InsecureRequestWarning from ..typing import AsyncResult, Messages from ..image import ImageResponse @@ -12,8 +11,6 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .. import debug - -requests.packages.urllib3.disable_warnings(InsecureRequestWarning) def split_message(message: str, max_length: int = 1000) -> List[str]: """Splits the message into parts up to (max_length).""" @@ -29,7 +26,7 @@ def split_message(message: str, max_length: int = 1000) -> List[str]: return chunks class Airforce(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://llmplayground.net" + url = "https://api.airforce" api_endpoint_completions = "https://api.airforce/chat/completions" api_endpoint_imagine2 = "https://api.airforce/imagine2" @@ -41,6 +38,9 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): default_model = "gpt-4o-mini" default_image_model = "flux" + models = [] + image_models = [] + hidden_models = {"Flux-1.1-Pro"} additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"] model_aliases = { @@ -54,7 +54,8 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): "lfm-40b": "lfm-40b-moe", "german-7b": "discolm-german-7b-v1", "llama-2-7b": "llama-2-7b-chat-int8", "llama-3.1-70b": "llama-3.1-70b-turbo", + "llama-3.1-8b": "llama-3.1-8b-turbo", "neural-7b": "neural-chat-7b-v3-1", "zephyr-7b": "zephyr-7b-beta", "evil": "any-uncensored", @@ -66,29 +67,51 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): @classmethod def get_models(cls): + """Get available models with error handling""" if not cls.image_models: try: - url = "https://api.airforce/imagine2/models" - response = requests.get(url, verify=False) + response = requests.get( + f"{cls.url}/imagine2/models", + headers={ + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36", + } + ) response.raise_for_status() cls.image_models = response.json() - cls.image_models.extend(cls.additional_models_imagine) + if isinstance(cls.image_models, list): + cls.image_models.extend(cls.additional_models_imagine) + else: + cls.image_models = cls.additional_models_imagine.copy()
except Exception as e: debug.log(f"Error fetching image models: {e}") + cls.image_models = cls.additional_models_imagine.copy() if not cls.models: try: - url = "https://api.airforce/models" - response = requests.get(url, verify=False) + response = requests.get( + f"{cls.url}/models", + headers={ + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36", + } + ) response.raise_for_status() data = response.json() - cls.models = [model['id'] for model in data['data']] - cls.models.extend(cls.image_models) - cls.models = [model for model in cls.models if model not in cls.hidden_models] + if isinstance(data, dict) and 'data' in data: + cls.models = [model['id'] for model in data['data']] + cls.models.extend(cls.image_models) + cls.models = [model for model in cls.models if model not in cls.hidden_models] + else: + cls.models = list(cls.model_aliases.keys()) except Exception as e: debug.log(f"Error fetching text models: {e}") + cls.models = list(cls.model_aliases.keys()) + + return cls.models or list(cls.model_aliases.keys()) - return cls.models + @classmethod + def get_model(cls, model: str) -> str: + """Get the actual model name from alias""" + return cls.model_aliases.get(model, model) @classmethod async def check_api_key(cls, api_key: str) -> bool: diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index fd788576f8b..e4d4cb96d49 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -6,16 +6,18 @@ import json import re import aiohttp - -import json +import asyncio from pathlib import Path from ..typing import AsyncResult, Messages, ImagesType from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from ..image import ImageResponse, to_data_uri from ..cookies import get_cookies_dir +from ..web_search import get_search_message from .helper import format_prompt +from .. 
import debug + class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): label = "Blackbox AI" url = "https://www.blackbox.ai" @@ -30,12 +32,15 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): default_vision_model = default_model default_image_model = 'flux' image_models = ['ImageGeneration', 'repomap'] - vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b'] + vision_models = [default_vision_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b'] + + web_search_models = ['blackboxai', 'meta-llama/Llama-3.3-70B-Instruct-Turbo', 'meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro'] userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro'] agentMode = { 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}, + # 'meta-llama/Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"}, 'mistralai/Mistral-7B-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"}, 'deepseek-ai/deepseek-llm-67b-chat': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"}, @@ -88,20 +93,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): 'builder Agent': {'mode': True, 'id': "builder Agent"}, } - additional_prefixes = { - 'gpt-4o': '@GPT-4o', - 'gemini-pro': '@Gemini-PRO', - 'claude-sonnet-3.5': '@Claude-Sonnet-3.5' - } - - model_prefixes = { - **{ - mode: f"@{value['id']}" for mode, value in trendingAgentMode.items() - if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"] - }, - **additional_prefixes - } - models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())])) model_aliases = { @@ -120,7 +111,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): ### image ### "flux": "ImageGeneration", } - + @classmethod def _get_cache_file(cls) -> Path: dir = Path(get_cookies_dir()) @@ -136,7 +127,7 @@ def _load_cached_value(cls) -> str | None: data = json.load(f) return data.get('validated_value') except Exception as e: - print(f"Error reading cache file: {e}") + debug.log(f"Error reading cache file: {e}") return None @classmethod @@ -146,68 +137,69 @@ def _save_cached_value(cls, value: str): with open(cache_file, 'w') as f: json.dump({'validated_value': value}, f) except Exception as e: - print(f"Error writing to cache file: {e}") + debug.log(f"Error writing to cache file: {e}") @classmethod async def fetch_validated(cls): cached_value = cls._load_cached_value() + + async with aiohttp.ClientSession() as session: + # Let's try both URLs + urls_to_try = [ + "https://www.blackbox.ai", + "https://api.blackbox.ai" + ] + + for base_url in urls_to_try: + try: + async with session.get(base_url) as response: + if response.status != 200: + continue + + page_content = await response.text() + js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content) + + if not js_files: + js_files = re.findall(r'static/js/[a-zA-Z0-9-]+\.js', page_content) + + uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']' + + def is_valid_context(text_around): + return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz') + + for js_file in js_files: + js_url = 
f"{base_url}/_next/{js_file}" + try: + async with session.get(js_url) as js_response: + if js_response.status == 200: + js_content = await js_response.text() + for match in re.finditer(uuid_format, js_content): + start = max(0, match.start() - 10) + end = min(len(js_content), match.end() + 10) + context = js_content[start:end] + + if is_valid_context(context): + validated_value = match.group(1) + cls._save_cached_value(validated_value) + return validated_value + except Exception: + continue + + except Exception as e: + debug.log(f"Error trying {base_url}: {e}") + continue + + # If we failed to get a new validated_value, we return the cached one if cached_value: return cached_value - - async with aiohttp.ClientSession() as session: - try: - async with session.get(cls.url) as response: - if response.status != 200: - print("Failed to load the page.") - return cached_value - - page_content = await response.text() - js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content) - - uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']' - - def is_valid_context(text_around): - return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz') - - for js_file in js_files: - js_url = f"{cls.url}/_next/{js_file}" - async with session.get(js_url) as js_response: - if js_response.status == 200: - js_content = await js_response.text() - for match in re.finditer(uuid_format, js_content): - start = max(0, match.start() - 10) - end = min(len(js_content), match.end() + 10) - context = js_content[start:end] - - if is_valid_context(context): - validated_value = match.group(1) - cls._save_cached_value(validated_value) - return validated_value - except Exception as e: - print(f"Error fetching validated value: {e}") - - return cached_value + + raise RuntimeError("Failed to get validated value from both URLs") @staticmethod def generate_id(length=7): characters = string.ascii_letters + string.digits return ''.join(random.choice(characters) for _ in range(length)) - @classmethod - def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages: - prefix = cls.model_prefixes.get(model, "") - if not prefix: - return messages - - new_messages = [] - for message in messages: - new_message = message.copy() - if message['role'] == 'user': - new_message['content'] = (prefix + " " + message['content']).strip() - new_messages.append(new_message) - - return new_messages - @classmethod async def create_async_generator( cls, @@ -217,93 +209,135 @@ async def create_async_generator( proxy: str = None, web_search: bool = False, images: ImagesType = None, - top_p: float = None, - temperature: float = None, + top_p: float = 0.9, + temperature: float = 0.5, max_tokens: int = None, + max_retries: int = 3, + delay: int = 1, **kwargs ) -> AsyncResult: - message_id = cls.generate_id() - messages = cls.add_prefix_to_messages(messages, model) - validated_value = await cls.fetch_validated() - formatted_message = format_prompt(messages) - model = cls.get_model(model) + + use_internal_search = web_search and model in cls.web_search_models - messages = [{"id": message_id, "content": formatted_message, "role": "user"}] - - if images is not None: - messages[-1]['data'] = { - "imagesData": [ - { - "filePath": f"MultipleFiles/{image_name}", - "contents": to_data_uri(image) - } - for image, image_name in images - ], - "fileText": "", - "title": "" + if web_search and not use_internal_search: + + def run_search(): + return 
get_search_message(messages[-1]["content"]) + + import concurrent.futures + with concurrent.futures.ThreadPoolExecutor() as executor: + messages[-1]["content"] = await asyncio.get_event_loop().run_in_executor( + executor, run_search + ) + web_search = False + + async def process_request(): + validated_value = await cls.fetch_validated() + + if not validated_value: + raise RuntimeError("Failed to get validated value") + + formatted_message = format_prompt(messages) + current_model = cls.get_model(model) + + first_message = next((msg for msg in messages if msg['role'] == 'user'), None) + chat_id = cls.generate_id() + current_messages = [{"id": chat_id, "content": formatted_message, "role": "user"}] + + if images is not None: + current_messages[-1]['data'] = { + "imagesData": [ + { + "filePath": f"/{image_name}", + "contents": to_data_uri(image) + } + for image, image_name in images + ], + "fileText": "", + "title": "" + } + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'content-type': 'application/json', + 'origin': 'https://www.blackbox.ai', + 'referer': 'https://www.blackbox.ai/', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' + } + + data = { + "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {}, + "clickedAnswer2": False, + "clickedAnswer3": False, + "clickedForceWebSearch": False, + "codeModelMode": True, + "deepSearchMode": False, + "domains": None, + "githubToken": None, + "id": chat_id, + "imageGenerationMode": False, + "isChromeExt": False, + "isMicMode": False, + "maxTokens": max_tokens, + "messages": current_messages, + "mobileClient": False, + "playgroundTemperature": temperature, + "playgroundTopP": top_p, + "previewToken": None, + "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {}, + "userId": None, + "userSelectedModel": model if model in cls.userSelectedModel else None, + "userSystemPrompt": None, + "validated": validated_value, + "visitFromDelta": False, + "webSearchModePrompt": False, + "webSearchMode": use_internal_search } - headers = { - 'accept': '*/*', - 'accept-language': 'en-US,en;q=0.9', - 'content-type': 'application/json', - 'origin': cls.url, - 'referer': f'{cls.url}/', - 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' - } - - data = { - "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {}, - "clickedAnswer2": False, - "clickedAnswer3": False, - "clickedForceWebSearch": False, - "codeModelMode": True, - "deepSearchMode": False, - "githubToken": None, - "id": message_id, - "imageGenerationMode": False, - "isChromeExt": False, - "isMicMode": False, - "maxTokens": max_tokens, - "messages": messages, - "mobileClient": False, - "playgroundTemperature": temperature, - "playgroundTopP": top_p, - "previewToken": None, - "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {}, - "userId": None, - "userSelectedModel": model if model in cls.userSelectedModel else None, - "userSystemPrompt": None, - "validated": validated_value, - "visitFromDelta": False, - "webSearchModePrompt": False, - "webSearchMode": web_search - } - - async with ClientSession(headers=headers) as session: - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - response_text = await response.text() - - if model in cls.image_models: - 
image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text) - if image_matches: - image_url = image_matches[0] - yield ImageResponse(image_url, prompt) - return - - response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL) - response_text = re.sub(r'and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai', '', response_text, flags=re.DOTALL) - - json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL) - if json_match: - search_results = json.loads(json_match.group(1)) - answer = response_text.split('$~~~$')[-1].strip() - - formatted_response = f"{answer}\n\n**Source:**" - for i, result in enumerate(search_results, 1): - formatted_response += f"\n{i}. {result['title']}: {result['link']}" - - yield formatted_response - else: - yield response_text.strip() + for attempt in range(max_retries): + try: + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + response_text = await response.text() + + if current_model in cls.image_models: + image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text) + if image_matches: + yield ImageResponse(image_matches[0], prompt) + return + + response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL) + response_text = re.sub(r'and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai', '', response_text, flags=re.DOTALL) + + response_text = response_text.strip() + + if not response_text: + raise ValueError("Empty response received") + + json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL) + if json_match: + search_results = json.loads(json_match.group(1)) + answer = response_text.split('$~~~$')[-1].strip() + + formatted_response = f"{answer}\n\n**Source:**" + for i, result in enumerate(search_results, 1): + formatted_response += f"\n{i}. {result['title']}: {result['link']}" + + yield formatted_response + else: + yield response_text + return + + except Exception as e: + debug.log(f"Error: {str(e)}") + if attempt == max_retries - 1: + raise RuntimeError("Failed after all retries") + else: + wait_time = delay * (2 ** attempt) + random.uniform(0, 1) + debug.log(f"Attempt {attempt + 1} failed. Retrying in {wait_time:.2f} seconds...") + await asyncio.sleep(wait_time) + + async for chunk in process_request(): + yield chunk diff --git a/g4f/Provider/BlackboxCreateAgent.py b/g4f/Provider/BlackboxCreateAgent.py new file mode 100644 index 00000000000..d329ea0e22f --- /dev/null +++ b/g4f/Provider/BlackboxCreateAgent.py @@ -0,0 +1,257 @@ +from __future__ import annotations + +import random +import asyncio +import re +import json +from pathlib import Path +from aiohttp import ClientSession +from typing import AsyncIterator, Optional + +from ..typing import AsyncResult, Messages +from ..image import ImageResponse +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..cookies import get_cookies_dir + +from .. 
import debug + + +class BlackboxCreateAgent(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://www.blackbox.ai" + api_endpoints = { + "llama-3.1-70b": "https://www.blackbox.ai/api/improve-prompt", + "flux": "https://www.blackbox.ai/api/image-generator" + } + + working = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3.1-70b' + chat_models = [default_model] + image_models = ['flux'] + models = [*chat_models, *image_models] + + @classmethod + def _get_cache_file(cls) -> Path: + """Returns the path to the cache file.""" + dir = Path(get_cookies_dir()) + dir.mkdir(exist_ok=True) + return dir / 'blackbox2.json' + + @classmethod + def _load_cached_value(cls) -> str | None: + cache_file = cls._get_cache_file() + if cache_file.exists(): + try: + with open(cache_file, 'r') as f: + data = json.load(f) + return data.get('validated_value') + except Exception as e: + debug.log(f"Error reading cache file: {e}") + return None + + @classmethod + def _save_cached_value(cls, value: str): + cache_file = cls._get_cache_file() + try: + with open(cache_file, 'w') as f: + json.dump({'validated_value': value}, f) + except Exception as e: + debug.log(f"Error writing to cache file: {e}") + + @classmethod + async def fetch_validated(cls) -> Optional[str]: + """ + Asynchronously retrieves the validated value from cache or website. + + :return: The validated value or None if retrieval fails. + """ + cached_value = cls._load_cached_value() + if cached_value: + return cached_value + + js_file_pattern = r'static/chunks/\d{4}-[a-fA-F0-9]+\.js' + v_pattern = r'j\s*=\s*[\'"]([0-9a-fA-F-]{36})[\'"]' + + def is_valid_context(text: str) -> bool: + """Checks if the context is valid.""" + return any(char + '=' in text for char in 'abcdefghijklmnopqrstuvwxyz') + + async with ClientSession() as session: + try: + async with session.get(cls.url) as response: + if response.status != 200: + debug.log("Failed to download the page.") + return cached_value + + page_content = await response.text() + js_files = re.findall(js_file_pattern, page_content) + + for js_file in js_files: + js_url = f"{cls.url}/_next/{js_file}" + async with session.get(js_url) as js_response: + if js_response.status == 200: + js_content = await js_response.text() + for match in re.finditer(v_pattern, js_content): + start = max(0, match.start() - 50) + end = min(len(js_content), match.end() + 50) + context = js_content[start:end] + + if is_valid_context(context): + validated_value = match.group(1) + cls._save_cached_value(validated_value) + return validated_value + except Exception as e: + debug.log(f"Error while retrieving validated_value: {e}") + + return cached_value + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + prompt: str = None, + **kwargs + ) -> AsyncIterator[str | ImageResponse]: + """ + Creates an async generator for text or image generation. 
+ """ + if model in cls.chat_models: + async for text in cls._generate_text(model, messages, proxy=proxy, **kwargs): + yield text + elif model in cls.image_models: + prompt = messages[-1]['content'] + async for image in cls._generate_image(model, prompt, proxy=proxy, **kwargs): + yield image + else: + raise ValueError(f"Model {model} not supported") + + @classmethod + async def _generate_text( + cls, + model: str, + messages: Messages, + proxy: str = None, + max_retries: int = 3, + delay: int = 1, + max_tokens: int = None, + **kwargs + ) -> AsyncIterator[str]: + headers = cls._get_headers() + + for outer_attempt in range(2): # Add outer loop for retrying with a new key + validated_value = await cls.fetch_validated() + if not validated_value: + raise RuntimeError("Failed to get validated value") + + async with ClientSession(headers=headers) as session: + api_endpoint = cls.api_endpoints[model] + + data = { + "messages": messages, + "max_tokens": max_tokens, + "validated": validated_value + } + + for attempt in range(max_retries): + try: + async with session.post(api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + response_data = await response.json() + + if response_data.get('status') == 200 and 'prompt' in response_data: + yield response_data['prompt'] + return # Successful execution + else: + raise KeyError("Invalid response format or missing 'prompt' key") + except Exception as e: + if attempt == max_retries - 1: + if outer_attempt == 0: # If this is the first attempt with this key + # Remove the cached key and try to get a new one + cls._save_cached_value("") + debug.log("Invalid key, trying to get a new one...") + break # Exit the inner loop to get a new key + else: + raise RuntimeError(f"Error after all attempts: {str(e)}") + else: + wait_time = delay * (2 ** attempt) + random.uniform(0, 1) + debug.log(f"Attempt {attempt + 1} failed. Retrying in {wait_time:.2f} seconds...") + await asyncio.sleep(wait_time) + + @classmethod + async def _generate_image( + cls, + model: str, + prompt: str, + proxy: str = None, + **kwargs + ) -> AsyncIterator[ImageResponse]: + headers = { + **cls._get_headers() + } + + api_endpoint = cls.api_endpoints[model] + + async with ClientSession(headers=headers) as session: + data = { + "query": prompt + } + + async with session.post(api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + response_data = await response.json() + + if 'markdown' in response_data: + # Extract URL from markdown format: ![](url) + image_url = re.search(r'\!\[\]\((.*?)\)', response_data['markdown']) + if image_url: + yield ImageResponse(images=[image_url.group(1)], alt=prompt) + else: + raise ValueError("Could not extract image URL from markdown") + else: + raise KeyError("'markdown' key not found in response") + + @staticmethod + def _get_headers() -> dict: + return { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'authorization': f'Bearer 56c8eeff9971269d7a7e625ff88e8a83a34a556003a5c87c289ebe9a3d8a3d2c', + 'content-type': 'application/json', + 'origin': 'https://www.blackbox.ai', + 'referer': 'https://www.blackbox.ai', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' + } + + @classmethod + async def create_async( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + """ + Creates an async response for the provider. 
+ + Args: + model: The model to use + messages: The messages to process + proxy: Optional proxy to use + **kwargs: Additional arguments + + Returns: + AsyncResult: The response from the provider + """ + if model in cls.chat_models: + async for text in cls._generate_text(model, messages, proxy=proxy, **kwargs): + return text + elif model in cls.image_models: + prompt = messages[-1]['content'] + async for image in cls._generate_image(model, prompt, proxy=proxy, **kwargs): + return image + else: + raise ValueError(f"Model {model} not supported") diff --git a/g4f/Provider/ClaudeSon.py b/g4f/Provider/ClaudeSon.py new file mode 100644 index 00000000000..5adc4f38054 --- /dev/null +++ b/g4f/Provider/ClaudeSon.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class ClaudeSon(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://claudeson.net" + api_endpoint = "https://claudeson.net/api/coze/chat" + working = True + + supports_system_message = True + supports_message_history = True + + default_model = 'claude-3.5-sonnet' + models = [default_model] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "content-type": "application/json", + "origin": "https://claudeson.net", + "referer": "https://claudeson.net/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36" + } + async with ClientSession(headers=headers) as session: + data = { + "textStr": format_prompt(messages), + "type": "company" + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content: + if chunk: + yield chunk.decode() diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py index 6453d1673e0..48b87b9b11f 100644 --- a/g4f/Provider/DeepInfraChat.py +++ b/g4f/Provider/DeepInfraChat.py @@ -1,17 +1,25 @@ from __future__ import annotations +import json + +from aiohttp import ClientSession from ..typing import AsyncResult, Messages -from .needs_auth import OpenaiAPI +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -class DeepInfraChat(OpenaiAPI): - label = "DeepInfra Chat" +class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): url = "https://deepinfra.com/chat" + api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions" working = True - api_base = "https://api.deepinfra.com/v1/openai" - + needs_auth = False + supports_stream = True + supports_system_message = True + supports_message_history = True + default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' models = [ + 'meta-llama/Llama-3.3-70B-Instruct', 'meta-llama/Meta-Llama-3.1-8B-Instruct', + 'meta-llama/Llama-3.3-70B-Instruct-Turbo', default_model, 'Qwen/QwQ-32B-Preview', 'microsoft/WizardLM-2-8x22B', @@ -20,7 +28,8 @@ class DeepInfraChat(OpenaiAPI): 'nvidia/Llama-3.1-Nemotron-70B-Instruct', ] model_aliases = { "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct", + "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo", "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "qwq-32b": "Qwen/QwQ-32B-Preview",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B", @@ -30,20 +40,48 @@ class DeepInfraChat(OpenaiAPI): } @classmethod - def create_async_generator( + async def create_async_generator( cls, model: str, messages: Messages, proxy: str = None, **kwargs ) -> AsyncResult: + model = cls.get_model(model) + headers = { 'Accept-Language': 'en-US,en;q=0.9', 'Content-Type': 'application/json', 'Origin': 'https://deepinfra.com', 'Referer': 'https://deepinfra.com/', - 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', 'X-Deepinfra-Source': 'web-page', 'accept': 'text/event-stream', } - return super().create_async_generator(model, messages, proxy, headers=headers, **kwargs) \ No newline at end of file + + async with ClientSession(headers=headers) as session: + data = { + "model": model, + "messages": messages, + "stream": True + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content: + if chunk: + chunk_text = chunk.decode() + try: + # Handle streaming response + if chunk_text.startswith("data: "): + if chunk_text.strip() == "data: [DONE]": + continue + chunk_data = json.loads(chunk_text[6:]) + if content := chunk_data["choices"][0]["delta"].get("content"): + yield content + # Handle non-streaming response + else: + chunk_data = json.loads(chunk_text) + if content := chunk_data["choices"][0]["message"].get("content"): + yield content + except (json.JSONDecodeError, KeyError): + continue diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py index dece4c39c5f..f1ad0031301 100644 --- a/g4f/Provider/PollinationsAI.py +++ b/g4f/Provider/PollinationsAI.py @@ -1,109 +1,228 @@ from __future__ import annotations -from urllib.parse import quote +import json import random import requests +from typing import Optional from aiohttp import ClientSession +from ..requests.raise_for_status import raise_for_status from ..typing import AsyncResult, Messages from ..image import ImageResponse -from ..requests.raise_for_status import raise_for_status -from ..requests.aiohttp import get_connector from .needs_auth.OpenaiAPI import OpenaiAPI -from .helper import format_prompt_max_length class PollinationsAI(OpenaiAPI): label = "Pollinations AI" url = "https://pollinations.ai" + working = True - needs_auth = False + needs_auth = True supports_stream = True + supports_system_message = True + supports_message_history = True + + # API endpoints base api_base = "https://text.pollinations.ai/openai" - + + # API endpoints + text_api_endpoint = "https://text.pollinations.ai" + image_api_endpoint = "https://image.pollinations.ai" + + # Models configuration default_model = "openai" + default_image_model = "flux" + + image_models = [] + models = [] + additional_models_image = ["midjourney", "dall-e-3"] additional_models_text = ["sur", "sur-mistral", "claude"] model_aliases = { "gpt-4o": "openai", "mistral-nemo": "mistral", - "llama-3.1-70b": "llama", # + "llama-3.1-70b": "llama", "gpt-4": "searchgpt", "gpt-4": "claude", - "qwen-2.5-coder-32b": "qwen-coder", - "claude-3.5-sonnet": "sur", + "qwen-2.5-coder-32b": "qwen-coder", + "claude-3.5-sonnet": "sur", } - - headers = { - "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36" - } - + @classmethod def get_models(cls, **kwargs): + # Initialize model lists if not exists if not hasattr(cls, 'image_models'): cls.image_models = [] + if not 
hasattr(cls, 'text_models'): + cls.text_models = [] + + # Fetch image models if not cached if not cls.image_models: url = "https://image.pollinations.ai/models" - response = requests.get(url, headers=cls.headers) + response = requests.get(url) raise_for_status(response) cls.image_models = response.json() cls.image_models.extend(cls.additional_models_image) - if not hasattr(cls, 'models'): - cls.models = [] - if not cls.models: + + # Fetch text models if not cached + if not cls.text_models: url = "https://text.pollinations.ai/models" - response = requests.get(url, headers=cls.headers) + response = requests.get(url) raise_for_status(response) - cls.models = [model.get("name") for model in response.json()] - cls.models.extend(cls.image_models) - cls.models.extend(cls.additional_models_text) - return cls.models + cls.text_models = [model.get("name") for model in response.json()] + cls.text_models.extend(cls.additional_models_text) + + # Return combined models + return cls.text_models + cls.image_models @classmethod async def create_async_generator( cls, model: str, messages: Messages, - prompt: str = None, - api_key: str = None, proxy: str = None, - seed: str = None, + # Image specific parameters + prompt: str = None, width: int = 1024, height: int = 1024, + seed: Optional[int] = None, + nologo: bool = True, + private: bool = False, + enhance: bool = False, + safe: bool = False, + # Text specific parameters + api_key: str = None, + temperature: float = 0.5, + presence_penalty: float = 0, + top_p: float = 1, + frequency_penalty: float = 0, + stream: bool = True, **kwargs ) -> AsyncResult: model = cls.get_model(model) - if cls.get_models() and model in cls.image_models: - async for response in cls._generate_image(model, messages, prompt, proxy, seed, width, height): - yield response - elif model in cls.models: - async for response in cls._generate_text(model, messages, api_key, proxy, **kwargs): - yield response + + # Check if models + # Image generation + if model in cls.image_models: + async for result in cls._generate_image( + model=model, + messages=messages, + prompt=prompt, + proxy=proxy, + width=width, + height=height, + seed=seed, + nologo=nologo, + private=private, + enhance=enhance, + safe=safe + ): + yield result else: - raise ValueError(f"Unknown model: {model}") + # Text generation + async for result in cls._generate_text( + model=model, + messages=messages, + proxy=proxy, + api_key=api_key, + temperature=temperature, + presence_penalty=presence_penalty, + top_p=top_p, + frequency_penalty=frequency_penalty, + stream=stream + ): + yield result @classmethod - async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, proxy: str = None, seed: str = None, width: int = 1024, height: int = 1024): - if prompt is None: - prompt = messages[-1]["content"] + async def _generate_image( + cls, + model: str, + messages: Messages, + prompt: str, + proxy: str, + width: int, + height: int, + seed: Optional[int], + nologo: bool, + private: bool, + enhance: bool, + safe: bool + ) -> AsyncResult: if seed is None: - seed = random.randint(0, 100000) - image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}" - async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session: - async with session.get(image) as response: - await raise_for_status(response) - yield ImageResponse(image, prompt) + seed = random.randint(0, 10000) + + + headers = { + 'Accept': 
'*/*', + 'Accept-Language': 'en-US,en;q=0.9', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36', + } + + params = { + "seed": seed, + "width": width, + "height": height, + "model": model, + "nologo": nologo, + "private": private, + "enhance": enhance, + "safe": safe + } + params = {k: v for k, v in params.items() if v is not None} + + async with ClientSession(headers=headers) as session: + prompt = quote(messages[-1]["content"]) + param_string = "&".join(f"{k}={v}" for k, v in params.items()) + url = f"{cls.image_api_endpoint}/prompt/{prompt}?{param_string}" + + async with session.head(url, proxy=proxy) as response: + if response.status == 200: + image_response = ImageResponse(images=url, alt=messages[-1]["content"]) + yield image_response @classmethod - async def _generate_text(cls, model: str, messages: Messages, api_key: str = None, proxy: str = None, **kwargs): + async def _generate_text( + cls, + model: str, + messages: Messages, + proxy: str, + api_key: str, + temperature: float, + presence_penalty: float, + top_p: float, + frequency_penalty: float, + stream: bool + ) -> AsyncResult: if api_key is None: - async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session: - prompt = format_prompt_max_length(messages, 5000) - async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response: - await raise_for_status(response) - async for line in response.content.iter_any(): - yield line.decode(errors="ignore") - else: - async for chunk in super().create_async_generator( - model, messages, proxy=proxy, **kwargs - ): - yield chunk \ No newline at end of file + api_key = "dummy" # Default value if api_key is not provided + + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "authorization": f"Bearer {api_key}", + "content-type": "application/json", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36" + } + + async with ClientSession(headers=headers) as session: + data = { + "messages": messages, + "model": model, + "temperature": temperature, + "presence_penalty": presence_penalty, + "top_p": top_p, + "frequency_penalty": frequency_penalty, + "jsonMode": False, + "stream": stream + } + + async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content: + if chunk: + decoded_chunk = chunk.decode() + try: + json_response = json.loads(decoded_chunk) + content = json_response['choices'][0]['message']['content'] + yield content + except json.JSONDecodeError: + yield decoded_chunk diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py index 847da6d7a35..76516e2f30c 100644 --- a/g4f/Provider/Prodia.py +++ b/g4f/Provider/Prodia.py @@ -2,6 +2,7 @@ from aiohttp import ClientSession import asyncio +import random from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin @@ -13,10 +14,11 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): working = True default_model = 'absolutereality_v181.safetensors [3d9d4d2b]' + default_image_model = default_model image_models = [ '3Guofeng3_v34.safetensors [50f420de]', 'absolutereality_V16.safetensors [37db0fc3]', - default_model, + default_image_model, 'amIReal_V41.safetensors [0a8a2e61]', 'analog-diffusion-1.0.ckpt [9ca13f02]', 'aniverse_v30.safetensors [579e6f85]', @@ -78,7 +80,7 @@ 
diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py
index 847da6d7a35..76516e2f30c 100644
--- a/g4f/Provider/Prodia.py
+++ b/g4f/Provider/Prodia.py
@@ -2,6 +2,8 @@
 from aiohttp import ClientSession
 import asyncio
+import random
+from typing import Optional
 
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -13,10 +14,11 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
 
     default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
+    default_image_model = default_model
     image_models = [
         '3Guofeng3_v34.safetensors [50f420de]',
         'absolutereality_V16.safetensors [37db0fc3]',
-        default_model,
+        default_image_model,
         'amIReal_V41.safetensors [0a8a2e61]',
         'analog-diffusion-1.0.ckpt [9ca13f02]',
         'aniverse_v30.safetensors [579e6f85]',
@@ -78,7 +80,7 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
         'shoninsBeautiful_v10.safetensors [25d8c546]',
         'theallys-mix-ii-churned.safetensors [5d9225a4]',
         'timeless-1.0.ckpt [7c4971d4]',
-        'toonyou_beta6.safetensors [980f6b15]',
+        'toonyou_beta6.safetensors [980f6b15]'
     ]
 
     models = [*image_models]
@@ -100,13 +102,16 @@ async def create_async_generator(
         negative_prompt: str = "",
         steps: str = 20, # 1-25
         cfg: str = 7, # 0-20
-        seed: str = "-1",
+        seed: Optional[int] = None,
         sampler: str = "DPM++ 2M Karras", # "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"
         aspect_ratio: str = "square", # "square", "portrait", "landscape"
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
 
+        if seed is None:
+            seed = random.randint(0, 10000)
+
         headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 04ff8396bf7..82e8da069c7 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -10,33 +10,34 @@
 from .needs_auth import *
 from .not_working import *
 from .local import *
+from .hf_space import *
 
-from .Airforce import Airforce
-from .AmigoChat import AmigoChat
-from .Blackbox import Blackbox
-from .Blackbox2 import Blackbox2
-from .ChatGpt import ChatGpt
-from .ChatGptEs import ChatGptEs
-from .Cloudflare import Cloudflare
-from .Copilot import Copilot
-from .DarkAI import DarkAI
-from .DDG import DDG
-from .DeepInfraChat import DeepInfraChat
-from .Flux import Flux
-from .Free2GPT import Free2GPT
-from .FreeGpt import FreeGpt
-from .GizAI import GizAI
-from .Liaobots import Liaobots
-from .Mhystical import Mhystical
-from .PerplexityLabs import PerplexityLabs
-from .Pi import Pi
-from .Pizzagpt import Pizzagpt
-from .PollinationsAI import PollinationsAI
-from .Prodia import Prodia
-from .ReplicateHome import ReplicateHome
-from .RubiksAI import RubiksAI
-from .TeachAnything import TeachAnything
-from .You import You
+from .Airforce import Airforce
+from .AmigoChat import AmigoChat
+from .Blackbox import Blackbox
+from .BlackboxCreateAgent import BlackboxCreateAgent
+from .ChatGpt import ChatGpt
+from .ChatGptEs import ChatGptEs
+from .ClaudeSon import ClaudeSon
+from .Cloudflare import Cloudflare
+from .Copilot import Copilot
+from .DarkAI import DarkAI
+from .DDG import DDG
+from .DeepInfraChat import DeepInfraChat
+from .Free2GPT import Free2GPT
+from .FreeGpt import FreeGpt
+from .GizAI import GizAI
+from .Liaobots import Liaobots
+from .Mhystical import Mhystical
+from .PerplexityLabs import PerplexityLabs
+from .Pi import Pi
+from .Pizzagpt import Pizzagpt
+from .PollinationsAI import PollinationsAI
+from .Prodia import Prodia
+from .ReplicateHome import ReplicateHome
+from .RubiksAI import RubiksAI
+from .TeachAnything import TeachAnything
+from .You import You
 
 import sys
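Prodia's `seed` now defaults to `None` and is replaced with a random draw, so repeated calls vary unless the caller pins a value. A short sketch of the pattern (pure Python, no provider dependencies):

```python
import random
from typing import Optional

def resolve_seed(seed: Optional[int] = None) -> int:
    # None means "surprise me": draw a fresh seed in the same
    # 0-10000 range the provider uses.
    return random.randint(0, 10000) if seed is None else seed

assert resolve_seed(1234) == 1234  # pinned seed: reproducible images
print(resolve_seed())              # unpinned: a new value per call
```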
diff --git a/g4f/Provider/Flux.py b/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
similarity index 91%
rename from g4f/Provider/Flux.py
rename to g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
index d394915383e..7987cc1b52f 100644
--- a/g4f/Provider/Flux.py
+++ b/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
@@ -3,15 +3,16 @@
 import json
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from ..image import ImageResponse, ImagePreview
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...image import ImageResponse, ImagePreview
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
-class Flux(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Flux (HuggingSpace)"
+class BlackForestLabsFlux1Dev(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://black-forest-labs-flux-1-dev.hf.space"
     api_endpoint = "/gradio_api/call/infer"
+    working = True
+
     default_model = 'flux-dev'
     models = [default_model]
     image_models = [default_model]
diff --git a/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py b/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py
new file mode 100644
index 00000000000..7b29b7afc6f
--- /dev/null
+++ b/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+import random
+from typing import Optional
+
+from ...typing import AsyncResult, Messages
+from ...image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://black-forest-labs-flux-1-schnell.hf.space"
+    api_endpoint = "https://black-forest-labs-flux-1-schnell.hf.space/call/infer"
+
+    working = True
+
+    default_model = "flux-schnell"
+    default_image_model = default_model
+    image_models = [default_image_model]
+    models = [*image_models]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        width: int = 768,
+        height: int = 768,
+        num_inference_steps: int = 2,
+        seed: Optional[int] = None,
+        randomize_seed: bool = False,
+        **kwargs
+    ) -> AsyncResult:
+        if seed is None:
+            seed = random.randint(0, 10000)
+
+        width = max(32, width - (width % 8))
+        height = max(32, height - (height % 8))
+
+        prompt = messages[-1]["content"]
+
+        payload = {
+            "data": [
+                prompt,
+                seed,
+                randomize_seed,
+                width,
+                height,
+                num_inference_steps
+            ]
+        }
+
+        async with ClientSession() as session:
+            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+                event_id = response_data['event_id']
+
+                while True:
+                    async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
+                        status_response.raise_for_status()
+                        events = (await status_response.text()).split('\n\n')
+
+                        for event in events:
+                            if event.startswith('event:'):
+                                event_parts = event.split('\ndata: ')
+                                if len(event_parts) < 2:
+                                    continue
+
+                                event_type = event_parts[0].split(': ')[1]
+                                data = event_parts[1]
+
+                                if event_type == 'error':
+                                    raise Exception(f"Error generating image: {data}")
+                                elif event_type == 'complete':
+                                    json_data = json.loads(data)
+                                    image_url = json_data[0]['url']
+                                    yield ImageResponse(images=[image_url], alt=prompt)
+                                    return
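Both Schnell providers speak the same two-step Gradio queue protocol: POST the inputs to the `/call/infer` endpoint to obtain an `event_id`, then GET `{endpoint}/{event_id}` and parse the server-sent-event stream for a `complete` event carrying the image URL. A condensed synchronous sketch with `requests` (URL and payload order taken from the diff; it assumes the status GET streams until the job finishes, so no retry loop is shown):

```python
import json
import requests

api = "https://black-forest-labs-flux-1-schnell.hf.space/call/infer"
# Positional inputs: prompt, seed, randomize_seed, width, height, steps.
payload = {"data": ["a white siamese cat", 42, False, 768, 768, 2]}

event_id = requests.post(api, json=payload).json()["event_id"]

# SSE blocks are separated by blank lines, e.g.:
#   event: complete
#   data: [{"url": "https://..."}]
for block in requests.get(f"{api}/{event_id}").text.split("\n\n"):
    if block.startswith("event: complete"):
        data = block.split("\ndata: ", 1)[1]
        print(json.loads(data)[0]["url"])
```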
diff --git a/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py b/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py
new file mode 100644
index 00000000000..bd55b20b846
--- /dev/null
+++ b/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+import random
+from typing import Optional
+
+from ...typing import AsyncResult, Messages
+from ...image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class VoodoohopFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://voodoohop-flux-1-schnell.hf.space"
+    api_endpoint = "https://voodoohop-flux-1-schnell.hf.space/call/infer"
+
+    working = True
+
+    default_model = "flux-schnell"
+    default_image_model = default_model
+    image_models = [default_image_model]
+    models = [*image_models]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        width: int = 768,
+        height: int = 768,
+        num_inference_steps: int = 2,
+        seed: Optional[int] = None,
+        randomize_seed: bool = False,
+        **kwargs
+    ) -> AsyncResult:
+        if seed is None:
+            seed = random.randint(0, 10000)
+
+        width = max(32, width - (width % 8))
+        height = max(32, height - (height % 8))
+
+        prompt = messages[-1]["content"]
+
+        payload = {
+            "data": [
+                prompt,
+                seed,
+                randomize_seed,
+                width,
+                height,
+                num_inference_steps
+            ]
+        }
+
+        async with ClientSession() as session:
+            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+                event_id = response_data['event_id']
+
+                while True:
+                    async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
+                        status_response.raise_for_status()
+                        events = (await status_response.text()).split('\n\n')
+
+                        for event in events:
+                            if event.startswith('event:'):
+                                event_parts = event.split('\ndata: ')
+                                if len(event_parts) < 2:
+                                    continue
+
+                                event_type = event_parts[0].split(': ')[1]
+                                data = event_parts[1]
+
+                                if event_type == 'error':
+                                    raise Exception(f"Error generating image: {data}")
+                                elif event_type == 'complete':
+                                    json_data = json.loads(data)
+                                    image_url = json_data[0]['url']
+                                    yield ImageResponse(images=[image_url], alt=prompt)
+                                    return
diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py
new file mode 100644
index 00000000000..94524e3570d
--- /dev/null
+++ b/g4f/Provider/hf_space/__init__.py
@@ -0,0 +1,3 @@
+from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
+from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
+from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
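With `hf_space/__init__.py` re-exporting the three classes and `from .hf_space import *` pulling them into `g4f.Provider`, the new providers can be selected explicitly. A hedged usage sketch (the `images.generate` call mirrors the documented client API; exact keyword support may differ per provider):

```python
import asyncio
from g4f.client import AsyncClient
from g4f.Provider import BlackForestLabsFlux1Schnell

async def main():
    client = AsyncClient(provider=BlackForestLabsFlux1Schnell)
    response = await client.images.generate(
        model="flux-schnell",
        prompt="a white siamese cat",
        response_format="url"
    )
    print(response.data[0].url)

asyncio.run(main())
```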
diff --git a/g4f/models.py b/g4f/models.py
index 19354a85f6b..0d2b5dbc884 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -5,17 +5,17 @@
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
     Blackbox,
-    Blackbox2,
+    BlackboxCreateAgent,
     BingCreateImages,
     ChatGpt,
     ChatGptEs,
+    ClaudeSon,
     Cloudflare,
     Copilot,
     CopilotAccount,
     DarkAI,
     DDG,
     DeepInfraChat,
-    Flux,
     GigaChat,
     Gemini,
     GeminiPro,
@@ -36,6 +36,11 @@
     ReplicateHome,
     RubiksAI,
     TeachAnything,
+
+    ## HuggingSpace ##
+    BlackForestLabsFlux1Dev,
+    BlackForestLabsFlux1Schnell,
+    VoodoohopFlux1Schnell,
 )
 
 @dataclass(unsafe_hash=True)
@@ -67,7 +72,7 @@ class ImageModel(Model):
     best_provider = IterListProvider([
         DDG,
         Pizzagpt,
-        Blackbox2,
+        BlackboxCreateAgent,
         Blackbox,
         Copilot,
         DeepInfraChat,
@@ -175,7 +180,7 @@ class ImageModel(Model):
 llama_3_1_70b = Model(
     name = "llama-3.1-70b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, Blackbox2, TeachAnything, PollinationsAI, DarkAI, Airforce, RubiksAI, PerplexityLabs])
+    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, BlackboxCreateAgent, TeachAnything, PollinationsAI, DarkAI, Airforce, RubiksAI, PerplexityLabs])
 )
 
 llama_3_1_405b = Model(
@@ -313,7 +318,7 @@ class ImageModel(Model):
 claude_3_5_sonnet = Model(
     name = 'claude-3.5-sonnet',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([Blackbox, PollinationsAI, Liaobots])
+    best_provider = IterListProvider([Blackbox, PollinationsAI, ClaudeSon, Liaobots])
 )
 
 ### Reka AI ###
@@ -555,7 +560,7 @@ class ImageModel(Model):
 flux = ImageModel(
     name = 'flux',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([Blackbox, Blackbox2, PollinationsAI, Airforce])
+    best_provider = IterListProvider([Blackbox, BlackboxCreateAgent, PollinationsAI, Airforce])
 )
 
 flux_pro = ImageModel(
@@ -567,7 +572,13 @@ class ImageModel(Model):
 flux_dev = ImageModel(
     name = 'flux-dev',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([Flux, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([BlackForestLabsFlux1Dev, HuggingChat, HuggingFace])
+)
+
+flux_schnell = ImageModel(
+    name = 'flux-schnell',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell])
 )
 
 flux_realism = ImageModel(
@@ -813,6 +824,7 @@ class ModelUtils:
         flux.name: flux,
         flux_pro.name: flux_pro,
         flux_dev.name: flux_dev,
+        flux_schnell.name: flux_schnell,
         flux_realism.name: flux_realism,
         flux_cablyai.name: flux_cablyai,
         flux_anime.name: flux_anime,
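The final hunk registers `flux-schnell` in the `ModelUtils` name-to-model mapping, which is what lets a plain string resolve to the new providers. A small lookup sketch (assumes the mapping is the `convert` dict exposed by `g4f.models.ModelUtils`; the attribute name is not shown in this diff):

```python
from g4f.models import ModelUtils

model = ModelUtils.convert["flux-schnell"]
print(model.name)           # "flux-schnell"
print(model.base_provider)  # "Flux AI"
# best_provider is an IterListProvider over the two HuggingSpace backends.
print(model.best_provider)
```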