Merge pull request #2540 from hlohaus/sun
Add Edge as Browser for nodriver
hlohaus authored Jan 5, 2025
2 parents 9fd4e3c + afc94b2 commit b35240c
Showing 21 changed files with 386 additions and 159 deletions.
2 changes: 1 addition & 1 deletion docker/Dockerfile
@@ -78,7 +78,7 @@ RUN pip install --break-system-packages --upgrade pip \
&& pip install --break-system-packages \
undetected-chromedriver selenium-wire \
&& pip uninstall -y --break-system-packages \
pywebview plyer
pywebview

# Copy the entire package into the container.
ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f
19 changes: 17 additions & 2 deletions g4f/Provider/Cloudflare.py
@@ -2,12 +2,14 @@

import asyncio
import json
from pathlib import Path

from ..typing import AsyncResult, Messages, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, get_running_loop
from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
from ..requests import DEFAULT_HEADERS, has_nodriver, has_curl_cffi
from ..providers.response import FinishReason
from ..cookies import get_cookies_dir
from ..errors import ResponseStatusError, ModelNotFoundError

class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
@@ -19,7 +21,7 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "@cf/meta/llama-3.1-8b-instruct"
default_model = "@cf/meta/llama-3.3-70b-instruct-fp8-fast"
model_aliases = {
"llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16",
"llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
@@ -33,6 +35,10 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
}
_args: dict = None

@classmethod
def get_cache_file(cls) -> Path:
return Path(get_cookies_dir()) / f"auth_{cls.parent if hasattr(cls, 'parent') else cls.__name__}.json"

@classmethod
def get_models(cls) -> str:
if not cls.models:
@@ -67,7 +73,11 @@ async def create_async_generator(
timeout: int = 300,
**kwargs
) -> AsyncResult:
cache_file = cls.get_cache_file()
if cls._args is None:
if cache_file.exists():
with cache_file.open("r") as f:
cls._args = json.load(f)
if has_nodriver:
cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
else:
@@ -93,6 +103,8 @@ async def create_async_generator(
await raise_for_status(response)
except ResponseStatusError:
cls._args = None
if cache_file.exists():
cache_file.unlink()
raise
reason = None
async for line in response.iter_lines():
@@ -109,4 +121,7 @@
except Exception:
continue
if reason is not None:
yield FinishReason(reason)
yield FinishReason(reason)

with cache_file.open("w") as f:
json.dump(cls._args, f)
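
Note on this file: the Cloudflare changes add a per-provider JSON cache for the request arguments (headers and cookies) that nodriver harvests from a real browser. The cache is read before launching a browser, deleted when a request fails with a status error, and rewritten after a successful stream. A minimal sketch of that load/invalidate/persist cycle — `fetch_args` and `do_request` are hypothetical stand-ins, not functions from the commit, and Path("cookies") stands in for get_cookies_dir():

import json
from pathlib import Path

class CachedArgsProvider:
    _args: dict = None

    @classmethod
    def get_cache_file(cls) -> Path:
        # One cache file per provider, mirroring the auth_<name>.json naming.
        return Path("cookies") / f"auth_{cls.__name__}.json"

    @classmethod
    async def run(cls, fetch_args, do_request):
        cache_file = cls.get_cache_file()
        if cls._args is None:
            if cache_file.exists():
                with cache_file.open("r") as f:
                    cls._args = json.load(f)  # reuse args from an earlier run
            else:
                cls._args = await fetch_args()  # e.g. drive a browser once
        try:
            result = await do_request(cls._args)
        except Exception:
            cls._args = None  # drop the stale in-memory args ...
            if cache_file.exists():
                cache_file.unlink()  # ... and the on-disk cache, then re-raise
            raise
        with cache_file.open("w") as f:
            json.dump(cls._args, f)  # persist args that are known to work
        return result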
48 changes: 37 additions & 11 deletions g4f/Provider/DDG.py
@@ -1,14 +1,18 @@
from __future__ import annotations

from aiohttp import ClientSession, ClientTimeout, ClientError
import asyncio
from aiohttp import ClientSession, ClientTimeout, ClientError, ClientResponseError
import json

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
from .helper import format_prompt
from ..providers.response import FinishReason
from .. import debug

class Conversation(BaseConversation):
vqd: str = None
message_history: Messages = []
cookies: dict = {}

def __init__(self, model: str):
self.model = model
@@ -65,20 +69,24 @@ async def create_async_generator(
conversation: Conversation = None,
return_conversation: bool = False,
proxy: str = None,
headers: dict = {
"Content-Type": "application/json",
},
cookies: dict = None,
max_retries: int = 3,
**kwargs
) -> AsyncResult:
headers = {
"Content-Type": "application/json",
}
async with ClientSession(headers=headers, timeout=ClientTimeout(total=30)) as session:
if cookies is None and conversation is not None:
cookies = conversation.cookies
async with ClientSession(headers=headers, cookies=cookies, timeout=ClientTimeout(total=30)) as session:
# Fetch VQD token
if conversation is None:
conversation = Conversation(model)

if conversation.vqd is None:
conversation.cookies = session.cookie_jar
conversation.vqd = await cls.fetch_vqd(session)

headers["x-vqd-4"] = conversation.vqd
if conversation.vqd is not None:
headers["x-vqd-4"] = conversation.vqd

if return_conversation:
yield conversation
@@ -97,15 +105,33 @@ async def create_async_generator(
async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
conversation.vqd = response.headers.get("x-vqd-4")
response.raise_for_status()
reason = None
async for line in response.content:
line = line.decode("utf-8").strip()
if line.startswith("data:"):
try:
message = json.loads(line[5:].strip())
if "message" in message:
yield message["message"]
if "message" in message and message["message"]:
yield message["message"]
reason = "max_tokens"
elif message.get("message") == '':
reason = "stop"
except json.JSONDecodeError:
continue
if reason is not None:
yield FinishReason(reason)
except ClientResponseError as e:
if e.code in (400, 429) and max_retries > 0:
debug.log(f"Retry: max_retries={max_retries}, wait={512 - max_retries * 48}: {e}")
await asyncio.sleep(512 - max_retries * 48)
is_started = False
async for chunk in cls.create_async_generator(model, messages, conversation, return_conversation, max_retries=max_retries-1, **kwargs):
if chunk:
yield chunk
is_started = True
if is_started:
return
raise e
except ClientError as e:
raise Exception(f"HTTP ClientError occurred: {e}")
except asyncio.TimeoutError:
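
Note on this file: DDG.py now tracks whether any non-empty chunk was streamed and, on a 400/429 ClientResponseError, sleeps for `512 - max_retries * 48` seconds (368 s, 416 s, 464 s as max_retries counts down from 3) before recursing into itself with one retry fewer. A rough sketch of that back-off shape under the same arithmetic — `send` is a hypothetical async-generator factory, and a generic exception stands in for aiohttp's ClientResponseError:

import asyncio

class HTTPStatusError(Exception):
    def __init__(self, code: int):
        self.code = code

async def stream_with_backoff(send, max_retries: int = 3):
    # Delay grows as retries are consumed: 368s, 416s, 464s for retries 3..1.
    try:
        async for chunk in send():
            yield chunk
    except HTTPStatusError as e:
        if e.code in (400, 429) and max_retries > 0:
            await asyncio.sleep(512 - max_retries * 48)
            is_started = False
            async for chunk in stream_with_backoff(send, max_retries - 1):
                if chunk:
                    yield chunk
                    is_started = True
            if is_started:
                return  # the retry produced output, so swallow the error
        raise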
2 changes: 1 addition & 1 deletion g4f/Provider/needs_auth/HuggingFace.py
@@ -137,7 +137,7 @@ async def create_async_generator(
else:
is_special = True
debug.log(f"Special token: {is_special}")
yield FinishReason("stop" if is_special else "max_tokens", actions=["variant"] if is_special else ["continue", "variant"])
yield FinishReason("stop" if is_special else "length", actions=["variant"] if is_special else ["continue", "variant"])
else:
if response.headers["content-type"].startswith("image/"):
base64_data = base64.b64encode(b"".join([chunk async for chunk in response.iter_content()]))
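
Note on this file: the only change renames the cut-off finish reason from "max_tokens" to "length", aligning the provider with the OpenAI-style pair of finish values. A one-function illustration of the convention:

def finish_reason(is_special_token: bool) -> str:
    # "stop": the model emitted its own stop/special token.
    # "length": generation was cut off by the token budget
    # (this provider previously reported "max_tokens" for that case).
    return "stop" if is_special_token else "length"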
15 changes: 8 additions & 7 deletions g4f/Provider/needs_auth/OpenaiChat.py
@@ -105,11 +105,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
_expires: int = None

@classmethod
async def on_auth_async(cls, **kwargs) -> AuthResult:
async def on_auth_async(cls, **kwargs) -> AsyncIterator:
if cls.needs_auth:
async for _ in cls.login():
pass
return AuthResult(
async for chunk in cls.login():
yield chunk
yield AuthResult(
api_key=cls._api_key,
cookies=cls._cookies or RequestConfig.cookies or {},
headers=cls._headers or RequestConfig.headers or cls.get_default_headers(),
@@ -174,7 +174,8 @@ async def upload_image(image, image_name):
"use_case": "multimodal"
}
# Post the image data to the service and get the image data
async with session.post(f"{cls.url}/backend-api/files", json=data, headers=auth_result.headers) as response:
headers = auth_result.headers if hasattr(auth_result, "headers") else None
async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
cls._update_request_args(auth_result, session)
await raise_for_status(response, "Create file failed")
image_data = {
@@ -360,7 +361,7 @@ async def create_authed(
f"{cls.url}/backend-anon/sentinel/chat-requirements"
if cls._api_key is None else
f"{cls.url}/backend-api/sentinel/chat-requirements",
json={"p": None if auth_result.proof_token is None else get_requirements_token(auth_result.proof_token)},
json={"p": None if not getattr(auth_result, "proof_token") else get_requirements_token(auth_result.proof_token)},
headers=cls._headers
) as response:
if response.status == 401:
@@ -386,7 +387,7 @@
proofofwork = generate_proof_token(
**chat_requirements["proofofwork"],
user_agent=auth_result.headers.get("user-agent"),
proof_token=auth_result.proof_token
proof_token=getattr(auth_result, "proof_token")
)
[debug.log(text) for text in (
#f"Arkose: {'False' if not need_arkose else auth_result.arkose_token[:12]+'...'}",
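
Note on this file: on_auth_async changes from returning a single AuthResult to an async iterator that first relays login progress and only then yields the credentials; the later hunks also read `headers` and `proof_token` defensively via hasattr/getattr. A minimal sketch of the yield-then-credentials pattern, with illustrative names and values:

from typing import Any, AsyncIterator

class AuthResult:
    def __init__(self, **kwargs: Any):
        self.__dict__.update(kwargs)

async def login() -> AsyncIterator[str]:
    yield "opening browser..."  # progress a UI can show while auth runs
    yield "logged in"

async def on_auth_async() -> AsyncIterator:
    async for chunk in login():
        yield chunk  # forward every progress chunk to the caller
    yield AuthResult(api_key="sk-...", cookies={}, headers={})

async def authenticate() -> AuthResult:
    async for item in on_auth_async():
        if isinstance(item, AuthResult):
            return item  # the final item carries the credentials
        print(item)
    raise RuntimeError("login ended without credentials")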
5 changes: 3 additions & 2 deletions g4f/api/__init__.py
@@ -41,7 +41,7 @@
from g4f.cookies import read_cookie_files, get_cookies_dir
from g4f.Provider import ProviderType, ProviderUtils, __providers__
from g4f.gui import get_gui_app
from g4f.tools.files import supports_filename, get_streaming
from g4f.tools.files import supports_filename, get_async_streaming
from .stubs import (
ChatCompletionsConfig, ImageGenerationConfig,
ProviderResponseModel, ModelResponseModel,
@@ -436,7 +436,8 @@ def read_files(request: Request, bucket_id: str, delete_files: bool = True, refi
event_stream = "text/event-stream" in request.headers.get("accept", "")
if not os.path.isdir(bucket_dir):
return ErrorResponse.from_message("Bucket dir not found", 404)
return StreamingResponse(get_streaming(bucket_dir, delete_files, refine_chunks_with_spacy, event_stream), media_type="text/plain")
return StreamingResponse(get_async_streaming(bucket_dir, delete_files, refine_chunks_with_spacy, event_stream),
media_type="text/event-stream" if event_stream else "text/plain")

@self.app.post("/v1/files/{bucket_id}", responses={
HTTP_200_OK: {"model": UploadResponseModel}
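
Note on this file: the files endpoint switches to the async streaming helper and now negotiates the response media type — clients that sent Accept: text/event-stream get SSE, everyone else plain text. A reduced FastAPI sketch of that negotiation; `stream_bucket` is a hypothetical stand-in for get_async_streaming, and the route shape is simplified:

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse

app = FastAPI()

async def stream_bucket(bucket_dir: str, event_stream: bool):
    # Stand-in for get_async_streaming: frame chunks as SSE when requested.
    for chunk in ("first part", "second part"):
        yield f"data: {chunk}\n\n" if event_stream else chunk

@app.get("/v1/files/{bucket_id}")
async def read_files(request: Request, bucket_id: str):
    event_stream = "text/event-stream" in request.headers.get("accept", "")
    return StreamingResponse(
        stream_bucket(f"buckets/{bucket_id}", event_stream),
        media_type="text/event-stream" if event_stream else "text/plain",
    )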
95 changes: 81 additions & 14 deletions g4f/gui/client/home.html
@@ -103,17 +103,29 @@
z-index: -1;
}

iframe.stream {
.stream-widget {
max-height: 0;
transition: max-height 0.15s ease-out;
color: var(--colour-5);
overflow: scroll;
text-align: left;
}

iframe.stream.show {
.stream-widget.show {
max-height: 1000px;
height: 1000px;
transition: max-height 0.25s ease-in;
background: rgba(255,255,255,0.7);
border-top: 2px solid rgba(255,255,255,0.5);
padding: 20px;
}

.stream-widget img {
max-width: 320px;
}

#stream-container {
width: 100%;
}

.description {
@@ -207,32 +219,87 @@
<p>Powered by the G4F framework</p>
</div>

<iframe id="stream-widget" class="stream" frameborder="0"></iframe>
<iframe class="stream-widget" frameborder="0"></iframe>
</div>
<script>
const iframe = document.getElementById('stream-widget');
let search = (navigator.language == "de" ? "news in deutschland" : navigator.language == "en" ? "world news" : navigator.language);
if (Math.floor(Math.random() * 6) % 2 == 0) {
const iframe = document.querySelector('.stream-widget');
const rand_idx = Math.floor(Math.random() * 9)
if (rand_idx < 3) {
search = "xtekky/gpt4free releases";
} else if (rand_idx < 5) {
search = "developer news";
} else {
search = (navigator.language == "de" ? "news in deutsch" : navigator.language == "en" ? "world news" : `news in ${navigator.language}`);
}
const url = "/backend-api/v2/create?prompt=Create of overview of the news in plain text&stream=1&web_search=" + search;
const summary_prompt = "Give a summary of the provided text in ```markdown``` format. Add maybe one or more images.";
const url = `/backend-api/v2/create?prompt=${summary_prompt}&stream=1&web_search=${search}`;
iframe.src = url;
setTimeout(()=>iframe.classList.add('show'), 3000);
const message = "Loading...";
setTimeout(()=>{
iframe.classList.add('show');
const iframeDocument = iframe.contentDocument || iframe.contentWindow?.document;
if (iframeDocument) {
const iframeBody = iframeDocument.querySelector("body");
if (iframeBody) {
iframeBody.innerHTML = message + iframeBody.innerHTML;
}
} else {
iframe.parentElement.removeChild(iframe);
}
}, 1000);

function filterMarkdown(text, allowedTypes = null, defaultValue = null) {
const match = text.match(/```(.+)\n(?<code>[\s\S]+?)(\n```|$)/);
if (match) {
const [, type, code] = match;
if (!allowedTypes || allowedTypes.includes(type)) {
return code;
}
}
return defaultValue;
}

let scroll_to_bottom_callback = () => {
const i = document.querySelector(".stream-widget");
if (!i.contentWindow || !i.contentDocument) {
return;
}
clientHeight = i.contentDocument.body.scrollHeight;
i.contentWindow.scrollTo(0, clientHeight);
if (clientHeight - i.contentWindow.scrollY < 2 * clientHeight) {
setTimeout(scroll_to_bottom_callback, 1000);
}
};
setTimeout(scroll_to_bottom_callback, 1000);

iframe.onload = () => {
const iframeDocument = iframe.contentDocument || iframe.contentWindow.document;
const iframeBody = iframeDocument.querySelector("body");
const iframeContent = iframeDocument.querySelector("pre");
let iframeText = iframeContent.innerHTML;
const markdown = window.markdownit();
iframeBody.innerHTML = markdown.render(iframeContent.innerHTML);
const iframeContainer = document.querySelector(".container");
iframe.remove()
if (iframeText.indexOf('"error"') < 0) {
iframeContainer.innerHTML += `<div class="stream-widget show">${markdown.render(filterMarkdown(iframeText, "markdown", iframeText))}</div>`;
}
scroll_to_bottom_callback = () => null;
}

(async () => {
const prompt = `
const today = new Date().toJSON().slice(0, 10);
const max = 100;
const cache_id = Math.floor(Math.random() * max);
let prompt;
if (cache_id % 2 == 0) {
prompt = `
Today is ${new Date().toJSON().slice(0, 10)}.
Create a single-page HTML screensaver reflecting the current season (based on the date).
For example, if it's Spring, it might use floral patterns or pastel colors.
Avoid using any text. Consider a subtle animation or transition effect.`;
const response = await fetch(`/backend-api/v2/create?prompt=${prompt}&filter_markdown=html`)
Avoid using any text.`;
} else {
prompt = `Create a single-page HTML screensaver. Avoid using any text.`;
const response = await fetch(`/backend-api/v2/create?prompt=${prompt}&filter_markdown=html&cache=${cache_id}`);
}
const response = await fetch(`/backend-api/v2/create?prompt=${prompt}&filter_markdown=html&cache=${cache_id}`);
const text = await response.text()
background.src = `data:text/html;charset=utf-8,${encodeURIComponent(text)}`;
const gradient = document.querySelector('.gradient');
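
Note on this file: home.html replaces the bare news iframe with a widget that renders the streamed answer through markdown-it, using the new filterMarkdown helper to pull a fenced code block of an allowed type out of the response, with the closing fence optional so partially streamed text still matches. The same extraction logic in Python, as a rough equivalent rather than code from the commit:

import re

def filter_markdown(text: str, allowed_types=None, default=None):
    # Match the first fenced block: ```<type>\n<code>, closing fence optional.
    match = re.search(r"```(.+)\n([\s\S]+?)(\n```|$)", text)
    if match:
        block_type, code = match.group(1), match.group(2)
        if allowed_types is None or block_type in allowed_types:
            return code
    return default

print(filter_markdown("```html\n<b>hi</b>\n```", ["html"], default=""))
# -> <b>hi</b>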
3 changes: 2 additions & 1 deletion g4f/gui/client/index.html
@@ -239,7 +239,8 @@ <h3>Settings</h3>
<button class="hide-input">
<i class="fa-solid fa-angles-down"></i>
</button>
<span class="text"></span>
<input type="checkbox" id="agree" name="agree" value="yes" checked>
<label for="agree" class="text" onclick="this.innerText='';">Scroll to bottom</label>
</div>
<div class="stop_generating stop_generating-hidden">
<button id="cancelButton">
(Diff truncated: 13 of the 21 changed files are not shown.)