Update model list in OpenaiChat (o3-mini, o3-mini-high)

Add Reasoning to OpenaiChat provider
Check for pipeline_tag in HuggingFace providers
Add image preview in PollinationsAI
Add custom model input to GUI
hlohaus committed 2025-02-02 23:03:59 +01:00
parent 18968eed02 · commit 167ceedd31
13 changed files with 257 additions and 110 deletions
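The new response types flow straight through a provider's async generator, so a consumer can switch on them. A minimal sketch (assumptions: g4f installed, the Pollinations endpoint reachable, "flux" is a hypothetical stand-in for any image model; the Reasoning/ImagePreview fields follow the constructor calls in the hunks below):

import asyncio
from g4f.Provider import PollinationsAI
from g4f.providers.response import Reasoning, ImagePreview, ImageResponse

async def main():
    async for chunk in PollinationsAI.create_async_generator(
        model="flux",  # hypothetical model name
        messages=[{"role": "user", "content": "a lighthouse at dawn"}],
    ):
        if isinstance(chunk, Reasoning):
            print("reasoning:", chunk.status or chunk.token)
        elif isinstance(chunk, ImagePreview):
            print("preview:", chunk)   # yielded immediately, before generation finishes
        elif isinstance(chunk, ImageResponse):
            print("image:", chunk)     # yielded once the HEAD request succeeds
        else:
            print(str(chunk), end="")

asyncio.run(main())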
+1 -8
@@ -308,14 +308,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
image_url = image_url_match.group(1)
yield ImageResponse(image_url, format_image_prompt(messages, prompt))
else:
if "<think>" in text_to_yield and "</think>" in text_to_yield:
parts = text_to_yield.split('<think>', 1)
yield parts[0]
reasoning_parts = parts[1].split('</think>', 1)
yield Reasoning(f"<think>{reasoning_parts[0]}</think>")
yield reasoning_parts[1]
full_response = text_to_yield
elif "Generated by BLACKBOX.AI" in text_to_yield:
if "Generated by BLACKBOX.AI" in text_to_yield:
conversation.validated_value = await cls.fetch_validated(force_refresh=True)
if conversation.validated_value:
data["validated"] = conversation.validated_value
+14 -8
@@ -13,7 +13,7 @@ from ..typing import AsyncResult, Messages, ImagesType
from ..image import to_data_uri
from ..requests.raise_for_status import raise_for_status
from ..requests.aiohttp import get_connector
from ..providers.response import ImageResponse, FinishReason, Usage
from ..providers.response import ImageResponse, ImagePreview, FinishReason, Usage
DEFAULT_HEADERS = {
'Accept': '*/*',
@@ -125,7 +125,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
# Check if models
# Image generation
if model in cls.image_models:
yield await cls._generate_image(
async for chunk in cls._generate_image(
model=model,
prompt=format_image_prompt(messages, prompt),
proxy=proxy,
@@ -136,7 +136,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
private=private,
enhance=enhance,
safe=safe
)
):
yield chunk
else:
# Text generation
async for result in cls._generate_text(
@@ -167,7 +168,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
private: bool,
enhance: bool,
safe: bool
) -> ImageResponse:
) -> AsyncResult:
params = {
"seed": seed,
"width": width,
@@ -178,11 +179,16 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"enhance": enhance,
"safe": safe
}
params = {k: json.dumps(v) if isinstance(v, bool) else v for k, v in params.items() if v is not None}
params = {k: json.dumps(v) if isinstance(v, bool) else str(v) for k, v in params.items() if v is not None}
params = "&".join( "%s=%s" % (key, quote_plus(params[key]))
for key in params.keys())
url = f"{cls.image_api_endpoint}prompt/{quote_plus(prompt)}?{params}"
yield ImagePreview(url, prompt)
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
async with session.head(f"{cls.image_api_endpoint}prompt/{quote_plus(prompt)}", params=params) as response:
await raise_for_status(response)
return ImageResponse(str(response.url), prompt)
async with session.head(url) as response:
if response.status != 500: # Server is busy
await raise_for_status(response)
yield ImageResponse(str(response.url), prompt)
@classmethod
async def _generate_text(
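One subtlety in the params handling above: quote_plus needs strings, hence the added str(v), and booleans are JSON-encoded first so the API sees lowercase true/false. A self-contained sketch of just that transformation (values made up):

import json
from urllib.parse import quote_plus

params = {"seed": 42, "width": 1024, "nologo": True, "private": None}
# booleans become "true"/"false", everything else is stringified, None is dropped
params = {k: json.dumps(v) if isinstance(v, bool) else str(v)
          for k, v in params.items() if v is not None}
query = "&".join(f"{key}={quote_plus(params[key])}" for key in params)
print(query)  # seed=42&width=1024&nologo=true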
+23 -2
@@ -1,8 +1,11 @@
from __future__ import annotations
from ...providers.types import Messages
from ...typing import ImagesType
from ...requests import StreamSession, raise_for_status
from ...errors import ModelNotSupportedError
from ..template.OpenaiTemplate import OpenaiTemplate
from .models import model_aliases
from ...providers.types import Messages
from .HuggingChat import HuggingChat
from ... import debug
@@ -37,6 +40,10 @@ class HuggingFaceAPI(OpenaiTemplate):
api_base: str = None,
max_tokens: int = 2048,
max_inputs_lenght: int = 10000,
impersonate: str = None,
proxy: str = None,
timeout: int = 300,
images: ImagesType = None,
**kwargs
):
if api_base is None:
@@ -44,6 +51,20 @@ class HuggingFaceAPI(OpenaiTemplate):
if model in cls.model_aliases:
model_name = cls.model_aliases[model]
api_base = f"https://api-inference.huggingface.co/models/{model_name}/v1"
if images is not None:
async with StreamSession(
proxy=proxy,
timeout=timeout,
impersonate=impersonate,
) as session:
async with session.get(f"https://huggingface.co/api/models/{model}") as response:
if response.status == 404:
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
await raise_for_status(response)
model_data = await response.json()
pipeline_tag = model_data.get("pipeline_tag")
if pipeline_tag != "image-text-to-text":
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__} pipeline_tag={pipeline_tag}")
start = calculate_lenght(messages)
if start > max_inputs_lenght:
if len(messages) > 6:
@@ -54,7 +75,7 @@ class HuggingFaceAPI(OpenaiTemplate):
if len(messages) > 1 and calculate_lenght(messages) > max_inputs_lenght:
messages = [messages[-1]]
debug.log(f"Messages trimmed from: {start} to: {calculate_lenght(messages)}")
async for chunk in super().create_async_generator(model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs):
async for chunk in super().create_async_generator(model, messages, api_base=api_base, max_tokens=max_tokens, images=images, **kwargs):
yield chunk
def calculate_lenght(messages: Messages) -> int:
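The capability probe is just a GET against the public Hugging Face model API. A standalone sketch (the model id is a hypothetical example, aiohttp stands in for g4f's StreamSession, and ValueError for ModelNotSupportedError):

import asyncio
import aiohttp

async def supports_images(model: str) -> bool:
    async with aiohttp.ClientSession() as session:
        async with session.get(f"https://huggingface.co/api/models/{model}") as response:
            if response.status == 404:
                raise ValueError(f"Model is not supported: {model}")
            response.raise_for_status()
            model_data = await response.json()
    # image input is only accepted for vision models
    return model_data.get("pipeline_tag") == "image-text-to-text"

print(asyncio.run(supports_images("meta-llama/Llama-3.2-11B-Vision-Instruct")))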
+30 -27
@@ -78,18 +78,13 @@ class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
payload = None
if cls.get_models() and model in cls.image_models:
stream = False
prompt = format_image_prompt(messages, prompt)
payload = {"inputs": prompt, "parameters": {"seed": random.randint(0, 2**32), **extra_data}}
else:
params = {
"return_full_text": False,
"max_new_tokens": max_tokens,
"temperature": temperature,
**extra_data
}
do_continue = action == "continue"
params = {
"return_full_text": False,
"max_new_tokens": max_tokens,
"temperature": temperature,
**extra_data
}
do_continue = action == "continue"
async with StreamSession(
headers=headers,
proxy=proxy,
@@ -101,22 +96,30 @@ class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
await raise_for_status(response)
model_data = await response.json()
model_type = None
if "config" in model_data and "model_type" in model_data["config"]:
model_type = model_data["config"]["model_type"]
debug.log(f"Model type: {model_type}")
inputs = get_inputs(messages, model_data, model_type, do_continue)
debug.log(f"Inputs len: {len(inputs)}")
if len(inputs) > 4096:
if len(messages) > 6:
messages = messages[:3] + messages[-3:]
else:
messages = [m for m in messages if m["role"] == "system"] + [messages[-1]]
pipeline_tag = model_data.get("pipeline_tag")
if pipeline_tag == "text-to-image":
stream = False
inputs = format_image_prompt(messages, prompt)
payload = {"inputs": inputs, "parameters": {"seed": random.randint(0, 2**32), **extra_data}}
elif pipeline_tag in ("text-generation", "image-text-to-text"):
model_type = None
if "config" in model_data and "model_type" in model_data["config"]:
model_type = model_data["config"]["model_type"]
debug.log(f"Model type: {model_type}")
inputs = get_inputs(messages, model_data, model_type, do_continue)
debug.log(f"New len: {len(inputs)}")
if model_type == "gpt2" and max_tokens >= 1024:
params["max_new_tokens"] = 512
payload = {"inputs": inputs, "parameters": params, "stream": stream}
debug.log(f"Inputs len: {len(inputs)}")
if len(inputs) > 4096:
if len(messages) > 6:
messages = messages[:3] + messages[-3:]
else:
messages = [m for m in messages if m["role"] == "system"] + [messages[-1]]
inputs = get_inputs(messages, model_data, model_type, do_continue)
debug.log(f"New len: {len(inputs)}")
if model_type == "gpt2" and max_tokens >= 1024:
params["max_new_tokens"] = 512
payload = {"inputs": inputs, "parameters": params, "stream": stream}
else:
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__} pipeline_tag: {pipeline_tag}")
async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
if response.status == 404:
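Condensed, the new branching reduces to a pure function of the pipeline_tag (model_data as returned by the endpoint above; params, extra_data, and stream as in the hunk):

import random

def build_payload(model_data: dict, inputs: str, params: dict,
                  extra_data: dict, stream: bool) -> dict:
    pipeline_tag = model_data.get("pipeline_tag")
    if pipeline_tag == "text-to-image":
        # image models ignore the text-generation params; a random seed is injected
        return {"inputs": inputs,
                "parameters": {"seed": random.randint(0, 2**32), **extra_data}}
    if pipeline_tag in ("text-generation", "image-text-to-text"):
        return {"inputs": inputs, "parameters": params, "stream": stream}
    raise ValueError(f"Model is not supported: pipeline_tag={pipeline_tag}")

print(build_payload({"pipeline_tag": "text-to-image"}, "a cat", {}, {}, False))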
+19 -10
@@ -25,8 +25,9 @@ from ...requests import get_nodriver
from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
from ...errors import MissingAuthError, NoValidHarFileError
from ...providers.response import JsonConversation, FinishReason, SynthesizeData, AuthResult
from ...providers.response import Sources, TitleGeneration, RequestLogin, Parameters
from ...providers.response import Sources, TitleGeneration, RequestLogin, Parameters, Reasoning
from ..helper import format_cookies
from ..openai.models import default_model, default_image_model, models, image_models, text_models
from ..openai.har_file import get_request_config
from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, backend_anon_url
from ..openai.proofofwork import generate_proof_token
@@ -95,12 +96,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
default_model = "auto"
default_image_model = "dall-e-3"
image_models = [default_image_model]
text_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "o1", "o1-preview", "o1-mini"]
default_model = default_model
default_image_model = default_image_model
image_models = image_models
vision_models = text_models
models = text_models + image_models
models = models
synthesize_content_type = "audio/mpeg"
_api_key: str = None
@@ -368,9 +368,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
)
[debug.log(text) for text in (
#f"Arkose: {'False' if not need_arkose else auth_result.arkose_token[:12]+'...'}",
f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
#f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
#f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
)]
if action == "continue" and conversation.message_id is None:
action = "next"
data = {
"action": action,
"parent_message_id": conversation.message_id,
@@ -497,7 +499,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
v = line.get("v")
if isinstance(v, str) and fields.is_recipient:
if "p" not in line or line.get("p") == "/message/content/parts/0":
yield v
yield Reasoning(token=v) if fields.is_thinking else v
elif isinstance(v, list):
for m in v:
if m.get("p") == "/message/content/parts/0" and fields.is_recipient:
@@ -508,6 +510,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
sources.add_source(link)
elif re.match(r"^/message/metadata/content_references/\d+$", m.get("p")):
sources.add_source(m.get("v"))
elif m.get("p") == "/message/metadata/finished_text":
fields.is_thinking = False
yield Reasoning(status=m.get("v"))
elif m.get("p") == "/message/metadata":
fields.finish_reason = m.get("v", {}).get("finish_details", {}).get("type")
break
@@ -519,6 +524,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
fields.is_recipient = m.get("recipient", "all") == "all"
if fields.is_recipient:
c = m.get("content", {})
if c.get("content_type") == "text" and m.get("author", {}).get("role") == "tool" and "initial_text" in m.get("metadata", {}):
fields.is_thinking = True
yield Reasoning(status=c.get("metadata", {}).get("initial_text"))
if c.get("content_type") == "multimodal_text":
generated_images = []
for element in c.get("parts"):
@@ -697,13 +705,14 @@ class Conversation(JsonConversation):
"""
Class to encapsulate response fields.
"""
def __init__(self, conversation_id: str = None, message_id: str = None, user_id: str = None, finish_reason: str = None, parent_message_id: str = None):
def __init__(self, conversation_id: str = None, message_id: str = None, user_id: str = None, finish_reason: str = None, parent_message_id: str = None, is_thinking: bool = False):
self.conversation_id = conversation_id
self.message_id = message_id
self.finish_reason = finish_reason
self.is_recipient = False
self.parent_message_id = message_id if parent_message_id is None else parent_message_id
self.user_id = user_id
self.is_thinking = is_thinking
def get_cookies(
urls: Optional[Iterator[str]] = None
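The thinking state is a simple latch on the response-fields object: a tool message carrying metadata.initial_text switches it on, a /message/metadata/finished_text patch switches it off, and every content token in between is wrapped. A toy version (Reasoning imported from g4f; the event handling is simplified from the delta parsing above):

from g4f.providers.response import Reasoning

class Fields:
    def __init__(self):
        self.is_thinking = False

def route_token(v: str, fields: Fields):
    # while thinking, plain content tokens are wrapped as Reasoning
    return Reasoning(token=v) if fields.is_thinking else v

fields = Fields()
fields.is_thinking = True       # set when a tool message has metadata.initial_text
print(type(route_token("Weighing options...", fields)).__name__)  # Reasoning
fields.is_thinking = False      # cleared by the finished_text metadata patch
print(route_token("Final answer.", fields))                       # plain str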
+6
@@ -0,0 +1,6 @@
default_model = "auto"
default_image_model = "dall-e-3"
image_models = [default_image_model]
text_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "o1", "o1-preview", "o1-mini", "o3-mini", "o3-mini-high"]
vision_models = text_models
models = text_models + image_models
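This new module gives OpenaiChat and any sibling provider a single source of truth for the model lists. Assuming it lands at g4f/Provider/openai/models.py (matching the relative import added in the OpenaiChat hunk above), usage is just:

from g4f.Provider.openai.models import text_models, image_models, models

assert "o3-mini" in text_models and "o3-mini-high" in text_models
assert models == text_models + image_models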
+18 -11
@@ -10,6 +10,7 @@ from email.utils import formatdate
import os.path
import hashlib
import asyncio
from urllib.parse import quote_plus
from fastapi import FastAPI, Response, Request, UploadFile, Depends
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
@@ -176,11 +177,11 @@ class Api:
return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED)
if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN)
elif not AppConfig.demo:
if user_g4f_api_key is not None and path.startswith("/images/"):
elif not AppConfig.demo and not path.startswith("/images/"):
if user_g4f_api_key is not None:
if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN)
elif path.startswith("/backend-api/") or path.startswith("/images/") or path.startswith("/chat/") and path != "/chat/":
elif path.startswith("/backend-api/") or path.startswith("/chat/") and path != "/chat/":
try:
username = await self.get_username(request)
except HTTPException as e:
@@ -551,8 +552,8 @@ class Api:
HTTP_404_NOT_FOUND: {}
})
async def get_image(filename, request: Request):
target = os.path.join(images_dir, filename)
ext = os.path.splitext(filename)[1]
target = os.path.join(images_dir, quote_plus(filename))
ext = os.path.splitext(filename)[1][1:]
stat_result = SimpleNamespace()
stat_result.st_size = 0
if os.path.isfile(target):
@@ -560,10 +561,12 @@ class Api:
stat_result.st_mtime = int(f"{filename.split('_')[0]}") if filename.startswith("1") else 0
headers = {
"cache-control": "public, max-age=31536000",
"content-type": f"image/{ext.replace('jpg', 'jpeg')[1:] or 'jpeg'}",
"content-length": str(stat_result.st_size),
"content-type": f"image/{ext.replace('jpg', 'jpeg') or 'jpeg'}",
"last-modified": formatdate(stat_result.st_mtime, usegmt=True),
"etag": f'"{hashlib.md5(filename.encode()).hexdigest()}"',
**({
"content-length": str(stat_result.st_size),
} if stat_result.st_size else {})
}
response = FileResponse(
target,
@@ -583,10 +586,14 @@ class Api:
source_url = source_url[1]
source_url = source_url.replace("%2F", "/").replace("%3A", ":").replace("%3F", "?").replace("%3D", "=")
if source_url.startswith("https://"):
await copy_images(
[source_url],
target=target)
debug.log(f"Image copied from {source_url}")
try:
await copy_images(
[source_url],
target=target)
debug.log(f"Image copied from {source_url}")
except Exception as e:
debug.log(f"{type(e).__name__}: Download failed: {source_url}\n{e}")
return RedirectResponse(url=source_url)
if not os.path.isfile(target):
return ErrorResponse.from_message("File not found", HTTP_404_NOT_FOUND)
async def stream():
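The content-type expression is now a pure function of the filename, which makes it easy to sanity-check (behavior copied from the hunk above):

import os

def image_content_type(filename: str) -> str:
    ext = os.path.splitext(filename)[1][1:]  # extension without the leading dot
    # jpg maps to the registered image/jpeg type; no extension falls back to jpeg
    return f"image/{ext.replace('jpg', 'jpeg') or 'jpeg'}"

assert image_content_type("1738_cat.jpg") == "image/jpeg"
assert image_content_type("diagram.png") == "image/png"
assert image_content_type("noext") == "image/jpeg"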
+16 -6
@@ -47,12 +47,13 @@
gallery: '#messages',
children: 'a:has(img)',
secondaryZoomLevel: 2,
allowPanToNext: true,
pswpModule: () => import('https://cdn.jsdelivr.net/npm/photoswipe'),
});
lightbox.addFilter('itemData', (itemData, index) => {
const img = itemData.element.querySelector('img');
itemData.width = img.naturalWidth;
itemData.height = img.naturalHeight;
itemData.width = img.naturalWidth || 1024;
itemData.height = img.naturalHeight || 1024;
return itemData;
});
lightbox.on('uiRegister', function() {
@@ -66,7 +67,13 @@
lightbox.pswp.on('change', () => {
const currSlideElement = lightbox.pswp.currSlide.data.element;
if (currSlideElement) {
el.innerText = currSlideElement.querySelector('img').getAttribute('alt');
const img = currSlideElement.querySelector('img');
el.innerText = img.getAttribute('alt');
const download = document.createElement("a");
download.setAttribute("href", img.getAttribute('src'));
download.setAttribute("download", `${img.getAttribute('alt')}${lightbox.pswp.currSlide.index}.jpg`);
download.innerHTML = '<i class="fa-solid fa-download"></i>';
el.appendChild(download);
}
});
}
@@ -157,8 +164,8 @@
<label for="report_error" class="toogle" title=""></label>
</div>
<div class="field box">
<label for="systemPrompt" class="label" title="">System prompt</label>
<textarea id="systemPrompt" placeholder="You are a helpful assistant."></textarea>
<label for="systemPrompt" class="label">System prompt</label>
<textarea id="systemPrompt" placeholder="You are a helpful assistant." data-value="If you need to generate images, you can use the following format: ![keywords](/generate/filename.jpg). This will enable the use of an image generation tool."></textarea>
</div>
<div class="field box">
<label for="message-input-height" class="label" title="">Input max. height</label>
@@ -269,6 +276,7 @@
<div id="send-button">
<i class="fa-solid fa-square-plus"></i>
<i class="fa-regular fa-paper-plane"></i>
<a href="" id="download" class="hidden"></a>
</div>
</div>
</div>
@@ -293,7 +301,8 @@
<option value="dall-e-3">dall-e-3 (Image Generation)</option>
<option disabled="disabled">----</option>
</select>
<select name="model2" id="model2" class="hidden"></select>
<select name="model2" id="model2" class="hidden model"></select>
<input type="text" id="model3" value="" class="hidden model" placeholder="Model:"/>
</div>
<div class="field">
<select name="provider" id="provider">
@@ -303,6 +312,7 @@
<option value="Gemini">Google Gemini</option>
<option value="DDG">DuckDuckGo AI Chat</option>
<option value="Blackbox">Blackbox AI</option>
<option value="Custom Model">Custom Model</option>
<option disabled="disabled">----</option>
</select>
</div>
+4 -3
@@ -799,7 +799,7 @@ form input:checked+label:after {
color: var(--colour-3);
}
select {
select, input.model {
border-radius: 8px;
backdrop-filter: blur(20px);
cursor: pointer;
@@ -871,6 +871,7 @@ button.regenerate_button, button.continue_button, button.options_button {
}
select:hover,
input.model:hover
.buttons button:hover,
.stop_generating button:hover,
.toolbar .regenerate button:hover,
@@ -948,7 +949,7 @@ select:hover,
}
@media only screen and (min-width: 40em) {
select {
select, input.model {
width: 200px;
}
.field {
@@ -1446,7 +1447,7 @@ form .field.saved .fa-xmark {
max-height: 200px;
}
.hidden {
.hidden, input.hidden {
display: none;
}
+1 -1
@@ -17,7 +17,7 @@
"background_color": "#ffffff",
"display": "standalone",
"share_target": {
"action": "/chat/",
"action": "/chat/share",
"method": "GET",
"enctype": "application/x-www-form-urlencoded",
"params": {
+104 -25
@@ -15,6 +15,7 @@ const inputCount = document.getElementById("input-count").querySelector("
const providerSelect = document.getElementById("provider");
const modelSelect = document.getElementById("model");
const modelProvider = document.getElementById("model2");
const custom_model = document.getElementById("model3");
const chatPrompt = document.getElementById("chatPrompt");
const settings = document.querySelector(".settings");
const chat = document.querySelector(".conversation");
@@ -78,18 +79,28 @@ function render_reasoning(reasoning, final = false) {
</div>` : "";
return `<div class="reasoning_body">
<div class="reasoning_title">
<strong>Reasoning <i class="fa-solid fa-brain"></i>:</strong> ${escapeHtml(reasoning.status)}
<strong>Reasoning <i class="brain">🧠</i>:</strong> ${escapeHtml(reasoning.status)}
</div>
${inner_text}
</div>`;
}
function render_reasoning_text(reasoning) {
return `Reasoning 🧠: ${reasoning.status}\n\n${reasoning.text}\n\n`;
}
function filter_message(text) {
return text.replaceAll(
/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, ""
).replace(/ \[aborted\]$/g, "").replace(/ \[error\]$/g, "");
}
function filter_message_content(text) {
return text.replaceAll(
/\/\]\(\/generate\//gm, "/](/images/"
).replace(/ \[aborted\]$/g, "").replace(/ \[error\]$/g, "")
}
function fallback_clipboard (text) {
var textBox = document.createElement("textarea");
textBox.value = text;
@@ -182,6 +193,53 @@ const get_message_el = (el) => {
return message_el;
}
function register_message_images() {
message_box.querySelectorAll(`.loading-indicator`).forEach((el) => el.remove());
message_box.querySelectorAll(`.message img:not([alt="your avatar"])`).forEach(async (el) => {
if (!el.complete) {
const indicator = document.createElement("span");
indicator.classList.add("loading-indicator");
indicator.innerHTML = `<i class="fas fa-spinner fa-spin"></i>`;
el.parentElement.appendChild(indicator);
el.onerror = () => {
let indexCommand;
if ((indexCommand = el.src.indexOf("/generate/")) >= 0) {
indexCommand = indexCommand + "/generate/".length + 1;
let newPath = el.src.substring(indexCommand)
let filename = newPath.replace(/(?:\?.+?|$)/, "");
let seed = Math.floor(Date.now() / 1000);
newPath = `https://image.pollinations.ai/prompt/${newPath}?seed=${seed}&nologo=true`;
let downloadUrl = newPath;
if (document.getElementById("download_images")?.checked) {
downloadUrl = `/images/${filename}?url=${escapeHtml(newPath)}`;
}
const link = document.createElement("a");
link.setAttribute("href", newPath);
const newImg = document.createElement("img");
newImg.src = downloadUrl;
newImg.alt = el.alt;
newImg.onload = () => {
lazy_scroll_to_bottom();
indicator.remove();
}
link.appendChild(newImg);
el.parentElement.appendChild(link);
} else {
const span = document.createElement("span");
span.innerHTML = `<i class="fa-solid fa-plug"></i>${escapeHtml(el.alt)}`;
el.parentElement.appendChild(span);
}
el.remove();
indicator.remove();
}
el.onload = () => {
indicator.remove();
lazy_scroll_to_bottom();
}
}
});
}
const register_message_buttons = async () => {
message_box.querySelectorAll(".message .content .provider").forEach(async (el) => {
if (!("click" in el.dataset)) {
@@ -243,24 +301,22 @@ const register_message_buttons = async () => {
message_box.querySelectorAll(".message .fa-file-export").forEach(async (el) => {
if (!("click" in el.dataset)) {
el.dataset.click = "true";
//
el.addEventListener("click", async () => {
const elem = window.document.createElement('a');
let filename = `chat ${new Date().toLocaleString()}.md`.replaceAll(":", "-");
const conversation = await get_conversation(window.conversation_id);
let buffer = "";
conversation.items.forEach(message => {
buffer += render_reasoning_text(message.reasoning);
buffer += `${message.role == 'user' ? 'User' : 'Assistant'}: ${message.content.trim()}\n\n\n`;
});
const file = new File([buffer.trim()], 'message.md', {type: 'text/plain'});
const objectUrl = URL.createObjectURL(file);
elem.href = objectUrl;
elem.download = filename;
document.body.appendChild(elem);
elem.click();
document.body.removeChild(elem);
var download = document.getElementById("download");
download.setAttribute("href", "data:text/markdown;charset=utf-8," + encodeURIComponent(buffer.trim()));
download.setAttribute("download", filename);
download.click();
el.classList.add("clicked");
setTimeout(() => el.classList.remove("clicked"), 1000);
URL.revokeObjectURL(objectUrl);
})
}
});
@@ -376,7 +432,7 @@ const handle_ask = async (do_ask_gpt = true) => {
messageInput.focus();
await scroll_to_bottom();
let message = messageInput.value;
let message = messageInput.value.trim();
if (message.length <= 0) {
return;
}
@@ -755,6 +811,7 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
if (!img.complete)
return;
content_map.inner.innerHTML = markdown_render(message.preview);
await register_message_images();
} else if (message.type == "content") {
message_storage[message_id] += message.content;
update_message(content_map, message_id, null, scroll);
@@ -779,7 +836,7 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
} else if (message.type == "reasoning") {
if (!reasoning_storage[message_id]) {
reasoning_storage[message_id] = message;
reasoning_storage[message_id].text = message.token || "";
reasoning_storage[message_id].text = "";
} else if (message.status) {
reasoning_storage[message_id].status = message.status;
} else if (message.token) {
@@ -952,6 +1009,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
}
await safe_remove_cancel_button();
await register_message_buttons();
await register_message_images();
await load_conversations();
regenerate_button.classList.remove("regenerate-hidden");
}
@@ -1201,8 +1259,8 @@ const load_conversation = async (conversation_id, scroll=true) => {
} else {
buffer = "";
}
buffer = buffer.replace(/ \[aborted\]$/g, "").replace(/ \[error\]$/g, "");
new_content = item.content.replace(/ \[aborted\]$/g, "").replace(/ \[error\]$/g, "");
buffer = filter_message_content(buffer);
new_content = filter_message_content(item.content);
buffer = merge_messages(buffer, new_content);
last_model = item.provider?.model;
providers.push(item.provider?.name);
@@ -1658,12 +1716,9 @@ const register_settings_storage = async () => {
const load_settings_storage = async () => {
const optionElements = document.querySelectorAll(optionElementsSelector);
optionElements.forEach((element) => {
if (element.name && element.name != element.id && (value = appStorage.getItem(element.name))) {
appStorage.setItem(element.id, value);
appStorage.removeItem(element.name);
}
if (!(value = appStorage.getItem(element.id))) {
return;
value = appStorage.getItem(element.id);
if (value == null && element.dataset.value) {
value = element.dataset.value;
}
if (value) {
switch (element.type) {
@@ -1677,10 +1732,10 @@ const load_settings_storage = async () => {
case "number":
case "textarea":
if (element.id.endsWith("-api_key")) {
element.placeholder = value && value.length >= 22 ? (value.substring(0, 12)+"*".repeat(12)+value.substring(value.length-12)) : "*".repeat(value.length);
element.placeholder = value && value.length >= 22 ? (value.substring(0, 12)+"*".repeat(12)+value.substring(value.length-12)) : "*".repeat(value ? value.length : 0);
element.dataset.value = value;
} else {
element.value = value;
element.value = value == null ? element.dataset.value : value;
}
break;
default:
@@ -1834,7 +1889,7 @@ async function on_load() {
let chat_url = new URL(window.location.href)
let chat_params = new URLSearchParams(chat_url.search);
if (chat_params.get("prompt")) {
messageInput.value = `${chat_params.title}\n${chat_params.prompt}\n${chat_params.url}`.trim();
messageInput.value = `${window.location.href}\n`;
messageInput.style.height = messageInput.scrollHeight + "px";
messageInput.focus();
//await handle_ask();
@@ -2255,7 +2310,9 @@ chatPrompt?.addEventListener("input", async () => {
});
function get_selected_model() {
if (modelProvider.selectedIndex >= 0) {
if (custom_model.value) {
return custom_model;
} else if (modelProvider.selectedIndex >= 0) {
return modelProvider.options[modelProvider.selectedIndex];
} else if (modelSelect.selectedIndex >= 0) {
model = modelSelect.options[modelSelect.selectedIndex];
@@ -2401,17 +2458,31 @@ async function load_provider_models(provider=null) {
if (!provider) {
provider = providerSelect.value;
}
if (!custom_model.value) {
custom_model.classList.add("hidden");
}
if (provider == "Custom Model" || custom_model.value) {
modelProvider.classList.add("hidden");
modelSelect.classList.add("hidden");
document.getElementById("model3").classList.remove("hidden");
return;
}
modelProvider.innerHTML = '';
modelProvider.name = `model[${provider}]`;
if (!provider) {
modelProvider.classList.add("hidden");
modelSelect.classList.remove("hidden");
document.getElementById("model3").value = "";
document.getElementById("model3").classList.remove("hidden");
return;
}
const models = await api('models', provider);
if (models && models.length > 0) {
modelSelect.classList.add("hidden");
modelProvider.classList.remove("hidden");
if (!custom_model.value) {
custom_model.classList.add("hidden");
modelProvider.classList.remove("hidden");
}
let defaultIndex = 0;
models.forEach((model, i) => {
let option = document.createElement('option');
@@ -2423,11 +2494,13 @@ async function load_provider_models(provider=null) {
defaultIndex = i;
}
});
modelProvider.selectedIndex = defaultIndex;
let value = appStorage.getItem(modelProvider.name);
if (value) {
modelProvider.value = value;
}
modelProvider.selectedIndex = defaultIndex;
} else if (custom_model.value) {
modelSelect.classList.add("hidden");
} else {
modelProvider.classList.add("hidden");
modelSelect.classList.remove("hidden");
@@ -2439,6 +2512,12 @@ providerSelect.addEventListener("change", () => {
});
modelSelect.addEventListener("change", () => messageInput.focus());
modelProvider.addEventListener("change", () => messageInput.focus());
custom_model.addEventListener("change", () => {
if (!custom_model.value) {
load_provider_models();
}
messageInput.focus();
});
document.getElementById("pin").addEventListener("click", async () => {
const pin_container = document.getElementById("pin_container");
+1 -1
@@ -276,7 +276,7 @@ class Backend_Api(Api):
response = iter_run_tools(ChatCompletion.create, **parameters)
if do_filter_markdown:
return Response(filter_markdown(response, do_filter_markdown), mimetype='text/plain')
return Response(filter_markdown("".join([str(chunk) for chunk in response]), do_filter_markdown), mimetype='text/plain')
def cast_str():
for chunk in response:
if not isinstance(chunk, Exception):
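The fix here is that filter_markdown expects one string, while iter_run_tools yields a stream of chunks that are not all str; joining with str() flattens both. A toy stand-in (assumption: g4f's hidden response objects stringify to ""):

class FinishReason:
    """Stand-in for a hidden response object (assumed to stringify to '')."""
    def __str__(self):
        return ""

response = ["Hello ", "world", FinishReason()]
text = "".join(str(chunk) for chunk in response)
assert text == "Hello world"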
+20 -8
@@ -155,16 +155,28 @@ def iter_run_tools(
yield chunk
continue
if "<think>" in chunk:
chunk = chunk.split("<think>", 1)
yield chunk[0]
yield Reasoning(None, "Is thinking...", is_thinking="<think>")
yield Reasoning(chunk[1])
if chunk != "<think>":
chunk = chunk.split("<think>", 1)
if len(chunk) > 0 and chunk[0]:
yield chunk[0]
yield Reasoning(None, "🤔 Is thinking...", is_thinking="<think>")
if chunk != "<think>":
if len(chunk) > 1 and chunk[1]:
yield Reasoning(chunk[1])
is_thinking = time.time()
if "</think>" in chunk:
chunk = chunk.split("</think>", 1)
yield Reasoning(chunk[0])
yield Reasoning(None, f"Finished in {round(time.time()-is_thinking, 2)} seconds", is_thinking="</think>")
yield chunk[1]
if chunk != "<think>":
chunk = chunk.split("</think>", 1)
if len(chunk) > 0 and chunk[0]:
yield Reasoning(chunk[0])
is_thinking = time.time() - is_thinking
if is_thinking > 1:
yield Reasoning(None, f"Thought for {is_thinking:.2f}s", is_thinking="</think>")
else:
yield Reasoning(None, f"Finished", is_thinking="</think>")
if chunk != "<think>":
if len(chunk) > 1 and chunk[1]:
yield chunk[1]
is_thinking = 0
elif is_thinking:
yield Reasoning(chunk)
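Pulled out of the generator, the reworked tag handling is a small stream splitter. A standalone sketch (Reasoning imported from g4f; the one-second threshold and status wording mirror the hunk, while the real code additionally special-cases a chunk that is exactly "<think>"):

import time
from g4f.providers.response import Reasoning

def split_thinking(chunks):
    is_thinking = 0
    for chunk in chunks:
        if "<think>" in chunk:
            before, after = chunk.split("<think>", 1)
            if before:
                yield before
            yield Reasoning(None, "🤔 Is thinking...", is_thinking="<think>")
            if after:
                yield Reasoning(after)
            is_thinking = time.time()
        elif "</think>" in chunk:
            inside, after = chunk.split("</think>", 1)
            if inside:
                yield Reasoning(inside)
            took = time.time() - is_thinking
            # only report a duration when the model actually spent time thinking
            status = f"Thought for {took:.2f}s" if took > 1 else "Finished"
            yield Reasoning(None, status, is_thinking="</think>")
            if after:
                yield after
            is_thinking = 0
        elif is_thinking:
            yield Reasoning(chunk)  # everything between the tags is reasoning
        else:
            yield chunk

for out in split_thinking(["Hi <think>", "weighing options", "</think> Done."]):
    print(out)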