Merge pull request #3418 from xtekky/copilot/add-pa-py-file-parser-executor

Refactor PA backend conversation endpoint for improved streaming and …
This commit is contained in:
H Lohaus
2026-04-06 00:39:51 +02:00
committed by GitHub
3 changed files with 81 additions and 91 deletions
+1 -90
View File
@@ -224,7 +224,7 @@ class Api:
return (
path.startswith("/v1")
or path.startswith("/api/")
or path.startswith("/pa/")
or (path.startswith("/pa/") and not demo)
or (demo and path == "/backend-api/v2/upload_cookies")
)
@@ -766,95 +766,6 @@ class Api:
logger.exception(e)
return ErrorResponse.from_exception(e, config, HTTP_500_INTERNAL_SERVER_ERROR)
@self.app.post("/pa/backend-api/v2/conversation", responses={
    HTTP_200_OK: {},
    HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
    HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel},
    HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
})
async def pa_backend_conversation(request: Request):
    """GUI-compatible streaming conversation endpoint for PA providers.

    Accepts the same JSON body as ``/backend-api/v2/conversation`` and
    streams Server-Sent Events in the same format used by the gpt4free
    web interface (``{"type": "content", "content": "..."}`` etc.).

    The ``provider`` field should contain the opaque PA provider ID
    returned by ``GET /pa/providers``. When omitted the first available
    PA provider is used.

    Returns a ``StreamingResponse`` with ``text/event-stream`` media type,
    or an ``ErrorResponse`` (422 bad body, 404 unknown/missing provider).
    """
    # Imported lazily so the PA registry is only loaded when this route is hit.
    from g4f.mcp.pa_provider import get_pa_registry
    try:
        body = await request.json()
    except Exception:
        return ErrorResponse.from_message(
            "Invalid JSON body", HTTP_422_UNPROCESSABLE_ENTITY
        )
    registry = get_pa_registry()
    pid = body.get("provider")
    if pid:
        provider_cls = registry.get_provider_class(pid)
        if provider_cls is None:
            return ErrorResponse.from_message(
                f"PA provider '{pid}' not found", HTTP_404_NOT_FOUND
            )
    else:
        # No explicit provider: fall back to the first registered PA provider.
        listing = registry.list_providers()
        if not listing:
            return ErrorResponse.from_message(
                "No PA providers found in workspace", HTTP_404_NOT_FOUND
            )
        provider_cls = registry.get_provider_class(listing[0]["id"])
    provider_label = getattr(provider_cls, "label", provider_cls.__name__)
    messages = body.get("messages") or []
    model = body.get("model") or getattr(provider_cls, "default_model", "") or ""

    async def gen_backend_stream():
        # Announce the resolved provider/model before any content chunks.
        yield (
            "data: "
            + json.dumps({"type": "provider", "provider": provider_label, "model": model})
            + "\n\n"
        )
        try:
            response = self.client.chat.completions.create(
                messages=messages,
                model=model,
                provider=provider_cls,
                stream=True,
            )
            async for chunk in response:
                # Conversation-state objects are not renderable content.
                if isinstance(chunk, BaseConversation):
                    continue
                text = ""
                if hasattr(chunk, "choices") and chunk.choices:
                    delta = chunk.choices[0].delta
                    # delta.content may be None on role/finish deltas.
                    text = getattr(delta, "content", "") or ""
                if text:
                    yield (
                        "data: "
                        + json.dumps({"type": "content", "content": text})
                        + "\n\n"
                    )
        except GeneratorExit:
            # Client disconnected. PEP 525 forbids yielding after
            # GeneratorExit; `pass` here would fall through to the finish
            # event below and raise "async generator ignored GeneratorExit".
            return
        except Exception as e:
            logger.exception(e)
            yield (
                "data: "
                + json.dumps({"type": "error", "error": f"{type(e).__name__}: {e}"})
                + "\n\n"
            )
        yield (
            "data: "
            + json.dumps({"type": "finish", "finish": "stop"})
            + "\n\n"
        )
    return StreamingResponse(gen_backend_stream(), media_type="text/event-stream")
# ------------------------------------------------------------------ #
# PA workspace static file serving (HTML/CSS/JS/images for browser) #
# ------------------------------------------------------------------ #
+78
View File
@@ -37,6 +37,7 @@ try:
except ImportError:
has_crypto = False
from ...client import Client
from ...client.service import convert_to_provider
from ...providers.asyncio import to_sync_generator
from ...providers.response import FinishReason, AudioResponse, MediaResponse, Reasoning, HiddenResponse, JsonResponse
@@ -85,6 +86,7 @@ class Backend_Api(Api):
"""
self.app: Flask = app
self.chat_cache = {}
self.client = Client()
if has_crypto:
private_key_obj = get_session_key()
@@ -130,6 +132,82 @@ class Backend_Api(Api):
"user": request.headers.get("x-user", "error")
})
@app.route('/pa/backend-api/v2/conversation', methods=['POST'])
async def pa_backend_conversation():
    """GUI-compatible streaming conversation endpoint for PA providers.

    Accepts the same JSON body as ``/backend-api/v2/conversation`` and
    streams Server-Sent Events in the same format used by the gpt4free
    web interface (``{"type": "content", "content": "..."}`` etc.).

    The ``provider`` field should contain the opaque PA provider ID
    returned by ``GET /pa/providers``. When omitted the first available
    PA provider is used.

    Returns a ``text/event-stream`` response, or a JSON error payload
    (403 bad secret in demo mode, 422 bad body, 404 unknown provider).
    """
    # Imported lazily so the PA registry is only loaded when this route is hit.
    from g4f.mcp.pa_provider import get_pa_registry
    if app.demo and has_crypto:
        secret = request.headers.get("x-secret", request.headers.get("x_secret"))
        if not secret or not validate_secret(secret):
            return jsonify({"error": {"message": "Invalid or missing secret"}}), 403
    try:
        body = {**request.json}
    except Exception:
        return jsonify({"error": {"message": "Invalid JSON body"}}), 422
    registry = get_pa_registry()
    pid = body.get("provider")
    if pid:
        provider_cls = registry.get_provider_class(pid)
        if provider_cls is None:
            return jsonify({"error": {"message": f"PA provider '{pid}' not found"}}), 404
    else:
        # No explicit provider: fall back to the first registered PA provider.
        listing = registry.list_providers()
        if not listing:
            return jsonify({"error": {"message": "No PA providers found in workspace"}}), 404
        # Keep the resolved id so the provider event below does not send null.
        pid = listing[0]["id"]
        provider_cls = registry.get_provider_class(pid)
    provider_label = getattr(provider_cls, "label", provider_cls.__name__)
    messages = body.get("messages") or []
    model = body.get("model") or getattr(provider_cls, "default_model", "") or ""

    def gen_backend_stream():
        # Announce the resolved provider/model before any content chunks.
        yield (
            "data: "
            + json.dumps({"type": "provider", "provider": {"name": pid, "label": provider_label, "model": model}})
            + "\n\n"
        )
        try:
            response = self.client.chat.completions.create(
                messages=messages,
                model=model,
                provider=provider_cls,
                stream=True,
            )
            for chunk in response:
                text = ""
                if getattr(chunk, "choices", None):
                    # delta.content may be None on role/finish deltas; never
                    # emit {"content": null} events to the GUI.
                    text = getattr(chunk.choices[0].delta, "content", "") or ""
                if text:
                    yield f"data: {json.dumps({'type': 'content', 'content': text})}\n\n"
        except GeneratorExit:
            # Client disconnected. `pass` here would fall through to the
            # finish event below and raise "generator ignored GeneratorExit".
            return
        except Exception as e:
            logger.exception(e)
            yield (
                "data: "
                + json.dumps({"type": "error", "error": f"{type(e).__name__}: {e}"})
                + "\n\n"
            )
        yield (
            "data: "
            + json.dumps({"type": "finish", "finish": "stop"})
            + "\n\n"
        )
    return self.app.response_class(
        safe_iter_generator(gen_backend_stream()),
        mimetype='text/event-stream'
    )
@app.route('/backend-api/v2/models', methods=['GET'])
@lru_cache(maxsize=1)
def jsonify_models():
+2 -1
View File
@@ -39,7 +39,8 @@ def create_or_read_keys() -> tuple[RSAPrivateKey, RSAPublicKey]:
return private_key, public_key
# Generate keys
private_key_obj = rsa.generate_private_key(public_exponent=65537, key_size=4096)
# Note: Using 1024 bits for the session key so the user's secret (captcha) fits
# within the RSA payload limit. NOTE(review): RSA-1024 is below current security
# recommendations — confirm this is acceptable for a short-lived session key.
private_key_obj = rsa.generate_private_key(public_exponent=65537, key_size=1024)
public_key_obj = private_key_obj.public_key()
# Serialize private key