Refactor provider method handling for improved clarity and consistency; update mocks and add SKILL.md for usage guidance

This commit is contained in:
hlohaus
2026-04-17 14:07:49 +02:00
parent 06df47f279
commit cc0ad7a9f8
9 changed files with 86 additions and 26 deletions
+48
View File
@@ -0,0 +1,48 @@
# SKILL.md
## Using gpt4free as an LLM Server for Bots (Clawbot/OpenClaw)
### Overview
This skill covers running gpt4free as a local LLM server with an OpenAI-compatible REST API, custom model routing (config.yaml), and integration with bots like Clawbot or OpenClaw.
### Best Practices
- Start the API server with: `python -m g4f --port 8080` (or use `g4f api --debug --port 8080`)
- Use the `/v1` endpoint for OpenAI-compatible requests (e.g., POST to `http://localhost:8080/v1/chat/completions`)
- Define custom model routes in `config.yaml` to aggregate/fallback across providers
- Place `config.yaml` in your cookies directory (e.g., `~/.g4f/cookies/config.yaml`)
- For Clawbot/OpenClaw, patch their config to point to your gpt4free server (see `patch-openclaw.py`)
- Test with: `g4f client "Hello" --model openclaw` or with the Python client
### Common Pitfalls
- Not starting the server before connecting bots
- Incorrect config.yaml path or syntax errors
- Missing required Python dependencies (install them with `pip install -r requirements.txt`)
- Not exposing the correct port (default 8080)
- Forgetting to patch bot configs to use your local endpoint
### Workflow Steps
1. Install and set up gpt4free (see README)
2. Start the API server: `python -m g4f --port 8080`
3. (Optional) Create or edit `config.yaml` for custom model routing:
```yaml
models:
- name: "openclaw"
providers:
- provider: "GeminiCLI"
model: "gemini-3-flash-preview"
condition: "quota.models.gemini-3-flash-preview.remainingFraction > 0 and error_count < 3"
- provider: "Antigravity"
model: "gemini-3-flash"
- provider: "PollinationsAI"
model: "openai"
```
4. Patch your bot config (e.g., OpenClaw) to use `http://localhost:8080/v1` as the base URL (see `scripts/patch-openclaw.py`)
5. Start your bot and verify it connects to gpt4free
6. Monitor logs and test with the Python client or CLI
### References
- [README.md](../README.md)
- [docs/config-yaml-routing.md](../docs/config-yaml-routing.md)
- [scripts/patch-openclaw.py](../scripts/patch-openclaw.py)
- [scripts/setup-openclaw.sh](../scripts/setup-openclaw.sh)
- [g4f/client/__init__.py](../g4f/client/__init__.py)
+3 -3
View File
@@ -15,8 +15,8 @@ from .retry_provider import *
from .thinking import * from .thinking import *
from .web_search import * from .web_search import *
from .models import * from .models import *
from .mcp import * #from .mcp import *
from .tool_support_provider import * #from .tool_support_provider import *
from .config_provider import * #from .config_provider import *
unittest.main() unittest.main()
+8 -4
View File
@@ -5,6 +5,7 @@ import unittest
from g4f.errors import ModelNotFoundError from g4f.errors import ModelNotFoundError
from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk
from g4f.client.service import get_model_and_provider from g4f.client.service import get_model_and_provider
from g4f.providers.types import BaseProvider
from g4f.Provider.Copilot import Copilot from g4f.Provider.Copilot import Copilot
from g4f.models import gpt_4o from g4f.models import gpt_4o
from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock
@@ -117,25 +118,28 @@ class TestPassModel(unittest.TestCase):
def test_best_provider(self): def test_best_provider(self):
not_default_model = "gpt-4o" not_default_model = "gpt-4o"
model, provider = get_model_and_provider(not_default_model, None, False) model, provider = get_model_and_provider(not_default_model, None, False)
self.assertTrue(hasattr(provider, "create_completion")) self.assertIsInstance(model, str)
self.assertIsInstance(provider, (type, BaseProvider))
self.assertEqual(model, not_default_model) self.assertEqual(model, not_default_model)
def test_default_model(self): def test_default_model(self):
default_model = "" default_model = ""
model, provider = get_model_and_provider(default_model, None, False) model, provider = get_model_and_provider(default_model, None, False)
self.assertTrue(hasattr(provider, "create_completion")) self.assertIsInstance(model, str)
self.assertIsInstance(provider, (type, BaseProvider))
self.assertEqual(model, default_model) self.assertEqual(model, default_model)
def test_provider_as_model(self): def test_provider_as_model(self):
provider_as_model = Copilot.__name__ provider_as_model = Copilot.__name__
model, provider = get_model_and_provider(provider_as_model, None, False) model, provider = get_model_and_provider(provider_as_model, None, False)
self.assertTrue(hasattr(provider, "create_completion"))
self.assertIsInstance(model, str) self.assertIsInstance(model, str)
self.assertIsInstance(provider, (type, BaseProvider))
self.assertEqual(model, Copilot.default_model) self.assertEqual(model, Copilot.default_model)
def test_get_model(self): def test_get_model(self):
model, provider = get_model_and_provider(gpt_4o.name, None, False) model, provider = get_model_and_provider(gpt_4o.name, None, False)
self.assertTrue(hasattr(provider, "create_completion")) self.assertIsInstance(model, str)
self.assertIsInstance(provider, (type, BaseProvider))
self.assertEqual(model, gpt_4o.name) self.assertEqual(model, gpt_4o.name)
if __name__ == '__main__': if __name__ == '__main__':
+8
View File
@@ -4,6 +4,7 @@ from g4f.errors import MissingAuthError
class ProviderMock(AbstractProvider): class ProviderMock(AbstractProvider):
working = True working = True
use_stream_timeout = False
@classmethod @classmethod
def create_completion( def create_completion(
@@ -13,6 +14,7 @@ class ProviderMock(AbstractProvider):
class AsyncProviderMock(AsyncProvider): class AsyncProviderMock(AsyncProvider):
working = True working = True
use_stream_timeout = False
@classmethod @classmethod
async def create_async( async def create_async(
@@ -22,6 +24,7 @@ class AsyncProviderMock(AsyncProvider):
class AsyncGeneratorProviderMock(AsyncGeneratorProvider): class AsyncGeneratorProviderMock(AsyncGeneratorProvider):
working = True working = True
use_stream_timeout = False
@classmethod @classmethod
async def create_async_generator( async def create_async_generator(
@@ -29,8 +32,10 @@ class AsyncGeneratorProviderMock(AsyncGeneratorProvider):
): ):
yield "Mock" yield "Mock"
class ModelProviderMock(AbstractProvider): class ModelProviderMock(AbstractProvider):
working = True working = True
use_stream_timeout = False # Added to fix unittest error
@classmethod @classmethod
def create_completion( def create_completion(
@@ -40,6 +45,7 @@ class ModelProviderMock(AbstractProvider):
class YieldProviderMock(AsyncGeneratorProvider): class YieldProviderMock(AsyncGeneratorProvider):
working = True working = True
use_stream_timeout = False
@classmethod @classmethod
async def create_async_generator( async def create_async_generator(
@@ -50,6 +56,7 @@ class YieldProviderMock(AsyncGeneratorProvider):
class YieldImageResponseProviderMock(AsyncGeneratorProvider): class YieldImageResponseProviderMock(AsyncGeneratorProvider):
working = True working = True
use_stream_timeout = False
@classmethod @classmethod
async def create_async_generator( async def create_async_generator(
@@ -58,6 +65,7 @@ class YieldImageResponseProviderMock(AsyncGeneratorProvider):
yield ImageResponse(prompt, "") yield ImageResponse(prompt, "")
class MissingAuthProviderMock(AbstractProvider): class MissingAuthProviderMock(AbstractProvider):
use_stream_timeout = False
working = True working = True
@classmethod @classmethod
-1
View File
@@ -73,7 +73,6 @@ class ChatCompletion:
) )
method = get_provider_method(provider) method = get_provider_method(provider)
result = method(model, messages, stream=stream, **kwargs) result = method(model, messages, stream=stream, **kwargs)
result = to_sync_generator(result)
return result if stream or ignore_stream else concat_chunks(result) return result if stream or ignore_stream else concat_chunks(result)
@staticmethod @staticmethod
+9 -3
View File
@@ -89,7 +89,9 @@ def get_async_provider_method(provider: type) -> Optional[callable]:
if hasattr(provider, "create_async_generator"): if hasattr(provider, "create_async_generator"):
return provider.create_async_generator return provider.create_async_generator
if hasattr(provider, "create_async"): if hasattr(provider, "create_async"):
return provider.create_async async def wrapper(*args, **kwargs):
yield await provider.create_async(*args, **kwargs)
return wrapper
if hasattr(provider, "create_completion"): if hasattr(provider, "create_completion"):
async def wrapper(*args, **kwargs): async def wrapper(*args, **kwargs):
for chunk in provider.create_completion(*args, **kwargs): for chunk in provider.create_completion(*args, **kwargs):
@@ -102,9 +104,13 @@ def get_provider_method(provider: type) -> Optional[callable]:
if hasattr(provider, "create_completion"): if hasattr(provider, "create_completion"):
return provider.create_completion return provider.create_completion
if hasattr(provider, "create_async_generator"): if hasattr(provider, "create_async_generator"):
return provider.create_async_generator def wrapper(*args, **kwargs):
return to_sync_generator(provider.create_async_generator(*args, **kwargs), stream=provider.supports_stream)
return wrapper
if hasattr(provider, "create_async"): if hasattr(provider, "create_async"):
return provider.create_async def wrapper(*args, **kwargs):
yield asyncio.run(provider.create_async(*args, **kwargs))
return wrapper
raise NotImplementedError(f"{provider.__name__} does not implement a create method") raise NotImplementedError(f"{provider.__name__} does not implement a create method")
class AbstractProvider(BaseProvider): class AbstractProvider(BaseProvider):
+3 -6
View File
@@ -109,7 +109,7 @@ class RotatedProvider(BaseRetryProvider):
method = get_async_provider_method(provider) method = get_async_provider_method(provider)
response = method(model=alias, messages=messages, **extra_body) response = method(model=alias, messages=messages, **extra_body)
started = False started = False
async for chunk in to_async_iterator(response): async for chunk in response:
if isinstance(chunk, JsonConversation): if isinstance(chunk, JsonConversation):
if conversation is None: conversation = JsonConversation() if conversation is None: conversation = JsonConversation()
setattr(conversation, provider.__name__, chunk.get_dict()) setattr(conversation, provider.__name__, chunk.get_dict())
@@ -168,7 +168,7 @@ class IterListProvider(BaseRetryProvider):
try: try:
method = get_async_provider_method(provider) method = get_async_provider_method(provider)
response = method(model=alias, messages=messages, **extra_body) response = method(model=alias, messages=messages, **extra_body)
async for chunk in to_async_iterator(response): async for chunk in response:
if isinstance(chunk, JsonConversation): if isinstance(chunk, JsonConversation):
if conversation is None: if conversation is None:
conversation = JsonConversation() conversation = JsonConversation()
@@ -231,7 +231,7 @@ class RetryProvider(IterListProvider):
debug.log(f"Using {provider.__name__} provider (attempt {attempt + 1})") debug.log(f"Using {provider.__name__} provider (attempt {attempt + 1})")
method = get_async_provider_method(provider) method = get_async_provider_method(provider)
response = method(model=model, messages=messages, **kwargs) response = method(model=model, messages=messages, **kwargs)
async for chunk in to_async_iterator(response): async for chunk in response:
yield chunk yield chunk
if is_content(chunk): if is_content(chunk):
started = True started = True
@@ -255,9 +255,6 @@ def raise_exceptions(exceptions: dict) -> None:
RetryNoProviderError: If no provider is found. RetryNoProviderError: If no provider is found.
""" """
if exceptions: if exceptions:
for provider_name, e in exceptions.items():
if isinstance(e, (MissingAuthError, NoValidHarFileError)):
raise e
if len(exceptions) == 1: if len(exceptions) == 1:
raise list(exceptions.values())[0] raise list(exceptions.values())[0]
raise RetryProviderError("RetryProvider failed:\n" + "\n".join([ raise RetryProviderError("RetryProvider failed:\n" + "\n".join([
+4 -4
View File
@@ -75,14 +75,14 @@ class ToolSupportProvider(AsyncGeneratorProvider):
chunks = [] chunks = []
has_usage = False has_usage = False
method = get_async_provider_method(provider) method = get_async_provider_method(provider)
async for chunk in to_async_iterator(method( async for chunk in method(
model, model=model,
messages, messages=messages,
stream=stream, stream=stream,
media=media, media=media,
response_format=response_format, response_format=response_format,
**kwargs, **kwargs,
)): ):
if isinstance(chunk, str): if isinstance(chunk, str):
chunks.append(chunk) chunks.append(chunk)
elif isinstance(chunk, Usage): elif isinstance(chunk, Usage):
+3 -5
View File
@@ -298,9 +298,7 @@ async def async_iter_run_tools(
# Generate response # Generate response
method = get_async_provider_method(provider) method = get_async_provider_method(provider)
response = to_async_iterator( response = method(model=model, messages=messages, **kwargs)
method(model=model, messages=messages, **kwargs)
)
timeout = kwargs.get("stream_timeout") if provider.use_stream_timeout else kwargs.get("timeout") timeout = kwargs.get("stream_timeout") if provider.use_stream_timeout else kwargs.get("timeout")
response = wait_for(response, timeout=timeout) if stream else response response = wait_for(response, timeout=timeout) if stream else response
@@ -476,9 +474,9 @@ def iter_run_tools(
completion_tokens = 0 completion_tokens = 0
usage = None usage = None
method = get_provider_method(provider) method = get_provider_method(provider)
for chunk in to_sync_generator(method( for chunk in method(
model=model, messages=messages, provider=provider, **kwargs model=model, messages=messages, provider=provider, **kwargs
)): ):
if isinstance(chunk, FinishReason): if isinstance(chunk, FinishReason):
if sources is not None: if sources is not None:
yield sources yield sources