Add MCP server implementation with web search, scraping, and image generation tools
Co-authored-by: hlohaus <983577+hlohaus@users.noreply.github.com>
etc/unittest/__init__.py

@@ -15,5 +15,6 @@ from .retry_provider import *
 from .thinking import *
 from .web_search import *
 from .models import *
+from .mcp import *
 
 unittest.main()

etc/unittest/mcp.py (new file, 166 lines)
@@ -0,0 +1,166 @@
from __future__ import annotations

import json
import unittest

from g4f.mcp.server import MCPServer, MCPRequest
from g4f.mcp.tools import WebSearchTool, WebScrapeTool, ImageGenerationTool

try:
    from ddgs import DDGS, DDGSError
    from bs4 import BeautifulSoup
    has_requirements = True
except ImportError:
    has_requirements = False


class TestMCPServer(unittest.IsolatedAsyncioTestCase):
    """Test cases for MCP server"""

    async def test_server_initialization(self):
        """Test that server initializes correctly"""
        server = MCPServer()
        self.assertIsNotNone(server)
        self.assertEqual(server.server_info["name"], "gpt4free-mcp-server")
        self.assertEqual(len(server.tools), 3)
        self.assertIn('web_search', server.tools)
        self.assertIn('web_scrape', server.tools)
        self.assertIn('image_generation', server.tools)

    async def test_initialize_request(self):
        """Test initialize method"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=1,
            method="initialize",
            params={}
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 1)
        self.assertIsNotNone(response.result)
        self.assertEqual(response.result["protocolVersion"], "2024-11-05")
        self.assertIn("serverInfo", response.result)

    async def test_tools_list(self):
        """Test tools/list method"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=2,
            method="tools/list",
            params={}
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 2)
        self.assertIsNotNone(response.result)
        self.assertIn("tools", response.result)
        self.assertEqual(len(response.result["tools"]), 3)

        # Check tool structure
        tool_names = [tool["name"] for tool in response.result["tools"]]
        self.assertIn("web_search", tool_names)
        self.assertIn("web_scrape", tool_names)
        self.assertIn("image_generation", tool_names)

    async def test_ping(self):
        """Test ping method"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=3,
            method="ping",
            params={}
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 3)
        self.assertIsNotNone(response.result)

    async def test_invalid_method(self):
        """Test invalid method returns error"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=4,
            method="invalid_method",
            params={}
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 4)
        self.assertIsNotNone(response.error)
        self.assertEqual(response.error["code"], -32601)

    async def test_tool_call_invalid_tool(self):
        """Test calling non-existent tool"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=5,
            method="tools/call",
            params={
                "name": "nonexistent_tool",
                "arguments": {}
            }
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 5)
        self.assertIsNotNone(response.error)
        self.assertEqual(response.error["code"], -32601)


class TestMCPTools(unittest.IsolatedAsyncioTestCase):
    """Test cases for MCP tools"""

    def setUp(self) -> None:
        if not has_requirements:
            self.skipTest('MCP tools requirements not installed')

    async def test_web_search_tool_schema(self):
        """Test WebSearchTool schema"""
        tool = WebSearchTool()
        self.assertIsNotNone(tool.description)
        self.assertIsNotNone(tool.input_schema)
        self.assertEqual(tool.input_schema["type"], "object")
        self.assertIn("query", tool.input_schema["properties"])
        self.assertIn("query", tool.input_schema["required"])

    async def test_web_scrape_tool_schema(self):
        """Test WebScrapeTool schema"""
        tool = WebScrapeTool()
        self.assertIsNotNone(tool.description)
        self.assertIsNotNone(tool.input_schema)
        self.assertEqual(tool.input_schema["type"], "object")
        self.assertIn("url", tool.input_schema["properties"])
        self.assertIn("url", tool.input_schema["required"])

    async def test_image_generation_tool_schema(self):
        """Test ImageGenerationTool schema"""
        tool = ImageGenerationTool()
        self.assertIsNotNone(tool.description)
        self.assertIsNotNone(tool.input_schema)
        self.assertEqual(tool.input_schema["type"], "object")
        self.assertIn("prompt", tool.input_schema["properties"])
        self.assertIn("prompt", tool.input_schema["required"])

    async def test_web_search_missing_query(self):
        """Test web search with missing query parameter"""
        tool = WebSearchTool()
        result = await tool.execute({})
        self.assertIn("error", result)

    async def test_web_scrape_missing_url(self):
        """Test web scrape with missing url parameter"""
        tool = WebScrapeTool()
        result = await tool.execute({})
        self.assertIn("error", result)

    async def test_image_generation_missing_prompt(self):
        """Test image generation with missing prompt parameter"""
        tool = ImageGenerationTool()
        result = await tool.execute({})
        self.assertIn("error", result)

g4f/cli/__init__.py

@@ -78,12 +78,22 @@ def run_api_args(args):
         log_config=args.log_config,
     )
 
+def get_mcp_parser():
+    mcp_parser = ArgumentParser(description="Run the MCP (Model Context Protocol) server")
+    mcp_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
+    return mcp_parser
+
+def run_mcp_args(args):
+    from ..mcp.server import main as mcp_main
+    mcp_main()
+
 def main():
     parser = argparse.ArgumentParser(description="Run gpt4free", exit_on_error=False)
     subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
     subparsers.add_parser("api", parents=[get_api_parser()], add_help=False)
     subparsers.add_parser("gui", parents=[gui_parser()], add_help=False)
     subparsers.add_parser("client", parents=[get_parser()], add_help=False)
+    subparsers.add_parser("mcp", parents=[get_mcp_parser()], add_help=False)
 
     try:
         args = parser.parse_args()
@@ -93,8 +93,10 @@ def main():
             run_gui_args(args)
         elif args.mode == "client":
             run_client_args(args)
+        elif args.mode == "mcp":
+            run_mcp_args(args)
         else:
-            raise argparse.ArgumentError(None, "No valid mode specified. Use 'api', 'gui', or 'client'.")
+            raise argparse.ArgumentError(None, "No valid mode specified. Use 'api', 'gui', 'client', or 'mcp'.")
     except argparse.ArgumentError:
         try:
             run_client_args(get_parser(exit_on_error=False).parse_args(), exit_on_error=False)

g4f/mcp/README.md (new file, 283 lines)
@@ -0,0 +1,283 @@
# gpt4free MCP Server

A Model Context Protocol (MCP) server implementation for gpt4free that provides AI assistants with access to web search, scraping, and image generation capabilities.

## Overview

The gpt4free MCP server exposes three main tools:

1. **Web Search** - Search the web using DuckDuckGo
2. **Web Scraping** - Extract and clean text content from web pages
3. **Image Generation** - Generate images from text prompts using various AI providers

## Installation

The MCP server is included with gpt4free. No additional installation is required beyond the base gpt4free package.

```bash
pip install -e .
```

## Usage

### Running the MCP Server

Start the MCP server with:

```bash
python -m g4f.mcp
```

Or use the `g4f` command:

```bash
g4f mcp
```

The server communicates over stdin/stdout using the JSON-RPC 2.0 protocol.
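
As a quick manual check, you can pipe a single JSON-RPC message into the server and read the reply from stdout (a minimal sketch; each request and response is one line of JSON, and the startup banner goes to stderr):

```bash
echo '{"jsonrpc": "2.0", "id": 1, "method": "tools/list", "params": {}}' | python -m g4f.mcp
```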

### Configuration for AI Assistants

To use this MCP server with an AI assistant like Claude Desktop, add the following to your MCP configuration:

**For Claude Desktop** (`claude_desktop_config.json`):

```json
{
  "mcpServers": {
    "gpt4free": {
      "command": "python",
      "args": ["-m", "g4f.mcp"]
    }
  }
}
```

**For VS Code with Cline**:

```json
{
  "mcpServers": {
    "gpt4free": {
      "command": "python",
      "args": ["-m", "g4f.mcp"],
      "disabled": false
    }
  }
}
```

## Available Tools

### web_search

Search the web for information.

**Parameters:**
- `query` (string, required): The search query
- `max_results` (integer, optional): Maximum number of results (default: 5)

**Example:**
```json
{
  "name": "web_search",
  "arguments": {
    "query": "latest AI developments 2024",
    "max_results": 5
  }
}
```

### web_scrape

Scrape and extract text content from a web page.

**Parameters:**
- `url` (string, required): The URL to scrape
- `max_words` (integer, optional): Maximum number of words to extract (default: 1000)

**Example:**
```json
{
  "name": "web_scrape",
  "arguments": {
    "url": "https://example.com/article",
    "max_words": 1000
  }
}
```

### image_generation

Generate images from text prompts.

**Parameters:**
- `prompt` (string, required): Description of the image to generate
- `model` (string, optional): Image model to use (default: "flux")
- `width` (integer, optional): Image width in pixels (default: 1024)
- `height` (integer, optional): Image height in pixels (default: 1024)

**Example:**
```json
{
  "name": "image_generation",
  "arguments": {
    "prompt": "A serene mountain landscape at sunset",
    "width": 1024,
    "height": 1024
  }
}
```

## Protocol Details

The MCP server implements the Model Context Protocol using JSON-RPC 2.0 over stdio transport.

### Supported Methods

- `initialize` - Initialize the connection with the server
- `tools/list` - List all available tools
- `tools/call` - Execute a tool with the given arguments
- `ping` - Health check
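
For instance, a `ping` exchange is the smallest possible round trip (a sketch of the newline-delimited wire format; the `id` value is arbitrary). Sending

```json
{"jsonrpc": "2.0", "id": 42, "method": "ping", "params": {}}
```

returns

```json
{"jsonrpc": "2.0", "id": 42, "result": {}}
```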

### Example Request/Response

**Request:**
```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "web_search",
    "arguments": {
      "query": "Python programming tutorials",
      "max_results": 3
    }
  }
}
```

**Response:**
```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": {
    "content": [
      {
        "type": "text",
        "text": "{\"query\": \"Python programming tutorials\", \"results\": [...], \"count\": 3}"
      }
    ]
  }
}
```

## Requirements

The MCP server requires the following dependencies (included in gpt4free):

- `aiohttp` - For async HTTP requests
- `beautifulsoup4` - For web scraping
- `ddgs` - For web search

These are automatically installed with:

```bash
pip install -r requirements.txt
```

## Error Handling

The server returns standard JSON-RPC error responses:

- `-32601`: Method not found
- `-32602`: Invalid parameters
- `-32603`: Internal error

Errors specific to tools are returned in the result object with an `error` field.
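
For example, calling `web_search` without a `query` produces a normal JSON-RPC result whose text payload carries the error (a sketch; whitespace inside the embedded JSON is abbreviated here):

```json
{
  "jsonrpc": "2.0",
  "id": 4,
  "result": {
    "content": [
      {
        "type": "text",
        "text": "{\"error\": \"Query parameter is required\"}"
      }
    ]
  }
}
```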

## Development

### Project Structure

```
g4f/mcp/
├── __init__.py     # Package initialization
├── __main__.py     # CLI entry point
├── server.py       # MCP server implementation
├── tools.py        # Tool implementations
└── README.md       # This file
```

### Adding New Tools

To add a new tool:

1. Create a new class inheriting from `MCPTool` in `tools.py`
2. Implement the required properties and methods
3. Register the tool in `MCPServer.__init__()` in `server.py`

Example:

```python
class MyNewTool(MCPTool):
    @property
    def description(self) -> str:
        return "Description of what the tool does"

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "param1": {
                    "type": "string",
                    "description": "Parameter description"
                }
            },
            "required": ["param1"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Any:
        # Implementation
        pass
```
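
Then register it alongside the existing tools, as in step 3 above (a sketch; the `my_new_tool` key name is illustrative):

```python
# In MCPServer.__init__() in server.py:
self.tools = {
    'web_search': WebSearchTool(),
    'web_scrape': WebScrapeTool(),
    'image_generation': ImageGenerationTool(),
    'my_new_tool': MyNewTool(),
}
```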

## Troubleshooting

### Server Won't Start

Make sure all dependencies are installed:
```bash
pip install -r requirements.txt
```

### Tools Return Errors

Check that:
- Network connectivity is available for web search and scraping
- URLs are valid and accessible
- Image generation providers are not rate-limited

### Debug Mode

The server writes diagnostic information to stderr. To capture the debug output in a file:
```bash
python -m g4f.mcp 2> debug.log
```

## License

This MCP server is part of the gpt4free project and is licensed under the GNU General Public License v3.0.

## Contributing

Contributions are welcome! Please see the main gpt4free repository for contribution guidelines.

## Related Links

- [gpt4free Repository](https://github.com/xtekky/gpt4free)
- [Model Context Protocol Specification](https://modelcontextprotocol.io/)
- [MCP Documentation](https://modelcontextprotocol.io/docs)

g4f/mcp/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
"""MCP (Model Context Protocol) Server for gpt4free

This module provides an MCP server implementation that exposes gpt4free capabilities
through the Model Context Protocol standard, allowing AI assistants to access:
- Web search functionality
- Web scraping capabilities
- Image generation using various providers
"""

from .server import MCPServer
from .tools import WebSearchTool, WebScrapeTool, ImageGenerationTool

__all__ = ['MCPServer', 'WebSearchTool', 'WebScrapeTool', 'ImageGenerationTool']
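
The package can also be driven programmatically (a minimal usage sketch, not part of the committed files, using only the APIs added in this commit):

```python
import asyncio
from g4f.mcp import MCPServer

server = MCPServer()
# List the registered tools and their descriptions
for tool in server.get_tool_list():
    print(tool["name"], "-", tool["description"])
# Serve MCP requests over stdin/stdout (equivalent to `python -m g4f.mcp`)
asyncio.run(server.run())
```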

g4f/mcp/__main__.py (new file, 9 lines)
@@ -0,0 +1,9 @@
"""Main entry point for gpt4free MCP server

This module provides the main entry point for running the MCP server.
"""

from .server import main

if __name__ == "__main__":
    main()

g4f/mcp/claude_desktop_config.example.json (new file, 9 lines)
@@ -0,0 +1,9 @@
{
  "mcpServers": {
    "gpt4free": {
      "command": "python",
      "args": ["-m", "g4f.mcp"],
      "description": "gpt4free MCP server providing web search, scraping, and image generation"
    }
  }
}

g4f/mcp/server.py (new file, 202 lines)
@@ -0,0 +1,202 @@
"""MCP Server implementation using stdio transport

This module implements a Model Context Protocol (MCP) server that communicates
over standard input/output using JSON-RPC 2.0. The server exposes tools for:
- Web search
- Web scraping
- Image generation
"""

from __future__ import annotations

import sys
import json
import asyncio
from typing import Any, Dict, List, Optional
from dataclasses import dataclass, asdict

from .tools import WebSearchTool, WebScrapeTool, ImageGenerationTool


@dataclass
class MCPRequest:
    """MCP request following JSON-RPC 2.0 format"""
    jsonrpc: str = "2.0"
    id: Optional[int | str] = None
    method: Optional[str] = None
    params: Optional[Dict[str, Any]] = None


@dataclass
class MCPResponse:
    """MCP response following JSON-RPC 2.0 format"""
    jsonrpc: str = "2.0"
    id: Optional[int | str] = None
    result: Optional[Any] = None
    error: Optional[Dict[str, Any]] = None


class MCPServer:
    """Model Context Protocol server for gpt4free

    This server exposes gpt4free capabilities through the MCP standard,
    allowing AI assistants to utilize web search, scraping, and image generation.
    """

    def __init__(self):
        """Initialize MCP server with available tools"""
        self.tools = {
            'web_search': WebSearchTool(),
            'web_scrape': WebScrapeTool(),
            'image_generation': ImageGenerationTool(),
        }
        self.server_info = {
            "name": "gpt4free-mcp-server",
            "version": "1.0.0",
            "description": "MCP server providing web search, scraping, and image generation capabilities"
        }

    def get_tool_list(self) -> List[Dict[str, Any]]:
        """Get list of available tools with their schemas"""
        tool_list = []
        for name, tool in self.tools.items():
            tool_list.append({
                "name": name,
                "description": tool.description,
                "inputSchema": tool.input_schema
            })
        return tool_list

    async def handle_request(self, request: MCPRequest) -> MCPResponse:
        """Handle incoming MCP request"""
        try:
            method = request.method
            params = request.params or {}

            # Handle MCP protocol methods
            if method == "initialize":
                result = {
                    "protocolVersion": "2024-11-05",
                    "serverInfo": self.server_info,
                    "capabilities": {
                        "tools": {}
                    }
                }
                return MCPResponse(jsonrpc="2.0", id=request.id, result=result)

            elif method == "tools/list":
                result = {
                    "tools": self.get_tool_list()
                }
                return MCPResponse(jsonrpc="2.0", id=request.id, result=result)

            elif method == "tools/call":
                tool_name = params.get("name")
                tool_arguments = params.get("arguments", {})

                if tool_name not in self.tools:
                    return MCPResponse(
                        jsonrpc="2.0",
                        id=request.id,
                        error={
                            "code": -32601,
                            "message": f"Tool not found: {tool_name}"
                        }
                    )

                tool = self.tools[tool_name]
                result = await tool.execute(tool_arguments)

                return MCPResponse(
                    jsonrpc="2.0",
                    id=request.id,
                    result={
                        "content": [
                            {
                                "type": "text",
                                "text": json.dumps(result, indent=2)
                            }
                        ]
                    }
                )

            elif method == "ping":
                return MCPResponse(jsonrpc="2.0", id=request.id, result={})

            else:
                return MCPResponse(
                    jsonrpc="2.0",
                    id=request.id,
                    error={
                        "code": -32601,
                        "message": f"Method not found: {method}"
                    }
                )

        except Exception as e:
            return MCPResponse(
                jsonrpc="2.0",
                id=request.id,
                error={
                    "code": -32603,
                    "message": f"Internal error: {str(e)}"
                }
            )

    async def run(self):
        """Run the MCP server with stdio transport"""
        # Write server info to stderr for debugging
        sys.stderr.write(f"Starting {self.server_info['name']} v{self.server_info['version']}\n")
        sys.stderr.flush()

        while True:
            try:
                # Read line from stdin
                line = await asyncio.get_event_loop().run_in_executor(
                    None, sys.stdin.readline
                )

                if not line:
                    break

                # Parse JSON-RPC request
                request_data = json.loads(line)
                request = MCPRequest(
                    jsonrpc=request_data.get("jsonrpc", "2.0"),
                    id=request_data.get("id"),
                    method=request_data.get("method"),
                    params=request_data.get("params")
                )

                # Handle request
                response = await self.handle_request(request)

                # Write response to stdout
                response_dict = {
                    "jsonrpc": response.jsonrpc,
                    "id": response.id
                }
                if response.result is not None:
                    response_dict["result"] = response.result
                if response.error is not None:
                    response_dict["error"] = response.error

                sys.stdout.write(json.dumps(response_dict) + "\n")
                sys.stdout.flush()

            except json.JSONDecodeError as e:
                sys.stderr.write(f"JSON decode error: {e}\n")
                sys.stderr.flush()
            except Exception as e:
                sys.stderr.write(f"Error: {e}\n")
                sys.stderr.flush()


def main():
    """Main entry point for MCP server"""
    server = MCPServer()
    asyncio.run(server.run())


if __name__ == "__main__":
    main()

g4f/mcp/tools.py (new file, 264 lines)
@@ -0,0 +1,264 @@
"""MCP Tools for gpt4free

This module provides MCP tool implementations that wrap gpt4free capabilities:
- WebSearchTool: Web search using ddg search
- WebScrapeTool: Web page scraping and content extraction
- ImageGenerationTool: Image generation using various AI providers
"""

from __future__ import annotations

import asyncio
from typing import Any, Dict
from abc import ABC, abstractmethod


class MCPTool(ABC):
    """Base class for MCP tools"""

    @property
    @abstractmethod
    def description(self) -> str:
        """Tool description"""
        pass

    @property
    @abstractmethod
    def input_schema(self) -> Dict[str, Any]:
        """JSON schema for tool input parameters"""
        pass

    @abstractmethod
    async def execute(self, arguments: Dict[str, Any]) -> Any:
        """Execute the tool with given arguments"""
        pass


class WebSearchTool(MCPTool):
    """Web search tool using gpt4free's search capabilities"""

    @property
    def description(self) -> str:
        return "Search the web for information using DuckDuckGo. Returns search results with titles, URLs, and snippets."

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query to execute"
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of results to return (default: 5)",
                    "default": 5
                }
            },
            "required": ["query"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute web search"""
        from ..tools.web_search import do_search

        query = arguments.get("query", "")
        max_results = arguments.get("max_results", 5)

        if not query:
            return {
                "error": "Query parameter is required"
            }

        try:
            # Perform search
            result, sources = await do_search(
                prompt=query,
                query=query,
                instructions=""
            )

            # Format results
            search_results = []
            if sources:
                for i, source in enumerate(sources[:max_results]):
                    search_results.append({
                        "title": source.get("title", ""),
                        "url": source.get("url", ""),
                        "snippet": source.get("snippet", "")
                    })

            return {
                "query": query,
                "results": search_results,
                "count": len(search_results)
            }

        except Exception as e:
            return {
                "error": f"Search failed: {str(e)}"
            }


class WebScrapeTool(MCPTool):
    """Web scraping tool using gpt4free's scraping capabilities"""

    @property
    def description(self) -> str:
        return "Scrape and extract text content from a web page URL. Returns cleaned text content with optional word limit."

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The URL of the web page to scrape"
                },
                "max_words": {
                    "type": "integer",
                    "description": "Maximum number of words to extract (default: 1000)",
                    "default": 1000
                }
            },
            "required": ["url"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute web scraping"""
        from ..tools.fetch_and_scrape import fetch_and_scrape
        from aiohttp import ClientSession

        url = arguments.get("url", "")
        max_words = arguments.get("max_words", 1000)

        if not url:
            return {
                "error": "URL parameter is required"
            }

        try:
            # Scrape the URL
            async with ClientSession() as session:
                content = await fetch_and_scrape(
                    session=session,
                    url=url,
                    max_words=max_words,
                    add_source=True
                )

            if not content:
                return {
                    "error": "Failed to scrape content from URL"
                }

            return {
                "url": url,
                "content": content,
                "word_count": len(content.split())
            }

        except Exception as e:
            return {
                "error": f"Scraping failed: {str(e)}"
            }


class ImageGenerationTool(MCPTool):
    """Image generation tool using gpt4free's image generation capabilities"""

    @property
    def description(self) -> str:
        return "Generate images from text prompts using AI image generation providers. Returns base64-encoded image data."

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "prompt": {
                    "type": "string",
                    "description": "The text prompt describing the image to generate"
                },
                "model": {
                    "type": "string",
                    "description": "The image generation model to use (default: flux)",
                    "default": "flux"
                },
                "width": {
                    "type": "integer",
                    "description": "Image width in pixels (default: 1024)",
                    "default": 1024
                },
                "height": {
                    "type": "integer",
                    "description": "Image height in pixels (default: 1024)",
                    "default": 1024
                }
            },
            "required": ["prompt"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute image generation"""
        from ..client import AsyncClient
        from ..image import to_data_uri
        import base64

        prompt = arguments.get("prompt", "")
        model = arguments.get("model", "flux")
        width = arguments.get("width", 1024)
        height = arguments.get("height", 1024)

        if not prompt:
            return {
                "error": "Prompt parameter is required"
            }

        try:
            # Generate image using gpt4free client
            client = AsyncClient()

            response = await client.images.generate(
                model=model,
                prompt=prompt,
                width=width,
                height=height
            )

            # Get the image data
            if response and hasattr(response, 'data') and response.data:
                image_data = response.data[0]

                # Convert to base64 if needed
                if hasattr(image_data, 'url'):
                    image_url = image_data.url

                    # Check if it's already a data URI
                    if image_url.startswith('data:'):
                        return {
                            "prompt": prompt,
                            "model": model,
                            "width": width,
                            "height": height,
                            "image": image_url
                        }
                    else:
                        return {
                            "prompt": prompt,
                            "model": model,
                            "width": width,
                            "height": height,
                            "image_url": image_url
                        }

            return {
                "error": "Image generation failed: No image data in response"
            }

        except Exception as e:
            return {
                "error": f"Image generation failed: {str(e)}"
            }

setup.py
@@ -114,7 +114,10 @@ setup(
     install_requires=INSTALL_REQUIRE,
     extras_require=EXTRA_REQUIRE,
     entry_points={
-        'console_scripts': ['g4f=g4f.cli:main'],
+        'console_scripts': [
+            'g4f=g4f.cli:main',
+            'g4f-mcp=g4f.mcp.server:main',
+        ],
     },
     url='https://github.com/xtekky/gpt4free',  # Link to your GitHub repository
     project_urls={
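
With this entry point, reinstalling the package (for example via `pip install -e .`) should also expose the server as a standalone console script, presumably invoked as:

```bash
g4f-mcp
```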