Initial commit: Penpot MCP Server - AI-powered design workflow automation built on the Model Context Protocol (MCP), with Penpot API integration, Claude AI support, CLI tools, and comprehensive documentation

Author: Chema
Date: 2025-05-26 19:16:46 +02:00
Commit: 85e658d2cd
42 changed files with 8159 additions and 0 deletions

penpot_mcp/__init__.py Normal file

@@ -0,0 +1 @@
"""Package penpot_mcp."""


@@ -0,0 +1 @@
"""PenpotAPI module for interacting with the Penpot design platform."""


@@ -0,0 +1,852 @@
import argparse
import json
import os
from typing import Any, Dict, List, Optional, Union
import requests
from dotenv import load_dotenv
class PenpotAPI:
def __init__(
self,
base_url: Optional[str] = None,
debug: bool = False,
email: Optional[str] = None,
password: Optional[str] = None):
# Load environment variables if not already loaded
load_dotenv()
# Use base_url from parameters if provided, otherwise from environment,
# fallback to default URL
self.base_url = base_url or os.getenv("PENPOT_API_URL", "https://design.penpot.app/api")
self.session = requests.Session()
self.access_token = None
self.debug = debug
self.email = email or os.getenv("PENPOT_USERNAME")
self.password = password or os.getenv("PENPOT_PASSWORD")
self.profile_id = None
# Set default headers - we'll use different headers at request time
# based on the required content type (JSON vs Transit+JSON)
self.session.headers.update({
"Accept": "application/json, application/transit+json",
"Content-Type": "application/json"
})
def set_access_token(self, token: str):
"""Set the auth token for authentication."""
self.access_token = token
# For cookie-based auth, set the auth-token cookie
self.session.cookies.set("auth-token", token)
# Also set Authorization header for APIs that use it
self.session.headers.update({
"Authorization": f"Token {token}"
})
def login_with_password(
self,
email: Optional[str] = None,
password: Optional[str] = None) -> str:
"""
Login with email and password to get an auth token.
This method uses the same cookie-based auth approach as the export methods.
Args:
email: Email for Penpot account (if None, will use stored email or PENPOT_USERNAME env var)
password: Password for Penpot account (if None, will use stored password or PENPOT_PASSWORD env var)
Returns:
Auth token for API calls
"""
# Just use the export authentication as it's more reliable
token = self.login_for_export(email, password)
self.set_access_token(token)
# Get profile ID after login
self.get_profile()
return token
def get_profile(self) -> Dict[str, Any]:
"""
Get profile information for the current authenticated user.
Returns:
Dictionary containing profile information, including the profile ID
"""
url = f"{self.base_url}/rpc/command/get-profile"
payload = {} # No parameters needed
response = self._make_authenticated_request('post', url, json=payload, use_transit=False)
# Parse and normalize the response
data = response.json()
normalized_data = self._normalize_transit_response(data)
if self.debug:
print("\nProfile data retrieved:")
print(json.dumps(normalized_data, indent=2)[:200] + "...")
# Store profile ID for later use
if 'id' in normalized_data:
self.profile_id = normalized_data['id']
if self.debug:
print(f"\nStored profile ID: {self.profile_id}")
return normalized_data
def login_for_export(self, email: Optional[str] = None, password: Optional[str] = None) -> str:
"""
Login with email and password to get an auth token for export operations.
This is required for export operations which use a different authentication
mechanism than the standard API access token.
Args:
email: Email for Penpot account (if None, will use stored email or PENPOT_USERNAME env var)
password: Password for Penpot account (if None, will use stored password or PENPOT_PASSWORD env var)
Returns:
Auth token extracted from cookies
"""
# Use parameters if provided, else use instance variables, else check environment variables
email = email or self.email or os.getenv("PENPOT_USERNAME")
password = password or self.password or os.getenv("PENPOT_PASSWORD")
if not email or not password:
raise ValueError(
"Email and password are required for export authentication. "
"Please provide them as parameters or set PENPOT_USERNAME and "
"PENPOT_PASSWORD environment variables."
)
url = f"{self.base_url}/rpc/command/login-with-password"
# Use Transit+JSON format
payload = {
"~:email": email,
"~:password": password
}
if self.debug:
print("\nLogin request payload (Transit+JSON format):")
print(json.dumps(payload, indent=2).replace(password, "********"))
# Create a new session just for this request
login_session = requests.Session()
# Set headers
headers = {
"Content-Type": "application/transit+json"
}
response = login_session.post(url, json=payload, headers=headers)
if self.debug and response.status_code != 200:
print(f"\nError response: {response.status_code}")
print(f"Response text: {response.text}")
response.raise_for_status()
# Extract auth token from cookies
if 'Set-Cookie' in response.headers:
if self.debug:
print("\nSet-Cookie header found")
for cookie in login_session.cookies:
if cookie.name == "auth-token":
if self.debug:
print(f"\nAuth token extracted from cookies: {cookie.value[:10]}...")
return cookie.value
raise ValueError("Auth token not found in response cookies")
else:
# Try to extract from response JSON if available
try:
data = response.json()
if 'auth-token' in data:
return data['auth-token']
except Exception:
pass
# If we reached here, we couldn't find the token
raise ValueError("Auth token not found in response cookies or JSON body")
def _make_authenticated_request(self, method: str, url: str, **kwargs) -> requests.Response:
"""
Make an authenticated request, handling re-auth if needed.
This internal method handles lazy authentication when a request
fails due to authentication issues, using the same cookie-based
approach as the export methods.
Args:
method: HTTP method (post, get, etc.)
url: URL to make the request to
**kwargs: Additional arguments to pass to requests
Returns:
The response object
"""
# If we don't have a token yet but have credentials, login first
if not self.access_token and self.email and self.password:
if self.debug:
print("\nNo access token set, logging in with credentials...")
self.login_with_password()
# Set up headers
headers = kwargs.get('headers', {})
if 'headers' in kwargs:
del kwargs['headers']
# Use Transit+JSON format for API calls (required by Penpot)
use_transit = kwargs.pop('use_transit', True)
if use_transit:
headers['Content-Type'] = 'application/transit+json'
headers['Accept'] = 'application/transit+json'
# Convert payload to Transit+JSON format if present
if 'json' in kwargs and kwargs['json']:
payload = kwargs['json']
# Only transform if not already in Transit format
if not any(isinstance(k, str) and k.startswith('~:') for k in payload.keys()):
transit_payload = {}
# Add cmd if not present
if 'cmd' not in payload and '~:cmd' not in payload:
# Extract command from URL
cmd = url.split('/')[-1]
transit_payload['~:cmd'] = f"~:{cmd}"
# Convert standard JSON to Transit+JSON format
for key, value in payload.items():
# Skip command if already added
if key == 'cmd':
continue
transit_key = f"~:{key}" if not key.startswith('~:') else key
# Handle special UUID conversion for IDs
if isinstance(value, str) and ('-' in value) and len(value) > 30:
transit_value = f"~u{value}"
else:
transit_value = value
transit_payload[transit_key] = transit_value
if self.debug:
print("\nConverted payload to Transit+JSON format:")
print(f"Original: {payload}")
print(f"Transit: {transit_payload}")
kwargs['json'] = transit_payload
else:
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
# Ensure the Authorization header is set if we have a token
if self.access_token:
headers['Authorization'] = f"Token {self.access_token}"
# Combine with session headers
combined_headers = {**self.session.headers, **headers}
# Make the request
try:
response = getattr(self.session, method)(url, headers=combined_headers, **kwargs)
if self.debug:
print(f"\nRequest to: {url}")
print(f"Method: {method}")
print(f"Headers: {combined_headers}")
if 'json' in kwargs:
print(f"Payload: {json.dumps(kwargs['json'], indent=2)}")
print(f"Response status: {response.status_code}")
response.raise_for_status()
return response
except requests.HTTPError as e:
# Handle authentication errors
if e.response.status_code in (401, 403) and self.email and self.password:
if self.debug:
print("\nAuthentication failed. Trying to re-login...")
# Re-login and update token
self.login_with_password()
# Update headers with new token
headers['Authorization'] = f"Token {self.access_token}"
combined_headers = {**self.session.headers, **headers}
# Retry the request with the new token
response = getattr(self.session, method)(url, headers=combined_headers, **kwargs)
response.raise_for_status()
return response
else:
# Re-raise other errors
raise
def _normalize_transit_response(self, data: Union[Dict, List, Any]) -> Union[Dict, List, Any]:
"""
Normalize a Transit+JSON response to a more usable format.
This recursively processes the response data, handling special Transit types
like UUIDs, keywords, and nested structures.
Args:
data: The data to normalize, can be a dict, list, or other value
Returns:
Normalized data
"""
if isinstance(data, dict):
# Normalize dictionary
result = {}
for key, value in data.items():
# Convert transit keywords in keys (~:key -> key)
norm_key = key.replace(
'~:', '') if isinstance(
key, str) and key.startswith('~:') else key
# Recursively normalize values
result[norm_key] = self._normalize_transit_response(value)
return result
elif isinstance(data, list):
# Normalize list items
return [self._normalize_transit_response(item) for item in data]
elif isinstance(data, str) and data.startswith('~u'):
# Convert Transit UUIDs (~u123-456 -> 123-456)
return data[2:]
else:
# Return other types as-is
return data
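# A quick illustration of the normalization (hypothetical values): a
# Transit+JSON response such as
#   {"~:id": "~u123e4567-e89b-12d3-a456-426614174000", "~:name": "My file"}
# becomes
#   {"id": "123e4567-e89b-12d3-a456-426614174000", "name": "My file"}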
def list_projects(self) -> List[Dict[str, Any]]:
"""
List all available projects for the authenticated user.
Returns:
List of project dictionaries
"""
url = f"{self.base_url}/rpc/command/get-all-projects"
payload = {} # No parameters required
response = self._make_authenticated_request('post', url, json=payload, use_transit=False)
if self.debug:
content_type = response.headers.get('Content-Type', '')
print(f"\nResponse content type: {content_type}")
print(f"Response preview: {response.text[:100]}...")
# Parse JSON
data = response.json()
if self.debug:
print("\nData preview:")
print(json.dumps(data, indent=2)[:200] + "...")
return data
def get_project(self, project_id: str) -> Optional[Dict[str, Any]]:
"""
Get details for a specific project.
Args:
project_id: The ID of the project to retrieve
Returns:
Dictionary containing project information
"""
# First get all projects
projects = self.list_projects()
# Find the specific project by ID
for project in projects:
if project.get('id') == project_id:
return project
return None
def get_project_files(self, project_id: str) -> List[Dict[str, Any]]:
"""
Get all files for a specific project.
Args:
project_id: The ID of the project
Returns:
List of file information dictionaries
"""
url = f"{self.base_url}/rpc/command/get-project-files"
payload = {
"project-id": project_id
}
response = self._make_authenticated_request('post', url, json=payload, use_transit=False)
# Parse JSON
files = response.json()
return files
def get_file(self, file_id: str, save_data: bool = False,
save_raw_response: bool = False) -> Dict[str, Any]:
"""
Get details for a specific file.
Args:
file_id: The ID of the file to retrieve
save_data: Whether to save the data to a file
save_raw_response: Whether to save the raw response
Returns:
Dictionary containing file information
"""
url = f"{self.base_url}/rpc/command/get-file"
payload = {
"id": file_id,
}
response = self._make_authenticated_request('post', url, json=payload, use_transit=False)
# Save raw response if requested
if save_raw_response:
raw_filename = f"{file_id}_raw_response.json"
with open(raw_filename, 'w') as f:
f.write(response.text)
if self.debug:
print(f"\nSaved raw response to {raw_filename}")
# Parse JSON
data = response.json()
# Save normalized data if requested
if save_data:
filename = f"{file_id}.json"
with open(filename, 'w') as f:
json.dump(data, f, indent=2)
if self.debug:
print(f"\nSaved file data to {filename}")
return data
def create_export(self, file_id: str, page_id: str, object_id: str,
export_type: str = "png", scale: int = 1,
email: Optional[str] = None, password: Optional[str] = None,
profile_id: Optional[str] = None):
"""
Create an export job for a Penpot object.
Args:
file_id: The file ID
page_id: The page ID
object_id: The object ID to export
export_type: Type of export (png, svg, pdf)
scale: Scale factor for the export
email: Email for authentication (if different from instance)
password: Password for authentication (if different from instance)
profile_id: Optional profile ID (if not provided, will be fetched automatically)
Returns:
Export resource ID
"""
# This uses the cookie auth approach, which requires login
token = self.login_for_export(email, password)
# If profile_id is not provided, get it from instance variable or fetch it
if not profile_id:
if not self.profile_id:
# We need to set the token first for the get_profile call to work
self.set_access_token(token)
self.get_profile()
profile_id = self.profile_id
if not profile_id:
raise ValueError("Profile ID not available and couldn't be retrieved automatically")
# Build the URL for export creation
url = f"{self.base_url}/export"
# Set up the data for the export
payload = {
"~:wait": True,
"~:exports": [
{"~:type": f"~:{export_type}",
"~:suffix": "",
"~:scale": scale,
"~:page-id": f"~u{page_id}",
"~:file-id": f"~u{file_id}",
"~:name": "",
"~:object-id": f"~u{object_id}"}
],
"~:profile-id": f"~u{profile_id}",
"~:cmd": "~:export-shapes"
}
if self.debug:
print("\nCreating export with parameters:")
print(json.dumps(payload, indent=2))
# Create a session with the auth token
export_session = requests.Session()
export_session.cookies.set("auth-token", token)
headers = {
"Content-Type": "application/transit+json",
"Accept": "application/transit+json"
}
# Make the request
response = export_session.post(url, json=payload, headers=headers)
if self.debug and response.status_code != 200:
print(f"\nError response: {response.status_code}")
print(f"Response text: {response.text}")
response.raise_for_status()
# Parse the response
data = response.json()
if self.debug:
print("\nExport created successfully")
print(f"Response: {json.dumps(data, indent=2)}")
# Extract and return the resource ID
resource_id = data.get("~:id")
if not resource_id:
raise ValueError("Resource ID not found in response")
return resource_id
def get_export_resource(self,
resource_id: str,
save_to_file: Optional[str] = None,
email: Optional[str] = None,
password: Optional[str] = None) -> Union[bytes,
str]:
"""
Download an export resource by ID.
Args:
resource_id: The resource ID from create_export
save_to_file: Path to save the file (if None, returns the content)
email: Email for authentication (if different from instance)
password: Password for authentication (if different from instance)
Returns:
Either the file content as bytes, or the path to the saved file
"""
# This uses the cookie auth approach, which requires login
token = self.login_for_export(email, password)
# Build the URL for the resource
url = f"{self.base_url}/export"
payload = {
"~:wait": False,
"~:cmd": "~:get-resource",
"~:id": resource_id
}
headers = {
"Content-Type": "application/transit+json",
"Accept": "*/*"
}
if self.debug:
print(f"\nFetching export resource: {url}")
# Create a session with the auth token
export_session = requests.Session()
export_session.cookies.set("auth-token", token)
# Make the request
response = export_session.post(url, json=payload, headers=headers)
if self.debug and response.status_code != 200:
print(f"\nError response: {response.status_code}")
print(f"Response headers: {response.headers}")
response.raise_for_status()
# Get the content type
content_type = response.headers.get('Content-Type', '')
if self.debug:
print(f"\nResource fetched successfully")
print(f"Content-Type: {content_type}")
print(f"Content length: {len(response.content)} bytes")
# Determine filename if saving to file
if save_to_file:
if os.path.isdir(save_to_file):
# If save_to_file is a directory, we need to figure out the filename
filename = None
# Try to get filename from Content-Disposition header
content_disp = response.headers.get('Content-Disposition', '')
if 'filename=' in content_disp:
filename = content_disp.split('filename=')[1].strip('"\'')
# If we couldn't get a filename, use the resource_id with an extension
if not filename:
ext = content_type.split('/')[-1].split(';')[0]
if ext in ('jpeg', 'png', 'pdf', 'svg+xml'):
if ext == 'svg+xml':
ext = 'svg'
filename = f"{resource_id}.{ext}"
else:
filename = f"{resource_id}"
save_path = os.path.join(save_to_file, filename)
else:
# Use the provided path directly
save_path = save_to_file
# Ensure the directory exists
os.makedirs(os.path.dirname(os.path.abspath(save_path)), exist_ok=True)
# Save the content to file
with open(save_path, 'wb') as f:
f.write(response.content)
if self.debug:
print(f"\nSaved resource to {save_path}")
return save_path
else:
# Return the content
return response.content
def export_and_download(self, file_id: str, page_id: str, object_id: str,
save_to_file: Optional[str] = None, export_type: str = "png",
scale: int = 1,
email: Optional[str] = None, password: Optional[str] = None,
profile_id: Optional[str] = None) -> Union[bytes, str]:
"""
Create and download an export in one step.
This is a convenience method that combines create_export and get_export_resource.
Args:
file_id: The file ID
page_id: The page ID
object_id: The object ID to export
save_to_file: Path to save the file (if None, returns the content)
export_type: Type of export (png, svg, pdf)
scale: Scale factor for the export
email: Email for authentication (if different from instance)
password: Password for authentication (if different from instance)
profile_id: Optional profile ID (if not provided, will be fetched automatically)
Returns:
Either the file content as bytes, or the path to the saved file
"""
# Create the export
resource_id = self.create_export(
file_id=file_id,
page_id=page_id,
object_id=object_id,
export_type=export_type,
scale=scale,
email=email,
password=password,
profile_id=profile_id
)
# Download the resource
return self.get_export_resource(
resource_id=resource_id,
save_to_file=save_to_file,
email=email,
password=password
)
def extract_components(self, file_data: Dict[str, Any]) -> Dict[str, Any]:
"""
Extract components from file data.
This processes a file's data to extract and normalize component information.
Args:
file_data: The file data from get_file
Returns:
Dictionary containing components information
"""
components = {}
components_index = file_data.get('data', {}).get('componentsIndex', {})
for component_id, component_data in components_index.items():
# Extract basic component info
component = {
'id': component_id,
'name': component_data.get('name', 'Unnamed'),
'path': component_data.get('path', []),
'shape': component_data.get('shape', ''),
'fileId': component_data.get('fileId', file_data.get('id')),
'created': component_data.get('created'),
'modified': component_data.get('modified')
}
# Add the component to our collection
components[component_id] = component
return {'components': components}
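# Example of the returned shape (hypothetical IDs and values):
#   {"components": {"<component-id>": {"id": "<component-id>", "name": "Button",
#       "path": [], "shape": "...", "fileId": "<file-id>",
#       "created": "...", "modified": "..."}}}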
def analyze_file_structure(self, file_data: Dict[str, Any]) -> Dict[str, Any]:
"""
Analyze file structure and return summary information.
Args:
file_data: The file data from get_file
Returns:
Dictionary containing analysis information
"""
data = file_data.get('data', {})
# Count pages
pages = data.get('pagesIndex', {})
page_count = len(pages)
# Count objects by type
object_types = {}
total_objects = 0
for page_id, page_data in pages.items():
objects = page_data.get('objects', {})
total_objects += len(objects)
for obj_id, obj_data in objects.items():
obj_type = obj_data.get('type', 'unknown')
object_types[obj_type] = object_types.get(obj_type, 0) + 1
# Count components
components = data.get('componentsIndex', {})
component_count = len(components)
# Count colors, typographies, etc.
colors = data.get('colorsIndex', {})
color_count = len(colors)
typographies = data.get('typographiesIndex', {})
typography_count = len(typographies)
return {
'pageCount': page_count,
'objectCount': total_objects,
'objectTypes': object_types,
'componentCount': component_count,
'colorCount': color_count,
'typographyCount': typography_count,
'fileName': file_data.get('name', 'Unknown'),
'fileId': file_data.get('id')
}
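# Example summary for a small file (hypothetical values):
#   {"pageCount": 2, "objectCount": 57,
#    "objectTypes": {"frame": 4, "rect": 30, "text": 23},
#    "componentCount": 5, "colorCount": 8, "typographyCount": 3,
#    "fileName": "Design System", "fileId": "<file-id>"}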
def main():
# Set up argument parser
parser = argparse.ArgumentParser(description='Penpot API Tool')
parser.add_argument('--debug', action='store_true', help='Enable debug output')
# Create subparsers for different commands
subparsers = parser.add_subparsers(dest='command', help='Command to run')
# List projects command
list_parser = subparsers.add_parser('list-projects', help='List all projects')
# Get project command
project_parser = subparsers.add_parser('get-project', help='Get project details')
project_parser.add_argument('--id', required=True, help='Project ID')
# List files command
files_parser = subparsers.add_parser('list-files', help='List files in a project')
files_parser.add_argument('--project-id', required=True, help='Project ID')
# Get file command
file_parser = subparsers.add_parser('get-file', help='Get file details')
file_parser.add_argument('--file-id', required=True, help='File ID')
file_parser.add_argument('--save', action='store_true', help='Save file data to JSON')
# Export command
export_parser = subparsers.add_parser('export', help='Export an object')
export_parser.add_argument(
'--profile-id',
required=False,
help='Profile ID (optional, will be fetched automatically if not provided)')
export_parser.add_argument('--file-id', required=True, help='File ID')
export_parser.add_argument('--page-id', required=True, help='Page ID')
export_parser.add_argument('--object-id', required=True, help='Object ID')
export_parser.add_argument(
'--type',
default='png',
choices=[
'png',
'svg',
'pdf'],
help='Export type')
export_parser.add_argument('--scale', type=int, default=1, help='Scale factor')
export_parser.add_argument('--output', required=True, help='Output file path')
# Parse arguments
args = parser.parse_args()
# Create API client
api = PenpotAPI(debug=args.debug)
# Handle different commands
if args.command == 'list-projects':
projects = api.list_projects()
print(f"Found {len(projects)} projects:")
for project in projects:
print(f"- {project.get('name')} - {project.get('teamName')} (ID: {project.get('id')})")
elif args.command == 'get-project':
project = api.get_project(args.id)
if project:
print(f"Project: {project.get('name')}")
print(json.dumps(project, indent=2))
else:
print(f"Project not found: {args.id}")
elif args.command == 'list-files':
files = api.get_project_files(args.project_id)
print(f"Found {len(files)} files:")
for file in files:
print(f"- {file.get('name')} (ID: {file.get('id')})")
elif args.command == 'get-file':
file_data = api.get_file(args.file_id, save_data=args.save)
print(f"File: {file_data.get('name')}")
if args.save:
print(f"Data saved to {args.file_id}.json")
else:
print("File metadata:")
print(json.dumps({k: v for k, v in file_data.items() if k != 'data'}, indent=2))
elif args.command == 'export':
output_path = api.export_and_download(
file_id=args.file_id,
page_id=args.page_id,
object_id=args.object_id,
export_type=args.type,
scale=args.scale,
save_to_file=args.output,
profile_id=args.profile_id
)
print(f"Exported to: {output_path}")
else:
parser.print_help()
if __name__ == '__main__':
main()
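The CLI above is a thin wrapper over PenpotAPI, which can also be driven directly from Python. A minimal sketch, assuming PENPOT_USERNAME and PENPOT_PASSWORD are set in the environment and the IDs are placeholders for values from your own account:

from penpot_mcp.api.penpot_api import PenpotAPI

api = PenpotAPI()  # reads PENPOT_API_URL from the environment, or uses the default

# Authentication happens lazily on the first authenticated request
for project in api.list_projects():
    print(project.get('name'), project.get('id'))

# Export a single object to disk (replace the IDs with real ones)
path = api.export_and_download(
    file_id="your-file-id",
    page_id="your-page-id",
    object_id="your-object-id",
    export_type="png",
    scale=2,
    save_to_file="exported.png",
)
print(f"Saved to {path}")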


@@ -0,0 +1,299 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"required": ["colors", "typographies", "pages", "components", "id", "tokensLib", "pagesIndex"],
"properties": {
"colors": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["path", "color", "name", "modifiedAt", "opacity", "id"],
"properties": {
"path": {"type": "string"},
"color": {"type": "string", "pattern": "^#[0-9A-Fa-f]{6}$"},
"name": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"},
"opacity": {"type": "number", "minimum": 0, "maximum": 1},
"id": {"type": "string", "format": "uuid"}
}
}
}
},
"typographies": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["lineHeight", "path", "fontStyle", "textTransform", "fontId", "fontSize", "fontWeight", "name", "modifiedAt", "fontVariantId", "id", "letterSpacing", "fontFamily"],
"properties": {
"lineHeight": {"type": "string"},
"path": {"type": "string"},
"fontStyle": {"type": "string", "enum": ["normal"]},
"textTransform": {"type": "string", "enum": ["uppercase", "none"]},
"fontId": {"type": "string"},
"fontSize": {"type": "string"},
"fontWeight": {"type": "string"},
"name": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"},
"fontVariantId": {"type": "string"},
"id": {"type": "string", "format": "uuid"},
"letterSpacing": {"type": "string"},
"fontFamily": {"type": "string"}
}
}
}
},
"pages": {
"type": "array",
"items": {"type": "string", "format": "uuid"}
},
"components": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["id", "name", "path", "modifiedAt", "mainInstanceId", "mainInstancePage"],
"properties": {
"id": {"type": "string", "format": "uuid"},
"name": {"type": "string"},
"path": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"},
"mainInstanceId": {"type": "string", "format": "uuid"},
"mainInstancePage": {"type": "string", "format": "uuid"},
"annotation": {"type": "string"}
}
}
}
},
"id": {"type": "string", "format": "uuid"},
"tokensLib": {
"type": "object",
"required": ["sets", "themes", "activeThemes"],
"properties": {
"sets": {
"type": "object",
"patternProperties": {
"^S-[a-z]+$": {
"type": "object",
"required": ["name", "description", "modifiedAt", "tokens"],
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"},
"tokens": {
"type": "object",
"patternProperties": {
"^[a-z][a-z0-9.-]*$": {
"type": "object",
"required": ["name", "type", "value", "description", "modifiedAt"],
"properties": {
"name": {"type": "string"},
"type": {"type": "string", "enum": ["dimensions", "sizing", "color", "border-radius", "spacing", "stroke-width", "rotation", "opacity"]},
"value": {"type": "string"},
"description": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"}
}
}
}
}
}
}
}
},
"themes": {
"type": "object",
"patternProperties": {
".*": {
"type": "object",
"patternProperties": {
".*": {
"type": "object",
"required": ["name", "group", "description", "isSource", "id", "modifiedAt", "sets"],
"properties": {
"name": {"type": "string"},
"group": {"type": "string"},
"description": {"type": "string"},
"isSource": {"type": "boolean"},
"id": {"type": "string", "format": "uuid"},
"modifiedAt": {"type": "string", "format": "date-time"},
"sets": {"type": "array", "items": {"type": "string"}}
}
}
}
}
}
},
"activeThemes": {
"type": "array",
"items": {"type": "string"}
}
}
},
"options": {
"type": "object",
"properties": {
"componentsV2": {"type": "boolean"}
}
},
"pagesIndex": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["options", "objects", "id", "name"],
"properties": {
"options": {"type": "object"},
"objects": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["id", "name", "type"],
"properties": {
"id": {"type": "string", "format": "uuid"},
"name": {"type": "string"},
"type": {"type": "string", "enum": ["frame", "rect", "text"]},
"x": {"type": "number"},
"y": {"type": "number"},
"width": {"type": "number"},
"height": {"type": "number"},
"rotation": {"type": "number"},
"selrect": {
"type": "object",
"properties": {
"x": {"type": "number"},
"y": {"type": "number"},
"width": {"type": "number"},
"height": {"type": "number"},
"x1": {"type": "number"},
"y1": {"type": "number"},
"x2": {"type": "number"},
"y2": {"type": "number"}
}
},
"points": {
"type": "array",
"items": {
"type": "object",
"properties": {
"x": {"type": "number"},
"y": {"type": "number"}
}
}
},
"transform": {
"type": "object",
"properties": {
"a": {"type": "number"},
"b": {"type": "number"},
"c": {"type": "number"},
"d": {"type": "number"},
"e": {"type": "number"},
"f": {"type": "number"}
}
},
"transformInverse": {
"type": "object",
"properties": {
"a": {"type": "number"},
"b": {"type": "number"},
"c": {"type": "number"},
"d": {"type": "number"},
"e": {"type": "number"},
"f": {"type": "number"}
}
},
"parentId": {"type": "string", "format": "uuid"},
"frameId": {"type": "string", "format": "uuid"},
"flipX": {"type": ["null", "boolean"]},
"flipY": {"type": ["null", "boolean"]},
"hideFillOnExport": {"type": "boolean"},
"growType": {"type": "string", "enum": ["fixed", "auto-height"]},
"hideInViewer": {"type": "boolean"},
"r1": {"type": "number"},
"r2": {"type": "number"},
"r3": {"type": "number"},
"r4": {"type": "number"},
"proportion": {"type": "number"},
"proportionLock": {"type": "boolean"},
"componentRoot": {"type": "boolean"},
"componentId": {"type": "string", "format": "uuid"},
"mainInstance": {"type": "boolean"},
"componentFile": {"type": "string", "format": "uuid"},
"strokes": {
"type": "array",
"items": {
"type": "object",
"properties": {
"strokeStyle": {"type": "string"},
"strokeAlignment": {"type": "string"},
"strokeWidth": {"type": "number"},
"strokeColor": {"type": "string"},
"strokeOpacity": {"type": "number"}
}
}
},
"fills": {
"type": "array",
"items": {
"type": "object",
"properties": {
"fillColor": {"type": "string"},
"fillOpacity": {"type": "number"},
"fillImage": {
"type": "object",
"properties": {
"name": {"type": "string"},
"width": {"type": "number"},
"height": {"type": "number"},
"mtype": {"type": "string"},
"id": {"type": "string", "format": "uuid"},
"keepAspectRatio": {"type": "boolean"}
}
}
}
}
},
"shapes": {
"type": "array",
"items": {"type": "string", "format": "uuid"}
},
"content": {
"type": "object",
"properties": {
"type": {"type": "string"},
"children": {"type": "array"}
}
},
"appliedTokens": {"type": "object"},
"positionData": {"type": "array"},
"layoutItemMarginType": {"type": "string"},
"constraintsV": {"type": "string"},
"constraintsH": {"type": "string"},
"layoutItemMargin": {"type": "object"},
"layoutGapType": {"type": "string"},
"layoutPadding": {"type": "object"},
"layoutWrapType": {"type": "string"},
"layout": {"type": "string"},
"layoutAlignItems": {"type": "string"},
"layoutPaddingType": {"type": "string"},
"layoutItemHSizing": {"type": "string"},
"layoutGap": {"type": "object"},
"layoutItemVSizing": {"type": "string"},
"layoutJustifyContent": {"type": "string"},
"layoutFlexDir": {"type": "string"},
"layoutAlignContent": {"type": "string"},
"shapeRef": {"type": "string", "format": "uuid"}
}
}
}
},
"id": {"type": "string", "format": "uuid"},
"name": {"type": "string"}
}
}
}
}
}
}


@@ -0,0 +1,295 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"required": ["colors", "typographies", "pages", "components", "id", "tokensLib", "pagesIndex"],
"properties": {
"colors": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["path", "color", "name", "modifiedAt", "opacity", "id"],
"properties": {
"path": {"type": "string"},
"color": {"type": "string", "pattern": "^#[0-9A-Fa-f]{6}$"},
"name": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"},
"opacity": {"type": "number", "minimum": 0, "maximum": 1},
"id": {"type": "string", "format": "uuid"}
}
}
}
},
"typographies": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["lineHeight", "path", "fontStyle", "textTransform", "fontId", "fontSize", "fontWeight", "name", "modifiedAt", "fontVariantId", "id", "letterSpacing", "fontFamily"],
"properties": {
"lineHeight": {"type": "string"},
"path": {"type": "string"},
"fontStyle": {"type": "string", "enum": ["normal"]},
"textTransform": {"type": "string", "enum": ["uppercase", "none"]},
"fontId": {"type": "string"},
"fontSize": {"type": "string"},
"fontWeight": {"type": "string"},
"name": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"},
"fontVariantId": {"type": "string"},
"id": {"type": "string", "format": "uuid"},
"letterSpacing": {"type": "string"},
"fontFamily": {"type": "string"}
}
}
}
},
"components": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["id", "name", "path", "modifiedAt", "mainInstanceId", "mainInstancePage"],
"properties": {
"id": {"type": "string", "format": "uuid"},
"name": {"type": "string"},
"path": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"},
"mainInstanceId": {"type": "string", "format": "uuid"},
"mainInstancePage": {"type": "string", "format": "uuid"},
"annotation": {"type": "string"}
}
}
}
},
"id": {"type": "string", "format": "uuid"},
"tokensLib": {
"type": "object",
"required": ["sets", "themes", "activeThemes"],
"properties": {
"sets": {
"type": "object",
"patternProperties": {
"^S-[a-z]+$": {
"type": "object",
"required": ["name", "description", "modifiedAt", "tokens"],
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"},
"tokens": {
"type": "object",
"patternProperties": {
"^[a-z][a-z0-9.-]*$": {
"type": "object",
"required": ["name", "type", "value", "description", "modifiedAt"],
"properties": {
"name": {"type": "string"},
"type": {"type": "string", "enum": ["dimensions", "sizing", "color", "border-radius", "spacing", "stroke-width", "rotation", "opacity"]},
"value": {"type": "string"},
"description": {"type": "string"},
"modifiedAt": {"type": "string", "format": "date-time"}
}
}
}
}
}
}
}
},
"themes": {
"type": "object",
"patternProperties": {
".*": {
"type": "object",
"patternProperties": {
".*": {
"type": "object",
"required": ["name", "group", "description", "isSource", "id", "modifiedAt", "sets"],
"properties": {
"name": {"type": "string"},
"group": {"type": "string"},
"description": {"type": "string"},
"isSource": {"type": "boolean"},
"id": {"type": "string", "format": "uuid"},
"modifiedAt": {"type": "string", "format": "date-time"},
"sets": {"type": "array", "items": {"type": "string"}}
}
}
}
}
}
},
"activeThemes": {
"type": "array",
"items": {"type": "string"}
}
}
},
"options": {
"type": "object",
"properties": {
"componentsV2": {"type": "boolean"}
}
},
"objects": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["options", "objects", "id", "name"],
"properties": {
"options": {"type": "object"},
"objects": {
"type": "object",
"patternProperties": {
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$": {
"type": "object",
"required": ["id", "name", "type"],
"properties": {
"id": {"type": "string", "format": "uuid"},
"name": {"type": "string"},
"type": {"type": "string", "enum": ["frame", "rect", "text"]},
"x": {"type": "number"},
"y": {"type": "number"},
"width": {"type": "number"},
"height": {"type": "number"},
"rotation": {"type": "number"},
"selrect": {
"type": "object",
"properties": {
"x": {"type": "number"},
"y": {"type": "number"},
"width": {"type": "number"},
"height": {"type": "number"},
"x1": {"type": "number"},
"y1": {"type": "number"},
"x2": {"type": "number"},
"y2": {"type": "number"}
}
},
"points": {
"type": "array",
"items": {
"type": "object",
"properties": {
"x": {"type": "number"},
"y": {"type": "number"}
}
}
},
"transform": {
"type": "object",
"properties": {
"a": {"type": "number"},
"b": {"type": "number"},
"c": {"type": "number"},
"d": {"type": "number"},
"e": {"type": "number"},
"f": {"type": "number"}
}
},
"transformInverse": {
"type": "object",
"properties": {
"a": {"type": "number"},
"b": {"type": "number"},
"c": {"type": "number"},
"d": {"type": "number"},
"e": {"type": "number"},
"f": {"type": "number"}
}
},
"parentId": {"type": "string", "format": "uuid"},
"frameId": {"type": "string", "format": "uuid"},
"flipX": {"type": ["null", "boolean"]},
"flipY": {"type": ["null", "boolean"]},
"hideFillOnExport": {"type": "boolean"},
"growType": {"type": "string", "enum": ["fixed", "auto-height"]},
"hideInViewer": {"type": "boolean"},
"r1": {"type": "number"},
"r2": {"type": "number"},
"r3": {"type": "number"},
"r4": {"type": "number"},
"proportion": {"type": "number"},
"proportionLock": {"type": "boolean"},
"componentRoot": {"type": "boolean"},
"componentId": {"type": "string", "format": "uuid"},
"mainInstance": {"type": "boolean"},
"componentFile": {"type": "string", "format": "uuid"},
"strokes": {
"type": "array",
"items": {
"type": "object",
"properties": {
"strokeStyle": {"type": "string"},
"strokeAlignment": {"type": "string"},
"strokeWidth": {"type": "number"},
"strokeColor": {"type": "string"},
"strokeOpacity": {"type": "number"}
}
}
},
"fills": {
"type": "array",
"items": {
"type": "object",
"properties": {
"fillColor": {"type": "string"},
"fillOpacity": {"type": "number"},
"fillImage": {
"type": "object",
"properties": {
"name": {"type": "string"},
"width": {"type": "number"},
"height": {"type": "number"},
"mtype": {"type": "string"},
"id": {"type": "string", "format": "uuid"},
"keepAspectRatio": {"type": "boolean"}
}
}
}
}
},
"shapes": {
"type": "array",
"items": {"type": "string", "format": "uuid"}
},
"content": {
"type": "object",
"properties": {
"type": {"type": "string"},
"children": {"type": "array"}
}
},
"appliedTokens": {"type": "object"},
"positionData": {"type": "array"},
"layoutItemMarginType": {"type": "string"},
"constraintsV": {"type": "string"},
"constraintsH": {"type": "string"},
"layoutItemMargin": {"type": "object"},
"layoutGapType": {"type": "string"},
"layoutPadding": {"type": "object"},
"layoutWrapType": {"type": "string"},
"layout": {"type": "string"},
"layoutAlignItems": {"type": "string"},
"layoutPaddingType": {"type": "string"},
"layoutItemHSizing": {"type": "string"},
"layoutGap": {"type": "object"},
"layoutItemVSizing": {"type": "string"},
"layoutJustifyContent": {"type": "string"},
"layoutFlexDir": {"type": "string"},
"layoutAlignContent": {"type": "string"},
"shapeRef": {"type": "string", "format": "uuid"}
}
}
}
},
"id": {"type": "string", "format": "uuid"},
"name": {"type": "string"}
}
}
}
}
}
}


@@ -0,0 +1 @@
"""Server implementation for the Penpot MCP server."""

penpot_mcp/server/client.py Normal file

@@ -0,0 +1,279 @@
"""Client for connecting to the Penpot MCP server."""
import asyncio
from typing import Any, Dict, List, Optional
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
class PenpotMCPClient:
"""Client for interacting with the Penpot MCP server."""
def __init__(self, server_command="python", server_args=None, env=None):
"""
Initialize the Penpot MCP client.
Args:
server_command: The command to run the server
server_args: Arguments to pass to the server command
env: Environment variables for the server process
"""
self.server_command = server_command
self.server_args = server_args or ["-m", "penpot_mcp.server.mcp_server"]
self.env = env
self.session = None
async def connect(self):
    """
    Connect to the MCP server.
    Returns:
        The client session
    """
    # Create server parameters for stdio connection
    server_params = StdioServerParameters(
        command=self.server_command,
        args=self.server_args,
        env=self.env,
    )
    # Enter the transport and session context managers explicitly and keep
    # references so disconnect() can exit them; discarding the stdio_client
    # context manager would leak the spawned server process
    self._stdio_ctx = stdio_client(server_params)
    read, write = await self._stdio_ctx.__aenter__()
    self._session_ctx = ClientSession(read, write)
    self.session = await self._session_ctx.__aenter__()
    # Initialize the connection
    await self.session.initialize()
    return self.session
async def disconnect(self):
    """Disconnect from the server."""
    if self.session:
        await self._session_ctx.__aexit__(None, None, None)
        await self._stdio_ctx.__aexit__(None, None, None)
        self.session = None
async def list_resources(self) -> List[Dict[str, Any]]:
"""
List available resources from the server.
Returns:
List of resource information
"""
if not self.session:
raise RuntimeError("Not connected to server")
return await self.session.list_resources()
async def list_tools(self) -> List[Dict[str, Any]]:
"""
List available tools from the server.
Returns:
List of tool information
"""
if not self.session:
raise RuntimeError("Not connected to server")
return await self.session.list_tools()
async def get_server_info(self) -> Dict[str, Any]:
"""
Get server information.
Returns:
Server information
"""
if not self.session:
raise RuntimeError("Not connected to server")
info, _ = await self.session.read_resource("server://info")
return info
async def list_projects(self) -> Dict[str, Any]:
"""
List Penpot projects.
Returns:
Project information
"""
if not self.session:
raise RuntimeError("Not connected to server")
return await self.session.call_tool("list_projects")
async def get_project(self, project_id: str) -> Dict[str, Any]:
"""
Get details for a specific project.
Args:
project_id: The project ID
Returns:
Project information
"""
if not self.session:
raise RuntimeError("Not connected to server")
return await self.session.call_tool("get_project", {"project_id": project_id})
async def get_project_files(self, project_id: str) -> Dict[str, Any]:
"""
Get files for a specific project.
Args:
project_id: The project ID
Returns:
File information
"""
if not self.session:
raise RuntimeError("Not connected to server")
return await self.session.call_tool("get_project_files", {"project_id": project_id})
async def get_file(self, file_id: str, features: Optional[List[str]] = None,
project_id: Optional[str] = None) -> Dict[str, Any]:
"""
Get details for a specific file.
Args:
file_id: The file ID
features: List of features to include
project_id: Optional project ID
Returns:
File information
"""
if not self.session:
raise RuntimeError("Not connected to server")
params = {"file_id": file_id}
if features:
params["features"] = features
if project_id:
params["project_id"] = project_id
return await self.session.call_tool("get_file", params)
async def get_components(self) -> Dict[str, Any]:
"""
Get components from the server.
Returns:
Component information
"""
if not self.session:
raise RuntimeError("Not connected to server")
components, _ = await self.session.read_resource("content://components")
return components
async def export_object(self, file_id: str, page_id: str, object_id: str,
export_type: str = "png", scale: int = 1,
save_to_file: Optional[str] = None) -> Dict[str, Any]:
"""
Export an object from a Penpot file.
Args:
file_id: The ID of the file containing the object
page_id: The ID of the page containing the object
object_id: The ID of the object to export
export_type: Export format (png, svg, pdf)
scale: Scale factor for the export
save_to_file: Optional path to save the exported file
Returns:
If save_to_file is None: Dictionary with the exported image data
If save_to_file is provided: Dictionary with the saved file path
"""
if not self.session:
raise RuntimeError("Not connected to server")
params = {
"file_id": file_id,
"page_id": page_id,
"object_id": object_id,
"export_type": export_type,
"scale": scale
}
result = await self.session.call_tool("export_object", params)
# The result is now directly an Image object which has 'data' and 'format' fields
# If the client wants to save the file
if save_to_file:
import os
# Create directory if it doesn't exist
os.makedirs(os.path.dirname(os.path.abspath(save_to_file)), exist_ok=True)
# Save to file
with open(save_to_file, "wb") as f:
f.write(result["data"])
return {"file_path": save_to_file, "format": result.get("format")}
# Otherwise return the result as is
return result
async def run_client_example():
"""Run a simple example using the client."""
# Create and connect the client
client = PenpotMCPClient()
await client.connect()
try:
# Get server info
print("Getting server info...")
server_info = await client.get_server_info()
print(f"Server info: {server_info}")
# List projects
print("\nListing projects...")
projects_result = await client.list_projects()
if "error" in projects_result:
print(f"Error: {projects_result['error']}")
else:
projects = projects_result.get("projects", [])
print(f"Found {len(projects)} projects:")
for project in projects[:5]: # Show first 5 projects
print(f"- {project.get('name', 'Unknown')} (ID: {project.get('id', 'N/A')})")
# Example of exporting an object (uncomment and update with actual IDs to test)
"""
print("\nExporting object...")
# Replace with actual IDs from your Penpot account
export_result = await client.export_object(
file_id="your-file-id",
page_id="your-page-id",
object_id="your-object-id",
export_type="png",
scale=2,
save_to_file="exported_object.png"
)
print(f"Export saved to: {export_result.get('file_path')}")
# Or get the image data directly without saving
image_data = await client.export_object(
file_id="your-file-id",
page_id="your-page-id",
object_id="your-object-id"
)
print(f"Received image in format: {image_data.get('format')}")
print(f"Image size: {len(image_data.get('data'))} bytes")
"""
finally:
# Disconnect from the server
await client.disconnect()
def main():
"""Run the client example."""
asyncio.run(run_client_example())
if __name__ == "__main__":
main()


@@ -0,0 +1,431 @@
"""
Main MCP server implementation for Penpot.
This module defines the MCP server with resources and tools for interacting with
the Penpot design platform.
"""
import argparse
import hashlib
import json
import os
import re
import sys
from typing import Dict, List, Optional
from mcp.server.fastmcp import FastMCP, Image
from penpot_mcp.api.penpot_api import PenpotAPI
from penpot_mcp.tools.penpot_tree import get_object_subtree_with_fields
from penpot_mcp.utils import config
from penpot_mcp.utils.cache import MemoryCache
from penpot_mcp.utils.http_server import ImageServer
class PenpotMCPServer:
"""Penpot MCP Server implementation."""
def __init__(self, name="Penpot MCP Server", test_mode=False):
"""
Initialize the Penpot MCP Server.
Args:
name: Server name
test_mode: If True, certain features like HTTP server will be disabled for testing
"""
# Initialize the MCP server
self.mcp = FastMCP(name, instructions="""
I can help you generate code from your Penpot UI designs. My primary aim is to convert Penpot design components into functional code.
The typical workflow for code generation from Penpot designs is:
1. List your projects using 'list_projects' to find the project containing your designs
2. List files within the project using 'get_project_files' to locate the specific design file
3. Search for the target component within the file using 'search_object' to find the component you want to convert
4. Retrieve the Penpot tree schema using 'penpot_tree_schema' to understand which fields are available in the object tree
5. Get a cropped version of the object tree with a screenshot using 'get_object_tree' to see the component structure and visual representation
6. Get the full screenshot of the object using 'get_rendered_component' for detailed visual reference
For complex designs, you may need multiple iterations of 'get_object_tree' and 'get_rendered_component' due to LLM context limits.
Use the resources to access schemas, cached files, and rendered objects (screenshots) as needed.
Let me know which Penpot design you'd like to convert to code, and I'll guide you through the process!
""")
# Initialize the Penpot API
self.api = PenpotAPI(
base_url=config.PENPOT_API_URL,
debug=config.DEBUG
)
# Initialize memory cache
self.file_cache = MemoryCache(ttl_seconds=600) # 10 minutes
# Storage for rendered component images
self.rendered_components: Dict[str, Image] = {}
# Initialize HTTP server for images if enabled and not in test mode
self.image_server = None
self.image_server_url = None
# Detect if running in a test environment
is_test_env = test_mode or 'pytest' in sys.modules
if config.ENABLE_HTTP_SERVER and not is_test_env:
try:
self.image_server = ImageServer(
host=config.HTTP_SERVER_HOST,
port=config.HTTP_SERVER_PORT
)
# Start the server and get the URL with actual port assigned
self.image_server_url = self.image_server.start()
print(f"Image server started at {self.image_server_url}")
except Exception as e:
print(f"Warning: Failed to start image server: {str(e)}")
# Register resources and tools
if config.RESOURCES_AS_TOOLS:
self._register_resources(resources_only=True)
self._register_tools(include_resource_tools=True)
else:
self._register_resources(resources_only=False)
self._register_tools(include_resource_tools=False)
def _register_resources(self, resources_only=False):
"""Register all MCP resources. If resources_only is True, only register server://info as a resource."""
@self.mcp.resource("server://info")
def server_info() -> dict:
"""Provide information about the server."""
info = {
"status": "online",
"name": "Penpot MCP Server",
"description": "Model Context Provider for Penpot",
"api_url": config.PENPOT_API_URL
}
if self.image_server and self.image_server.is_running:
info["image_server"] = self.image_server_url
return info
if resources_only:
return
@self.mcp.resource("penpot://schema", mime_type="application/schema+json")
def penpot_schema() -> dict:
"""Provide the Penpot API schema as JSON."""
schema_path = os.path.join(config.RESOURCES_PATH, 'penpot-schema.json')
try:
with open(schema_path, 'r') as f:
return json.load(f)
except Exception as e:
return {"error": f"Failed to load schema: {str(e)}"}
@self.mcp.resource("penpot://tree-schema", mime_type="application/schema+json")
def penpot_tree_schema() -> dict:
"""Provide the Penpot object tree schema as JSON."""
schema_path = os.path.join(config.RESOURCES_PATH, 'penpot-tree-schema.json')
try:
with open(schema_path, 'r') as f:
return json.load(f)
except Exception as e:
return {"error": f"Failed to load tree schema: {str(e)}"}
@self.mcp.resource("rendered-component://{component_id}", mime_type="image/png")
def get_rendered_component(component_id: str) -> Image:
"""Return a rendered component image by its ID."""
if component_id in self.rendered_components:
return self.rendered_components[component_id]
raise Exception(f"Component with ID {component_id} not found")
@self.mcp.resource("penpot://cached-files")
def get_cached_files() -> dict:
"""List all files currently stored in the cache."""
return self.file_cache.get_all_cached_files()
def _register_tools(self, include_resource_tools=False):
"""Register all MCP tools. If include_resource_tools is True, also register resource logic as tools."""
@self.mcp.tool()
def list_projects() -> dict:
"""Retrieve a list of all available Penpot projects."""
try:
projects = self.api.list_projects()
return {"projects": projects}
except Exception as e:
return {"error": str(e)}
@self.mcp.tool()
def get_project_files(project_id: str) -> dict:
"""Get all files contained within a specific Penpot project.
Args:
project_id: The ID of the Penpot project
"""
try:
files = self.api.get_project_files(project_id)
return {"files": files}
except Exception as e:
return {"error": str(e)}
def get_cached_file(file_id: str) -> dict:
"""Internal helper to retrieve a file, using cache if available.
Args:
file_id: The ID of the Penpot file
"""
cached_data = self.file_cache.get(file_id)
if cached_data is not None:
return cached_data
try:
file_data = self.api.get_file(file_id=file_id)
self.file_cache.set(file_id, file_data)
return file_data
except Exception as e:
return {"error": str(e)}
@self.mcp.tool()
def get_file(file_id: str) -> dict:
"""Retrieve a Penpot file by its ID and cache it. Don't use this tool for code generation, use 'get_object_tree' instead.
Args:
file_id: The ID of the Penpot file
"""
try:
file_data = self.api.get_file(file_id=file_id)
self.file_cache.set(file_id, file_data)
return file_data
except Exception as e:
return {"error": str(e)}
@self.mcp.tool()
def export_object(
file_id: str,
page_id: str,
object_id: str,
export_type: str = "png",
scale: int = 1) -> Image:
"""Export a Penpot design object as an image.
Args:
file_id: The ID of the Penpot file
page_id: The ID of the page containing the object
object_id: The ID of the object to export
export_type: Image format (png, svg, etc.)
scale: Scale factor for the exported image
"""
temp_filename = None
try:
import tempfile
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, f"{object_id}.{export_type}")
output_path = self.api.export_and_download(
file_id=file_id,
page_id=page_id,
object_id=object_id,
export_type=export_type,
scale=scale,
save_to_file=temp_filename
)
with open(output_path, "rb") as f:
file_content = f.read()
image = Image(data=file_content, format=export_type)
# If HTTP server is enabled, add the image to the server
if self.image_server and self.image_server.is_running:
image_id = hashlib.md5(f"{file_id}:{page_id}:{object_id}".encode()).hexdigest()
# Use the current image_server_url to ensure the correct port
image_url = self.image_server.add_image(image_id, file_content, export_type)
# Add HTTP URL to the image metadata
image.http_url = image_url
return image
except Exception as e:
raise Exception(f"Export failed: {str(e)}")
finally:
if temp_filename and os.path.exists(temp_filename):
try:
os.remove(temp_filename)
except Exception as e:
print(f"Warning: Failed to delete temporary file {temp_filename}: {str(e)}")
@self.mcp.tool()
def get_object_tree(
file_id: str,
object_id: str,
fields: List[str],
depth: int = -1,
format: str = "json"
) -> dict:
"""Get the object tree structure for a Penpot object ("tree" field) with rendered screenshot image of the object ("image.mcp_uri" field).
Args:
file_id: The ID of the Penpot file
object_id: The ID of the object to retrieve
fields: Specific fields to include in the tree (call "penpot_tree_schema" resource/tool for available fields)
depth: How deep to traverse the object tree (-1 for full depth)
format: Output format ('json' or 'yaml')
"""
try:
file_data = get_cached_file(file_id)
if "error" in file_data:
return file_data
result = get_object_subtree_with_fields(
file_data,
object_id,
include_fields=fields,
depth=depth
)
if "error" in result:
return result
simplified_tree = result["tree"]
page_id = result["page_id"]
final_result = {"tree": simplified_tree}
try:
image = export_object(
file_id=file_id,
page_id=page_id,
object_id=object_id
)
image_id = hashlib.md5(f"{file_id}:{object_id}".encode()).hexdigest()
self.rendered_components[image_id] = image
# Image URI preferences:
# 1. HTTP server URL if available
# 2. Fallback to MCP resource URI
image_uri = f"render_component://{image_id}"
if hasattr(image, 'http_url'):
final_result["image"] = {
"uri": image.http_url,
"mcp_uri": image_uri,
"format": image.format if hasattr(image, 'format') else "png"
}
else:
final_result["image"] = {
"uri": image_uri,
"format": image.format if hasattr(image, 'format') else "png"
}
except Exception as e:
final_result["image_error"] = str(e)
if format.lower() == "yaml":
try:
import yaml
yaml_result = yaml.dump(final_result, default_flow_style=False, sort_keys=False)
return {"yaml_result": yaml_result}
except ImportError:
return {"format_error": "YAML format requested but PyYAML package is not installed"}
except Exception as e:
return {"format_error": f"Error formatting as YAML: {str(e)}"}
return final_result
except Exception as e:
return {"error": str(e)}
@self.mcp.tool()
def search_object(file_id: str, query: str) -> dict:
"""Search for objects within a Penpot file by name.
Args:
file_id: The ID of the Penpot file to search in
query: Search string (supports regex patterns)
"""
try:
file_data = get_cached_file(file_id)
if "error" in file_data:
return file_data
pattern = re.compile(query, re.IGNORECASE)
matches = []
data = file_data.get('data', {})
for page_id, page_data in data.get('pagesIndex', {}).items():
page_name = page_data.get('name', 'Unnamed')
for obj_id, obj_data in page_data.get('objects', {}).items():
obj_name = obj_data.get('name', '')
if pattern.search(obj_name):
matches.append({
'id': obj_id,
'name': obj_name,
'page_id': page_id,
'page_name': page_name,
'object_type': obj_data.get('type', 'unknown')
})
return {'objects': matches}
except Exception as e:
return {"error": str(e)}
if include_resource_tools:
@self.mcp.tool()
def penpot_schema() -> dict:
"""Provide the Penpot API schema as JSON."""
schema_path = os.path.join(config.RESOURCES_PATH, 'penpot-schema.json')
try:
with open(schema_path, 'r') as f:
return json.load(f)
except Exception as e:
return {"error": f"Failed to load schema: {str(e)}"}
@self.mcp.tool()
def penpot_tree_schema() -> dict:
"""Provide the Penpot object tree schema as JSON."""
schema_path = os.path.join(config.RESOURCES_PATH, 'penpot-tree-schema.json')
try:
with open(schema_path, 'r') as f:
return json.load(f)
except Exception as e:
return {"error": f"Failed to load tree schema: {str(e)}"}
@self.mcp.tool()
def get_rendered_component(component_id: str) -> Image:
"""Return a rendered component image by its ID."""
if component_id in self.rendered_components:
return self.rendered_components[component_id]
raise Exception(f"Component with ID {component_id} not found")
@self.mcp.tool()
def get_cached_files() -> dict:
"""List all files currently stored in the cache."""
return self.file_cache.get_all_cached_files()
def run(self, port=None, debug=None, mode=None):
"""
Run the MCP server.
Args:
port: Port to run on (overrides config) - only used in 'sse' mode
debug: Debug mode (overrides config)
mode: MCP mode ('stdio' or 'sse', overrides config)
"""
        # Use provided values or fall back to config
        debug = debug if debug is not None else config.DEBUG
        port = port if port is not None else config.PORT
# Get mode from parameter, environment variable, or default to stdio
mode = mode or os.environ.get('MODE', 'stdio')
# Validate mode
if mode not in ['stdio', 'sse']:
print(f"Invalid mode: {mode}. Using stdio mode.")
mode = 'stdio'
if mode == 'sse':
print(f"Starting Penpot MCP Server on port {port} (debug={debug}, mode={mode})")
else:
print(f"Starting Penpot MCP Server (debug={debug}, mode={mode})")
# Start HTTP server if enabled and not already running
if config.ENABLE_HTTP_SERVER and self.image_server and not self.image_server.is_running:
try:
self.image_server_url = self.image_server.start()
except Exception as e:
print(f"Warning: Failed to start image server: {str(e)}")
self.mcp.run(mode)
def create_server():
"""Create and configure a new server instance."""
# Detect if running in a test environment
is_test_env = 'pytest' in sys.modules
return PenpotMCPServer(test_mode=is_test_env)
# Create a global server instance with a standard name for the MCP tool
server = create_server()
def main():
"""Entry point for the console script."""
parser = argparse.ArgumentParser(description='Run the Penpot MCP Server')
parser.add_argument('--port', type=int, help='Port to run on')
parser.add_argument('--debug', action='store_true', help='Enable debug mode')
parser.add_argument('--mode', choices=['stdio', 'sse'], default=os.environ.get('MODE', 'stdio'),
help='MCP mode (stdio or sse)')
args = parser.parse_args()
server.run(port=args.port, debug=args.debug, mode=args.mode)
if __name__ == "__main__":
main()
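# Example launch (the console-script name below is an assumption based on this
# package's entry points; adjust to your install):
#
#   penpot-mcp --mode sse --port 5000 --debug
#
# In the default stdio mode the server is usually spawned by an MCP client
# (e.g. Claude Desktop) rather than started by hand.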

View File

@@ -0,0 +1 @@
"""Tool implementations for the Penpot MCP server."""

View File

@@ -0,0 +1 @@
"""Command-line interface tools for Penpot MCP."""

View File

@@ -0,0 +1,62 @@
"""Command-line interface for the Penpot tree visualization tool."""
import argparse
import json
import sys
from typing import Any, Dict
from penpot_mcp.tools.penpot_tree import build_tree, export_tree_to_dot, print_tree
def parse_args() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='Generate a tree from a Penpot JSON file')
parser.add_argument('input_file', help='Path to the Penpot JSON file')
parser.add_argument('--filter', '-f', help='Filter nodes by regex pattern')
parser.add_argument('--export', '-e', help='Export tree to a file (supports PNG, SVG, etc.)')
return parser.parse_args()
def load_penpot_file(file_path: str) -> Dict[str, Any]:
"""
Load a Penpot JSON file.
Args:
file_path: Path to the JSON file
Returns:
The loaded JSON data
Raises:
FileNotFoundError: If the file doesn't exist
json.JSONDecodeError: If the file isn't valid JSON
"""
try:
with open(file_path, 'r') as f:
return json.load(f)
except FileNotFoundError:
sys.exit(f"Error: File not found: {file_path}")
except json.JSONDecodeError:
sys.exit(f"Error: Invalid JSON file: {file_path}")
def main() -> None:
"""Main entry point for the command."""
args = parse_args()
# Load the Penpot file
data = load_penpot_file(args.input_file)
# Build the tree
root = build_tree(data)
# Export the tree if requested
if args.export:
export_tree_to_dot(root, args.export, args.filter)
# Print the tree
print_tree(root, args.filter)
if __name__ == '__main__':
main()
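# Example invocations (the console-script name is an assumption; the module can
# also be run with `python -m ...` depending on how the package is installed):
#
#   penpot-tree exported-file.json
#   penpot-tree exported-file.json --filter "button" --export tree.png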

View File

@@ -0,0 +1,100 @@
"""Command-line interface for validating Penpot files against a schema."""
import argparse
import json
import os
import sys
from typing import Any, Dict, Optional, Tuple
from jsonschema import SchemaError, ValidationError, validate
from penpot_mcp.utils import config
def parse_args() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='Validate a Penpot JSON file against a schema')
parser.add_argument('input_file', help='Path to the Penpot JSON file to validate')
    parser.add_argument(
        '--schema', '-s',
        default=os.path.join(config.RESOURCES_PATH, 'penpot-schema.json'),
        help='Path to the JSON schema file (default: resources/penpot-schema.json)')
parser.add_argument('--verbose', '-v', action='store_true',
help='Enable verbose output with detailed validation errors')
return parser.parse_args()
def load_json_file(file_path: str) -> Dict[str, Any]:
"""
Load a JSON file.
Args:
file_path: Path to the JSON file
Returns:
The loaded JSON data
Raises:
FileNotFoundError: If the file doesn't exist
json.JSONDecodeError: If the file isn't valid JSON
"""
try:
with open(file_path, 'r') as f:
return json.load(f)
except FileNotFoundError:
sys.exit(f"Error: File not found: {file_path}")
except json.JSONDecodeError:
sys.exit(f"Error: Invalid JSON file: {file_path}")
def validate_penpot_file(data: Dict[str, Any],
                         schema: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
"""
Validate a Penpot file against a schema.
Args:
data: The Penpot file data
schema: The JSON schema
Returns:
Tuple of (is_valid, error_message)
"""
try:
validate(instance=data, schema=schema)
return True, None
except ValidationError as e:
return False, str(e)
except SchemaError as e:
return False, f"Schema error: {str(e)}"
def main() -> None:
"""Main entry point for the command."""
args = parse_args()
# Load the files
print(f"Loading Penpot file: {args.input_file}")
data = load_json_file(args.input_file)
print(f"Loading schema file: {args.schema}")
schema = load_json_file(args.schema)
# Validate the file
print("Validating file...")
is_valid, error = validate_penpot_file(data, schema)
if is_valid:
print("✅ Validation successful! The file conforms to the schema.")
else:
print("❌ Validation failed!")
if args.verbose and error:
print("\nError details:")
print(error)
sys.exit(1)
if __name__ == '__main__':
main()
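# Example invocations (console-script name assumed; --schema defaults to the
# bundled resources/penpot-schema.json):
#
#   penpot-validate my-file.json
#   penpot-validate my-file.json --schema custom-schema.json --verbose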

View File

@@ -0,0 +1,472 @@
"""
Tool for building and visualizing the structure of Penpot files as a tree.
This module provides functionality to parse Penpot file data and generate
a tree representation, which can be displayed or exported.
"""
import re
from typing import Any, Dict, Optional, Union, List
from anytree import Node, PreOrderIter, RenderTree
from anytree.exporter import DotExporter
def build_tree(data: Dict[str, Any]) -> Node:
"""
Build a tree representation of Penpot file data.
Args:
data: The Penpot file data
Returns:
The root node of the tree
"""
# Create nodes dictionary with ID as key
nodes = {}
# Create a synthetic root node with a special ID that won't conflict
synthetic_root_id = "SYNTHETIC-ROOT"
root = Node(f"{synthetic_root_id} (root) - Root")
nodes[synthetic_root_id] = root
# Add components section
components_node = Node(f"components (section) - Components", parent=root)
# Store component annotations for later reference
component_annotations = {}
# Process components
for comp_id, comp_data in data.get('components', {}).items():
comp_name = comp_data.get('name', 'Unnamed')
comp_node = Node(f"{comp_id} (component) - {comp_name}", parent=components_node)
nodes[comp_id] = comp_node
# Store annotation if present
if 'annotation' in comp_data and comp_data['annotation']:
component_annotations[comp_id] = comp_data['annotation']
# First pass: create all page nodes
for page_id, page_data in data.get('pagesIndex', {}).items():
# Create page node
page_name = page_data.get('name', 'Unnamed')
page_node = Node(f"{page_id} (page) - {page_name}", parent=root)
nodes[page_id] = page_node
# Second pass: process each page and its objects
for page_id, page_data in data.get('pagesIndex', {}).items():
page_name = page_data.get('name', 'Unnamed')
# Create a page-specific dictionary for objects to avoid ID collisions
page_nodes = {}
# First, create all object nodes for this page
for obj_id, obj_data in page_data.get('objects', {}).items():
obj_type = obj_data.get('type', 'unknown')
obj_name = obj_data.get('name', 'Unnamed')
            # page_nodes is scoped to this page, so original object IDs
            # cannot collide with IDs from other pages
            node = Node(f"{obj_id} ({obj_type}) - {obj_name}")
            page_nodes[obj_id] = node
# Store additional properties for filtering
node.obj_type = obj_type
node.obj_name = obj_name
node.obj_id = obj_id
# Add component reference if this is a component instance
if 'componentId' in obj_data and obj_data['componentId'] in nodes:
comp_ref = obj_data['componentId']
node.componentRef = comp_ref
# If this component has an annotation, store it
if comp_ref in component_annotations:
node.componentAnnotation = component_annotations[comp_ref]
# Identify the all-zeros root frame for this page
all_zeros_id = "00000000-0000-0000-0000-000000000000"
page_root_frame = None
# First, find and connect the all-zeros root frame if it exists
if all_zeros_id in page_data.get('objects', {}):
page_root_frame = page_nodes[all_zeros_id]
page_root_frame.parent = nodes[page_id]
# Then build parent-child relationships for this page
for obj_id, obj_data in page_data.get('objects', {}).items():
# Skip the all-zeros root frame as we already processed it
if obj_id == all_zeros_id:
continue
parent_id = obj_data.get('parentId')
# Skip if parent ID is the same as object ID (circular reference)
if parent_id and parent_id == obj_id:
            print(f"Warning: Object {obj_id} references itself as parent. Attaching to page instead.")
page_nodes[obj_id].parent = nodes[page_id]
elif parent_id and parent_id in page_nodes:
# Check for circular references in the node hierarchy
is_circular = False
check_node = page_nodes[parent_id]
while check_node.parent is not None:
if hasattr(check_node.parent, 'obj_id') and check_node.parent.obj_id == obj_id:
is_circular = True
break
check_node = check_node.parent
if is_circular:
                print(f"Warning: Circular reference detected for {obj_id}. Attaching to page instead.")
page_nodes[obj_id].parent = nodes[page_id]
else:
page_nodes[obj_id].parent = page_nodes[parent_id]
else:
# If no parent or parent not found, connect to the all-zeros root frame if it exists,
# otherwise connect to the page
if page_root_frame:
page_nodes[obj_id].parent = page_root_frame
else:
page_nodes[obj_id].parent = nodes[page_id]
return root
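# Minimal sketch (illustrative data, not a real Penpot export): build_tree expects
# the "data" payload of a Penpot file, i.e. a dict with "components" and
# "pagesIndex" keys. The IDs below are made up.
def _build_tree_demo() -> Node:
    data = {
        "components": {},
        "pagesIndex": {
            "page-1": {
                "name": "Home",
                "objects": {
                    "00000000-0000-0000-0000-000000000000": {
                        "type": "frame", "name": "Root Frame"},
                    "obj-1": {
                        "type": "rect", "name": "Hero",
                        "parentId": "00000000-0000-0000-0000-000000000000"},
                },
            }
        },
    }
    return build_tree(data)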
def print_tree(root: Node, filter_pattern: Optional[str] = None) -> None:
"""
Print a tree representation to the console, with optional filtering.
Args:
root: The root node of the tree
filter_pattern: Optional regex pattern to filter nodes
"""
matched_nodes = []
# Apply filtering
if filter_pattern:
# Find all nodes that match the filter
pattern = re.compile(filter_pattern, re.IGNORECASE)
# Helper function to check if a node matches the filter
def matches_filter(node):
if not hasattr(node, 'obj_type') and not hasattr(node, 'obj_name'):
return False # Root node or section nodes
            return bool(pattern.search(node.obj_type)
                        or pattern.search(node.obj_name)
                        or pattern.search(node.obj_id))
# Find all matching nodes and their paths to root
        for node in PreOrderIter(root):
            if matches_filter(node):
                matched_nodes.append(node)
# If we found matches, only print these nodes and their ancestors
if matched_nodes:
print(f"Filtered results matching '{filter_pattern}':")
# Build a set of all nodes to show (matching nodes and their ancestors)
nodes_to_show = set()
for node in matched_nodes:
# Add the node and all its ancestors
current = node
while current is not None:
nodes_to_show.add(current)
current = current.parent
# Print the filtered tree
for pre, _, node in RenderTree(root):
if node in nodes_to_show:
node_name = node.name
if hasattr(node, 'componentRef'):
comp_ref_str = f" (refs component: {node.componentRef}"
if hasattr(node, 'componentAnnotation'):
comp_ref_str += f" - Note: {node.componentAnnotation}"
comp_ref_str += ")"
node_name += comp_ref_str
# Highlight matched nodes
if node in matched_nodes:
print(f"{pre}{node_name} <-- MATCH")
else:
print(f"{pre}{node_name}")
print(f"\nFound {len(matched_nodes)} matching objects.")
return
# If no filter or no matches, print the entire tree
for pre, _, node in RenderTree(root):
node_name = node.name
if hasattr(node, 'componentRef'):
comp_ref_str = f" (refs component: {node.componentRef}"
if hasattr(node, 'componentAnnotation'):
comp_ref_str += f" - Note: {node.componentAnnotation}"
comp_ref_str += ")"
node_name += comp_ref_str
print(f"{pre}{node_name}")
def export_tree_to_dot(root: Node, output_file: str, filter_pattern: Optional[str] = None) -> bool:
"""
Export the tree to a DOT file (Graphviz format).
Args:
root: The root node of the tree
output_file: Path to save the exported file
filter_pattern: Optional regex pattern to filter nodes
Returns:
True if successful, False otherwise
"""
try:
# If filtering, we may want to only export the filtered tree
if filter_pattern:
# TODO: Implement filtered export
pass
DotExporter(root).to_picture(output_file)
print(f"Tree exported to {output_file}")
return True
except Exception as e:
print(f"Warning: Could not export to {output_file}: {e}")
print("Make sure Graphviz is installed: https://graphviz.org/download/")
return False
def find_page_containing_object(content: Dict[str, Any], object_id: str) -> Optional[str]:
"""
Find which page contains the specified object.
Args:
content: The Penpot file content
object_id: The ID of the object to find
Returns:
The page ID containing the object, or None if not found
"""
# Helper function to recursively search for an object in the hierarchy
def find_object_in_hierarchy(objects_dict, target_id):
# Check if the object is directly in the dictionary
if target_id in objects_dict:
return True
# Check if the object is a child of any object in the dictionary
for obj_id, obj_data in objects_dict.items():
# Look for objects that have shapes (children)
if "shapes" in obj_data and target_id in obj_data["shapes"]:
return True
# Check in children elements if any
if "children" in obj_data:
child_objects = {child["id"]: child for child in obj_data["children"]}
if find_object_in_hierarchy(child_objects, target_id):
return True
return False
# Check each page
for page_id, page_data in content.get('pagesIndex', {}).items():
objects_dict = page_data.get('objects', {})
if find_object_in_hierarchy(objects_dict, object_id):
return page_id
return None
def find_object_in_tree(tree: Node, target_id: str) -> Optional[Dict[str, Any]]:
"""
Find an object in the tree by its ID and return its subtree as a dictionary.
Args:
tree: The root node of the tree
target_id: The ID of the object to find
Returns:
Dictionary representation of the object's subtree, or None if not found
"""
# Helper function to search in a node's children
def find_object_in_children(node, target_id):
for child in node.children:
if hasattr(child, 'obj_id') and child.obj_id == target_id:
return convert_node_to_dict(child)
result = find_object_in_children(child, target_id)
if result:
return result
return None
# Iterate through the tree's children
for child in tree.children:
# Check if this is a page node (contains "(page)" in its name)
if "(page)" in child.name:
# Check all objects under this page
for obj in child.children:
if hasattr(obj, 'obj_id') and obj.obj_id == target_id:
return convert_node_to_dict(obj)
# Check children recursively
result = find_object_in_children(obj, target_id)
if result:
return result
return None
def convert_node_to_dict(node: Node) -> Dict[str, Any]:
"""
Convert an anytree.Node to a dictionary format for API response.
Args:
node: The node to convert
Returns:
Dictionary representation of the node and its subtree
"""
result = {
'id': node.obj_id if hasattr(node, 'obj_id') else None,
'type': node.obj_type if hasattr(node, 'obj_type') else None,
'name': node.obj_name if hasattr(node, 'obj_name') else None,
'children': []
}
# Add component reference if available
if hasattr(node, 'componentRef'):
result['componentRef'] = node.componentRef
# Add component annotation if available
if hasattr(node, 'componentAnnotation'):
result['componentAnnotation'] = node.componentAnnotation
# Recursively add children
for child in node.children:
result['children'].append(convert_node_to_dict(child))
return result
def get_object_subtree(file_data: Dict[str, Any], object_id: str) -> Dict[str, Union[Dict, str]]:
"""
Get a simplified tree representation of an object and its children.
Args:
file_data: The Penpot file data
object_id: The ID of the object to get the tree for
Returns:
Dictionary containing the simplified tree or an error message
"""
try:
# Get the content from file data
        content = file_data.get('data', file_data)
# Find which page contains the object
page_id = find_page_containing_object(content, object_id)
if not page_id:
return {"error": f"Object {object_id} not found in file"}
# Build the full tree
full_tree = build_tree(content)
# Find the object in the full tree and extract its subtree
simplified_tree = find_object_in_tree(full_tree, object_id)
if not simplified_tree:
return {"error": f"Object {object_id} not found in tree structure"}
return {
"tree": simplified_tree,
"page_id": page_id
}
except Exception as e:
return {"error": str(e)}
def get_object_subtree_with_fields(file_data: Dict[str, Any], object_id: str,
include_fields: Optional[List[str]] = None,
depth: int = -1) -> Dict[str, Any]:
"""
Get a filtered tree representation of an object with only specified fields.
This function finds an object in the Penpot file data and returns a subtree
with the object as the root, including only the specified fields and limiting
the depth of the tree if requested.
Args:
file_data: The Penpot file data
object_id: The ID of the object to get the tree for
include_fields: List of field names to include in the output (None means include all)
depth: Maximum depth of the tree (-1 means no limit)
Returns:
Dictionary containing the filtered tree or an error message
"""
try:
# Get the content from file data
content = file_data.get('data', file_data)
# Find which page contains the object
page_id = find_page_containing_object(content, object_id)
if not page_id:
return {"error": f"Object {object_id} not found in file"}
# Get the page data
page_data = content.get('pagesIndex', {}).get(page_id, {})
objects_dict = page_data.get('objects', {})
# Check if the object exists in this page
if object_id not in objects_dict:
return {"error": f"Object {object_id} not found in page {page_id}"}
# Function to recursively build the filtered object tree
def build_filtered_object_tree(obj_id: str, current_depth: int = 0):
if obj_id not in objects_dict:
return None
obj_data = objects_dict[obj_id]
# Create a new dict with only the requested fields or all fields if None
if include_fields is None:
filtered_obj = obj_data.copy()
else:
filtered_obj = {field: obj_data[field] for field in include_fields if field in obj_data}
# Always include the id field
filtered_obj['id'] = obj_id
# If depth limit reached, don't process children
if depth != -1 and current_depth >= depth:
return filtered_obj
# Find all children of this object
children = []
for child_id, child_data in objects_dict.items():
if child_data.get('parentId') == obj_id:
child_tree = build_filtered_object_tree(child_id, current_depth + 1)
if child_tree:
children.append(child_tree)
# Add children field only if we have children
if children:
filtered_obj['children'] = children
return filtered_obj
# Build the filtered tree starting from the requested object
object_tree = build_filtered_object_tree(object_id)
if not object_tree:
return {"error": f"Failed to build object tree for {object_id}"}
return {
"tree": object_tree,
"page_id": page_id
}
except Exception as e:
return {"error": str(e)}

View File

@@ -0,0 +1 @@
"""Utility functions and helper modules for the Penpot MCP server."""

83
penpot_mcp/utils/cache.py Normal file
View File

@@ -0,0 +1,83 @@
"""
Cache utilities for Penpot MCP server.
"""
import time
from typing import Optional, Dict, Any
class MemoryCache:
"""In-memory cache implementation with TTL support."""
def __init__(self, ttl_seconds: int = 600):
"""
Initialize the memory cache.
Args:
ttl_seconds: Time to live in seconds (default 10 minutes)
"""
self.ttl_seconds = ttl_seconds
self._cache: Dict[str, Dict[str, Any]] = {}
def get(self, file_id: str) -> Optional[Dict[str, Any]]:
"""
Get a file from cache if it exists and is not expired.
Args:
file_id: The ID of the file to retrieve
Returns:
The cached file data or None if not found/expired
"""
if file_id not in self._cache:
return None
cache_data = self._cache[file_id]
# Check if cache is expired
if time.time() - cache_data['timestamp'] > self.ttl_seconds:
del self._cache[file_id] # Remove expired cache
return None
return cache_data['data']
def set(self, file_id: str, data: Dict[str, Any]) -> None:
"""
Store a file in cache.
Args:
file_id: The ID of the file to cache
data: The file data to cache
"""
self._cache[file_id] = {
'timestamp': time.time(),
'data': data
}
def clear(self) -> None:
"""Clear all cached files."""
self._cache.clear()
def get_all_cached_files(self) -> Dict[str, Dict[str, Any]]:
"""
Get all valid cached files.
Returns:
Dictionary mapping file IDs to their cached data
"""
result = {}
current_time = time.time()
# Create a list of expired keys to remove
expired_keys = []
for file_id, cache_data in self._cache.items():
if current_time - cache_data['timestamp'] <= self.ttl_seconds:
result[file_id] = cache_data['data']
else:
expired_keys.append(file_id)
# Remove expired entries
for key in expired_keys:
del self._cache[key]
return result
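# Usage sketch (illustrative ID and payload): entries expire ttl_seconds after
# they are set and are evicted lazily on read.
def _cache_demo() -> None:
    cache = MemoryCache(ttl_seconds=2)
    cache.set("file-123", {"name": "Landing page"})
    assert cache.get("file-123") == {"name": "Landing page"}
    time.sleep(2.1)                       # wait past the TTL
    assert cache.get("file-123") is None  # expired entry removed on access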

View File

@@ -0,0 +1,25 @@
"""Configuration module for the Penpot MCP server."""
import os
from dotenv import find_dotenv, load_dotenv
# Load environment variables
load_dotenv(find_dotenv())
# Server configuration
PORT = int(os.environ.get('PORT', 5000))
DEBUG = os.environ.get('DEBUG', 'true').lower() == 'true'
RESOURCES_AS_TOOLS = os.environ.get('RESOURCES_AS_TOOLS', 'true').lower() == 'true'
# HTTP server for exported images
ENABLE_HTTP_SERVER = os.environ.get('ENABLE_HTTP_SERVER', 'true').lower() == 'true'
HTTP_SERVER_HOST = os.environ.get('HTTP_SERVER_HOST', 'localhost')
HTTP_SERVER_PORT = int(os.environ.get('HTTP_SERVER_PORT', 0))
# Penpot API configuration
PENPOT_API_URL = os.environ.get('PENPOT_API_URL', 'https://design.penpot.app/api')
PENPOT_USERNAME = os.environ.get('PENPOT_USERNAME')
PENPOT_PASSWORD = os.environ.get('PENPOT_PASSWORD')
RESOURCES_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'resources')
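# Example .env consumed by this module (values are placeholders):
#
#   PENPOT_API_URL=https://design.penpot.app/api
#   PENPOT_USERNAME=user@example.com
#   PENPOT_PASSWORD=secret
#   PORT=5000
#   ENABLE_HTTP_SERVER=true
#   HTTP_SERVER_PORT=0        # 0 = pick a random free port
#   RESOURCES_AS_TOOLS=true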

View File

@@ -0,0 +1,128 @@
"""HTTP server module for serving exported images from memory."""
import io
import json
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
import socketserver
class InMemoryImageHandler(BaseHTTPRequestHandler):
"""HTTP request handler for serving images stored in memory."""
# Class variable to store images
images = {}
def do_GET(self):
"""Handle GET requests."""
# Remove query parameters if any
path = self.path.split('?', 1)[0]
path = path.split('#', 1)[0]
# Extract image ID from path
# Expected path format: /images/{image_id}.{format}
parts = path.split('/')
if len(parts) == 3 and parts[1] == 'images':
# Extract image_id by removing the file extension if present
image_id_with_ext = parts[2]
image_id = image_id_with_ext.split('.')[0]
if image_id in self.images:
img_data = self.images[image_id]['data']
img_format = self.images[image_id]['format']
# Set content type based on format
content_type = f"image/{img_format}"
if img_format == 'svg':
content_type = 'image/svg+xml'
self.send_response(200)
self.send_header('Content-type', content_type)
self.send_header('Content-length', len(img_data))
self.end_headers()
self.wfile.write(img_data)
return
# Return 404 if image not found
self.send_response(404)
self.send_header('Content-type', 'application/json')
self.end_headers()
response = {'error': 'Image not found'}
self.wfile.write(json.dumps(response).encode())
class ImageServer:
"""Server for in-memory images."""
def __init__(self, host='localhost', port=0):
"""Initialize the HTTP server.
Args:
host: Host address to listen on
port: Port to listen on (0 means use a random available port)
"""
self.host = host
self.port = port
self.server = None
self.server_thread = None
self.is_running = False
self.base_url = None
def start(self):
"""Start the HTTP server in a background thread.
Returns:
Base URL of the server with actual port used
"""
if self.is_running:
return self.base_url
# Create TCP server with address reuse enabled
class ReuseAddressTCPServer(socketserver.TCPServer):
allow_reuse_address = True
self.server = ReuseAddressTCPServer((self.host, self.port), InMemoryImageHandler)
# Get the actual port that was assigned
self.port = self.server.socket.getsockname()[1]
self.base_url = f"http://{self.host}:{self.port}"
# Start server in a separate thread
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True # Don't keep process running if main thread exits
self.server_thread.start()
self.is_running = True
print(f"Image server started at {self.base_url}")
return self.base_url
def stop(self):
"""Stop the HTTP server."""
if not self.is_running:
return
self.server.shutdown()
self.server.server_close()
self.is_running = False
print("Image server stopped")
def add_image(self, image_id, image_data, image_format='png'):
"""Add image to in-memory storage.
Args:
image_id: Unique identifier for the image
image_data: Binary image data
image_format: Image format (png, jpg, etc.)
Returns:
URL to access the image
"""
InMemoryImageHandler.images[image_id] = {
'data': image_data,
'format': image_format
}
return f"{self.base_url}/images/{image_id}.{image_format}"
def remove_image(self, image_id):
"""Remove image from in-memory storage."""
if image_id in InMemoryImageHandler.images:
del InMemoryImageHandler.images[image_id]
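# Usage sketch (illustrative): serve a single in-memory image. The payload below
# is placeholder bytes, not a valid PNG.
def _image_server_demo() -> None:
    server = ImageServer(host="localhost", port=0)  # port 0: free port chosen at start()
    base_url = server.start()
    url = server.add_image("demo", b"\x89PNG\r\n\x1a\n", image_format="png")
    print(f"Serving at {base_url}; image available at {url}")
    server.stop()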