Primary Colors
Secondary Colors
Typography
Heading 1
Body Text
Caption
4.5:1 minimum contrast ratio for all text
Full keyboard navigation support
ARIA labels and semantic HTML
// analysisSlice.ts
import { createSlice, PayloadAction } from '@reduxjs/toolkit';
import type { CodeAnalysisResult } from '../types';
// Shape of the Redux slice holding the code-analysis workflow state.
interface AnalysisState {
  // Source code to analyze (updated, debounced, from the editor).
  code: string;
  // True while an /analyze request is in flight.
  isLoading: boolean;
  // Latest analysis results, or null before the first successful run.
  results: CodeAnalysisResult | null;
  // Message from the last failed analysis, or null when none.
  error: string | null;
}
// Initial slice state: empty editor, idle, no results or errors yet.
const initialState: AnalysisState = {
  code: '',
  isLoading: false,
  results: null,
  error: null
};
/**
 * Redux Toolkit slice for the code-analysis workflow.
 * Reducers follow a start/success/failure pattern around the API call;
 * mutations are safe because createSlice wraps them in Immer drafts.
 */
const analysisSlice = createSlice({
  name: 'analysis',
  initialState,
  reducers: {
    // Store the latest editor contents.
    setCode(draft, action: PayloadAction<string>) {
      draft.code = action.payload;
    },
    // Mark a request as in flight and clear any stale error.
    analyzeCodeStart(draft) {
      draft.isLoading = true;
      draft.error = null;
    },
    // Record a successful analysis response.
    analyzeCodeSuccess(draft, action: PayloadAction<CodeAnalysisResult>) {
      draft.isLoading = false;
      draft.results = action.payload;
    },
    // Record a failed analysis attempt with its message.
    analyzeCodeFailure(draft, action: PayloadAction<string>) {
      draft.isLoading = false;
      draft.error = action.payload;
    }
  }
});
// Action creators generated by createSlice, re-exported for dispatch sites.
export const {
  setCode,
  analyzeCodeStart,
  analyzeCodeSuccess,
  analyzeCodeFailure
} = analysisSlice.actions;
// Reducer mounted under the 'analysis' key of the root store.
export default analysisSlice.reducer;
// useCodeAnalysis.ts
import { useEffect } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { analyzeCodeStart, analyzeCodeSuccess, analyzeCodeFailure } from '../store/analysisSlice';
import { RootState } from '../store/store';
import api from '../services/api';
/**
 * Hook exposing the analysis slice state plus an `analyzeCode` action that
 * POSTs the current code to /analyze and dispatches start/success/failure.
 */
export const useCodeAnalysis = () => {
  const dispatch = useDispatch();
  const { code, isLoading, results, error } = useSelector(
    (state: RootState) => state.analysis
  );

  const analyzeCode = async () => {
    try {
      dispatch(analyzeCodeStart());
      const response = await api.post('/analyze', { code });
      dispatch(analyzeCodeSuccess(response.data));
    } catch (err: unknown) {
      // `err` is `unknown` under strict mode; narrow before reading .message
      // so non-Error throws don't crash the handler with a TypeError.
      const message = err instanceof Error ? err.message : String(err);
      dispatch(analyzeCodeFailure(message));
    }
  };

  return {
    code,
    isLoading,
    results,
    error,
    analyzeCode
  };
};
// CodeEditor.tsx
import React, { useState, useEffect } from 'react';
import { useDispatch } from 'react-redux';
import { setCode } from '../store/analysisSlice';
import MonacoEditor from '@monaco-editor/react';
import { LoadingSpinner } from './ui/LoadingSpinner';
interface CodeEditorProps {
initialValue?: string;
language?: string;
}
/**
 * Monaco-based code editor. Local state mirrors the editor contents and is
 * debounced (500 ms) into the Redux store via setCode, so the store is not
 * hit on every keystroke.
 */
export const CodeEditor: React.FC<CodeEditorProps> = ({
  initialValue = '',
  language = 'javascript'
}) => {
  const [value, setValue] = useState(initialValue);
  const dispatch = useDispatch();

  // Debounce store updates; the cleanup cancels a pending dispatch when the
  // user keeps typing.
  useEffect(() => {
    const timer = setTimeout(() => {
      dispatch(setCode(value));
    }, 500);
    return () => clearTimeout(timer);
  }, [value, dispatch]);

  return (
    <div className="border border-gray-200 rounded-lg overflow-hidden">
      <div className="bg-gray-800 px-4 py-2 flex justify-between items-center">
        <span className="text-sm text-gray-300">{language.toUpperCase()}</span>
        {/* type="button" avoids accidental form submission if this editor is
            ever rendered inside a <form>.
            NOTE(review): no onClick is wired here — presumably the parent
            should pass analyzeCode from useCodeAnalysis; confirm. */}
        <button type="button" className="text-xs bg-indigo-600 text-white px-2 py-1 rounded">
          Analyze
        </button>
      </div>
      <MonacoEditor
        height="500px"
        language={language}
        theme="vs-dark"
        value={value}
        // Monaco may report undefined (e.g. model disposal); `??` coalesces
        // only null/undefined instead of all falsy values.
        onChange={(newValue) => setValue(newValue ?? '')}
        loading={<LoadingSpinner />}
        options={{
          minimap: { enabled: false },
          fontSize: 14,
          wordWrap: 'on',
          automaticLayout: true
        }}
      />
    </div>
  );
};
// AnalysisResults.tsx
import React from 'react';
import { CodeIssue } from '../types';
import { IssueSeverityBadge } from './ui/IssueSeverityBadge';
interface AnalysisResultsProps {
issues: CodeIssue[];
loading?: boolean;
}
export const AnalysisResults: React.FC<AnalysisResultsProps> = ({
issues,
loading = false
}) => {
if (loading) {
return <div className="text-center py-8">Analyzing code...</div>;
}
if (!issues || issues.length === 0) {
return <div className="text-center py-8 text-gray-500">No issues found</div>;
}
return (
<div className="space-y-4">
{issues.map((issue, index) => (
<div key={index} className="border border-gray-200 rounded-lg p-4">
<div className="flex justify-between items-start mb-2">
<h4 className="font-medium">{issue.message}</h4>
<IssueSeverityBadge severity={issue.severity} />
</div>
<p className="text-sm text-gray-600 mb-2">
Line {issue.line}: {issue.codeSnippet}
</p>
<p className="text-xs text-gray-500">{issue.ruleId}</p>
</div>
))}
</div>
);
};
// SecurityFindings.tsx
import React from 'react';
import { SecurityFinding } from '../types';
import { CveBadge } from './ui/CveBadge';
interface SecurityFindingsProps {
findings: SecurityFinding[];
}
export const SecurityFindings: React.FC<SecurityFindingsProps> = ({
findings
}) => {
return (
<div className="space-y-4">
{findings.map((finding, index) => (
<div key={index} className="border border-red-200 rounded-lg p-4 bg-red-50">
<div className="flex justify-between items-start mb-2">
<h4 className="font-medium text-red-800">{finding.title}</h4>
<CveBadge cveId={finding.cveId} />
</div>
<p className="text-sm text-red-700 mb-2">{finding.description}</p>
<div className="flex space-x-2">
<span className="text-xs px-2 py-1 bg-red-100 text-red-800 rounded">
Severity: {finding.severity}
</span>
<span className="text-xs px-2 py-1 bg-red-100 text-red-800 rounded">
Line: {finding.line}
</span>
</div>
</div>
))}
</div>
);
};
# main.py
from fastapi import FastAPI, Depends
from fastapi.middleware.cors import CORSMiddleware
from app.api.v1.endpoints import code_analysis, auth
from app.core.config import settings
# Create the FastAPI application; OpenAPI docs are served under /api/.
app = FastAPI(
    title="Deepsite API",
    description="AI-powered code analysis platform",
    version="1.0.0",
    docs_url="/api/docs",
    redoc_url="/api/redoc"
)

# CORS middleware — allowed origins come from settings so each deployment
# can restrict them without code changes.
app.add_middleware(
    CORSMiddleware,
    allow_origins=settings.BACKEND_CORS_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include API routers.
# Auth endpoints are public; all analysis endpoints require an authenticated
# active user via a router-wide dependency.
# NOTE(review): auth.get_current_active_user is not defined in the auth
# module shown here (only get_current_user is) — confirm it exists.
app.include_router(
    auth.router,
    prefix="/api/v1/auth",
    tags=["authentication"]
)
app.include_router(
    code_analysis.router,
    prefix="/api/v1/analysis",
    tags=["code analysis"],
    dependencies=[Depends(auth.get_current_active_user)]
)
# auth.py
from datetime import datetime, timedelta
from typing import Optional
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from passlib.context import CryptContext
from pydantic import BaseModel
from app.core.config import settings
# Password hashing
# Password hashing
# bcrypt-backed context; deprecated="auto" flags hashes made with schemes
# that later become deprecated so they can be re-hashed on login.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# Extracts the bearer token from the Authorization header; clients obtain
# tokens from the "token" URL.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")


class Token(BaseModel):
    """Response body returned by the login endpoint."""
    access_token: str
    token_type: str


class TokenData(BaseModel):
    """Claims extracted from a decoded JWT (the "sub" claim)."""
    username: Optional[str] = None
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Check a plaintext password against its stored bcrypt hash."""
    return pwd_context.verify(plain_password, hashed_password)
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Create a signed JWT carrying ``data`` plus an ``exp`` claim.

    Args:
        data: Claims to embed (e.g. {"sub": username}); copied, not mutated.
        expires_delta: Optional token lifetime; defaults to 15 minutes.

    Returns:
        The encoded JWT string.
    """
    # Local import keeps this fix self-contained. Timezone-aware "now"
    # replaces naive datetime.utcnow(), which is deprecated and easy to
    # mishandle when compared against aware timestamps.
    from datetime import timezone

    to_encode = data.copy()
    # `or` preserves the original truthiness behavior: None (and a zero
    # delta) falls back to the 15-minute default.
    expire = datetime.now(timezone.utc) + (expires_delta or timedelta(minutes=15))
    to_encode.update({"exp": expire})
    encoded_jwt = jwt.encode(
        to_encode,
        settings.SECRET_KEY,
        algorithm=settings.ALGORITHM
    )
    return encoded_jwt
async def get_current_user(token: str = Depends(oauth2_scheme)):
    """Resolve the bearer token to a user, raising 401 on any failure.

    Raises:
        HTTPException: 401 if the token cannot be decoded, has no "sub"
            claim, or no matching user is found.
    """
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        payload = jwt.decode(
            token,
            settings.SECRET_KEY,
            algorithms=[settings.ALGORITHM]
        )
        # The username travels in the standard "sub" (subject) claim.
        username: str = payload.get("sub")
        if username is None:
            raise credentials_exception
        token_data = TokenData(username=username)
    except JWTError:
        raise credentials_exception
    # NOTE(review): `get_user` and `fake_db` are not defined or imported in
    # this module as shown — this line will raise NameError at runtime;
    # confirm where the user store lives and import it explicitly.
    user = get_user(fake_db, username=token_data.username)
    if user is None:
        raise credentials_exception
    return user
# endpoints/code_analysis.py
from fastapi import APIRouter, Depends, HTTPException
from fastapi import status
from typing import List
from app.services.code_analysis import analyze_code
from app.models.schemas import CodeAnalysisRequest, CodeAnalysisResult
from app.api.v1.dependencies import get_current_active_user
router = APIRouter()
@router.post(
    "/",
    response_model=CodeAnalysisResult,
    summary="Analyze code for quality and security issues",
    description="""Analyzes the provided code for:
- Code quality issues
- Security vulnerabilities
- Performance anti-patterns
- Style violations""",
    response_description="Analysis results with found issues"
)
async def analyze_code_endpoint(
    request: CodeAnalysisRequest,
    current_user: str = Depends(get_current_active_user)
) -> CodeAnalysisResult:
    """Run the combined (SonarQube + LLM) analysis on the submitted code.

    Raises:
        HTTPException: 422 when the analysis service fails for any reason.
    """
    try:
        result = await analyze_code(request.code, request.language)
        return result
    except Exception as e:
        # NOTE(review): the raw exception text is echoed to the client;
        # consider logging the detail server-side to avoid leaking internals.
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail=f"Error analyzing code: {str(e)}"
        )
@router.get(
    "/history/",
    response_model=List[CodeAnalysisResult],
    summary="Get analysis history",
    description="Returns the user's previous code analysis results",
    response_description="List of previous analysis results"
)
async def get_analysis_history(
    current_user: str = Depends(get_current_active_user)
) -> List[CodeAnalysisResult]:
    """Return the caller's past analysis results (stub: nothing is persisted yet)."""
    # Implementation would query database for user's history
    return []
# services/code_analysis.py
from typing import Optional
from app.models.schemas import CodeAnalysisResult, CodeIssue, SecurityFinding
from app.integrations.huggingface import query_hf_model
from app.integrations.sonarqube import analyze_with_sonarqube
async def analyze_code(
    code: str,
    language: str = "python"
) -> CodeAnalysisResult:
    """Analyze code using multiple analysis methods.

    Combines static analysis (SonarQube) with an LLM pass (CodeLlama via the
    HuggingFace inference API), routing LLM findings into security vs.
    quality buckets.

    Args:
        code: Source code to analyze.
        language: Language name used in the prompt (default "python").

    Returns:
        CodeAnalysisResult with quality issues, security findings and a summary.
    """
    # Result buckets. Builtin generics (PEP 585, Python 3.9+) replace the
    # typing.List the original referenced without importing it.
    quality_issues: list[CodeIssue] = []
    security_findings: list[SecurityFinding] = []

    # 1. Static analysis with SonarQube
    sonar_results = await analyze_with_sonarqube(code, language)
    quality_issues.extend(sonar_results.issues)

    # 2. AI-powered analysis with HuggingFace
    hf_prompt = f"""
Analyze this {language} code for quality and security issues:
{code}
Return findings in JSON format with:
- issue_type (quality|security|performance|style)
- severity (low|medium|high|critical)
- message
- line_number
- recommendation
"""
    hf_response = await query_hf_model(
        model="codellama/CodeLlama-34b-Instruct-hf",
        prompt=hf_prompt
    )

    # Route each LLM finding into the matching bucket.
    if hf_response and hf_response.get("findings"):
        for finding in hf_response["findings"]:
            if finding["issue_type"] == "security":
                security_findings.append(
                    SecurityFinding.from_hf_response(finding)
                )
            else:
                quality_issues.append(
                    CodeIssue.from_hf_response(finding)
                )

    # NOTE(review): generate_summary is not defined or imported in this
    # module as shown — confirm where it lives.
    return CodeAnalysisResult(
        quality_issues=quality_issues,
        security_findings=security_findings,
        summary=generate_summary(quality_issues, security_findings)
    )
# models/schemas.py
from pydantic import BaseModel, Field
from typing import List, Optional
from enum import Enum
class Severity(str, Enum):
    """Issue severity levels, serialized as lowercase strings."""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
class CodeIssue(BaseModel):
    """A single code-quality issue found by static or AI analysis."""
    rule_id: str = Field(..., example="S100")
    message: str = Field(..., example="Avoid using 'eval' function")
    severity: Severity = Field(..., example="high")
    line: int = Field(..., example=42)
    code_snippet: str = Field(..., example="eval(user_input)")
    category: str = Field(..., example="security")

    @classmethod
    def from_hf_response(cls, data: dict):
        """Build a CodeIssue from one finding dict in the LLM's JSON output.

        Falls back to a synthetic "AI-<issue_type>" rule id when the model
        supplied none, and to an empty snippet when absent.
        """
        return cls(
            rule_id=data.get("rule_id", "AI-"+data["issue_type"]),
            message=data["message"],
            severity=data["severity"],
            line=data["line_number"],
            code_snippet=data.get("code_snippet", ""),
            category=data["issue_type"]
        )
class SecurityFinding(BaseModel):
    """A security vulnerability reported by an analysis pass."""
    cve_id: Optional[str] = Field(None, example="CVE-2021-1234")
    title: str = Field(..., example="SQL Injection Vulnerability")
    description: str = Field(..., example="User input directly concatenated into SQL query")
    severity: Severity = Field(..., example="critical")
    line: int = Field(..., example=15)
    recommendation: str = Field(..., example="Use parameterized queries")

    @classmethod
    def from_hf_response(cls, data: dict):
        """Build a SecurityFinding from one finding dict in the LLM output.

        NOTE(review): `description` is filled from the "recommendation" key,
        duplicating `recommendation`, and `cve_id` is never populated —
        confirm whether the model response carries separate description/CVE
        fields that should be mapped here.
        """
        return cls(
            title=data["message"],
            description=data.get("recommendation", ""),
            severity=data["severity"],
            line=data["line_number"],
            recommendation=data.get("recommendation", "")
        )
class CodeAnalysisRequest(BaseModel):
    """Request body for the analysis endpoint."""
    code: str = Field(..., example="def foo():\n pass")
    language: str = Field("python", example="python")
class CodeAnalysisResult(BaseModel):
    """Combined output of all analysis passes."""
    quality_issues: List[CodeIssue] = []
    security_findings: List[SecurityFinding] = []
    summary: str = Field(..., example="Found 3 critical issues")
# integrations/huggingface.py
import os
from typing import Optional, Dict, Any
import httpx
from app.core.config import settings
from app.core.logging import logger
# Base URL of the hosted HuggingFace inference API.
HF_API_URL = "https://api-inference.huggingface.co/models"
# API key is read once from settings at import time.
HF_API_KEY = settings.HF_API_KEY
async def query_hf_model(
    model: str,
    prompt: str,
    parameters: Optional[Dict[str, Any]] = None,
    _retries: int = 3
) -> Optional[Dict[str, Any]]:
    """Query HuggingFace inference API.

    Args:
        model: Model repo id, e.g. "codellama/CodeLlama-34b-Instruct-hf".
        prompt: Text sent as the "inputs" field.
        parameters: Optional generation parameters; defaults applied otherwise.
        _retries: Internal bound on 503 (model loading) retries so a model
            that never finishes loading cannot recurse forever.

    Returns:
        The decoded JSON response, or None on error/exhausted retries.
    """
    # Local import keeps the fix self-contained: the original called
    # asyncio.sleep without importing asyncio (NameError on the 503 path).
    import asyncio

    headers = {
        "Authorization": f"Bearer {HF_API_KEY}",
        "Content-Type": "application/json"
    }
    payload = {
        "inputs": prompt,
        "parameters": parameters or {
            "max_new_tokens": 1024,
            "temperature": 0.7,
            "return_full_text": False
        }
    }
    try:
        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.post(
                f"{HF_API_URL}/{model}",
                headers=headers,
                json=payload
            )
            if response.status_code == 200:
                return response.json()
            elif response.status_code == 503 and _retries > 0:
                # Model is loading; wait and retry a bounded number of times.
                logger.info(f"Model {model} is loading, retrying...")
                await asyncio.sleep(10)
                return await query_hf_model(model, prompt, parameters, _retries - 1)
            else:
                logger.error(
                    f"HF API error: {response.status_code} - {response.text}"
                )
                return None
    except Exception as e:
        logger.error(f"Error querying HF API: {str(e)}")
        return None
# services/model_selector.py
from enum import Enum
from typing import Optional, Dict, Any
from app.integrations.huggingface import query_hf_model
class ModelType(str, Enum):
    """Analysis task categories, each mapped to a dedicated model."""
    CODE_ANALYSIS = "code_analysis"
    SECURITY = "security"
    DOCUMENTATION = "documentation"


# Task -> HuggingFace model repo id.
MODEL_MAP = {
    ModelType.CODE_ANALYSIS: "codellama/CodeLlama-34b-Instruct-hf",
    ModelType.SECURITY: "deepset/roberta-base-squad2",
    ModelType.DOCUMENTATION: "bigcode/starcoder",
}


def get_model_for_task(task_type: ModelType) -> str:
    """Return the model repo id for a task, falling back to code analysis."""
    default_model = MODEL_MAP[ModelType.CODE_ANALYSIS]
    return MODEL_MAP.get(task_type, default_model)
async def analyze_with_model(
    code: str,
    task_type: ModelType,
    language: str = "python"
) -> Optional[Dict[str, Any]]:
    """Analyze code with the model registered for the given task type."""
    # Security analysis runs slightly cooler for more deterministic output.
    temperature = 0.5 if task_type == ModelType.SECURITY else 0.7
    generation_params = {
        "max_new_tokens": 1024,
        "temperature": temperature
    }
    chosen_model = get_model_for_task(task_type)
    task_prompt = generate_prompt_for_task(task_type, code, language)
    return await query_hf_model(chosen_model, task_prompt, generation_params)
def generate_prompt_for_task(
    task_type: ModelType,
    code: str,
    language: str
) -> str:
    """Generate task-specific prompt.

    Builds the instruction text sent to the model: a security audit prompt,
    a documentation prompt, or (default) a general quality-analysis prompt.
    The prompt bodies ask for JSON so responses can be machine-parsed.
    """
    if task_type == ModelType.SECURITY:
        return f"""Analyze this {language} code for security vulnerabilities:
{code}
Return JSON with:
- vulnerability_type
- severity (low/medium/high/critical)
- description
- line_number
- recommendation
"""
    elif task_type == ModelType.DOCUMENTATION:
        return f"""Generate documentation for this {language} code:
{code}
Include:
- Function description
- Parameters
- Return value
- Usage example
"""
    else:  # Default code analysis
        return f"""Analyze this {language} code for quality issues:
{code}
Return JSON with:
- issue_type (style/performance/bug)
- severity (low/medium/high)
- message
- line_number
- recommendation
"""
User submits code for analysis
SonarQube scans for common issues
CodeLlama analyzes for complex patterns
Combine findings from all sources
Create comprehensive report
# Dockerfile
FROM python:3.9-slim as base

# Install system dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Create and activate virtual environment
ENV VIRTUAL_ENV=/opt/venv
RUN python3 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

# Install Python dependencies
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Frontend build stage
FROM node:16 as frontend-builder
WORKDIR /app/frontend
COPY frontend/package.json frontend/package-lock.json ./
# npm ci installs exactly what the committed lockfile pins (reproducible builds).
RUN npm ci
COPY frontend .
RUN npm run build

# Final production image
FROM base as production
COPY --from=frontend-builder /app/frontend/dist /app/frontend/dist

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV PORT=8000
ENV MODE=production

# Run application
# Shell form so ${PORT} is expanded at container start; the exec (JSON array)
# form performs no variable substitution, so the original passed the literal
# string "$PORT" to uvicorn.
CMD uvicorn app.main:app --host 0.0.0.0 --port ${PORT}
# .github/workflows/deploy.yml
# Builds the frontend, builds/pushes the Docker image, and deploys to
# HuggingFace Spaces.
# NOTE(review): this single job also triggers on pull_request, so the
# registry push and Spaces deploy run for PRs as written — confirm whether
# those steps should be gated on push events only.
name: Deploy to HuggingFace Spaces
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Set up Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '16'
      - name: Build frontend
        working-directory: ./frontend
        run: |
          npm install
          npm run build
      - name: Configure Docker
        run: |
          echo "${{ secrets.HF_TOKEN }}" | docker login \
            -u ${{ secrets.HF_USERNAME }} \
            --password-stdin \
            ghcr.io
      - name: Build and push Docker image
        run: |
          docker build -t ghcr.io/${{ secrets.HF_USERNAME }}/deepsite:latest .
          docker push ghcr.io/${{ secrets.HF_USERNAME }}/deepsite:latest
      - name: Deploy to Spaces
        # NOTE(review): pinning a third-party action to @main is mutable;
        # prefer a tagged release or a commit SHA.
        uses: huggingface/huggingface_hub@main
        with:
          huggingface_token: ${{ secrets.HF_TOKEN }}
          repo_id: ${{ secrets.HF_USERNAME }}/deepsite
          space_sdk: docker
          space_hardware: cpu-basic
          space_storage: small
# app/api/dependencies.py
from fastapi import Depends, Request
from fastapi.security import HTTPBearer
from fastapi import HTTPException, status
from redis import Redis
from app.core.config import settings
# Bearer-token extractor for the Authorization header.
security = HTTPBearer()
# Shared Redis connection used for rate-limit counters.
redis = Redis.from_url(settings.REDIS_URL)
async def rate_limit(
    request: Request,
    credentials: str = Depends(security)
) -> None:
    """Fixed-window rate-limiting dependency (one-minute windows in Redis).

    Identifies the caller by the JWT "sub" claim when the token decodes,
    falling back to the client IP so unauthenticated abuse is still throttled.

    Raises:
        HTTPException: 429 once the caller exceeds settings.RATE_LIMIT
            requests within the current window.
    """
    # Local import keeps the fix self-contained: the original used jwt and
    # JWTError without importing them here (NameError at runtime).
    from jose import JWTError, jwt

    # Get user ID from token
    try:
        payload = jwt.decode(
            credentials.credentials,
            settings.SECRET_KEY,
            algorithms=[settings.ALGORITHM]
        )
        user_id = payload.get("sub")
    except JWTError:
        user_id = request.client.host  # Fallback to IP

    # Check rate limit: INCR, then set the TTL on the first hit of a window.
    key = f"rate_limit:{user_id}"
    current = redis.incr(key)
    if current == 1:
        redis.expire(key, 60)  # 1 minute window
    if current > settings.RATE_LIMIT:
        raise HTTPException(
            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
            detail="Too many requests"
        )
# README.md
---
title: Deepsite
emoji: 🚀
colorFrom: indigo
colorTo: purple
sdk: docker
app_file: app/main.py
pinned: true
license: apache-2.0

# Hardware resources
resources:
  cpu: 2
  memory: 8Gi

# Environment variables
env:
  - HF_API_KEY: your_huggingface_api_key
  - MODE: production
  - PORT: 8000

# Health check
healthcheck:
  test: ["CMD", "curl", "-f", "http://localhost:$PORT/api/health"]
  interval: 30s
  timeout: 10s
  retries: 3
Monitoring endpoints: `/metrics`, `/api/health`
Clear separation between frontend, backend, and AI components
RESTful interfaces between all components
Docker containers for easy deployment on HuggingFace Spaces