diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..c11e1f4
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,38 @@
+# =============================================================================
+# No More Jockeys - Environment Configuration
+# =============================================================================
+# This is the main environment file for the entire project.
+# Copy this to .env and fill in your actual values.
+
+# -----------------------------------------------------------------------------
+# ANTHROPIC API CONFIGURATION
+# -----------------------------------------------------------------------------
+# Required for AI functionality
+ANTHROPIC_API_KEY=your_anthropic_api_key_here
+
+# -----------------------------------------------------------------------------
+# HELICONE MONITORING
+# -----------------------------------------------------------------------------
+# RECOMMENDED in production for request monitoring (backend falls back to the direct Anthropic API with a warning if unset)
+# OPTIONAL in development (will use direct Anthropic API if not set)
+HELICONE_API_KEY=your_helicone_key_here
+
+# -----------------------------------------------------------------------------
+# API URL CONFIGURATION
+# -----------------------------------------------------------------------------
+# Frontend will auto-detect based on NODE_ENV, but you can override:
+#
+# For local development (default behavior):
+# NEXT_PUBLIC_API_URL=http://localhost:8000
+#
+# To test frontend locally against production backend:
+# NEXT_PUBLIC_API_URL=https://backend-pu7w8cumu-set4.vercel.app
+#
+# For production deployment:
+# NEXT_PUBLIC_API_URL=https://your-production-backend.vercel.app
+
+# -----------------------------------------------------------------------------
+# DEVELOPMENT MODE CONFIGURATION
+# -----------------------------------------------------------------------------
+# Uncomment to set explicit development mode
+# NODE_ENV=development
diff --git a/.gitignore b/.gitignore
index e85314f..1d25a48 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,16 +1,36 @@
+# Dependencies
node_modules/
-.envrc
+venv/
+__pycache__/
+
+# Environment files
.env
.env.local
+.env*.local
+
+# Build outputs
.next/
-.vercel
-__pycache__/
-venv/
-*.pyc
dist/
build/
+
+# Deployment
+.vercel
+
+# Logs
*.log
+
+# OS files
.DS_Store
+.envrc
+
+# Editor files
*.swp
*.swo
*~
+
+# Python
+*.pyc
+
+# Temporary files
+backend.log
+frontend.log
diff --git a/README.md b/README.md
index c3bc119..79b074b 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,26 @@ Multi-LLM No More Jockeys game implementation with separate backend and frontend
- **Backend**: FastAPI Python application (`/backend`)
- **Frontend**: Next.js React application (`/frontend`)
+## Quick Start
+
+```bash
+cp .env.example .env
+# Add your ANTHROPIC_API_KEY to .env
+./dev-local.sh
+```
+
## Development
+### Development Modes
+
+```bash
+./dev-local.sh # Frontend + Backend locally
+./dev-hybrid.sh # Frontend local → Production backend
+./dev-backend.sh # Backend only
+```
+
+### Alternative Development Commands
+
```bash
# Install dependencies and start both services
npm run dev
@@ -18,6 +36,15 @@ npm run dev:backend # Starts on http://localhost:8000
npm run dev:frontend # Starts on http://localhost:3000
```
+### Environment
+
+Edit `.env`:
+```bash
+ANTHROPIC_API_KEY=your_api_key_here
+HELICONE_API_KEY=optional_monitoring_key # Optional - adds observability
+# NEXT_PUBLIC_API_URL=optional_override
+```
+
## Deployment
This project uses GitHub Actions to automatically deploy to Vercel when changes are pushed to the main branch.
@@ -58,15 +85,16 @@ This project uses GitHub Actions to automatically deploy to Vercel when changes
For production deployments, set these in your Vercel project settings:
+**Backend**:
+- `ANTHROPIC_API_KEY`: Your Anthropic API key (required)
+- `HELICONE_API_KEY`: Your Helicone API key (recommended in production; monitoring is disabled with a warning if unset)
+
**Frontend**:
- `NEXT_PUBLIC_API_URL`: Your backend Vercel URL
-**Backend**:
-- `ANTHROPIC_API_KEY`: Your Anthropic API key (if using AI features)
-
## Game Rules
-No More Jockeys is a game where players take turns naming a person and a category that person has never been. The challenge is to avoid categories that have already been "banned" by previous players.
+No More Jockeys is a game where players take turns naming a person and a category that person belongs to. That category becomes banned. Don't name anyone from banned categories or you're eliminated.
## Tech Stack
diff --git a/backend/.gitignore b/backend/.gitignore
deleted file mode 100644
index e985853..0000000
--- a/backend/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.vercel
diff --git a/backend/api/agents.py b/backend/api/agents.py
index 3a6837b..b5d7cc5 100644
--- a/backend/api/agents.py
+++ b/backend/api/agents.py
@@ -2,6 +2,11 @@
from langchain_core.messages import SystemMessage, HumanMessage
import json
import os
+import logging
+from typing import Dict, List, Optional
+from enum import Enum
+from dataclasses import dataclass
+from datetime import datetime
from .prompts import (
PLAYER_SYSTEM_PROMPT,
PLAYER_TURN_PROMPT,
@@ -10,52 +15,194 @@
VALIDATOR_SYSTEM_PROMPT
)
from .game_state import GameState, Move, Player
-from datetime import datetime
-def _parse_json_response(response_content: str) -> dict:
- """Extracts and parses JSON from a string that might contain extra text."""
- content = response_content.strip()
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class Environment(Enum):
+ """Environment types for production detection."""
+ DEVELOPMENT = "development"
+ PRODUCTION = "production"
+
+
+@dataclass
+class ValidationResult:
+ """Result of move validation."""
+ is_valid: bool
+ violations: List[str]
+ explanations: Dict[str, str]
+
+
+class ProductionDetector:
+ """Handles production environment detection."""
- # Try to find a complete JSON object
- brace_count = 0
- start_idx = -1
- end_idx = -1
+ @staticmethod
+ def is_production() -> bool:
+ """Detect if running in production environment."""
+ return (
+ os.environ.get('VERCEL') == '1' or
+ os.environ.get('NODE_ENV') == 'production' or
+ os.environ.get('ENVIRONMENT') == 'production'
+ )
+
+
+class LLMClientFactory:
+ """Factory for creating LLM clients with appropriate configuration."""
- for i, char in enumerate(content):
- if char == '{':
- if start_idx == -1:
- start_idx = i
- brace_count += 1
- elif char == '}':
- brace_count -= 1
- if brace_count == 0 and start_idx != -1:
- end_idx = i + 1
- break
+ @staticmethod
+ def create_anthropic_client(
+ model_name: str,
+ temperature: float,
+ max_tokens: int,
+ role: Optional[str] = None,
+ player_id: Optional[int] = None
+ ) -> ChatAnthropic:
+ """Create ChatAnthropic client with environment-appropriate configuration."""
+ logger.info(f"Creating LLM client for role: {role}, player: {player_id}")
+
+ anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
+ helicone_key = os.environ.get('HELICONE_API_KEY')
+ is_production = ProductionDetector.is_production()
+
+ logger.info(f"Environment check - Production: {is_production}")
+ logger.info(f"API Keys - Anthropic: {'SET' if anthropic_api_key else 'MISSING'}, "
+ f"Helicone: {'SET' if helicone_key else 'MISSING'}")
+
+ if not anthropic_api_key:
+ error_msg = "ANTHROPIC_API_KEY environment variable is required"
+ logger.error(error_msg)
+ raise ValueError(error_msg)
+
+ try:
+ # Production: Helicone optional for now (with warning)
+ if is_production:
+ logger.info("Production mode: Helicone monitoring recommended")
+ if not helicone_key or helicone_key == 'your_helicone_key_here':
+ logger.warning("HELICONE_API_KEY not set in production - monitoring disabled")
+ logger.warning("For better observability, consider setting HELICONE_API_KEY")
+ # Fall back to direct Anthropic API
+ logger.info("Using direct Anthropic API in production (no monitoring)")
+ return ChatAnthropic(
+ model=model_name,
+ anthropic_api_key=anthropic_api_key,
+ temperature=temperature,
+ max_tokens=max_tokens
+ )
+
+ logger.info("Creating Helicone-enabled client for production")
+ return LLMClientFactory._create_helicone_client(
+ model_name, temperature, max_tokens, anthropic_api_key,
+ helicone_key, role, player_id
+ )
+
+ # Development: Helicone optional
+ if helicone_key and helicone_key != 'your_helicone_key_here':
+ logger.info("Development mode: Using Helicone monitoring")
+ return LLMClientFactory._create_helicone_client(
+ model_name, temperature, max_tokens, anthropic_api_key,
+ helicone_key, role, player_id
+ )
+
+ # Development: Direct Anthropic API
+ logger.info("Development mode: Using direct Anthropic API")
+ return ChatAnthropic(
+ model=model_name,
+ anthropic_api_key=anthropic_api_key,
+ temperature=temperature,
+ max_tokens=max_tokens
+ )
+
+ except Exception as e:
+ logger.error(f"Failed to create LLM client: {str(e)}")
+            logger.error("Check ANTHROPIC_API_KEY / HELICONE_API_KEY configuration (os.environ is not logged to avoid leaking secrets)")
+ raise
- if start_idx != -1 and end_idx != -1:
- json_content = content[start_idx:end_idx]
- print(f"Extracted JSON: {json_content}")
- return json.loads(json_content)
- else:
- print(f"No valid JSON found, trying full content: {content}")
- return json.loads(content)
-
-class JockeyAgent:
- def __init__(self, player_id: int, model_name: str = "claude-3-5-sonnet-20241022"):
- self.player_id = player_id
- self.llm = ChatAnthropic(
+ @staticmethod
+ def _create_helicone_client(
+ model_name: str,
+ temperature: float,
+ max_tokens: int,
+ anthropic_api_key: str,
+ helicone_key: str,
+ role: Optional[str],
+ player_id: Optional[int]
+ ) -> ChatAnthropic:
+ """Create Helicone-enabled ChatAnthropic client."""
+ headers = {
+ "Helicone-Auth": f"Bearer {helicone_key}",
+ "Helicone-Property-App": "no-more-jockeys",
+ }
+
+ if role:
+ headers["Helicone-Property-Role"] = role
+ if player_id:
+ headers["Helicone-Property-Player"] = f"player-{player_id}"
+
+ return ChatAnthropic(
model=model_name,
- anthropic_api_key=os.environ.get("ANTHROPIC_API_KEY"),
+ anthropic_api_key=anthropic_api_key,
anthropic_api_url="https://api.helicone.ai/v1",
- temperature=0.7,
- max_tokens=200,
- default_headers={
- "Helicone-Auth": f"Bearer {os.environ.get('HELICONE_API_KEY')}",
- "Helicone-Property-App": "no-more-jockeys",
- "Helicone-Property-Player": f"player-{player_id}"
- }
+ temperature=temperature,
+ max_tokens=max_tokens,
+ default_headers=headers
)
- self.system_prompt = PLAYER_SYSTEM_PROMPT.format(player_id=player_id)
+
+
+class JSONResponseParser:
+ """Handles parsing JSON responses from LLM outputs."""
+
+ @staticmethod
+ def parse_json_response(response_content: str) -> Dict:
+ """Extract and parse JSON from LLM response that might contain extra text."""
+ content = response_content.strip()
+
+ # Try to find a complete JSON object
+ brace_count = 0
+ start_idx = -1
+ end_idx = -1
+
+ for i, char in enumerate(content):
+ if char == '{':
+ if start_idx == -1:
+ start_idx = i
+ brace_count += 1
+ elif char == '}':
+ brace_count -= 1
+ if brace_count == 0 and start_idx != -1:
+ end_idx = i + 1
+ break
+
+ if start_idx != -1 and end_idx != -1:
+ json_content = content[start_idx:end_idx]
+            logger.info("Extracted JSON: %s", json_content)
+ return json.loads(json_content)
+ else:
+            logger.warning("No valid JSON found, trying full content: %s", content)
+ return json.loads(content)
+
+class JockeyAgent:
+ """AI agent that plays the No More Jockeys game."""
+
+ def __init__(self, player_id: int, model_name: str = "claude-3-5-sonnet-20241022"):
+ """Initialize the jockey agent with LLM client and system prompt."""
+ logger.info(f"Initializing JockeyAgent for player {player_id}")
+
+ try:
+ self.player_id = player_id
+ self.llm = LLMClientFactory.create_anthropic_client(
+ model_name=model_name,
+ temperature=0.7,
+ max_tokens=200,
+ role="player",
+ player_id=player_id
+ )
+ self.system_prompt = PLAYER_SYSTEM_PROMPT.format(player_id=player_id)
+ logger.info(f"Successfully initialized JockeyAgent for player {player_id}")
+ except Exception as e:
+ logger.error(f"Failed to initialize JockeyAgent for player {player_id}: {str(e)}")
+ raise
def take_turn(self, game_state: GameState, feedback: str = None) -> dict:
"""Generate a move based on current game state.
@@ -93,7 +240,7 @@ def take_turn(self, game_state: GameState, feedback: str = None) -> dict:
try:
print(f"Raw response content: {response.content}")
- move_data = _parse_json_response(response.content)
+ move_data = JSONResponseParser.parse_json_response(response.content)
# Validate required fields
if not all(key in move_data for key in ["person", "category", "reasoning"]):
raise ValueError("Missing required fields")
@@ -109,19 +256,23 @@ def take_turn(self, game_state: GameState, feedback: str = None) -> dict:
}
class ValidatorAgent:
+ """AI agent that validates moves and provides person information."""
+
def __init__(self, model_name: str = "claude-3-5-sonnet-20241022"):
- self.llm = ChatAnthropic(
- model=model_name,
- anthropic_api_key=os.environ.get("ANTHROPIC_API_KEY"),
- anthropic_api_url="https://api.helicone.ai/v1",
- temperature=0.1, # Low temperature for consistency
- max_tokens=300,
- default_headers={
- "Helicone-Auth": f"Bearer {os.environ.get('HELICONE_API_KEY')}",
- "Helicone-Property-App": "no-more-jockeys",
- "Helicone-Property-Role": "validator"
- }
- )
+ """Initialize the validator agent with LLM client."""
+ logger.info("Initializing ValidatorAgent")
+
+ try:
+ self.llm = LLMClientFactory.create_anthropic_client(
+ model_name=model_name,
+ temperature=0.1, # Low temperature for consistency
+ max_tokens=300,
+ role="validator"
+ )
+ logger.info("Successfully initialized ValidatorAgent")
+ except Exception as e:
+ logger.error(f"Failed to initialize ValidatorAgent: {str(e)}")
+ raise
def get_person_info(self, person: str) -> dict:
"""Get comprehensive info about a person"""
@@ -132,7 +283,7 @@ def get_person_info(self, person: str) -> dict:
response = self.llm.invoke(messages)
try:
- return _parse_json_response(response.content)
+ return JSONResponseParser.parse_json_response(response.content)
except Exception as e:
print(f"Error parsing person info: {e}")
print(f"Person info response content was: '{response.content}'")
@@ -164,7 +315,7 @@ def validate_move(self, person: str, banned_categories: list[dict]) -> tuple[boo
response = self.llm.invoke(messages)
try:
- result = _parse_json_response(response.content)
+ result = JSONResponseParser.parse_json_response(response.content)
return result["safe"], result.get("violations", []), result.get("explanations", {})
except Exception as e:
print(f"Error parsing validation response: {e}")
@@ -173,39 +324,57 @@ def validate_move(self, person: str, banned_categories: list[dict]) -> tuple[boo
return True, [], {"error": f"Validation parsing failed: {str(e)}"}
class GameOrchestrator:
+ """Orchestrates the No More Jockeys game between human and AI players."""
+
def __init__(self, human_player_name: str = None, ai_retry_attempts: int = 2):
- self.human_player_name = human_player_name
- self.has_human = human_player_name is not None
- self.ai_retry_attempts = ai_retry_attempts # Number of retry attempts for AI players
-
- if self.has_human:
- # Human is player 1, AI agents are 2-4
- self.agents = {
- i: JockeyAgent(player_id=i) for i in range(2, 5)
- }
- self.game_state = GameState(
- players=[
- Player(id=1, name=human_player_name, is_human=True),
- Player(id=2, name="Claude-2", is_human=False),
- Player(id=3, name="Claude-3", is_human=False),
- Player(id=4, name="Claude-4", is_human=False)
- ],
- banned_categories=[],
- moves=[]
- )
- else:
- # All AI agents
- self.agents = {
- i: JockeyAgent(player_id=i) for i in range(1, 5)
- }
- self.game_state = GameState(
- players=[Player(id=i, name=f"Claude-{i}", is_human=False) for i in range(1, 5)],
- banned_categories=[],
- moves=[]
- )
+ """Initialize the game orchestrator.
+
+ Args:
+ human_player_name: Name of human player, if any
+ ai_retry_attempts: Number of retry attempts for AI players when invalid moves are made
+ """
+ logger.info(f"Initializing GameOrchestrator with human player: {human_player_name}")
- self.validator = ValidatorAgent()
- self.pending_human_turn = False
+ try:
+ self.human_player_name = human_player_name
+ self.has_human = human_player_name is not None
+ self.ai_retry_attempts = ai_retry_attempts # Number of retry attempts for AI players
+
+ if self.has_human:
+ logger.info("Setting up game with human player")
+ # Human is player 1, AI agents are 2-4
+ self.agents = {
+ i: JockeyAgent(player_id=i) for i in range(2, 5)
+ }
+ self.game_state = GameState(
+ players=[
+ Player(id=1, name=human_player_name, is_human=True),
+ Player(id=2, name="Claude-2", is_human=False),
+ Player(id=3, name="Claude-3", is_human=False),
+ Player(id=4, name="Claude-4", is_human=False)
+ ],
+ banned_categories=[],
+ moves=[]
+ )
+ else:
+ logger.info("Setting up AI-only game")
+ # All AI agents
+ self.agents = {
+ i: JockeyAgent(player_id=i) for i in range(1, 5)
+ }
+ self.game_state = GameState(
+ players=[Player(id=i, name=f"Claude-{i}", is_human=False) for i in range(1, 5)],
+ banned_categories=[],
+ moves=[]
+ )
+
+ self.validator = ValidatorAgent()
+ self.pending_human_turn = False
+ logger.info("Successfully initialized GameOrchestrator")
+
+ except Exception as e:
+ logger.error(f"Failed to initialize GameOrchestrator: {str(e)}")
+ raise
def play_turn(self, human_move: dict = None) -> dict:
"""Execute one turn of the game"""
@@ -249,9 +418,7 @@ def play_turn(self, human_move: dict = None) -> dict:
# Track retry attempts
current_move = move_data
- current_valid = is_valid
current_violations = violations
- current_explanations = explanations
for retry_num in range(1, self.ai_retry_attempts + 1):
# Generate feedback based on all previous attempts
@@ -259,7 +426,7 @@ def play_turn(self, human_move: dict = None) -> dict:
feedback = f"Your choice '{current_move['person']}' violated: {', '.join(current_violations)}. Choose someone else."
else:
# For multiple retries, provide comprehensive feedback
- feedback = f"Multiple attempts failed. Choose a completely different person who does NOT fall into any banned categories."
+ feedback = "Multiple attempts failed. Choose a completely different person who does NOT fall into any banned categories."
print(f"🔄 AI Player {current_player.id} attempting retry {retry_num}/{self.ai_retry_attempts}...")
@@ -329,6 +496,7 @@ def play_turn(self, human_move: dict = None) -> dict:
"waiting_for_human": False
}
- def _get_winner(self) -> int | None:
+ def _get_winner(self) -> Optional[int]:
+ """Get the ID of the winning player, if any."""
active = self.game_state.get_active_players()
return active[0].id if len(active) == 1 else None
diff --git a/backend/api/main.py b/backend/api/main.py
index 4bff048..ca6f563 100644
--- a/backend/api/main.py
+++ b/backend/api/main.py
@@ -2,23 +2,47 @@
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import uuid
+import logging
from dotenv import load_dotenv
-from .agents import GameOrchestrator
-# Load local environment variables from .env file for locl development
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Load environment variables (development only - production uses system env vars)
load_dotenv()
+logger.info("Starting FastAPI application...")
+
+try:
+ from .agents import GameOrchestrator
+ logger.info("Successfully imported GameOrchestrator")
+except Exception as e:
+ logger.error(f"Failed to import GameOrchestrator: {str(e)}")
+ raise
+
app = FastAPI()
# CORS configuration - allow all origins
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
- allow_credentials=True,
+ allow_credentials=False, # Cannot be True with allow_origins=["*"]
allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
allow_headers=["*"],
)
+# Health check endpoints
+@app.get("/")
+async def root():
+ """Root endpoint for health check"""
+ return {"message": "FastAPI backend is running", "status": "healthy"}
+
+@app.get("/api/health")
+async def health_check():
+ """API health check endpoint"""
+ return {"message": "API is healthy", "status": "ok"}
+
# Simple in-memory game storage
games = {}
@@ -37,33 +61,54 @@ class HumanMoveRequest(BaseModel):
@app.post("/api/game/create")
async def create_game(request: CreateGameRequest = CreateGameRequest()):
"""Create a new game instance"""
- game_id = str(uuid.uuid4())
- # Create game orchestrator
- orchestrator = GameOrchestrator(
- human_player_name=request.human_player_name
- )
- orchestrator.game_state.game_id = game_id
-
- games[game_id] = orchestrator
+ logger.info(f"Creating new game with human player: {request.human_player_name}")
- return {
- "game_id": game_id,
- "game_state": orchestrator.game_state.to_dict(),
- "has_human": orchestrator.has_human
- }
+ try:
+ game_id = str(uuid.uuid4())
+ logger.info(f"Generated game ID: {game_id}")
+
+ # Create game orchestrator
+ orchestrator = GameOrchestrator(
+ human_player_name=request.human_player_name
+ )
+ orchestrator.game_state.game_id = game_id
+
+ games[game_id] = orchestrator
+ logger.info(f"Successfully created game {game_id}")
+
+ return {
+ "game_id": game_id,
+ "game_state": orchestrator.game_state.to_dict(),
+ "has_human": orchestrator.has_human
+ }
+
+ except Exception as e:
+ logger.error(f"Failed to create game: {str(e)}")
+ raise HTTPException(status_code=500, detail=f"Failed to create game: {str(e)}")
@app.post("/api/game/turn")
async def play_turn(action: GameAction):
"""Play one turn of the game"""
- if action.game_id not in games:
- raise HTTPException(status_code=404, detail="Game not found")
-
- orchestrator = games[action.game_id]
+ logger.info(f"Playing turn for game {action.game_id}")
- # Play turn using orchestrator
- result = orchestrator.play_turn()
-
- return result
+ try:
+ if action.game_id not in games:
+ logger.error(f"Game {action.game_id} not found")
+ raise HTTPException(status_code=404, detail="Game not found")
+
+ orchestrator = games[action.game_id]
+
+ # Play turn using orchestrator
+ result = orchestrator.play_turn()
+ logger.info(f"Successfully played turn for game {action.game_id}")
+
+ return result
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Failed to play turn for game {action.game_id}: {str(e)}")
+ raise HTTPException(status_code=500, detail=f"Failed to play turn: {str(e)}")
@app.get("/api/game/{game_id}/state")
async def get_game_state(game_id: str):
@@ -93,5 +138,4 @@ async def make_human_move(request: HumanMoveRequest):
return result
-# For Vercel
-handler = app
+# FastAPI app is automatically detected by Vercel for ASGI deployment
diff --git a/dev-backend.sh b/dev-backend.sh
new file mode 100755
index 0000000..67407a1
--- /dev/null
+++ b/dev-backend.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# =============================================================================
+# No More Jockeys - Backend Only Development
+# =============================================================================
+# Use Case: Run backend locally for API testing/development
+# Helicone: NO (direct Anthropic API)
+
+set -e
+
+echo "🎯 No More Jockeys - Backend Only Mode"
+echo "======================================="
+echo "Backend: http://localhost:8000"
+echo "Helicone: DISABLED (direct Anthropic API for development)"
+echo "API Docs: http://localhost:8000/docs"
+echo
+
+# Check environment
+if [ ! -f ".env" ]; then
+    echo "❌ .env file not found. Please copy .env.example to .env and configure it."
+    exit 1
+fi
+
+# Anchor to start of line so comments mentioning the key do not match
+if [ -z "$(grep '^ANTHROPIC_API_KEY=' .env | cut -d= -f2-)" ]; then
+    echo "❌ ANTHROPIC_API_KEY not set in .env file"
+    exit 1
+fi
+
+# Load environment variables (safe for values containing spaces or quotes)
+set -a; . ./.env; set +a
+
+# Unset Helicone to ensure direct API usage
+unset HELICONE_API_KEY
+
+echo "✅ Environment configured"
+echo " • Helicone: DISABLED"
+echo
+
+# Start backend only
+npm run dev:backend
diff --git a/dev-hybrid.sh b/dev-hybrid.sh
new file mode 100755
index 0000000..bb6b1b4
--- /dev/null
+++ b/dev-hybrid.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# =============================================================================
+# No More Jockeys - Hybrid Development (Frontend → Production Backend)
+# =============================================================================
+# Use Case: Run frontend locally connected to production backend
+# Helicone: YES (production monitoring)
+
+set -e
+
+echo "🎯 No More Jockeys - Hybrid Development Mode"
+echo "=============================================="
+echo "Frontend: http://localhost:3000 → Backend: https://backend-pu7w8cumu-set4.vercel.app"
+echo "Helicone: ENABLED (production backend requires monitoring)"
+echo
+
+# Check environment
+if [ ! -f ".env" ]; then
+    echo "❌ .env file not found. Please copy .env.example to .env and configure it."
+    exit 1
+fi
+
+# Load environment variables (safe for values containing spaces or quotes)
+set -a; . ./.env; set +a
+
+# Set production backend URL
+export NEXT_PUBLIC_API_URL=https://backend-pu7w8cumu-set4.vercel.app
+
+echo "✅ Environment configured"
+echo " • API URL: $NEXT_PUBLIC_API_URL"
+# Report status only — never expand the key itself into the output
+echo " • Helicone: $([ -n "$HELICONE_API_KEY" ] && echo ENABLED || echo DISABLED)"
+echo
+
+# Start frontend only
+cd frontend && npm run dev
diff --git a/dev-local.sh b/dev-local.sh
new file mode 100755
index 0000000..93ae2e7
--- /dev/null
+++ b/dev-local.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# =============================================================================
+# No More Jockeys - Local Development (Frontend → Local Backend)
+# =============================================================================
+# Use Case: Run frontend locally connected to local backend
+# Helicone: NO (direct Anthropic API)
+
+set -e
+
+echo "🎯 No More Jockeys - Local Development Mode"
+echo "============================================="
+echo "Frontend: http://localhost:3000 → Backend: http://localhost:8000"
+echo "Helicone: DISABLED (direct Anthropic API for development)"
+echo
+
+# Check environment
+if [ ! -f ".env" ]; then
+    echo "❌ .env file not found. Please copy .env.example to .env and configure it."
+    exit 1
+fi
+
+# Anchor to start of line so comments mentioning the key do not match
+if [ -z "$(grep '^ANTHROPIC_API_KEY=' .env | cut -d= -f2-)" ]; then
+    echo "❌ ANTHROPIC_API_KEY not set in .env file"
+    exit 1
+fi
+
+# Load environment variables (safe for values containing spaces or quotes)
+set -a; . ./.env; set +a
+
+# Unset Helicone to ensure direct API usage
+unset HELICONE_API_KEY
+
+# Ensure we're using local API URL
+export NEXT_PUBLIC_API_URL=http://localhost:8000
+
+echo "✅ Environment configured"
+echo " • API URL: $NEXT_PUBLIC_API_URL"
+echo " • Helicone: DISABLED"
+echo
+
+# Start both services
+npm run dev
diff --git a/frontend/.gitignore b/frontend/.gitignore
index e985853..5bbb991 100644
--- a/frontend/.gitignore
+++ b/frontend/.gitignore
@@ -1 +1,35 @@
+# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.js
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# local env files
+.env*.local
+
+# vercel
.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
\ No newline at end of file
diff --git a/frontend/pages/about.js b/frontend/pages/about.js
index 3f245f9..b370def 100644
--- a/frontend/pages/about.js
+++ b/frontend/pages/about.js
@@ -4,9 +4,7 @@ export default function About() {
return (
diff --git a/frontend/pages/index.js b/frontend/pages/index.js
index 54a6a1d..f2269b6 100644
--- a/frontend/pages/index.js
+++ b/frontend/pages/index.js
@@ -14,18 +14,35 @@ export default function Home() {
const [humanMove, setHumanMove] = useState({ person: '', category: '' });
const [darkMode, setDarkMode] = useState(false);
- // Force fresh deployment - trigger rebuild
- const API_URL = process.env.NEXT_PUBLIC_API_URL || 'https://backend-pu7w8cumu-set4.vercel.app';
+ // API URL configuration based on environment
+ const getApiUrl = () => {
+ // 1. Explicit override via environment variable (highest priority)
+ if (process.env.NEXT_PUBLIC_API_URL) {
+ return process.env.NEXT_PUBLIC_API_URL;
+ }
+
+ // 2. Auto-detect based on NODE_ENV
+ if (process.env.NODE_ENV === 'production') {
+ return 'https://backend-set4.vercel.app';
+ }
+
+ // 3. Default to local development
+ return 'http://localhost:8000';
+ };
+
+ const API_URL = getApiUrl();
const createGame = async (withHuman = false, playerName = '') => {
setLoading(true);
try {
+ const requestBody = withHuman
+ ? { human_player_name: playerName || 'You' }
+ : {};
+
const res = await fetch(`${API_URL}/api/game/create`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- human_player_name: withHuman ? (playerName || 'You') : null
- }),
+ body: JSON.stringify(requestBody),
});
const data = await res.json();
@@ -160,9 +177,7 @@ export default function Home() {