#!/bin/bash
#
# OrbiQuant IA - Ollama Setup Script
# Downloads and configures Llama 3 8B for the trading assistant
#

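# Abort on the first failing command.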
set -e

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
BOLD='\033[1m'

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
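# NOTE: assumes this script lives one directory below the project root
# (e.g. in scripts/), so the ".." above lands on the repository top level.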

echo -e "${BOLD}"
echo "╔══════════════════════════════════════════════════════════╗"
echo "║               OrbiQuant IA - Ollama Setup                ║"
echo "╚══════════════════════════════════════════════════════════╝"
echo -e "${NC}"

# Check if Docker is running
if ! docker info >/dev/null 2>&1; then
    echo -e "${RED}Error: Docker is not running${NC}"
    exit 1
fi

# Check if Ollama container exists
CONTAINER_NAME="orbiquant-ollama"

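# List all containers (running or stopped) and match the exact name.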
if ! docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
    echo -e "${YELLOW}Ollama container not found. Starting with docker-compose...${NC}"
    cd "$PROJECT_ROOT"

    if [ -f "apps/llm-agent/docker-compose.ollama.yml" ]; then
        docker-compose -f apps/llm-agent/docker-compose.ollama.yml up -d
    else
        echo -e "${RED}docker-compose.ollama.yml not found${NC}"
        exit 1
    fi

    echo -e "${BLUE}Waiting for Ollama to start...${NC}"
    sleep 10
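    # NOTE: a fixed sleep is a rough heuristic. Polling the Ollama HTTP API
    # (default port 11434) would be more robust, e.g.:
    #   until curl -sf http://localhost:11434/api/tags >/dev/null; do sleep 1; done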
fi

# Check if container is running
if ! docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
    echo -e "${YELLOW}Starting Ollama container...${NC}"
    docker start "$CONTAINER_NAME"
    sleep 5
fi

echo -e "\n${BLUE}Checking Ollama status...${NC}"
docker exec "$CONTAINER_NAME" ollama --version

# Check available models
echo -e "\n${BLUE}Current models:${NC}"
docker exec "$CONTAINER_NAME" ollama list

# Models to install
MODELS=("llama3:8b")
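# Append further tags here to install additional models, e.g.:
#   MODELS=("llama3:8b" "mistral:7b")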

echo -e "\n${BOLD}Installing models...${NC}"

for model in "${MODELS[@]}"; do
    echo -e "\n${YELLOW}Pulling $model...${NC}"
    echo -e "${BLUE}(This may take a while - the model is ~4.7 GB)${NC}"

    # Run the pull inside the `if` condition: with `set -e`, a bare failing
    # command would abort the script before a separate `$?` check could run.
    if docker exec "$CONTAINER_NAME" ollama pull "$model"; then
        echo -e "${GREEN}✓ $model installed successfully${NC}"
    else
        echo -e "${RED}✗ Failed to install $model${NC}"
    fi
done
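
# Pulled models persist under /root/.ollama inside the container; if
# docker-compose.ollama.yml mounts a volume there (assumed), they survive restarts.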

# Verify GPU access
echo -e "\n${BLUE}Checking GPU access...${NC}"
if docker exec "$CONTAINER_NAME" nvidia-smi >/dev/null 2>&1; then
    echo -e "${GREEN}✓ GPU access available${NC}"
    docker exec "$CONTAINER_NAME" nvidia-smi --query-gpu=name,memory.total,memory.free --format=csv
else
    echo -e "${YELLOW}⚠ GPU not available - will use CPU (slower)${NC}"
fi
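
# NOTE: GPU access inside the container requires the NVIDIA Container Toolkit
# on the host and a GPU reservation in the compose file (assumed to be set up
# in docker-compose.ollama.yml).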

# Test model
echo -e "\n${BLUE}Testing model with a simple prompt...${NC}"
RESPONSE=$(docker exec "$CONTAINER_NAME" ollama run llama3:8b "Say 'Hello, I am ready for trading!' in exactly those words" 2>/dev/null | head -n 5)
echo -e "${GREEN}Model response: $RESPONSE${NC}"

# Final status
echo -e "\n${BOLD}Setup Complete!${NC}"
echo -e "\n${BLUE}Available models:${NC}"
docker exec "$CONTAINER_NAME" ollama list

echo -e "\n${BOLD}Quick test:${NC}"
echo "  docker exec $CONTAINER_NAME ollama run llama3:8b 'What is Bitcoin?'"

echo -e "\n${BOLD}API endpoint:${NC}"
echo "  http://localhost:11434/api/generate"

echo -e "\n${GREEN}Ollama is ready for OrbiQuant IA!${NC}"
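
# Example request against the endpoint above (a sketch of a standard Ollama
# /api/generate call; not executed by this script):
#   curl -s http://localhost:11434/api/generate \
#     -d '{"model": "llama3:8b", "prompt": "What is Bitcoin?", "stream": false}'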