Viewing: 1744866629_AI_llama.sh

Fetch from the console with: wget https://bash.commongrounds.cc/uploads/1744866629_AI_llama.sh

#!/bin/bash

# Script to install and set up Ollama for offline AI CLI use on Debian-based distros
# with NVIDIA GTX 1080Ti GPU support and PHP development tools
# Target: Debian-based systems (e.g., Ubuntu 20.04/22.04)
# Requirements: NVIDIA drivers/CUDA installed, sudo privileges, 1TB allocated space
# Note: Internet required for initial model download; runtime is offline

# Exit on error
set -e

# Log file for installation
LOG_FILE="$HOME/ollama_setup.log"
echo "Installation started at $(date)" | tee -a "$LOG_FILE"

# Update system packages
echo "Updating system packages..." | tee -a "$LOG_FILE"
sudo apt-get update
sudo apt-get upgrade -y

# Install essential tools
echo "Installing essential tools..." | tee -a "$LOG_FILE"
sudo apt-get install -y curl git wget software-properties-common

# Verify NVIDIA driver and CUDA
echo "Verifying NVIDIA driver and CUDA..." | tee -a "$LOG_FILE"
nvidia-smi >> "$LOG_FILE" 2>&1 || { echo "NVIDIA driver not found"; exit 1; }
nvcc --version >> "$LOG_FILE" 2>&1 || { echo "CUDA not found"; exit 1; }
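
# Optional VRAM report (a sketch; assumes a single GPU and nvidia-smi's standard
# query mode). The codellama:13b model pulled below expects roughly 11GB of VRAM
# (GTX 1080Ti class).
VRAM_MB=$(nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits | head -n 1)
echo "Detected GPU memory: ${VRAM_MB} MiB" | tee -a "$LOG_FILE"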

# Install PHP and development tools
echo "Installing PHP and development tools..." | tee -a "$LOG_FILE"
sudo apt-get install -y php php-cli php-fpm php-mbstring php-curl php-xml composer
php -v >> "$LOG_FILE" 2>&1 || { echo "PHP installation failed"; exit 1; }
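# Composer is installed above as a distro package; verify it the same way as
# PHP (an added check, mirroring the script's existing pattern).
composer --version >> "$LOG_FILE" 2>&1 || { echo "Composer installation failed"; exit 1; }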

# Install Ollama
echo "Installing Ollama..." | tee -a "$LOG_FILE"
curl -fsSL https://ollama.com/install.sh | sh
# Verify Ollama installation
ollama --version >> "$LOG_FILE" 2>&1 || { echo "Ollama installation failed"; exit 1; }
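
# Make sure the Ollama server is up before pulling models. On most systems the
# official installer registers an 'ollama' systemd service (assumption: systemd
# host); fall back to a background 'ollama serve' otherwise.
if ! systemctl is-active --quiet ollama 2>/dev/null; then
    nohup ollama serve >> "$LOG_FILE" 2>&1 &
    sleep 3
fi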

# Download a PHP-focused model (CodeLlama 13B, quantized for 1080Ti)
echo "Downloading CodeLlama 13B model (requires internet)..." | tee -a "$LOG_FILE"
ollama pull codellama:13b >> "$LOG_FILE" 2>&1
# Note: Model is ~7GB, fits in 1080Ti's 11GB VRAM with 4-bit quantization
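# Confirm the model is now in the local store (an added check; the grep pattern
# matches the tag pulled above).
ollama list | grep -q "codellama:13b" || { echo "Model download failed"; exit 1; }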

# Create a PHP project directory
echo "Setting up PHP project directory..." | tee -a "$LOG_FILE"
mkdir -p ~/ollama_php_project

# Create a sample PHP file
cat > ~/ollama_php_project/test_auth.php << 'EOF'
<?php
// Sample PHP file for Ollama-generated code
// Run 'ollama run codellama:13b' and prompt for PHP code
echo "This is a placeholder PHP file.\n";
?>
EOF
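
# Lint the placeholder so a broken heredoc fails fast (php -l is PHP's built-in
# syntax checker).
php -l ~/ollama_php_project/test_auth.php >> "$LOG_FILE" 2>&1 || { echo "Sample PHP file failed syntax check"; exit 1; }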

# Create a script to simplify Ollama PHP code generation
cat > ~/ollama_php_project/generate_php.sh << 'EOF'
#!/bin/bash
# Script to generate PHP code using Ollama
MODEL="codellama:13b"
echo "Enter your PHP coding prompt (e.g., 'Create a PHP class for user authentication'):"
read -p "Prompt: " PROMPT
echo "Generating PHP code..."
ollama run "$MODEL" "Provide a PHP code solution for: $PROMPT. Include detailed comments and follow PHP best practices." > php_output.txt
cat php_output.txt
echo "Output saved to php_output.txt"
EOF
chmod +x ~/ollama_php_project/generate_php.sh
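
# Non-interactive alternative (illustrative prompt, not part of the setup):
#   ollama run codellama:13b "Write a PHP function that validates an email address" > php_output.txt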

# Instructions for the user
echo "Setup completed successfully!" | tee -a "$LOG_FILE"
echo "To use Ollama for PHP coding:" | tee -a "$LOG_FILE"
echo "1. Start Ollama server: ollama serve (run in background)" | tee -a "$LOG_FILE"
echo "2. Run interactive session: ollama run codellama:13b" | tee -a "$LOG_FILE"
echo "3. Generate PHP code: ~/ollama_php_project/generate_php.sh" | tee -a "$LOG_FILE"
echo "4. Check ~/ollama_php_project/test_auth.php for sample PHP code" | tee -a "$LOG_FILE"
echo "5. Use composer for PHP package management in ~/ollama_php_project/" | tee -a "$LOG_FILE"
echo "Log file: $LOG_FILE" | tee -a "$LOG_FILE"
echo "Note: Ollama runs offline after model download. Ensure ollama serve is running for CLI commands." | tee -a "$LOG_FILE"

echo "Installation finished at $(date)" | tee -a "$LOG_FILE"