## Installing Ubuntu Server 22.04 LTS: A Structured Procedure

### Step 1: Installation Preparation

```bash
# Download Ubuntu Server 22.04.3 LTS
wget https://releases.ubuntu.com/22.04/ubuntu-22.04.3-live-server-amd64.iso

# Create a bootable USB (Linux)
sudo dd if=ubuntu-22.04.3-live-server-amd64.iso of=/dev/sdX bs=4M status=progress && sync
# or with Rufus (Windows): select "DD mode"
```

### Step 2: Customized Installation with Autoinstall (cloud-init)

```yaml
# meta-data
instance-id: cyberautonomy-server
local-hostname: cyberautonomy

# user-data
#cloud-config
autoinstall:
  version: 1
  early-commands:
    - sudo systemctl stop ssh
  locale: el_GR.UTF-8
  keyboard:
    layout: gr
  storage:
    layout:
      name: lvm
  identity:
    hostname: cyberautonomy
    username: admin
    password: "$6$rounds=4096$salt$hashed_password_here"
  ssh:
    install-server: true
    allow-pw: true
    authorized-keys:
      - ssh-ed25519 AAAAC3NzaC... user@host
  packages:
    - openssh-server
    - ufw
    - fail2ban
    - htop
    - tmux
    - vim
    - git
    - curl
    - wget
  user-data:
    disable_root: false
    timezone: Europe/Athens
```

### Step 3: Post-Installation Optimization Script

```bash
#!/bin/bash
# optimize-server.sh

# 1. System update
sudo apt update && sudo apt upgrade -y

# 2. Hostname and hosts file
sudo hostnamectl set-hostname cyberautonomy
sudo tee -a /etc/hosts << EOF
127.0.1.1 cyberautonomy.local cyberautonomy
EOF

# 3. Timezone and NTP
sudo timedatectl set-timezone Europe/Athens
sudo apt install -y chrony
sudo systemctl enable chrony

# 4. Swap for ARM devices (if needed)
if [ "$(uname -m)" = "aarch64" ]; then
    sudo fallocate -l 2G /swapfile
    sudo chmod 600 /swapfile
    sudo mkswap /swapfile
    sudo swapon /swapfile
    echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
fi

# 5. Kernel parameters for server workloads
sudo tee -a /etc/sysctl.conf << EOF
# Network optimization
net.core.rmem_max = 134217728
net.core.wmem_max = 134217728
net.ipv4.tcp_rmem = 4096 87380 134217728
net.ipv4.tcp_wmem = 4096 65536 134217728
net.ipv4.tcp_congestion_control = bbr
net.core.default_qdisc = fq

# Security hardening
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_synack_retries = 2
net.ipv4.conf.all.rp_filter = 1

# File system optimization
vm.swappiness = 10
vm.vfs_cache_pressure = 50
EOF
sudo sysctl -p

# 6. Resource limits
sudo tee -a /etc/security/limits.conf << EOF
* soft nofile 65536
* hard nofile 65536
* soft nproc 65536
* hard nproc 65536
EOF
```

### Raspberry Pi-Specific Configuration

`/boot/config.txt` optimizations:

```ini
# CPU and GPU
arm_boost=1
over_voltage=2
arm_freq=1800
gpu_freq=600

# Memory
gpu_mem=16
total_mem=1024

# I/O and Storage
dtparam=sd_overclock=100
dtparam=sd_poll_once
dtparam=i2c_arm=on
dtparam=spi=on

# Networking
dtparam=eth_led0=14
dtparam=eth_led1=14

# Power Management
force_turbo=0
avoid_warnings=2
```

Switching to SSD boot (replacing the microSD):

```bash
# 1. Copy the system to the SSD
sudo dd if=/dev/mmcblk0 of=/dev/sda bs=4M status=progress

# 2. Enable USB boot
echo program_usb_boot_mode=1 | sudo tee -a /boot/config.txt
sudo reboot

# 3. Verify
vcgencmd otp_dump | grep 17
# Should return: 17:3020000a
```
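The `password` field in the autoinstall `identity` block expects a SHA-512 crypt hash, not plain text. A minimal sketch for producing the hash and sanity-checking the `user-data` file before writing the installer media — assuming the `whois` package (which provides `mkpasswd`) and a recent `cloud-init` are available on the workstation:

```bash
# Generate a SHA-512 crypt hash to paste into the "password:" field
sudo apt install -y whois cloud-init
mkpasswd -m sha-512

# Validate the user-data file against the cloud-init schema before use
cloud-init schema --config-file user-data
```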
## 3.2 Offline Wikipedia: Kiwix — Detailed Implementation

### Kiwix Architecture and ZIM File Structure

ZIM file structure:

```text
wikipedia_en_all_maxi_2023-12.zim
├── METADATA
│   ├── Title: "Wikipedia English"
│   ├── Description: "Complete English Wikipedia"
│   ├── Date: 2023-12-01
│   └── Language: en
├── CONTENT
│   ├── Articles (HTML with embedded images)
│   ├── Images (JPEG/PNG/SVG)
│   ├── CSS/JavaScript
│   └── Search Index
└── INDEX
    ├── Full-text search (xapian)
    ├── Title list
    └── URL mapping
```

### Installing and Configuring kiwix-serve

Installation from source (latest version):

```bash
#!/bin/bash
# install-kiwix.sh

# Dependencies
sudo apt update
sudo apt install -y \
    libzim-dev libicu-dev libssl-dev \
    liblzma-dev libzstd-dev libgtest-dev \
    cmake build-essential pkg-config \
    git wget curl

# Download and compile kiwix-tools
cd /tmp
git clone https://github.com/kiwix/kiwix-tools.git
cd kiwix-tools
mkdir build && cd build
cmake .. \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_PREFIX=/usr/local \
    -DKIWIX_TOOLS_BUILD_ALL=ON
make -j$(nproc)
sudo make install

# Install kiwix-desktop (for management)
sudo apt install -y kiwix
```

Alternative: Docker installation

```dockerfile
# Dockerfile for a Kiwix server
FROM alpine:latest
RUN apk add --no-cache \
        libstdc++ \
        libgcc \
        libzim \
        xapian-core \
    && wget https://download.kiwix.org/release/kiwix-tools/kiwix-tools_linux-x86_64.tar.gz \
    && tar -xzf kiwix-tools_linux-x86_64.tar.gz \
    && mv kiwix-tools_*/* /usr/local/bin/ \
    && rm -rf kiwix-tools_*
VOLUME /data
EXPOSE 8080
CMD ["kiwix-serve", "--port=8080", "/data/*.zim"]
```

### Managing Multiple ZIM Files with a Library XML

Rather than passing individual ZIM paths, kiwix-serve can serve an entire collection through a single library file (`/var/kiwix/library.xml`); both the systemd unit below and the `update_library_xml` helper in the update script rely on it.
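The library file itself is easiest to build with `kiwix-manage`, which ships with kiwix-tools. A minimal sketch, assuming the ZIM files live under `/data/zim` as in the update script later in this section:

```bash
# Build (or extend) the library file from every ZIM under /data/zim
mkdir -p /var/kiwix
for zim in /data/zim/*.zim; do
    kiwix-manage /var/kiwix/library.xml add "$zim"
done

# List the books currently registered in the library
kiwix-manage /var/kiwix/library.xml show
```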
### Advanced kiwix-serve Settings

Systemd service configuration:

```ini
# /etc/systemd/system/kiwix.service
[Unit]
Description=Kiwix Server
After=network.target
Wants=network-online.target
RequiresMountsFor=/data

[Service]
Type=exec
User=kiwix
Group=kiwix
ExecStartPre=/usr/bin/mkdir -p /var/cache/kiwix
ExecStart=/usr/local/bin/kiwix-serve \
    --port=8080 \
    --address=0.0.0.0 \
    --threads=4 \
    --cache=2048 \
    --monitorLibrary \
    --library /var/kiwix/library.xml \
    --log /var/log/kiwix/access.log

# Performance tuning
Environment="KIWIX_MAX_ARTICLE_SIZE=20971520"
Environment="KIWIX_CACHE_SIZE=536870912"
Environment="KIWIX_WORKERS=8"

# Security
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ReadWritePaths=/var/cache/kiwix /var/log/kiwix /data

# Resource limits
LimitNOFILE=65536
LimitNPROC=65536

Restart=on-failure
RestartSec=5s

[Install]
WantedBy=multi-user.target
```

Nginx reverse proxy configuration:

```nginx
# /etc/nginx/sites-available/kiwix
server {
    listen 80;
    server_name wiki.local en.wikipedia.local;

    # Security headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;

    # Compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml+rss application/javascript application/json;

    # Cache static content
    location ~* \.(jpg|jpeg|png|gif|ico|css|js)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    # Kiwix proxy
    location / {
        proxy_pass http://127.0.0.1:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Buffering optimizations
        proxy_buffering on;
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;
        proxy_busy_buffers_size 8k;

        # Timeouts
        proxy_connect_timeout 30s;
        proxy_send_timeout 120s;
        proxy_read_timeout 120s;
    }

    # Search endpoint optimization
    location /search {
        proxy_pass http://127.0.0.1:8080/search;
        proxy_cache kiwix_cache;
        proxy_cache_valid 200 1h;
        proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
    }
}

# Cache zone definition
proxy_cache_path /var/cache/nginx/kiwix levels=1:2 keys_zone=kiwix_cache:10m max_size=1g inactive=24h use_temp_path=off;
```
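With the unit and site files installed at the paths shown above, wiring them together is a matter of reloading systemd and nginx. A short sketch, assuming the `kiwix` system user and the `wiki.local` name resolve locally:

```bash
# Register and start the kiwix-serve unit
sudo systemctl daemon-reload
sudo systemctl enable --now kiwix

# Enable the nginx site and verify the chain end to end
sudo ln -s /etc/nginx/sites-available/kiwix /etc/nginx/sites-enabled/kiwix
sudo nginx -t && sudo systemctl reload nginx
curl -I http://127.0.0.1:8080/   # kiwix-serve directly
curl -I http://wiki.local/       # through the reverse proxy
```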
### Managing and Updating ZIM Files

Automated download script:

```bash
#!/bin/bash
# update-zim-files.sh

BASE_URL="https://download.kiwix.org/zim"
DOWNLOAD_DIR="/data/zim"
LOG_FILE="/var/log/zim-update.log"
MAX_SIZE=200000000000  # 200GB max storage

# Update library.xml with a freshly downloaded ZIM
# (defined before the main loop so it is available when called)
update_library_xml() {
    local zim_file=$1
    local zim_path="$DOWNLOAD_DIR/$zim_file"

    # Extract metadata from the ZIM
    metadata=$(kiwix-manage "$zim_path" -x 2>/dev/null)

    xmlstarlet ed -L \
        -s "/library" -t elem -n "book" \
        -i "/library/book[last()]" -t attr -n "id" -v "${zim_file%.zim}" \
        -i "/library/book[last()]" -t attr -n "path" -v "$zim_path" \
        -i "/library/book[last()]" -t attr -n "title" -v "$(echo "$metadata" | grep '^Title:' | cut -d: -f2-)" \
        /var/kiwix/library.xml
}

# Catalogue of files to download
declare -A ZIM_FILES=(
    ["wikipedia_en_all_maxi"]="wikipedia_en_all_maxi"
    ["wiktionary_en_all"]="wiktionary_en_all"
    ["wikibooks_en_all"]="wikibooks_en_all"
    ["gutenberg_en_all"]="gutenberg_en_all"
    ["stack_exchange_ubuntu"]="stack_exchange/ubuntu.stackexchange.com"
)

# Create directories
mkdir -p "$DOWNLOAD_DIR"/{new,old}

# Check available disk space (df reports KiB; abort below ~50 GB free)
available_space=$(df "$DOWNLOAD_DIR" | awk 'NR==2 {print $4}')
if [ "$available_space" -lt 50000000 ]; then
    echo "$(date): CRITICAL: Insufficient disk space" | tee -a "$LOG_FILE"
    exit 1
fi

for prefix in "${!ZIM_FILES[@]}"; do
    echo "$(date): Processing $prefix" | tee -a "$LOG_FILE"

    # Find the most recent file
    latest_file=$(curl -s "$BASE_URL/${ZIM_FILES[$prefix]}/" | grep -oP "${prefix}_\d{4}-\d{2}\.zim" | sort -r | head -n1)

    if [ -z "$latest_file" ]; then
        echo "$(date): ERROR: Could not find file for $prefix" | tee -a "$LOG_FILE"
        continue
    fi

    current_file=$(ls "$DOWNLOAD_DIR"/"$prefix"_*.zim 2>/dev/null | head -n1)

    if [ -n "$current_file" ] && [[ "$current_file" == *"$latest_file" ]]; then
        echo "$(date): INFO: $prefix is up to date" | tee -a "$LOG_FILE"
        continue
    fi

    # Download the new file
    echo "$(date): Downloading $latest_file" | tee -a "$LOG_FILE"
    wget -q --show-progress -O "$DOWNLOAD_DIR/new/$latest_file" \
        "$BASE_URL/${ZIM_FILES[$prefix]}/$latest_file"

    if [ $? -eq 0 ]; then
        # Verify checksum
        wget -q -O "$DOWNLOAD_DIR/new/$latest_file.sha256" \
            "$BASE_URL/${ZIM_FILES[$prefix]}/$latest_file.sha256"

        cd "$DOWNLOAD_DIR/new"
        if sha256sum -c "$latest_file.sha256" 2>/dev/null; then
            # Rotate files into place
            [ -f "$current_file" ] && mv "$current_file" "$DOWNLOAD_DIR/old/"
            mv "$latest_file" "$DOWNLOAD_DIR/"

            # Update library.xml
            update_library_xml "$latest_file"

            echo "$(date): SUCCESS: Updated $prefix to $latest_file" | tee -a "$LOG_FILE"

            # Reload Kiwix if it is running
            if systemctl is-active --quiet kiwix; then
                systemctl reload kiwix
            fi
        else
            echo "$(date): ERROR: Checksum verification failed for $latest_file" | tee -a "$LOG_FILE"
            rm -f "$DOWNLOAD_DIR/new/$latest_file"
        fi
    fi
done

# Clean up old files (keep the two most recent)
find "$DOWNLOAD_DIR/old" -name "*.zim" -type f | sort -r | tail -n +3 | xargs rm -f
```
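The update script is meant to run unattended. One way to schedule it is a systemd timer, sketched below with heredocs in the style of the other scripts in this chapter — the unit names, the install path, and the monthly cadence are illustrative choices, not part of the original script:

```bash
# Create a oneshot service plus a monthly timer for the update script
sudo tee /etc/systemd/system/zim-update.service <<'EOF'
[Unit]
Description=Refresh ZIM files for kiwix-serve
Wants=network-online.target
After=network-online.target

[Service]
Type=oneshot
ExecStart=/usr/local/bin/update-zim-files.sh
EOF

sudo tee /etc/systemd/system/zim-update.timer <<'EOF'
[Unit]
Description=Monthly ZIM refresh

[Timer]
OnCalendar=monthly
Persistent=true
RandomizedDelaySec=6h

[Install]
WantedBy=timers.target
EOF

sudo systemctl daemon-reload
sudo systemctl enable --now zim-update.timer
```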
## 3.3 Offline Maps: OpenStreetMap & Tile Servers

### Tile Server Architecture Options

Table 3.2: Comparison of tile server solutions

| Solution | Type | RAM | Storage | Installation | Maintenance | Best suited for |
|---|---|---|---|---|---|---|
| TileServer-GL | Vector/Raster | 4GB+ | Moderate | Easy | Easy | Small regions |
| OpenMapTiles | Vector | 8GB+ | Large | Medium | Medium | Country-level |
| Mapnik/mod_tile | Raster | 2GB+ | Very large | Hard | Hard | Custom styling |
| Tegola | Vector | 2GB+ | Small | Easy | Easy | Real-time updates |
| Maperitive | Raster | 1GB | Small | Easy | Easy | Personal use |

### Full TileServer-GL Installation with Docker

Docker Compose configuration:

```yaml
# docker-compose.yml
version: '3.8'

services:
  postgis:
    image: postgis/postgis:15-3.3
    container_name: osm_postgis
    environment:
      POSTGRES_DB: gis
      POSTGRES_USER: osm
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    volumes:
      - postgis_data:/var/lib/postgresql/data
      - ./config/postgres.conf:/etc/postgresql/postgresql.conf
    ports:
      - "5432:5432"
    command: postgres -c config_file=/etc/postgresql/postgresql.conf
    restart: unless-stopped
    networks:
      - osm_network

  tileserver:
    image: maptiler/tileserver-gl
    container_name: tileserver_gl
    ports:
      - "8081:80"
    volumes:
      - ./data:/data
      - ./config/tileserver-config.yml:/data/config.yml
      - ./styles:/data/styles
    environment:
      - NODE_OPTIONS=--max-old-space-size=4096
    depends_on:
      - postgis
    restart: unless-stopped
    networks:
      - osm_network

networks:
  osm_network:
    driver: bridge

volumes:
  postgis_data:
```

PostgreSQL/PostGIS optimization configuration:

```conf
# config/postgres.conf
# Memory Settings
shared_buffers = 2GB
work_mem = 32MB
maintenance_work_mem = 512MB
effective_cache_size = 4GB

# WAL Settings
wal_level = minimal
max_wal_size = 2GB
min_wal_size = 1GB
checkpoint_timeout = 30min
checkpoint_completion_target = 0.9

# Parallel Processing
max_worker_processes = 4
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_parallel_maintenance_workers = 2

# OSM-specific optimizations
shared_preload_libraries = 'pg_stat_statements'
pg_stat_statements.track = all
```

TileServer-GL configuration:

```yaml
# config/tileserver-config.yml
options:
  paths:
    root: /data
    fonts: fonts
    sprites: sprites
    styles: styles
    mbtiles: mbtiles
  formatQuality:
    jpeg: 80
    webp: 90
  maxSize: 8192
  scale: 1
  png8: false
  # Cache settings
  staticMaxAge: 86400
  headers:
    Access-Control-Allow-Origin: "*"
    Cache-Control: "public, max-age=604800"

data:
  openstreetmap:
    mbtiles: /data/greece.mbtiles
    # or a PostGIS connection:
    # pghost: postgis
    # pgport: 5432
    # dbname: gis
    # user: osm
    # password: ${DB_PASSWORD}

styles:
  osm-bright:
    style: /data/styles/osm-bright/style.json
    serve_data: true
    tiles:
      - "http://tileserver/data/openstreetmap/{z}/{x}/{y}.pbf"
  basic:
    style: /data/styles/basic/style.json
    serve_data: false
```

### Creating Vector Tiles from OSM Data

Step 1: Downloading and processing OSM data

```bash
#!/bin/bash
# process-osm-data.sh

REGION="greece"
OSM_DATA="/data/osm"
PG_CONN="host=postgis port=5432 dbname=gis user=osm password=${DB_PASSWORD}"

# Download OSM data from Geofabrik
wget -P "$OSM_DATA" "https://download.geofabrik.de/europe/${REGION}-latest.osm.pbf"

# Install the required tools
sudo apt install -y osm2pgsql postgis postgresql-client

# Import into PostgreSQL/PostGIS
osm2pgsql \
    --create \
    --slim \
    --hstore \
    --multi-geometry \
    --number-processes 4 \
    --cache 2048 \
    --database "$PG_CONN" \
    --output-pgsql-schema osm \
    --style /usr/share/osm2pgsql/default.style \
    "${OSM_DATA}/${REGION}-latest.osm.pbf"

# Create indexes for optimization
psql "$PG_CONN" < ...
```

An accompanying `.env` file provides `DB_PASSWORD` to both the script and the Compose stack.

Client-side offline tile caching for the web map under `/var/www/maps` stores visited tiles in IndexedDB and serves them back through a service worker:

```javascript
// /var/www/maps/js/offline-maps.js
// Caches map tiles in IndexedDB so previously viewed areas remain available offline.
class OfflineTileCache {
    constructor(map, maxZoom = 16) {
        this.map = map;          // Leaflet map instance (assumed)
        this.maxZoom = maxZoom;  // do not pre-cache tiles beyond this zoom
        this.db = null;
        this.initDatabase().then((db) => { this.db = db; });
        // Cache whatever is on screen each time the user stops panning/zooming
        this.map.on('moveend', () => this.cacheVisibleTiles());
    }

    // Convert the current viewport to a tile-coordinate range for a given zoom
    getTileBounds(bounds, zoom) {
        const toTile = (lat, lng) => {
            const n = Math.pow(2, zoom);
            const x = Math.floor(((lng + 180) / 360) * n);
            const latRad = (lat * Math.PI) / 180;
            const y = Math.floor(
                ((1 - Math.log(Math.tan(latRad) + 1 / Math.cos(latRad)) / Math.PI) / 2) * n
            );
            return { x, y };
        };
        const nw = toTile(bounds.getNorth(), bounds.getWest());
        const se = toTile(bounds.getSouth(), bounds.getEast());
        return { min: { x: nw.x, y: nw.y }, max: { x: se.x, y: se.y } };
    }

    async cacheVisibleTiles() {
        const bounds = this.map.getBounds();
        const zoom = this.map.getZoom();
        if (zoom > this.maxZoom) return;

        const tileBounds = this.getTileBounds(bounds, zoom);
        for (let x = tileBounds.min.x; x <= tileBounds.max.x; x++) {
            for (let y = tileBounds.min.y; y <= tileBounds.max.y; y++) {
                await this.cacheTile(zoom, x, y);
            }
        }
    }

    async cacheTile(z, x, y) {
        const url = `/tiles/${z}/${x}/${y}.png`;
        try {
            const response = await fetch(url);
            if (response.ok) {
                const blob = await response.blob();
                await this.storeTile(z, x, y, blob);
            }
        } catch (error) {
            console.warn(`Failed to cache tile ${z}/${x}/${y}:`, error);
        }
    }

    async storeTile(z, x, y, blob) {
        return new Promise((resolve, reject) => {
            const transaction = this.db.transaction(['tiles'], 'readwrite');
            const store = transaction.objectStore('tiles');
            const tileKey = `${z}_${x}_${y}`;

            const request = store.put({
                key: tileKey,
                z: z,
                x: x,
                y: y,
                blob: blob,
                timestamp: Date.now()
            });

            request.onsuccess = () => resolve();
            request.onerror = () => reject(request.error);
        });
    }

    initDatabase() {
        return new Promise((resolve, reject) => {
            const request = indexedDB.open('OfflineMapTiles', 1);

            request.onupgradeneeded = (event) => {
                const db = event.target.result;
                if (!db.objectStoreNames.contains('tiles')) {
                    const store = db.createObjectStore('tiles', { keyPath: 'key' });
                    store.createIndex('coords', ['z', 'x', 'y'], { unique: true });
                    store.createIndex('timestamp', 'timestamp', { unique: false });
                }
            };

            request.onsuccess = (event) => resolve(event.target.result);
            request.onerror = (event) => reject(event.target.error);
        });
    }
}

// Service Worker for offline caching
// /var/www/maps/sw.js
self.addEventListener('install', (event) => {
    event.waitUntil(
        caches.open('offline-maps-v1').then((cache) => {
            return cache.addAll([
                '/',
                '/index.html',
                '/styles/main.css',
                '/js/offline-maps.js',
                '/images/tile-error.png',
                '/manifest.json'
            ]);
        })
    );
});

self.addEventListener('fetch', (event) => {
    // Intercept tile requests
    if (event.request.url.includes('/tiles/')) {
        event.respondWith(
            caches.match(event.request).then((response) => {
                return response || fetch(event.request).then((fetchResponse) => {
                    // Cache the tile for future use
                    return caches.open('offline-maps-v1').then((cache) => {
                        cache.put(event.request, fetchResponse.clone());
                        return fetchResponse;
                    });
                });
            })
        );
    }
});
```
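The Compose file above expects `/data/greece.mbtiles`, whereas `process-osm-data.sh` only imports the PBF into PostGIS; generating the `.mbtiles` itself is typically done with a separate tool such as tilemaker or the OpenMapTiles toolchain. Before that step it helps to inspect and, on low-power hardware, clip the raw extract. A sketch assuming `osmium-tool`; the bounding box (roughly Attica) is a placeholder to adjust:

```bash
# Install osmium-tool and inspect the Geofabrik extract
sudo apt install -y osmium-tool
osmium fileinfo /data/osm/greece-latest.osm.pbf

# Clip a smaller region to keep tile generation fast on a Raspberry Pi-class server
osmium extract --bbox 23.4,37.7,24.1,38.4 \
    /data/osm/greece-latest.osm.pbf \
    -o /data/osm/attica.osm.pbf
```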
## 3.4 Network Access & Interconnection

### Installing and Configuring the Autonomous Network

Complete network configuration: `setup-autonomous-network.sh` provisions the stack by writing four configuration files in sequence — `/etc/netplan/01-autonomous.yaml` (network interfaces), `/etc/hostapd/hostapd.conf` (Wi-Fi access point), `/etc/dnsmasq.conf` (DHCP and local DNS), and `/etc/unbound/unbound.conf` (local recursive DNS resolver).

### Portal Landing Page

The "Cyber-Autonomy Portal" landing page presents the server as a self-hosted knowledge hub with four service cards, each carrying an online/offline badge:

- 📚 **Wikipedia** — complete offline Wikipedia with 6+ million articles
- 🗺️ **Maps** — detailed OpenStreetMap with offline navigation
- 📖 **Library** — collection of eBooks, manuals, and guides
- 🔍 **Search** — unified search across all knowledge bases

A status footer shows live system statistics: uptime, CPU usage, memory, and storage.

### Unified Search with Meilisearch

Meilisearch configuration for cross-service search:

```yaml
# docker-compose.search.yml
version: '3.8'

services:
  meilisearch:
    image: getmeili/meilisearch:v1.5
    container_name: meilisearch
    ports:
      - "7700:7700"
    environment:
      - MEILI_MASTER_KEY=${MEILI_MASTER_KEY}
      - MEILI_ENV=production
      - MEILI_NO_ANALYTICS=true
      - MEILI_LOG_LEVEL=INFO
    volumes:
      - ./meilisearch/data:/meili_data
      - ./meilisearch/snapshots:/meili_snapshots
    networks:
      - cyber_network
    restart: unless-stopped

  search-indexer:
    build: ./search-indexer
    container_name: search_indexer
    volumes:
      - /data:/data:ro
      - ./search-indexer/config:/config
    environment:
      - MEILI_HOST=http://meilisearch:7700
      - MEILI_API_KEY=${MEILI_MASTER_KEY}
    depends_on:
      - meilisearch
    networks:
      - cyber_network
    restart: unless-stopped

  search-frontend:
    image: nginx:alpine
    container_name: search_frontend
    ports:
      - "8082:80"
    volumes:
      - ./search-frontend:/usr/share/nginx/html
      - ./search-frontend/nginx.conf:/etc/nginx/conf.d/default.conf
    networks:
      - cyber_network
    restart: unless-stopped

networks:
  cyber_network:
    driver: bridge
```
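The file bodies that `setup-autonomous-network.sh` writes are not reproduced here, but the access-point and DHCP/DNS pieces follow standard hostapd/dnsmasq syntax. A minimal sketch in the same heredoc style — the `wlan0` interface, the 10.10.0.0/24 address plan, the SSID/passphrase, and the unbound port are all assumptions to adapt:

```bash
# Wi-Fi access point (assumed interface, SSID, and passphrase)
sudo tee /etc/hostapd/hostapd.conf <<'EOF'
interface=wlan0
driver=nl80211
ssid=CyberAutonomy
hw_mode=g
channel=6
wpa=2
wpa_passphrase=change-this-passphrase
wpa_key_mgmt=WPA-PSK
rsn_pairwise=CCMP
EOF

# DHCP plus local DNS on the same interface (assumed address range)
sudo tee /etc/dnsmasq.conf <<'EOF'
interface=wlan0
dhcp-range=10.10.0.50,10.10.0.200,255.255.255.0,12h
domain=lan
address=/wiki.local/10.10.0.1
# Forward everything else to the local unbound resolver (assumed port)
server=127.0.0.1#5335
EOF

sudo systemctl restart hostapd dnsmasq
```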
Search indexer Python script:

```python
#!/usr/bin/env python3
# search-indexer/indexer.py

import json
import os
import sqlite3
import xml.etree.ElementTree as ET
from datetime import datetime
from pathlib import Path
from typing import Dict, List

import meilisearch
import psycopg2


class KnowledgeIndexer:
    def __init__(self, meili_host: str, meili_api_key: str):
        self.client = meilisearch.Client(meili_host, meili_api_key)
        self.data_path = Path("/data")

    def index_wikipedia(self):
        """Index Wikipedia ZIM file contents."""
        print("Indexing Wikipedia...")
        # This would require a ZIM-parsing library;
        # for demonstration we create a simple index.
        index = self.client.index("wikipedia")

        # Example documents
        documents = [
            {
                "id": "1",
                "title": "Mathematics",
                "content": "Mathematics is the study of numbers, shapes, and patterns.",
                "category": "Science",
                "source": "wikipedia",
                "url": "/A/Mathematics"
            },
            {
                "id": "2",
                "title": "History",
                "content": "History is the study of past events.",
                "category": "Humanities",
                "source": "wikipedia",
                "url": "/A/History"
            }
        ]
        index.add_documents(documents)

        # Configure searchable attributes
        index.update_searchable_attributes(["title", "content", "category"])

        # Configure ranking rules
        index.update_ranking_rules([
            "words", "typo", "proximity", "attribute", "sort", "exactness"
        ])

        print(f"Indexed {len(documents)} Wikipedia documents")

    def index_maps(self):
        """Index map features from the OSM database."""
        print("Indexing maps...")
        index = self.client.index("maps")

        # Connect to PostgreSQL/PostGIS
        conn = psycopg2.connect(
            host="postgis",
            database="gis",
            user="osm",
            password=os.getenv("DB_PASSWORD")
        )

        # Query important features
        with conn.cursor() as cur:
            cur.execute("""
                SELECT osm_id, name, ST_AsText(way) AS location, 'node' AS type
                FROM planet_osm_point
                WHERE name IS NOT NULL
                LIMIT 1000
            """)

            features = []
            for row in cur.fetchall():
                features.append({
                    "id": f"node_{row[0]}",
                    "name": row[1],
                    "location": row[2],
                    "type": row[3],
                    "source": "osm"
                })

        index.add_documents(features)
        print(f"Indexed {len(features)} map features")

    def index_library(self):
        """Index library documents."""
        print("Indexing library...")
        index = self.client.index("library")

        # Index eBooks (assuming a Calibre library)
        calibre_path = self.data_path / "calibre"
        if calibre_path.exists():
            documents = []
            for author_dir in calibre_path.iterdir():
                if author_dir.is_dir():
                    for book_dir in author_dir.iterdir():
                        if book_dir.is_dir():
                            # Read metadata
                            opf_file = book_dir / "metadata.opf"
                            if opf_file.exists():
                                tree = ET.parse(opf_file)
                                root = tree.getroot()

                                # Extract metadata
                                metadata = {}
                                for elem in root.iter():
                                    if elem.tag.endswith('title'):
                                        metadata['title'] = elem.text
                                    elif elem.tag.endswith('creator'):
                                        metadata['author'] = elem.text
                                    elif elem.tag.endswith('subject'):
                                        metadata['subject'] = elem.text

                                documents.append({
                                    "id": book_dir.name,
                                    "title": metadata.get('title', 'Unknown'),
                                    "author": metadata.get('author', 'Unknown'),
                                    "subject": metadata.get('subject', ''),
                                    "path": str(book_dir.relative_to(self.data_path)),
                                    "source": "library"
                                })

            if documents:
                index.add_documents(documents)
                print(f"Indexed {len(documents)} library documents")

    def create_unified_index(self):
        """Create a unified search index across all sources."""
        print("Creating unified index...")

        # Delete the existing index if it exists
        try:
            self.client.delete_index("unified")
        except Exception:
            pass

        # Create a new index
        index = self.client.create_index("unified", {'primaryKey': 'id'})

        # Collect documents from all sources
        all_documents = []

        # Wikipedia documents
        wiki_docs = self.client.index("wikipedia").get_documents()
        for doc in wiki_docs['results']:
            doc['category'] = 'wikipedia'
            all_documents.append(doc)

        # Map documents
        map_docs = self.client.index("maps").get_documents()
        for doc in map_docs['results']:
            doc['category'] = 'maps'
            all_documents.append(doc)

        # Library documents
        library_docs = self.client.index("library").get_documents()
        for doc in library_docs['results']:
            doc['category'] = 'library'
            all_documents.append(doc)

        # Add everything to the unified index
        index.add_documents(all_documents)

        # Configure unified search
        index.update_searchable_attributes([
            "title", "content", "name", "author", "subject"
        ])
        index.update_filterable_attributes(["category", "source"])

        print(f"Created unified index with {len(all_documents)} documents")

    def run(self):
        """Run the complete indexing process."""
        print("Starting knowledge base indexing...")
        self.index_wikipedia()
        self.index_maps()
        self.index_library()
        self.create_unified_index()
        print("Indexing completed successfully")


if __name__ == "__main__":
    indexer = KnowledgeIndexer(
        meili_host=os.getenv("MEILI_HOST", "http://meilisearch:7700"),
        meili_api_key=os.getenv("MEILI_API_KEY")
    )
    indexer.run()
```

Search frontend interface: the page served by the `search-frontend` container ("Unified Search – Cyber Autonomy") offers a single "🔍 Unified Knowledge Search" box with the tagline "Search across Wikipedia, Maps, and Library simultaneously" and filter controls for the individual sources.
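Once the indexer has run, the unified index can be queried directly over Meilisearch's REST API, which is also a quick way to confirm the filterable attributes took effect. The query text and filter value below are only examples:

```bash
# Search the unified index, restricted to library results
curl -s -X POST 'http://localhost:7700/indexes/unified/search' \
  -H "Authorization: Bearer ${MEILI_MASTER_KEY}" \
  -H 'Content-Type: application/json' \
  --data '{"q": "mathematics", "filter": "category = library", "limit": 5}' | jq .
```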
### System Monitoring and Oversight

Complete monitoring stack with Prometheus and Grafana:

```yaml
# docker-compose.monitoring.yml
version: '3.8'

services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - ./monitoring/alerts.yml:/etc/prometheus/alerts.yml
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    networks:
      - monitoring_network
    restart: unless-stopped

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD}
      - GF_INSTALL_PLUGINS=grafana-piechart-panel
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/dashboards:/etc/grafana/provisioning/dashboards
      - ./monitoring/datasources:/etc/grafana/provisioning/datasources
    networks:
      - monitoring_network
    restart: unless-stopped

  node-exporter:
    image: prom/node-exporter:latest
    container_name: node_exporter
    ports:
      - "9100:9100"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    networks:
      - monitoring_network
    restart: unless-stopped

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: cadvisor
    ports:
      - "8083:8080"   # host port 8083 to avoid clashing with kiwix-serve on 8080
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
    privileged: true
    devices:
      - /dev/kmsg
    networks:
      - monitoring_network
    restart: unless-stopped

networks:
  monitoring_network:
    driver: bridge

volumes:
  prometheus_data:
  grafana_data:
```

Prometheus configuration:

```yaml
# monitoring/prometheus.yml
global:
  scrape_interval: 15s
  evaluation_interval: 15s

alerting:
  alertmanagers:
    - static_configs:
        - targets: []

rule_files:
  - "alerts.yml"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node'
    static_configs:
      - targets: ['node-exporter:9100']

  - job_name: 'cadvisor'
    static_configs:
      - targets: ['cadvisor:8080']

  - job_name: 'services'
    static_configs:
      - targets:
          - 'wiki:8080'
          - 'maps:80'
          - 'search:7700'
    metrics_path: /health
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: blackbox-exporter:9115

  - job_name: 'docker'
    static_configs:
      - targets: ['docker:9323']
```

Grafana dashboard configuration:

```json
{
  "dashboard": {
    "title": "Cyber Autonomy Server",
    "panels": [
      {
        "title": "System Health",
        "type": "stat",
        "targets": [
          { "expr": "up", "legendFormat": "{{instance}}" }
        ]
      },
      {
        "title": "CPU Usage",
        "type": "graph",
        "targets": [
          {
            "expr": "100 - (avg by (instance) (rate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100)",
            "legendFormat": "CPU Usage"
          }
        ]
      },
      {
        "title": "Memory Usage",
        "type": "graph",
        "targets": [
          {
            "expr": "node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes",
            "legendFormat": "Used Memory"
          }
        ]
      },
      {
        "title": "Disk Usage",
        "type": "piechart",
        "targets": [
          { "expr": "node_filesystem_size_bytes{mountpoint=\"/data\"}", "legendFormat": "Total" },
          { "expr": "node_filesystem_avail_bytes{mountpoint=\"/data\"}", "legendFormat": "Available" }
        ]
      },
      {
        "title": "Service Status",
        "type": "table",
        "targets": [
          { "expr": "probe_success", "legendFormat": "{{instance}}" }
        ]
      }
    ]
  }
}
```
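The `services` scrape job rewrites its targets to `blackbox-exporter:9115`, but no such container appears in the Compose stack. A minimal way to fill that gap is to run the exporter alongside the stack — the network name below assumes the Compose project directory is `monitoring`, and the bundled default configuration's `http_2xx` module is used for the probe check:

```bash
# Attach a blackbox exporter to the network created by docker compose
docker run -d --name blackbox-exporter \
  --network monitoring_monitoring_network \
  -p 9115:9115 \
  --restart unless-stopped \
  prom/blackbox-exporter:latest

# Quick manual probe against the Kiwix front end
curl -s 'http://localhost:9115/probe?module=http_2xx&target=http://wiki.local' | grep probe_success
```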
Alerting rules:

```yaml
# monitoring/alerts.yml
groups:
  - name: cyber_autonomy_alerts
    rules:
      - alert: ServiceDown
        expr: up == 0
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Service {{ $labels.instance }} is down"
          description: "{{ $labels.instance }} has been down for more than 1 minute"

      - alert: HighCPUUsage
        expr: 100 - (avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "High CPU usage on {{ $labels.instance }}"
          description: "CPU usage is above 80% for 5 minutes"

      - alert: LowDiskSpace
        expr: (node_filesystem_avail_bytes{mountpoint="/data"} / node_filesystem_size_bytes{mountpoint="/data"}) * 100 < 10
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Low disk space on {{ $labels.instance }}"
          description: "Disk space is below 10% on {{ $labels.mountpoint }}"

      - alert: HighMemoryUsage
        expr: (1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)) * 100 > 90
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "High memory usage on {{ $labels.instance }}"
          description: "Memory usage is above 90% for 5 minutes"
```
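Prometheus only loads `alerts.yml` if it parses cleanly, so it is worth validating both files before restarting the container. `promtool` ships inside the Prometheus image, and `--web.enable-lifecycle` (set above) allows a live reload:

```bash
# Validate the alert rules and the main configuration inside the container
docker exec prometheus promtool check rules /etc/prometheus/alerts.yml
docker exec prometheus promtool check config /etc/prometheus/prometheus.yml

# Ask the running server to reload its configuration
curl -X POST http://localhost:9090/-/reload
```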
### Automated Backup and Disaster Recovery

Complete backup solution:

```bash
#!/bin/bash
# backup-solution.sh

BACKUP_DIR="/backup"
DATA_DIR="/data"
RETENTION_DAYS=30
DATE=$(date +%Y%m%d_%H%M%S)
LOG_FILE="/var/log/backup.log"

# Database backups
backup_databases() {
    echo "$(date): Starting database backup" >> "$LOG_FILE"

    # PostgreSQL backup
    if docker ps | grep -q postgis; then
        docker exec postgis pg_dumpall -U osm | gzip > "$BACKUP_DIR/db/postgres_full_$DATE.sql.gz"
        echo "$(date): PostgreSQL backup completed" >> "$LOG_FILE"
    fi

    # Meilisearch backup
    if docker ps | grep -q meilisearch; then
        docker exec meilisearch curl -X POST "http://localhost:7700/snapshots" \
            -H "Authorization: Bearer ${MEILI_MASTER_KEY}"
        echo "$(date): Meilisearch snapshot triggered" >> "$LOG_FILE"
    fi
}

# Configuration backups
backup_configurations() {
    echo "$(date): Starting configuration backup" >> "$LOG_FILE"

    # Docker configurations
    tar -czf "$BACKUP_DIR/config/docker_$DATE.tar.gz" \
        /etc/docker/ \
        /var/lib/docker/volumes/ 2>/dev/null

    # Service configurations
    tar -czf "$BACKUP_DIR/config/services_$DATE.tar.gz" \
        /etc/nginx/ \
        /etc/hostapd/ \
        /etc/dnsmasq.conf \
        /etc/unbound/ 2>/dev/null

    echo "$(date): Configuration backup completed" >> "$LOG_FILE"
}

# Data backup with rsync and hardlinks
backup_data() {
    echo "$(date): Starting data backup" >> "$LOG_FILE"

    # Create a daily backup, hardlinked against the previous one
    BACKUP_TARGET="$BACKUP_DIR/data/daily_$DATE"
    PREVIOUS_BACKUP=$(ls -td "$BACKUP_DIR"/data/daily_* 2>/dev/null | head -1)

    mkdir -p "$BACKUP_TARGET"

    if [ -n "$PREVIOUS_BACKUP" ]; then
        # Hardlink unchanged files against the previous backup
        rsync -av --delete \
            --link-dest="$PREVIOUS_BACKUP" \
            "$DATA_DIR/" "$BACKUP_TARGET/"
    else
        # Full backup
        rsync -av "$DATA_DIR/" "$BACKUP_TARGET/"
    fi

    echo "$(date): Data backup completed" >> "$LOG_FILE"
}

# Verification
verify_backup() {
    echo "$(date): Starting backup verification" >> "$LOG_FILE"
    local errors=0

    # Check backup integrity
    for backup in "$BACKUP_DIR"/data/daily_*/; do
        if [ -d "$backup" ]; then
            # Verify checksums
            if ! find "$backup" -type f -name "*.zim" -exec sha256sum {} + | \
                 sha256sum -c --quiet 2>/dev/null; then
                echo "$(date): ERROR: Checksum verification failed for $backup" >> "$LOG_FILE"
                ((errors++))
            fi
        fi
    done

    # Test database restore archive
    if [ -f "$BACKUP_DIR/db/postgres_full_$DATE.sql.gz" ]; then
        if ! gunzip -t "$BACKUP_DIR/db/postgres_full_$DATE.sql.gz"; then
            echo "$(date): ERROR: Database backup is corrupted" >> "$LOG_FILE"
            ((errors++))
        fi
    fi

    if [ $errors -eq 0 ]; then
        echo "$(date): Backup verification successful" >> "$LOG_FILE"
        return 0
    else
        echo "$(date): CRITICAL: Backup verification failed with $errors errors" >> "$LOG_FILE"
        return 1
    fi
}

# Clean up old backups
cleanup_backups() {
    echo "$(date): Cleaning up old backups" >> "$LOG_FILE"

    # Remove old daily backups
    find "$BACKUP_DIR/data" -name "daily_*" -type d -mtime +$RETENTION_DAYS -exec rm -rf {} +

    # Remove old database backups
    find "$BACKUP_DIR/db" -name "*.sql.gz" -mtime +$RETENTION_DAYS -delete

    # Remove old configuration backups
    find "$BACKUP_DIR/config" -name "*.tar.gz" -mtime +$RETENTION_DAYS -delete

    echo "$(date): Cleanup completed" >> "$LOG_FILE"
}

# Offsite backup
offsite_backup() {
    echo "$(date): Starting offsite backup" >> "$LOG_FILE"

    # Encrypt the backup (tar first, since gpg cannot encrypt a directory)
    tar -czf - "$BACKUP_DIR/data/daily_$DATE" | \
        gpg --batch --yes --passphrase "$ENCRYPTION_PASS" -c \
            -o "$BACKUP_DIR/data/daily_$DATE.tar.gz.gpg"

    # Upload to remote storage (example with rclone)
    if command -v rclone &> /dev/null; then
        rclone copy "$BACKUP_DIR/data/daily_$DATE.tar.gz.gpg" \
            "remote:cyberautonomy/backups/" \
            --progress
        echo "$(date): Offsite backup completed" >> "$LOG_FILE"
    else
        echo "$(date): WARNING: rclone not installed, skipping offsite backup" >> "$LOG_FILE"
    fi
}

# Main execution
main() {
    echo "=========================================" >> "$LOG_FILE"
    echo "$(date): Starting backup procedure" >> "$LOG_FILE"

    # Create the backup directory structure
    mkdir -p "$BACKUP_DIR"/{data,db,config}

    # Perform backups
    backup_databases
    backup_configurations
    backup_data

    # Verify backups
    if verify_backup; then
        # Offsite copy
        offsite_backup

        # Clean up old backups
        cleanup_backups

        echo "$(date): Backup procedure completed successfully" >> "$LOG_FILE"
    else
        echo "$(date): CRITICAL: Backup verification failed, aborting procedure" >> "$LOG_FILE"
        exit 1
    fi

    echo "=========================================" >> "$LOG_FILE"
}

# Execute main function
main
```

Automated backup with a systemd timer:

```ini
# /etc/systemd/system/backup.service
[Unit]
Description=Cyber Autonomy Backup Service
Requires=network-online.target
After=network-online.target

[Service]
Type=oneshot
ExecStart=/usr/local/bin/backup-solution.sh
User=backup
Group=backup
EnvironmentFile=/etc/backup.conf
StandardOutput=journal
StandardError=journal

[Install]
WantedBy=multi-user.target
```

```ini
# /etc/systemd/system/backup.timer
[Unit]
Description=Daily backup timer
Requires=backup.service

[Timer]
OnCalendar=daily
Persistent=true
RandomizedDelaySec=3600

[Install]
WantedBy=timers.target
```
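The service unit reads its secrets from `/etc/backup.conf` via `EnvironmentFile=`. A sketch of what that file needs to provide, based on the variables the script references — the values are placeholders:

```bash
# /etc/backup.conf — environment for backup.service (chmod 600, owned by root)
MEILI_MASTER_KEY=replace-with-the-meilisearch-master-key
ENCRYPTION_PASS=replace-with-a-long-random-passphrase
```

Enable the schedule with `sudo systemctl enable --now backup.timer`.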
### Security Hardening and Access Control

Complete security hardening script:

```bash
#!/bin/bash
# security-hardening.sh

# 1. System updates and basic security tooling
sudo apt update && sudo apt upgrade -y
sudo apt install -y unattended-upgrades fail2ban rkhunter

# 2. Firewall configuration
sudo ufw default deny incoming
sudo ufw default allow outgoing
sudo ufw allow 2222/tcp comment 'SSH (custom port, see step 3)'
sudo ufw allow 80/tcp comment 'HTTP'
sudo ufw allow 443/tcp comment 'HTTPS'
sudo ufw allow 53/udp comment 'DNS'
sudo ufw enable

# 3. SSH hardening
sudo sed -i 's/^#\?Port .*/Port 2222/' /etc/ssh/sshd_config
sudo sed -i 's/^#\?PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config
sudo sed -i 's/^#\?PasswordAuthentication .*/PasswordAuthentication no/' /etc/ssh/sshd_config
sudo sed -i 's/^#\?PubkeyAuthentication .*/PubkeyAuthentication yes/' /etc/ssh/sshd_config
sudo sed -i 's/^#\?ClientAliveInterval .*/ClientAliveInterval 300/' /etc/ssh/sshd_config
sudo systemctl restart ssh
```

Finally, the script writes the Fail2ban jail configuration to `/etc/fail2ban/jail.local`.
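A minimal `jail.local` consistent with the SSH settings above — the jail selection, ban time, and retry thresholds are assumptions rather than values taken from the original script:

```bash
sudo tee /etc/fail2ban/jail.local <<'EOF'
[DEFAULT]
bantime  = 1h
findtime = 10m
maxretry = 5

[sshd]
enabled = true
port    = 2222
EOF

sudo systemctl enable --now fail2ban
sudo fail2ban-client status sshd
```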