pezhore@infosec.pub · 5 days ago

I’m doing that with Docker Compose in my homelab; it’s pretty neat!

    services:
      ollama:
        image: ollama/ollama
        container_name: ollama
        pull_policy: always
        tty: true
        restart: unless-stopped
        volumes:
          # bind mount so pulled models survive container recreation
          - /etc/ollama-docker/ollama:/root/.ollama
        ports:
          - "11434:11434"
        deploy:
          resources:
            reservations:
              devices:
                # pass GPU 0 through (needs the NVIDIA Container Toolkit on the host)
                - driver: nvidia
                  device_ids: ['0']
                  capabilities:
                    - gpu
      open-webui:
        # prebuilt image from GHCR, no local build needed
        image: ghcr.io/open-webui/open-webui:main
        container_name: open-webui
        restart: unless-stopped
        volumes:
          - /etc/ollama-docker/open-webui:/app/backend/data
        depends_on:
          - ollama
        ports:
          - "3000:8080"
        environment:
          # the UI reaches Ollama by service name over the compose network
          - 'OLLAMA_BASE_URL=http://ollama:11434/'
          - 'WEBUI_SECRET_KEY='
        extra_hosts:
          - host.docker.internal:host-gateway
        
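If anyone wants to copy this, roughly how I drive it (llama3.2 is just an example model; the nvidia-smi check assumes the NVIDIA Container Toolkit is set up on the host):

    # start both containers in the background
    docker compose up -d

    # sanity-check that the GPU made it into the ollama container
    docker exec -it ollama nvidia-smi

    # pull a model and hit the Ollama API directly
    docker exec -it ollama ollama pull llama3.2
    curl http://localhost:11434/api/generate \
      -d '{"model": "llama3.2", "prompt": "Hello", "stream": false}'

Open WebUI is then at http://localhost:3000 and picks up whatever models Ollama has pulled.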
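And if you’d rather hand the container every GPU instead of pinning device 0, the Compose spec also takes a count (same reservation block, just a variant):

    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]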