ehfd 4 years ago
parent
commit
f0a330a0bb
4 changed files with 30 additions and 35 deletions
  1. Dockerfile (+2 -2)
  2. README.md (+6 -16)
  3. bootstrap.sh (+22 -12)
  4. xgl.yaml (+0 -5)

+ 2 - 2
Dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM nvidia/opengl:1.2-glvnd-devel-ubuntu20.04
 
 LABEL maintainer "https://github.com/ehfd"
 
@@ -26,7 +26,7 @@ ENV LC_ALL en_US.UTF-8
 
 # https://gitlab.com/nvidia/container-images/driver/-/blob/master/ubuntu20.04/Dockerfile
 RUN dpkg --add-architecture i386 && \
-    apt-get update && apt-get install -o APT::Immediate-Configure=false -y --no-install-recommends \
+    apt-get update && apt-get install -y --no-install-recommends \
         apt-utils \
         build-essential \
         ca-certificates \

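The switch from stock `ubuntu:20.04` to the glvnd-enabled NVIDIA OpenGL base is what brings the GLX/EGL dispatch libraries into the image. A quick way to confirm the loaders are actually present in the new base; a minimal sketch, assuming Docker can pull the image (the grep pattern is only illustrative):

```
# List the linker cache of the new base image and look for the glvnd loaders
docker run --rm nvidia/opengl:1.2-glvnd-devel-ubuntu20.04 \
  sh -c "ldconfig -p | grep -E 'libGLX|libEGL|libOpenGL'"
```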
+ 6 - 16
README.md

@@ -2,13 +2,14 @@
 
 MATE Desktop container supporting GLX/Vulkan for NVIDIA GPUs by spawning its own
 X Server and noVNC WebSocket interface instead of using the host X server. Does
-not require `/tmp/.X11-unix` host sockets or any non-conventional/dangerous host
-setup.
+not require `/tmp/.X11-unix` sockets to be set up.
+
+Note: Requires **privileged** mode because of how Xorg works.
 
 Use
 [docker-nvidia-egl-desktop](https://github.com/ehfd/docker-nvidia-egl-desktop)
 for a MATE Desktop container that directly accesses NVIDIA GPUs without using an
-X Server (without Vulkan support).
+X Server or privileged mode (without Vulkan support).
 
 Corresponding container toolkit on the host for allocating GPUs should be set
 up. Container startup should take some time as it automatically installs NVIDIA
@@ -17,19 +18,8 @@ drivers.
 Connect to the spawned noVNC WebSocket instance with a browser in port 5901, no
 VNC client required (password for the default user is 'vncpasswd').
 
-Note: Requires access to at least one **/dev/ttyX** device. Check out
-[k8s-hostdev-plugin](https://github.com/bluebeach/k8s-hostdev-plugin) for
-provisioning this in Kubernetes clusters without privileged access.
-
-For Docker this configuration is tested to work but the container will have
-potentially unsafe privileged access:
-
-```
-docker run --gpus 1 --privileged -it -e SIZEW=1920 -e SIZEH=1080 -e SHARED=TRUE -e VNCPASS=vncpasswd -p 5901:5901 ehfd/nvidia-glx-desktop:latest
-```
-
-The below may also work without privileged access but is untested:
+For Docker, use this configuration:
 
 ```
-docker run --gpus 1 --device=/dev/tty0:rw -it -e SIZEW=1920 -e SIZEH=1080 -e SHARED=TRUE -e VNCPASS=vncpasswd -p 5901:5901 ehfd/nvidia-glx-desktop:latest
+docker run --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=0 --privileged -it -e SIZEW=1920 -e SIZEH=1080 -e SHARED=TRUE -e VNCPASS=vncpasswd -p 5901:5901 ehfd/nvidia-glx-desktop:latest
 ```
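Once the container is up, the noVNC endpoint can be sanity-checked from the host before opening a browser; a minimal sketch, assuming the `-p 5901:5901` mapping above (`localhost` and the `vnc.html` path follow noVNC defaults and are assumptions here):

```
# Expect an HTTP status code from the noVNC web server on the published port
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:5901/
# then browse to http://localhost:5901/vnc.html and log in with VNCPASS
```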

+ 22 - 12
bootstrap.sh

@@ -1,9 +1,4 @@
 #!/bin/bash
-set -e
-
-trap "echo TRAPed signal" HUP INT QUIT KILL TERM
-
-echo "user:$VNCPASS" | sudo chpasswd
 
 
 # Install NVIDIA drivers, including X graphic drivers by omitting --x-{prefix,module-path,library-path,sysconfig-path}
 # Install NVIDIA drivers, including X graphic drivers by omitting --x-{prefix,module-path,library-path,sysconfig-path}
 export DRIVER_VERSION=$(head -n1 </proc/driver/nvidia/version | awk '{ print $8 }')
 export DRIVER_VERSION=$(head -n1 </proc/driver/nvidia/version | awk '{ print $8 }')
@@ -25,17 +20,33 @@ sudo ./nvidia-installer --silent \
 sudo rm -rf /tmp/NVIDIA*
 cd ~
 
+set -e
+
+trap "echo TRAPed signal" HUP INT QUIT KILL TERM
+
+echo "user:$VNCPASS" | sudo chpasswd
+
 sudo sed -i "s/allowed_users=console/allowed_users=anybody/;$ a needs_root_rights=yes" /etc/X11/Xwrapper.config
 
-if ! sudo nvidia-smi --id="$(echo "$NVIDIA_VISIBLE_DEVICES" | cut -d ',' -f1)" -q | grep -q "Tesla"; then
+# If NVIDIA_VISIBLE_DEVICES is "all" or empty, set GPU_SELECT to the first visible GPU
+if [ "$NVIDIA_VISIBLE_DEVICES" == "all" ]; then
+  export GPU_SELECT=$(sudo nvidia-smi --query-gpu=uuid --format=csv | sed -n 2p)
+elif [ -z "$NVIDIA_VISIBLE_DEVICES" ]; then
+  export GPU_SELECT=$(sudo nvidia-smi --query-gpu=uuid --format=csv | sed -n 2p)
+# Otherwise set GPU_SELECT to the first GPU listed in NVIDIA_VISIBLE_DEVICES
+else
+  export GPU_SELECT=$(sudo nvidia-smi --id=$(echo "$NVIDIA_VISIBLE_DEVICES" | cut -d ',' -f1) --query-gpu=uuid --format=csv | sed -n 2p)
+fi
+
+if ! sudo nvidia-smi --id="$GPU_SELECT" -q | grep -q "Tesla"; then
   DISPLAYSTRING="--use-display-device=None"
 fi
 
-HEX_ID=$(sudo nvidia-smi --query-gpu=pci.bus_id --id="$(echo "$NVIDIA_VISIBLE_DEVICES" | cut -d ',' -f1)" --format=csv | sed -n 2p)
+HEX_ID=$(sudo nvidia-smi --query-gpu=pci.bus_id --id="$GPU_SELECT" --format=csv | sed -n 2p)
 IFS=":." ARR_ID=($HEX_ID)
 IFS=":." ARR_ID=($HEX_ID)
 unset IFS
 unset IFS
 BUS_ID=PCI:$((16#${ARR_ID[1]})):$((16#${ARR_ID[2]})):$((16#${ARR_ID[3]}))
 BUS_ID=PCI:$((16#${ARR_ID[1]})):$((16#${ARR_ID[2]})):$((16#${ARR_ID[3]}))
-sudo nvidia-xconfig --virtual="${SIZEW}x${SIZEH}" --depth="$CDEPTH" --allow-empty-initial-configuration --enable-all-gpus --no-use-edid-dpi --busid="$BUS_ID" --only-one-x-screen "$DISPLAYSTRING"
+sudo nvidia-xconfig --virtual="${SIZEW}x${SIZEH}" --depth="$CDEPTH" --mode="${SIZEW}x${SIZEH}" --allow-empty-initial-configuration --no-use-edid-dpi --busid="$BUS_ID" --only-one-x-screen "$DISPLAYSTRING"
 
 if [ "x$SHARED" == "xTRUE" ]; then
   export SHARESTRING="-shared"
@@ -44,7 +55,7 @@ fi
 shopt -s extglob
 for TTY in /dev/tty+([0-9]); do
   if [ -w "$TTY" ]; then
-    Xorg tty"$(echo "$TTY" | grep -Eo '[0-9]+$')" :0 &
+    Xorg :0 &
     break
   fi
 done
@@ -57,7 +68,7 @@ sleep 1
 sleep 1
 
 export DISPLAY=:0
-UUID_CUT=$(sudo nvidia-smi --query-gpu=uuid --id="$(echo "$NVIDIA_VISIBLE_DEVICES" | cut -d ',' -f1)" --format=csv | sed -n 2p | cut -c 5-)
+UUID_CUT=$(sudo nvidia-smi --query-gpu=uuid --id="$GPU_SELECT" --format=csv | sed -n 2p | cut -c 5-)
 if vulkaninfo | grep "$UUID_CUT" | grep -q ^; then
   VK=0
   while true; do
@@ -69,11 +80,10 @@ if vulkaninfo | grep "$UUID_CUT" | grep -q ^; then
     VK=$((VK + 1))
   done
 else
-  echo "Vulkan not available for the current GPU."
+  echo "Vulkan is not available for the current GPU."
 fi
 
 mate-session &
-sleep 1
 
 pulseaudio --start
 
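The rewritten bootstrap flow is: pick one GPU (honoring `NVIDIA_VISIBLE_DEVICES`), read its PCI bus ID from `nvidia-smi`, and convert that hex ID into the decimal `PCI:bus:device:function` string that Xorg's `BusID` option expects. A standalone sketch of the same conversion, assuming `nvidia-smi` is on the PATH (the example hex value in the comment is illustrative):

```
#!/bin/bash
# Select a GPU UUID the same way bootstrap.sh does: first visible GPU
# when NVIDIA_VISIBLE_DEVICES is unset or "all", else the first listed ID.
# --format=csv prints a header row, hence the `sed -n 2p` for the value.
if [ -z "$NVIDIA_VISIBLE_DEVICES" ] || [ "$NVIDIA_VISIBLE_DEVICES" = "all" ]; then
  GPU_SELECT=$(nvidia-smi --query-gpu=uuid --format=csv | sed -n 2p)
else
  GPU_SELECT=$(nvidia-smi --id="${NVIDIA_VISIBLE_DEVICES%%,*}" --query-gpu=uuid --format=csv | sed -n 2p)
fi

# nvidia-smi reports the bus ID in hex, e.g. "00000000:01:00.0",
# while Xorg wants decimal, e.g. "PCI:1:0:0".
HEX_ID=$(nvidia-smi --id="$GPU_SELECT" --query-gpu=pci.bus_id --format=csv | sed -n 2p)
IFS=':.' read -r _ BUS DEV FN <<< "$HEX_ID"
echo "PCI:$((16#$BUS)):$((16#$DEV)):$((16#$FN))"
```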

+ 0 - 5
xgl.yaml

@@ -55,8 +55,6 @@ spec:
           name: xgl-root-vol
         - mountPath: /dev/shm
           name: dshm
-        - mountPath: /dev/tty0
-          name: tty
       volumes:
       - name: xgl-cache-vol
         emptyDir: {}
@@ -69,6 +67,3 @@ spec:
       - name: dshm
         emptyDir:
           medium: Memory
-      - name: tty
-        hostPath:
-          path: /dev/tty0