ehfd committed 4 years ago
parent commit 7e58d44948
3 files changed with 37 additions and 11 deletions

  1. Dockerfile (+3, -0)
  2. README.md (+17, -6)
  3. bootstrap.sh (+17, -5)

+ 3 - 0
Dockerfile

@@ -60,10 +60,13 @@ RUN apt-get update && apt-get install -y \
         python3 \
         python3-numpy \
         x11-xkb-utils \
+        x11-xserver-utils \
         xauth \
         xinit \
         xfonts-base \
         xkb-data \
+        libxrandr-dev \
+        xorg-dev \
         libxtst6 \
         libxtst6:i386 \
         mlocate \
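
The three added packages bring in `xrandr` (from `x11-xserver-utils`) plus the XRandR and Xorg development headers. A minimal sketch of how one might confirm they land in a built image, assuming the image's default entrypoint can be overridden:

```
docker run --rm --entrypoint bash ehfd/nvidia-glx-desktop:latest -c \
  'command -v xrandr && dpkg -s x11-xserver-utils libxrandr-dev xorg-dev | grep "^Status"'
```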

+ 17 - 6
README.md

@@ -2,14 +2,13 @@
 
 MATE Desktop container supporting GLX/Vulkan for NVIDIA GPUs by spawning its own
 X Server and noVNC WebSocket interface instead of using the host X server. Does
-not require `/tmp/.X11-unix` sockets set up.
-
-Note: Requires **privileged** mode because of how Xorg works.
+not require `/tmp/.X11-unix` host sockets or host configuration.
 
 Use
 [docker-nvidia-egl-desktop](https://github.com/ehfd/docker-nvidia-egl-desktop)
 for a MATE Desktop container that directly accesses NVIDIA GPUs without using an
-X Server nor privileged mode (without Vulkan support).
+X Server or privileged mode and is compatible with Kubernetes (without Vulkan
+support).
 
 Corresponding container toolkit on the host for allocating GPUs should be set
 up. Container startup should take some time as it automatically installs NVIDIA
@@ -18,8 +17,20 @@ drivers.
 
 Connect to the spawned noVNC WebSocket instance with a browser in port 5901, no
 VNC client required (password for the default user is 'vncpasswd').
 
-For Docker use this configuration:
+This configuration allows multiple GPU desktops per node, but the container
+runs in potentially unsafe privileged mode:
+
+```
+docker run --gpus 1 --privileged -it -e SIZEW=1920 -e SIZEH=1080 -e SHARED=TRUE -e VNCPASS=vncpasswd -p 5901:5901 ehfd/nvidia-glx-desktop:latest
+```
+
+Without privileged mode, only one GPU desktop can be used per node because of
+Xorg limitations.
+
+Note: Requires a **/dev/ttyN** (N >= 8) device to be provisioned. Check out
+[k8s-hostdev-plugin](https://github.com/bluebeach/k8s-hostdev-plugin) for
+provisioning this in Kubernetes clusters without privileged access.
 
 ```
-docker run --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=0 --privileged -it -e SIZEW=1920 -e SIZEH=1080 -e SHARED=TRUE -e VNCPASS=vncpasswd -p 5901:5901 ehfd/nvidia-glx-desktop:latest
+docker run --gpus 1 --device=/dev/tty63:rw -it -e SIZEW=1920 -e SIZEH=1080 -e SHARED=TRUE -e VNCPASS=vncpasswd -p 5901:5901 ehfd/nvidia-glx-desktop:latest
```
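
As a sketch of the Kubernetes path the added note points to, one could request both a GPU and a host tty through extended resources; the `hostdev.k8s.io/dev_tty63` resource name below is an assumption about how k8s-hostdev-plugin exposes `/dev/tty63` and should be verified against that plugin's documentation:

```
# Hypothetical pod spec; the hostdev resource name is an assumption.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: nvidia-glx-desktop
spec:
  containers:
  - name: desktop
    image: ehfd/nvidia-glx-desktop:latest
    env:
    - name: SIZEW
      value: "1920"
    - name: SIZEH
      value: "1080"
    - name: SHARED
      value: "TRUE"
    - name: VNCPASS
      value: "vncpasswd"
    ports:
    - containerPort: 5901
    resources:
      limits:
        nvidia.com/gpu: 1
        hostdev.k8s.io/dev_tty63: 1  # assumed resource name from k8s-hostdev-plugin
EOF
```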

+ 17 - 5
bootstrap.sh

@@ -28,14 +28,20 @@ echo "user:$VNCPASS" | sudo chpasswd
 
 sudo sed -i "s/allowed_users=console/allowed_users=anybody/;$ a needs_root_rights=yes" /etc/X11/Xwrapper.config
 
-# If NVIDIA_VISIBLE_DEVICES is empty or all set GPU_SELECT to first GPU visible
 if [ "$NVIDIA_VISIBLE_DEVICES" == "all" ]; then
   export GPU_SELECT=$(sudo nvidia-smi --query-gpu=uuid --format=csv | sed -n 2p)
 elif [ -z "$NVIDIA_VISIBLE_DEVICES" ]; then
   export GPU_SELECT=$(sudo nvidia-smi --query-gpu=uuid --format=csv | sed -n 2p)
-# Else set GPU_SELECT to first GPU in NVIDIA_VISIBLE_DEVICES
 else
   export GPU_SELECT=$(sudo nvidia-smi --id=$(echo "$NVIDIA_VISIBLE_DEVICES" | cut -d ',' -f1) --query-gpu=uuid --format=csv | sed -n 2p)
+  if [ -z "$GPU_SELECT" ]; then
+    export GPU_SELECT=$(sudo nvidia-smi --query-gpu=uuid --format=csv | sed -n 2p)
+  fi
+fi
+
+if [ -z "$GPU_SELECT" ]; then
+  echo "No NVIDIA GPUs detected. Exiting."
+  exit 1
 fi
 
 if ! sudo nvidia-smi --id="$GPU_SELECT" -q | grep -q "Tesla"; then
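
For context on the `sed -n 2p` selection above: nvidia-smi's CSV output begins with a header row, so line 2 holds the first visible GPU's UUID. A sketch with placeholder UUIDs:

```
$ nvidia-smi --query-gpu=uuid --format=csv
uuid
GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
GPU-yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy
```

If the requested `--id` matches nothing, the output contains only the header, the pipeline yields an empty string, and the fallback and exit paths added in this hunk take over.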
@@ -46,16 +52,22 @@ HEX_ID=$(sudo nvidia-smi --query-gpu=pci.bus_id --id="$GPU_SELECT" --format=csv
 IFS=":." ARR_ID=($HEX_ID)
 unset IFS
 BUS_ID=PCI:$((16#${ARR_ID[1]})):$((16#${ARR_ID[2]})):$((16#${ARR_ID[3]}))
-sudo nvidia-xconfig --virtual="${SIZEW}x${SIZEH}" --depth="$CDEPTH" --mode="${SIZEW}x${SIZEH}" --allow-empty-initial-configuration --no-use-edid-dpi --busid="$BUS_ID" --only-one-x-screen "$DISPLAYSTRING"
+sudo nvidia-xconfig --virtual="${SIZEW}x${SIZEH}" --depth="$CDEPTH" --mode="${SIZEW}x${SIZEH}" --allow-empty-initial-configuration --no-use-edid-dpi --busid="$BUS_ID" --only-one-x-screen --no-xinerama "$DISPLAYSTRING"
 
 if [ "x$SHARED" == "xTRUE" ]; then
   export SHARESTRING="-shared"
 fi
 
-Xorg :0 &
+shopt -s extglob
+for TTY in $(ls -1 /dev/tty+([0-9]) | sort -rV); do
+  if [ -w "$TTY" ]; then
+    Xorg vt"$(echo "$TTY" | grep -Eo '[0-9]+$')" :0 &
+    break
+  fi
+done
 sleep 1
 
-x11vnc -display :0 -passwd "$VNCPASS" -forever -xkb -rfbport 5900 "$SHARESTRING" &
+x11vnc -display :0 -passwd "$VNCPASS" -forever -repeat -xkb -rfbport 5900 "$SHARESTRING" &
 sleep 1
 
 /opt/noVNC/utils/launch.sh --vnc localhost:5900 --listen 5901 &
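
For context on the `BUS_ID` arithmetic above: nvidia-smi reports the PCI bus ID in hexadecimal (`domain:bus:device.function`), while Xorg's `BusID` expects decimal fields. A worked example with a hypothetical bus ID, mirroring the script's own splitting and `16#` base conversion:

```
HEX_ID="00000000:0B:00.0"  # hypothetical nvidia-smi output
IFS=":." ARR_ID=($HEX_ID)  # splits into ("00000000" "0B" "00" "0")
unset IFS
echo "PCI:$((16#${ARR_ID[1]})):$((16#${ARR_ID[2]})):$((16#${ARR_ID[3]}))"
# prints: PCI:11:0:0  (hex 0B -> decimal 11)
```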