
Fixes and documentation improvements

ehfd 3 years ago
parent
commit
5fef2bb4e7
5 changed files with 96 additions and 31 deletions
  1. Dockerfile (+17 -11)
  2. README.md (+28 -11)
  3. entrypoint.sh (+40 -4)
  4. selkies-gstreamer-entrypoint.sh (+10 -5)
  5. xgl.yml (+1 -0)

+ 17 - 11
Dockerfile

@@ -7,10 +7,13 @@ LABEL maintainer "https://github.com/ehfd,https://github.com/danisla"
 
 ARG UBUNTU_RELEASE
 ARG CUDA_VERSION
-# Make all NVIDIA GPUs visible, but we want to manually install drivers
+# Make all NVIDIA GPUs visible by default
 ARG NVIDIA_VISIBLE_DEVICES=all
+# Use noninteractive mode to skip confirmation when installing packages
 ARG DEBIAN_FRONTEND=noninteractive
+# All NVIDIA driver capabilities should preferably be used, check `NVIDIA_DRIVER_CAPABILITIES` inside the container if things do not work
 ENV NVIDIA_DRIVER_CAPABILITIES all
+# System defaults that should not be changed
 ENV DISPLAY :0
 ENV PULSE_SERVER 127.0.0.1:4713
 ENV XDG_RUNTIME_DIR /tmp
@@ -30,7 +33,7 @@ ENV WEBRTC_ENABLE_RESIZE false
 ENV ENABLE_AUDIO true
 ENV ENABLE_BASIC_AUTH true
 
-# Install locales to prevent errors
+# Install locales to prevent Xorg errors
 RUN apt-get clean && \
     apt-get update && apt-get install --no-install-recommends -y locales && \
     rm -rf /var/lib/apt/lists/* && \
@@ -39,7 +42,7 @@ ENV LANG en_US.UTF-8
 ENV LANGUAGE en_US:en
 ENV LC_ALL en_US.UTF-8
 
-# Install Xorg, Xfce Desktop, and others
+# Install Xorg, Xfce4 desktop environment, and other utility packages
 RUN dpkg --add-architecture i386 && \
     apt-get update && apt-get install --no-install-recommends -y \
         software-properties-common \
@@ -185,15 +188,18 @@ RUN dpkg --add-architecture i386 && \
         xfce4-weather-plugin \
         xfce4-whiskermenu-plugin \
         xfce4-xkb-plugin && \
+    # Install LibreOffice with the recommended packages
     apt-get install -y libreoffice && \
+    # Prevent dialogs at desktop environment start
     cp -rf /etc/xdg/xfce4/panel/default.xml /etc/xdg/xfce4/xfconf/xfce-perchannel-xml/xfce4-panel.xml && \
+    # Install Vulkan packages
     if [ "${UBUNTU_RELEASE}" = "18.04" ]; then apt-get install --no-install-recommends -y vulkan-utils; else apt-get install --no-install-recommends -y vulkan-tools; fi && \
-    # Support libva and VA-API through NVIDIA VDPAU
+    # Support decoding from libva or VA-API through NVIDIA VDPAU
     curl -fsSL -o /tmp/vdpau-va-driver.deb "https://launchpad.net/~saiarcot895/+archive/ubuntu/chromium-dev/+files/vdpau-va-driver_0.7.4-6ubuntu2~ppa1~18.04.1_amd64.deb" && apt-get install --no-install-recommends -y /tmp/vdpau-va-driver.deb && rm -rf /tmp/* && \
     rm -rf /var/lib/apt/lists/*
 
-# Wine, Winetricks, and PlayOnLinux, comment out the below lines to disable
-ARG WINE_BRANCH=devel
+# Wine, Winetricks, Lutris, and PlayOnLinux; this process must be consistent with https://wiki.winehq.org/Ubuntu
+ARG WINE_BRANCH=staging
 RUN if [ "${UBUNTU_RELEASE}" = "18.04" ]; then add-apt-repository ppa:cybermax-dexter/sdl2-backport; fi && \
     mkdir -pm755 /etc/apt/keyrings && curl -fsSL -o /etc/apt/keyrings/winehq-archive.key "https://dl.winehq.org/wine-builds/winehq.key" && \
     curl -fsSL -o "/etc/apt/sources.list.d/winehq-$(grep VERSION_CODENAME= /etc/os-release | cut -d= -f2).sources" "https://dl.winehq.org/wine-builds/ubuntu/dists/$(grep VERSION_CODENAME= /etc/os-release | cut -d= -f2)/winehq-$(grep VERSION_CODENAME= /etc/os-release | cut -d= -f2).sources" && \
@@ -209,14 +215,13 @@ RUN if [ "${UBUNTU_RELEASE}" = "18.04" ]; then add-apt-repository ppa:cybermax-d
     chmod 755 /usr/bin/winetricks && \
     curl -fsSL -o /usr/share/bash-completion/completions/winetricks "https://raw.githubusercontent.com/Winetricks/winetricks/master/src/winetricks.bash-completion"
 
-# Install latest selkies-gstreamer (https://github.com/selkies-project/selkies-gstreamer) build, Python application, and web application
+# Install latest selkies-gstreamer (https://github.com/selkies-project/selkies-gstreamer) build, Python application, and web application; this should be consistent with the selkies-gstreamer documentation
 RUN apt-get update && apt-get install --no-install-recommends -y \
         build-essential \
         python3-pip \
         python3-dev \
         python3-gi \
         python3-setuptools \
-        python3-tk \
         python3-wheel \
         tzdata \
         sudo \
@@ -253,7 +258,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
     curl -fsSL "https://github.com/selkies-project/selkies-gstreamer/releases/download/v${SELKIES_VERSION}/selkies-gstreamer-web-v${SELKIES_VERSION}.tgz" | tar -zxf - && \
     cd /usr/local/cuda/lib64 && sudo find . -maxdepth 1 -type l -name "*libnvrtc.so.*" -exec sh -c 'ln -sf $(basename {}) libnvrtc.so' \;
 
-# Install latest noVNC web interface for fallback
+# Install latest noVNC web interface and x11vnc for fallback
 RUN apt-get update && apt-get install --no-install-recommends -y \
         autoconf \
         automake \
@@ -286,9 +291,9 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
     ln -s /opt/noVNC/vnc.html /opt/noVNC/index.html && \
     git clone https://github.com/novnc/websockify /opt/noVNC/utils/websockify
 
-# Add custom packages below this comment, or use FROM in a new container and replace entrypoint.sh or supervisord.conf
+# Add custom packages below this comment, or use FROM in a new container and replace entrypoint.sh or supervisord.conf, and set ENTRYPOINT to /usr/bin/supervisord
 
-# Create user with password ${PASSWD}
+# Create user with password ${PASSWD} and assign adequate groups
 RUN apt-get update && apt-get install --no-install-recommends -y \
         sudo && \
     rm -rf /var/lib/apt/lists/* && \
@@ -300,6 +305,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
     echo "user:${PASSWD}" | chpasswd && \
     ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
 
+# Copy scripts and configurations used to start the container
 COPY entrypoint.sh /etc/entrypoint.sh
 RUN chmod 755 /etc/entrypoint.sh
 COPY selkies-gstreamer-entrypoint.sh /etc/selkies-gstreamer-entrypoint.sh

+ 28 - 11
README.md

@@ -32,7 +32,7 @@ docker run --gpus 1 -it --tmpfs /dev/shm:rw -e TZ=UTC -e SIZEW=1920 -e SIZEH=108
 ```
 > NOTES: The container tags available are `latest` and `20.04` for Ubuntu 20.04 and `18.04` for Ubuntu 18.04. Replace all instances of `mypasswd` with your desired password. `BASIC_AUTH_PASSWORD` will default to `PASSWD` if unspecified. The container must not be run in privileged mode.
 
-Change `WEBRTC_ENCODER` to `x264enc`, `vp8enc`, or `vp9enc` when using the selkies-gstreamer interface if your GPU doesn't support `H.264 (AVCHD)` under the `NVENC - Encoding` section in NVIDIA's [Video Encode and Decode GPU Support Matrix](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new).
+Change `WEBRTC_ENCODER` to `x264enc`, `vp8enc`, or `vp9enc` when using the selkies-gstreamer interface if your GPU does not support `H.264 (AVCHD)` under the `NVENC - Encoding` section in NVIDIA's [Video Encode and Decode GPU Support Matrix](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new).
 
 2. Connect to the web server with a browser on port 8080. You may also separately configure a reverse proxy to this port for external connectivity.
 > NOTES: Additional configurations and environment variables for the selkies-gstreamer WebRTC HTML5 interface are listed in lines that start with `parser.add_argument` within the [selkies-gstreamer main script](https://github.com/selkies-project/selkies-gstreamer/blob/master/src/selkies_gstreamer/__main__.py).
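
As a hedged sketch, the encoder override can be passed as an environment variable at run time; the image reference, port publishing, and password below are placeholders rather than the exact command shown above:

```bash
# Sketch only: run with a software encoder when the GPU lacks H.264 NVENC support.
# <image:tag> stands for the container image used in the command above.
docker run --gpus 1 -it --tmpfs /dev/shm:rw \
  -e PASSWD=mypasswd \
  -e WEBRTC_ENCODER=x264enc \
  -p 8080:8080 \
  <image:tag>
```
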
@@ -55,7 +55,7 @@ kubectl create -f xgl.yml
 ```
 > NOTES: The container tags available are `latest` and `20.04` for Ubuntu 20.04 and `18.04` for Ubuntu 18.04. `BASIC_AUTH_PASSWORD` will default to `PASSWD` if unspecified.
 
-Change `WEBRTC_ENCODER` to `x264enc`, `vp8enc`, or `vp9enc` when using the selkies-gstreamer WebRTC interface if your GPU doesn't support `H.264 (AVCHD)` under the `NVENC - Encoding` section in NVIDIA's [Video Encode and Decode GPU Support Matrix](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new).
+Change `WEBRTC_ENCODER` to `x264enc`, `vp8enc`, or `vp9enc` when using the selkies-gstreamer WebRTC interface if your GPU does not support `H.264 (AVCHD)` under the `NVENC - Encoding` section in NVIDIA's [Video Encode and Decode GPU Support Matrix](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new).
 
 3. Connect to the web server spawned at port 8080. You may configure the ingress endpoint or reverse proxy that your Kubernetes cluster provides to this port for external connectivity.
 > NOTES: Additional configurations and environment variables for the selkies-gstreamer WebRTC HTML5 interface are listed in lines that start with `parser.add_argument` within the [selkies-gstreamer main script](https://github.com/selkies-project/selkies-gstreamer/blob/master/src/selkies_gstreamer/__main__.py).
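
As a quick check before an ingress is configured, the pod's port can be forwarded locally; this is a sketch and the pod name `xgl` is an assumption (verify with `kubectl get pods`):

```bash
# Sketch only: forward local port 8080 to the pod created from xgl.yml.
kubectl port-forward pod/xgl 8080:8080
```
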
@@ -168,17 +168,23 @@ kubectl create secret generic turn-password --from-literal=turn-password=MY_TURN
 
 ### Troubleshooting
 
-#### The container doesn't work.
+#### The container does not work.
 
-Check that the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) is properly configured in the host. After that, check the environment variable `NVIDIA_DRIVER_CAPABILITIES` after starting a shell interface inside the container. `NVIDIA_DRIVER_CAPABILITIES` should be set to `all`, or include a comma-separated list of `compute` (requirement for CUDA and OpenCL, or for the [selkies-gstreamer](https://github.com/selkies-project/selkies-gstreamer) WebRTC remote desktop interface), `utility` (requirement for `nvidia-smi` and NVML), `graphics` (requirement for OpenGL and part of the requirement for Vulkan), `video` (required for encoding or decoding videos using NVIDIA GPUs, or for the [selkies-gstreamer](https://github.com/selkies-project/selkies-gstreamer) WebRTC remote desktop interface), `display` (the other requirement for Vulkan), and optionally `compat32` if you use Wine or 32-bit graphics applications.
+Check that the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) is properly configured in the host. After that, check the environment variable `NVIDIA_DRIVER_CAPABILITIES` after starting a shell interface inside the container.
+
+`NVIDIA_DRIVER_CAPABILITIES` should be set to `all`, or include a comma-separated list of `compute` (requirement for CUDA and OpenCL, or for the [selkies-gstreamer](https://github.com/selkies-project/selkies-gstreamer) WebRTC remote desktop interface), `utility` (requirement for `nvidia-smi` and NVML), `graphics` (requirement for OpenGL and part of the requirement for Vulkan), `video` (required for encoding or decoding videos using NVIDIA GPUs, or for the [selkies-gstreamer](https://github.com/selkies-project/selkies-gstreamer) WebRTC remote desktop interface), `display` (the other requirement for Vulkan), and optionally `compat32` if you use Wine or 32-bit graphics applications.
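
A minimal sketch of such a check (the container name and image reference are placeholders):

```bash
# Sketch only: inspect the capabilities actually exposed inside the running container.
docker exec -it <container> bash -c 'echo "$NVIDIA_DRIVER_CAPABILITIES"; nvidia-smi'
# If capabilities are missing, they can also be overridden explicitly at container creation.
docker run --gpus 1 -it -e NVIDIA_DRIVER_CAPABILITIES=all <image:tag>
```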
 
 If you checked everything here, scroll down.
 
-#### The container doesn't work if an existing GUI or X server is running in the host outside the container. / I want to use `--privileged` mode or `--cap-add` to my containers.
+#### I want to share one GPU with multiple containers to run GUI workloads.
+
+Because of restrictions in Xorg, it is not possible to share one GPU among multiple Xorg servers running in different containers. Use [docker-nvidia-egl-desktop](https://github.com/ehfd/docker-nvidia-egl-desktop) if you intend to do this.
+
+#### The container does not work if an existing GUI, desktop environment, or X server is running in the host outside the container. / I want to use this container in `--privileged` mode or with `--cap-add` and do not want other containers to interfere.
 
-In order to use an X server on the host for your monitor with one GPU, and then provision other GPUs for the containers, you must change your `/etc/X11/xorg.conf` configurations.
+In order to use an X server on the host for your monitor with one GPU, and provision the other GPUs to the containers, you must change the `/etc/X11/xorg.conf` configuration of the host.
 
-First, use `sudo nvidia-xconfig --no-probe-all-gpus --busid=$BUS_ID --only-one-x-screen` to generate `/etc/X11/xorg.conf` where `BUS_ID` is generated with the below script. Set `GPU_SELECT` to the ID of the specific GPU you want to provision from `nvidia-smi`.
+First, use `sudo nvidia-xconfig --no-probe-all-gpus --busid=$BUS_ID --only-one-x-screen` to generate `/etc/X11/xorg.conf` where `BUS_ID` is generated with the below script. Set `GPU_SELECT` to the ID (from `nvidia-smi`) of the specific GPU you want to provision.
 
 ```
 HEX_ID=$(nvidia-smi --query-gpu=pci.bus_id --id="$GPU_SELECT" --format=csv | sed -n 2p)
@@ -187,7 +193,7 @@ unset IFS
 BUS_ID=PCI:$((16#${ARR_ID[1]})):$((16#${ARR_ID[2]})):$((16#${ARR_ID[3]}))
 ```
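
Putting the two steps together, a worked sketch (the GPU index `0` is an assumption; pick the GPU you reserve for the host from `nvidia-smi -L`):

```bash
# Sketch: compute BUS_ID for the chosen GPU and generate /etc/X11/xorg.conf with it.
GPU_SELECT=0  # assumed index; set to the GPU you want to provision for the host X server
HEX_ID=$(nvidia-smi --query-gpu=pci.bus_id --id="$GPU_SELECT" --format=csv | sed -n 2p)
IFS=":." ARR_ID=($HEX_ID)
unset IFS
BUS_ID=PCI:$((16#${ARR_ID[1]})):$((16#${ARR_ID[2]})):$((16#${ARR_ID[3]}))
sudo nvidia-xconfig --no-probe-all-gpus --busid="$BUS_ID" --only-one-x-screen
```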
 
-Then, edit `/etc/X11/xorg.conf` and add the following to the end. If you want to use the containers in privileged mode, add this section to the `/etc/X11/xorg.conf` file of all containers as well.
+Then, edit the `/etc/X11/xorg.conf` file of your host outside the container and add the below snippet to the end of the file. If you want to use containers in `--privileged` mode or with `--cap-add`, add the snippet to the `/etc/X11/xorg.conf` files of all other containers running an Xorg server as well (it has already been added for this container). The exact file location may vary if not using the NVIDIA graphics driver.
 
 ```
 Section "ServerFlags"
@@ -195,6 +201,12 @@ Section "ServerFlags"
 EndSection
 ```
 
+The below command adds the above snippet automatically. The exact file location may vary if not using the NVIDIA graphics driver.
+
+```bash
+echo -e "Section \"ServerFlags\"\n    Option \"AutoAddGPU\" \"false\"\nEndSection" | sudo tee -a /etc/X11/xorg.conf > /dev/null
+```
+
 [Reference](https://man.archlinux.org/man/extra/xorg-server/xorg.conf.d.5.en)
 
 If you restart your OS or the Xorg server, you will now be able to use one GPU for your host X server and your real monitor, and use the rest of the GPUs for the containers.
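
One common way to restart the host X server on Ubuntu after this change, assuming a systemd-managed display manager such as GDM or LightDM is in use (a full reboot works as well):

```bash
# Assumption: the host runs a display manager registered as display-manager.service.
sudo systemctl restart display-manager
```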
@@ -205,7 +217,7 @@ Then, you must avoid the GPU of which you are using for your host X server. Use
 
 Make sure that the `NVIDIA_DRIVER_CAPABILITIES` environment variable is set to `all`, or includes both `graphics` and `display`. The `display` capability is especially crucial to Vulkan; despite its name, the container still starts without `display`, with no noticeable issues other than Vulkan not working.
 
-#### The container doesn't work if I set the resolution above 1920 x 1200 or 2560 x 1600 in 60 hz.
+#### The container does not work if I set the resolution above 1920 x 1200 or 2560 x 1600 in 60 hz.
 
 ##### Short answer
 
@@ -213,8 +225,13 @@ If your GPU is a consumer or professional GPU, change the `VIDEO_PORT` environme
 
 ##### Long answer
 
-The container simulates the GPU being virtually plugged into a physical DVI-D/HDMI/DisplayPort digital video interface in consumer and professional GPUs with the `ConnectedMonitor` NVIDIA driver option. The container uses virtualized DVI-D ports for this purpose in Datacenter (Tesla) GPUs. The ports to be used should **only** be connected with an actual monitor when the user wants the remote desktop screen to be shown on that monitor. If you want to show the remote desktop screen spawned by the container in a physical monitor, connect the monitor and set `VIDEO_PORT` to the the video interface identifier that is connected to the monitor. Manually specify a video interface identifier that is not connected to a monitor in `VIDEO_PORT` if you have a physical monitor connected and want to show the screen to the monitor. `VIDEO_PORT` identifiers and their connection states can be obtained by typing `xrandr -q` when the `DISPLAY` environment variable is set to the number of the spawned X server display (for example `:0`). As an alternative, you may set `VIDEO_PORT` to `none` (which effectively sets `--use-display-device=None`), but you must use borderless window instead of fullscreen, and this may lead to quite a lot of applications not starting because the `RANDR` extension is not available in the X server.
-> NOTES: Do not start two or more X servers for a single GPU. Use a separate GPU (or use Xvfb/Xdummy/XVnc without hardware acceleration to use no GPUs) if you need a host X server unaffiliated with containers, and do not make the GPU available to the container runtime.
+The container simulates the GPU being plugged into a physical DVI-D/HDMI/DisplayPort digital video interface in consumer and professional GPUs with the `ConnectedMonitor` NVIDIA driver option. The container uses virtualized DVI-D ports for this purpose in Datacenter (Tesla) GPUs.
+
+The ports to be used should **only** be connected to an actual monitor if the user wants the remote desktop screen to be shown on that monitor. If you want to show the remote desktop screen spawned by the container on a physical monitor, connect the monitor and set `VIDEO_PORT` to the video interface identifier that is connected to the monitor. If not, avoid the video interface identifier that is connected to the monitor.
+
+`VIDEO_PORT` identifiers and their connection states can be obtained by typing `xrandr -q` when the `DISPLAY` environment variable is set to the number of the spawned X server display (for example `:0`). As an alternative, you may set `VIDEO_PORT` to `none` (which effectively sets `--use-display-device=None`), but you must use a borderless window instead of fullscreen, and this may lead to many applications not starting because the `RANDR` extension is not available in the X server.
+
+> NOTES: Do not start two or more X servers for a single GPU. Use a separate GPU (or use Xvfb/Xdummy/Xvnc without hardware acceleration to use no GPUs at all) if you need a host X server unaffiliated with containers, and do not make the GPU available to the container runtime.
 
 Since this container simulates the GPU being virtually plugged into a physical monitor while it actually does not, make sure the resolutions specified with the environment variables `SIZEW` and `SIZEH` are within the maximum size supported by the GPU. The environment variable `VIDEO_PORT` can override which video port is used (defaults to `DFP`, the first interface detected in the driver). Therefore, setting `VIDEO_PORT` to an unplugged DisplayPort (for example numbered like `DP-0`, `DP-1`, and so on) is recommended for resolutions above 1920 x 1200 at 60 hz, because some driver restrictions are applied when the default is set to an unplugged physical DVI-D or HDMI port. The maximum size that should work in all cases is 1920 x 1200 at 60 hz, mainly for when the default `VIDEO_PORT` identifier `DFP` is not set to DisplayPort. Screen sizes over 1920 x 1200 at 60 hz but under the maximum display size supported by each port (per GPU specifications) are possible if the port is set to DisplayPort (whether physically connected or disconnected), or when a physical monitor or dummy plug has been connected to any other type of display port (including DVI-D and HDMI). If all GPUs in the cluster have at least one DisplayPort and they are not physically connected to any monitors, simply setting `VIDEO_PORT` to `DP-0` is recommended (but this is not the default because of legacy GPU compatibility reasons).
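
A hedged sketch of checking the identifiers and then recreating the container with an unplugged DisplayPort; the container name, image reference, and resolution are placeholders:

```bash
# Sketch only: list video ports and their connection states as seen by the X server.
docker exec -it <container> bash -c 'DISPLAY=:0 xrandr -q'
# Recreate the container with an unplugged DisplayPort for resolutions above 1920 x 1200 at 60 hz.
docker run --gpus 1 -it -e VIDEO_PORT=DP-0 -e SIZEW=2560 -e SIZEH=1440 <image:tag>
```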
 

+ 40 - 4
entrypoint.sh

@@ -2,25 +2,38 @@
 
 trap "echo TRAPed signal" HUP INT QUIT KILL TERM
 
+# Make user directory owned by the user in case it is not
 sudo chown user:user /home/user
+# Change operating system password to environment variable
 echo "user:$PASSWD" | sudo chpasswd
-sudo rm -rf /tmp/.X*
+# Remove directories to make sure the desktop environment starts
+sudo rm -rf /tmp/.X* ~/.cache ~/.config/xfce4
+# Change time zone from environment variable
 sudo ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" | sudo tee /etc/timezone > /dev/null
+# Add LibreOffice to library path
 export LD_LIBRARY_PATH="/usr/lib/libreoffice/program:${LD_LIBRARY_PATH}"
 
+# This symbolic link enables running Xorg inside a container with `-sharevts`
 sudo ln -snf /dev/ptmx /dev/tty7
+# Start DBus without systemd
 sudo /etc/init.d/dbus start
+# Configure environment for selkies-gstreamer utilities
 source /opt/gstreamer/gst-env
 
-# Install NVIDIA drivers including X graphic drivers
+# Install NVIDIA userspace driver components including X graphic libraries
 if ! command -v nvidia-xconfig &> /dev/null; then
+  # Driver version is provided by the kernel through the container toolkit
   export DRIVER_VERSION=$(head -n1 </proc/driver/nvidia/version | awk '{print $8}')
   cd /tmp
+  # If the version differs, the new installer will overwrite the existing components
   if [ ! -f "/tmp/NVIDIA-Linux-x86_64-$DRIVER_VERSION.run" ]; then
+    # Check multiple sources in order to probe both consumer and datacenter driver versions
     curl -fsL -O "https://us.download.nvidia.com/XFree86/Linux-x86_64/$DRIVER_VERSION/NVIDIA-Linux-x86_64-$DRIVER_VERSION.run" || curl -fsL -O "https://us.download.nvidia.com/tesla/$DRIVER_VERSION/NVIDIA-Linux-x86_64-$DRIVER_VERSION.run" || { echo "Failed NVIDIA GPU driver download. Exiting."; exit 1; }
   fi
+  # Extract installer before installing
   sudo sh "NVIDIA-Linux-x86_64-$DRIVER_VERSION.run" -x
   cd "NVIDIA-Linux-x86_64-$DRIVER_VERSION"
+  # Run installation without the kernel modules and host components
   sudo ./nvidia-installer --silent \
                     --no-kernel-module \
                     --install-compat32-libs \
@@ -32,18 +45,25 @@ if ! command -v nvidia-xconfig &> /dev/null; then
   sudo rm -rf /tmp/NVIDIA* && cd ~
 fi
 
+# Allow starting Xorg from a pseudoterminal instead of strictly on a tty console
+if [ ! -f /etc/X11/Xwrapper.config ]; then
+    echo -e "allowed_users=anybody\nneeds_root_rights=yes" | sudo tee /etc/X11/Xwrapper.config > /dev/null
+fi
 if grep -Fxq "allowed_users=console" /etc/X11/Xwrapper.config; then
   sudo sed -i "s/allowed_users=console/allowed_users=anybody/;$ a needs_root_rights=yes" /etc/X11/Xwrapper.config
 fi
 
+# Remove existing Xorg configuration
 if [ -f "/etc/X11/xorg.conf" ]; then
   sudo rm -f "/etc/X11/xorg.conf"
 fi
 
+# Get first GPU device if all devices are available or `NVIDIA_VISIBLE_DEVICES` is not set
 if [ "$NVIDIA_VISIBLE_DEVICES" == "all" ]; then
   export GPU_SELECT=$(sudo nvidia-smi --query-gpu=uuid --format=csv | sed -n 2p)
 elif [ -z "$NVIDIA_VISIBLE_DEVICES" ]; then
   export GPU_SELECT=$(sudo nvidia-smi --query-gpu=uuid --format=csv | sed -n 2p)
+# Get first GPU device out of the visible devices in other situations
 else
   export GPU_SELECT=$(sudo nvidia-smi --id=$(echo "$NVIDIA_VISIBLE_DEVICES" | cut -d ',' -f1) --query-gpu=uuid --format=csv | sed -n 2p)
   if [ -z "$GPU_SELECT" ]; then
@@ -56,24 +76,37 @@ if [ -z "$GPU_SELECT" ]; then
   exit 1
 fi
 
+# Setting `VIDEO_PORT` to none disables RANDR/XRANDR; do not set this if using datacenter GPUs
 if [ "${VIDEO_PORT,,}" = "none" ]; then
   export CONNECTED_MONITOR="--use-display-device=None"
+# The X server is otherwise deliberately set to a specific video port, despite not being plugged in, to enable RANDR/XRANDR; a monitor will display the screen if plugged into that specific port
 else
   export CONNECTED_MONITOR="--connected-monitor=${VIDEO_PORT}"
 fi
 
+# The bus ID from nvidia-smi is in hexadecimal format and should be converted to the decimal format Xorg understands; this is required because nvidia-xconfig does not work as intended in a container
 HEX_ID=$(sudo nvidia-smi --query-gpu=pci.bus_id --id="$GPU_SELECT" --format=csv | sed -n 2p)
 IFS=":." ARR_ID=($HEX_ID)
 unset IFS
 BUS_ID=PCI:$((16#${ARR_ID[1]})):$((16#${ARR_ID[2]})):$((16#${ARR_ID[3]}))
+# A custom modeline should be generated because there is no monitor to fetch this information normally
 export MODELINE=$(cvt -r "${SIZEW}" "${SIZEH}" "${REFRESH}" | sed -n 2p)
+# Generate /etc/X11/xorg.conf with nvidia-xconfig
 sudo nvidia-xconfig --virtual="${SIZEW}x${SIZEH}" --depth="$CDEPTH" --mode=$(echo "$MODELINE" | awk '{print $2}' | tr -d '"') --allow-empty-initial-configuration --no-probe-all-gpus --busid="$BUS_ID" --no-multigpu --no-sli --no-base-mosaic --only-one-x-screen ${CONNECTED_MONITOR}
+# Guarantee that the X server starts without a monitor by adding more options to the configuration
 sudo sed -i '/Driver\s\+"nvidia"/a\    Option         "ModeValidation" "NoMaxPClkCheck, NoEdidMaxPClkCheck, NoMaxSizeCheck, NoHorizSyncCheck, NoVertRefreshCheck, NoVirtualSizeCheck, NoExtendedGpuCapabilitiesCheck, NoTotalSizeCheck, NoDualLinkDVICheck, NoDisplayPortBandwidthCheck, AllowNon3DVisionModes, AllowNonHDMI3DModes, AllowNonEdidModes, NoEdidHDMI2Check, AllowDpInterlaced"\n    Option         "HardDPMS" "False"' /etc/X11/xorg.conf
+# Add custom generated modeline to the configuration
 sudo sed -i '/Section\s\+"Monitor"/a\    '"$MODELINE" /etc/X11/xorg.conf
+# Prevent interference between GPUs; add this to the host or to other containers running Xorg as well
+echo -e "Section \"ServerFlags\"\n    Option \"AutoAddGPU\" \"false\"\nEndSection" | sudo tee -a /etc/X11/xorg.conf > /dev/null
 
+# Default display is :0 across the container
 export DISPLAY=":0"
+# Disable VSYNC in OpenGL by default; change this to "1" to enable
 export __GL_SYNC_TO_VBLANK="0"
+# Prioritize NVIDIA Vulkan driver if multiple GPU vendors exist
 export __NV_PRIME_RENDER_OFFLOAD="1"
+# Run Xorg server with required extensions
 Xorg vt7 -noreset -novtswitch -sharevts -dpi "${DPI}" +extension "GLX" +extension "RANDR" +extension "RENDER" +extension "MIT-SHM" "${DISPLAY}" &
 
 # Wait for X11 to start
@@ -81,16 +114,19 @@ echo "Waiting for X socket"
 until [ -S "/tmp/.X11-unix/X${DISPLAY/:/}" ]; do sleep 1; done
 echo "X socket is ready"
 
+# Run the x11vnc + noVNC fallback web interface if enabled
 if [ "${NOVNC_ENABLE,,}" = "true" ]; then
   if [ -n "$NOVNC_VIEWPASS" ]; then export NOVNC_VIEWONLY="-viewpasswd ${NOVNC_VIEWPASS}"; else unset NOVNC_VIEWONLY; fi
   x11vnc -display "${DISPLAY}" -passwd "${BASIC_AUTH_PASSWORD:-$PASSWD}" -shared -forever -repeat -xkb -snapfb -threads -xrandr "resize" -rfbport 5900 ${NOVNC_VIEWONLY} &
   /opt/noVNC/utils/novnc_proxy --vnc localhost:5900 --listen 8080 --heartbeat 10 &
 fi
 
-# Add custom processes below this section, or within `supervisord.conf` to perform service management like systemd
+# Start Xfce4 desktop environment
 xfce4-session &
 
-# Fix selkies-gstreamer keyboard mapping
+# Add custom processes here, or within `supervisord.conf` to perform service management similar to systemd
+
+# Fix selkies-gstreamer keyboard mapping; remove this once selkies-gstreamer issue #6 is fixed and included in a release
 if [ "${NOVNC_ENABLE,,}" != "true" ]; then
   sudo xmodmap -e "keycode 94 shift = less less"
 fi

+ 10 - 5
selkies-gstreamer-entrypoint.sh

@@ -1,10 +1,15 @@
 #!/bin/bash -e
 
-# Update env for gstreamer
+# Source environment for GStreamer
 source /opt/gstreamer/gst-env
+# Add CUDA library path
 export LD_LIBRARY_PATH="/usr/local/cuda/lib64:${LD_LIBRARY_PATH}"
+
+# Default display is :0 across this setup
 export DISPLAY=":0"
+# Show debug logs for GStreamer
 export GST_DEBUG="${GST_DEBUG:-*:2}"
+# Set password for basic authentication
 if [ "${ENABLE_BASIC_AUTH,,}" = "true" ] && [ -z "$BASIC_AUTH_PASSWORD" ]; then export BASIC_AUTH_PASSWORD="$PASSWD"; fi
 
 # Wait for X11 to start
@@ -12,7 +17,7 @@ echo "Waiting for X socket"
 until [ -S "/tmp/.X11-unix/X${DISPLAY/:/}" ]; do sleep 1; done
 echo "X socket is ready"
 
-# Write Progressive Web App (PWA) config.
+# Write Progressive Web App (PWA) configuration
 export PWA_APP_NAME="Selkies WebRTC"
 export PWA_APP_SHORT_NAME="selkies"
 export PWA_START_URL="/index.html"
@@ -25,7 +30,7 @@ sed -i \
     -e "s|PWA_CACHE|${PWA_APP_SHORT_NAME}-webrtc-pwa|g" \
 /opt/gst-web/sw.js
 
-# Write default user config.
+# Write default user configuration
 export SELKIES_USER_CONFIG_FILE="${HOME}/.config/selkies/selkies-gstreamer-conf.json"
 mkdir -p $(dirname "$SELKIES_USER_CONFIG_FILE")
 if [ ! -f "${SELKIES_USER_CONFIG_FILE}" ]; then
@@ -41,10 +46,10 @@ if [ ! -f "${SELKIES_USER_CONFIG_FILE}" ]; then
 EOF
 fi
 
-# Clear the cache registry to force the cuda elements to refresh
+# Clear the cache registry to force the CUDA elements to refresh
 rm -f "${HOME}/.cache/gstreamer-1.0/registry.x86_64.bin"
 
-# Start the selkies webrtc gstreamer app
+# Start the selkies-gstreamer WebRTC HTML5 remote desktop application
 selkies-gstreamer \
     --json_config="${SELKIES_USER_CONFIG_FILE}" \
     --addr="0.0.0.0" \

+ 1 - 0
xgl.yml

@@ -31,6 +31,7 @@ spec:
           value: "96"
         - name: CDEPTH
           value: "24"
+        # Change to DP-0 or any other DP-* port for larger resolution support if you are NOT using datacenter GPUs
         - name: VIDEO_PORT
           value: "DFP"
         # Choose either `value:` or `secretKeyRef:` but not both at the same time