https://forum.proxmox.com/threads/tutorial-mounting-nfs-share-to-an-unprivileged-lxc.138506/
##host: mount the NAS NFS exports via /etc/fstab
sudo nano /etc/fstab
192.212.40.111:/volume1/10-System /mnt/NAS_10-System nfs defaults 0 0
192.212.40.111:/volume2/30-Mail /mnt/NAS_30-Mail nfs defaults 0 0
192.212.40.111:/volume1/70-Photocopie /mnt/NAS_70-Photocopie nfs defaults 0 0
192.212.40.111:/volume1/80-Photo /mnt/NAS_80-Photo nfs defaults 0 0
192.212.40.111:/volume3/90-VideoClub /mnt/NAS_90-VideoClub nfs defaults 0 0
192.212.40.111:/volume3/99-Ftp /mnt/NAS_99-Ftp nfs defaults 0 0
sudo systemctl daemon-reload
sudo mount -a
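Quick sanity check that the NAS exports are visible and actually mounted (a sketch; assumes nfs-common is installed on the host):
showmount -e 192.212.40.111
findmnt -t nfs,nfs4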
#host for LXC200
nano /etc/pve/lxc/200.conf
mp0: /mnt/NAS_10-System/docker/VM200,mp=/System
mp1: /mnt/NAS_80-Photo,mp=/Photo
#host for LXC125
nano /etc/pve/lxc/125.conf
mp0: /mnt/NAS_10-System/docker/VM125,mp=/System
mp1: /mnt/NAS_90-VideoClub,mp=/VideoClub
mp2: /mnt/NAS_30-Mail,mp=/Mail
#host for LXC103
nano /etc/pve/lxc/103.conf
mp0: /mnt/NAS_10-System/docker/RASP103,mp=/System
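The same bind mounts can be added from the host CLI instead of editing the .conf files by hand; a sketch using pct set, with LXC103's mount as the example:
pct set 103 -mp0 /mnt/NAS_10-System/docker/RASP103,mp=/System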
#LXC*** (run inside each container)
sudo apt update
sudo apt install cifs-utils smbclient nfs-common passwd -y
sudo groupadd -g 10000 lxc_shares
sudo usermod -aG lxc_shares root
sudo usermod -aG lxc_shares david
sudo reboot
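After the reboot, a quick check inside the container that the group exists and that a bind-mounted path shows the expected numeric owner (a sketch; /System is one of the mount points defined above):
id david
ls -ln /System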
#host: mount the SMB/CIFS NAS shares into Proxmox
mkdir /mnt/NAS_10-System
mkdir /mnt/NAS_30-Mail
mkdir /mnt/NAS_70-Photocopie
mkdir /mnt/NAS_80-Photo
mkdir /mnt/NAS_90-VideoClub
mkdir /mnt/NAS_99-Ftp
nano /etc/fstab
//192.212.40.111/10-System /mnt/NAS_10-System cifs rw,credentials=/root/.sharelogin,nobrl,uid=101000,gid=101000 0 0
//192.212.40.111/30-Mail /mnt/NAS_30-Mail cifs rw,credentials=/root/.sharelogin,nobrl,uid=101000,gid=101000 0 0
//192.212.40.111/70-Photocopie /mnt/NAS_70-Photocopie cifs rw,credentials=/root/.sharelogin,nobrl,uid=101000,gid=101000 0 0
//192.212.40.111/80-Photo /mnt/NAS_80-Photo cifs rw,credentials=/root/.sharelogin,nobrl,uid=101000,gid=101000 0 0
//192.212.40.111/90-VideoClub /mnt/NAS_90-VideoClub cifs rw,credentials=/root/.sharelogin,nobrl,uid=101000,gid=101000 0 0
//192.212.40.111/99-Ftp /mnt/NAS_99-Ftp cifs rw,credentials=/root/.sharelogin,nobrl,uid=101000,gid=101000 0 0
systemctl daemon-reload
mount -a
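The credentials file referenced by the fstab lines above has to exist before mount -a; the standard cifs-utils format is shown below (NAS_USER / NAS_PASSWORD are placeholders):
cat > /root/.sharelogin <<'EOF'
username=NAS_USER
password=NAS_PASSWORD
EOF
chmod 600 /root/.sharelogin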
Switching GPU binding (live): toggle GPU driver script (vfio-pci ↔ nvidia)
A single NVIDIA GPU cannot:
- be passed through to a VM (via vfio-pci), and
- be used on the host or in an LXC at the same time.
Why? Because when you bind the GPU to vfio-pci on boot, it is invisible to the host and cannot be used by NVIDIA's kernel driver (nvidia.ko).
Switch between VM and LXC use (rebind on demand):
If you don't need both at the same time, you can manually switch the GPU between:
- passthrough to the VM (bind to vfio-pci)
- use on the host / in an LXC (bind to nvidia)
This lets you use the GPU for nvidia-smi or CUDA in an LXC container, then later give it back to the VM.
Here's a single script that checks which driver is currently bound to the GPU and automatically toggles between vfio-pci (for passthrough to the VM) and nvidia (for use on the host or in an LXC):
#!/bin/bash
# === CONFIGURATION ===
GPU="0000:0a:00.0"
AUDIO="0000:0a:00.1"
VMID=131 # Your Windows VM ID
LXCID=115 # Your LXC container ID using the GPU
# === FUNCTIONS ===
get_driver() {
    basename "$(readlink "/sys/bus/pci/devices/$1/driver" 2>/dev/null)"
}
unbind_driver() {
    # release device $1 (PCI address) from whatever driver currently owns it
    [ -e "/sys/bus/pci/devices/$1/driver" ] && echo "$1" > "/sys/bus/pci/devices/$1/driver/unbind"
}
set_override() {
    # $2 = driver name to force on the next bind; an empty string clears the override
    echo "$2" > "/sys/bus/pci/devices/$1/driver_override"
}
bind_driver() {
    echo "$1" > "/sys/bus/pci/drivers/$2/bind"
}
switch_to_nvidia() {
    echo "→ Switching to NVIDIA driver (LXC use)..."
    echo "Stopping VM $VMID..."
    qm stop $VMID
    sleep 3
    echo "Unbinding GPU from current driver..."
    unbind_driver "$GPU"
    unbind_driver "$AUDIO"
    echo "Loading NVIDIA modules..."
    modprobe -a nvidia nvidia_uvm nvidia_drm nvidia_modeset
    echo "Binding GPU to nvidia..."
    set_override "$GPU" ""      # clear any vfio-pci override left over from VM mode
    set_override "$AUDIO" ""
    bind_driver "$GPU" nvidia
    bind_driver "$AUDIO" snd_hda_intel
    echo "Starting LXC container $LXCID..."
    pct start $LXCID
    echo "✔ Switched to NVIDIA mode."
}
switch_to_vfio() {
    echo "→ Switching to VFIO (VM passthrough)..."
    echo "Stopping LXC container $LXCID..."
    pct stop $LXCID
    sleep 3
    echo "Unbinding GPU from current driver..."
    unbind_driver "$GPU"
    unbind_driver "$AUDIO"
    echo "Loading VFIO modules..."
    modprobe vfio-pci
    echo "Binding GPU to vfio-pci..."
    set_override "$GPU" vfio-pci    # vfio-pci claims nothing by default, so force it via driver_override
    set_override "$AUDIO" vfio-pci
    bind_driver "$GPU" vfio-pci
    bind_driver "$AUDIO" vfio-pci
    echo "Starting VM $VMID..."
    qm start $VMID
    echo "✔ Switched to VFIO mode."
}
# === MAIN ===
MODE="$1"
CURRENT_DRIVER=$(get_driver "$GPU")
echo "Detected GPU driver: ${CURRENT_DRIVER:-none}"
case "$MODE" in
--to-nvidia)
switch_to_nvidia
;;
--to-vfio)
switch_to_vfio
;;
"")
if [ "$CURRENT_DRIVER" == "vfio-pci" ]; then
switch_to_nvidia
elif [ "$CURRENT_DRIVER" == "nvidia" ]; then
switch_to_vfio
elif [ -z "$CURRENT_DRIVER" ]; then
echo "⚠️ No driver bound. Defaulting to NVIDIA..."
switch_to_nvidia
else
echo "❌ Unknown driver bound: $CURRENT_DRIVER"
exit 1
fi
;;
*)
echo "Usage: $0 [--to-nvidia | --to-vfio]"
exit 1
;;
esac
# === FINAL STATUS DISPLAY ===
echo
echo "🔍 Final GPU driver status:"
SHORT_GPU=$(echo "$GPU" | cut -d':' -f2-)
lspci -k | grep "$SHORT_GPU" -A 3
# Auto-toggle based on the current driver
./toggle-gpu.sh
# Force switch to NVIDIA for LXC use
./toggle-gpu.sh --to-nvidia
# Force switch to VFIO for VM passthrough
./toggle-gpu.sh --to-vfio
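Before toggling it is worth confirming that nothing still holds the card; a sketch:
nvidia-smi                          # only meaningful in nvidia mode, lists processes using the GPU
fuser -v /dev/nvidia* 2>/dev/null   # shows PIDs that still have the device nodes open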
Passing NVIDIA GPUs to Windows VMs can fail because NVIDIA detects that it is running in a virtualized environment and blocks the driver; the CPU args below hide the hypervisor from the guest.
The /etc/pve/qemu-server/131.conf:
args: -cpu host,hv_vapic,hv_stimer,hv_time,hv_synic,hv_vpindex,+invtsc,-hypervisor
bios: ovmf
boot:
cores: 16
cpu: host
efidisk0: local-lvm:vm-131-disk-0,efitype=4m,pre-enrolled-keys=1,size=4M
hostpci0: 0000:0a:00,device-id=0x2882,pcie=1,vendor-id=0x10de,x-vga=1
ide0: PveSsd900:131/vm-131-disk-0.qcow2,size=180G
kvm: 1
machine: pc-q35-9.0
memory: 16384
meta: creation-qemu=9.2.0,ctime=1747334710
name: win10Gaming
net0: virtio=BC:24:11:77:A3:BC,bridge=vmbr2,firewall=1
numa: 0
onboot: 1
ostype: win10
scsihw: virtio-scsi-single
smbios1: uuid=45849243-d81c-4be4-9528-4620ee509da8,manufacturer=QkVTU1RBUiBURUNIIExJTUlURUQ=,product=SE04MA==,version=NS4xNg==,serial=RGVmYXVsdCBzdHJpbmc=,sku=RGVmYXVsdCBzdHJpbmc=,family=RGVmYXVsdCBzdHJpbmc=,base64=1
sockets: 1
tags: 5;sharegpu;windows
usb0: host=1532:0083
usb1: host=145f:0316
usb2: host=0a12:0001
vga: none
vmgenid: 87821f0a-458f-45da-8691-62fcd515c190
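The vendor-id/device-id values on the hostpci0 line override what the guest sees; the card's real IDs can be checked on the host (PCI address taken from the toggle script above):
lspci -nn -s 0a:00.0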
What’s the best way to run Docker in Proxmox?
add guest agent
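A sketch for the guest agent: the first two commands run inside the Docker VM, the last on the Proxmox host (replace <VMID> with the VM's ID):
apt install qemu-guest-agent -y
systemctl enable --now qemu-guest-agent
qm set <VMID> --agent enabled=1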
update all Docker images (one compose stack per sub-directory of /SystemSvg/docker/)
cd /SystemSvg/docker/
for D in *; do [ -d "$D" ] || continue; (cd "$D" && docker compose pull && docker compose up -d --force-recreate); done
docker image prune
expose the Docker API
I had to edit /lib/systemd/system/docker.service
on my Ubuntu 16.04.2 LTS system to modify the line
ExecStart=/usr/bin/docker daemon -H fd:// -H tcp://0.0.0.0:2375
then
sudo systemctl daemon-reload
sudo systemctl restart docker.service
and everything worked :-). The next step is to figure out how to protect the Docker daemon from being hijacked.
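A safer variant of the same change is a systemd drop-in instead of editing the packaged unit (it survives docker upgrades); note that current packages ship dockerd rather than the old docker daemon syntax, and that port 2375 is unauthenticated, so keep it firewalled. A sketch:
mkdir -p /etc/systemd/system/docker.service.d
cat > /etc/systemd/system/docker.service.d/override.conf <<'EOF'
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2375
EOF
systemctl daemon-reload && systemctl restart docker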
traefik
nfs no_root_squash
Conclusion
When you use NFS mount points with the root account on the client side, export them with the no_root_squash option. This ensures you don't run into access-related issues on the NFS mount points.
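On the NAS side this corresponds to an export line roughly like the one below (a sketch for a plain Linux NFS server; the path and subnet are just examples taken from the shares above):
/volume1/10-System 192.212.40.0/24(rw,sync,no_subtree_check,no_root_squash)
exportfs -ra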