CUDA: Difference between revisions
Document selection of nvidia driver |
|||
| Line 17: | Line 17: | ||
* By making a FHS user env | * By making a FHS user env | ||
{{file| | {{file|||3=# Run with `nix-shell cuda-fhs.nix` | ||
# Run with `nix-shell cuda-fhs.nix` | |||
{ pkgs ? import </nowiki><nixpkgs><nowiki> {} }: | { pkgs ? import </nowiki><nixpkgs><nowiki> {} }: | ||
let | |||
# Change according to the driver used: stable, beta | |||
nvidiaPackage = pkgs.linuxPackages.nvidiaPackages.stable; | |||
in | |||
(pkgs.buildFHSEnv { | (pkgs.buildFHSEnv { | ||
name = "cuda-env"; | name = "cuda-env"; | ||
| Line 35: | Line 38: | ||
unzip | unzip | ||
cudatoolkit | cudatoolkit | ||
nvidiaPackage | |||
libGLU libGL | libGLU libGL | ||
xorg.libXi xorg.libXmu freeglut | xorg.libXi xorg.libXmu freeglut | ||
| Line 47: | Line 50: | ||
profile = '' | profile = '' | ||
export CUDA_PATH=${pkgs.cudatoolkit} | export CUDA_PATH=${pkgs.cudatoolkit} | ||
# export LD_LIBRARY_PATH=${ | # export LD_LIBRARY_PATH=${nvidiaPackage}/lib | ||
export EXTRA_LDFLAGS="-L/lib -L${ | export EXTRA_LDFLAGS="-L/lib -L${nvidiaPackage}/lib" | ||
export EXTRA_CCFLAGS="-I/usr/include" | export EXTRA_CCFLAGS="-I/usr/include" | ||
''; | ''; | ||
}).env | }).env|name=cuda-fhs.nix|lang=nix}} | ||
* By making a nix-shell | * By making a nix-shell | ||
{{file| | {{file|||3=# Run with `nix-shell cuda-shell.nix` | ||
# Run with `nix-shell cuda-shell.nix` | |||
{ pkgs ? import </nowiki><nixpkgs><nowiki> {} }: | { pkgs ? import </nowiki><nixpkgs><nowiki> {} }: | ||
let | |||
nvidiaPackage = pkgs.linuxPackages.nvidiaPackages.stable; | |||
in | |||
pkgs.mkShell { | pkgs.mkShell { | ||
name = "cuda-env-shell"; | name = "cuda-env-shell"; | ||
| Line 64: | Line 68: | ||
git gitRepo gnupg autoconf curl | git gitRepo gnupg autoconf curl | ||
procps gnumake util-linux m4 gperf unzip | procps gnumake util-linux m4 gperf unzip | ||
cudatoolkit | cudatoolkit nvidiaPackage | ||
libGLU libGL | libGLU libGL | ||
xorg.libXi xorg.libXmu freeglut | xorg.libXi xorg.libXmu freeglut | ||
| Line 72: | Line 76: | ||
shellHook = '' | shellHook = '' | ||
export CUDA_PATH=${pkgs.cudatoolkit} | export CUDA_PATH=${pkgs.cudatoolkit} | ||
# export LD_LIBRARY_PATH=${ | # export LD_LIBRARY_PATH=${nvidiaPackage}/lib:${pkgs.ncurses}/lib | ||
export EXTRA_LDFLAGS="-L/lib -L${ | export EXTRA_LDFLAGS="-L/lib -L${nvidiaPackage}/lib" | ||
export EXTRA_CCFLAGS="-I/usr/include" | export EXTRA_CCFLAGS="-I/usr/include" | ||
''; | ''; | ||
} | }|name=cuda-shell.nix|lang=nix}} | ||
* By making a flake.nix<syntaxhighlight lang="nix" line="1" start="1"> | * By making a flake.nix<syntaxhighlight lang="nix" line="1" start="1"># flake.nix, run with `nix develop` | ||
# flake.nix, run with `nix develop` | |||
{ | { | ||
description = "CUDA development environment"; | description = "CUDA development environment"; | ||
| Line 94: | Line 96: | ||
config.cudaVersion = "12"; | config.cudaVersion = "12"; | ||
}; | }; | ||
# Change according to the driver used: stable, beta | |||
nvidiaPackage = pkgs.linuxPackages.nvidiaPackages.stable; | |||
in { | in { | ||
# alejandra is a nix formatter with a beautiful output | # alejandra is a nix formatter with a beautiful output | ||
| Line 103: | Line 107: | ||
cudaPackages.cuda_cudart | cudaPackages.cuda_cudart | ||
cudatoolkit | cudatoolkit | ||
nvidiaPackage | |||
cudaPackages.cudnn | cudaPackages.cudnn | ||
libGLU | libGLU | ||
| Line 115: | Line 119: | ||
xorg.libXrandr | xorg.libXrandr | ||
zlib | zlib | ||
ncurses | |||
stdenv.cc | stdenv.cc | ||
binutils | binutils | ||
| Line 122: | Line 126: | ||
shellHook = '' | shellHook = '' | ||
export LD_LIBRARY_PATH="${ | export LD_LIBRARY_PATH="${nvidiaPackage}/lib:$LD_LIBRARY_PATH" | ||
export CUDA_PATH=${pkgs.cudatoolkit} | export CUDA_PATH=${pkgs.cudatoolkit} | ||
export EXTRA_LDFLAGS="-L/lib -L${ | export EXTRA_LDFLAGS="-L/lib -L${nvidiaPackage}/lib" | ||
export EXTRA_CCFLAGS="-I/usr/include" | export EXTRA_CCFLAGS="-I/usr/include" | ||
export CMAKE_PREFIX_PATH="${pkgs.fmt.dev}:$CMAKE_PREFIX_PATH" | export CMAKE_PREFIX_PATH="${pkgs.fmt.dev}:$CMAKE_PREFIX_PATH" | ||
| Line 131: | Line 135: | ||
}; | }; | ||
}; | }; | ||
} | }</syntaxhighlight> | ||
</syntaxhighlight> | |||
== Setting up CUDA Binary Cache == | == Setting up CUDA Binary Cache == | ||
Revision as of 21:02, 24 June 2025
NixOS supports using NVIDIA GPUs for pure computing purposes, not just for graphics. For example, many users rely on NixOS for machine learning both locally and on cloud instances. These use cases are supported by the @NixOS/cuda-maintainers team on GitHub (project board). If you have an issue using your NVIDIA GPU for computing purposes open an issue on GitHub and tag @NixOS/cuda-maintainers.
Run <code>cachix use nix-community</code> to enable the nix-community binary cache. See the [https://nix-community.org/cache/ Nix-community cache] page for more details.
cudatoolkit, cudnn, and related packages
The CUDA toolkit is available in a number of different versions. Please use the latest major version. You can see where they're defined in nixpkgs here.
Several "CUDA-X" libraries are packaged as well. In particular,
There are some possible ways to setup a development environment using CUDA on NixOS. This can be accomplished in the following ways:
* By making an FHS user env
{{file|||3=# Run with `nix-shell cuda-fhs.nix`
{ pkgs ? import <nixpkgs> {} }:
let
# Change according to the driver used: stable, beta
nvidiaPackage = pkgs.linuxPackages.nvidiaPackages.stable;
in
(pkgs.buildFHSEnv {
name = "cuda-env";
targetPkgs = pkgs: with pkgs; [
git
gitRepo
gnupg
autoconf
curl
procps
gnumake
util-linux
m4
gperf
unzip
cudatoolkit
nvidiaPackage
libGLU libGL
xorg.libXi xorg.libXmu freeglut
xorg.libXext xorg.libX11 xorg.libXv xorg.libXrandr zlib
ncurses5
stdenv.cc
binutils
];
multiPkgs = pkgs: with pkgs; [ zlib ];
runScript = "bash";
profile = ''
export CUDA_PATH=${pkgs.cudatoolkit}
# export LD_LIBRARY_PATH=${nvidiaPackage}/lib
export EXTRA_LDFLAGS="-L/lib -L${nvidiaPackage}/lib"
export EXTRA_CCFLAGS="-I/usr/include"
'';
}).env|name=cuda-fhs.nix|lang=nix}}
* By making a nix-shell
{{file|||3=# Run with `nix-shell cuda-shell.nix`
{ pkgs ? import <nixpkgs> {} }:
let
nvidiaPackage = pkgs.linuxPackages.nvidiaPackages.stable;
in
pkgs.mkShell {
name = "cuda-env-shell";
buildInputs = with pkgs; [
git gitRepo gnupg autoconf curl
procps gnumake util-linux m4 gperf unzip
cudatoolkit nvidiaPackage
libGLU libGL
xorg.libXi xorg.libXmu freeglut
xorg.libXext xorg.libX11 xorg.libXv xorg.libXrandr zlib
ncurses5 stdenv.cc binutils
];
shellHook = ''
export CUDA_PATH=${pkgs.cudatoolkit}
# export LD_LIBRARY_PATH=${nvidiaPackage}/lib:${pkgs.ncurses}/lib
export EXTRA_LDFLAGS="-L/lib -L${nvidiaPackage}/lib"
export EXTRA_CCFLAGS="-I/usr/include"
'';
}|name=cuda-shell.nix|lang=nix}}
* By making a flake.nix<syntaxhighlight lang="nix" line="1" start="1"># flake.nix, run with `nix develop`
{
description = "CUDA development environment";
outputs = {
self,
nixpkgs,
}: let
system = "x86_64-linux";
pkgs = import nixpkgs {
inherit system;
config.allowUnfree = true;
config.cudaSupport = true;
config.cudaVersion = "12";
};
# Change according to the driver used: stable, beta
nvidiaPackage = pkgs.linuxPackages.nvidiaPackages.stable;
in {
# alejandra is a nix formatter with a beautiful output
formatter."${system}" = nixpkgs.legacyPackages.${system}.alejandra;
devShells.${system}.default = pkgs.mkShell {
buildInputs = with pkgs; [
ffmpeg
fmt.dev
cudaPackages.cuda_cudart
cudatoolkit
nvidiaPackage
cudaPackages.cudnn
libGLU
libGL
xorg.libXi
xorg.libXmu
freeglut
xorg.libXext
xorg.libX11
xorg.libXv
xorg.libXrandr
zlib
ncurses
stdenv.cc
binutils
uv
];
shellHook = ''
export LD_LIBRARY_PATH="${nvidiaPackage}/lib:$LD_LIBRARY_PATH"
export CUDA_PATH=${pkgs.cudatoolkit}
export EXTRA_LDFLAGS="-L/lib -L${nvidiaPackage}/lib"
export EXTRA_CCFLAGS="-I/usr/include"
export CMAKE_PREFIX_PATH="${pkgs.fmt.dev}:$CMAKE_PREFIX_PATH"
export PKG_CONFIG_PATH="${pkgs.fmt.dev}/lib/pkgconfig:$PKG_CONFIG_PATH"
'';
};
};
}</syntaxhighlight>
== Setting up CUDA Binary Cache ==
The [https://nix-community.org/cache/ Nix-community cache] contains pre-built CUDA packages. By adding it to your system, Nix will fetch these packages instead of building them, saving valuable time and processing power.
For more information, refer to the [[Binary Cache#Using a binary cache|Using a binary cache]] page.
{{warning|1=You need to rebuild your system at least once after adding the cache, before it can be used.}}
=== NixOS ===
Add the cache to <code>substituters</code> and <code>trusted-public-keys</code> inside your system configuration:
{{file|/etc/nixos/configuration.nix|nix|<nowiki>
nix.settings = {
substituters = [
"https://nix-community.cachix.org"
];
trusted-public-keys = [
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
];
};
</nowiki>}}

=== Non-NixOS ===
If you have cachix installed and set up, all you need to do is run:
$ cachix use nix-community
Else, you have to add substituters and trusted-public-keys to /etc/nix/nix.conf:
trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=
trusted-substituters = https://nix-community.cachix.org
trusted-users = root @wheel
If your user is in trusted-users, you can also add the cache in your home directory:
substituters = https://nix-community.cachix.org
Some things to keep in mind when setting up CUDA in NixOS
* Some GPUs, like Tesla K80, don't work with the latest drivers, so you must specify them in the option <code>hardware.nvidia.package</code>, getting the value from your selected kernel, for example, <code>config.boot.kernelPackages.nvidia_x11_legacy470</code>. You can check which driver version your GPU supports by visiting the NVIDIA site and checking the driver version.
* Even with the drivers correctly installed, some software, like Blender, may not see the CUDA GPU. Make sure your system configuration has the option <code>hardware.opengl.enable</code> enabled.
* By default, software packaged in source code form has CUDA support disabled, because of the unfree license. To solve this, you can enable builds with CUDA support with a nixpkgs-wide configuration, or use binary packaged CUDA-compatible software such as blender-bin.
CUDA under WSL
This (surprisingly) works just fine using nixpkgs 23.05 provided that you prefix the <code>LD_LIBRARY_PATH</code> in your interactive environment with the WSL library directory. For <code>nix-shell</code> this looks like:
shellHook = ''
export CUDA_PATH=${pkgs.cudatoolkit}
export LD_LIBRARY_PATH=/usr/lib/wsl/lib:${pkgs.linuxPackages.nvidia_x11}/lib:${pkgs.ncurses5}/lib
export EXTRA_LDFLAGS="-L/lib -L${pkgs.linuxPackages.nvidia_x11}/lib"
export EXTRA_CCFLAGS="-I/usr/include"
'';