Zack Saadioui
8/27/2024
Install Ollama with the official install script:

```bash
curl -fsSL https://ollama.ai/install.sh | sh
```
Confirm the installation by checking the version:

```bash
ollama --version
```
Raise the number of requests Ollama will handle concurrently by setting the `OLLAMA_NUM_PARALLEL` environment variable. It has to be set in the environment of the Ollama server process (for example, before running `ollama serve`):

```bash
export OLLAMA_NUM_PARALLEL=4  # Sets the parallel request limit
```
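With the limit raised, you can sanity-check concurrency by firing several requests at the local API at once. Here is a minimal sketch against the standard `/api/generate` endpoint on the default port 11434; the model name `llama3` is an assumption, so substitute whichever model you have pulled:

```bash
# Fire 4 generation requests in parallel against the local Ollama server
for i in 1 2 3 4; do
  curl -s http://localhost:11434/api/generate \
    -d '{"model": "llama3", "prompt": "Say hello", "stream": false}' &
done
wait  # Block until all background requests have finished
```

If the server is honoring the parallel limit, the four responses should come back in roughly the time a single request takes, rather than one after another.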
Keep an eye on resource usage while the server is under load. `htop` shows CPU and memory consumption:

```bash
htop
```
For GPU utilization and VRAM usage, use `nvidia-smi`:

```bash
nvidia-smi
```
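If you want continuous readings rather than a single snapshot, both tools can run in a refresh loop. A small sketch, assuming an NVIDIA GPU and that `watch` is available on the host:

```bash
# Refresh GPU utilization and VRAM usage every 2 seconds
watch -n 2 nvidia-smi

# Or use nvidia-smi's built-in loop mode
nvidia-smi -l 2
```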
To scale horizontally, run multiple Ollama instances as Docker containers, each with access to the GPU:

```bash
docker run -d --gpus=all --name ollama-instance-1 ollama/ollama
docker run -d --gpus=all --name ollama-instance-2 ollama/ollama
```
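As written, each container listens on Ollama's default port 11434 inside its own network namespace, so to reach both from the host you would typically map them to different host ports and give each its own model volume. A minimal sketch, assuming host ports 11434/11435 and the volume names `ollama-1`/`ollama-2` (both are placeholders):

```bash
# Instance 1: host port 11434 -> container port 11434 (Ollama's default)
docker run -d --gpus=all \
  -p 11434:11434 \
  -v ollama-1:/root/.ollama \
  --name ollama-instance-1 ollama/ollama

# Instance 2: host port 11435 -> container port 11434
docker run -d --gpus=all \
  -p 11435:11434 \
  -v ollama-2:/root/.ollama \
  --name ollama-instance-2 ollama/ollama
```

A reverse proxy or a simple round-robin client can then spread requests across http://localhost:11434 and http://localhost:11435.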