Zack Saadioui
8/27/2024
```bash
# Install Ollama with the official install script
curl -fsSL https://ollama.com/install.sh | sh
```
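Once the script finishes, it is worth a quick sanity check before tuning anything; a minimal check, assuming the script placed the `ollama` binary on your PATH:

```bash
# Confirm the CLI is installed and print its version
ollama --version
```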
```bash
# Tell Ollama how many CPU threads to use for inference
export OLLAMA_NUM_THREADS=8
```
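Rather than hard-coding 8, you can match the thread count to the machine; a sketch assuming a Linux shell with `nproc` available and that your Ollama build honors the `OLLAMA_NUM_THREADS` variable shown above:

```bash
# Use one thread per available CPU core (Linux; on macOS use: sysctl -n hw.ncpu)
export OLLAMA_NUM_THREADS=$(nproc)
# Restart the Ollama server afterwards so it picks up the new value
```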
```bash
# Enable CUDA acceleration so inference runs on the GPU
export OLLAMA_CUDA=1
```
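To check that the GPU is actually picking up the work, watch its utilization while a prompt is running; this assumes an NVIDIA card with the standard driver tools installed:

```bash
# Refresh GPU utilization and memory usage every second while a model is generating
watch -n 1 nvidia-smi
```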
```bash
# Run a 4-bit quantized build of Llama 2 7B instead of the full-precision weights
ollama run llama2:7b-q4_0
```
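To see how much smaller the quantized variant is before committing to it, pull the tag and list your local models to compare on-disk sizes; a quick check, assuming the tag name used above:

```bash
# Download the quantized tag, then compare the sizes of everything installed locally
ollama pull llama2:7b-q4_0
ollama list
```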
```bash
# Reduce the context window to 1024 tokens to cut memory use and speed up prompt processing
ollama run llama2 --context-size 1024
```
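If your Ollama build does not expose a context flag on `ollama run`, the same limit can be applied per request through the REST API's `num_ctx` option; a minimal sketch, assuming the server is listening on the default port 11434:

```bash
# Request a completion with the context window capped at 1024 tokens
curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "prompt": "Summarize the benefits of a smaller context window.",
  "stream": false,
  "options": { "num_ctx": 1024 }
}'
```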
```bash
# Feed an empty stdin so the model loads into memory and exits immediately,
# leaving it warm for the next request
ollama run llama2 < /dev/null
```
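The same warm-up can be done over the API, which also lets you control how long the model stays resident; a sketch assuming the documented behavior that a generate request without a prompt simply loads the model:

```bash
# Preload llama2 and keep it in memory for 30 minutes
curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "keep_alive": "30m"
}'
```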
```python
import ollama

# A structured prompt with an explicit output format keeps the response
# short and predictable
prompt = """
Task: Summarize the following text in 3 bullet points.
Text: [Your text here]
Output format:
- Bullet point 1
- Bullet point 2
- Bullet point 3
"""

response = ollama.generate(model='llama2', prompt=prompt)
print(response['response'])
```
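For interactive use, streaming the completion makes the model feel faster because tokens appear as they are generated rather than all at once; a minimal sketch, assuming the `ollama` Python package and a locally running server:

```python
import ollama

# Stream the completion piece by piece instead of waiting for the full response
for chunk in ollama.generate(model='llama2', prompt='Explain quantization in one sentence.', stream=True):
    print(chunk['response'], end='', flush=True)
print()
```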
```bash
# Print timing statistics (load time, prompt eval rate, generation rate) after each response
ollama run llama2 --verbose
```
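The API reports the same numbers on every response, so you can track generation speed programmatically; a sketch assuming the client surfaces the documented `eval_count` and `eval_duration` fields (durations are in nanoseconds):

```python
import ollama

# Compute tokens per second from the timing fields returned with the response
response = ollama.generate(model='llama2', prompt='Say hello.')
tokens = response['eval_count']
seconds = response['eval_duration'] / 1e9
print(f"{tokens} tokens in {seconds:.2f}s ({tokens / seconds:.1f} tokens/sec)")
```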