Zack Saadioui
8/26/2024
Install the library via `pip install` (commands below). The old-style imports looked like this:

```python
from llama_index import VectorStoreIndex
from llama_index.llms import Ollama
```
The equivalent imports through the legacy compatibility module:

```python
from llama_index.legacy import VectorStoreIndex
from llama_index.legacy.llms import Ollama
```
Installation commands:

```bash
pip install llama-index
pip install llama-index-core
```
The deprecated `ServiceContext` pattern (assumes `llm` and `embed_model` are already defined):

```python
from llama_index import ServiceContext
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model, chunk_size=512)
```
Its replacement — the global `Settings` object introduced in newer LlamaIndex versions:

```python
from llama_index.core import Settings
Settings.llm = llm
Settings.embed_model = embed_model
Settings.chunk_size = 512
```
Copyright © Arsturn 2024