Zack Saadioui
8/26/2024
LlamaIndex's `VectorStoreIndex.from_documents` is the quickest way to turn a collection of documents into a searchable vector index. `VectorStoreIndex` takes care of chunking documents, embedding the chunks, and storing the resulting vectors, and `from_documents` wraps that whole pipeline in a single call. This guide walks through installing LlamaIndex, building an index from documents, working with nodes directly, plugging in an external vector store, and composing retrieval across indexes.

First, install LlamaIndex:
```bash
pip install llama-index
```
The typical `VectorStoreIndex.from_documents` workflow starts with two imports: `VectorStoreIndex` itself and `SimpleDirectoryReader`, which loads files from a local directory into document objects:
```python
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
```
Next, load your data with `SimpleDirectoryReader` and hand the documents to `from_documents`, which chunks, embeds, and indexes them in one step:
```python
# Load documents and build index
documents = SimpleDirectoryReader("../../examples/data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents)
```
Index construction can take a while on larger document sets. To display a progress bar while the index is built, pass `show_progress=True`:
```python
index = VectorStoreIndex.from_documents(documents, show_progress=True)
```
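Once the index exists, you can query it. The sketch below wraps the index in a query engine; the question string is just an illustrative placeholder for your own query:

```python
# Wrap the index in a query engine and run a natural-language query.
# The question below is only an illustrative placeholder.
query_engine = index.as_query_engine()
response = query_engine.query("What did the author work on growing up?")
print(response)
```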
You don't have to go through `from_documents` at all. Under the hood, `from_documents` splits each document into `Node` objects, the atomic text chunks that actually get embedded and indexed. If you want full control over chunking, IDs, or metadata, you can create `Node` objects yourself and build the index from them directly. Start by importing `TextNode`, the standard `Node` type for plain text:
```python
from llama_index.core.schema import TextNode
```
Then construct your nodes; the `<text_chunk>` and `<node_id>` placeholders stand in for your own content and identifiers:
```python
node1 = TextNode(text="<text_chunk>", id_="<node_id>")
node2 = TextNode(text="<text_chunk>", id_="<node_id>")
nodes = [node1, node2]
```
Finally, build the index directly from the nodes:
```python
index = VectorStoreIndex(nodes)
```
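If you want node-level control without chunking text by hand, one option is to run a node parser over your loaded documents and index the resulting nodes. This is a sketch, not the only approach, and `chunk_size=512` is just an illustrative value:

```python
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter

# Split loaded documents into nodes, then index the nodes.
# chunk_size=512 is an arbitrary illustrative value.
documents = SimpleDirectoryReader("../../examples/data/paul_graham").load_data()
nodes = SentenceSplitter(chunk_size=512).get_nodes_from_documents(documents)

index = VectorStoreIndex(nodes)
```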
By default, `VectorStoreIndex` keeps the embedded `Node` vectors in a simple in-memory store. To persist them in an external vector database instead, configure a `StorageContext` that points at your vector store and pass it to `from_documents`. The example below uses Pinecone; note that in recent LlamaIndex releases the integration ships as a separate package (typically `pip install llama-index-vector-stores-pinecone`), and the `pinecone.init(...)` call assumes an older Pinecone client:
```python
import pinecone
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore

# Initialize Pinecone
pinecone.init(api_key="<api_key>", environment="<environment>")
pinecone.create_index("quickstart", dimension=1536, metric="euclidean", pod_type="p1")

# Customize the Storage Context
storage_context = StorageContext.from_defaults(vector_store=PineconeVectorStore(pinecone.Index("quickstart")))

# Load documents & build your index
documents = SimpleDirectoryReader("../../examples/data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
```
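Because the embeddings now live in Pinecone, you can later reattach an index to the same data without re-loading or re-embedding anything. A minimal sketch, assuming the same Pinecone setup as above:

```python
import pinecone
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore

# Assumes pinecone.init(...) was already called as in the previous snippet.
# Rebuild an index object on top of the existing "quickstart" vector store.
vector_store = PineconeVectorStore(pinecone.Index("quickstart"))
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
```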
Beyond plain text chunks, `VectorStoreIndex` supports composable retrieval: you can index generic objects such as other query engines or retrievers by wrapping them in `IndexNode` objects. When one of these objects is retrieved, it is automatically run against the query, which lets one index route questions to another:
```python
from llama_index.core.schema import IndexNode

# Wrap an existing query engine in an IndexNode so it can be retrieved like any other node
query_engine = other_index.as_query_engine()
obj = IndexNode(text="A query engine describing X, Y, Z.", obj=query_engine, index_id="my_query_engine")

index = VectorStoreIndex(nodes=nodes, objects=[obj])
retriever = index.as_retriever(verbose=True)
```
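From there, retrieval works as usual; if the `IndexNode` is among the results, its wrapped query engine is executed against the query. A minimal sketch with a placeholder query string:

```python
# Retrieve against the composed index; the query string is a placeholder.
results = retriever.retrieve("Tell me about X.")
for result in results:
    print(result.node.get_content())
```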
Whether you start from raw documents with `VectorStoreIndex.from_documents`, build nodes yourself, or back the index with an external vector store, the core workflow stays the same: load your data, build the index, and query it.
Copyright © Arsturn 2024