Zack Saadioui
8/26/2024
1
2
bash
# Install the web framework and the ASGI server used to run it.
pip install fastapi uvicorn
2
bash
# Install LlamaIndex, used by the API endpoints below.
pip install llama-index
main.py
To run our FastAPI application, execute:
2
```
Now your app should be running.
2
3
4
5
6
dockerfile
# Base image bundling Python 3.9 with Uvicorn/Gunicorn preconfigured for FastAPI.
FROM tiangolo/uvicorn-gunicorn-fastapi:python3.9
WORKDIR /app
# Copy the application source into the image.
COPY . /app
# Install the app's Python dependencies.
RUN pip install -r requirements.txt
# Serve the FastAPI app defined in main.py on all interfaces, port 80.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80"]
2
bash
# Build the image from the Dockerfile in the current directory.
docker build -t my-fastapi-app .
2
bash
# Run the container detached, mapping host port 80 to container port 80.
docker run -d --name my-fastapi-container -p 80:80 my-fastapi-app
2
3
4
5
python
@app.post("/chat")
def chat(input_text: str):
    """Forward the user's text to the LlamaIndex chat engine and return its reply.

    Args:
        input_text: The raw chat message from the client.

    Returns:
        The response produced by ``llamaIndex.chat`` for the given input.
    """
    # NOTE(review): assumes a module-level `llamaIndex` object is initialized
    # elsewhere in the application — confirm before running.
    response = llamaIndex.chat(input_text)
    return response
2
3
4
5
python
@app.post("/analytics")
def analyze_data(data: str):
    """Run LlamaIndex analysis over the submitted data and return the results.

    Args:
        data: The raw data payload to analyze.

    Returns:
        The results produced by ``llamaIndex.analyze`` for the given data.
    """
    # NOTE(review): assumes a module-level `llamaIndex` object is initialized
    # elsewhere in the application — confirm before running.
    results = llamaIndex.analyze(data)
    return results