From 6e57cab13f296b2f84ab09b6f77a154b27cf3092 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Omasta?=
Date: Sat, 17 Feb 2024 19:00:00 +0000
Subject: [PATCH] Upload files to 'Backend'

update backend
---
 Backend/Dockerfile       | 55 ++++++++++++++++++++++++++++++++++++++
 Backend/api.py           | 57 ++++++++++++++++++++++++++++++++++++++++
 Backend/requirements.txt |  5 ++++
 3 files changed, 117 insertions(+)
 create mode 100644 Backend/Dockerfile
 create mode 100644 Backend/api.py
 create mode 100644 Backend/requirements.txt

diff --git a/Backend/Dockerfile b/Backend/Dockerfile
new file mode 100644
index 0000000..0d8a369
--- /dev/null
+++ b/Backend/Dockerfile
@@ -0,0 +1,55 @@
+FROM python:3.10-slim-bullseye AS base
+
+WORKDIR /app
+
+# Set UTF-8 encoding for Python et al.
+ENV LANG=C.UTF-8 \
+    # Turn off writing .pyc files
+    PYTHONDONTWRITEBYTECODE=1 \
+    # Unbuffered stdout/stderr so logs are written immediately
+    PYTHONUNBUFFERED=1 \
+    # Disable the pip cache directory
+    PIP_NO_CACHE_DIR=1 \
+    # Virtual environment location
+    VENV="/opt/venv" \
+    # Non-root application user
+    APPUSER=appuser \
+    # Ensure the virtual environment's python and pip executables are used in the image
+    PATH="${VENV}/bin:$PATH"
+
+
+FROM base AS builder
+
+COPY requirements.txt .
+RUN apt-get update \
+    && apt-get install -y git build-essential
+
+
+RUN python -m venv ${VENV} \
+    && . ${VENV}/bin/activate \
+    && pip install --upgrade pip \
+    && pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu \
+    && pip install -r requirements.txt
+
+FROM base AS runner
+
+
+COPY api.py .
+
+COPY --from=builder ${VENV} ${VENV}
+ENV PATH="${VENV}/bin:$PATH"
+
+# Update permissions & create an application user so the app does not have to run as root
+RUN chgrp -R 0 /app \
+    && chmod -R g=u /app \
+    && groupadd -r ${APPUSER} \
+    && useradd -r -g ${APPUSER} ${APPUSER} \
+    && chown -R ${APPUSER}:${APPUSER} /app \
+    && usermod -d /app ${APPUSER}
+
+
+CMD ["/opt/venv/bin/uvicorn", "api:app", "--host", "0.0.0.0"]
+
+
+
+
diff --git a/Backend/api.py b/Backend/api.py
new file mode 100644
index 0000000..e1ea552
--- /dev/null
+++ b/Backend/api.py
@@ -0,0 +1,57 @@
+import torch
+import uvicorn
+from fastapi import FastAPI
+from pydantic import BaseModel
+from transformers import MT5Tokenizer, AutoTokenizer, AutoModel, T5ForConditionalGeneration
+import warnings
+import json
+import random
+import torch.nn.functional as F
+import os
+from dotenv import load_dotenv
+#from ece import compute_ECE
+from torch.utils.data import DataLoader
+from functools import reduce
+
+warnings.filterwarnings("ignore")
+DEVICE = 'cpu'
+
+load_dotenv()
+host = os.getenv("HOST")
+port = os.getenv("PORT")
+
+model_dir = os.getenv("QA_MODEL")
+#model_dir = "C:/Users/david/Desktop/T5_JUPYTER/qa_model"
+tokenizer_dir = os.getenv("QA_TOKENIZER")
+#tokenizer_dir = "C:/Users/david/Desktop/T5_JUPYTER/qa_tokenizer"
+MODEL = T5ForConditionalGeneration.from_pretrained(model_dir, from_tf=False, return_dict=True).to(DEVICE)
+print("Model successfully loaded!")
+TOKENIZER = AutoTokenizer.from_pretrained(tokenizer_dir, use_fast=True)
+print("Tokenizer successfully loaded!")
+Q_LEN = 512
+TOKENIZER.add_tokens('')
+
+
+print('model loaded')
+
+app = FastAPI()
+
+# Request body schema
+class InputData(BaseModel):
+    context: str
+    question: str
+
+@app.post("/predict")
+async def predict(input_data: InputData):
+    inputs = TOKENIZER(input_data.question, input_data.context, max_length=Q_LEN, padding="max_length", truncation=True, add_special_tokens=True)
+    input_ids = torch.tensor(inputs["input_ids"], dtype=torch.long).to(DEVICE).unsqueeze(0)
+    attention_mask = torch.tensor(inputs["attention_mask"], dtype=torch.long).to(DEVICE).unsqueeze(0)
+    outputs = MODEL.generate(input_ids=input_ids, attention_mask=attention_mask, return_dict_in_generate=True, output_scores=True, max_length=512)
+    predicted_ids = outputs.sequences.numpy()
+    predicted_text = TOKENIZER.decode(predicted_ids[0], skip_special_tokens=True)
+
+    return {'prediction': predicted_text}
+
+
+if __name__ == "__main__":
+    uvicorn.run(app, host=host, port=int(port))
\ No newline at end of file
diff --git a/Backend/requirements.txt b/Backend/requirements.txt
new file mode 100644
index 0000000..28a207c
--- /dev/null
+++ b/Backend/requirements.txt
@@ -0,0 +1,5 @@
+uvicorn==0.23.2
+fastapi==0.103.2
+transformers==4.34.0
+rank_bm25==0.2.2
+python-dotenv
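
Usage note (outside the patch itself): a minimal sketch of how the /predict endpoint added by api.py can be exercised once the image is built and running. It assumes the container's port 8000 (uvicorn's default, since the CMD sets no --port) is published to the host, that QA_MODEL and QA_TOKENIZER point at a valid T5 checkpoint and tokenizer, and that the requests package is installed on the client; the example context and question are purely illustrative.

# Illustrative client for the /predict endpoint; assumes the API is reachable at http://localhost:8000.
import requests

payload = {
    "context": "The Eiffel Tower was completed in 1889 and stands in Paris.",
    "question": "When was the Eiffel Tower completed?",
}

# POST the context/question pair; the endpoint returns {"prediction": "..."}.
response = requests.post("http://localhost:8000/predict", json=payload, timeout=60)
response.raise_for_status()
print(response.json()["prediction"])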