URL="http://backend_inference:8000/predict"
PORT="8090"
HOST="localhost"
QA_MODEL="qa_model"
QA_TOKENIZER="qa_tokenizer"
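
A minimal sketch of how these variables might be consumed, assuming they configure a small service on `HOST:PORT` that forwards question-answering requests to the inference backend at `URL`. The payload shape and the `ask` helper are assumptions for illustration, not the service's actual API.

```python
import os

import requests

# Read the configuration from the environment, falling back to the values above.
URL = os.getenv("URL", "http://backend_inference:8000/predict")
HOST = os.getenv("HOST", "localhost")
PORT = int(os.getenv("PORT", "8090"))
QA_MODEL = os.getenv("QA_MODEL", "qa_model")
QA_TOKENIZER = os.getenv("QA_TOKENIZER", "qa_tokenizer")


def ask(question: str, context: str) -> dict:
    """Send a QA request to the inference backend and return its JSON response.

    The request fields (question/context plus model and tokenizer names) are an
    assumption; adjust them to whatever the backend actually expects.
    """
    payload = {
        "question": question,
        "context": context,
        "model": QA_MODEL,
        "tokenizer": QA_TOKENIZER,
    }
    response = requests.post(URL, json=payload, timeout=30)
    response.raise_for_status()
    return response.json()


if __name__ == "__main__":
    answer = ask(
        "Where does the service forward requests?",
        "Requests are forwarded to the inference backend configured by URL.",
    )
    print(answer)
```

Reading every value through `os.getenv` keeps the defaults above while letting a deployment override them (for example via a `.env` file or container environment) without code changes.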