generated from pallavi176/DVC-project-Docs-template
-
Notifications
You must be signed in to change notification settings - Fork 0
/
app.py
81 lines (64 loc) · 3.05 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import os
import sys
import torch
import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from typing import Any, Dict, List, ClassVar
from ner.components.model_architecture import XLMRobertaForTokenClassification
from ner.config.configuration import Configuration
from ner.exception.exception import CustomException
# FastAPI application instance; the /train and /predict routes below are
# registered against it, and uvicorn serves it in the __main__ guard.
app = FastAPI()
class PredictPipeline:
    """Load a fine-tuned XLM-RoBERTa token-classification model and tag
    whitespace-split input text with its predicted (NER-style) labels.

    Raises LookupError at construction time when no trained model artifacts
    exist in the configured output directory.
    """

    def __init__(self, config):
        # Pull prediction settings (tokenizer, output_dir, tokenizer flags,
        # index2tag mapping) from the project Configuration object.
        self.predict_pipeline_config = config.get_model_predict_pipeline_config()
        self.tokenizer = self.predict_pipeline_config.tokenizer
        # An empty output_dir means the training pipeline has never run.
        if len(os.listdir(self.predict_pipeline_config.output_dir)) == 0:
            raise LookupError("Model not found : please Run Training Pipeline from pipeline/train_pipeline.py")
        self.model = XLMRobertaForTokenClassification.from_pretrained(self.predict_pipeline_config.output_dir)

    def run_data_preparation(self, data: str):
        """Tokenize *data*, run the model, and map argmax logits to tag names.

        Returns a list of tag strings; wraps any failure in CustomException.
        """
        try:
            data = data.split()
            input_ids = self.tokenizer(data, truncation=self.predict_pipeline_config.truncation,
                                       is_split_into_words=self.predict_pipeline_config.is_split_into_words)
            # NOTE(review): reshape(-1, 1) feeds the model seq_len batches of
            # length-1 sequences rather than one (1, seq_len) batch — this
            # looks like it should be reshape(1, -1); confirm against the
            # training pipeline before changing.
            formatted_data = torch.tensor(input_ids["input_ids"]).reshape(-1, 1)
            outputs = self.model(formatted_data).logits
            predictions = torch.argmax(outputs, dim=-1)
            # NOTE(review): special tokens appear to be stripped twice — once
            # here via predictions[1:-1] and again via pred_tags[1:-1] in the
            # return below; verify only one strip is intended.
            pred_tags = [self.predict_pipeline_config.index2tag[i.item()] for i in predictions[1:-1]]
            return pred_tags[1:-1]
        except Exception as e:
            raise CustomException(e, sys)

    def run_pipeline(self, data):
        """Return {"Input_Data": split tokens, "Tags": predicted tags} for *data*."""
        predictions = self.run_data_preparation(data)
        response = {
            "Input_Data": data.split(),
            "Tags": predictions
        }
        print(response)
        return response
# Module-level pipeline: the model is loaded once at import/startup time and
# shared by all requests (raises LookupError if no trained model exists yet).
pipeline = PredictPipeline(Configuration())
@app.get("/train")
@app.post("/train")
def train(request: Request):
    """Training endpoint: GET returns usage info, POST acknowledges a
    training request.

    NOTE(review): no training pipeline is actually invoked here yet; the
    POST branch previously returned an undefined variable (NameError).
    """
    if request.method == "GET":
        train_info = {"Pipeline": "To train please use POST method",
                      "Metadata": "Created using fastapi"}
        return JSONResponse(content=train_info, status_code=200, media_type="application/json")
    elif request.method == "POST":
        # Bug fix: the original returned `train_info`, which is only bound in
        # the GET branch, so every POST raised NameError (HTTP 500). Return an
        # explicit payload instead.
        train_info = {"Pipeline": "Training triggered",
                      "Metadata": "Created using fastapi"}
        return JSONResponse(content=train_info, status_code=200, media_type="application/json")
    else:
        # Bug fix: the original constructed this response but never returned
        # it, so other methods fell through and returned null with HTTP 200.
        return JSONResponse(content={"Error": True}, status_code=400, media_type="application/json")
@app.get("/predict")
@app.post("/predict/{data}")
def predict(request: Request, data: str = ""):
    """Prediction endpoint.

    GET /predict returns usage info; POST /predict/{data} runs the NER
    pipeline on the text supplied in the path and returns tokens + tags.

    Bug fixes vs. original: `data` now defaults to "" so a plain GET /predict
    (which has no path parameter) is not rejected with 422; the else branch
    now actually returns its error response instead of dropping it.
    """
    if request.method == "GET":
        train_info = {"Pipeline": "To Predict please use POST method",
                      "Metadata": "Created using fastapi"}
        return JSONResponse(content=train_info, status_code=200, media_type="application/json")
    elif request.method == "POST":
        response = pipeline.run_pipeline(data)
        return JSONResponse(content=response, status_code=200, media_type="application/json")
    else:
        # Bug fix: the original built this response but never returned it.
        return JSONResponse(content={"Error": True}, status_code=400, media_type="application/json")
if __name__ == "__main__":
    # Development server entry point; reload=True restarts on code changes
    # (the import-string "app:app" form is required for reload to work).
    uvicorn.run("app:app", host="localhost", port=8085, reload=True)