Skip to content

Commit

Permalink
Load biobert classifiers and tokenizers and use pipeline to classify
Browse files Browse the repository at this point in the history
  • Loading branch information
valearna committed Aug 26, 2024
1 parent 9aa8848 commit 706f23f
Show file tree
Hide file tree
Showing 2 changed files with 59 additions and 33 deletions.
10 changes: 6 additions & 4 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
psycopg2-binary
PyPDF2
tqdm
falcon~=3.1.1
requests~=2.31.0
numpy~=1.19.5
dataclasses
pyyaml~=6.0.1
wbtools==3.0.10
#git+https://github.com/WormBase/wbtools.git@develop
scikit-learn
sent2vec-prebuilt
nltk~=3.6.3
transformers~=4.35.2
Original file line number Diff line number Diff line change
@@ -1,15 +1,11 @@
#!/usr/bin/env python3
import json
import logging
from wsgiref import simple_server

import joblib
import sent2vec
import falcon
import os

from wsgiref import simple_server
from falcon import HTTPStatus

from transformers import AutoModelForSequenceClassification, TextClassificationPipeline, AutoTokenizer

logger = logging.getLogger(__name__)

Expand All @@ -32,28 +28,45 @@ class SentenceClassificationReader:

def __init__(self):
    """Load all BioBERT sequence classifiers and their tokenizers.

    Models and tokenizers are read from the same mounted directory,
    one subdirectory per (classifier_type, data_type) pair.
    """
    # NOTE(review): the sent2vec model load was removed here — this commit
    # replaces sentence embeddings + sklearn with BioBERT pipelines, and the
    # new on_post path never references self.sent2vec_model.
    self.sentence_classifiers = self.load_sentence_classifiers("/var/sentence_classification_models/")
    self.sentence_tokenizers = self.load_tokenizers("/var/sentence_classification_models/")

@staticmethod
def load_tokenizers(tokenizers_path):
    """Load one BioBERT tokenizer per classifier.

    Args:
        tokenizers_path: directory containing one Hugging Face model
            directory per (classifier_type, data_type) pair, named
            "<classifier_type>_<data_type>" (e.g. "all_info_expression").

    Returns:
        dict keyed by data type ("expression", "kinase"), each value a dict
        keyed by classifier type ("all_info", "curatable", "language")
        mapping to an AutoTokenizer instance — the same shape as the dict
        returned by load_sentence_classifiers.
    """
    logger.info("Loading tokenizers...")
    # NOTE(review): the original mixed bare directory names with ".joblib"
    # suffixes (a leftover from the sklearn model filenames); from_pretrained
    # expects a model directory, so the suffixes are dropped uniformly here —
    # confirm this matches the on-disk layout.
    tokenizers = {
        data_type: {
            classifier_type: AutoTokenizer.from_pretrained(
                f"{tokenizers_path}/{classifier_type}_{data_type}")
            for classifier_type in ("all_info", "curatable", "language")
        }
        for data_type in ("expression", "kinase")
    }
    # Fixed copy/paste in the log message: this loads tokenizers, not
    # classifiers.
    logger.info("All tokenizers loaded")
    return tokenizers

@staticmethod
def load_sentence_classifiers(models_path):
    """Load one BioBERT sequence-classification model per classifier.

    Args:
        models_path: directory containing one Hugging Face model directory
            per (classifier_type, data_type) pair, named
            "<classifier_type>_<data_type>" (e.g. "all_info_expression").

    Returns:
        dict keyed by data type ("expression", "kinase"), each value a dict
        keyed by classifier type ("all_info", "curatable", "language")
        mapping to an AutoModelForSequenceClassification instance — the same
        shape as the dict returned by load_tokenizers.
    """
    logger.info("Loading sentence classifiers...")
    # NOTE(review): the original kept ".joblib" suffixes on five of the six
    # paths (leftover sklearn filenames) while "all_info_expression" had
    # none; the suffixes are dropped uniformly — confirm the on-disk layout.
    classifiers = {
        data_type: {
            classifier_type: AutoModelForSequenceClassification.from_pretrained(
                f"{models_path}/{classifier_type}_{data_type}")
            for classifier_type in ("all_info", "curatable", "language")
        }
        for data_type in ("expression", "kinase")
    }
    logger.info("All sentence classifiers loaded")
    return classifiers
def on_post(self, req, resp, req_type):
    # Handle POST: only "classify_sentences" requests carrying a "sentences"
    # list in the JSON body are accepted.
    if req_type != "classify_sentences" or "sentences" not in req.media:
        raise falcon.HTTPError(falcon.HTTP_BAD_REQUEST)
    # NOTE(review): the six embed/predict lines below are the pre-BioBERT
    # (sent2vec + sklearn) path this commit replaces; they reference
    # self.sent2vec_model, which the new __init__ no longer sets — they look
    # like stale removed lines interleaved by the diff and should not survive
    # in the merged file.
    sentence_embeddings = self.sent2vec_model.embed_sentences(req.media["sentences"])
    classes_all_info_expression = self.sentence_classifiers["expression"]["all_info"].predict(sentence_embeddings)
    classes_curatable_expression = self.sentence_classifiers["expression"]["curatable"].predict(sentence_embeddings)
    classes_language_expression = self.sentence_classifiers["expression"]["language"].predict(sentence_embeddings)
    classes_all_info_kinase = self.sentence_classifiers["kinase"]["all_info"].predict(sentence_embeddings)
    classes_curatable_kinase = self.sentence_classifiers["kinase"]["curatable"].predict(sentence_embeddings)
    classes_language_kinase = self.sentence_classifiers["kinase"]["language"].predict(sentence_embeddings)
    # NOTE(review): `req["media"]["sentences"]` is a bug in all six pipeline
    # calls — a falcon Request is not subscriptable; the guard above uses the
    # correct `req.media["sentences"]`. Also, constructing a
    # TextClassificationPipeline on every request repeats work; the six
    # pipelines could be built once in __init__ — confirm with the author.
    classes_all_info_expression = TextClassificationPipeline(
        model=self.sentence_classifiers["expression"]["all_info"],
        tokenizer=self.sentence_tokenizers["expression"]["all_info"])(req["media"]["sentences"])
    classes_curatable_expression = TextClassificationPipeline(
        model=self.sentence_classifiers["expression"]["curatable"],
        tokenizer=self.sentence_tokenizers["expression"]["curatable"])(req["media"]["sentences"])
    classes_language_expression = TextClassificationPipeline(
        model=self.sentence_classifiers["expression"]["language"],
        tokenizer=self.sentence_tokenizers["expression"]["language"])(req["media"]["sentences"])
    classes_all_info_kinase = TextClassificationPipeline(
        model=self.sentence_classifiers["kinase"]["all_info"],
        tokenizer=self.sentence_tokenizers["kinase"]["all_info"])(req["media"]["sentences"])
    classes_curatable_kinase = TextClassificationPipeline(
        model=self.sentence_classifiers["kinase"]["curatable"],
        tokenizer=self.sentence_tokenizers["kinase"]["curatable"])(req["media"]["sentences"])
    classes_language_kinase = TextClassificationPipeline(
        model=self.sentence_classifiers["kinase"]["language"],
        tokenizer=self.sentence_tokenizers["kinase"]["language"])(req["media"]["sentences"])
    # NOTE(review): TextClassificationPipeline returns a plain list of dicts
    # (label/score), not an ndarray — `.tolist()` below is a leftover from
    # the sklearn `.predict()` results and will raise AttributeError.
    classes = {
        "expression": {
            "all_info": classes_all_info_expression.tolist(),
Expand Down

0 comments on commit 706f23f

Please sign in to comment.