Mirror of https://github.com/fhswf/aki_prj23_transparenzregister.git (synced 2025-05-14 05:18:46 +02:00)
300 ner service crash (#301)
- Bugfix: if a document from Mongo had no entry for text or title, the pipeline crashed --> a check was added.
- The override of the database name was removed.
- An attribute of the transformers pipeline changed.

---------

Co-authored-by: Philipp Horstenkamp <philipp@horstenkamp.de>
parent 7620efc6da
commit a96ebe916c
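As a quick illustration of the added check, here is a minimal, hypothetical sketch (the helper name, the example documents, and the surrounding code are made up and are not part of the repository): a document loaded from the Mongo staging DB may carry no usable "text" or "title" value, so the field is validated before any NER call.

    from typing import Any


    def get_document_text(doc: dict[str, Any], doc_attrib: str) -> str | None:
        """Hypothetical helper: return a cleaned text field, or None if it is unusable."""
        text = doc.get(doc_attrib)
        # Guard: only accept a non-empty string; missing keys, None values and
        # empty strings are exactly the cases that previously crashed the pipeline.
        if not isinstance(text, str) or not text.strip():
            return None
        return text.strip()


    # Documents without a usable "title" entry are skipped instead of raising.
    docs = [{"title": "Volkswagen AG stellt Jahresbericht vor"}, {"title": None}, {}]
    print([t for d in docs if (t := get_document_text(d, "title")) is not None])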
@@ -22,7 +22,6 @@ class EntityPipeline:
     def __init__(self, conn_string: conn.MongoConnection) -> None:
         """Method to connect to StagingDB."""
         self.connect_string = conn_string
-        self.connect_string.database = "transparenzregister_ner"
         self.connector = conn.MongoConnector(self.connect_string)
         self.news_obj = news.MongoNewsService(self.connector)

@@ -44,7 +44,7 @@ class NerAnalysisService:
         self.classifier = pipeline(
             "ner",
             model="fhswf/bert_de_ner",
-            grouped_entities=True,
+            aggregation_strategy="simple",
             tokenizer="dbmdz/bert-base-german-cased",
         )

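The change from grouped_entities=True to aggregation_strategy="simple" follows the Hugging Face transformers API, where grouped_entities has been deprecated in favour of aggregation_strategy; "simple" merges sub-word tokens back into whole entity spans. A small usage sketch with the model and tokenizer named in the diff (the example sentence is made up):

    from transformers import pipeline

    # aggregation_strategy="simple" groups sub-word tokens into complete
    # entities, replacing the deprecated grouped_entities=True flag.
    classifier = pipeline(
        "ner",
        model="fhswf/bert_de_ner",
        tokenizer="dbmdz/bert-base-german-cased",
        aggregation_strategy="simple",
    )

    # Each result dict contains "entity_group", "word" and "score".
    for ent in classifier("Die Volkswagen AG hat ihren Sitz in Wolfsburg."):
        print(ent["entity_group"], ent["word"], round(float(ent["score"]), 3))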
@@ -72,8 +72,9 @@ class NerAnalysisService:
         # init list for entities
         entities = []

-        text = doc[doc_attrib]
-        # get entities
-        doc_nlp = self.nlp(text)
+        text = doc[doc_attrib].strip()
+        # check if text is a string and not empty
+        if isinstance(text, str) and text:
+            # get entities
+            doc_nlp = self.nlp(text)

@@ -104,7 +105,9 @@ class NerAnalysisService:
         entities = []

         # Search the text for company names
-        text = doc[doc_attrib]
-        # Convert title to lowercase
-        text = text.lower()
+        text = doc[doc_attrib].strip()
+        # check if text is a string and not empty
+        if isinstance(text, str) and text:
+            # Convert title to lowercase
+            text = text.lower()

@@ -136,6 +139,8 @@ class NerAnalysisService:
         # init list for entities
         entities = []
         text = doc[doc_attrib]
-        sentences = text.split(". ")  # Split text into sentences based on '. '
+        # check if text is a string and not empty
+        if isinstance(text, str) and text:
+            sentences = text.split(". ")  # Split text into sentences based on '. '

-        # Process each sentence separately
+            # Process each sentence separately
@@ -144,7 +149,8 @@ class NerAnalysisService:
                     sentence
                 )  # Assuming 'classifier' processes a single sentence at a time

-                for i in range(len(res)):
-                    if res[i]["entity_group"] == ent_type:
-                        entities.append(res[i]["word"])
+                for _ in res:
+                    if _["entity_group"] == ent_type:
+                        entities.append(_["word"])

         return dict(Counter(entities))
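The unchanged return statement at the end aggregates the collected entity mentions with collections.Counter; as a standalone illustration of that pattern (the entity list is made up):

    from collections import Counter

    # Entity mentions gathered across all sentences of one document.
    entities = ["Volkswagen AG", "Siemens AG", "Volkswagen AG"]

    # dict(Counter(...)) maps each entity to its number of mentions.
    print(dict(Counter(entities)))  # {'Volkswagen AG': 2, 'Siemens AG': 1}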