Update Readme.md

parent 770391ed
@@ -21,7 +21,7 @@ class EmotionAnalyzer(Analyzer):
            task: the task defining which pipeline will be returned.
            model: the model that will be used by the pipeline to make predictions.
            allScores: True, if we want that the classifier returns all scores. False, in other case.
-            maxEmbedding: The number of max_position_embedings in the config.json of the model selected.
+            maxEmbedding: The number of max_position_embeddings in the config.json of the model selected.
        """
        self.emotionsClassifier = pipeline(task,model=modelEmotions, return_all_scores=allScores)
        self.maxEmbedding = maxEmbedding
...
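For context, a minimal sketch of the pattern this hunk documents: a text-classification pipeline that returns all scores, with each input cut down using the model's max_position_embeddings before inference. The emotion model named below is an assumption for illustration, not necessarily the class's default.

```python
from transformers import AutoConfig, pipeline

# Assumed emotion model; any Hugging Face text-classification model would fit here.
model_name = "pysentimiento/robertuito-emotion-analysis"

# maxEmbedding mirrors max_position_embeddings from the model's config.json.
max_embedding = AutoConfig.from_pretrained(model_name).max_position_embeddings

# return_all_scores=True makes the pipeline report every label, as in the analyzer.
classifier = pipeline("text-classification", model=model_name, return_all_scores=True)

text = "¡Qué alegría verte de nuevo!"
scores = classifier(text[:max_embedding])  # character-level cut, mirroring the analyzer
print(scores)
```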
@@ -11,7 +11,7 @@ class IronityAnalyzer(Analyzer):
    Attributes:
        ironityClassifier: a pipeline that uses a model for inference the ironity of the text of a sequence.
            By default, the label 'NI' is non-ironic and 'I' ironic.
-        maxEmbedding: The number of max_position_embedings in the config.json of the model selected.
+        maxEmbedding: The number of max_position_embeddings in the config.json of the model selected.
    """
    def __init__(self, task = "text-classification",modelIronity = 'dtomas/roberta-base-bne-irony', allScores = True, maxEmbedding = 514):
@@ -22,7 +22,7 @@ class IronityAnalyzer(Analyzer):
            task: the task defining which pipeline will be returned.
            model: the model that will be used by the pipeline to make predictions.
            allScores: True, if we want that the classifier returns all scores. False, in other case.
-            maxEmbedding: The number of max_position_embedings in the config.json of the model selected.
+            maxEmbedding: The number of max_position_embeddings in the config.json of the model selected.
        """
        if modelIronity == 'dtomas/roberta-base-bne-irony':
            model = AutoModelForSequenceClassification.from_pretrained(modelIronity)
@@ -32,7 +32,7 @@ class IronityAnalyzer(Analyzer):
            self.ironityClassifier = pipeline(task,model= model, tokenizer=tokenizer,return_all_scores=allScores, truncation=True)
        else:
            self.ironityClassifier = pipeline(task,model= modelIronity, return_all_scores=allScores)
-        self.maxEmbeding = maxEmbedding
+        self.maxEmbedding = maxEmbedding
@@ -60,7 +60,7 @@ class IronityAnalyzer(Analyzer):
        """
        arrayResults =[]
        for text in arrayText:
-            prediction = self.ironityClassifier(text[:self.maxEmbeding])
+            prediction = self.ironityClassifier(text[:self.maxEmbedding])
            arrayResults.append(prediction)
        return arrayResults
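The IronityAnalyzer hunks above also fix the attribute name so that the value stored in __init__ (self.maxEmbedding) matches the one read in the prediction loop; with the old self.maxEmbeding the slice would raise AttributeError. A hedged sketch of the repaired flow, using the dtomas/roberta-base-bne-irony branch shown in the diff and the 514 default from the signature; the sample texts are placeholders.

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

model_name = "dtomas/roberta-base-bne-irony"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Labels: 'NI' = non-ironic, 'I' = ironic (per the class docstring above).
irony_classifier = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    return_all_scores=True,
    truncation=True,
)

max_embedding = 514  # default maxEmbedding from the __init__ signature above

texts = ["Qué bien, otro lunes de lluvia...", "Hoy hace un día estupendo."]
results = []
for text in texts:
    # Same character-level slice the analyzer applies; the attribute it reads
    # must be the one set in __init__, which is what the rename guarantees.
    results.append(irony_classifier(text[:max_embedding]))
print(results)
```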
@@ -18,10 +18,10 @@ class PolarityAnalyzer(Analyzer):
            task: the task defining which pipeline will be returned
            model: the model that will be used by the pipeline to make predictions
            allScores: True, if we want that the classifier returns all scores. False, in other case
-            maxEmbedding: The number of max_position_embedings in the config.json of the model selected.
+            maxEmbedding: The number of max_position_embeddings in the config.json of the model selected.
        """
        self.polarityClassifier = pipeline(task,model= modelPolarity, return_all_scores=allScores)
-        self.maxEmbeding = maxEmbedding
+        self.maxEmbedding = maxEmbedding
@@ -49,7 +49,7 @@ class PolarityAnalyzer(Analyzer):
        """
        arrayResults =[]
        for text in arrayText:
-            prediction = self.polarityClassifier(text[:self.maxEmbeding])
+            prediction = self.polarityClassifier(text[:self.maxEmbedding])
            #arrayResults.append(prediction[0][0])
            arrayResults.append(prediction)
        return arrayResults
...
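The PolarityAnalyzer change is the same rename applied to the batch loop. A short sketch of that loop and the shape of the list it returns, assuming a generic Spanish sentiment model rather than the class's actual default:

```python
from transformers import pipeline

polarity_classifier = pipeline(
    "text-classification",
    model="pysentimiento/robertuito-sentiment-analysis",  # assumed model for illustration
    return_all_scores=True,
)
max_embedding = 514  # placeholder matching the analyzers' default

array_text = ["Me ha encantado el servicio.", "No pienso volver nunca."]
array_results = [polarity_classifier(text[:max_embedding]) for text in array_text]

# Each entry holds the full score list for one text, e.g.:
# [[{'label': 'POS', 'score': 0.98}, {'label': 'NEU', 'score': 0.01}, {'label': 'NEG', 'score': 0.01}]]
print(array_results)
```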