Experiment 2

parent 887c1e63
Showing with 32 additions and 3 deletions
...@@ -142,10 +142,10 @@ def classification_embedings_rnn(tweets_train, tweets_train_labels_numeric, twee ...@@ -142,10 +142,10 @@ def classification_embedings_rnn(tweets_train, tweets_train_labels_numeric, twee
#number of features:_32 each vector of 200 dim is converted to a vector of 32 dim #number of features:_32 each vector of 200 dim is converted to a vector of 32 dim
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2)) model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
#model.add(Bidirectional(LSTM(2,dropout=0.2,recurrent_dropout=0.2,return_sequences=True))) #model.add(Bidirectional(LSTM(2,dropout=0.2,recurrent_dropout=0.2,return_sequences=True)))
model.add(Dense(32, activation='tanh')) model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5)) model.add(Dropout(0.5))
model.add(Dense(len(CLASSES), activation='softmax')) model.add(Dense(len(CLASSES), activation='softmax'))
model.add(Activation('sigmoid'))
# summarize the model # summarize the model
print(model.summary()) print(model.summary())
...@@ -154,7 +154,7 @@ def classification_embedings_rnn(tweets_train, tweets_train_labels_numeric, twee ...@@ -154,7 +154,7 @@ def classification_embedings_rnn(tweets_train, tweets_train_labels_numeric, twee
# compile the model # compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc']) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
print("Training the model...") print("Training the model...")
model.fit(train_features_pad, tweets_train_labels_numeric, batch_size=32, epochs=10, verbose=1, validation_data=(train_features_pad,tweets_train_labels_numeric)) model.fit(train_features_pad, tweets_train_labels_numeric, batch_size=32, epochs=50, verbose=1, validation_data=(train_features_pad,tweets_train_labels_numeric))
loss, accuracy = model.evaluate(train_features_pad, tweets_train_labels_numeric, batch_size=32, verbose=1) loss, accuracy = model.evaluate(train_features_pad, tweets_train_labels_numeric, batch_size=32, verbose=1)
print('Accuracy trainning: %f' % (accuracy*100)) print('Accuracy trainning: %f' % (accuracy*100))
......
Experiment 1:
Params: Params:
vocab_embeddings = 100000 vocab_embeddings = 100000
...@@ -24,3 +26,30 @@ Macro-Recall: 0.5156404166549265 ...@@ -24,3 +26,30 @@ Macro-Recall: 0.5156404166549265
Macro-F1: 0.5100454275009482 Macro-F1: 0.5100454275009482
Accuracy: 0.5155875299760192 Accuracy: 0.5155875299760192
Experiment 2:
Params:
vocab_embeddings = 100000
max_lenght_tweet = 40
Layers:
e = Embedding(feature_size, EMBEDDING_DIM, input_length=max_len_input, weights=[embedding_matrix], trainable=False)
model.add(e)
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(CLASSES), activation='softmax'))
Accuracy training: 69.861719
Results evaluation:
*** Results RNN_LSTM ***
Macro-Precision: 0.6040684610078678
Macro-Recall: 0.6040370418815006
Macro-F1: 0.6024329096077434
Accuracy: 0.604003753518924
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment