
Commit

minor update
jiesutd committed Jan 10, 2019
1 parent c9b14d4 commit 3d57dc6
Showing 4 changed files with 23 additions and 5 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -1,3 +1,4 @@
*.py[cod]
__pycache__
*.dset
*.model
4 changes: 3 additions & 1 deletion model/sentclassifier.py
@@ -2,7 +2,7 @@
# @Author: Jie Yang
# @Date: 2019-01-01 21:11:50
# @Last Modified by: Jie Yang, Contact: [email protected]
# @Last Modified time: 2019-01-02 00:35:39
# @Last Modified time: 2019-01-10 14:53:57

from __future__ import print_function
from __future__ import absolute_import
@@ -42,7 +42,9 @@ def neg_log_likelihood_loss(self, word_inputs, feature_inputs, word_seq_lengths,
_, tag_seq = torch.max(score, 1)
if self.average_batch:
total_loss = total_loss / batch_size
print("aa")
print(total_loss)
exit(0)
return total_loss, tag_seq


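For reference, the `neg_log_likelihood_loss` method touched above returns the summed loss and the arg-max tag sequence, dividing the loss by the batch size when `average_batch` is set. A minimal sketch of that pattern for sentence classification, where `score`, `batch_label`, and the `F.nll_loss` call are illustrative assumptions rather than the repository's actual inputs:

import torch
import torch.nn.functional as F

def neg_log_likelihood_loss(score, batch_label, average_batch=True):
    # score: (batch_size, label_size) unnormalized sentence-level scores
    # batch_label: (batch_size,) gold label ids
    batch_size = score.size(0)
    log_probs = F.log_softmax(score, dim=1)
    total_loss = F.nll_loss(log_probs, batch_label, reduction="sum")
    _, tag_seq = torch.max(score, 1)          # predicted label for each sentence
    if average_batch:
        total_loss = total_loss / batch_size  # average the summed loss over the batch
    return total_loss, tag_seq

# example usage with random scores: 4 sentences, 3 candidate labels
score = torch.randn(4, 3)
batch_label = torch.randint(0, 3, (4,))
loss, tags = neg_log_likelihood_loss(score, batch_label)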
7 changes: 4 additions & 3 deletions model/wordsequence.py
@@ -2,7 +2,7 @@
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: [email protected]
# @Last Modified time: 2019-01-02 00:37:16
# @Last Modified time: 2019-01-10 15:01:16
from __future__ import print_function
from __future__ import absolute_import
import torch
@@ -121,7 +121,7 @@ def sentence_representation(self, word_inputs, feature_inputs, word_seq_lengths,
Variable(batch_size, sent_len, hidden_dim)
"""
word_represent = self.wordrep(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
print("b",word_represent)
# print("b",word_represent)
## word_embs (batch_size, seq_len, embed_size)
batch_size = word_inputs.size(0)
if self.word_feature_extractor == "CNN":
@@ -131,9 +131,10 @@ def sentence_representation(self, word_inputs, feature_inputs, word_seq_lengths,
cnn_feature = F.relu(self.cnn_list[idx](word_in))
else:
cnn_feature = F.relu(self.cnn_list[idx](cnn_feature))
# print("cnn: %s"%idx, cnn_feature)
cnn_feature = self.cnn_drop_list[idx](cnn_feature)
cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature)
print("a", cnn_feature)
# print("a", cnn_feature)
feature_out = F.max_pool1d(cnn_feature, cnn_feature.size(2)).view(batch_size, -1)
print(feature_out)
exit(0)
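For reference, the `sentence_representation` path shown above stacks 1-D convolutions, each followed by ReLU, dropout, and batch normalization, and then max-pools over the sequence dimension to obtain a fixed-size sentence vector. A self-contained sketch of that pattern with invented layer sizes, not the exact `wordsequence.py` module:

import torch
import torch.nn as nn
import torch.nn.functional as F

class CNNSentenceEncoder(nn.Module):
    """Stacked 1-D CNN encoder that max-pools word features into a sentence vector."""

    def __init__(self, embed_size=100, hidden_dim=128, num_layers=2, kernel=3, dropout=0.5):
        super().__init__()
        self.cnn_list = nn.ModuleList(
            [nn.Conv1d(embed_size if i == 0 else hidden_dim, hidden_dim, kernel, padding=kernel // 2)
             for i in range(num_layers)])
        self.cnn_drop_list = nn.ModuleList([nn.Dropout(dropout) for _ in range(num_layers)])
        self.cnn_batchnorm_list = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(num_layers)])

    def forward(self, word_represent):
        # word_represent: (batch_size, seq_len, embed_size)
        batch_size = word_represent.size(0)
        cnn_feature = word_represent.transpose(2, 1)  # Conv1d expects (batch, channels, seq_len)
        for idx, conv in enumerate(self.cnn_list):
            cnn_feature = F.relu(conv(cnn_feature))
            cnn_feature = self.cnn_drop_list[idx](cnn_feature)
            cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature)
        # max-pool over the remaining sequence dimension -> (batch_size, hidden_dim)
        return F.max_pool1d(cnn_feature, cnn_feature.size(2)).view(batch_size, -1)

# example: 4 sentences of 20 words with 100-dim word representations -> (4, 128)
feature_out = CNNSentenceEncoder()(torch.randn(4, 20, 100))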
16 changes: 15 additions & 1 deletion utils/functions.py
@@ -2,7 +2,7 @@
# @Author: Jie
# @Date: 2017-06-15 14:23:06
# @Last Modified by: Jie Yang, Contact: [email protected]
# @Last Modified time: 2019-01-01 23:44:28
# @Last Modified time: 2019-01-10 15:03:31
from __future__ import print_function
from __future__ import absolute_import
import sys
@@ -81,6 +81,16 @@ def read_instance(input_file, word_alphabet, char_alphabet, feature_alphabets, l
word_Ids = []
feature_Ids = []
label_Ids = []
if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)) :
instence_texts.append([words, feat_list, chars, label])
instence_Ids.append([word_Ids, feat_Id, char_Ids,label_Id])
words = []
features = []
chars = []
char_Ids = []
word_Ids = []
feature_Ids = []
label_Ids = []

else:
### for sequence labeling data format i.e. CoNLL 2003
@@ -136,6 +146,10 @@ def read_instance(input_file, word_alphabet, char_alphabet, feature_alphabets, l
feature_Ids = []
char_Ids = []
label_Ids = []
if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)) :
instence_texts.append([words, features, chars, labels])
instence_Ids.append([word_Ids, feature_Ids, char_Ids,label_Ids])

return instence_texts, instence_Ids


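The substantive change in this commit is in `read_instance`: after the read loop, a sentence still buffered because the file has no trailing blank line is now appended to the output, subject to the same `max_sent_length` filter applied inside the loop. A simplified sketch of that flush pattern for the CoNLL-style branch, keeping only hypothetical words/labels fields (the real function also collects character, feature, and alphabet-id sequences):

def read_instance(input_file, max_sent_length=-1):
    # CoNLL-style input: one "token label" pair per line, blank lines separate sentences
    instance_texts = []
    words, labels = [], []
    with open(input_file, "r", encoding="utf-8") as fin:
        for line in fin:
            line = line.strip()
            if line:
                columns = line.split()
                words.append(columns[0])
                labels.append(columns[-1])
            elif words and (max_sent_length < 0 or len(words) < max_sent_length):
                instance_texts.append([words, labels])
                words, labels = [], []
            else:
                words, labels = [], []
    # the fix in this commit: flush the last buffered sentence when the file
    # does not end with a blank line, applying the same max_sent_length filter
    if words and (max_sent_length < 0 or len(words) < max_sent_length):
        instance_texts.append([words, labels])
    return instance_texts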
