
Commit b1908b4

Added CNN Model
1 parent 0b724b1 commit b1908b4

File tree

1 file changed: +87 −0 lines changed


models/CNN.py

Lines changed: 87 additions & 0 deletions
# _*_ coding: utf-8 _*_

import torch
import torch.nn as nn
from torch.nn import functional as F


class CNN(nn.Module):
    def __init__(self, batch_size, output_size, in_channels, out_channels, kernel_heights, stride, padding, keep_probab, vocab_size, embedding_length, weights):
        super(CNN, self).__init__()

        """
        Arguments
        ---------
        batch_size : Size of each batch, same as the batch_size of the data returned by the TorchText BucketIterator
        output_size : 2 = (pos, neg)
        in_channels : Number of input channels. Here it is 1, as the input data has dimension = (batch_size, num_seq, embedding_length)
        out_channels : Number of output channels (feature maps) produced by each convolution
        kernel_heights : A list of 3 different kernel heights. Convolution is performed once per height and the results from each kernel height are concatenated.
        stride : Stride of the convolution
        padding : Zero-padding added to the input before the convolution
        keep_probab : Dropout probability. Note that this value is passed directly to nn.Dropout, which zeroes activations with this probability (it is not a retention probability).
        vocab_size : Size of the vocabulary containing unique words
        embedding_length : Embedding dimension of the GloVe word embeddings
        weights : Pre-trained GloVe word embeddings used to build the word_embedding look-up table
        --------
        """
        self.batch_size = batch_size
        self.output_size = output_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_heights = kernel_heights
        self.stride = stride
        self.padding = padding
        self.vocab_size = vocab_size
        self.embedding_length = embedding_length

        self.word_embeddings = nn.Embedding(vocab_size, embedding_length)
        self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)  # frozen pre-trained embeddings
        self.conv1 = nn.Conv2d(in_channels, out_channels, (kernel_heights[0], embedding_length), stride, padding)
        self.conv2 = nn.Conv2d(in_channels, out_channels, (kernel_heights[1], embedding_length), stride, padding)
        self.conv3 = nn.Conv2d(in_channels, out_channels, (kernel_heights[2], embedding_length), stride, padding)
        self.dropout = nn.Dropout(keep_probab)
        self.label = nn.Linear(len(kernel_heights) * out_channels, output_size)

    def conv_block(self, input, conv_layer):
        conv_out = conv_layer(input)  # conv_out.size() = (batch_size, out_channels, dim, 1)
        activation = F.relu(conv_out.squeeze(3))  # activation.size() = (batch_size, out_channels, dim)
        max_out = F.max_pool1d(activation, activation.size()[2]).squeeze(2)  # max_out.size() = (batch_size, out_channels)

        return max_out

    def forward(self, input_sentences, batch_size=None):
        """
        The idea of the Convolutional Neural Network for text classification is simple: we perform the convolution operation on the embedding matrix,
        whose shape for each batch is (num_seq, embedding_length), using kernels of varying height but constant width equal to embedding_length.
        Each convolution is followed by a ReLU activation; for each kernel height we then max-pool over time, keeping the maximum activation of every
        channel, and concatenate the resulting tensors. This output is fully connected to an output layer of two units, which gives the logits for the
        positive and negative classes.

        Parameters
        ----------
        input_sentences : input sentences of shape = (batch_size, num_sequences)
        batch_size : default = None. Used only for prediction on a single sentence after training (batch_size = 1)

        Returns
        -------
        Output of the linear layer containing the logits for the pos & neg classes.
        logits.size() = (batch_size, output_size)
        """
        input = self.word_embeddings(input_sentences)
        # input.size() = (batch_size, num_seq, embedding_length)
        input = input.unsqueeze(1)
        # input.size() = (batch_size, 1, num_seq, embedding_length)
        max_out1 = self.conv_block(input, self.conv1)
        max_out2 = self.conv_block(input, self.conv2)
        max_out3 = self.conv_block(input, self.conv3)

        all_out = torch.cat((max_out1, max_out2, max_out3), 1)
        # all_out.size() = (batch_size, num_kernels*out_channels)
        fc_in = self.dropout(all_out)
        # fc_in.size() = (batch_size, num_kernels*out_channels)
        logits = self.label(fc_in)

        return logits
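
A minimal usage sketch (not part of this commit) showing how the class above could be instantiated and called. The hyperparameter values, the random `weights` tensor, and the `models.CNN` import path are illustrative assumptions; in the real pipeline, `weights` would be the pre-trained GloVe matrix and the token indices would come from the TorchText BucketIterator.

# Usage sketch: values below are illustrative assumptions, not the repo's training config.
import torch
from models.CNN import CNN  # assumes the repo root is on sys.path

vocab_size, embedding_length = 5000, 300                 # assumed vocabulary size / GloVe dimension
weights = torch.randn(vocab_size, embedding_length)      # placeholder for pre-trained GloVe vectors

model = CNN(batch_size=32, output_size=2, in_channels=1, out_channels=100,
            kernel_heights=[3, 4, 5], stride=1, padding=0, keep_probab=0.5,
            vocab_size=vocab_size, embedding_length=embedding_length, weights=weights)

tokens = torch.randint(0, vocab_size, (32, 50))          # (batch_size, num_seq) of word indices
logits = model(tokens)                                   # logits.size() = (32, 2) for pos/neg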
