@@ -91,7 +91,7 @@ class Conv2d(Module):
         stride: the stride of the convolving kernel. Can be a single number s or a tuple (sh x sw). Default: 1
         padding: implicit zero padding on the input. Can be a single number s or a tuple. Default: 0
         dilation: If given, will do dilated (or atrous) convolutions. Can be a single number s or a tuple. Default: None
-        no_bias: If set to true, the layer will not learn an additive bias. Default: False
+        bias: If set to False, the layer will not learn an additive bias. Default: True
     Input Shape: [ * , in_channels , * , * ] : Input is minibatch x in_channels x iH x iW
     Output Shape:[ * , out_channels , * , * ] : Output shape is precisely minibatch x out_channels x floor((iH + 2*padH - kH) / dH + 1) x floor((iW + 2*padW - kW) / dW + 1)
     Members:
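The output-shape formula in this docstring can be sanity-checked with a short standalone sketch. The helper name and the sample sizes below are illustrative only, and dH/dW are read as the strides (dilation is left out of the sketch):

```python
import math

def conv2d_out_size(i, pad, k, stride):
    # floor((i + 2*pad - k) / stride + 1), as stated in the Conv2d docstring
    return math.floor((i + 2 * pad - k) / stride + 1)

# A 50x100 input with a 3x3 kernel, stride 2 and no padding gives a 24x49 output.
print(conv2d_out_size(50, 0, 3, 2), conv2d_out_size(100, 0, 3, 2))  # 24 49
```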
@@ -108,7 +108,7 @@ class Conv2d(Module):
         >>> output = m(input)
     """
     def __init__(self, in_channels, out_channels, kernel_size, stride=1,
-                 padding=0, dilation=None, groups=1, no_bias=False):
+                 padding=0, dilation=None, groups=1, bias=True):
         self.in_channels = in_channels
         self.out_channels = out_channels
         self.kh, self.kw = _pair(kernel_size)
@@ -121,7 +121,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
 
         weight = torch.Tensor(self.out_channels, self.in_channels, self.kh,
                               self.kw)
-        bias = None if no_bias else torch.Tensor(self.out_channels)
+        bias = torch.Tensor(self.out_channels) if bias else None
         super(Conv2d, self).__init__(
             weight=weight,
             bias=bias,
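With the rename, disabling the additive bias is written as `bias=False` rather than `no_bias=True`. A minimal usage sketch against the constructor signature above; the import path and the channel/kernel sizes are assumptions for illustration:

```python
from torch.nn import Conv2d  # assumed import path for the class defined in this file

# Default behaviour: the layer learns an additive bias (bias=True).
m = Conv2d(in_channels=16, out_channels=33, kernel_size=3, stride=2)

# Previously spelled Conv2d(..., no_bias=True); the bias tensor is now simply not created.
m_no_bias = Conv2d(in_channels=16, out_channels=33, kernel_size=3, stride=2, bias=False)
```

Inside `__init__`, the same flag selects between allocating a bias tensor and passing `None` to the base class, which is why the conditional expression flips its branches in this change.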
@@ -166,7 +166,7 @@ class FullConv2d(Conv2d):
         stride: the stride of the convolving kernel. Can be a single number or a tuple (sh x sw). Default: 1
         padding: implicit zero padding on the input. Can be a single number or a tuple. Default: 0
         output_padding: A padding of 0 or 1 pixels that should be added to the output. Can be a single number or a tuple. Default: 0
-        no_bias: If set to true, the layer will not learn an additive bias. Default: False
+        bias: If set to False, the layer will not learn an additive bias. Default: True
     Input Shape: [ * , in_channels , * , * ] : Input is minibatch x in_channels x iH x iW
     Output Shape:[ * , out_channels , * , * ] : Output shape is precisely minibatch x out_channels x (iH - 1) * sH - 2*padH + kH + output_paddingH x (iW - 1) * sW - 2*padW + kW
     Members:
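The FullConv2d output-shape formula roughly inverts the Conv2d one; a short sketch with illustrative numbers shows how `output_padding` recovers sizes lost to flooring in the forward convolution:

```python
def full_conv2d_out_size(i, stride, pad, k, out_pad=0):
    # (i - 1) * stride - 2 * pad + k + out_pad, as stated in the FullConv2d docstring
    return (i - 1) * stride - 2 * pad + k + out_pad

# Feeding the 24x49 map from the Conv2d example back through a 3x3 kernel at stride 2
# gives 49x99; output_padding=1 recovers the original 50x100 exactly.
print(full_conv2d_out_size(24, 2, 0, 3), full_conv2d_out_size(49, 2, 0, 3))        # 49 99
print(full_conv2d_out_size(24, 2, 0, 3, 1), full_conv2d_out_size(49, 2, 0, 3, 1))  # 50 100
```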
@@ -181,9 +181,9 @@ class FullConv2d(Conv2d):
         >>> output = m(input)
     """
     def __init__(self, in_channels, out_channels, kernel_size, stride=1,
-                 padding=0, output_padding=0, no_bias=False):
+                 padding=0, output_padding=0, bias=True):
         super(FullConv2d, self).__init__(in_channels, out_channels, kernel_size,
-                                         stride, padding, no_bias)
+                                         stride, padding, bias)
         self.out_padh, self.out_padw = _pair(output_padding)
 
     def forward(self, input):
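For FullConv2d the renamed flag is used the same way at the call site; a minimal sketch with assumed import path and illustrative sizes. Note that the `super().__init__` call above forwards the flag positionally into what is Conv2d's `dilation` slot in the signature shown earlier, so keyword arguments are the unambiguous spelling when constructing these layers (an observation about the snippet as shown, not something this change itself addresses):

```python
from torch.nn import FullConv2d  # assumed import path for the class defined in this file

# Learns a bias by default (bias=True); the old spelling was FullConv2d(..., no_bias=True).
up = FullConv2d(in_channels=33, out_channels=16, kernel_size=3, stride=2,
                output_padding=1, bias=False)
```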