@@ -17,8 +17,8 @@ static void upsample_bicubic2d_out_frame(
     int64_t nbatch,
     int64_t channels,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    double scales_h,
+    double scales_w) {
   // Special case: input/output same size, just copy
   if (input_height == output_height && input_width == output_width) {
     for (int64_t output_y = 0; output_y < output_height; output_y++) {
@@ -38,9 +38,9 @@ static void upsample_bicubic2d_out_frame(
 
   // Bicubic interpolation
   const scalar_t height_scale = area_pixel_compute_scale<scalar_t>(
-      input_height, output_height, align_corners, scales_1);
+      input_height, output_height, align_corners, scales_h);
   const scalar_t width_scale = area_pixel_compute_scale<scalar_t>(
-      input_width, output_width, align_corners, scales_2);
+      input_width, output_width, align_corners, scales_w);
 
   for (int64_t output_y = 0; output_y < output_height; output_y++) {
     for (int64_t output_x = 0; output_x < output_width; output_x++) {
@@ -99,8 +99,8 @@ static void upsample_bicubic2d_backward_out_frame(
     int64_t nbatch,
     int64_t channels,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    double scales_h,
+    double scales_w) {
   channels = channels * nbatch;
 
   // Special case: input/output same size, just copy
@@ -120,9 +120,9 @@ static void upsample_bicubic2d_backward_out_frame(
   }
 
   const scalar_t height_scale = area_pixel_compute_scale<scalar_t>(
-      input_height, output_height, align_corners, scales_1);
+      input_height, output_height, align_corners, scales_h);
   const scalar_t width_scale = area_pixel_compute_scale<scalar_t>(
-      input_width, output_width, align_corners, scales_2);
+      input_width, output_width, align_corners, scales_w);
 
   for (int64_t output_y = 0; output_y < output_height; output_y++) {
     for (int64_t output_x = 0; output_x < output_width; output_x++) {
@@ -170,8 +170,8 @@ static void upsample_bicubic2d_out_cpu_template(
     const Tensor& input_,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    double scales_h,
+    double scales_w) {
   TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
@@ -214,8 +214,8 @@ static void upsample_bicubic2d_out_cpu_template(
         nbatch,
         channels,
         align_corners,
-        scales_1,
-        scales_2);
+        scales_h,
+        scales_w);
   });
 }
 
@@ -225,8 +225,8 @@ static void upsample_bicubic2d_backward_out_cpu_template(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    double scales_h,
+    double scales_w) {
   TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
@@ -275,8 +275,8 @@ static void upsample_bicubic2d_backward_out_cpu_template(
         nbatch,
         channels,
         align_corners,
-        scales_1,
-        scales_2);
+        scales_h,
+        scales_w);
   });
 }
 } // namespace
@@ -286,22 +286,22 @@ Tensor& upsample_bicubic2d_out_cpu(
     const Tensor& input,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    double scales_h,
+    double scales_w) {
   upsample_bicubic2d_out_cpu_template(
-      output, input, output_size, align_corners, scales_1, scales_2);
+      output, input, output_size, align_corners, scales_h, scales_w);
   return output;
 }
 
 Tensor upsample_bicubic2d_cpu(
     const Tensor& input,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    double scales_h,
+    double scales_w) {
   auto output = at::empty({0}, input.options());
   upsample_bicubic2d_out_cpu_template(
-      output, input, output_size, align_corners, scales_1, scales_2);
+      output, input, output_size, align_corners, scales_h, scales_w);
   return output;
 }
 
@@ -311,10 +311,10 @@ Tensor& upsample_bicubic2d_backward_out_cpu(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    double scales_h,
+    double scales_w) {
   upsample_bicubic2d_backward_out_cpu_template(
-      grad_input, grad_output, output_size, input_size, align_corners, scales_1, scales_2);
+      grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
   return grad_input;
 }
 
@@ -323,11 +323,11 @@ Tensor upsample_bicubic2d_backward_cpu(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    double scales_h,
+    double scales_w) {
   auto grad_input = at::zeros(input_size, grad_output.options());
   upsample_bicubic2d_backward_out_cpu_template(
-      grad_input, grad_output, output_size, input_size, align_corners, scales_1, scales_2);
+      grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
   return grad_input;
 }
 
0 commit comments