Skip to content

Commit ca555ab

Browse files
apaszke
authored and soumith committed
fix comments
1 parent 63893c3 commit ca555ab

File tree

1 file changed

+3
-4
lines changed

1 file changed

+3
-4
lines changed

torch/optim/lbfgs.py

Lines changed: 3 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -16,7 +16,7 @@ class LBFGS(Optimizer):
1616
1717
.. note::
1818
This is a very memory intensive optimizer (it requires additional
19-
``param_bytes * history_size`` bytes). If it doesn't fit in memory
19+
``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
2020
try reducing the history size, or use a different algorithm.
2121
2222
Arguments:
@@ -73,7 +73,6 @@ def step(self, closure):
7373
closure (callable): A closure that reevaluates the model
7474
and returns the loss.
7575
"""
76-
# TODO: backward after closure?
7776
assert len(self.param_groups) == 1
7877

7978
group = self.param_groups[0]
@@ -156,7 +155,7 @@ def step(self, closure):
156155
al = state['al']
157156

158157
for i in range(num_old):
159-
ro[i] = 1 / old_stps[i].dot(old_dirs[i])
158+
ro[i] = 1. / old_stps[i].dot(old_dirs[i])
160159

161160
# iteration in L-BFGS loop collapsed to use just one buffer
162161
q = flat_grad.neg()
@@ -189,7 +188,7 @@ def step(self, closure):
189188

190189
# reset initial guess for step size
191190
if state['n_iter'] == 1:
192-
t = min(1, 1 / abs_grad_sum) * lr
191+
t = min(1., 1. / abs_grad_sum) * lr
193192
else:
194193
t = lr
195194

0 commit comments

Comments
 (0)