- Notifications
You must be signed in to change notification settings - Fork 537
Pytest with 89% coverage #19
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
b2f91f2 c6e648f a31d3c2 f8e822c 7d9c5e7 709d8cb 83ecc6d 64cf2fc 33f3d30 bd705ed f204e98 5aad08a a8d7301 e11b1d1 11f0652 46f297f 68d7490 67b011a 347e628 4a45135 2bc41ad 6a02db0 86418eb 286de0a 0e06129 81118f2 109fc2a e0fa14b d101e08 77037cc fac003d 0097017 251af8e 84aa318 96f8b96 838550e File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
- Loading branch information
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,63 @@ | ||
| import ot | ||
| import numpy as np | ||
| import pytest | ||
| | ||
try:  # test whether ot.dr's optional dependencies (autograd and pymanopt) are installed
    import ot.dr
    nogo = False
except ImportError:
    # ot.dr could not be imported: skip the dimension-reduction tests below
    nogo = True
| | ||
| | ||
@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
def test_fda():
    """Fisher Discriminant Analysis (FDA) smoke test.

    Builds a noisy 3-class "concentric circles" dataset padded with
    uninformative noise dimensions, fits FDA to a 2-D subspace, and
    checks that the returned projection matrix has unit-norm columns.
    """
    n = 100  # nb samples in source and target datasets
    nz = 0.2  # noise level
    # Use a dedicated RandomState instead of seeding the global RNG,
    # so this test cannot affect randomness in other tests.
    rng = np.random.RandomState(0)

    # generate circle dataset: 3 concentric rings labelled 1, 2, 3
    t = rng.rand(n) * 2 * np.pi
    ys = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
    xs = np.concatenate(
        (np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
    xs = xs * ys.reshape(-1, 1) + nz * rng.randn(n, 2)

    nbnoise = 8  # number of pure-noise (uninformative) extra dimensions

    xs = np.hstack((xs, rng.randn(n, nbnoise)))

    p = 2  # dimension of the projection subspace

    Pfda, projfda = ot.dr.fda(xs, ys, p)

    # the returned projector must be callable on the training data
    projfda(xs)

    # each column of the projection matrix must have unit norm
    assert np.allclose(np.sum(Pfda**2, 0), np.ones(p))
| | ||
| | ||
@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
def test_wda():
    """Wasserstein Discriminant Analysis (WDA) smoke test.

    Same noisy 3-class circle dataset as ``test_fda``; fits WDA with a
    small iteration budget and checks that the returned projection
    matrix has unit-norm columns.
    """
    n = 100  # nb samples in source and target datasets
    nz = 0.2  # noise level
    # Use a dedicated RandomState instead of seeding the global RNG,
    # so this test cannot affect randomness in other tests.
    rng = np.random.RandomState(0)

    # generate circle dataset: 3 concentric rings labelled 1, 2, 3
    t = rng.rand(n) * 2 * np.pi
    ys = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
    xs = np.concatenate(
        (np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
    xs = xs * ys.reshape(-1, 1) + nz * rng.randn(n, 2)

    nbnoise = 8  # number of pure-noise (uninformative) extra dimensions

    xs = np.hstack((xs, rng.randn(n, nbnoise)))

    p = 2  # dimension of the projection subspace

    # keep maxiter small: this is a smoke test, not a convergence test
    Pwda, projwda = ot.dr.wda(xs, ys, p, maxiter=10)

    # the returned projector must be callable on the training data
    projwda(xs)

    # each column of the projection matrix must have unit norm
    assert np.allclose(np.sum(Pwda**2, 0), np.ones(p))
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| | @@ -12,7 +12,8 @@ | |
| | ||
| @pytest.mark.skipif(nogpu, reason="No GPU available") | ||
| def test_gpu_sinkhorn(): | ||
| import ot.gpu | ||
| | ||
| np.random.seed(0) | ||
| ||
| | ||
| def describeRes(r): | ||
| print("min:{:.3E}, max::{:.3E}, mean::{:.3E}, std::{:.3E}".format( | ||
| | @@ -41,29 +42,31 @@ def describeRes(r): | |
| | ||
@pytest.mark.skipif(nogpu, reason="No GPU available")
def test_gpu_sinkhorn_lpl1():
    """Check that CPU and GPU sinkhorn-lpl1 produce the same transport plan.

    For several problem sizes, fits ``ot.da.OTDA_lpl1`` (CPU) and
    ``ot.gpu.da.OTDA_lpl1`` (GPU) on the same random data, prints timing
    and summary statistics, and asserts the two coupling matrices match.
    """
    np.random.seed(0)

    def describeRes(r):
        # print summary statistics of a coupling matrix, for timing logs
        print("min:{:.3E}, max:{:.3E}, mean:{:.3E}, std:{:.3E}"
              .format(np.min(r), np.max(r), np.mean(r), np.std(r)))

    for n in [50, 100, 500, 1000]:
        print(n)
        a = np.random.rand(n // 4, 100)
        labels_a = np.random.randint(10, size=(n // 4))
        b = np.random.rand(n, 100)
        time1 = time.time()
        transport = ot.da.OTDA_lpl1()
        transport.fit(a, labels_a, b)
        G1 = transport.G
        time2 = time.time()
        transport = ot.gpu.da.OTDA_lpl1()
        transport.fit(a, labels_a, b)
        G2 = transport.G
        time3 = time.time()
        print("Normal sinkhorn lpl1, time: {:6.2f} sec ".format(
            time2 - time1))
        describeRes(G1)
        print("  GPU sinkhorn lpl1, time: {:6.2f} sec ".format(
            time3 - time2))
        describeRes(G2)

        # CPU and GPU implementations must agree up to numerical tolerance
        assert np.allclose(G1, G2, rtol=1e-5, atol=1e-5)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| | @@ -9,7 +9,7 @@ | |
| def test_conditional_gradient(): | ||
| | ||
| n = 100 # nb bins | ||
| ||
| | ||
| np.random.seed(0) | ||
| Collaborator There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. RandomState | ||
| # bin positions | ||
| x = np.arange(n, dtype=np.float64) | ||
| | ||
| | @@ -38,7 +38,7 @@ def df(G): | |
| def test_generalized_conditional_gradient(): | ||
| | ||
| n = 100 # nb bins | ||
| | ||
| np.random.seed(0) | ||
| # bin positions | ||
| x = np.arange(n, dtype=np.float64) | ||
| | ||
| | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Test for what you really need to test, i.e. whether cudamat is actually available.