@@ -238,7 +238,7 @@ def run_mongo_thread(self):
             r.lock.release()
         else:
             r.lock.release()
-            r.ready.wait(2)  # Wait two seconds
+            r.ready.wait(timeout=60)
             assert r.ready.isSet(), "Rendezvous timed out"

         for i in range(self.end_request):
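The wait/assert pair above is the arrival side of a simple rendezvous barrier: every thread holds its socket until the last thread checks in. A minimal standalone sketch of that pattern using only the standard library (an illustration, not the test suite's actual Rendezvous class):

import threading

class Rendezvous(object):
    # Shared barrier state: a lock guarding the arrival count and an Event
    # that releases every waiter once the last thread arrives.
    def __init__(self, nthreads):
        self.nthreads = nthreads
        self.nthreads_run = 0
        self.lock = threading.Lock()
        self.ready = threading.Event()

def wait_at_barrier(r):
    r.lock.acquire()
    r.nthreads_run += 1
    if r.nthreads_run == r.nthreads:
        # Last thread to arrive wakes everyone else up.
        r.ready.set()
        r.lock.release()
    else:
        r.lock.release()
        # A generous timeout avoids spurious failures on slow machines,
        # mirroring the change from wait(2) to wait(timeout=60) above.
        r.ready.wait(timeout=60)
        assert r.ready.is_set(), "Rendezvous timed out"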
@@ -652,10 +652,10 @@ def leak_request():
         # Access the thread local from the main thread to trigger the
         # ThreadVigil's delete callback, returning the request socket to
         # the pool.
-        # In Python 2.6 and lesser, a dead thread's locals are deleted
+        # In Python 2.7.0 and lesser, a dead thread's locals are deleted
         # and those locals' weakref callbacks are fired only when another
-        # thread accesses the locals and finds the thread state is stale.
-        # This is more or less a bug in Python <= 2.6. Accessing the thread
+        # thread accesses the locals and finds the thread state is stale,
+        # see http://bugs.python.org/issue1868. Accessing the thread
         # local from the main thread is a necessary part of this test, and
         # realistic: in a multithreaded web server a new thread will access
         # Pool._ident._local soon after an old thread has died.
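The cleanup behavior these comments describe can be reproduced outside PyMongo. Below is a minimal standalone sketch of the vigil-plus-weakref technique, assuming nothing about PyMongo's internals; the names _Vigil, _refs, and watch_current_thread are invented here for illustration only:

import threading
import weakref

class _Vigil(object):
    """Sentinel stored in a thread's local storage; its collection signals
    that the thread's locals have been torn down."""

_local = threading.local()
_refs = {}  # keep the weakrefs alive so their callbacks can fire

def watch_current_thread(on_thread_died):
    """Arrange for on_thread_died(ident) to run after this thread's
    local storage is cleaned up."""
    vigil = _Vigil()
    _local.vigil = vigil
    ident = threading.current_thread().ident
    _refs[ident] = weakref.ref(vigil, lambda ref: on_thread_died(ident))

dead_threads = []
worker = threading.Thread(target=watch_current_thread,
                          args=(dead_threads.append,))
worker.start()
worker.join()

# On Python <= 2.7.0 (http://bugs.python.org/issue1868) the dead thread's
# locals linger until another thread touches the local, so this access is
# what actually fires the callback there; modern Pythons clean up promptly
# when the thread exits.
getattr(_local, 'vigil', None)
print(dead_threads)  # the worker's ident, recorded by the weakref callback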
@@ -664,20 +664,34 @@ def leak_request():
         # Pool reclaimed the socket
         self.assertEqual(1, len(cx_pool.sockets))
         self.assertEqual(the_sock[0], id(one(cx_pool.sockets).sock))
+        self.assertEqual(0, len(cx_pool._tid_to_sock))


 class _TestMaxPoolSize(_TestPoolingBase):
     """Test that connection pool keeps proper number of idle sockets open,
     no matter how start/end_request are called. To be run both with threads and
     with greenlets.
     """
-    def _test_max_pool_size(self, start_request, end_request):
-        c = self.get_client(max_pool_size=4, auto_start_request=False)
-        # If you increase nthreads over about 35, note a
-        # Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than
-        # about 35 Greenlets share a MongoClient. Apparently fixed in
-        # recent Gevent development.
-        nthreads = 10
+    def _test_max_pool_size(
+            self, start_request, end_request, max_pool_size=4, nthreads=10):
+        """Start `nthreads` threads. Each calls start_request `start_request`
+        times, then find_one and waits at a barrier; once all reach the barrier
+        each calls end_request `end_request` times. The test asserts that the
+        pool ends with min(max_pool_size, nthreads) sockets or, if
+        start_request wasn't called, at least one socket.
+
+        This tests both max_pool_size enforcement and that leaked request
+        sockets are eventually returned to the pool when their threads end.
+
+        You may need to increase ulimit -n on Mac.
+
+        If you increase nthreads over about 35, note a
+        Gevent 0.13.6 bug on Mac: Greenlet.join() hangs if more than
+        about 35 Greenlets share a MongoClient. Apparently fixed in
+        recent Gevent development.
+        """
+        c = self.get_client(
+            max_pool_size=max_pool_size, auto_start_request=False)

         rendevous = CreateAndReleaseSocket.Rendezvous(
             nthreads, self.use_greenlets)
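Per the docstring, every worker runs the same start_request / find_one / barrier / end_request sequence. A rough sketch of that per-thread loop, reusing wait_at_barrier from the earlier sketch; the client and collection names are placeholders rather than the test's actual fixtures:

def run_worker(client, rendezvous, start_request, end_request):
    # Optionally pin a socket to this thread; redundant calls just nest.
    for _ in range(start_request):
        client.start_request()

    # Force a socket checkout from the pool.
    client.pymongo_test.test.find_one()

    # Hold the socket until every thread has one checked out, so the pool
    # is pushed to its maximum concurrent size.
    wait_at_barrier(rendezvous)

    # Unwind zero or more of the start_request calls; any requests left
    # open are "leaked" and reclaimed only when the thread dies.
    for _ in range(end_request):
        client.end_request()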
@@ -716,14 +730,27 @@ def _test_max_pool_size(self, start_request, end_request):
             the_hub.shutdown()

         if start_request:
-            self.assertEqual(4, len(cx_pool.sockets))
+            # Trigger final cleanup in Python <= 2.7.0.
+            cx_pool._ident.get()
+
+            expected_idle = min(max_pool_size, nthreads)
+            message = (
+                '%d idle sockets (expected %d) and %d request sockets'
+                ' (expected 0)' % (
+                    len(cx_pool.sockets), expected_idle,
+                    len(cx_pool._tid_to_sock)))
+
+            self.assertEqual(
+                expected_idle, len(cx_pool.sockets), message)
         else:
             # Without calling start_request(), threads can safely share
             # sockets; the number running concurrently, and hence the number
-            # of sockets needed, is between 1 and 10, depending on thread-
-            # scheduling.
+            # of sockets needed, is between 1 and
+            # min(max_pool_size, nthreads), depending on thread scheduling.
             self.assertTrue(len(cx_pool.sockets) >= 1)

+        self.assertEqual(0, len(cx_pool._tid_to_sock))
+
     def test_max_pool_size(self):
         self._test_max_pool_size(0, 0)

@@ -732,13 +759,20 @@ def test_max_pool_size_with_request(self):

     def test_max_pool_size_with_redundant_request(self):
         self._test_max_pool_size(2, 1)
+
+    def test_max_pool_size_with_redundant_request2(self):
         self._test_max_pool_size(20, 1)

     def test_max_pool_size_with_leaked_request(self):
         # Call start_request() but not end_request() -- when threads die, they
         # should return their request sockets to the pool.
         self._test_max_pool_size(1, 0)

+    def test_max_pool_size_with_leaked_request_massive(self):
+        nthreads = 100
+        self._test_max_pool_size(
+            2, 1, max_pool_size=2 * nthreads, nthreads=nthreads)
+
     def test_max_pool_size_with_end_request_only(self):
         # Call end_request() but not start_request()
         self._test_max_pool_size(0, 1)