mit neuen venv und exe-Files

This commit is contained in:
2024-11-03 17:26:54 +01:00
parent 07c05a338a
commit 0c373ff593
15115 changed files with 1998469 additions and 0 deletions

View File

@@ -0,0 +1,187 @@
# Copyright (c) 2008-2009 AG Projects
# Copyright 2018 gevent community
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
# pylint:disable=unused-import
# It's important to do this ASAP, because if we're monkey patched,
# then importing things like the standard library test.support can
# need to construct the hub (to check for IPv6 support using a socket).
# We can't do it in the testrunner, as the testrunner spawns new, unrelated
# processes.
from .hub import QuietHub
import gevent.hub
# Install the quiet hub class before anything can create the default hub,
# so every hub built during the test run suppresses expected errors.
gevent.hub.set_default_hub_class(QuietHub)
# faulthandler dumps Python tracebacks on fatal signals (e.g. SIGSEGV).
try:
    import faulthandler
except ImportError:
    # The backport isn't installed.
    pass
else:
    # Enable faulthandler for stack traces. We have to do this here
    # for the same reasons as above.
    faulthandler.enable()
try:
    from gevent.libuv import _corecffi
except ImportError:
    # libuv CFFI backend not built/available on this platform.
    pass
else:
    # Give the native layer a chance to do test-specific setup as early
    # as possible; the module name is not needed afterwards.
    _corecffi.lib.gevent_test_setup() # pylint:disable=no-member
    del _corecffi
from .sysinfo import VERBOSE
from .sysinfo import WIN
from .sysinfo import LINUX
from .sysinfo import OSX
from .sysinfo import LIBUV
from .sysinfo import CFFI_BACKEND
from .sysinfo import DEBUG
from .sysinfo import RUN_LEAKCHECKS
from .sysinfo import RUN_COVERAGE
from .sysinfo import PY2
from .sysinfo import PY3
from .sysinfo import PY36
from .sysinfo import PY37
from .sysinfo import PY38
from .sysinfo import PY39
from .sysinfo import PY310
from .sysinfo import PYPY
from .sysinfo import PYPY3
from .sysinfo import CPYTHON
from .sysinfo import PLATFORM_SPECIFIC_SUFFIXES
from .sysinfo import NON_APPLICABLE_SUFFIXES
from .sysinfo import SHARED_OBJECT_EXTENSION
from .sysinfo import RUNNING_ON_TRAVIS
from .sysinfo import RUNNING_ON_APPVEYOR
from .sysinfo import RUNNING_ON_CI
from .sysinfo import RESOLVER_NOT_SYSTEM
from .sysinfo import RESOLVER_DNSPYTHON
from .sysinfo import RESOLVER_ARES
from .sysinfo import resolver_dnspython_available
from .sysinfo import EXPECT_POOR_TIMER_RESOLUTION
from .sysinfo import CONN_ABORTED_ERRORS
from .skipping import skipOnWindows
from .skipping import skipOnAppVeyor
from .skipping import skipOnCI
from .skipping import skipOnPyPy3OnCI
from .skipping import skipOnPyPy
from .skipping import skipOnPyPyOnCI
from .skipping import skipOnPyPyOnWindows
from .skipping import skipOnPyPy3
from .skipping import skipIf
from .skipping import skipUnless
from .skipping import skipOnLibev
from .skipping import skipOnLibuv
from .skipping import skipOnLibuvOnWin
from .skipping import skipOnLibuvOnCI
from .skipping import skipOnLibuvOnCIOnPyPy
from .skipping import skipOnLibuvOnPyPyOnWin
from .skipping import skipOnPurePython
from .skipping import skipWithCExtensions
from .skipping import skipOnPy37
from .skipping import skipOnPy310
from .skipping import skipOnPy312
from .skipping import skipOnPy3
from .skipping import skipWithoutResource
from .skipping import skipWithoutExternalNetwork
from .skipping import skipOnManylinux
from .skipping import skipOnMacOnCI
from .exception import ExpectedException
from .leakcheck import ignores_leakcheck
from .params import LARGE_TIMEOUT
from .params import DEFAULT_LOCAL_HOST_ADDR
from .params import DEFAULT_LOCAL_HOST_ADDR6
from .params import DEFAULT_BIND_ADDR
from .params import DEFAULT_BIND_ADDR_TUPLE
from .params import DEFAULT_CONNECT_HOST
from .params import DEFAULT_SOCKET_TIMEOUT
from .params import DEFAULT_XPC_SOCKET_TIMEOUT
# Convenience re-exports so test modules can use this package as a
# drop-in replacement for ``unittest``.
main = unittest.main
SkipTest = unittest.SkipTest
from .sockets import bind_and_listen
from .sockets import tcp_listener
from .openfiles import get_number_open_files
from .openfiles import get_open_files
from .testcase import TestCase
from .modules import walk_modules
# The plain stdlib TestCase, for tests that must avoid the customized
# gevent TestCase above.
BaseTestCase = unittest.TestCase
from .flaky import reraiseFlakyTestTimeout
from .flaky import reraiseFlakyTestRaceCondition
from .flaky import reraises_flaky_timeout
from .flaky import reraises_flaky_race_condition
def gc_collect_if_needed():
    """Force a collection pass on runtimes (PyPy) whose destructors are lazy."""
    if PYPY: # pragma: no cover
        import gc
        gc.collect()
# We only rely on mock for simple things like '@mock.patch()', which are
# easy to stub out here on Python 2 when the backport isn't installed.
try:
    from unittest import mock
except ImportError: # Python 2
    try:
        import mock
    except ImportError: # pragma: no cover
        # Backport not installed: provide a stub whose patch() simply
        # skips the decorated test.
        class mock(object):
            @staticmethod
            def patch(reason):
                return unittest.skip(reason)
mock = mock
# zope.interface
from zope.interface import verify

View File

@@ -0,0 +1,17 @@
# When testrunner.py is invoked with --coverage, it puts this first
# on the path as per https://coverage.readthedocs.io/en/coverage-4.0b3/subprocess.html.
# Note that this disables other sitecustomize.py files.
import coverage

try:
    coverage.process_startup()
except coverage.CoverageException as e:
    # The pure-Python tracer cannot handle greenlet concurrency; that
    # particular complaint is expected and harmless. Anything else is a
    # genuine failure: show it and propagate.
    if str(e) != "Can't support concurrency=greenlet with PyTracer, only threads are supported":
        import traceback
        traceback.print_exc()
        raise
except:
    import traceback
    traceback.print_exc()
    raise

View File

@@ -0,0 +1,57 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
from functools import wraps
def wrap_error_fatal(method):
    """
    Decorator: while *method* runs, the hub treats *every* exception as a
    system (fatal) error; the previous setting is restored afterwards.
    """
    from gevent._hub_local import get_hub_class
    # Capture the original value at decoration time so it can be restored.
    saved_system_error = get_hub_class().SYSTEM_ERROR

    @wraps(method)
    def fatal_error_wrapper(self, *args, **kwargs):
        # XXX should also be able to do gevent.SYSTEM_ERROR = object
        # which is a global default to all hubs
        get_hub_class().SYSTEM_ERROR = object
        try:
            return method(self, *args, **kwargs)
        finally:
            get_hub_class().SYSTEM_ERROR = saved_system_error
    return fatal_error_wrapper
def wrap_restore_handle_error(method):
    """
    Decorator: after *method* finishes, discard any instance-level
    ``handle_error`` override on the hub and, if an error is pending,
    re-raise it in the current greenlet.
    """
    from gevent._hub_local import get_hub_if_exists
    from gevent import getcurrent

    @wraps(method)
    def restore_fatal_error_wrapper(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        finally:
            # Remove any customized handle_error that a test set on the
            # hub instance; AttributeError means there was none (or no hub).
            try:
                del get_hub_if_exists().handle_error
            except AttributeError:
                pass
            if self.peek_error()[0] is not None:
                getcurrent().throw(*self.peek_error()[1:])
    return restore_fatal_error_wrapper

View File

@@ -0,0 +1,23 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
class ExpectedException(Exception):
    """
    Raised deliberately by tests; the hub should not print this
    exception's traceback.
    """

View File

@@ -0,0 +1,114 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import sys
import functools
import unittest
from . import sysinfo
from . import six
class FlakyAssertionError(AssertionError):
    """Wraps an assertion failure so it is recognizable as a known-flaky test."""
# The next exceptions allow us to raise them in a highly
# greppable way so that we can debug them later.
class FlakyTest(unittest.SkipTest):
    """
    A unittest exception that causes the test to be skipped when raised.

    Use this carefully: it is a code smell and indicates an undebugged
    problem. The subclasses below exist so the cause of a skip is
    highly greppable.
    """


class FlakyTestRaceCondition(FlakyTest):
    """The flakiness is definitely caused by a race condition."""


class FlakyTestTimeout(FlakyTest):
    """The flakiness is definitely caused by an unexpected timeout."""


class FlakyTestCrashes(FlakyTest):
    """The test sometimes crashes outright."""
def reraiseFlakyTestRaceCondition():
    """Re-raise the active exception wrapped in :class:`FlakyAssertionError`."""
    _, exc, tb = sys.exc_info()
    six.reraise(FlakyAssertionError,
                FlakyAssertionError(exc),
                tb)

# By default every flavor shares the same implementation; CI-specific
# overrides may rebind some of these later in this module.
reraiseFlakyTestTimeout = reraiseFlakyTestRaceCondition
reraiseFlakyTestRaceConditionLibuv = reraiseFlakyTestRaceCondition
reraiseFlakyTestTimeoutLibuv = reraiseFlakyTestRaceCondition
# On CI (or PyPy/Windows), re-raise flaky failures as skip-causing
# FlakyTest subclasses instead of assertion errors.
if sysinfo.RUNNING_ON_CI or (sysinfo.PYPY and sysinfo.WIN):
    # pylint: disable=function-redefined
    def reraiseFlakyTestRaceCondition():
        # Getting stack traces is incredibly expensive
        # in pypy on win, at least in test virtual machines.
        # It can take minutes. The traceback consistently looks like
        # the following when interrupted:

        # dump_stacks -> traceback.format_stack
        # -> traceback.extract_stack -> linecache.checkcache
        # -> os.stat -> _structseq.structseq_new

        # Moreover, without overriding __repr__ or __str__,
        # the msg doesn't get printed like we would want (its basically
        # unreadable, all printed on one line). So skip that.

        #msg = '\n'.join(dump_stacks())
        msg = str(sys.exc_info()[1])
        six.reraise(FlakyTestRaceCondition,
                    FlakyTestRaceCondition(msg),
                    sys.exc_info()[2])

    def reraiseFlakyTestTimeout():
        # Same idea as above, but classified as a timeout.
        msg = str(sys.exc_info()[1])
        six.reraise(FlakyTestTimeout,
                    FlakyTestTimeout(msg),
                    sys.exc_info()[2])

    # The libuv aliases only follow the CI behavior when libuv is the
    # active loop implementation.
    if sysinfo.LIBUV:
        reraiseFlakyTestRaceConditionLibuv = reraiseFlakyTestRaceCondition
        reraiseFlakyTestTimeoutLibuv = reraiseFlakyTestTimeout
def reraises_flaky_timeout(exc_kind=AssertionError, _func=None):
    """
    Decorator factory: when the decorated function raises *exc_kind*,
    re-raise it via *_func* (by default :func:`reraiseFlakyTestTimeout`,
    resolved lazily at call time).

    Fixes relative to the prior version: keyword arguments are now
    forwarded to the wrapped function (they were silently dropped), and
    the wrapped function's return value is propagated instead of being
    discarded. Both changes are backward compatible.
    """
    def wrapper(f):
        @functools.wraps(f)
        def m(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exc_kind:
                # _func is expected to re-raise; nothing to return here.
                (_func if _func is not None else reraiseFlakyTestTimeout)()
        return m
    return wrapper
def reraises_flaky_race_condition(exc_kind=AssertionError):
    """Like :func:`reraises_flaky_timeout`, but signals a race condition."""
    return reraises_flaky_timeout(exc_kind, _func=reraiseFlakyTestRaceCondition)

View File

@@ -0,0 +1,70 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
from contextlib import contextmanager
from gevent.hub import Hub
from .exception import ExpectedException
class QuietHub(Hub):
    """
    A Hub for tests: exceptions listed in ``EXPECTED_TEST_ERROR`` are
    never printed, and can optionally be swallowed entirely.
    """

    # NOTE(review): these appear to pre-fill the Hub's lazily-created
    # resolver/threadpool slots -- confirm against gevent.hub.Hub.
    _resolver = None
    _threadpool = None

    # Exception types that tests raise deliberately; the hub stays quiet
    # about them.
    EXPECTED_TEST_ERROR = (ExpectedException,)
    # When true, handle_error() drops expected errors instead of
    # forwarding them to the hub's parent. Toggled by the context
    # manager below.
    IGNORE_EXPECTED_TEST_ERROR = False

    @contextmanager
    def ignoring_expected_test_error(self):
        """
        Code in the body of this context manager will ignore
        ``EXPECTED_TEST_ERROR`` objects reported to ``handle_error``;
        they will not get a chance to go to the hub's parent.

        This completely changes the semantics of normal error handling
        by avoiding some switches (to the main greenlet, and eventually
        once a callback is processed, back to the hub). This should be used
        in narrow ways for test compatibility for tests that assume
        ``ExpectedException`` objects behave this way.
        """
        old = self.IGNORE_EXPECTED_TEST_ERROR
        self.IGNORE_EXPECTED_TEST_ERROR = True
        try:
            yield
        finally:
            self.IGNORE_EXPECTED_TEST_ERROR = old

    def handle_error(self, context, type, value, tb):
        type, value, tb = self._normalize_exception(type, value, tb)
        # If we check that the ``type`` is a subclass of ``EXPECTED_TEST_ERROR``,
        # and return, we completely change the semantics: We avoid raising
        # this error in the main greenlet, which cuts out several switches.
        # Overall, not good.
        if self.IGNORE_EXPECTED_TEST_ERROR and issubclass(type, self.EXPECTED_TEST_ERROR):
            # Don't pass these up; avoid switches
            return
        return Hub.handle_error(self, context, type, value, tb)

    def print_exception(self, context, t, v, tb):
        t, v, tb = self._normalize_exception(t, v, tb)
        if issubclass(t, self.EXPECTED_TEST_ERROR):
            # see handle_error
            return
        return Hub.print_exception(self, context, t, v, tb)

View File

@@ -0,0 +1,231 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import sys
import gc
from functools import wraps
import unittest
try:
import objgraph
except ImportError: # pragma: no cover
# Optional test dependency
objgraph = None
import gevent
import gevent.core
def ignores_leakcheck(func):
    """
    Mark *func* (a test method or class) to be excluded from leakchecks.

    A marked method still runs, but is not subject to leak checks. A
    marked class is skipped entirely during leakcheck runs; this is
    intended for very slow classes that cause problems such as test
    timeouts -- typically subclasses of a base class that only vary
    behaviour (such as pool sizes).
    """
    func.ignore_leakcheck = True
    return func
class _RefCountChecker(object):
    """
    Repeatedly runs a test method and watches object-count growth (via
    ``objgraph``) to decide whether the test leaks.
    """

    # Some builtin things that we ignore.
    # For awhile, we also ignored types.FrameType and types.TracebackType,
    # but those are important and often involved in leaks.
    IGNORED_TYPES = (tuple, dict,)

    try:
        CALLBACK_KIND = gevent.core.callback
    except AttributeError:
        # Must be using FFI.
        from gevent._ffi.callback import callback as CALLBACK_KIND

    def __init__(self, testcase, function):
        # testcase: the TestCase instance; function: the unbound test method.
        self.testcase = testcase
        self.function = function
        # One entry per test run: the total object-count delta of that run.
        self.deltas = []
        # Incremental state for objgraph.growth() between runs.
        self.peak_stats = {}

        # The very first time we are called, we have already been
        # self.setUp() by the test runner, so we don't need to do it again.
        self.needs_setUp = False

    def _ignore_object_p(self, obj):
        # Filter predicate for objgraph: return False for objects that
        # should NOT count toward growth.
        if obj is self:
            return False
        try:
            # Certain badly written __eq__ and __contains__ methods
            # (I'm looking at you, Python 3.10 importlib.metadata._text!
            # ``__eq__(self, other): return self.lower() == other.lower()``)
            # raise AttributeError which propagates here, and must be caught.
            # Similarly, we can get a TypeError
            if (
                    obj in self.__dict__.values()
                    or obj == self._ignore_object_p # pylint:disable=comparison-with-callable
            ):
                return False
        except (AttributeError, TypeError):
            # `obj` is things like that _text class. Also have seen
            # - psycopg2._psycopg.type
            # - relstorage.adapters.drivers._ClassDriverFactory
            return True
        kind = type(obj)
        if kind in self.IGNORED_TYPES:
            return False
        if kind is self.CALLBACK_KIND and obj.callback is None and obj.args is None:
            # these represent callbacks that have been stopped, but
            # the event loop hasn't cycled around to run them. The only
            # known cause of this is killing greenlets before they get a chance
            # to run for the first time.
            return False
        return True

    def _growth(self):
        # Snapshot object-count growth since the last call.
        return objgraph.growth(limit=None, peak_stats=self.peak_stats, filter=self._ignore_object_p)

    def _report_diff(self, growth):
        # Format a (name, count, delta) growth list as an aligned table.
        if not growth:
            return "<Unable to calculate growth>"
        lines = []
        width = max(len(name) for name, _, _ in growth)
        for name, count, delta in growth:
            lines.append('%-*s%9d %+9d' % (width, name, count, delta))
        diff = '\n'.join(lines)
        return diff

    def _run_test(self, args, kwargs):
        # Run one iteration of the test with GC disabled so collections
        # happen only where we request them.
        gc_enabled = gc.isenabled()
        gc.disable()

        if self.needs_setUp:
            self.testcase.setUp()
            self.testcase.skipTearDown = False
        try:
            self.function(self.testcase, *args, **kwargs)
        finally:
            self.testcase.tearDown()
            self.testcase.doCleanups()
            self.testcase.skipTearDown = True
            self.needs_setUp = True
            if gc_enabled:
                gc.enable()

    def _growth_after(self):
        # Grab post snapshot
        # Clear stdlib URL-parsing caches first so they don't read as growth.
        if 'urlparse' in sys.modules:
            sys.modules['urlparse'].clear_cache()
        if 'urllib.parse' in sys.modules:
            sys.modules['urllib.parse'].clear_cache() # pylint:disable=no-member
        return self._growth()

    def _check_deltas(self, growth):
        # Return false when we have decided there is no leak,
        # true if we should keep looping, raises an assertion
        # if we have decided there is a leak.

        deltas = self.deltas
        if not deltas:
            # We haven't run yet, no data, keep looping
            return True

        if gc.garbage:
            raise AssertionError("Generated uncollectable garbage %r" % (gc.garbage,))

        # the following configurations are classified as "no leak"
        # [0, 0]
        # [x, 0, 0]
        # [... a, b, c, d] where a+b+c+d = 0
        #
        # the following configurations are classified as "leak"
        # [... z, z, z] where z > 0

        if deltas[-2:] == [0, 0] and len(deltas) in (2, 3):
            return False

        if deltas[-3:] == [0, 0, 0]:
            return False

        if len(deltas) >= 4 and sum(deltas[-4:]) == 0:
            return False

        if len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]:
            diff = self._report_diff(growth)
            raise AssertionError('refcount increased by %r\n%s' % (deltas, diff))

        # OK, we don't know for sure yet. Let's search for more
        if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2:
            # this is suspicious, so give a few more runs
            limit = 11
        else:
            limit = 7
        if len(deltas) >= limit:
            raise AssertionError('refcount increased by %r\n%s'
                                 % (deltas,
                                    self._report_diff(growth)))

        # We couldn't decide yet, keep going
        return True

    def __call__(self, args, kwargs):
        # Entry point: run the test under the growth-check loop.
        for _ in range(3):
            gc.collect()

        # Capture state before; the incremental will be
        # updated by each call to _growth_after
        growth = self._growth()

        while self._check_deltas(growth):
            self._run_test(args, kwargs)

            growth = self._growth_after()

            self.deltas.append(sum((stat[2] for stat in growth)))
def wrap_refcount(method):
    """
    Wrap a test *method* so that each execution is monitored for object
    growth by :class:`_RefCountChecker`; honors ``ignore_leakcheck``
    markers on the method or the test class.
    """
    leakcheck_disabled = objgraph is None
    if leakcheck_disabled or getattr(method, 'ignore_leakcheck', False):
        if leakcheck_disabled:
            import warnings
            warnings.warn("objgraph not available, leakchecks disabled")

        @wraps(method)
        def _method_skipped_during_leakcheck(self, *_args, **_kwargs):
            self.skipTest("This method ignored during leakchecks")
        return _method_skipped_during_leakcheck

    @wraps(method)
    def wrapper(self, *args, **kwargs): # pylint:disable=too-many-branches
        if getattr(self, 'ignore_leakcheck', False):
            raise unittest.SkipTest("This class ignored during leakchecks")
        return _RefCountChecker(self, method)(args, kwargs)
    return wrapper

View File

@@ -0,0 +1,137 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import importlib
import os.path
import warnings
import gevent
from . import sysinfo
# Avoid importing this at the top level because
# it imports threading and subprocess, and this module
# is always imported in our monkey-patched stdlib unittests,
# and some of them don't like it when those are imported.
# from . import util
# Importable module names that may legitimately be absent on a given
# platform/build; walk_modules() import-checks these before yielding them.
OPTIONAL_MODULES = frozenset({
    ## Resolvers.
    # ares might not be built
    'gevent.resolver_ares',
    'gevent.resolver.ares',
    # dnspython might not be installed
    'gevent.resolver.dnspython',
    ## Backends
    'gevent.libev',
    'gevent.libev.watcher',
    'gevent.libuv.loop',
    'gevent.libuv.watcher',
})

# Module basenames that walk_modules() never yields (package markers,
# build helpers and C-accelerator internals).
EXCLUDED_MODULES = frozenset({
    '__init__',
    'core',
    'ares',
    '_util',
    '_semaphore',
    'corecffi',
    '_corecffi',
    '_corecffi_build',
})
def walk_modules(
        basedir=None,
        modpath=None,
        include_so=False,
        recursive=False,
        check_optional=True,
        include_tests=False,
        optional_modules=OPTIONAL_MODULES,
        excluded_modules=EXCLUDED_MODULES,
):
    """
    Find gevent modules, yielding tuples of ``(path, importable_module_name)``.

    :keyword bool check_optional: If true (the default), then if we discover a
        module that is known to be optional on this system (such as a backend),
        we will attempt to import it; if the import fails, it will not be returned.
        If false, then we will not make such an attempt, the caller will need to be prepared
        for an `ImportError`; the caller can examine *optional_modules* against
        the yielded *importable_module_name*.
    """
    # pylint:disable=too-many-branches,too-many-locals
    if sysinfo.PYPY:
        # Binary extensions are never yielded on PyPy.
        include_so = False
    if basedir is None:
        # Default to walking the installed gevent package itself.
        basedir = os.path.dirname(gevent.__file__)
        if modpath is None:
            modpath = 'gevent.'
    else:
        if modpath is None: # pylint:disable=else-if-used
            modpath = ''

    for fn in sorted(os.listdir(basedir)):
        path = os.path.join(basedir, fn)
        if os.path.isdir(path):
            if not recursive:
                continue
            if not include_tests and fn in ['testing', 'tests']:
                continue
            # Only recurse into real packages (those with __init__.py).
            pkg_init = os.path.join(path, '__init__.py')
            if os.path.exists(pkg_init):
                yield pkg_init, modpath + fn
                for p, m in walk_modules(
                        path, modpath + fn + ".",
                        include_so=include_so,
                        recursive=recursive,
                        check_optional=check_optional,
                        include_tests=include_tests,
                        optional_modules=optional_modules,
                        excluded_modules=excluded_modules,
                ):
                    yield p, m
            continue

        if fn.endswith('.py'):
            x = fn[:-3]
            # Strip a debug-build suffix so exclusion matching works.
            if x.endswith('_d'):
                x = x[:-2]
            if x in excluded_modules:
                continue
            modname = modpath + x
            if check_optional and modname in optional_modules:
                try:
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore', DeprecationWarning)
                        importlib.import_module(modname)
                except ImportError:
                    # Imported lazily; see the note at the top of this module.
                    from . import util
                    util.debug("Unable to import optional module %s", modname)
                    continue
            yield path, modname
        elif include_so and fn.endswith(sysinfo.SHARED_OBJECT_EXTENSION):
            if '.pypy-' in fn:
                continue
            if fn.endswith('_d.so'):
                yield path, modpath + fn[:-5]
            else:
                yield path, modpath + fn[:-3]

View File

@@ -0,0 +1,170 @@
import sys
import os

# The stdlib test file to run is passed as the first argument; consume it
# here so the test module's own argument handling does not see it.
test_filename = sys.argv[1]
del sys.argv[1]

from gevent import monkey

# Only test the default set of patch arguments.
monkey.patch_all()

from .patched_tests_setup import disable_tests_in_source
from . import support
from . import resources
from . import SkipTest
from . import util
# The stdlib version uses the internal built-in function ``_thread._count()``,
# which we don't/can't monkey-patch, so it returns inaccurate information.
def threading_setup():
    """Replacement for ``test.support.threading_setup``: a fixed baseline."""
    return (1, ())
# The stdlib version waits for ``_thread._count()`` to return to its
# original value; once we have started worker threads that can never happen.
def threading_cleanup(*_args):
    """Replacement for ``test.support.threading_cleanup``: nothing to wait for."""
# Sanity check: the attributes we are about to replace really do exist.
assert support.threading_setup
assert support.threading_cleanup
support.threading_setup = threading_setup
support.threading_cleanup = threading_cleanup

# On all versions of Python 3.6+, this also uses ``_thread._count()``,
# meaning it suffers from inaccuracies,
# and test_socket.py constantly fails with an extra thread
# on some random test. We disable it entirely.
# XXX: Figure out how to make a *definition* in ./support.py actually
# override the original in test.support, without having to
# manually set it
#
import contextlib
@contextlib.contextmanager
def wait_threads_exit(timeout=None): # pylint:disable=unused-argument
    """
    No-op stand-in for the stdlib helper: never waits for threads.

    On < 3.10 this replaces ``support.wait_threads_exit``; on >= 3.10 it
    replaces ``threading_helper.wait_threads_exit``.
    """
    yield
support.wait_threads_exit = wait_threads_exit

# On Python 3.11, they changed the way that they deal with this,
# meaning that this method no longer works. (Actually, it's not
# clear that any of our patches to `support` are doing anything on
# Python 3 at all? They certainly aren't on 3.11). This was a good
# thing As it led to adding the timeout value for the threadpool
# idle threads. But...the default of 5s meant that many tests in
# test_socket were each taking at least 5s to run, leading to the
# whole thing exceeding the allowed test timeout. We could set the
# GEVENT_THREADPOOL_IDLE_TASK_TIMEOUT env variable to a smaller
# value, and although that might stress the system nicely, it's
# not indicative of what end users see. And it's still hard to get
# a correct value.
#
# So try harder to make sure our patches apply.
#
# If this fails, symptoms are very long running tests that can be resolved
# by setting that TASK_TIMEOUT value small, and/or setting GEVENT_RESOLVER=block.
# Also, some number of warnings about dangling threads, or failures
# from wait_threads_exit
try:
    from test import support as ts
except ImportError:
    pass
else:
    # Patch the real stdlib test.support module directly as well.
    ts.threading_setup = threading_setup
    ts.threading_cleanup = threading_cleanup
    ts.wait_threads_exit = wait_threads_exit
    ts.print_warning = lambda msg: msg
try:
    from test.support import threading_helper
except ImportError:
    # Only exists on newer Pythons (3.10+).
    pass
else:
    threading_helper.wait_threads_exit = wait_threads_exit
    threading_helper.threading_setup = threading_setup
    threading_helper.threading_cleanup = threading_cleanup
try:
    from test.support import import_helper
except ImportError:
    pass
else:
    # Importing fresh modules breaks our monkey patches. we can't allow that.
    def import_fresh_module(name, *_args, **_kwargs):
        import importlib
        return importlib.import_module(name)
    import_helper.import_fresh_module = import_fresh_module

# So we don't have to patch test_threading to use our
# version of lock_tests, we patch
from gevent.tests import lock_tests
try:
    import test.lock_tests
except ImportError:
    pass
else:
    test.lock_tests = lock_tests
    sys.modules['tests.lock_tests'] = lock_tests

# Configure allowed resources
resources.setup_resources()

if not os.path.exists(test_filename) and os.sep not in test_filename:
    # A simple filename, given without a path, that doesn't exist.
    # So we change to the appropriate directory, if we can find it.
    # This happens when copy-pasting the output of the testrunner
    for d in util.find_stdlib_tests():
        if os.path.exists(os.path.join(d, test_filename)):
            os.chdir(d)
            break

__file__ = os.path.abspath(test_filename) #os.path.join(os.getcwd(), test_filename)
test_name = os.path.splitext(os.path.basename(test_filename))[0]

print('Running with patch_all(): %s' % (__file__,))
with open(test_filename, encoding='utf-8') as module_file:
    module_source = module_file.read()
# Comment out / disable tests known to be broken under monkey-patching.
module_source = disable_tests_in_source(module_source, test_name)

# We write the module source to a file so that tracebacks
# show correctly, since disabling the tests changes line
# numbers. However, note that __file__ must still point to the
# real location so that data files can be found.
# See https://github.com/gevent/gevent/issues/1306
import tempfile
temp_handle, temp_path = tempfile.mkstemp(prefix=test_name, suffix='.py', text=True)
os.write(temp_handle, module_source.encode('utf-8'))
os.close(temp_handle)
# Keep the generated file around for debugging when GEVENT_DEBUG is set.
remove_file = not os.environ.get('GEVENT_DEBUG')
try:
    module_code = compile(module_source,
                          temp_path,
                          'exec',
                          dont_inherit=True)
    exec(module_code, globals())
    remove_file = True
except SkipTest as e:
    remove_file = True
    # Some tests can raise test.support.ResourceDenied
    # in their main method before the testrunner takes over.
    # That's a kind of SkipTest. we can't get a true skip count because it
    # hasn't run, though.
    print(e)
    # Match the regular unittest output, including ending with skipped
    print("Ran 0 tests in 0.0s")
    print('OK (skipped=0)')
finally:
    if remove_file:
        try:
            os.remove(temp_path)
        except OSError:
            pass

View File

@@ -0,0 +1,227 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import os
import unittest
import re
import gc
import functools
from . import sysinfo
# Linux/OS X/BSD platforms /can/ implement this by calling out to lsof.
# However, if psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def _collects(func):
# We've seen OSError: No such file or directory /proc/PID/fd/NUM.
# This occurs in the loop that checks open files. It first does
# listdir() and then tries readlink() on each file. But the file
# went away. This must be because of async GC in PyPy running
# destructors at arbitrary times. This became an issue in PyPy 7.2
# but could theoretically be an issue with any objects caught in a
# cycle. This is one reason we GC before we begin. (The other is
# to clean up outstanding objects that will close files in
# __del__.)
#
# Note that this can hide errors, though, by causing greenlets to get
# collected and drop references and thus close files. We should be deterministic
# and careful about closing things.
@functools.wraps(func)
def f(**kw):
gc.collect()
gc.collect()
enabled = gc.isenabled()
gc.disable()
try:
return func(**kw)
finally:
if enabled:
gc.enable()
return f
if sysinfo.WIN:
    def _run_lsof():
        # lsof is a Unix tool; there is nothing to shell out to on Windows.
        raise unittest.SkipTest("lsof not expected on Windows")
else:
    @_collects
    def _run_lsof():
        # Shell out to ``lsof -p <our pid>`` and return its raw output as a
        # stripped string. Raises SkipTest if the command fails (e.g., lsof
        # is not installed).
        import tempfile
        pid = os.getpid()
        fd, tmpname = tempfile.mkstemp('get_open_files')
        os.close(fd)
        # NOTE: built as a shell string (for the redirection); inputs are a
        # pid and a mkstemp path, not untrusted data.
        lsof_command = 'lsof -p %s > %s' % (pid, tmpname)
        if os.system(lsof_command):
            # XXX: This prints to the console an annoying message: 'lsof is not recognized'
            raise unittest.SkipTest("lsof failed")
        with open(tmpname) as fobj: # pylint:disable=unspecified-encoding
            data = fobj.read().strip()
        os.remove(tmpname)
        return data
def default_get_open_files(pipes=False, **_kwargs):
    """
    Parse ``lsof`` output into a dict mapping integer fd -> its lsof line,
    plus a ``'data'`` key holding the raw output.

    :param pipes: When false (the default), descriptors whose lsof FD
        column is all digits (pipes, at least on OS X) are skipped.
    :raises AssertionError: If a duplicate fd is seen or nothing parses.
    """
    data = _run_lsof()
    results = {}
    for line in data.split('\n'):
        line = line.strip()
        if not line or line.startswith("COMMAND"):
            # Skip header and blank lines
            continue
        split = re.split(r'\s+', line)
        # Note that this needs the real lsof; it won't work with
        # the lsof that comes from BusyBox. You'll get parsing errors
        # here.
        _command, _pid, _user, fd = split[:4]
        # Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u"
        if fd[:-1].isdigit() or fd.isdigit():
            # A trailing digit means the FD column was all digits: a pipe.
            if not pipes and fd[-1].isdigit():
                continue
            # Strip the mode suffix ("1u" -> 1) unless the column was all digits.
            fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd)
            if fd in results:
                params = (fd, line, split, results.get(fd), data)
                raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params)
            results[fd] = line
    if not results:
        raise AssertionError('failed to parse lsof:\n%s' % (data, ))
    results['data'] = data
    return results
@_collects
def default_get_number_open_files():
    """
    Return a count of this process's open file descriptors.

    On Linux, count the entries of ``/proc/<pid>/fd`` directly; otherwise
    fall back to ``get_open_files`` (subtracting one for its ``'data'``
    key), or 0 when even that is unavailable.
    """
    if os.path.exists('/proc/'):
        # Linux only: one directory entry per open descriptor.
        return len(os.listdir('/proc/%d/fd' % os.getpid()))
    try:
        return len(get_open_files(pipes=True)) - 1
    except (OSError, AssertionError, unittest.SkipTest):
        return 0
# Always keep the lsof-based implementation reachable under this name,
# even when psutil replaces the default.
lsof_get_open_files = default_get_open_files
try:
    # psutil import subprocess which on Python 3 imports selectors.
    # This can expose issues with monkey-patching.
    import psutil
except ImportError:
    # No psutil: fall back to the lsof/proc based implementations.
    get_open_files = default_get_open_files
    get_number_open_files = default_get_number_open_files
else:
    class _TrivialOpenFile(object):
        # Minimal stand-in for a psutil popenfile/pconn: carries only the
        # ``fd`` attribute, for descriptors psutil didn't report.
        __slots__ = ('fd',)
        def __init__(self, fd):
            self.fd = fd
@_collects
def get_open_files(count_closing_as_open=True, **_kw):
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
.. important:: If you want to find open sockets, on Windows
and linux, it is important that the socket at least be listening
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
results = {}
for _ in range(3):
try:
if count_closing_as_open and os.path.exists('/proc/'):
# Linux only.
# psutil doesn't always see all connections, even though
# they exist in the filesystem. It's not entirely clear why.
# It sees them on Travis (prior to Ubuntu Bionic, at least)
# but doesn't in the manylinux image or Fedora 33 Rawhide image.
# This happens in test__makefile_ref TestSSL.*; in particular, if a
# ``sslsock.makefile()`` is opened and used to read all data, and the sending
# side shuts down, psutil no longer finds the open file. So we add them
# back in.
#
# Of course, the flip side of this is that we sometimes find connections
# we're not expecting.
# I *think* this has to do with CLOSE_WAIT handling?
fd_directory = '/proc/%d/fd' % os.getpid()
fd_files = os.listdir(fd_directory)
else:
fd_files = []
process = psutil.Process()
results['data'] = process.open_files()
results['data'] += process.connections('all')
break
except OSError:
pass
else:
# No break executed
raise unittest.SkipTest("Unable to read open files")
for x in results['data']:
results[x.fd] = x
for fd_str in fd_files:
if fd_str not in results:
fd = int(fd_str)
results[fd] = _TrivialOpenFile(fd)
results['data'] += [('From psutil', process)]
results['data'] += [('fd files', fd_files)]
return results
    @_collects
    def get_number_open_files():
        """
        Return ``psutil.Process().num_fds()``, or 0 where that API doesn't
        exist.
        """
        process = psutil.Process()
        try:
            return process.num_fds()
        except AttributeError:
            # num_fds is unix only. Is num_handles close enough on Windows?
            return 0
class DoesNotLeakFilesMixin(object): # pragma: no cover
    """
    A test case mixin that helps find a method that's leaking an
    open file.

    Only mix this in when needed to debug; it slows tests down.
    """
    def setUp(self):
        # Snapshot the descriptor count before the test body runs.
        self.__fd_count_before = get_number_open_files()
        super(DoesNotLeakFilesMixin, self).setUp()

    def tearDown(self):
        super(DoesNotLeakFilesMixin, self).tearDown()
        fd_count_after = get_number_open_files()
        if fd_count_after > self.__fd_count_before:
            raise AssertionError(
                "Too many open files. Before: %s < After: %s.\n%s" % (
                    self.__fd_count_before,
                    fd_count_after,
                    get_open_files()
                )
            )

View File

@@ -0,0 +1,68 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from . import support
from .sysinfo import PY3
from .sysinfo import PYPY
from .sysinfo import WIN
from .sysinfo import LIBUV
from .sysinfo import EXPECT_POOR_TIMER_RESOLUTION
# Timeouts below are in seconds (they feed socket/test timeouts) --
# presumably; confirm against callers.
# Travis is slow and overloaded; Appveyor used to be faster, but
# as of Dec 2015 it's almost always slower and/or has much worse timer
# resolution
CI_TIMEOUT = 15
if (PY3 and PYPY) or (PYPY and WIN and LIBUV):
    # pypy3 is very slow right now,
    # as is PyPy2 on windows (which only has libuv)
    CI_TIMEOUT = 20
if PYPY and LIBUV:
    # slow and flaky timeouts
    LOCAL_TIMEOUT = CI_TIMEOUT
else:
    LOCAL_TIMEOUT = 2
# The most generous timeout we ever use.
LARGE_TIMEOUT = max(LOCAL_TIMEOUT, CI_TIMEOUT)
# Previously we set this manually to 'localhost'
# and then had some conditions where we changed it to
# 127.0.0.1 (e.g., on Windows or OSX or travis), but Python's test.support says
# # Don't use "localhost", since resolving it uses the DNS under recent
# # Windows versions (see issue #18792).
# and sets it unconditionally to 127.0.0.1.
DEFAULT_LOCAL_HOST_ADDR = support.HOST
DEFAULT_LOCAL_HOST_ADDR6 = support.HOSTv6
# Not all TCP stacks support dual binding where ''
# binds to both v4 and v6.
# XXX: This is badly named; you often want DEFAULT_BIND_ADDR_TUPLE
DEFAULT_BIND_ADDR = support.HOST
DEFAULT_CONNECT_HOST = DEFAULT_CONNECT = DEFAULT_LOCAL_HOST_ADDR
# (host, 0): bind to the default host on an ephemeral port.
DEFAULT_BIND_ADDR_TUPLE = (DEFAULT_BIND_ADDR, 0)
# For in-process sockets
DEFAULT_SOCKET_TIMEOUT = 0.1 if not EXPECT_POOR_TIMER_RESOLUTION else 2.0
# For cross-process sockets
DEFAULT_XPC_SOCKET_TIMEOUT = 2.0 if not EXPECT_POOR_TIMER_RESOLUTION else 4.0

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,209 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Test environment setup.
This establishes the resources that are available for use,
which are tested with `support.is_resource_enabled`.
"""
from __future__ import absolute_import, division, print_function
# This file may be imported early, so it should take care not to import
# things it doesn't need, which means deferred imports.
def get_ALL_RESOURCES():
    "Return a fresh list of resource names."
    # RESOURCE_NAMES is the list of all known resources, including those that
    # shouldn't be enabled by default or when asking for "all" resources.
    # ALL_RESOURCES is the list of resources enabled by default or with "all" resources.
    try:
        # On 3.6 and 3.7, the list is importable from test.libregrtest.
        from test.libregrtest import ALL_RESOURCES
    except ImportError:
        # 2.7 through 3.5.
        # Don't do ``from test.regrtest import ALL_RESOURCES``: on those
        # versions, importing regrtest iterates sys.modules and does
        # modifications, which doesn't work well when it's imported from
        # another module at module scope. It also makes assumptions about
        # module __file__ that may not hold true (at least on 2.7),
        # especially when six or other module proxy objects are involved.
        # So we hardcode the list. This is from 2.7, which is a superset
        # of the defined resources through 3.5.
        ALL_RESOURCES = (
            'audio', 'curses', 'largefile', 'network', 'bsddb',
            'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui',
            'xpickle'
        )
    result = list(ALL_RESOURCES)
    # Do we test the stdlib monkey-patched?
    result.append('gevent_monkey')
    return result
def parse_resources(resource_str=None):
    # str -> Sequence[str]
    #
    # Parse the string like libregrtest.cmdline documents for ``-u``:
    # a comma-separated list of resource words, where
    #   all      - enable all special resources;
    #   none     - disable all special resources;
    #   <name>   - enable that resource (e.g. 'network', 'subprocess');
    #   -<name>  - disable that resource
    # so '-uall,-gui' means everything except the gui tests.
    #
    # We make one change: we default to 'all' resources instead of 'none'.
    # Encountering 'all' or 'none' later in the string resets the set, for
    # ease of appending to environment variables.
    if resource_str is None:
        import os
        resource_str = os.environ.get('GEVENTTEST_USE_RESOURCES')
    enabled = get_ALL_RESOURCES()
    if not resource_str:
        return enabled
    for token in resource_str.split(','):
        # Empty tokens are ignored; this happens when working with
        # the environment variable if not already set: ENV=$ENV,-network
        if not token:
            continue
        if token == 'all':
            enabled = get_ALL_RESOURCES()
        elif token == 'none':
            enabled = []
        elif token.startswith('-'):
            name = token[1:]
            if name in enabled:
                enabled.remove(name)
        else:
            # TODO: Produce a warning if it's an unknown resource?
            enabled.append(token)
    return enabled
def unparse_resources(resources):
    """
    Given a list of enabled resources, produce the correct environment variable
    setting to enable (only) that list.
    """
    # By default, we assume all resources are enabled, so explicitly
    # listing them here doesn't actually disable anything. To do that, we want to
    # list the ones that are disabled. This is usually shorter than starting with
    # 'none', and manually adding them back in one by one.
    #
    # 'none' must be special cased because an empty option string
    # means 'all'. Still, we're explicit about that.
    #
    # TODO: Make this produce the minimal output; sometimes 'none' and
    # adding would be shorter.
    all_resources = set(get_ALL_RESOURCES())
    enabled = set(resources)
    if enabled == all_resources:
        result = 'all'
    elif resources:
        explicitly_disabled = all_resources - enabled
        # BUGFIX: join with ',' so the result round-trips through
        # parse_resources(), which splits on commas (and the documented
        # format is comma-separated). Joining with '' fused multiple
        # '-name' tokens into a single unparseable token.
        result = ','.join(sorted('-' + x for x in explicitly_disabled))
    else:
        result = 'none'
    return result
def setup_resources(resources=None):
    """
    Install the given resources as the enabled set.

    Call either with a list of resource names, a resource string (parsed
    by `parse_resources`), or ``None`` to get the resource string from the
    environment. Returns the final list of enabled resources.
    """
    if resources is None or isinstance(resources, str):
        resources = parse_resources(resources)
    from . import support
    support.use_resources = list(resources)
    support.gevent_has_setup_resources = True
    return resources
def ensure_setup_resources():
    """
    Set up resources from the environment if that hasn't been done yet.

    Call when you don't know whether resources have been configured.
    Returns an object (the ``support`` module) providing
    ``is_resource_enabled``.
    """
    from . import support
    if not support.gevent_has_setup_resources:
        setup_resources()
    return support
def exit_without_resource(resource):
    """
    Call this in standalone test modules that can't use unittest.SkipTest.
    Exits with a status of 0 if the resource isn't enabled.
    """
    support = ensure_setup_resources()
    if support.is_resource_enabled(resource):
        return
    print("Skipped: %r not enabled" % (resource,))
    import sys
    sys.exit(0)
def skip_without_resource(resource, reason=''):
    """
    Raise `unittest.SkipTest` unless *resource* is among the enabled
    test resources.
    """
    requires = 'Requires resource %r' % (resource,)
    reason = requires if not reason else reason + ' (' + requires + ')'
    if not ensure_setup_resources().is_resource_enabled(resource):
        import unittest
        raise unittest.SkipTest(reason)
if __name__ == '__main__':
    # Print the resources enabled by the current environment settings.
    print(setup_resources())

View File

@@ -0,0 +1,43 @@
import sys
# pylint:disable=unused-argument,import-error
# Python 2/3 compatibility shims (a trimmed copy of the relevant parts of six).
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] >= 3
if PY3:
    import builtins
    # ``exec`` is a keyword on Python 2, so it can't appear by name in code
    # that must also parse there; fetch the builtin via getattr.
    exec_ = getattr(builtins, "exec")
    def reraise(tp, value, tb=None):
        # Re-raise *value* with traceback *tb*, Python 3 syntax.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    xrange = range
    string_types = (str,)
    text_type = str
else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's namespace.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        # The Python 2 ``exec`` *statement*, hidden in a string so that the
        # Python 3 parser accepts this file.
        exec("""exec code in globs, locs""")
    import __builtin__ as builtins
    xrange = builtins.xrange
    string_types = (builtins.basestring,)
    text_type = builtins.unicode
    # Python 2 raise-with-traceback syntax, likewise hidden from Python 3.
    exec_("""def reraise(tp, value, tb=None):
    try:
        raise tp, value, tb
    finally:
        tb = None
""")

View File

@@ -0,0 +1,198 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import functools
import unittest
from . import sysinfo
def _identity(f):
    # No-op decorator: return the test item unchanged.
    return f
def _do_not_skip(reason):
    # Stand-in for ``unittest.skip`` used when the skip condition does not
    # apply on this platform: requires a reason (for call-site symmetry with
    # unittest.skip) but decorates without skipping.
    assert reason
    return _identity
# Default every conditional skip decorator to a no-op; the platform checks
# below replace the applicable ones with unittest.skip.
skipOnMac = _do_not_skip
skipOnMacOnCI = _do_not_skip
skipOnWindows = _do_not_skip
skipOnAppVeyor = _do_not_skip
skipOnCI = _do_not_skip
skipOnManylinux = _do_not_skip
skipOnPyPy = _do_not_skip
skipOnPyPyOnCI = _do_not_skip
skipOnPyPy3OnCI = _do_not_skip
skipOnPyPy3 = _do_not_skip
skipOnPyPyOnWindows = _do_not_skip
# Version/implementation conditions are known at import time, so these can
# be chosen with a simple conditional expression.
skipOnPy3 = unittest.skip if sysinfo.PY3 else _do_not_skip
skipOnPy37 = unittest.skip if sysinfo.PY37 else _do_not_skip
skipOnPy310 = unittest.skip if sysinfo.PY310 else _do_not_skip
skipOnPy312 = unittest.skip if sysinfo.PY312 else _do_not_skip
skipOnPurePython = unittest.skip if sysinfo.PURE_PYTHON else _do_not_skip
skipWithCExtensions = unittest.skip if not sysinfo.PURE_PYTHON else _do_not_skip
skipOnLibuv = _do_not_skip
skipOnLibuvOnWin = _do_not_skip
skipOnLibuvOnCI = _do_not_skip
skipOnLibuvOnCIOnPyPy = _do_not_skip
skipOnLibuvOnPyPyOnWin = _do_not_skip
skipOnLibev = _do_not_skip
if sysinfo.WIN:
    skipOnWindows = unittest.skip
if sysinfo.OSX:
    skipOnMac = unittest.skip
if sysinfo.RUNNING_ON_APPVEYOR:
    # See comments scattered around about timeouts and the timer
    # resolution available on appveyor (lots of jitter). this
    # seems worse with the 62-bit builds.
    # Note that we skip/adjust these tests only on AppVeyor, not
    # win32---we don't think there's gevent related problems but
    # environment related problems. These can be tested and debugged
    # separately on windows in a more stable environment.
    skipOnAppVeyor = unittest.skip
if sysinfo.RUNNING_ON_CI:
    skipOnCI = unittest.skip
    if sysinfo.OSX:
        skipOnMacOnCI = unittest.skip
    if sysinfo.RUNNING_ON_MANYLINUX:
        skipOnManylinux = unittest.skip
if sysinfo.PYPY:
    skipOnPyPy = unittest.skip
    if sysinfo.RUNNING_ON_CI:
        skipOnPyPyOnCI = unittest.skip
    if sysinfo.WIN:
        skipOnPyPyOnWindows = unittest.skip
    if sysinfo.PYPY3:
        skipOnPyPy3 = unittest.skip
        if sysinfo.RUNNING_ON_CI:
            # Same as above, for PyPy3.3-5.5-alpha and 3.5-5.7.1-beta and 3.5-5.8
            skipOnPyPy3OnCI = unittest.skip
skipUnderCoverage = unittest.skip if sysinfo.RUN_COVERAGE else _do_not_skip
# Plain re-exports for convenience.
skipIf = unittest.skipIf
skipUnless = unittest.skipUnless
# Cached result of the psutil availability probe; None means "not checked yet".
_has_psutil_process = None

def _check_psutil():
    """
    Return whether psutil can produce a Process object for this process.
    The answer is computed once and cached.
    """
    global _has_psutil_process
    if _has_psutil_process is None:
        proc = sysinfo.get_this_psutil_process()
        _has_psutil_process = proc is not None
    return _has_psutil_process
def _make_runtime_skip_decorator(reason, predicate):
def decorator(test_item):
if not isinstance(test_item, type):
f = test_item
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
if not predicate():
raise unittest.SkipTest(reason)
return f(*args, **kwargs)
test_item = skip_wrapper
else:
# given a class, override setUp() to skip it.
#
# Internally, unittest uses two flags on the class to do this:
# __unittest_skip__ and __unittest_skip_why__. It *appears*
# these are evaluated for each method in the test, so we can safely
# change them at runtime. **This isn't documented.**
#
# If they are set before execution begins, then the class setUpClass
# and tearDownClass are skipped. So changing them at runtime could result
# in something being set up but not torn down. It is substantially
# faster, though, to set them.
base = test_item
base_setUp = base.setUp
@functools.wraps(test_item)
def setUp(self):
if not predicate():
base.__unittest_skip__ = True
base.__unittest_skip_why__ = reason
raise unittest.SkipTest(reason)
base_setUp(self)
base.setUp = setUp
return test_item
return decorator
def skipWithoutPSUtil(reason):
    """
    Skip the decorated test (at runtime) unless psutil is importable and
    usable in this process.
    """
    # Defer the check until runtime to avoid importing psutil at module load.
    return _make_runtime_skip_decorator("psutil not available: " + reason,
                                        _check_psutil)
# The loop implementation is fixed at import time, so pick the libuv/libev
# skip decorators here.
if sysinfo.LIBUV:
    skipOnLibuv = unittest.skip
    if sysinfo.RUNNING_ON_CI:
        skipOnLibuvOnCI = unittest.skip
        if sysinfo.PYPY:
            skipOnLibuvOnCIOnPyPy = unittest.skip
    if sysinfo.WIN:
        skipOnLibuvOnWin = unittest.skip
        if sysinfo.PYPY:
            skipOnLibuvOnPyPyOnWin = unittest.skip
else:
    skipOnLibev = unittest.skip
def skipWithoutResource(resource, reason=''):
    """
    Skip the decorated test (at runtime) unless *resource* is among the
    enabled test resources.
    """
    requires = 'Requires resource %r' % (resource,)
    reason = requires if not reason else reason + ' (' + requires + ')'
    # Deferred until runtime: resources are established as part of test
    # startup, which may be after this decorator runs.
    def predicate(): # This is easily cached if needed
        from . import resources
        return resources.ensure_setup_resources().is_resource_enabled(resource)
    return _make_runtime_skip_decorator(reason, predicate)
def skipWithoutExternalNetwork(reason=''):
    """
    Decorate test functions or classes that need access to external
    network resources (e.g., DNS, HTTP servers, etc).

    Important: If you use this on classes, you must not use the
    two-argument form of super().
    """
    return skipWithoutResource('network', reason)

View File

@@ -0,0 +1,49 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
from .params import DEFAULT_BIND_ADDR_TUPLE
def bind_and_listen(sock, address=DEFAULT_BIND_ADDR_TUPLE, backlog=50, reuse_addr=True):
    """
    Bind *sock* to *address* and, unless *backlog* is None (used for
    datagram sockets), put it into listening state.

    When *reuse_addr* is true, best-effort enable SO_REUSEADDR first.
    """
    from socket import SOL_SOCKET, SO_REUSEADDR, error
    if reuse_addr:
        try:
            current = sock.getsockopt(SOL_SOCKET, SO_REUSEADDR)
            sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, current | 1)
        except error:
            # Option not supported here; binding may still succeed.
            pass
    sock.bind(address)
    if backlog is not None: # None means a UDP/datagram socket
        sock.listen(backlog)
def tcp_listener(address=DEFAULT_BIND_ADDR_TUPLE, backlog=50, reuse_addr=True):
    """A shortcut to create a TCP socket, bind it and put it into listening state."""
    from gevent import socket
    listener = socket.socket()
    bind_and_listen(listener, address, backlog=backlog, reuse_addr=reuse_addr)
    return listener
def udp_listener(address=DEFAULT_BIND_ADDR_TUPLE, reuse_addr=True):
    """A shortcut to create a UDP socket and bind it, ready for use."""
    from gevent import socket
    listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    bind_and_listen(listener, address, backlog=None, reuse_addr=reuse_addr)
    return listener

View File

@@ -0,0 +1,142 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A re-export of the support module from Python's test package, with some
version compatibility shims and overrides.
The manylinux docker images do not include test.support at all, for space reasons,
so we need to be vaguely functional to run tests in that environment.
"""
import sys
# Proxy through, so that changes to this module reflect in the real
# module too. (In 3.7, this is natively supported with __getattr__ at
# module scope.) This breaks static analysis (pylint), so we configure
# pylint to ignore this module.
class _Default(object):
    # A descriptor-like object that will
    # only be used if the actual stdlib module
    # doesn't have the value.
    def __init__(self, value):
        # The fallback value (or function; see the @_Default-decorated
        # definitions below) to expose when test.support lacks the name.
        self.value = value
class _ModuleProxy(object):
    """
    Installed in sys.modules in place of this module (see the bottom of the
    file): attribute access falls back to the real ``test.support`` when
    this module doesn't define the name, and ``_Default`` values yield to
    the stdlib's version when it exists.
    """
    __slots__ = ('_this_mod', '_stdlib_support')
    def __init__(self):
        # Keep a reference to the original module object we are replacing.
        self._this_mod = sys.modules[__name__]
        # ``self`` is a sentinel meaning "stdlib support not looked up yet".
        self._stdlib_support = self
    def __get_stdlib_support(self):
        # Lazily import the real test.support; cache the module or None
        # (None when it isn't installed, e.g. in manylinux images).
        if self._stdlib_support is self:
            try:
                from test import support as stdlib_support
            except ImportError:
                stdlib_support = None
            self._stdlib_support = stdlib_support
        return self._stdlib_support
    def __getattr__(self, name):
        # Prefer this module's value; fall back to stdlib test.support.
        # A _Default placeholder defers to the stdlib when it has the name.
        try:
            local_val = getattr(self._this_mod, name)
        except AttributeError:
            return getattr(self.__get_stdlib_support(), name)
        if isinstance(local_val, _Default):
            try:
                return getattr(self.__get_stdlib_support(), name)
            except AttributeError:
                return local_val.value
        return local_val
    def __setattr__(self, name, value):
        if name in _ModuleProxy.__slots__:
            super(_ModuleProxy, self).__setattr__(name, value)
            return
        # Setting it deletes it from this module, so that
        # we then continue to fall through to the original module.
        try:
            setattr(self.__get_stdlib_support(), name, value)
        except AttributeError:
            setattr(self._this_mod, name, value)
        else:
            try:
                delattr(self._this_mod, name)
            except AttributeError:
                pass
    def __repr__(self):
        return repr(self._this_mod)
# Fallback host constants, used only when the real test.support is missing
# or lacks them (see _ModuleProxy/_Default above).
HOSTv6 = _Default('::1')
HOST = _Default("localhost")
HOSTv4 = _Default("127.0.0.1")
verbose = _Default(False)
@_Default
def is_resource_enabled(_):
    # Most conservative fallback: with no test.support, no optional
    # resource is considered enabled.
    return False
@_Default
def bind_port(sock, host=None): # pragma: no cover
    """
    Fallback for test.support.bind_port: bind *sock* to (*host*, 0) and
    return the ephemeral port the OS chose. *host* defaults to this
    module's (possibly overridden) ``HOST``.
    """
    import socket
    if host is None:
        # Look HOST up through sys.modules so proxy overrides are honored.
        host = sys.modules[__name__].HOST
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            # Windows-only option.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) # pylint:disable=no-member
    sock.bind((host, 0))
    return sock.getsockname()[1]
@_Default
def find_unused_port(family=None, socktype=None): # pragma: no cover
    """
    Fallback for test.support.find_unused_port: return a port number that
    was free at the time of the call (by briefly binding a throwaway
    socket to port 0).
    """
    import socket
    probe = socket.socket(family or socket.AF_INET,
                          socktype or socket.SOCK_STREAM)
    try:
        return sys.modules[__name__].bind_port(probe)
    finally:
        probe.close()
# Trivial fallbacks for test.support hooks our tests call unconditionally.
@_Default
def threading_setup():
    return []
@_Default
def threading_cleanup(*_):
    pass
@_Default
def reap_children():
    pass
# Set by resources.setup_resources()
gevent_has_setup_resources = False
# Replace this module in sys.modules with the proxy, so attribute access
# transparently falls through to the real test.support (see _ModuleProxy).
sys.modules[__name__] = _ModuleProxy()

View File

@@ -0,0 +1,64 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
from functools import wraps
from gevent.hub import _get_hub
from .hub import QuietHub
from .patched_tests_setup import get_switch_expected
def wrap_switch_count_check(method):
    """
    Wrap a test *method* so that, after it runs, the hub's ``switch_count``
    delta is checked against the test's ``switch_expected`` attribute
    (True: must have switched; False: must not have; None: don't check).
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        # None when the hub isn't a CountingHub (no switch_count attribute).
        initial_switch_count = getattr(_get_hub(), 'switch_count', None)
        self.switch_expected = getattr(self, 'switch_expected', True)
        if initial_switch_count is not None:
            fullname = getattr(self, 'fullname', None)
            # 'default' defers to the per-test table in patched_tests_setup.
            if self.switch_expected == 'default' and fullname:
                self.switch_expected = get_switch_expected(fullname)
        result = method(self, *args, **kwargs)
        if initial_switch_count is not None and self.switch_expected is not None:
            switch_count = _get_hub().switch_count - initial_switch_count
            if self.switch_expected is True:
                assert switch_count >= 0
                if not switch_count:
                    raise AssertionError('%s did not switch' % fullname)
            elif self.switch_expected is False:
                if switch_count:
                    raise AssertionError('%s switched but not expected to' % fullname)
            else:
                raise AssertionError('Invalid value for switch_expected: %r' % (self.switch_expected, ))
        return result
    return wrapper
class CountingHub(QuietHub):
    # Counts how many times this hub is switched to; read by
    # wrap_switch_count_check. Starts at 0 on the class; the first switch
    # creates an instance attribute that shadows it.
    switch_count = 0
    def switch(self, *args):
        # pylint:disable=arguments-differ
        self.switch_count += 1
        return QuietHub.switch(self, *args)

View File

@@ -0,0 +1,258 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import errno
import os
import sys
import gevent.core
from gevent import _compat as gsysinfo
# True when '-v' was passed more than once on the command line.
VERBOSE = sys.argv.count('-v') > 1
# Python implementations
PYPY = gsysinfo.PYPY
CPYTHON = not PYPY
# Platform/operating system
WIN = gsysinfo.WIN
LINUX = gsysinfo.LINUX
OSX = gsysinfo.OSX
PURE_PYTHON = gsysinfo.PURE_PYTHON
# Re-exported from gevent._compat; presumably returns the psutil
# Process for this process — verify against gevent._compat.
get_this_psutil_process = gsysinfo.get_this_psutil_process
# XXX: Formalize this better
LIBUV = 'libuv' in gevent.core.loop.__module__ # pylint:disable=no-member
CFFI_BACKEND = PYPY or LIBUV or 'cffi' in os.getenv('GEVENT_LOOP', '')
# Consume our private --debug-greentest flag before unittest parses argv.
if '--debug-greentest' in sys.argv:
    sys.argv.remove('--debug-greentest')
    DEBUG = True
else:
    DEBUG = False
RUN_LEAKCHECKS = os.getenv('GEVENTTEST_LEAKCHECK')
RUN_COVERAGE = os.getenv("COVERAGE_PROCESS_START") or os.getenv("GEVENTTEST_COVERAGE")
# Generally, ignore the portions that are only implemented
# on particular platforms; they generally contain partial
# implementations completed in different modules.
PLATFORM_SPECIFIC_SUFFIXES = ('2', '279', '3')
if WIN:
    PLATFORM_SPECIFIC_SUFFIXES += ('posix',)
PY2 = False # Never again
PY3 = True
# Minimum-minor-version feature flags: None means "older than that
# version", True means "at least that version" (set below).
PY35 = None
PY36 = None
PY37 = None
PY38 = None
PY39 = None
PY39_EXACTLY = None
PY310 = None
PY311 = None
PY312 = None
PY313 = None
NON_APPLICABLE_SUFFIXES = ()
if sys.version_info[0] == 3:
    # Python 3
    NON_APPLICABLE_SUFFIXES += ('2', '279')
    PY2 = False
    PY3 = True
    if sys.version_info[1] >= 5:
        PY35 = True
    if sys.version_info[1] >= 6:
        PY36 = True
    if sys.version_info[1] >= 7:
        PY37 = True
    if sys.version_info[1] >= 8:
        PY38 = True
    if sys.version_info[1] >= 9:
        PY39 = True
    if sys.version_info[:2] == (3, 9):
        PY39_EXACTLY = True
    if sys.version_info[1] >= 10:
        PY310 = True
    if sys.version_info[1] >= 11:
        PY311 = True
    if sys.version_info[1] >= 12:
        PY312 = True
    if sys.version_info[1] >= 13:
        PY313 = True
else: # pragma: no cover
    # Python 4?
    raise ImportError('Unsupported major python version')
PYPY3 = PYPY and PY3
if WIN:
    NON_APPLICABLE_SUFFIXES += ("posix",)
    # This is intimately tied to FileObjectPosix
    NON_APPLICABLE_SUFFIXES += ("fileobject2",)
    SHARED_OBJECT_EXTENSION = ".pyd"
else:
    SHARED_OBJECT_EXTENSION = ".so"
# We define GitHub actions to be similar to travis
RUNNING_ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS')
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS') or RUNNING_ON_GITHUB_ACTIONS
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
RUNNING_ON_MANYLINUX = os.environ.get('GEVENT_MANYLINUX')
# I'm not sure how to reliably auto-detect this, without
# importing platform, something we don't want to do.
RUNNING_ON_MUSLLINUX = 'musllinux' in os.environ.get('GEVENT_MANYLINUX_NAME', '')
if RUNNING_ON_APPVEYOR:
    # We can't exec corecext on appveyor if we haven't run setup.py in
    # 'develop' mode (i.e., we install)
    NON_APPLICABLE_SUFFIXES += ('corecext',)
# True when sleep/timer durations cannot be trusted to be precise;
# timing assertions are relaxed accordingly.
EXPECT_POOR_TIMER_RESOLUTION = (
    PYPY3
    # Really, this is probably only in VMs. But that's all I test
    # Windows with.
    or WIN
    or (LIBUV and PYPY)
    or RUN_COVERAGE
    or (OSX and RUNNING_ON_CI)
)
# Placeholder; rebound to a frozenset further down in this module once
# _make_socket_errnos is defined.
CONN_ABORTED_ERRORS = []
def _make_socket_errnos(*names):
result = []
for name in names:
try:
x = getattr(errno, name)
except AttributeError:
pass
else:
result.append(x)
return frozenset(result)
# Errno values meaning the peer aborted/reset an established connection.
CONN_ABORTED_ERRORS = _make_socket_errnos('WSAECONNABORTED', 'ECONNRESET')
# Errno values meaning a connection attempt was refused.
CONN_REFUSED_ERRORS = _make_socket_errnos('WSAECONNREFUSED', 'ECONNREFUSED')
# Which alternate DNS resolver (if any) is selected via the environment.
RESOLVER_ARES = os.getenv('GEVENT_RESOLVER') == 'ares'
RESOLVER_DNSPYTHON = os.getenv('GEVENT_RESOLVER') == 'dnspython'
RESOLVER_NOT_SYSTEM = RESOLVER_ARES or RESOLVER_DNSPYTHON
def get_python_version():
    """
    Return a string of the simple python version,
    such as '3.8.0b4'. Handles alpha, beta, release candidate, and final releases.
    """
    major, minor, micro, level, serial = sys.version_info
    version = '%s.%s.%s' % (major, minor, micro)
    # Map the releaselevel names used by sys.version_info to the
    # conventional short suffixes; 'final' gets no suffix.
    abbrev = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}
    if level in abbrev:
        version += '%s%s' % (abbrev[level], serial)
    return version
def _parse_version(ver_str):
try:
from packaging.version import Version
# InvalidVersion is a type of ValueError
except ImportError:
import warnings
warnings.warn('packaging.version not available; assuming no advanced Linux backends')
raise ValueError
try:
return Version(ver_str)
except ValueError:
import warnings
warnings.warn('Unable to parse version %s' % (ver_str,))
raise
def _check_linux_version_at_least(major, minor, error_kind):
# pylint:disable=too-many-return-statements
# ^ Yeah, but this is the most linear and simple way to
# write this.
from platform import system
if system() != 'Linux':
return False
from platform import release as _release
release = _release()
try:
# Linux versions like '6.8.0-1014-azure' cannot be parsed
# by packaging.version.Version, and distutils.LooseVersion, which
# did handle that, is deprecated. Neither module is guaranteed to be available
# anyway, so do the best we can manually.
ver_strings = (release or '0').split('.', 2)
if not ver_strings or int(ver_strings[0]) < major: # no way.
return False
if int(ver_strings[0]) > major: # Way newer!
return True
assert major == int(ver_strings[0]) # Exactly the major
if len(ver_strings) < 2: # no minor version, assume no
return False
if int(ver_strings[1]) < minor:
return False
assert int(ver_strings[1]) >= minor, (ver_strings[1], minor)
return True
except AssertionError:
raise
except Exception: # pylint:disable=broad-exception-caught
import warnings
warnings.warn('Unable to parse version %r; assuming no %s support' % (
release, error_kind
))
return False
def libev_supports_linux_aio():
    """Whether this kernel is new enough for libev's Linux AIO backend."""
    # libev requires kernel 4.19 or above to be able to support
    # linux AIO. It can still be compiled in, but will fail to create
    # the loop at runtime.
    required_major, required_minor = 4, 19
    return _check_linux_version_at_least(required_major, required_minor, 'aio')
def libev_supports_linux_iouring():
    """Whether this kernel is new enough for libev's io_uring backend."""
    # libev requires kernel XXX to be able to support linux io_uring.
    # It fails with the kernel in fedora rawhide (4.19.76) but
    # works (doesn't fail catastrophically when asked to create one)
    # with kernel 5.3.0 (Ubuntu Bionic)
    required_major, required_minor = 5, 3
    return _check_linux_version_at_least(required_major, required_minor, 'iouring')
def resolver_dnspython_available():
    """Return whether the ``dnspython`` distribution is installed."""
    # Try hard not to leave around junk we don't have to.
    from importlib import metadata
    try:
        metadata.distribution('dnspython')
    except metadata.PackageNotFoundError:
        available = False
    else:
        available = True
    return available

View File

@@ -0,0 +1,480 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import sys
import os.path
from contextlib import contextmanager
from unittest import TestCase as BaseTestCase
from functools import wraps
import gevent
from gevent._util import LazyOnClass
from gevent._compat import perf_counter
from gevent._compat import get_clock_info
from gevent._hub_local import get_hub_if_exists
from . import sysinfo
from . import params
from . import leakcheck
from . import errorhandler
from . import flaky
from .patched_tests_setup import get_switch_expected
class TimeAssertMixin(object):
    """
    Assertions about elapsed time, tolerant of platforms where timer
    resolution is poor (see ``sysinfo.EXPECT_POOR_TIMER_RESOLUTION``).
    """

    @flaky.reraises_flaky_timeout()
    def assertTimeoutAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        # Like assertAlmostEqual, but a mismatch is re-raised as a
        # flaky timeout rather than a hard failure.
        try:
            self.assertAlmostEqual(first, second, places=places, msg=msg, delta=delta)
        except AssertionError:
            flaky.reraiseFlakyTestTimeout()

    # Which implementation of assertTimeWithinRange a subclass gets is
    # decided once, at import time, based on the platform.
    if sysinfo.EXPECT_POOR_TIMER_RESOLUTION:
        # Timers too coarse to check anything: accept any duration.
        # pylint:disable=unused-argument
        def assertTimeWithinRange(self, time_taken, min_time, max_time):
            return
    else:
        def assertTimeWithinRange(self, time_taken, min_time, max_time):
            self.assertLessEqual(time_taken, max_time)
            self.assertGreaterEqual(time_taken, min_time)

    @contextmanager
    def runs_in_given_time(self, expected, fuzzy=None, min_time=None):
        # Assert the body of the ``with`` takes about *expected* seconds,
        # within *fuzzy* tolerance (platform-dependent default). Yields
        # the (min_time, max_time) window; a violation is re-raised as a
        # flaky race condition.
        if fuzzy is None:
            if sysinfo.EXPECT_POOR_TIMER_RESOLUTION or sysinfo.LIBUV:
                # The noted timer jitter issues on appveyor/pypy3
                fuzzy = expected * 5.0
            else:
                fuzzy = expected / 2.0
        min_time = min_time if min_time is not None else expected - fuzzy
        max_time = expected + fuzzy
        start = perf_counter()
        yield (min_time, max_time)
        elapsed = perf_counter() - start
        try:
            self.assertTrue(
                min_time <= elapsed <= max_time,
                'Expected: %r; elapsed: %r; min: %r; max: %r; fuzzy %r; clock_info: %s' % (
                    expected, elapsed, min_time, max_time, fuzzy, get_clock_info('perf_counter')
                ))
        except AssertionError:
            flaky.reraiseFlakyTestRaceCondition()

    def runs_in_no_time(
            self,
            fuzzy=(0.01 if not sysinfo.EXPECT_POOR_TIMER_RESOLUTION and not sysinfo.LIBUV else 1.0)):
        # Shortcut: assert the body takes effectively zero time.
        # NOTE: the default for *fuzzy* is evaluated once, at import time.
        return self.runs_in_given_time(0.0, fuzzy)
class GreenletAssertMixin(object):
    """Assertions related to greenlets."""

    def assert_greenlet_ready(self, g):
        # A finished greenlet: dead, ready(), and boolean-false.
        self.assertTrue(g.dead, g)
        self.assertTrue(g.ready(), g)
        self.assertFalse(g, g)

    def assert_greenlet_not_ready(self, g):
        self.assertFalse(g.dead, g)
        self.assertFalse(g.ready(), g)

    def assert_greenlet_spawned(self, g):
        # Spawned: started but not yet finished.
        self.assertTrue(g.started, g)
        self.assertFalse(g.dead, g)

    # No difference between spawned and switched-to once
    assert_greenlet_started = assert_greenlet_spawned

    def assert_greenlet_finished(self, g):
        self.assertFalse(g.started, g)
        self.assertTrue(g.dead, g)
class StringAssertMixin(object):
    """
    Assertions dealing with strings.

    ``normalize`` rewrites hex addresses and the test's module name in a
    string so reprs can be compared across runs and locations.
    """

    @LazyOnClass
    def HEX_NUM_RE(self):
        # Matches an optionally-signed hex literal, optionally with a
        # trailing 'L'; case-insensitive.
        import re
        return re.compile('-?0x[0123456789abcdef]+L?', re.I)

    def normalize_addr(self, s, replace='X'):
        # https://github.com/PyCQA/pylint/issues/1127
        return self.HEX_NUM_RE.sub(replace, s) # pylint:disable=no-member

    def normalize_module(self, s, module=None, replace='module'):
        if module is None:
            module = type(self).__module__
        return s.replace(module, replace)

    def normalize(self, s):
        without_addrs = self.normalize_addr(s)
        return self.normalize_module(without_addrs)

    def assert_nstr_endswith(self, o, val):
        as_str = str(o)
        normalized = self.normalize(as_str)
        self.assertTrue(normalized.endswith(val), (as_str, normalized))

    def assert_nstr_startswith(self, o, val):
        as_str = str(o)
        normalized = self.normalize(as_str)
        self.assertTrue(normalized.startswith(val), (as_str, normalized))
class TestTimeout(gevent.Timeout):
    """
    The timeout raised when an individual test method exceeds its
    ``__timeout__``. Captures loop and greenlet diagnostics when it
    fires so the failure message shows what the process was doing.
    """

    # Diagnostic text captured at expiration; appended by __str__.
    _expire_info = ''

    def __init__(self, timeout, method='Not Given'):
        gevent.Timeout.__init__(
            self,
            timeout,
            '%r: test timed out (set class __timeout__ to increase)\n' % (method,),
            ref=False
        )

    def _on_expiration(self, prev_greenlet, ex):
        # Capture state before the timeout unwinds the stack.
        from gevent.util import format_run_info
        loop = gevent.get_hub().loop
        # BUG FIX: this used to be the *string* 'N/A' when the loop had
        # no debug() method; '\n'.join() then split it character by
        # character into 'N\n/\nA'. Keep it a list so the join below is
        # always well-formed.
        debug_info = ['N/A']
        if hasattr(loop, 'debug'):
            debug_info = [str(s) for s in loop.debug()]
        run_info = format_run_info()
        self._expire_info = 'Loop Debug:\n%s\nRun Info:\n%s' % (
            '\n'.join(debug_info), '\n'.join(run_info)
        )
        gevent.Timeout._on_expiration(self, prev_greenlet, ex)

    def __str__(self):
        s = gevent.Timeout.__str__(self)
        s += self._expire_info
        return s
def _wrap_timeout(timeout, method):
if timeout is None:
return method
@wraps(method)
def timeout_wrapper(self, *args, **kwargs):
with TestTimeout(timeout, method):
return method(self, *args, **kwargs)
return timeout_wrapper
def _get_class_attr(classDict, bases, attr, default=AttributeError):
NONE = object()
value = classDict.get(attr, NONE)
if value is not NONE:
return value
for base in bases:
value = getattr(base, attr, NONE)
if value is not NONE:
return value
if default is AttributeError:
raise AttributeError('Attribute %r not found\n%s\n%s\n' % (attr, classDict, bases))
return default
class TestCaseMetaClass(type):
    # wrap each test method with
    # a) timeout check
    # b) fatal error check
    # c) restore the hub's error handler (see expect_one_error)
    # d) totalrefcount check
    def __new__(mcs, classname, bases, classDict):
        # pylint and pep8 fight over what this should be called (mcs or cls).
        # pylint gets it right, but we cant scope disable pep8, so we go with
        # its convention.
        # pylint: disable=bad-mcs-classmethod-argument
        # Resolve the timeout: the class's own __timeout__, else the
        # first base's; scaled up under leak checking because each test
        # then runs several times.
        timeout = classDict.get('__timeout__', 'NONE')
        if timeout == 'NONE':
            timeout = getattr(bases[0], '__timeout__', None)
            if sysinfo.RUN_LEAKCHECKS and timeout is not None:
                timeout *= 6
        check_totalrefcount = _get_class_attr(classDict, bases, 'check_totalrefcount', True)
        error_fatal = _get_class_attr(classDict, bases, 'error_fatal', True)
        uses_handle_error = _get_class_attr(classDict, bases, 'uses_handle_error', True)
        # Python 3: must copy, we mutate the classDict. Interestingly enough,
        # it doesn't actually error out, but under 3.6 we wind up wrapping
        # and re-wrapping the same items over and over and over.
        for key, value in list(classDict.items()):
            if key.startswith('test') and callable(value):
                classDict.pop(key)
                # XXX: When did we stop doing this?
                #value = wrap_switch_count_check(value)
                # Wrapping order (innermost first): timeout, error-fatal,
                # handle_error restore, refcount check.
                value = _wrap_timeout(timeout, value)
                # NOTE(review): this rebinds the shared local, so a
                # per-method error_fatal attribute also affects the
                # methods wrapped after it in this loop — looks
                # unintentional; confirm before relying on it.
                error_fatal = getattr(value, 'error_fatal', error_fatal)
                if error_fatal:
                    value = errorhandler.wrap_error_fatal(value)
                if uses_handle_error:
                    value = errorhandler.wrap_restore_handle_error(value)
                if check_totalrefcount and sysinfo.RUN_LEAKCHECKS:
                    value = leakcheck.wrap_refcount(value)
                classDict[key] = value
        return type.__new__(mcs, classname, bases, classDict)
def _noop():
return
class SubscriberCleanupMixin(object):
    """
    TestCase mixin that snapshots the global ``gevent.events.subscribers``
    list in ``setUp`` and restores it in ``tearDown``, so subscribers a
    test adds never leak into other tests.
    """

    def setUp(self):
        super(SubscriberCleanupMixin, self).setUp()
        from gevent import events
        self.__old_subscribers = list(events.subscribers)

    def addSubscriber(self, sub):
        # Register *sub* globally; tearDown removes it again.
        from gevent import events
        events.subscribers.append(sub)

    def tearDown(self):
        from gevent import events
        # Restore in place: other code may hold references to the list.
        events.subscribers[:] = self.__old_subscribers
        super(SubscriberCleanupMixin, self).tearDown()
class TestCase(
    # TestCaseMetaClass("NewBase",
    #                   (SubscriberCleanupMixin,
    #                    TimeAssertMixin,
    #                    GreenletAssertMixin,
    #                    StringAssertMixin,
    #                    BaseTestCase,),
    #                   {})):
        SubscriberCleanupMixin, TimeAssertMixin, GreenletAssertMixin,
        StringAssertMixin, BaseTestCase,
        metaclass=TestCaseMetaClass
):
    """
    The base test case for gevent's own test suite. The metaclass wraps
    every ``test*`` method with timeout, error-fatal, handle_error
    restoration and (optionally) refcount checks.
    """

    # Per-test timeout in seconds; the metaclass wraps each test method
    # with a TestTimeout of this duration.
    __timeout__ = params.LOCAL_TIMEOUT if not sysinfo.RUNNING_ON_CI else params.CI_TIMEOUT

    # 'default' means: resolved per-test in run() via get_switch_expected().
    switch_expected = 'default'

    #: Set this to true to cause errors that get reported to the hub to
    #: always get propagated to the main greenlet. This can be done at the
    #: class or method level.
    #: .. caution:: This can hide errors and make it look like exceptions
    #: are propagated even if they're not.
    error_fatal = True
    uses_handle_error = True
    # Replaced with a fresh list in setUp(); see _close_on_teardown().
    close_on_teardown = ()

    # This is really used by the SubscriberCleanupMixin
    __old_subscribers = () # pylint:disable=unused-private-member

    def run(self, *args, **kwargs): # pylint:disable=signature-differs
        # Resolve the 'default' switch expectation before running.
        if self.switch_expected == 'default':
            self.switch_expected = get_switch_expected(self.fullname)
        return super(TestCase, self).run(*args, **kwargs)

    def setUp(self):
        super(TestCase, self).setUp()
        # Especially if we're running in leakcheck mode, where
        # the same test gets executed repeatedly, we need to update the
        # current time. Tests don't always go through the full event loop,
        # so that doesn't always happen. test__pool.py:TestPoolYYY.test_async
        # tends to show timeouts that are too short if we don't.
        # XXX: Should some core part of the loop call this?
        hub = get_hub_if_exists()
        if hub and hub.loop:
            hub.loop.update_now()
        self.close_on_teardown = []
        self.addCleanup(self._tearDownCloseOnTearDown)

    def _callTestMethod(self, method):
        # 3.12 started raising a stupid warning about returning
        # non-None from ``test_...()`` being deprecated. Since the
        # test framework never cares about the return value anyway,
        # this is an utterly pointless annoyance. Override the method
        # that raises that deprecation. (Are the maintainers planning
        # to make the return value _mean_ something someday? That's
        # the only valid reason for them to do this. Answer: No, no
        # they're not. They're just trying to protect people from
        # writing broken tests that accidentally turn into generators
        # or something. Which...if people don't notice their tests
        # aren't working...well. Now, perhaps this got worse in the
        # era of asyncio where *everything* is a generator. But that's
        # not our problem; we have better ways of dealing with the
        # shortcomings of asyncio, namely, don't use it.
        # https://bugs.python.org/issue41322)
        method()

    def tearDown(self):
        # A test may set self.skipTearDown to bypass cleanup callbacks.
        if getattr(self, 'skipTearDown', False):
            del self.close_on_teardown[:]
            return
        cleanup = getattr(self, 'cleanup', _noop)
        cleanup()
        self._error = self._none
        super(TestCase, self).tearDown()

    def _tearDownCloseOnTearDown(self):
        # Close (or call) everything registered via _close_on_teardown,
        # LIFO order, ignoring errors from individual resources.
        while self.close_on_teardown:
            x = self.close_on_teardown.pop()
            close = getattr(x, 'close', x)
            try:
                close()
            except Exception: # pylint:disable=broad-except
                pass

    def _close_on_teardown(self, resource):
        """
        *resource* either has a ``close`` method, or is a
        callable.
        """
        self.close_on_teardown.append(resource)
        return resource

    @property
    def testname(self):
        # The old private name is the CPython 2-era spelling.
        return getattr(self, '_testMethodName', '') or getattr(self, '_TestCase__testMethodName')

    @property
    def testcasename(self):
        return self.__class__.__name__ + '.' + self.testname

    @property
    def modulename(self):
        return os.path.basename(sys.modules[self.__class__.__module__].__file__).rsplit('.', 1)[0]

    @property
    def fullname(self):
        return os.path.splitext(os.path.basename(self.modulename))[0] + '.' + self.testcasename

    _none = (None, None, None)
    # (context, kind, value)
    _error = _none

    def expect_one_error(self):
        # Redirect the hub's error handler so the next hub-reported
        # error is captured in self._error instead of being printed.
        self.assertEqual(self._error, self._none)
        gevent.get_hub().handle_error = self._store_error

    def _store_error(self, where, t, value, tb):
        del tb
        if self._error != self._none:
            # A second error while one is already stored: propagate it.
            gevent.get_hub().parent.throw(t, value)
        else:
            self._error = (where, t, value)

    def peek_error(self):
        return self._error

    def get_error(self):
        # Return the stored error and reset it.
        try:
            return self._error
        finally:
            self._error = self._none

    def assert_error(self, kind=None, value=None, error=None, where_type=None):
        # Check the captured hub error against the expected exception
        # class, value (or message string), and context type.
        if error is None:
            error = self.get_error()
        econtext, ekind, evalue = error
        if kind is not None:
            self.assertIsInstance(kind, type)
            self.assertIsNotNone(
                ekind,
                "Error must not be none %r" % (error,))
            assert issubclass(ekind, kind), error
        if value is not None:
            if isinstance(value, str):
                self.assertEqual(str(evalue), value)
            else:
                self.assertIs(evalue, value)
        if where_type is not None:
            self.assertIsInstance(econtext, where_type)
        return error

    def assertMonkeyPatchedFuncSignatures(self, mod_name, func_names=(), exclude=()):
        # Verify that gevent's monkey-patch replacements for functions in
        # *mod_name* keep the same signatures as the originals.
        # If inspect.getfullargspec is not available,
        # We use inspect.getargspec because it's the only thing available
        # in Python 2.7, but it is deprecated
        # pylint:disable=deprecated-method,too-many-locals
        import inspect
        import warnings
        from gevent.monkey import get_original
        # XXX: Very similar to gevent.monkey.patch_module. Should refactor?
        gevent_module = getattr(__import__('gevent.' + mod_name), mod_name)
        module_name = getattr(gevent_module, '__target__', mod_name)
        funcs_given = True
        if not func_names:
            funcs_given = False
            func_names = getattr(gevent_module, '__implements__')
        for func_name in func_names:
            if func_name in exclude:
                continue
            gevent_func = getattr(gevent_module, func_name)
            if not inspect.isfunction(gevent_func) and not funcs_given:
                continue
            func = get_original(module_name, func_name)
            try:
                with warnings.catch_warnings():
                    getfullargspec = inspect.getfullargspec
                    gevent_sig = getfullargspec(gevent_func)
                    sig = getfullargspec(func)
            except TypeError:
                if funcs_given:
                    raise
                # Can't do this one. If they specifically asked for it,
                # it's an error, otherwise it's not.
                # Python 3 can check a lot more than Python 2 can.
                continue
            self.assertEqual(sig.args, gevent_sig.args, func_name)
            # The next two might not actually matter?
            self.assertEqual(sig.varargs, gevent_sig.varargs, func_name)
            self.assertEqual(sig.defaults, gevent_sig.defaults, func_name)
            if hasattr(sig, 'keywords'): # the old version
                msg = (func_name, sig.keywords, gevent_sig.keywords)
                try:
                    self.assertEqual(sig.keywords, gevent_sig.keywords, msg)
                except AssertionError:
                    # Ok, if we take `kwargs` and the original function doesn't,
                    # that's OK. We have to do that as a compatibility hack sometimes to
                    # work across multiple python versions.
                    self.assertIsNone(sig.keywords, msg)
                    self.assertEqual('kwargs', gevent_sig.keywords)
            else:
                # The new hotness. Unfortunately, we can't actually check these things
                # until we drop Python 2 support from the shared code. The only known place
                # this is a problem is python 3.11 socket.create_connection(), which we manually
                # ignore. So the checks all pass as is.
                self.assertEqual(sig.kwonlyargs, gevent_sig.kwonlyargs, func_name)
                self.assertEqual(sig.kwonlydefaults, gevent_sig.kwonlydefaults, func_name)
            # Should deal with others: https://docs.python.org/3/library/inspect.html#inspect.getfullargspec

    def assertEqualFlakyRaceCondition(self, a, b):
        # Equality check whose failure is reported as a flaky race.
        try:
            self.assertEqual(a, b)
        except AssertionError:
            flaky.reraiseFlakyTestRaceCondition()

    def assertStartsWith(self, it, has_prefix):
        self.assertTrue(it.startswith(has_prefix), (it, has_prefix))

    def assertNotMonkeyPatched(self):
        from gevent import monkey
        self.assertFalse(monkey.is_anything_patched())

View File

@@ -0,0 +1,991 @@
# -*- coding: utf-8 -*-
import re
import sys
import os
import glob
import operator
import traceback
import importlib
from contextlib import contextmanager
from datetime import timedelta
from multiprocessing.pool import ThreadPool
from multiprocessing import cpu_count
from gevent._util import Lazy
from . import util
from .resources import parse_resources
from .resources import setup_resources
from .resources import unparse_resources
from .sysinfo import RUNNING_ON_CI
from .sysinfo import PYPY
from .sysinfo import PY2
from .sysinfo import RESOLVER_ARES
from .sysinfo import RUN_LEAKCHECKS
from .sysinfo import OSX
from . import six
from . import travis
# Import this while we're probably single-threaded/single-processed
# to try to avoid issues with PyPy 5.10.
# See https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
try:
    __import__('_testcapi')
except (ImportError, OSError):
    # This can raise a wide variety of errors
    pass
# Per-test subprocess timeout, in seconds.
TIMEOUT = 100 # seconds
# Leave one core free for the runner itself.
AVAIL_NWORKERS = cpu_count() - 1
DEFAULT_NWORKERS = int(os.environ.get('NWORKERS') or max(AVAIL_NWORKERS, 4))
# Cap parallelism on very wide machines.
if DEFAULT_NWORKERS > 15:
    DEFAULT_NWORKERS = 10
if RUN_LEAKCHECKS:
    # Capturing the stats takes time, and we run each
    # test at least twice
    TIMEOUT = 200
DEFAULT_RUN_OPTIONS = {
    'timeout': TIMEOUT
}
if RUNNING_ON_CI:
    # Too many and we get spurious timeouts
    DEFAULT_NWORKERS = 4 if not OSX else 2
def _package_relative_filename(filename, package):
if not os.path.isfile(filename) and package:
# Ok, try to locate it as a module in the package
package_dir = _dir_from_package_name(package)
return os.path.join(package_dir, filename)
return filename
def _dir_from_package_name(package):
package_mod = importlib.import_module(package)
package_dir = os.path.dirname(package_mod.__file__)
return package_dir
class ResultCollector(object):
    """
    Accumulates test run results.

    ``collector += result`` records a result and counts its cases;
    ``collector <<= result`` records it without counting (used for
    first attempts that were rerun).
    """

    def __init__(self):
        # Number of test files scheduled to run (set by the caller).
        self.total = 0
        # name -> failing RunResult / name -> True for passing ones.
        self.failed = {}
        self.passed = {}
        self.total_cases = 0
        self.total_skipped = 0
        # Every RunResult reported: failed, passed, rerun
        self._all_results = []
        # name -> RunResult for first attempts that were retried.
        self.reran = {}

    def __iadd__(self, result):
        self._all_results.append(result)
        if result:
            self.passed[result.name] = True
        else:
            self.failed[result.name] = result #[cmd, kwargs]
        self.total_cases += result.run_count
        self.total_skipped += result.skipped_count
        return self

    def __ilshift__(self, result):
        """
        collector <<= result

        Stores the result, but does not count it towards
        the number of cases run, skipped, passed or failed.
        """
        self._all_results.append(result)
        self.reran[result.name] = result
        return self

    @property
    def longest_running_tests(self):
        """
        A new list of RunResult objects, sorted from longest running
        to shortest running.
        """
        by_duration = lambda run_result: run_result.run_duration
        return sorted(self._all_results, key=by_duration, reverse=True)
class FailFast(Exception):
    """Raised internally to abort the run after the first failing test."""
class Runner(object):
    """
    Runs test commands in a thread pool (each command is a subprocess
    started via ``util.run``), collecting results into a
    :class:`ResultCollector`, and finally reports them.
    """

    # Seconds to sleep between reaping passes / spawn attempts.
    TIME_WAIT_REAP = 0.1
    TIME_WAIT_SPAWN = 0.05

    def __init__(self,
                 tests,
                 *,
                 allowed_return_codes=(),
                 configured_failing_tests=(),
                 failfast=False,
                 quiet=False,
                 configured_run_alone_tests=(),
                 worker_count=DEFAULT_NWORKERS,
                 second_chance=False):
        """
        :keyword allowed_return_codes: Return codes other than
           0 that are counted as a success. Needed because some versions
           of Python give ``unittest`` weird return codes.
        :keyword quiet: Set to True or False to explicitly choose. Set to
           `None` to use the default, which may come from the environment variable
           ``GEVENTTEST_QUIET``.
        """
        self._tests = tests
        self._configured_failing_tests = configured_failing_tests
        self._quiet = quiet
        self._configured_run_alone_tests = configured_run_alone_tests
        # failfast aborts on first failure; second_chance retries a
        # failure once — combining them would be contradictory.
        assert not (failfast and second_chance)
        self._failfast = failfast
        self._second_chance = second_chance
        self.results = ResultCollector()
        self.results.total = len(self._tests)
        # Outstanding AsyncResult jobs from the thread pool.
        self._running_jobs = []
        # Never more workers than tests; at least one.
        self._worker_count = min(len(tests), worker_count) or 1
        self._allowed_return_codes = allowed_return_codes

    def _run_one(self, cmd, **kwargs):
        # Run a single test command, retrying once if second-chance
        # mode is on; record the outcome in self.results.
        kwargs['allowed_return_codes'] = self._allowed_return_codes
        if self._quiet is not None:
            kwargs['quiet'] = self._quiet
        result = util.run(cmd, **kwargs)
        if not result and self._second_chance:
            # Record the failed first attempt without counting it.
            self.results <<= result
            util.log("> %s", result.name, color='warning')
            result = util.run(cmd, **kwargs)
        if not result and self._failfast:
            # Under Python 3.9 (maybe older versions?), raising the
            # SystemExit here (a background thread belonging to the
            # pool) doesn't seem to work well. It gets stuck waiting
            # for a lock? The job never shows up as finished.
            raise FailFast(cmd)
        self.results += result

    def _reap(self):
        "Clean up the list of running jobs, returning how many are still outstanding."
        for r in self._running_jobs[:]:
            if not r.ready():
                continue
            if r.successful():
                self._running_jobs.remove(r)
            else:
                # The worker raised; r.get() re-raises it here.
                r.get()
                sys.exit('Internal error in testrunner.py: %r' % (r, ))
        return len(self._running_jobs)

    def _reap_all(self):
        # Block until every outstanding job has finished.
        util.log("Reaping %d jobs", len(self._running_jobs), color="debug")
        while self._running_jobs:
            if not self._reap():
                break
            util.sleep(self.TIME_WAIT_REAP)

    def _spawn(self, pool, cmd, options):
        # Wait for a worker slot, then submit *cmd* asynchronously.
        while True:
            if self._reap() < self._worker_count:
                job = pool.apply_async(self._run_one, (cmd, ), options or {})
                self._running_jobs.append(job)
                return
            util.sleep(self.TIME_WAIT_SPAWN)

    def __call__(self):
        util.log("Running tests in parallel with concurrency %s %s." % (
            self._worker_count,
            util._colorize('number', '(concurrency available: %d)' % AVAIL_NWORKERS)
        ),)
        # Setting global state, in theory we can be used multiple times.
        # This is fine as long as we are single threaded and call these
        # sequentially.
        util.BUFFER_OUTPUT = self._worker_count > 1 or self._quiet
        start = util.perf_counter()
        try:
            self._run_tests()
        except KeyboardInterrupt:
            # Report what we have so far, then propagate the interrupt.
            self._report(util.perf_counter() - start, exit=False)
            util.log('(partial results)\n')
            raise
        except:
            traceback.print_exc()
            raise
        self._reap_all()
        self._report(util.perf_counter() - start, exit=True)

    def _run_tests(self):
        "Runs the tests, produces no report."
        run_alone = []
        tests = self._tests
        pool = ThreadPool(self._worker_count)
        try:
            for cmd, options in tests:
                options = options or {}
                # ``matches`` is defined later in this module.
                if matches(self._configured_run_alone_tests, cmd):
                    # Deferred: these run serially after the pool drains.
                    run_alone.append((cmd, options))
                else:
                    self._spawn(pool, cmd, options)
            pool.close()
            pool.join()
            if run_alone:
                util.log("Running tests marked standalone")
                for cmd, options in run_alone:
                    self._run_one(cmd, **options)
        except KeyboardInterrupt:
            try:
                util.log('Waiting for currently running to finish...')
                self._reap_all()
            except KeyboardInterrupt:
                # Second Ctrl-C: give up waiting and kill the pool.
                pool.terminate()
                raise
        except:
            pool.terminate()
            raise

    def _report(self, elapsed_time, exit=False):
        # ``report`` is defined later in this module.
        results = self.results
        report(
            results,
            exit=exit,
            took=elapsed_time,
            configured_failing_tests=self._configured_failing_tests,
        )
class TravisFoldingRunner(object):
    """
    Wraps a :class:`Runner` so its test output appears inside a Travis
    CI output "fold" (a collapsible section of the build log).
    """

    def __init__(self, runner, travis_fold_msg):
        self._runner = runner
        self._travis_fold_msg = travis_fold_msg
        # Fold names need only be unique within a build; a timestamp works.
        self._travis_fold_name = str(int(util.perf_counter()))
        # A zope-style acquisition proxy would be convenient here.
        # Patch the runner's _run_tests so the fold brackets exactly the
        # test-running phase, leaving the final report outside the fold.
        run_tests = runner._run_tests
        def _run_tests():
            self._begin_fold()
            try:
                run_tests()
            finally:
                self._end_fold()
        runner._run_tests = _run_tests

    def _begin_fold(self):
        travis.fold_start(self._travis_fold_name,
                          self._travis_fold_msg)

    def _end_fold(self):
        travis.fold_end(self._travis_fold_name)

    def __call__(self):
        return self._runner()
class Discovery(object):
package_dir = None
package = None
def __init__(
self,
tests=None,
ignore_files=None,
ignored=(),
coverage=False,
package=None,
config=None,
allow_combine=True,
):
self.config = config or {}
self.ignore = set(ignored or ())
self.tests = tests
self.configured_test_options = config.get('TEST_FILE_OPTIONS', set())
self.allow_combine = allow_combine
if ignore_files:
ignore_files = ignore_files.split(',')
for f in ignore_files:
self.ignore.update(set(load_list_from_file(f, package)))
if coverage:
self.ignore.update(config.get('IGNORE_COVERAGE', set()))
if package:
self.package = package
self.package_dir = _dir_from_package_name(package)
    class Discovered(object):
        """
        Classifies discovered test files into execution buckets:

        - ``to_import``: modules exposing a ``TESTRUNNER`` attribute;
          expanded later by importing them.
        - ``std_monkey_patch_files``: tests doing a plain top-level
          ``patch_all()``; combinable into one unittest process.
        - ``no_monkey_patch_files``: tests that never monkey-patch;
          likewise combinable.
        - ``commands``: everything else, as ``(argv, options)`` pairs
          run in their own process.
        """

        def __init__(self, package, configured_test_options, ignore, config, allow_combine):
            # Where discovery started; TESTRUNNER expansion must happen
            # from here even though visiting may have chdir'd elsewhere.
            self.orig_dir = os.getcwd()
            self.configured_run_alone = config['RUN_ALONE']
            self.configured_failing_tests = config['FAILING_TESTS']
            self.package = package
            self.configured_test_options = configured_test_options
            self.allow_combine = allow_combine
            self.ignore = ignore
            # The buckets; see the class docstring.
            self.to_import = []
            self.std_monkey_patch_files = []
            self.no_monkey_patch_files = []
            self.commands = []

        @staticmethod
        def __makes_simple_monkey_patch(
                contents,
                _patch_present=re.compile(br'[^#].*patch_all\(\)'),
                _patch_indented=re.compile(br' .*patch_all\(\)')
        ):
            """Does *contents* call ``patch_all()`` at top level, uncommented?"""
            return (
                # A non-commented patch_all() call is present
                bool(_patch_present.search(contents))
                # that is not indented (because that implies its not at the top-level,
                # so some preconditions are being set)
                and not _patch_indented.search(contents)
            )

        @staticmethod
        def __file_allows_monkey_combine(contents):
            # Files opt out of monkey-combining with a magic comment.
            return b'testrunner-no-monkey-combine' not in contents

        @staticmethod
        def __file_allows_combine(contents):
            # Files opt out of any combining with a magic comment.
            return b'testrunner-no-combine' not in contents

        @staticmethod
        def __calls_unittest_main_toplevel(
                contents,
                _greentest_main=re.compile(br' greentest.main\(\)'),
                _unittest_main=re.compile(br' unittest.main\(\)'),
                _import_main=re.compile(br'from gevent.testing import.*main'),
                _main=re.compile(br' main\(\)'),
        ):
            """
            Does the file run its tests via a ``main()``-style call?
            Only such files can safely be imported and combined under a
            single ``python -m unittest`` run.
            """
            # TODO: Add a check that this comes in a line directly after
            # if __name__ == __main__.
            return (
                _greentest_main.search(contents)
                or _unittest_main.search(contents)
                or (_import_main.search(contents) and _main.search(contents))
            )

        def __has_config(self, filename):
            # Any special configuration (leak checks, per-file options,
            # run-alone or known-failing status) disqualifies combining.
            return (
                RUN_LEAKCHECKS
                or filename in self.configured_test_options
                or filename in self.configured_run_alone
                or matches(self.configured_failing_tests, filename)
            )

        def __can_monkey_combine(self, filename, contents):
            # Combinable under monkey-patching: combining is allowed,
            # there is no special config, the file does a simple top-level
            # patch_all(), does not opt out, and has a main() entry point.
            return (
                self.allow_combine
                and not self.__has_config(filename)
                and self.__makes_simple_monkey_patch(contents)
                and self.__file_allows_monkey_combine(contents)
                and self.__file_allows_combine(contents)
                and self.__calls_unittest_main_toplevel(contents)
            )

        @staticmethod
        def __makes_no_monkey_patch(contents, _patch_present=re.compile(br'[^#].*patch_\w*\(')):
            # True when the file never calls any (uncommented) patch_*() function.
            return not _patch_present.search(contents)

        def __can_nonmonkey_combine(self, filename, contents):
            # Like __can_monkey_combine, but requires no patching at all.
            return (
                self.allow_combine
                and not self.__has_config(filename)
                and self.__makes_no_monkey_patch(contents)
                and self.__file_allows_combine(contents)
                and self.__calls_unittest_main_toplevel(contents)
            )

        def __begin_command(self):
            # Base argv for a child process: current interpreter, unbuffered.
            cmd = [sys.executable, '-u']
            # XXX: -X track-resources is broken. This happened when I updated to
            # PyPy 7.3.2. It started failing to even start inside the virtual environment
            # with
            #
            #   debug: OperationError:
            #   debug: operror-type: ImportError
            #   debug: operror-value: No module named traceback
            #
            # I don't know if this is PyPy's problem or a problem in virtualenv:
            #
            #   virtualenv==20.0.35
            #   virtualenv-clone==0.5.4
            #   virtualenvwrapper==4.8.4
            #
            # Deferring investigation until I need this...
            # if PYPY and PY2:
            #     # Doesn't seem to be an env var for this.
            #     # XXX: track-resources is broken in virtual environments
            #     # on 7.3.2.
            #     cmd.extend(('-X', 'track-resources'))
            return cmd

        def __add_test(self, qualified_name, filename, contents):
            """Classify one test file into the appropriate bucket."""
            if b'TESTRUNNER' in contents: # test__monkey_patching.py
                # XXX: Rework this to avoid importing.
                # XXX: Rework this to allow test combining (it could write the files out and return
                # them directly; we would use 'python -m gevent.monkey --module unittest ...)
                self.to_import.append(qualified_name)
            elif self.__can_monkey_combine(filename, contents):
                self.std_monkey_patch_files.append(qualified_name if self.package else filename)
            elif self.__can_nonmonkey_combine(filename, contents):
                self.no_monkey_patch_files.append(qualified_name if self.package else filename)
            else:
                # XXX: For simple python module tests, try this with
                # `runpy.run_module`, very similar to the way we run
                # things for monkey patching. The idea here is that we
                # can perform setup ahead of time (e.g.,
                # setup_resources()) in each test without having to do
                # it manually or force calls or modifications to those
                # tests.
                cmd = self.__begin_command()
                if self.package:
                    # Using a package is the best way to work with coverage 5
                    # when we specify 'source = <package>'
                    cmd.append('-m' + qualified_name)
                else:
                    cmd.append(filename)
                options = DEFAULT_RUN_OPTIONS.copy()
                options.update(self.configured_test_options.get(filename, {}))
                self.commands.append((cmd, options))

        @staticmethod
        def __remove_options(lst):
            # Drop argv entries that are empty or look like flags.
            return [x for x in lst if x and not x.startswith('-')]

        def __expand_imports(self):
            # Import each TESTRUNNER module and splice its commands in,
            # skipping any whose target file is on the ignore list.
            for qualified_name in self.to_import:
                module = importlib.import_module(qualified_name)
                for cmd, options in module.TESTRUNNER():
                    if self.__remove_options(cmd)[-1] in self.ignore:
                        continue
                    self.commands.append((cmd, options))
            del self.to_import[:]

        def __combine_commands(self, files, group_size=5):
            """
            Batch *files* into ``python -m unittest`` commands, prepended
            to ``self.commands``; empties *files* in place.

            NOTE(review): the ``>`` comparison after incrementing means each
            group actually holds ``group_size + 1`` entries — confirm whether
            that off-by-one is intentional.
            """
            if not files:
                return
            from itertools import groupby
            cnt = [0, 0]  # [items in current group, current group index]
            def make_group(_):
                # groupby() key function: emit the same index for a run of
                # consecutive items, then advance to the next group.
                if cnt[0] > group_size:
                    cnt[0] = 0
                    cnt[1] += 1
                cnt[0] += 1
                return cnt[1]

            for _, group in groupby(files, make_group):
                cmd = self.__begin_command()
                cmd.append('-m')
                cmd.append('unittest')
                # cmd.append('-v')
                for name in group:
                    cmd.append(name)
                self.commands.insert(0, (cmd, DEFAULT_RUN_OPTIONS.copy()))
            del files[:]

        def visit_file(self, filename):
            """Read one test file (binary) and classify it via __add_test."""
            # Support either 'gevent.tests.foo' or 'gevent/tests/foo.py'
            if filename.startswith('gevent.tests'):
                # XXX: How does this interact with 'package'? Probably not well
                qualified_name = module_name = filename
                filename = filename[len('gevent.tests') + 1:]
                filename = filename.replace('.', os.sep) + '.py'
            else:
                module_name = os.path.splitext(filename)[0]
                qualified_name = self.package + '.' + module_name if self.package else module_name
            # Also allow just 'foo' as a shortcut for 'gevent.tests.foo'
            abs_filename = os.path.abspath(filename)
            if (
                    not os.path.exists(abs_filename)
                    and not filename.endswith('.py')
                    and os.path.exists(abs_filename + '.py')):
                abs_filename += '.py'

            with open(abs_filename, 'rb') as f:
                # Some of the test files (e.g., test__socket_dns) are
                # UTF8 encoded. Depending on the environment, Python 3 may
                # try to decode those as ASCII, which fails with UnicodeDecodeError.
                # Thus, be sure to open and compare in binary mode.
                # Open the absolute path to make errors more clear,
                # but we can't store the absolute path, our configuration is based on
                # relative file names.
                contents = f.read()
            self.__add_test(qualified_name, filename, contents)

        def visit_files(self, filenames):
            """Classify every file, then expand imports and build batches."""
            for filename in filenames:
                self.visit_file(filename)
            with Discovery._in_dir(self.orig_dir):
                self.__expand_imports()
                self.__combine_commands(self.std_monkey_patch_files)
                self.__combine_commands(self.no_monkey_patch_files)
@staticmethod
@contextmanager
def _in_dir(package_dir):
olddir = os.getcwd()
if package_dir:
os.chdir(package_dir)
try:
yield
finally:
os.chdir(olddir)
    @Lazy
    def discovered(self):
        """
        The fully populated ``Discovered`` instance; computed once and
        cached by ``@Lazy``.  Globbing and classification happen with
        the package directory as the working directory, because the
        configuration is written in terms of relative file names.
        """
        tests = self.tests
        discovered = self.Discovered(self.package, self.configured_test_options,
                                     self.ignore, self.config, self.allow_combine)
        # We need to glob relative names, our config is based on filenames still
        with self._in_dir(self.package_dir):
            if not tests:
                # Default: every test_*.py except the shared support module.
                tests = set(glob.glob('test_*.py')) - set(['test_support.py'])
            else:
                tests = set(tests)

            if self.ignore:
                # Always ignore the designated list, even if tests
                # were specified on the command line. This fixes a
                # nasty interaction with
                # test__threading_vs_settrace.py being run under
                # coverage when 'grep -l subprocess test*py' is used
                # to list the tests to run.
                tests -= self.ignore
            tests = sorted(tests)
            discovered.visit_files(tests)
        return discovered
    def __iter__(self):
        # Iterate the discovered (argv, options) pairs.
        return iter(self.discovered.commands) # pylint:disable=no-member
    def __len__(self):
        # Number of commands that will be executed.
        return len(self.discovered.commands) # pylint:disable=no-member
def load_list_from_file(filename, package):
    """
    Read the non-blank, non-comment entries of *filename* (resolved
    relative to *package*), one per line.  ``#`` starts a comment.
    Returns an empty list when *filename* is falsy.
    """
    entries = []
    if not filename:
        return entries
    # pylint:disable=unspecified-encoding
    with open(_package_relative_filename(filename, package)) as f:
        for raw_line in f:
            entry = raw_line.split('#', 1)[0].strip()
            if entry:
                entries.append(entry)
    return entries
def matches(possibilities, command, include_flaky=True):
    """
    Does *command* (an argv list or a string) correspond to any entry
    of *possibilities*?  Entries may carry a ``'FLAKY '`` prefix, which
    is stripped for matching; when *include_flaky* is false such
    entries never match at all.
    """
    if isinstance(command, list):
        command = ' '.join(command)
    is_single_token = ' ' not in command
    for candidate in possibilities:
        if candidate.startswith('FLAKY ') and not include_flaky:
            continue
        candidate = candidate.replace('FLAKY ', '')
        # Our configs are still mostly written in terms of file names,
        # but the non-monkey tests are now using package names.
        # Strip off '.py' from filenames to see if we match a module.
        # XXX: This could be much better. Our command needs better structure.
        if command.endswith(' ' + candidate):
            return True
        if command.endswith(candidate.replace(".py", '')):
            return True
        if is_single_token and command == candidate:
            return True
    return False
def format_seconds(seconds):
    """
    Render a duration for humans: fractional seconds below 20s
    (e.g. ``'5.0s'``), otherwise an ``H:MM:SS``-style string with a
    leading ``0:`` hour stripped (e.g. ``'01:05'``).
    """
    if seconds < 20:
        return '%.1fs' % seconds
    formatted = str(timedelta(seconds=round(seconds)))
    return formatted[2:] if formatted.startswith('0:') else formatted
def _show_longest_running(result_collector, how_many=5):
    """
    Log the slowest tests, stopping once *how_many* distinct test names
    have been shown.  Repeats of a name are printed but do not count
    toward the limit.
    """
    slowest = result_collector.longest_running_tests
    if not slowest:
        return
    util.log('\nLongest-running tests:')
    # Align the durations on the widest formatted value (the first entry).
    width = len('%.1f' % slowest[0].run_duration)
    fmt = '%' + str(width) + '.1f seconds: %s'
    distinct_names = set()
    for result in slowest:
        util.log(fmt, result.run_duration, result.name)
        distinct_names.add(result.name)
        if len(distinct_names) >= how_many:
            break
def report(result_collector, # type: ResultCollector
           exit=True, took=None,
           configured_failing_tests=()):
    """
    Log a summary of the collected results and, when *exit* is true,
    terminate the process with a code describing the outcome:

    - ``min(100, #unexpected failures)`` when tests failed unexpectedly;
    - ``101`` when tests expected to fail (non-FLAKY) passed instead;
    - an error message (exit status 1) when no tests ran at all.
    """
    # pylint:disable=redefined-builtin,too-many-branches,too-many-locals
    total = result_collector.total
    failed = result_collector.failed
    passed = result_collector.passed
    total_cases = result_collector.total_cases
    total_skipped = result_collector.total_skipped

    _show_longest_running(result_collector)

    if took:
        took = ' in %s' % format_seconds(took)
    else:
        took = ''

    failed_expected = []
    failed_unexpected = []
    passed_unexpected = []

    # Tests that passed although configured (non-FLAKY) to fail.
    for name in passed:
        if matches(configured_failing_tests, name, include_flaky=False):
            passed_unexpected.append(name)

    if passed_unexpected:
        util.log('\n%s/%s unexpected passes', len(passed_unexpected), total, color='error')
        print_list(passed_unexpected)

    if result_collector.reran:
        util.log('\n%s/%s tests rerun', len(result_collector.reran), total, color='warning')
        print_list(result_collector.reran)

    if failed:
        util.log('\n%s/%s tests failed%s', len(failed), total, took, color='warning')
        # Partition failures: configured ones (FLAKY included) are
        # "expected"; everything else is an unexpected failure.
        for name in failed:
            if matches(configured_failing_tests, name, include_flaky=True):
                failed_expected.append(name)
            else:
                failed_unexpected.append(name)

        if failed_expected:
            util.log('\n%s/%s expected failures', len(failed_expected), total, color='warning')
            print_list(failed_expected)
        if failed_unexpected:
            util.log('\n%s/%s unexpected failures', len(failed_unexpected), total, color='error')
            print_list(failed_unexpected)

    util.log(
        '\nRan %s tests%s in %s files%s',
        total_cases,
        util._colorize('skipped', " (skipped=%d)" % total_skipped) if total_skipped else '',
        total,
        took,
    )

    if exit:
        if failed_unexpected:
            sys.exit(min(100, len(failed_unexpected)))
        if passed_unexpected:
            sys.exit(101)
        if total <= 0:
            sys.exit('No tests found.')
def print_list(lst):
    """Log every entry of *lst* as an indented bullet line."""
    for entry in lst:
        util.log(' - %s', entry)
def _setup_environ(debug=False):
    """
    Configure environment variables for the child test processes.

    Sets warning filters, the fault handler, and dev mode; when *debug*
    is true, also enables tracemalloc and the debug memory allocator.
    Variables the user already set are left untouched.  Finally logs
    every ``PYTHON*``/``GEVENT*`` variable in effect.
    """
    def not_set(key):
        # Treat an empty-string value the same as an unset variable.
        return not bool(os.environ.get(key))

    if (not_set('PYTHONWARNINGS')
            and (not sys.warnoptions
                 # Python 3.7 goes from [] to ['default'] for nothing
                 or sys.warnoptions == ['default'])):
        # action:message:category:module:line
        # - when a warning matches
        #   more than one option, the action for the last matching
        #   option is performed.
        # - action is one of : ignore, default, all, module, once, error

        # Enable default warnings such as ResourceWarning.
        # ResourceWarning doesn't exist on Py2, so don't put it
        # in there to avoid a warnnig.
        defaults = [
            'default',
            'default::DeprecationWarning',
        ]
        if not PY2:
            defaults.append('default::ResourceWarning')

        os.environ['PYTHONWARNINGS'] = ','.join(defaults + [
            # On Python 3[.6], the system site.py module has
            # "open(fullname, 'rU')" which produces the warning that
            # 'U' is deprecated, so ignore warnings from site.py
            'ignore:::site:',
            # pkgutil on Python 2 complains about missing __init__.py
            'ignore:::pkgutil:',
            # importlib/_bootstrap.py likes to spit out "ImportWarning:
            # can't resolve package from __spec__ or __package__, falling
            # back on __name__ and __path__". I have no idea what that means, but it seems harmless
            # and is annoying.
            'ignore:::importlib._bootstrap:',
            'ignore:::importlib._bootstrap_external:',
            # importing ABCs from collections, not collections.abc
            'ignore:::pkg_resources._vendor.pyparsing:',
            'ignore:::dns.namedict:',
            # dns.hash itself is being deprecated, importing it raises the warning;
            # we don't import it, but dnspython still does
            'ignore:::dns.hash:',
            # dns.zone uses some raw regular expressions
            # without the r'' syntax, leading to DeprecationWarning: invalid
            # escape sequence. This is fixed in 2.0 (Python 3 only).
            'ignore:::dns.zone:',
        ])

    if not_set('PYTHONFAULTHANDLER'):
        # Dump tracebacks on crashes in children.
        os.environ['PYTHONFAULTHANDLER'] = 'true'

    if not_set('GEVENT_DEBUG') and debug:
        os.environ['GEVENT_DEBUG'] = 'debug'

    if not_set('PYTHONTRACEMALLOC') and debug:
        # This slows the tests down quite a bit. Reserve
        # for debugging.
        os.environ['PYTHONTRACEMALLOC'] = '10'

    if not_set('PYTHONDEVMODE'):
        # Python 3.7 and above.
        os.environ['PYTHONDEVMODE'] = '1'

    if not_set('PYTHONMALLOC') and debug:
        # Python 3.6 and above.
        # This slows the tests down some, but
        # can detect memory corruption. Unfortunately
        # it can also be flaky, especially in pre-release
        # versions of Python (e.g., lots of crashes on Python 3.8b4).
        os.environ['PYTHONMALLOC'] = 'debug'

    if sys.version_info.releaselevel != 'final' and not debug:
        # Pre-release CPythons are too crash-prone under the debug
        # allocator / dev mode; back those off.
        os.environ['PYTHONMALLOC'] = 'default'
        os.environ['PYTHONDEVMODE'] = ''

    # PYTHONSAFEPATH breaks the assumptions of some tests, notably test_interpreters.py
    os.environ.pop('PYTHONSAFEPATH', None)

    interesting_envs = {
        k: os.environ[k]
        for k in os.environ
        if k.startswith(('PYTHON', 'GEVENT'))
    }
    widest_k = max(len(k) for k in interesting_envs)
    for k, v in sorted(interesting_envs.items()):
        util.log('%*s\t=\t%s', widest_k, k, v, color="debug")
def main():
    """
    Command-line entry point: parse arguments, configure coverage and
    the environment, discover tests, then either print them
    (``--discover``) or execute them via ``Runner``.
    """
    # pylint:disable=too-many-locals,too-many-statements,too-many-branches
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--ignore')
    parser.add_argument(
        '--discover', action='store_true',
        help="Only print the tests found."
    )
    parser.add_argument(
        '--config', default='known_failures.py',
        help="The path to the config file containing "
        "FAILING_TESTS, IGNORED_TESTS and RUN_ALONE. "
        "Defaults to %(default)s."
    )
    parser.add_argument(
        "--coverage", action="store_true",
        help="Enable coverage recording with coverage.py."
    )
    # TODO: Quiet and verbose should be mutually exclusive
    parser.add_argument(
        "--quiet", action="store_true", default=True,
        help="Be quiet. Defaults to %(default)s. Also the "
        "GEVENTTEST_QUIET environment variable."
    )
    parser.add_argument("--verbose", action="store_false", dest='quiet')
    parser.add_argument(
        "--debug", action="store_true", default=False,
        help="Enable debug settings. If the GEVENT_DEBUG environment variable is not set, "
        "this sets it to 'debug'. This can also enable PYTHONTRACEMALLOC and the debug PYTHONMALLOC "
        "allocators, if not already set. Defaults to %(default)s."
    )
    parser.add_argument(
        "--package", default="gevent.tests",
        help="Load tests from the given package. Defaults to %(default)s."
    )
    parser.add_argument(
        "--processes", "-j", default=DEFAULT_NWORKERS, type=int,
        help="Use up to the given number of parallel processes to execute tests. "
        "Defaults to %(default)s."
    )
    parser.add_argument(
        '--no-combine', default=True, action='store_false',
        help="Do not combine tests into process groups."
    )
    parser.add_argument('-u', '--use', metavar='RES1,RES2,...',
                        action='store', type=parse_resources,
                        help='specify which special resource intensive tests '
                        'to run. "all" is the default; "none" may also be used. '
                        'Disable individual resources with a leading -.'
                        'For example, "-u-network". GEVENTTEST_USE_RESOURCES is used '
                        'if no argument is given. To only use one resources, specify '
                        '"-unone,resource".')

    parser.add_argument("--travis-fold", metavar="MSG",
                        help="Emit Travis CI log fold markers around the output.")

    fail_parser = parser.add_mutually_exclusive_group()
    fail_parser.add_argument(
        "--second-chance", action="store_true", default=False,
        help="Give failed tests a second chance.")
    fail_parser.add_argument(
        '--failfast', '-x', action='store_true', default=False,
        help="Stop running after the first failure.")

    parser.add_argument('tests', nargs='*')
    options = parser.parse_args()
    # options.use will be either None for not given, or a list
    # of the last specified -u argument.
    # If not given, use the default, which we'll take from the environment, if set.
    options.use = list(set(parse_resources() if options.use is None else options.use))

    # Whether or not it came from the environment, put it in the
    # environment now.
    os.environ['GEVENTTEST_USE_RESOURCES'] = unparse_resources(options.use)
    setup_resources(options.use)

    # Set this before any test imports in case of 'from .util import QUIET';
    # not that this matters much because we spawn tests in subprocesses,
    # it's the environment setting that matters
    util.QUIET = options.quiet
    if 'GEVENTTEST_QUIET' not in os.environ:
        os.environ['GEVENTTEST_QUIET'] = str(options.quiet)

    FAILING_TESTS = []
    IGNORED_TESTS = []
    RUN_ALONE = []

    coverage = False
    if options.coverage or os.environ.get("GEVENTTEST_COVERAGE"):
        if PYPY and RUNNING_ON_CI:
            print("Ignoring coverage option on PyPy on CI; slow")
        else:
            coverage = True
            cov_config = os.environ['COVERAGE_PROCESS_START'] = os.path.abspath(".coveragerc")
            if PYPY:
                cov_config = os.environ['COVERAGE_PROCESS_START'] = os.path.abspath(".coveragerc-pypy")
            this_dir = os.path.dirname(__file__)
            site_dir = os.path.join(this_dir, 'coveragesite')
            site_dir = os.path.abspath(site_dir)
            os.environ['PYTHONPATH'] = site_dir + os.pathsep + os.environ.get("PYTHONPATH", "")
            # We change directory often, use an absolute path to keep all the
            # coverage files (which will have distinct suffixes because of parallel=true in .coveragerc
            # in this directory; makes them easier to combine and use with coverage report)
            os.environ['COVERAGE_FILE'] = os.path.abspath(".") + os.sep + ".coverage"
            # XXX: Log this with color. Right now, it interferes (buffering) with other early
            # output.
            print("Enabling coverage to", os.environ['COVERAGE_FILE'],
                  "with site", site_dir,
                  "and configuration file", cov_config)
            assert os.path.exists(cov_config)
            assert os.path.exists(os.path.join(site_dir, 'sitecustomize.py'))

    _setup_environ(debug=options.debug)

    # NOTE(review): 'config' is only bound inside this branch; passing an
    # empty --config would raise NameError at the Discovery() call below.
    if options.config:
        config = {}
        options.config = _package_relative_filename(options.config, options.package)
        with open(options.config) as f: # pylint:disable=unspecified-encoding
            config_data = f.read()
        # The config file is executed as Python code.
        six.exec_(config_data, config)
        FAILING_TESTS = config['FAILING_TESTS']
        IGNORED_TESTS = config['IGNORED_TESTS']
        RUN_ALONE = config['RUN_ALONE']

    tests = Discovery(
        options.tests,
        ignore_files=options.ignore,
        ignored=IGNORED_TESTS,
        coverage=coverage,
        package=options.package,
        config=config,
        allow_combine=options.no_combine,
    )
    if options.discover:
        # NOTE: this loop rebinds 'options' to each test's options dict,
        # shadowing the argparse namespace; safe only because nothing
        # else in this branch uses the namespace afterwards.
        for cmd, options in tests:
            print(util.getname(cmd, env=options.get('env'), setenv=options.get('setenv')))
        print('%s tests found.' % len(tests))
    else:
        if PYPY and RESOLVER_ARES:
            # XXX: Add a way to force these.
            print("Not running tests on pypy with c-ares; not a supported configuration")
            return
        if options.package:
            # Put this directory on the path so relative imports work.
            package_dir = _dir_from_package_name(options.package)
            os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', "") + os.pathsep + package_dir
        allowed_return_codes = ()
        if sys.version_info[:3] >= (3, 12, 1):
            # unittest suddenly started failing with this return code
            # if all tests in a module are skipped in 3.12.1.
            allowed_return_codes += (5,)
        runner = Runner(
            tests,
            allowed_return_codes=allowed_return_codes,
            configured_failing_tests=FAILING_TESTS,
            failfast=options.failfast,
            quiet=options.quiet,
            configured_run_alone_tests=RUN_ALONE,
            worker_count=options.processes,
            second_chance=options.second_chance,
        )
        if options.travis_fold:
            runner = TravisFoldingRunner(runner, options.travis_fold)
        runner()
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,138 @@
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gevent
from gevent._compat import perf_counter
from . import sysinfo
from . import leakcheck
from .testcase import TestCase
# Timing tolerances (in seconds) used by the wait/get mixins below.
SMALLEST_RELIABLE_DELAY = 0.001 # 1ms, because of libuv

SMALL_TICK = 0.01
SMALL_TICK_MIN_ADJ = SMALLEST_RELIABLE_DELAY
SMALL_TICK_MAX_ADJ = 0.11
if sysinfo.RUNNING_ON_APPVEYOR:
    # Timing resolution is extremely poor on Appveyor
    # and subject to jitter.
    SMALL_TICK_MAX_ADJ = 1.5

LARGE_TICK = 0.2
LARGE_TICK_MIN_ADJ = LARGE_TICK / 2.0
LARGE_TICK_MAX_ADJ = SMALL_TICK_MAX_ADJ
class _DelayWaitMixin(object):
    """
    TestCase mixin asserting that ``wait(timeout)`` returns within the
    configured tolerances of the requested timeout.  Subclasses must
    implement :meth:`wait`.
    """

    _default_wait_timeout = SMALL_TICK
    _default_delay_min_adj = SMALL_TICK_MIN_ADJ
    _default_delay_max_adj = SMALL_TICK_MAX_ADJ

    def wait(self, timeout):
        """Block for up to *timeout* seconds; subclasses implement this."""
        raise NotImplementedError('override me in subclass')

    def _check_delay_bounds(self, timeout, delay,
                            delay_min_adj=None,
                            delay_max_adj=None):
        # Fall back to the class-level tolerances when none are given.
        delay_min_adj = self._default_delay_min_adj if not delay_min_adj else delay_min_adj
        delay_max_adj = self._default_delay_max_adj if not delay_max_adj else delay_max_adj
        self.assertTimeWithinRange(delay,
                                   timeout - delay_min_adj,
                                   timeout + delay_max_adj)

    def _wait_and_check(self, timeout=None):
        """Call wait() and verify the elapsed time, even on exceptions."""
        if timeout is None:
            timeout = self._default_wait_timeout

        # gevent.timer instances have a 'seconds' attribute,
        # otherwise it's the raw number
        seconds = getattr(timeout, 'seconds', timeout)
        # Sync the loop's cached "now" so the measured delay is fair.
        gevent.get_hub().loop.update_now()
        start = perf_counter()
        try:
            result = self.wait(timeout)
        finally:
            self._check_delay_bounds(seconds, perf_counter() - start,
                                     self._default_delay_min_adj,
                                     self._default_delay_max_adj)
        return result

    def test_outer_timeout_is_not_lost(self):
        # A shorter Timeout armed outside wait() must still fire and be
        # the very exception the caller observes.
        timeout = gevent.Timeout.start_new(SMALLEST_RELIABLE_DELAY, ref=False)
        try:
            with self.assertRaises(gevent.Timeout) as exc:
                self.wait(timeout=1)
            self.assertIs(exc.exception, timeout)
        finally:
            timeout.close()
class AbstractGenericWaitTestCase(_DelayWaitMixin, TestCase):
    """
    Abstract base for APIs (join, wait) that return ``None`` when their
    timeout expires.  Uses the larger, more forgiving tolerances.
    """
    # pylint:disable=abstract-method

    _default_wait_timeout = LARGE_TICK
    _default_delay_min_adj = LARGE_TICK_MIN_ADJ
    _default_delay_max_adj = LARGE_TICK_MAX_ADJ

    @leakcheck.ignores_leakcheck # waiting checks can be very sensitive to timing
    def test_returns_none_after_timeout(self):
        result = self._wait_and_check()
        # join and wait simply return after timeout expires
        self.assertIsNone(result)
class AbstractGenericGetTestCase(_DelayWaitMixin, TestCase):
    """
    Abstract base for APIs (get) that raise ``Timeout`` when their
    timeout expires.
    """
    # pylint:disable=abstract-method

    Timeout = gevent.Timeout

    def cleanup(self):
        # Hook for subclasses to release whatever was waited upon.
        pass

    def test_raises_timeout_number(self):
        with self.assertRaises(self.Timeout):
            self._wait_and_check(timeout=SMALL_TICK)
        # get raises Timeout after timeout expired
        self.cleanup()

    def test_raises_timeout_Timeout(self):
        # NOTE(review): if _wait_and_check does NOT raise, this test
        # passes silently — there is no assertion on the success path.
        timeout = gevent.Timeout(self._default_wait_timeout)
        try:
            self._wait_and_check(timeout=timeout)
        except gevent.Timeout as ex:
            self.assertIs(ex, timeout)
        finally:
            timeout.close()
        self.cleanup()

    def test_raises_timeout_Timeout_exc_customized(self):
        # A Timeout armed with a custom exception must raise that
        # exception instead of itself.
        error = RuntimeError('expected error')
        timeout = gevent.Timeout(self._default_wait_timeout, exception=error)
        try:
            with self.assertRaises(RuntimeError) as exc:
                self._wait_and_check(timeout=timeout)
            self.assertIs(exc.exception, error)
            self.cleanup()
        finally:
            timeout.close()

View File

@@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
# Support functions for travis
# See https://github.com/travis-ci/travis-rubies/blob/9f7962a881c55d32da7c76baefc58b89e3941d91/build.sh
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# Registry mapping sub-command name -> zero-argument callable that
# invokes it with the remaining command-line arguments.
commands = {}


def command(func):
    """Decorator: register *func* as a CLI sub-command under its name."""
    def _invoke():
        return func(*sys.argv[2:])
    commands[func.__name__] = _invoke
    return func
@command
def fold_start(name, msg):
    """Emit a travis_fold start marker for *name*, then *msg* in bold yellow."""
    esc = chr(0o33)  # ESC
    write = sys.stdout.write
    write('travis_fold:start:')
    write(name)
    write(esc + '[33;1m')
    write(msg)
    write(esc + '[33;0m\n')
@command
def fold_end(name):
    """Emit the travis_fold end marker for *name*."""
    sys.stdout.write("\ntravis_fold:end:" + name + "\r\n")
def main():
    """Dispatch to the sub-command named by ``sys.argv[1]``."""
    commands[sys.argv[1]]()
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,655 @@
"""
.. caution::
This module imports `subprocess` and `threading`; this can
break monkey-patched unittests. Specifically,
``test_threading.ThreadTests.test_import_from_another_thread``.
"""
import re
import sys
import os
import traceback
import unittest
import threading
import subprocess
from time import sleep
from . import six
from gevent._config import validate_bool
from gevent._compat import perf_counter
from gevent.monkey import get_original
# pylint: disable=broad-except,attribute-defined-outside-init
# When true, run() captures the child's combined output and replays it
# afterwards instead of letting the child inherit our streams.
BUFFER_OUTPUT = False

# This is set by the testrunner, defaulting to true (be quiet)
# But if we're run standalone, default to false
QUIET = validate_bool(os.environ.get('GEVENTTEST_QUIET', '0'))
class Popen(subprocess.Popen):
    """
    Depending on when we're imported and if the process has been monkey-patched,
    this could use cooperative or native Popen.
    """

    # Watchdog armed by start(timeout=...); cancelled again in kill().
    timer = None # a threading.Timer instance

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Make sure the child (and its watchdog timer) are cleaned up.
        kill(self)
# Coloring code based on zope.testrunner
# These colors are carefully chosen to have enough contrast
# on terminals with both black and white background.
_colorscheme = {
'normal': 'normal',
'default': 'default',
'actual-output': 'red',
'character-diffs': 'magenta',
'debug': 'cyan',
'diff-chunk': 'magenta',
'error': 'brightred',
'error-number': 'brightred',
'exception': 'red',
'expected-output': 'green',
'failed-example': 'cyan',
'filename': 'lightblue',
'info': 'normal',
'lineno': 'lightred',
'number': 'green',
'ok-number': 'green',
'skipped': 'brightyellow',
'slow-test': 'brightmagenta',
'suboptimal-behaviour': 'magenta',
'testname': 'lightcyan',
'warning': 'cyan',
}
_prefixes = [
('dark', '0;'),
('light', '1;'),
('bright', '1;'),
('bold', '1;'),
]
_colorcodes = {
'default': 0,
'normal': 0,
'black': 30,
'red': 31,
'green': 32,
'brown': 33, 'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'grey': 37, 'gray': 37, 'white': 37
}
def _color_code(color):
prefix_code = ''
for prefix, code in _prefixes:
if color.startswith(prefix):
color = color[len(prefix):]
prefix_code = code
break
color_code = _colorcodes[color]
return '\033[%s%sm' % (prefix_code, color_code)
def _color(what):
return _color_code(_colorscheme[what])
def _colorize(what, message, normal='normal'):
return _color(what) + message + _color(normal)
def log(message, *args, **kwargs):
    """
    Write *message* (%-formatted with *args* when given) to stderr,
    colorized and followed by a newline.

    :keyword str color: One of the _colorscheme keys; defaults to 'normal'.
    """
    color = kwargs.pop('color', 'normal')
    text = message % args if args else message
    text = _colorize(color, text)
    # Serialize writes so concurrent runner threads don't interleave.
    with output_lock: # pylint:disable=not-context-manager
        sys.stderr.write(text + '\n')
def debug(message, *args, **kwargs):
    """Like :func:`log`, but suppressed entirely in quiet mode."""
    if QUIET:
        return
    kwargs.setdefault('color', 'debug')
    log(message, *args, **kwargs)
def killpg(pid):
    """Best-effort SIGKILL of the process group *pid* (POSIX only)."""
    if not hasattr(os, 'killpg'):
        # Not available (e.g., Windows): silently do nothing.
        return
    try:
        return os.killpg(pid, 9)
    except OSError as ex:
        # errno 3 is ESRCH (no such process); that's fine and not logged.
        if ex.errno != 3:
            log('killpg(%r, 9) failed: %s: %s', pid, type(ex).__name__, ex)
    except Exception as ex:
        log('killpg(%r, 9) failed: %s: %s', pid, type(ex).__name__, ex)
def kill_processtree(pid):
    """
    Windows only: forcefully terminate *pid* and its entire child tree
    via ``taskkill``, logging any unexpected error output.
    """
    ignore_msg = 'ERROR: The process "%s" not found.' % pid
    err = Popen('taskkill /F /PID %s /T' % pid, stderr=subprocess.PIPE).communicate()[1]
    if isinstance(err, bytes):
        # communicate() returns bytes when the pipe isn't opened in text
        # mode. The previous code compared bytes to str, so the
        # "not found" message was never actually ignored on Python 3.
        err = err.decode('utf-8', 'replace')
    if err and err.strip() not in [ignore_msg, '']:
        log('%r', err)
def _kill(popen):
    """Terminate the single process *popen*, ignoring already-dead errors."""
    if hasattr(popen, 'kill'):
        try:
            popen.kill()
        except OSError as ex:
            if ex.errno == 3: # No such process
                return
            if ex.errno == 13: # Permission denied (translated from windows error 5: "Access is denied")
                return
            raise
    else:
        # Fallback for Popen objects lacking .kill(): raw SIGKILL.
        try:
            os.kill(popen.pid, 9)
        except EnvironmentError:
            pass
def kill(popen):
    """
    Kill *popen* (and its process tree/group), cancel its watchdog
    timer, and reap it.  Safe to call more than once; errors are
    printed, never raised.
    """
    if popen.timer is not None:
        popen.timer.cancel()
        popen.timer = None
    if popen.poll() is not None:
        # Already exited: nothing to kill.
        return
    popen.was_killed = True
    try:
        if getattr(popen, 'setpgrp_enabled', None):
            # The child leads its own process group: kill the whole group.
            killpg(popen.pid)
        elif sys.platform.startswith('win'):
            kill_processtree(popen.pid)
    except Exception:
        traceback.print_exc()
    try:
        _kill(popen)
    except Exception:
        traceback.print_exc()
    try:
        # Reap the child so it doesn't linger as a zombie.
        popen.wait()
    except Exception:
        traceback.print_exc()
# A set of environment keys we ignore for printing purposes
# (they don't affect which tests run or how).
IGNORED_GEVENT_ENV_KEYS = {
    'GEVENTTEST_QUIET',
    'GEVENT_DEBUG',
    'GEVENTSETUP_EV_VERIFY',
    'GEVENTSETUP_EMBED',
}

# A set of (name, value) pairs we ignore for printing purposes.
# These should match the defaults.
IGNORED_GEVENT_ENV_ITEMS = {
    ('GEVENT_RESOLVER', 'thread'),
    ('GEVENT_RESOLVER_NAMESERVERS', '8.8.8.8'),
    ('GEVENTTEST_USE_RESOURCES', 'all'),
}
def getname(command, env=None, setenv=None):
    """
    Build a human-readable name for *command*: the non-default GEVENT*
    environment variables in effect, followed by the command itself.
    """
    effective_env = (env or os.environ).copy()
    effective_env.update(setenv or {})

    parts = []
    for key, value in sorted(effective_env.items()):
        if not key.startswith('GEVENT'):
            continue
        # Skip variables (and default pairs) that carry no information.
        if key in IGNORED_GEVENT_ENV_KEYS:
            continue
        if (key, value) in IGNORED_GEVENT_ENV_ITEMS:
            continue
        parts.append('%s=%s' % (key, value))

    if isinstance(command, six.string_types):
        parts.append(command)
    else:
        parts.extend(command)

    return ' '.join(parts)
def start(command, quiet=False, **kwargs):
    """
    Launch *command* via :class:`Popen` and return the popen object.

    :keyword timeout: When given, arm a daemon ``threading.Timer`` that
        calls :func:`kill` on the child after that many seconds.
    :keyword env: Base environment for the child (defaults to ours).
    :keyword setenv: Extra variables layered on top of *env*.
    """
    timeout = kwargs.pop('timeout', None)
    preexec_fn = None
    if not os.environ.get('DO_NOT_SETPGRP'):
        # Put the child in its own process group so its whole tree can
        # be killed on timeout (POSIX only; None elsewhere).
        preexec_fn = getattr(os, 'setpgrp', None)
    env = kwargs.pop('env', None)
    setenv = kwargs.pop('setenv', None) or {}
    name = getname(command, env=env, setenv=setenv)
    if preexec_fn is not None:
        # Tell grandchildren not to create further process groups.
        setenv['DO_NOT_SETPGRP'] = '1'
    if setenv:
        env = env.copy() if env else os.environ.copy()
        env.update(setenv)

    if not quiet:
        log('+ %s', name)
    popen = Popen(command, preexec_fn=preexec_fn, env=env, **kwargs)
    popen.name = name
    popen.setpgrp_enabled = preexec_fn is not None
    popen.was_killed = False
    if timeout is not None:
        # Use the un-monkey-patched Timer so the watchdog runs in a real
        # OS thread. Assign popen.timer *before* starting it so kill()
        # can always cancel it; the previous code assigned it twice.
        t = get_original('threading', 'Timer')(timeout, kill, args=(popen, ))
        t.daemon = True
        popen.timer = t
        t.start()
    return popen
class RunResult(object):
    """
    The results of running an external command.

    Truthy exactly when the command exited successfully (code 0); the
    integer value of this object is the command's exit code.
    """

    def __init__(self,
                 command,
                 run_kwargs,
                 code,
                 output=None, # type: str
                 error=None, # type: str
                 name=None,
                 run_count=0, skipped_count=0,
                 run_duration=0, # type: float
                ):
        # The argv list (or string) that was executed.
        self.command = command
        # Keyword arguments that were passed to Popen.
        self.run_kwargs = run_kwargs
        # Exit code; 0 means success.
        self.code = code
        # Captured combined output, when buffering was enabled.
        self.output = output
        self.error = error
        # Human-readable name (env summary plus command).
        self.name = name
        # Counts parsed from the unittest summary line.
        self.run_count = run_count
        self.skipped_count = skipped_count
        # Wall-clock seconds the command took.
        self.run_duration = run_duration

    @property
    def output_lines(self):
        """The captured output, split into individual lines."""
        return self.output.splitlines()

    def __bool__(self):
        # Success <=> exit code of zero.
        return not bool(self.code)

    __nonzero__ = __bool__ # Python 2 spelling

    def __int__(self):
        return self.code

    def __repr__(self):
        template = (
            "RunResult of: %r\n"
            "Code: %s\n"
            "kwargs: %r\n"
            "Output:\n"
            "----\n"
            "%s"
            "----\n"
            "Error:\n"
            "----\n"
            "%s"
            "----\n"
        )
        return template % (
            self.command,
            self.code,
            self.run_kwargs,
            self.output,
            self.error,
        )
def _should_show_warning_output(out):
if 'Warning' in out:
# Strip out some patterns we specifically do not
# care about.
# from test.support for monkey-patched tests
out = out.replace('Warning -- reap_children', 'NADA')
out = out.replace("Warning -- threading_cleanup", 'NADA')
# The below *could* be done with sophisticated enough warning
# filters passed to the children
# collections.abc is the new home; setuptools uses the old one,
# as does dnspython
out = out.replace("DeprecationWarning: Using or importing the ABCs", 'NADA')
# libuv poor timer resolution
out = out.replace('UserWarning: libuv only supports', 'NADA')
# Packages on Python 2
out = out.replace('ImportWarning: Not importing directory', 'NADA')
# Testing that U mode does the same thing
out = out.replace("DeprecationWarning: 'U' mode is deprecated", 'NADA')
out = out.replace("DeprecationWarning: dns.hash module", 'NADA')
return 'Warning' in out
# Serializes writes to stderr from concurrent runner threads (see log()).
output_lock = threading.Lock()
def _find_test_status(took, out):
status = '[took %.1fs%s]'
skipped = ''
run_count = 0
skipped_count = 0
if out:
m = re.search(r"Ran (\d+) tests in", out)
if m:
result = out[m.start():m.end()]
status = status.replace('took', result)
run_count = int(out[m.start(1):m.end(1)])
m = re.search(r' \(skipped=(\d+)\)$', out)
if m:
skipped = _colorize('skipped', out[m.start():m.end()])
skipped_count = int(out[m.start(1):m.end(1)])
status = status % (took, skipped) # pylint:disable=consider-using-augmented-assign
if took > 10:
status = _colorize('slow-test', status)
return status, run_count, skipped_count
def run(command, **kwargs): # pylint:disable=too-many-locals
    """
    Execute *command*, returning a `RunResult`.

    This blocks until *command* finishes or until it times out.

    Keyword arguments consumed here (all others are passed to ``start``):

    - ``buffer_output`` (default ``BUFFER_OUTPUT``): capture the child's
      stdout and stderr (merged into one stream) instead of inheriting them.
    - ``quiet`` (default ``QUIET``): when true, suppress verbose output logging.
    - ``nested`` (default False): when true, skip the per-command success log line.
    - ``allowed_return_codes`` (default ``()``): non-zero exit codes to treat
      as success for callers; they are logged specially and reported as 0.
    """
    buffer_output = kwargs.pop('buffer_output', BUFFER_OUTPUT)
    quiet = kwargs.pop('quiet', QUIET)
    verbose = not quiet
    nested = kwargs.pop('nested', False)
    allowed_return_codes = kwargs.pop('allowed_return_codes', ())
    if buffer_output:
        assert 'stdout' not in kwargs and 'stderr' not in kwargs, kwargs
        # Merge stderr into stdout so there is a single captured stream.
        kwargs['stderr'] = subprocess.STDOUT
        kwargs['stdout'] = subprocess.PIPE
    popen = start(command, quiet=quiet, **kwargs)
    name = popen.name
    try:
        time_start = perf_counter()
        out, err = popen.communicate()
        duration = perf_counter() - time_start
        if popen.was_killed or popen.poll() is None:
            # Either the watchdog killed it or it still hasn't exited.
            result = 'TIMEOUT'
        else:
            result = popen.poll()
    finally:
        # Always reap/terminate the child, even if communicate() raised.
        kill(popen)
    assert popen.timer is None
    # We don't want to treat return codes that are allowed as failures,
    # but we do want to log those specially. That's why we retain the distinction
    # between ``failed`` and ``result`` (failed takes the allowed codes into account).
    failed = bool(result) and result not in allowed_return_codes
    if out:
        # ``out`` may be bytes (pipe) on some platforms; normalize to str.
        out = out.strip()
        out = out if isinstance(out, str) else out.decode('utf-8', 'ignore')
    if out and (failed or verbose or _should_show_warning_output(out)):
        # Indent the child's output beneath its log header for readability.
        out = ' ' + out.replace('\n', '\n ')
        out = out.rstrip()
        out += '\n'
        log('| %s\n%s', name, out)
    status, run_count, skipped_count = _find_test_status(duration, out)
    if result:
        # Non-zero exit (or 'TIMEOUT'): error color unless the code was allowed.
        log('! %s [code %s] %s', name, result, status,
            color='error' if failed else 'suboptimal-behaviour')
    elif not nested:
        log('- %s %s', name, status)
    # For everything outside this function, we need to pretend that
    # allowed codes are actually successes.
    return RunResult(
        command, kwargs,
        0 if result in allowed_return_codes else result,
        output=out, error=err,
        name=name,
        run_count=run_count,
        skipped_count=skipped_count,
        run_duration=duration,
    )
class NoSetupPyFound(Exception):
    """Raised by find_setup_py_above when no setup.py can be located."""
def find_setup_py_above(a_file):
    """
    Return the first directory at or above the directory containing
    *a_file* that holds a ``setup.py`` file.

    Raises `NoSetupPyFound` if the filesystem root is reached without
    finding one.
    """
    current = os.path.dirname(os.path.abspath(a_file))
    while True:
        if os.path.exists(os.path.join(current, 'setup.py')):
            return current
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root; stop instead of looping forever.
            raise NoSetupPyFound('could not find my setup.py above %r' % (a_file,))
        current = parent
def search_for_setup_py(a_file=None, a_module_name=None, a_class=None, climb_cwd=True):
    """
    Find the directory containing ``setup.py`` by trying several
    starting points in order: *a_file* itself, the file defining
    *a_class*, the file of the module named *a_module_name*, and
    finally the current working directory (when *climb_cwd* is true).

    Returns the first directory found; raises `NoSetupPyFound` if
    every attempt fails.

    .. note:: The final failure message embeds ``locals()``, so it
       reports exactly which candidates were supplied and checked.
    """
    if a_file is not None:
        try:
            return find_setup_py_above(a_file)
        except NoSetupPyFound:
            pass
    if a_class is not None:
        try:
            # Start from the source file of the module that defined the class.
            return find_setup_py_above(sys.modules[a_class.__module__].__file__)
        except NoSetupPyFound:
            pass
    if a_module_name is not None:
        try:
            return find_setup_py_above(sys.modules[a_module_name].__file__)
        except NoSetupPyFound:
            pass
    if climb_cwd:
        # "./dne" need not exist; abspath() just anchors the walk at the cwd.
        return find_setup_py_above("./dne")
    raise NoSetupPyFound("After checking %r" % (locals(),))
def _version_dir_components():
directory = '%s.%s' % sys.version_info[:2]
full_directory = '%s.%s.%s' % sys.version_info[:3]
if hasattr(sys, 'pypy_version_info'):
directory += 'pypy'
full_directory += 'pypy'
return directory, full_directory
def find_stdlib_tests():
    """
    Return a sequence of directories that could contain
    stdlib tests for the running version of Python.

    The most specific tests are at the end of the sequence.

    No checks are performed on existence of the directories.
    """
    setup_py = search_for_setup_py(a_file=__file__)
    greentest = os.path.join(setup_py, 'src', 'greentest')
    # _version_dir_components() already produces the version (and PyPy)
    # qualified names; the previous implementation redundantly recomputed
    # and overwrote the exact same values inline after calling it.
    directory, full_directory = _version_dir_components()
    directory = os.path.join(greentest, directory)
    full_directory = os.path.join(greentest, full_directory)
    return directory, full_directory
def absolute_pythonpath():
    """
    Return the PYTHONPATH environment variable (if set) with each
    entry converted to an absolute path. If not set, returns None.
    """
    raw = os.environ.get('PYTHONPATH')
    if raw is None:
        return None
    return os.path.pathsep.join(
        os.path.abspath(entry) for entry in raw.split(os.path.pathsep)
    )
class ExampleMixin(object):
    """
    Something that uses the ``examples/`` directory
    from the root of the gevent distribution.

    The `cwd` property is set to the root of the gevent distribution.
    """
    #: Arguments to pass to the example file.
    example_args = []
    #: Seconds `TestServer.before` sleeps before checking the child started.
    before_delay = 3
    #: Seconds `TestServer.after` sleeps before checking the child survived.
    after_delay = 0.5
    #: Path of the example Python file, relative to `cwd`
    example = None # subclasses define this to be the path to the server.py
    #: Keyword arguments to pass to the start or run method.
    start_kwargs = None

    def find_setup_py(self):
        "Return the directory containing setup.py"
        return search_for_setup_py(
            a_file=__file__,
            a_class=type(self)
        )

    @property
    def cwd(self):
        # The 'examples' directory under the distribution root; skips
        # the current test when the root cannot be located.
        try:
            root = self.find_setup_py()
        except NoSetupPyFound as e:
            raise unittest.SkipTest("Unable to locate file/dir to run: %s" % (e,))
        return os.path.join(root, 'examples')

    @property
    def setenv(self):
        """
        Returns a dictionary of environment variables to set for the
        child in addition to (or replacing) the ones already in the
        environment.

        Since the child is run in `cwd`, relative paths in ``PYTHONPATH``
        need to be converted to absolute paths.
        """
        abs_pythonpath = absolute_pythonpath()
        return {'PYTHONPATH': abs_pythonpath} if abs_pythonpath else None

    def _start(self, meth):
        """
        Launch the example via *meth* (``start`` or ``run``) with the
        configured arguments, working directory and environment.
        """
        # Guard against stale/legacy attributes from a misconfigured subclass.
        if getattr(self, 'args', None):
            raise AssertionError("Invalid test", self, self.args)
        if getattr(self, 'server', None):
            raise AssertionError("Invalid test", self, self.server)
        try:
            # These could be or are properties that can raise
            server = self.example
            server_dir = self.cwd
        except NoSetupPyFound as e:
            raise unittest.SkipTest("Unable to locate file/dir to run: %s" % (e,))
        kwargs = self.start_kwargs or {}
        setenv = self.setenv
        if setenv:
            # Merge into caller-provided setenv rather than replacing it.
            if 'setenv' in kwargs:
                kwargs['setenv'].update(setenv)
            else:
                kwargs['setenv'] = setenv
        # Run the example unbuffered (-u) and with warnings ignored (-W ignore).
        return meth(
            [sys.executable, '-W', 'ignore', '-u', server] + self.example_args,
            cwd=server_dir,
            **kwargs
        )

    def start_example(self):
        """Launch the example in the background; returns the object from `start`."""
        return self._start(meth=start)

    def run_example(self): # named run_example because run() is a unittest method.
        """Run the example to completion; returns the result of `run`."""
        return self._start(meth=run)
class TestServer(ExampleMixin,
                 unittest.TestCase):
    """
    Runs the example server (``self.example``) in a child process and
    executes every ``_test*`` method while it is running.
    """
    #: The running child process object, set while the server is up.
    popen = None

    def running_server(self):
        """
        Return a context manager that starts the example server,
        verifies it came up (`before`), runs the body, and then
        verifies it is still alive (`after`).
        """
        from contextlib import contextmanager

        @contextmanager
        def running_server():
            with self.start_example() as popen:
                self.popen = popen
                self.before()
                yield
                self.after()
        return running_server()

    def test(self):
        # Single unittest entry point: run all _test* methods against
        # the live server.
        with self.running_server():
            self._run_all_tests()

    def before(self):
        # Give the server time to start, then assert it didn't die on startup.
        if self.before_delay is not None:
            sleep(self.before_delay)
        self.assertIsNone(self.popen.poll(),
                          '%s died with code %s' % (
                              self.example, self.popen.poll(),
                          ))

    def after(self):
        # After the tests ran, assert the server survived them.
        if self.after_delay is not None:
            sleep(self.after_delay)
        self.assertIsNone(self.popen.poll(),
                          '%s died with code %s' % (
                              self.example, self.popen.poll(),
                          ))

    def _run_all_tests(self):
        # Invoke every callable attribute named '_test*' in sorted
        # (deterministic) order; at least one must exist.
        ran = False
        for method in sorted(dir(self)):
            if method.startswith('_test'):
                function = getattr(self, method)
                if callable(function):
                    function()
                    ran = True
        assert ran
class alarm(threading.Thread):
    """
    Watchdog that hard-exits the whole process after *timeout* seconds.

    Used because signal.alarm is unavailable on Windows. The thread is
    a daemon and starts itself on construction.
    """

    def __init__(self, timeout):
        super(alarm, self).__init__()
        self.daemon = True
        self.timeout = timeout
        self.start()

    def run(self):
        sleep(self.timeout)
        sys.stderr.write('Timeout.\n')
        # Hard exit: bypass cleanup handlers, which may be wedged.
        os._exit(5)