mit neuen venv und exe-Files
This commit is contained in:
114
venv3_12/Lib/site-packages/gevent/__init__.py
Normal file
114
venv3_12/Lib/site-packages/gevent/__init__.py
Normal file
@@ -0,0 +1,114 @@
|
||||
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
"""
|
||||
gevent is a coroutine-based Python networking library that uses greenlet
|
||||
to provide a high-level synchronous API on top of libev event loop.
|
||||
|
||||
See http://www.gevent.org/ for the documentation.
|
||||
|
||||
.. versionchanged:: 1.3a2
|
||||
Add the `config` object.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
_version_info = namedtuple('version_info',
|
||||
('major', 'minor', 'micro', 'releaselevel', 'serial'))
|
||||
|
||||
#: The programatic version identifier. The fields have (roughly) the
|
||||
#: same meaning as :data:`sys.version_info`
|
||||
#: .. deprecated:: 1.2
|
||||
#: Use ``pkg_resources.parse_version(__version__)`` (or the equivalent
|
||||
#: ``packaging.version.Version(__version__)``).
|
||||
version_info = _version_info(20, 0, 0, 'dev', 0) # XXX: Remove me
|
||||
|
||||
#: The human-readable PEP 440 version identifier.
|
||||
#: Use ``pkg_resources.parse_version(__version__)`` or
|
||||
#: ``packaging.version.Version(__version__)`` to get a machine-usable
|
||||
#: value.
|
||||
__version__ = '24.10.3'
|
||||
|
||||
|
||||
__all__ = [
|
||||
'Greenlet',
|
||||
'GreenletExit',
|
||||
'Timeout',
|
||||
'config', # Added in 1.3a2
|
||||
'fork',
|
||||
'get_hub',
|
||||
'getcurrent',
|
||||
'getswitchinterval',
|
||||
'idle',
|
||||
'iwait',
|
||||
'joinall',
|
||||
'kill',
|
||||
'killall',
|
||||
'reinit',
|
||||
'setswitchinterval',
|
||||
'signal_handler',
|
||||
'sleep',
|
||||
'spawn',
|
||||
'spawn_later',
|
||||
'spawn_raw',
|
||||
'wait',
|
||||
'with_timeout',
|
||||
]
|
||||
|
||||
|
||||
import sys
|
||||
if sys.platform == 'win32':
|
||||
# trigger WSAStartup call
|
||||
import socket # pylint:disable=unused-import,useless-suppression
|
||||
del socket
|
||||
|
||||
|
||||
# Floating point number, in number of seconds,
|
||||
# like time.time
|
||||
getswitchinterval = sys.getswitchinterval
|
||||
setswitchinterval = sys.setswitchinterval
|
||||
|
||||
from gevent._config import config
|
||||
from gevent._hub_local import get_hub
|
||||
from gevent._hub_primitives import iwait_on_objects as iwait
|
||||
from gevent._hub_primitives import wait_on_objects as wait
|
||||
|
||||
from gevent.greenlet import Greenlet, joinall, killall
|
||||
spawn = Greenlet.spawn
|
||||
spawn_later = Greenlet.spawn_later
|
||||
#: The singleton configuration object for gevent.
|
||||
|
||||
from gevent.timeout import Timeout, with_timeout
|
||||
from gevent.hub import getcurrent, GreenletExit, spawn_raw, sleep, idle, kill, reinit
|
||||
try:
|
||||
from gevent.os import fork
|
||||
except ImportError:
|
||||
__all__.remove('fork')
|
||||
|
||||
# This used to be available as gevent.signal; that broke in 1.1b4 but
|
||||
# a temporary alias was added (See
|
||||
# https://github.com/gevent/gevent/issues/648). It was ugly and complex and
|
||||
# caused confusion, so it was removed in 1.5. See https://github.com/gevent/gevent/issues/1529
|
||||
from gevent.hub import signal as signal_handler
|
||||
|
||||
# the following makes hidden imports visible to freezing tools like
|
||||
# py2exe. see https://github.com/gevent/gevent/issues/181
|
||||
# This is not well maintained or tested, though, so it likely becomes
|
||||
# outdated on each major release.
|
||||
|
||||
def __dependencies_for_freezing(): # pragma: no cover
|
||||
# pylint:disable=unused-import, import-outside-toplevel
|
||||
from gevent import core
|
||||
from gevent import resolver_thread
|
||||
from gevent import resolver_ares
|
||||
from gevent import socket as _socket
|
||||
from gevent import threadpool
|
||||
from gevent import thread
|
||||
from gevent import threading
|
||||
from gevent import select
|
||||
from gevent import subprocess
|
||||
import pprint
|
||||
import traceback
|
||||
import signal as _signal
|
||||
|
||||
del __dependencies_for_freezing
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
venv3_12/Lib/site-packages/gevent/__pycache__/os.cpython-312.pyc
Normal file
BIN
venv3_12/Lib/site-packages/gevent/__pycache__/os.cpython-312.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
546
venv3_12/Lib/site-packages/gevent/_abstract_linkable.py
Normal file
546
venv3_12/Lib/site-packages/gevent/_abstract_linkable.py
Normal file
@@ -0,0 +1,546 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
"""
|
||||
Internal module, support for the linkable protocol for "event" like objects.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
from gc import get_objects
|
||||
|
||||
from greenlet import greenlet
|
||||
from greenlet import error as greenlet_error
|
||||
|
||||
from gevent._compat import thread_mod_name
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
from gevent._hub_local import get_hub_if_exists
|
||||
|
||||
from gevent.exceptions import InvalidSwitchError
|
||||
from gevent.exceptions import InvalidThreadUseError
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
|
||||
__all__ = [
|
||||
'AbstractLinkable',
|
||||
]
|
||||
|
||||
# Need the real get_ident. We're imported early enough during monkey-patching
|
||||
# that we can be sure nothing is monkey patched yet.
|
||||
_get_thread_ident = __import__(thread_mod_name).get_ident
|
||||
_allocate_thread_lock = __import__(thread_mod_name).allocate_lock
|
||||
|
||||
class _FakeNotifier(object):
|
||||
__slots__ = (
|
||||
'pending',
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
self.pending = False
|
||||
|
||||
def get_roots_and_hubs():
|
||||
from gevent.hub import Hub # delay import
|
||||
return {
|
||||
x.parent: x
|
||||
for x in get_objects()
|
||||
# Make sure to only find hubs that have a loop
|
||||
# and aren't destroyed. If we don't do that, we can
|
||||
# get an old hub that no longer works leading to issues in
|
||||
# combined test cases.
|
||||
if isinstance(x, Hub) and x.loop is not None
|
||||
}
|
||||
|
||||
|
||||
class AbstractLinkable(object):
|
||||
# Encapsulates the standard parts of the linking and notifying
|
||||
# protocol common to both repeatable events (Event, Semaphore) and
|
||||
# one-time events (AsyncResult).
|
||||
#
|
||||
# With a few careful exceptions, instances of this object can only
|
||||
# be used from a single thread. The exception is that certain methods
|
||||
# may be used from multiple threads IFF:
|
||||
#
|
||||
# 1. They are documented as safe for that purpose; AND
|
||||
# 2a. This object is compiled with Cython and thus is holding the GIL
|
||||
# for the entire duration of the method; OR
|
||||
# 2b. A subclass ensures that a Python-level native thread lock is held
|
||||
# for the duration of the method; this is necessary in pure-Python mode.
|
||||
# The only known implementation of such
|
||||
# a subclass is for Semaphore. AND
|
||||
# 3. The subclass that calls ``capture_hub`` catches
|
||||
# and handles ``InvalidThreadUseError``
|
||||
#
|
||||
# TODO: As of gevent 1.5, we use the same datastructures and almost
|
||||
# the same algorithm as Greenlet. See about unifying them more.
|
||||
|
||||
__slots__ = (
|
||||
'hub',
|
||||
'_links',
|
||||
'_notifier',
|
||||
'_notify_all',
|
||||
'__weakref__'
|
||||
)
|
||||
|
||||
def __init__(self, hub=None):
|
||||
# Before this implementation, AsyncResult and Semaphore
|
||||
# maintained the order of notifications, but Event did not.
|
||||
|
||||
# In gevent 1.3, before Semaphore extended this class, that
|
||||
# was changed to not maintain the order. It was done because
|
||||
# Event guaranteed to only call callbacks once (a set) but
|
||||
# AsyncResult had no such guarantees. When Semaphore was
|
||||
# changed to extend this class, it lost its ordering
|
||||
# guarantees. Unfortunately, that made it unfair. There are
|
||||
# rare cases that this can starve a greenlet
|
||||
# (https://github.com/gevent/gevent/issues/1487) and maybe
|
||||
# even lead to deadlock (not tested).
|
||||
|
||||
# So in gevent 1.5 we go back to maintaining order. But it's
|
||||
# still important not to make duplicate calls, and it's also
|
||||
# important to avoid O(n^2) behaviour that can result from
|
||||
# naive use of a simple list due to the need to handle removed
|
||||
# links in the _notify_links loop. Cython has special support for
|
||||
# built-in sets, lists, and dicts, but not ordereddict. Rather than
|
||||
# use two data structures, or a dict({link: order}), we simply use a
|
||||
# list and remove objects as we go, keeping track of them so as not to
|
||||
# have duplicates called. This makes `unlink` O(n), but we can avoid
|
||||
# calling it in the common case in _wait_core (even so, the number of
|
||||
# waiters should usually be pretty small)
|
||||
self._links = []
|
||||
self._notifier = None
|
||||
# This is conceptually a class attribute, defined here for ease of access in
|
||||
# cython. If it's true, when notifiers fire, all existing callbacks are called.
|
||||
# If its false, we only call callbacks as long as ready() returns true.
|
||||
self._notify_all = True
|
||||
# we don't want to do get_hub() here to allow defining module-level objects
|
||||
# without initializing the hub. However, for multiple-thread safety, as soon
|
||||
# as a waiting method is entered, even if it won't have to wait, we
|
||||
# need to grab the hub and assign ownership. But we don't want to grab one prematurely.
|
||||
# The example is three threads, the main thread and two worker threads; if we create
|
||||
# a Semaphore in the main thread but only use it in the two threads, if we had grabbed
|
||||
# the main thread's hub, the two worker threads would have a dependency on it, meaning that
|
||||
# if the main event loop is blocked, the worker threads might get blocked too.
|
||||
self.hub = hub
|
||||
|
||||
def linkcount(self):
|
||||
# For testing: how many objects are linked to this one?
|
||||
return len(self._links)
|
||||
|
||||
def ready(self):
|
||||
# Instances must define this
|
||||
raise NotImplementedError
|
||||
|
||||
def rawlink(self, callback):
|
||||
"""
|
||||
Register a callback to call when this object is ready.
|
||||
|
||||
*callback* will be called in the :class:`Hub
|
||||
<gevent.hub.Hub>`, so it must not use blocking gevent API.
|
||||
*callback* will be passed one argument: this instance.
|
||||
"""
|
||||
if not callable(callback):
|
||||
raise TypeError('Expected callable: %r' % (callback, ))
|
||||
self._links.append(callback)
|
||||
self._check_and_notify()
|
||||
|
||||
def unlink(self, callback):
|
||||
"""Remove the callback set by :meth:`rawlink`"""
|
||||
try:
|
||||
self._links.remove(callback)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
if not self._links and self._notifier is not None and self._notifier.pending:
|
||||
# If we currently have one queued, but not running, de-queue it.
|
||||
# This will break a reference cycle.
|
||||
# (self._notifier -> self._notify_links -> self)
|
||||
# If it's actually running, though, (and we're here as a result of callbacks)
|
||||
# we don't want to change it; it needs to finish what its doing
|
||||
# so we don't attempt to start a fresh one or swap it out from underneath the
|
||||
# _notify_links method.
|
||||
self._notifier.stop()
|
||||
|
||||
def _allocate_lock(self):
|
||||
return _allocate_thread_lock()
|
||||
|
||||
def _getcurrent(self):
|
||||
return getcurrent() # pylint:disable=undefined-variable
|
||||
|
||||
def _get_thread_ident(self):
|
||||
return _get_thread_ident()
|
||||
|
||||
def _capture_hub(self, create):
|
||||
# Subclasses should call this as the first action from any
|
||||
# public method that could, in theory, block and switch
|
||||
# to the hub. This may release the GIL. It may
|
||||
# raise InvalidThreadUseError if the result would
|
||||
|
||||
# First, detect a dead hub and drop it.
|
||||
while 1:
|
||||
my_hub = self.hub
|
||||
if my_hub is None:
|
||||
break
|
||||
if my_hub.dead: # dead is a property, could release GIL
|
||||
# back, holding GIL
|
||||
if self.hub is my_hub:
|
||||
self.hub = None
|
||||
my_hub = None
|
||||
break
|
||||
else:
|
||||
break
|
||||
|
||||
if self.hub is None:
|
||||
# This next line might release the GIL.
|
||||
current_hub = get_hub() if create else get_hub_if_exists()
|
||||
|
||||
# We have the GIL again. Did anything change? If so,
|
||||
# we lost the race.
|
||||
if self.hub is None:
|
||||
self.hub = current_hub
|
||||
|
||||
if self.hub is not None and self.hub.thread_ident != _get_thread_ident():
|
||||
raise InvalidThreadUseError(
|
||||
self.hub,
|
||||
get_hub_if_exists(),
|
||||
getcurrent() # pylint:disable=undefined-variable
|
||||
)
|
||||
return self.hub
|
||||
|
||||
def _check_and_notify(self):
|
||||
# If this object is ready to be notified, begin the process.
|
||||
if self.ready() and self._links and not self._notifier:
|
||||
hub = None
|
||||
try:
|
||||
hub = self._capture_hub(False) # Must create, we need it.
|
||||
except InvalidThreadUseError:
|
||||
# The current hub doesn't match self.hub. That's OK,
|
||||
# we still want to start the notifier in the thread running
|
||||
# self.hub (because the links probably contains greenlet.switch
|
||||
# calls valid only in that hub)
|
||||
pass
|
||||
if hub is not None:
|
||||
self._notifier = hub.loop.run_callback(self._notify_links, [])
|
||||
else:
|
||||
# Hmm, no hub. We must be the only thing running. Then its OK
|
||||
# to just directly call the callbacks.
|
||||
self._notifier = 1
|
||||
try:
|
||||
self._notify_links([])
|
||||
finally:
|
||||
self._notifier = None
|
||||
|
||||
def _notify_link_list(self, links):
|
||||
# The core of the _notify_links method to notify
|
||||
# links in order. Lets the ``links`` list be mutated,
|
||||
# and only notifies up to the last item in the list, in case
|
||||
# objects are added to it.
|
||||
if not links:
|
||||
# HMM. How did we get here? Running two threads at once?
|
||||
# Seen once on Py27/Win/Appveyor
|
||||
# https://ci.appveyor.com/project/jamadden/gevent/builds/36875645/job/9wahj9ft4h4qa170
|
||||
return []
|
||||
|
||||
only_while_ready = not self._notify_all
|
||||
final_link = links[-1]
|
||||
done = set() # of ids
|
||||
hub = self.hub if self.hub is not None else get_hub_if_exists()
|
||||
unswitched = []
|
||||
while links: # remember this can be mutated
|
||||
if only_while_ready and not self.ready():
|
||||
break
|
||||
|
||||
link = links.pop(0) # Cython optimizes using list internals
|
||||
id_link = id(link)
|
||||
if id_link not in done:
|
||||
# XXX: JAM: What was I thinking? This doesn't make much sense,
|
||||
# there's a good chance `link` will be deallocated, and its id() will
|
||||
# be free to be reused. This also makes looping difficult, you have to
|
||||
# create new functions inside a loop rather than just once outside the loop.
|
||||
done.add(id_link)
|
||||
try:
|
||||
self._drop_lock_for_switch_out()
|
||||
try:
|
||||
link(self)
|
||||
except greenlet_error:
|
||||
# couldn't switch to a greenlet, we must be
|
||||
# running in a different thread. back on the list it goes for next time.
|
||||
unswitched.append(link)
|
||||
finally:
|
||||
self._acquire_lock_for_switch_in()
|
||||
|
||||
except: # pylint:disable=bare-except
|
||||
# We're running in the hub, errors must not escape.
|
||||
if hub is not None:
|
||||
hub.handle_error((link, self), *sys.exc_info())
|
||||
else:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
if link is final_link:
|
||||
break
|
||||
return unswitched
|
||||
|
||||
def _notify_links(self, arrived_while_waiting):
|
||||
# This method must hold the GIL, or be guarded with the lock that guards
|
||||
# this object. Thus, while we are notifying objects, an object from another
|
||||
# thread simply cannot arrive and mutate ``_links`` or ``arrived_while_waiting``
|
||||
|
||||
# ``arrived_while_waiting`` is a list of greenlet.switch methods
|
||||
# to call. These were objects that called wait() while we were processing,
|
||||
# and which would have run *before* those that had actually waited
|
||||
# and blocked. Instead of returning True immediately, we add them to this
|
||||
# list so they wait their turn.
|
||||
|
||||
# We release self._notifier here when done invoking links.
|
||||
# The object itself becomes false in a boolean way as soon
|
||||
# as this method returns.
|
||||
notifier = self._notifier
|
||||
if notifier is None:
|
||||
# XXX: How did we get here?
|
||||
self._check_and_notify()
|
||||
return
|
||||
# Early links are allowed to remove later links, and links
|
||||
# are allowed to add more links, thus we must not
|
||||
# make a copy of our the ``_links`` list, we must traverse it and
|
||||
# mutate in place.
|
||||
#
|
||||
# We were ready() at the time this callback was scheduled; we
|
||||
# may not be anymore, and that status may change during
|
||||
# callback processing. Some of our subclasses (Event) will
|
||||
# want to notify everyone who was registered when the status
|
||||
# became true that it was once true, even though it may not be
|
||||
# any more. In that case, we must not keep notifying anyone that's
|
||||
# newly added after that, even if we go ready again.
|
||||
try:
|
||||
unswitched = self._notify_link_list(self._links)
|
||||
# Now, those that arrived after we had begun the notification
|
||||
# process. Follow the same rules, stop with those that are
|
||||
# added so far to prevent starvation.
|
||||
if arrived_while_waiting:
|
||||
un2 = self._notify_link_list(arrived_while_waiting)
|
||||
unswitched.extend(un2)
|
||||
|
||||
# Anything left needs to go back on the main list.
|
||||
self._links.extend(arrived_while_waiting)
|
||||
finally:
|
||||
# We should not have created a new notifier even if callbacks
|
||||
# released us because we loop through *all* of our links on the
|
||||
# same callback while self._notifier is still true.
|
||||
assert self._notifier is notifier, (self._notifier, notifier)
|
||||
self._notifier = None
|
||||
# TODO: Maybe we should intelligently reset self.hub to
|
||||
# free up thread affinity? In case of a pathological situation where
|
||||
# one object was used from one thread once & first, but usually is
|
||||
# used by another thread.
|
||||
#
|
||||
# BoundedSemaphore does this.
|
||||
# Now we may be ready or not ready. If we're ready, which
|
||||
# could have happened during the last link we called, then we
|
||||
# must have more links than we started with. We need to schedule the
|
||||
# wakeup.
|
||||
self._check_and_notify()
|
||||
if unswitched:
|
||||
self._handle_unswitched_notifications(unswitched)
|
||||
|
||||
|
||||
def _handle_unswitched_notifications(self, unswitched):
|
||||
# Given a list of callable objects that raised
|
||||
# ``greenlet.error`` when we called them: If we can determine
|
||||
# that it is a parked greenlet (the callablle is a
|
||||
# ``greenlet.switch`` method) and we can determine the hub
|
||||
# that the greenlet belongs to (either its parent, or, in the
|
||||
# case of a main greenlet, find a hub with the same parent as
|
||||
# this greenlet object) then:
|
||||
|
||||
# Move this to be a callback in that thread.
|
||||
# (This relies on holding the GIL *or* ``Hub.loop.run_callback`` being
|
||||
# thread-safe! Note that the CFFI implementations are definitely
|
||||
# NOT thread-safe. TODO: Make them? Or an alternative?)
|
||||
#
|
||||
# Otherwise, print some error messages.
|
||||
|
||||
# TODO: Inline this for individual links. That handles the
|
||||
# "only while ready" case automatically. Be careful about locking in that case.
|
||||
#
|
||||
# TODO: Add a 'strict' mode that prevents doing this dance, since it's
|
||||
# inherently not safe.
|
||||
root_greenlets = None
|
||||
printed_tb = False
|
||||
only_while_ready = not self._notify_all
|
||||
|
||||
while unswitched:
|
||||
if only_while_ready and not self.ready():
|
||||
self.__print_unswitched_warning(unswitched, printed_tb)
|
||||
break
|
||||
|
||||
link = unswitched.pop(0)
|
||||
|
||||
hub = None # Also serves as a "handled?" flag
|
||||
# Is it a greenlet.switch method?
|
||||
if (getattr(link, '__name__', None) == 'switch'
|
||||
and isinstance(getattr(link, '__self__', None), greenlet)):
|
||||
glet = link.__self__
|
||||
parent = glet.parent
|
||||
|
||||
while parent is not None:
|
||||
if hasattr(parent, 'loop'): # Assuming the hub.
|
||||
hub = glet.parent
|
||||
break
|
||||
parent = glet.parent
|
||||
|
||||
if hub is None:
|
||||
if root_greenlets is None:
|
||||
root_greenlets = get_roots_and_hubs()
|
||||
hub = root_greenlets.get(glet)
|
||||
|
||||
if hub is not None and hub.loop is not None:
|
||||
hub.loop.run_callback_threadsafe(link, self)
|
||||
if hub is None or hub.loop is None:
|
||||
# We couldn't handle it
|
||||
self.__print_unswitched_warning(link, printed_tb)
|
||||
printed_tb = True
|
||||
|
||||
|
||||
def __print_unswitched_warning(self, link, printed_tb):
|
||||
print('gevent: error: Unable to switch to greenlet', link,
|
||||
'from', self, '; crossing thread boundaries is not allowed.',
|
||||
file=sys.stderr)
|
||||
|
||||
if not printed_tb:
|
||||
printed_tb = True
|
||||
print(
|
||||
'gevent: error: '
|
||||
'This is a result of using gevent objects from multiple threads,',
|
||||
'and is a bug in the calling code.', file=sys.stderr)
|
||||
|
||||
import traceback
|
||||
traceback.print_stack()
|
||||
|
||||
def _quiet_unlink_all(self, obj):
|
||||
if obj is None:
|
||||
return
|
||||
|
||||
self.unlink(obj)
|
||||
if self._notifier is not None and self._notifier.args:
|
||||
try:
|
||||
self._notifier.args[0].remove(obj)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def __wait_to_be_notified(self, rawlink): # pylint:disable=too-many-branches
|
||||
resume_this_greenlet = getcurrent().switch # pylint:disable=undefined-variable
|
||||
if rawlink:
|
||||
self.rawlink(resume_this_greenlet)
|
||||
else:
|
||||
self._notifier.args[0].append(resume_this_greenlet)
|
||||
|
||||
try:
|
||||
self._switch_to_hub(self.hub)
|
||||
# If we got here, we were automatically unlinked already.
|
||||
resume_this_greenlet = None
|
||||
finally:
|
||||
self._quiet_unlink_all(resume_this_greenlet)
|
||||
|
||||
def _switch_to_hub(self, the_hub):
|
||||
self._drop_lock_for_switch_out()
|
||||
try:
|
||||
result = the_hub.switch()
|
||||
finally:
|
||||
self._acquire_lock_for_switch_in()
|
||||
if result is not self: # pragma: no cover
|
||||
raise InvalidSwitchError(
|
||||
'Invalid switch into %s.wait(): %r' % (
|
||||
self.__class__.__name__,
|
||||
result,
|
||||
)
|
||||
)
|
||||
|
||||
def _acquire_lock_for_switch_in(self):
|
||||
return
|
||||
|
||||
def _drop_lock_for_switch_out(self):
|
||||
return
|
||||
|
||||
def _wait_core(self, timeout, catch=Timeout):
|
||||
"""
|
||||
The core of the wait implementation, handling switching and
|
||||
linking.
|
||||
|
||||
This method is NOT safe to call from multiple threads.
|
||||
|
||||
``self.hub`` must be initialized before entering this method.
|
||||
The hub that is set is considered the owner and cannot be changed
|
||||
while this method is running. It must only be called from the thread
|
||||
where ``self.hub`` is the current hub.
|
||||
|
||||
If *catch* is set to ``()``, a timeout that elapses will be
|
||||
allowed to be raised.
|
||||
|
||||
:return: A true value if the wait succeeded without timing out.
|
||||
That is, a true return value means we were notified and control
|
||||
resumed in this greenlet.
|
||||
"""
|
||||
with Timeout._start_new_or_dummy(timeout) as timer: # Might release
|
||||
# We already checked above (_wait()) if we're ready()
|
||||
try:
|
||||
self.__wait_to_be_notified(
|
||||
True,# Use rawlink()
|
||||
)
|
||||
return True
|
||||
except catch as ex:
|
||||
if ex is not timer:
|
||||
raise
|
||||
# test_set_and_clear and test_timeout in test_threading
|
||||
# rely on the exact return values, not just truthish-ness
|
||||
return False
|
||||
|
||||
def _wait_return_value(self, waited, wait_success):
|
||||
# pylint:disable=unused-argument
|
||||
# Subclasses should override this to return a value from _wait.
|
||||
# By default we return None.
|
||||
return None # pragma: no cover all extent subclasses override
|
||||
|
||||
def _wait(self, timeout=None):
|
||||
# Watch where we could potentially release the GIL.
|
||||
self._capture_hub(True) # Must create, we must have an owner. Might release
|
||||
|
||||
if self.ready(): # *might* release, if overridden in Python.
|
||||
result = self._wait_return_value(False, False) # pylint:disable=assignment-from-none
|
||||
if self._notifier:
|
||||
# We're already notifying waiters; one of them must have run
|
||||
# and switched to this greenlet, which arrived here. Alternately,
|
||||
# we could be in a separate thread (but we're holding the GIL/object lock)
|
||||
self.__wait_to_be_notified(False) # Use self._notifier.args[0] instead of self.rawlink
|
||||
|
||||
return result
|
||||
|
||||
gotit = self._wait_core(timeout)
|
||||
return self._wait_return_value(True, gotit)
|
||||
|
||||
def _at_fork_reinit(self):
|
||||
"""
|
||||
This method was added in Python 3.9 and is called by logging.py
|
||||
``_after_at_fork_child_reinit_locks`` on Lock objects.
|
||||
|
||||
It is also called from threading.py, ``_after_fork`` in
|
||||
``_reset_internal_locks``, and that can hit ``Event`` objects.
|
||||
|
||||
Subclasses should reset themselves to an initial state. This
|
||||
includes unlocking/releasing, if possible. This method detaches from the
|
||||
previous hub and drops any existing notifier.
|
||||
"""
|
||||
self.hub = None
|
||||
self._notifier = None
|
||||
|
||||
def _init():
|
||||
greenlet_init() # pylint:disable=undefined-variable
|
||||
|
||||
_init()
|
||||
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__abstract_linkable')
|
||||
109
venv3_12/Lib/site-packages/gevent/_compat.py
Normal file
109
venv3_12/Lib/site-packages/gevent/_compat.py
Normal file
@@ -0,0 +1,109 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
internal gevent python 2/python 3 bridges. Not for external use.
|
||||
"""
|
||||
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
## Important: This module should generally not have any other gevent
|
||||
## imports (the exception is _util_py2)
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
PY39 = sys.version_info[:2] >= (3, 9)
|
||||
PY311 = sys.version_info[:2] >= (3, 11)
|
||||
PY312 = sys.version_info[:2] >= (3, 12)
|
||||
PY313 = sys.version_info[:2] >= (3, 13)
|
||||
PYPY = hasattr(sys, 'pypy_version_info')
|
||||
WIN = sys.platform.startswith("win")
|
||||
LINUX = sys.platform.startswith('linux')
|
||||
OSX = MAC = sys.platform == 'darwin'
|
||||
|
||||
|
||||
PURE_PYTHON = PYPY or os.getenv('PURE_PYTHON')
|
||||
|
||||
## Types
|
||||
|
||||
|
||||
string_types = (str,)
|
||||
integer_types = (int,)
|
||||
text_type = str
|
||||
native_path_types = (str, bytes)
|
||||
thread_mod_name = '_thread'
|
||||
|
||||
hostname_types = tuple(set(string_types + (bytearray, bytes)))
|
||||
|
||||
def NativeStrIO():
|
||||
import io
|
||||
return io.BytesIO() if str is bytes else io.StringIO()
|
||||
|
||||
|
||||
from abc import ABC # pylint:disable=unused-import
|
||||
|
||||
|
||||
## Exceptions
|
||||
|
||||
def reraise(t, value, tb=None): # pylint:disable=unused-argument
|
||||
if value.__traceback__ is not tb and tb is not None:
|
||||
raise value.with_traceback(tb)
|
||||
raise value
|
||||
def exc_clear():
|
||||
pass
|
||||
|
||||
|
||||
|
||||
## import locks
|
||||
try:
|
||||
# In Python 3.4 and newer in CPython and PyPy3,
|
||||
# imp.acquire_lock and imp.release_lock are delegated to
|
||||
# '_imp'. (Which is also used by importlib.) 'imp' itself is
|
||||
# deprecated. Avoid that warning.
|
||||
import _imp as imp
|
||||
except ImportError:
|
||||
import imp # pylint:disable=deprecated-module
|
||||
imp_acquire_lock = imp.acquire_lock
|
||||
imp_release_lock = imp.release_lock
|
||||
|
||||
## Functions
|
||||
iteritems = dict.items
|
||||
itervalues = dict.values
|
||||
xrange = range
|
||||
izip = zip
|
||||
|
||||
|
||||
## The __fspath__ protocol
|
||||
from os import PathLike # pylint:disable=unused-import
|
||||
from os import fspath
|
||||
_fspath = fspath
|
||||
from os import fsencode # pylint:disable=unused-import
|
||||
from os import fsdecode # pylint:disable=unused-import
|
||||
|
||||
## Clocks
|
||||
# Python 3.3+ (PEP 418)
|
||||
from time import perf_counter
|
||||
from time import get_clock_info
|
||||
from time import monotonic
|
||||
perf_counter = perf_counter
|
||||
monotonic = monotonic
|
||||
get_clock_info = get_clock_info
|
||||
|
||||
|
||||
## Monitoring
|
||||
def get_this_psutil_process():
|
||||
# Depends on psutil. Defer the import until needed, who knows what
|
||||
# it imports (psutil imports subprocess which on Python 3 imports
|
||||
# selectors. This can expose issues with monkey-patching.)
|
||||
# Returns a freshly queried object each time.
|
||||
try:
|
||||
from psutil import Process, AccessDenied
|
||||
# Make sure it works (why would we be denied access to our own process?)
|
||||
try:
|
||||
proc = Process()
|
||||
proc.memory_full_info()
|
||||
except AccessDenied: # pragma: no cover
|
||||
proc = None
|
||||
except ImportError:
|
||||
proc = None
|
||||
return proc
|
||||
736
venv3_12/Lib/site-packages/gevent/_config.py
Normal file
736
venv3_12/Lib/site-packages/gevent/_config.py
Normal file
@@ -0,0 +1,736 @@
|
||||
# Copyright (c) 2018 gevent. See LICENSE for details.
|
||||
"""
|
||||
gevent tunables.
|
||||
|
||||
This should be used as ``from gevent import config``. That variable
|
||||
is an object of :class:`Config`.
|
||||
|
||||
.. versionadded:: 1.3a2
|
||||
|
||||
.. versionchanged:: 22.08.0
|
||||
Invoking this module like ``python -m gevent._config`` will
|
||||
print a help message about available configuration properties.
|
||||
This is handy to quickly look for environment variables.
|
||||
"""
|
||||
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
import importlib
|
||||
import os
|
||||
import textwrap
|
||||
|
||||
from gevent._compat import string_types
|
||||
from gevent._compat import WIN
|
||||
|
||||
__all__ = [
    'config',
]

# Registry of every concrete Setting subclass, in definition order.
# Populated by SettingType.__new__ and consumed by make_settings().
ALL_SETTINGS = []
|
||||
|
||||
class SettingType(type):
    # pylint:disable=bad-mcs-classmethod-argument
    #
    # Metaclass for Setting. Each concrete subclass is registered in
    # ALL_SETTINGS, gets a formatted ``desc``/``__doc__``, and (when
    # ``document`` is true) gets a matching property installed on Config.

    def __new__(cls, name, bases, cls_dict):
        if name == 'Setting':
            # The abstract base itself is not a real setting; skip
            # registration and documentation generation.
            return type.__new__(cls, name, bases, cls_dict)

        # Stable definition order, should listings ever need it.
        cls_dict["order"] = len(ALL_SETTINGS)
        if 'name' not in cls_dict:
            cls_dict['name'] = name.lower()

        if 'environment_key' not in cls_dict:
            cls_dict['environment_key'] = 'GEVENT_' + cls_dict['name'].upper()


        new_class = type.__new__(cls, name, bases, cls_dict)
        new_class.fmt_desc(cls_dict.get("desc", ""))
        new_class.__doc__ = new_class.desc
        ALL_SETTINGS.append(new_class)

        if new_class.document:
            setting_name = cls_dict['name']

            def getter(self):
                return self.settings[setting_name].get()

            def setter(self, value): # pragma: no cover
                # The setter should never be hit, Config has a
                # __setattr__ that would override. But for the sake
                # of consistency we provide one.
                self.settings[setting_name].set(value)

            prop = property(getter, setter, doc=new_class.__doc__)

            # NOTE: Config is defined later in this module. This branch
            # only runs when Setting subclasses are created, which
            # happens after Config exists.
            setattr(Config, cls_dict['name'], prop)
        return new_class

    def fmt_desc(cls, desc):
        # Build the final ReST description (and __doc__) for a setting
        # class: dedent the raw desc, then append importable-value help,
        # the validator's docstring, the default value, and the
        # controlling environment variable.
        desc = textwrap.dedent(desc).strip()
        if hasattr(cls, 'shortname_map'):
            desc += (
                "\n\nThis is an importable value. It can be "
                "given as a string naming an importable object, "
                "or a list of strings in preference order and the first "
                "successfully importable object will be used. (Separate values "
                "in the environment variable with commas.) "
                "It can also be given as the callable object itself (in code). "
            )
            if cls.shortname_map:
                desc += "Shorthand names for default objects are %r" % (list(cls.shortname_map),)
        if getattr(cls.validate, '__doc__'):
            desc += '\n\n' + textwrap.dedent(cls.validate.__doc__).strip()
        if isinstance(cls.default, str) and hasattr(cls, 'shortname_map'):
            default = "`%s`" % (cls.default,)
        else:
            default = "`%r`" % (cls.default,)
        desc += "\n\nThe default value is %s" % (default,)
        desc += ("\n\nThe environment variable ``%s`` "
                 "can be used to control this." % (cls.environment_key,))
        setattr(cls, "desc", desc)
        return desc
|
||||
|
||||
def validate_invalid(value):
    # Base validator used by Setting: rejects every value, so concrete
    # settings must supply their own ``validate``.
    # (No docstring here: SettingType.fmt_desc appends a validator's
    # docstring to the setting's generated description.)
    raise ValueError("Not a valid value: %r" % (value,))
|
||||
|
||||
def validate_bool(value):
    """
    This is a boolean value.

    In the environment variable, it may be given as ``1``, ``true``,
    ``on`` or ``yes`` for `True`, or ``0``, ``false``, ``off``, or
    ``no`` for `False`.
    """
    # Non-strings fall straight through to ordinary truthiness.
    if not isinstance(value, string_types):
        return bool(value)
    normalized = value.lower().strip()
    if normalized in ('1', 'true', 'on', 'yes'):
        return True
    # An empty string (after stripping) also counts as False.
    if normalized in ('0', 'false', 'off', 'no') or not normalized:
        return False
    raise ValueError("Invalid boolean string: %r" % (normalized,))
|
||||
|
||||
def validate_anything(value):
    # Identity validator: accept any value unchanged.
    return value

# Alias for settings that want the raw value with no comma-splitting.
convert_str_value_as_is = validate_anything
|
||||
|
||||
class Setting(object):
    # Name of the setting; also the Config attribute name. Defaults to
    # the lowercased class name (see SettingType.__new__).
    name = None
    # Class-level slot; get()/set() shadow it with an instance value.
    value = None
    # Base validator rejects everything; subclasses must override.
    validate = staticmethod(validate_invalid)
    default = None
    # Environment variable consulted by _default().
    environment_key = None
    # Whether SettingType installs a Config property for this setting.
    document = True

    desc = """\

    A long ReST description.

    The first line should be a single sentence.

    """

    def _convert(self, value):
        # Environment strings are comma-separated lists by default.
        if isinstance(value, string_types):
            return value.split(',')
        return value

    def _default(self):
        # The environment value (if present) wins over the class default.
        result = os.environ.get(self.environment_key, self.default)
        result = self._convert(result)
        return result

    def get(self):
        # If we've been specifically set, return it
        if 'value' in self.__dict__:
            return self.value
        # Otherwise, read from the environment and reify
        # so we return consistent results.
        self.value = self.validate(self._default())
        return self.value

    def set(self, val):
        self.value = self.validate(self._convert(val))


# Rebuild Setting through its metaclass so that Setting (and therefore
# every subclass) is an instance of SettingType; the name 'Setting'
# makes SettingType.__new__ skip registration for the base itself.
Setting = SettingType('Setting', (Setting,), dict(Setting.__dict__))
|
||||
|
||||
def make_settings():
    """
    Return fresh instances of all classes defined in `ALL_SETTINGS`.
    """
    result = {}
    for setting_class in ALL_SETTINGS:
        instance = setting_class()
        # Setting names must be unique module-wide; a duplicate would
        # silently shadow an earlier setting.
        assert instance.name not in result
        result[instance.name] = instance
    return result
|
||||
|
||||
|
||||
class Config(object):
    """
    Global configuration for gevent.

    There is one instance of this object at ``gevent.config``. If you
    are going to make changes in code, instead of using the documented
    environment variables, you need to make the changes before using
    any parts of gevent that might need those settings. For example::

        >>> from gevent import config
        >>> config.fileobject = 'thread'

        >>> from gevent import fileobject
        >>> fileobject.FileObject.__name__
        'FileObjectThread'

    .. versionadded:: 1.3a2

    """

    def __init__(self):
        self.settings = make_settings()

    def __getattr__(self, name):
        # Only reached for names not found by normal lookup, i.e.,
        # settings (instance attributes and SettingType-installed
        # properties are found first).
        if name not in self.settings:
            raise AttributeError("No configuration setting for: %r" % name)
        return self.settings[name].get()

    def __setattr__(self, name, value):
        # 'settings' itself must bypass the redirection: it is assigned
        # in __init__ before any setting lookup could succeed.
        if name != "settings" and name in self.settings:
            self.set(name, value)
        else:
            super(Config, self).__setattr__(name, value)

    def set(self, name, value):
        if name not in self.settings:
            raise AttributeError("No configuration setting for: %r" % name)
        self.settings[name].set(value)

    def __dir__(self):
        return list(self.settings)

    def print_help(self):
        # Print each setting's name and its generated documentation,
        # indented for readability.
        for k, v in self.settings.items():
            print(k)
            print(textwrap.indent(v.__doc__.lstrip(), ' ' * 4))
            print()
|
||||
|
||||
|
||||
class ImportableSetting(object):
    # Mixin for settings whose value names an importable object: a
    # dotted-path string, a list of such strings in preference order,
    # or the already-imported object itself.

    def _import_one_of(self, candidates):
        # Try each candidate in order; only the final candidate's
        # ImportError (if any) propagates to the caller.
        assert isinstance(candidates, list)
        if not candidates:
            raise ImportError('Cannot import from empty list')

        for item in candidates[:-1]:
            try:
                return self._import_one(item)
            except ImportError:
                pass

        return self._import_one(candidates[-1])

    def _import_one(self, path, _MISSING=object()):
        # Non-strings are assumed to already be the imported object.
        if not isinstance(path, string_types):
            return path

        if '.' not in path or '/' in path:
            raise ImportError("Cannot import %r. "
                              "Required format: [package.]module.class. "
                              "Or choose from %r"
                              % (path, list(self.shortname_map)))


        module, item = path.rsplit('.', 1)
        module = importlib.import_module(module)
        # _MISSING sentinel distinguishes "attribute absent" from an
        # attribute whose value is None.
        x = getattr(module, item, _MISSING)
        if x is _MISSING:
            raise ImportError('Cannot import %r from %r' % (item, module))
        return x

    # Maps short aliases (e.g. 'thread') to full dotted paths;
    # subclasses override with their own table.
    shortname_map = {}

    def validate(self, value):
        # Already-imported classes pass straight through; otherwise
        # resolve aliases and import the first candidate that works.
        if isinstance(value, type):
            return value
        return self._import_one_of([self.shortname_map.get(x, x) for x in value])

    def get_options(self):
        # Map each alias to its imported object, or to the ImportError
        # explaining why that option is unavailable.
        result = {}
        for name, val in self.shortname_map.items():
            try:
                result[name] = self._import_one(val)
            except ImportError as e:
                result[name] = e
        return result
|
||||
|
||||
|
||||
class BoolSettingMixin(object):
    # Mixin for boolean settings ('1'/'true'/'on'/'yes' and friends).
    validate = staticmethod(validate_bool)
    # Don't do string-to-list conversion.
    _convert = staticmethod(convert_str_value_as_is)


class IntSettingMixin(object):
    # Don't do string-to-list conversion.
    def _convert(self, value):
        # NOTE: falsy values (None, '', 0) fall through, returning None.
        if value:
            return int(value)

    validate = staticmethod(validate_anything)
|
||||
|
||||
|
||||
class _PositiveValueMixin(object):
|
||||
|
||||
def validate(self, value):
|
||||
if value is not None and value <= 0:
|
||||
raise ValueError("Must be positive")
|
||||
return value
|
||||
|
||||
|
||||
class FloatSettingMixin(_PositiveValueMixin):
|
||||
def _convert(self, value):
|
||||
if value:
|
||||
return float(value)
|
||||
|
||||
|
||||
class ByteCountSettingMixin(_PositiveValueMixin):
|
||||
|
||||
_MULTIPLES = {
|
||||
# All keys must be the same size.
|
||||
'kb': 1024,
|
||||
'mb': 1024 * 1024,
|
||||
'gb': 1024 * 1024 * 1024,
|
||||
}
|
||||
|
||||
_SUFFIX_SIZE = 2
|
||||
|
||||
def _convert(self, value):
|
||||
if not value or not isinstance(value, str):
|
||||
return value
|
||||
value = value.lower()
|
||||
for s, m in self._MULTIPLES.items():
|
||||
if value[-self._SUFFIX_SIZE:] == s:
|
||||
return int(value[:-self._SUFFIX_SIZE]) * m
|
||||
return int(value)
|
||||
|
||||
|
||||
class Resolver(ImportableSetting, Setting):
    # Which DNS resolver implementation the hub uses.

    desc = """\
    The callable that will be used to create
    :attr:`gevent.hub.Hub.resolver`.

    See :doc:`dns` for more information.
    """

    # Preference order: the first importable entry wins.
    default = [
        'thread',
        'dnspython',
        'ares',
        'block',
    ]

    shortname_map = {
        'ares': 'gevent.resolver.ares.Resolver',
        'thread': 'gevent.resolver.thread.Resolver',
        'block': 'gevent.resolver.blocking.Resolver',
        'dnspython': 'gevent.resolver.dnspython.Resolver',
    }



class Threadpool(ImportableSetting, Setting):
    # The class used for the hub's default threadpool.

    desc = """\
    The kind of threadpool we use.
    """

    default = 'gevent.threadpool.ThreadPool'

class ThreadpoolIdleTaskTimeout(FloatSettingMixin, Setting):
    document = True
    name = 'threadpool_idle_task_timeout'
    environment_key = 'GEVENT_THREADPOOL_IDLE_TASK_TIMEOUT'

    desc = """\
    How long threads in the default threadpool (used for
    DNS by default) are allowed to be idle before exiting.

    Use -1 for no timeout.

    .. versionadded:: 22.08.0
    """

    # This value is picked pretty much arbitrarily.
    # We want to balance performance (keeping threads around)
    # with memory/cpu usage (letting threads go).
    default = 5.0
|
||||
|
||||
class Loop(ImportableSetting, Setting):
    # Which event-loop implementation backs the hub.

    desc = """\
    The kind of the loop we use.

    On Windows, this defaults to libuv, while on
    other platforms it defaults to libev.

    """

    # Preference order differs per platform (WIN from gevent._compat).
    default = [
        'libev-cext',
        'libev-cffi',
        'libuv-cffi',
    ] if not WIN else [
        'libuv-cffi',
        'libev-cext',
        'libev-cffi',
    ]

    shortname_map = { # pylint:disable=dict-init-mutate
        'libev-cext': 'gevent.libev.corecext.loop',
        'libev-cffi': 'gevent.libev.corecffi.loop',
        'libuv-cffi': 'gevent.libuv.loop.loop',
    }

    # 'libuv' is accepted as a convenience alias.
    shortname_map['libuv'] = shortname_map['libuv-cffi']


class FormatContext(ImportableSetting, Setting):
    name = 'format_context'

    # using pprint.pformat can override custom __repr__ methods on dict/list
    # subclasses, which can be a security concern
    default = 'pprint.saferepr'


class LibevBackend(Setting):
    name = 'libev_backend'
    environment_key = 'GEVENT_BACKEND'

    desc = """\
    The backend for libev, such as 'select'
    """

    default = None

    # Interpreted by the libev core itself; accept anything here.
    validate = staticmethod(validate_anything)
|
||||
|
||||
|
||||
class FileObject(ImportableSetting, Setting):
    # Which cooperative file-object implementation gevent uses.
    desc = """\
    The kind of ``FileObject`` we will use.

    See :mod:`gevent.fileobject` for a detailed description.

    """
    environment_key = 'GEVENT_FILE'

    default = [
        'posix',
        'thread',
    ]

    shortname_map = {
        'thread': 'gevent._fileobjectcommon.FileObjectThread',
        'posix': 'gevent._fileobjectposix.FileObjectPosix',
        'block': 'gevent._fileobjectcommon.FileObjectBlock'
    }


class WatchChildren(BoolSettingMixin, Setting):
    # Note the inverted sense: a true value *disables* child watchers.
    desc = """\
    Should we *not* watch children with the event loop watchers?

    This is an advanced setting.

    See :mod:`gevent.os` for a detailed description.
    """
    name = 'disable_watch_children'
    environment_key = 'GEVENT_NOWAITPID'
    default = False


class TraceMalloc(IntSettingMixin, Setting):
    name = 'trace_malloc'
    # Deliberately reuses the interpreter's own variable, not GEVENT_*.
    environment_key = 'PYTHONTRACEMALLOC'
    default = False

    desc = """\
    Should FFI objects track their allocation?

    This is only useful for low-level debugging.

    On Python 3, this environment variable is built in to the
    interpreter, and it may also be set with the ``-X
    tracemalloc`` command line argument.

    On Python 2, gevent interprets this argument and adds extra
    tracking information for FFI objects.
    """


class TrackGreenletTree(BoolSettingMixin, Setting):
    name = 'track_greenlet_tree'
    environment_key = 'GEVENT_TRACK_GREENLET_TREE'
    default = True

    desc = """\
    Should `Greenlet` objects track their spawning tree?

    Setting this to a false value will make spawning `Greenlet`
    objects and using `spawn_raw` faster, but the
    ``spawning_greenlet``, ``spawn_tree_locals`` and ``spawning_stack``
    will not be captured. Setting this to a false value can also
    reduce memory usage because capturing the stack captures
    some information about Python frames.

    .. versionadded:: 1.3b1
    """
|
||||
|
||||
|
||||
## Monitoring settings
|
||||
# All env keys should begin with GEVENT_MONITOR
|
||||
|
||||
class MonitorThread(BoolSettingMixin, Setting):
    name = 'monitor_thread'
    environment_key = 'GEVENT_MONITOR_THREAD_ENABLE'
    default = False

    desc = """\
    Should each hub start a native OS thread to monitor
    for problems?

    Such a thread will periodically check to see if the event loop
    is blocked for longer than `max_blocking_time`, producing output on
    the hub's exception stream (stderr by default) if it detects this condition.

    If this setting is true, then this thread will be created
    the first time the hub is switched to,
    or you can call :meth:`gevent.hub.Hub.start_periodic_monitoring_thread` at any
    time to create it (from the same thread that will run the hub). That function
    will return an instance of :class:`gevent.events.IPeriodicMonitorThread`
    to which you can add your own monitoring functions. That function
    also emits an event of :class:`gevent.events.PeriodicMonitorThreadStartedEvent`.

    .. seealso:: `max_blocking_time`

    .. versionadded:: 1.3b1
    """

class MaxBlockingTime(FloatSettingMixin, Setting):
    name = 'max_blocking_time'
    # This environment key doesn't follow the convention because it's
    # meant to match a key used by existing projects
    environment_key = 'GEVENT_MAX_BLOCKING_TIME'
    default = 0.1

    desc = """\
    If the `monitor_thread` is enabled, this is
    approximately how long (in seconds)
    the event loop will be allowed to block before a warning is issued.

    This function depends on using `greenlet.settrace`, so installing
    your own trace function after starting the monitoring thread will
    cause this feature to misbehave unless you call the function
    returned by `greenlet.settrace`. If you install a tracing function *before*
    the monitoring thread is started, it will still be called.

    .. note:: In the unlikely event of creating and using multiple different
       gevent hubs in the same native thread in a short period of time,
       especially without destroying the hubs, false positives may be reported.

    .. versionadded:: 1.3b1
    """

class MonitorMemoryPeriod(FloatSettingMixin, Setting):
    name = 'memory_monitor_period'

    environment_key = 'GEVENT_MONITOR_MEMORY_PERIOD'
    default = 5

    desc = """\
    If `monitor_thread` is enabled, this is approximately how long
    (in seconds) we will go between checking the processes memory usage.

    Checking the memory usage is relatively expensive on some operating
    systems, so this should not be too low. gevent will place a floor
    value on it.
    """

class MonitorMemoryMaxUsage(ByteCountSettingMixin, Setting):
    name = 'max_memory_usage'

    environment_key = 'GEVENT_MONITOR_MEMORY_MAX'
    default = None

    desc = """\
    If `monitor_thread` is enabled,
    then if memory usage exceeds this amount (in bytes), events will
    be emitted. See `gevent.events`. In the environment variable, you can use
    a suffix of 'kb', 'mb' or 'gb' to specify the value in kilobytes, megabytes
    or gigibytes.

    There is no default value for this setting. If you wish to
    cap memory usage, you must choose a value.
    """
|
||||
|
||||
# The ares settings are all interpreted by
|
||||
# gevent/resolver/ares.pyx, so we don't do
|
||||
# any validation here.
|
||||
|
||||
class AresSettingMixin(object):
    # Mixin for c-ares resolver settings. Values are interpreted by
    # gevent/resolver/ares.pyx, so no validation or conversion happens
    # here (see the comment preceding this class).

    # Most ares settings are not documented individually.
    document = False

    @property
    def kwarg_name(self):
        # Strip the 'ares_' prefix to get the resolver keyword argument.
        return self.name[5:]

    validate = staticmethod(validate_anything)

    _convert = staticmethod(convert_str_value_as_is)

class AresFlags(AresSettingMixin, Setting):
    name = 'ares_flags'
    default = None
    environment_key = 'GEVENTARES_FLAGS'

class AresTimeout(AresSettingMixin, Setting):
    document = True
    name = 'ares_timeout'
    default = None
    environment_key = 'GEVENTARES_TIMEOUT'
    desc = """\

    .. deprecated:: 1.3a2
       Prefer the :attr:`resolver_timeout` setting. If both are set,
       the results are not defined.
    """

class AresTries(AresSettingMixin, Setting):
    name = 'ares_tries'
    default = None
    environment_key = 'GEVENTARES_TRIES'

class AresNdots(AresSettingMixin, Setting):
    name = 'ares_ndots'
    default = None
    environment_key = 'GEVENTARES_NDOTS'

class AresUDPPort(AresSettingMixin, Setting):
    name = 'ares_udp_port'
    default = None
    environment_key = 'GEVENTARES_UDP_PORT'

class AresTCPPort(AresSettingMixin, Setting):
    name = 'ares_tcp_port'
    default = None
    environment_key = 'GEVENTARES_TCP_PORT'

class AresServers(AresSettingMixin, Setting):
    document = True
    name = 'ares_servers'
    default = None
    environment_key = 'GEVENTARES_SERVERS'
    desc = """\
    A list of strings giving the IP addresses of nameservers for the ares resolver.

    In the environment variable, these strings are separated by commas.

    .. deprecated:: 1.3a2
       Prefer the :attr:`resolver_nameservers` setting. If both are set,
       the results are not defined.
    """
|
||||
|
||||
# Generic nameservers, works for dnspython and ares.
|
||||
class ResolverNameservers(AresSettingMixin, Setting):
    document = True
    name = 'resolver_nameservers'
    default = None
    environment_key = 'GEVENT_RESOLVER_NAMESERVERS'
    desc = """\
    A list of strings giving the IP addresses of nameservers for the (non-system) resolver.

    In the environment variable, these strings are separated by commas.

    .. rubric:: Resolver Behaviour

    * blocking

      Ignored

    * Threaded

      Ignored

    * dnspython

      If this setting is not given, the dnspython resolver will
      load nameservers to use from ``/etc/resolv.conf``
      or the Windows registry. This setting replaces any nameservers read
      from those means. Note that the file and registry are still read
      for other settings.

      .. caution:: dnspython does not validate the members of the list.
         An improper address (such as a hostname instead of IP) has
         undefined results, including hanging the process.

    * ares

      Similar to dnspython, but with more platform and compile-time
      options. ares validates that the members of the list are valid
      addresses.
    """

    # Normal string-to-list rules. But still validate_anything.
    _convert = Setting._convert

    # TODO: In the future, support reading a resolv.conf file
    # *other* than /etc/resolv.conf, and do that both on Windows
    # and other platforms. Also offer the option to disable the system
    # configuration entirely.

    @property
    def kwarg_name(self):
        # Overrides the AresSettingMixin prefix-stripping default.
        return 'servers'
|
||||
|
||||
# Generic timeout, works for dnspython and ares
|
||||
class ResolverTimeout(FloatSettingMixin, AresSettingMixin, Setting):
    document = True
    name = 'resolver_timeout'
    environment_key = 'GEVENT_RESOLVER_TIMEOUT'
    desc = """\
    The total amount of time that the DNS resolver will spend making queries.

    Only the ares and dnspython resolvers support this.

    .. versionadded:: 1.3a2
    """

    @property
    def kwarg_name(self):
        # Overrides the AresSettingMixin prefix-stripping default.
        return 'timeout'
|
||||
|
||||
# The one and only gevent configuration object (``gevent.config``).
config = Config()

# Go ahead and attempt to import the loop when this class is
# instantiated. The hub won't work if the loop can't be found. This
# can solve problems with the class being imported from multiple
# threads at once, leading to one of the imports failing.
# factories are themselves handled lazily. See #687.

# Don't cache it though, in case the user re-configures through the
# API.

try:
    Loop().get()
except ImportError: # pragma: no cover
    pass


if __name__ == '__main__':
    # ``python -m gevent._config`` prints every setting and its docs.
    config.print_help()
|
||||
27
venv3_12/Lib/site-packages/gevent/_ffi/__init__.py
Normal file
27
venv3_12/Lib/site-packages/gevent/_ffi/__init__.py
Normal file
@@ -0,0 +1,27 @@
|
||||
"""
|
||||
Internal helpers for FFI implementations.
|
||||
"""
|
||||
from __future__ import print_function, absolute_import
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
def _dbg(*args, **kwargs):
|
||||
# pylint:disable=unused-argument
|
||||
pass
|
||||
|
||||
#_dbg = print
|
||||
|
||||
def _pid_dbg(*args, **kwargs):
|
||||
kwargs['file'] = sys.stderr
|
||||
print(os.getpid(), *args, **kwargs)
|
||||
|
||||
# Numeric debug levels; larger means more verbose.
CRITICAL = 1
ERROR = 3
DEBUG = 5
TRACE = 9

# Resolve the GEVENT_DEBUG env var (e.g. "debug") to one of the level
# constants above. An unknown name raises KeyError at import time.
GEVENT_DEBUG_LEVEL = vars()[os.getenv("GEVENT_DEBUG", 'CRITICAL').upper()]

if GEVENT_DEBUG_LEVEL >= TRACE:
    # At TRACE level, route _dbg through the PID-prefixed stderr printer.
    _dbg = _pid_dbg
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
57
venv3_12/Lib/site-packages/gevent/_ffi/callback.py
Normal file
57
venv3_12/Lib/site-packages/gevent/_ffi/callback.py
Normal file
@@ -0,0 +1,57 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import print_function
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
from gevent._interfaces import ICallback
|
||||
|
||||
__all__ = [
|
||||
'callback',
|
||||
]
|
||||
|
||||
|
||||
@implementer(ICallback)
class callback(object):
    """
    A one-shot (callable, args) pair queued to run on the event loop.

    ``stop()``/``close()`` clear both slots; the loop's
    ``_run_callbacks`` is expected to clear ``args`` when the callback
    actually runs.
    """

    __slots__ = ('callback', 'args')

    def __init__(self, cb, args):
        self.callback = cb
        self.args = args

    def stop(self):
        # Clearing both slots makes this object falsey and not pending.
        self.callback = None
        self.args = None

    close = stop

    # Note that __nonzero__ and pending are different
    # bool() is used in contexts where we need to know whether to schedule another callback,
    # so it's true if it's pending or currently running
    # 'pending' has the same meaning as libev watchers: it is cleared before actually
    # running the callback

    def __bool__(self):
        # it's nonzero if it's pending or currently executing
        # NOTE: This depends on loop._run_callbacks setting the args property
        # to None.
        return self.args is not None

    @property
    def pending(self):
        return self.callback is not None

    def _format(self):
        return ''

    def __repr__(self):
        pieces = ["<%s at 0x%x" % (self.__class__.__name__, id(self))]
        if self.pending:
            pieces.append(" pending")
        if self.callback is not None:
            pieces.append(" callback=%r" % (self.callback, ))
        if self.args is not None:
            pieces.append(" args=%r" % (self.args, ))
        if self.callback is None and self.args is None:
            pieces.append(" stopped")
        return "".join(pieces) + ">"
|
||||
789
venv3_12/Lib/site-packages/gevent/_ffi/loop.py
Normal file
789
venv3_12/Lib/site-packages/gevent/_ffi/loop.py
Normal file
@@ -0,0 +1,789 @@
|
||||
"""
|
||||
Basic loop implementation for ffi-based cores.
|
||||
"""
|
||||
# pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
from collections import deque
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from gevent._ffi import _dbg
|
||||
from gevent._ffi import GEVENT_DEBUG_LEVEL
|
||||
from gevent._ffi import TRACE
|
||||
from gevent._ffi.callback import callback
|
||||
from gevent._compat import PYPY
|
||||
from gevent.exceptions import HubDestroyed
|
||||
|
||||
from gevent import getswitchinterval
|
||||
|
||||
__all__ = [
|
||||
'AbstractLoop',
|
||||
'assign_standard_callbacks',
|
||||
]
|
||||
|
||||
|
||||
class _EVENTSType(object):
|
||||
def __repr__(self):
|
||||
return 'gevent.core.EVENTS'
|
||||
|
||||
EVENTS = GEVENT_CORE_EVENTS = _EVENTSType()
|
||||
|
||||
|
||||
class _DiscardedSet(frozenset):
|
||||
__slots__ = ()
|
||||
|
||||
def discard(self, o):
|
||||
"Does nothing."
|
||||
|
||||
#####
|
||||
## Note on CFFI objects, callbacks and the lifecycle of watcher objects
|
||||
#
|
||||
# Each subclass of `watcher` allocates a C structure of the
|
||||
# appropriate type e.g., struct gevent_ev_io and holds this pointer in
|
||||
# its `_gwatcher` attribute. When that watcher instance is garbage
|
||||
# collected, then the C structure is also freed. The C structure is
|
||||
# passed to libev from the watcher's start() method and then to the
|
||||
# appropriate C callback function, e.g., _gevent_ev_io_callback, which
|
||||
# passes it back to python's _python_callback where we need the
|
||||
# watcher instance. Therefore, as long as that callback is active (the
|
||||
# watcher is started), the watcher instance must not be allowed to get
|
||||
# GC'd---any access at the C level or even the FFI level to the freed
|
||||
# memory could crash the process.
|
||||
#
|
||||
# However, the typical idiom calls for writing something like this:
|
||||
# loop.io(fd, python_cb).start()
|
||||
# thus forgetting the newly created watcher subclass and allowing it to be immediately
|
||||
# GC'd. To combat this, when the watcher is started, it places itself into the loop's
|
||||
# `_keepaliveset`, and it only removes itself when the watcher's `stop()` method is called.
|
||||
# Often, this is the *only* reference keeping the watcher object, and hence its C structure,
|
||||
# alive.
|
||||
#
|
||||
# This is slightly complicated by the fact that the python-level
|
||||
# callback, called from the C callback, could choose to manually stop
|
||||
# the watcher. When we return to the C level callback, we now have an
|
||||
# invalid pointer, and attempting to pass it back to Python (e.g., to
|
||||
# handle an error) could crash. Hence, _python_callback,
|
||||
# _gevent_io_callback, and _python_handle_error cooperate to make sure
|
||||
# that the watcher instance stays in the loops `_keepaliveset` while
|
||||
# the C code could be running---and if it gets removed, to not call back
|
||||
# to Python again.
|
||||
# See also https://github.com/gevent/gevent/issues/676
|
||||
####
|
||||
class AbstractCallbacks(object):
|
||||
|
||||
|
||||
def __init__(self, ffi):
|
||||
self.ffi = ffi
|
||||
self.callbacks = []
|
||||
if GEVENT_DEBUG_LEVEL < TRACE:
|
||||
self.from_handle = ffi.from_handle
|
||||
|
||||
def from_handle(self, handle): # pylint:disable=method-hidden
|
||||
x = self.ffi.from_handle(handle)
|
||||
return x
|
||||
|
||||
    def python_callback(self, handle, revents):
        """
        Returns an integer having one of three values:

        - -1
          An exception occurred during the callback and you must call
          :func:`_python_handle_error` to deal with it. The Python watcher
          object will have the exception tuple saved in ``_exc_info``.
        - 1
          Everything went according to plan. You should check to see if the native
          watcher is still active, and call :func:`python_stop` if it is not. This will
          clean up the memory. Finding the watcher still active at the event loop level,
          but not having stopped itself at the gevent level is a buggy scenario and
          shouldn't happen.
        - 2
          Everything went according to plan, but the watcher has already
          been stopped. Its memory may no longer be valid.

        This function should never return 0, as that's the default value that
        Python exceptions will produce.
        """
        #_dbg("Running callback", handle)
        orig_ffi_watcher = None
        orig_loop = None
        try:
            # Even dereferencing the handle needs to be inside the try/except;
            # if we don't return normally (e.g., a signal) then we wind up going
            # to the 'onerror' handler (unhandled_onerror), which
            # is not what we want; that can permanently wedge the loop depending
            # on which callback was executing.
            # XXX: See comments in that function. We may be able to restart and do better?
            if not handle:
                # Hmm, a NULL handle. That's not supposed to happen.
                # We can easily get into a loop if we deref it and allow that
                # to raise.
                _dbg("python_callback got null handle")
                return 1
            the_watcher = self.from_handle(handle)
            # Remember the native watcher and loop so that, after the user
            # callback runs, we can tell whether it stopped or replaced them.
            orig_ffi_watcher = the_watcher._watcher
            orig_loop = the_watcher.loop
            args = the_watcher.args
            if args is None:
                # Legacy behaviour from corecext: convert None into ()
                # See test__core_watcher.py
                args = _NOARGS
            if args and args[0] == GEVENT_CORE_EVENTS:
                args = (revents, ) + args[1:]
            the_watcher.callback(*args) # None here means we weren't started
        except: # pylint:disable=bare-except
            # It's possible for ``the_watcher`` to be undefined (UnboundLocalError)
            # if we threw an exception (signal) on the line that created that variable.
            # This is typically the case with a signal under libuv
            try:
                the_watcher
            except UnboundLocalError:
                the_watcher = self.from_handle(handle)

            # It may not be safe to do anything with `handle` or `orig_ffi_watcher`
            # anymore. If the watcher closed or stopped itself *before* throwing the exception,
            # then the `handle` and `orig_ffi_watcher` may no longer be valid. Attempting to
            # e.g., dereference the handle is likely to crash the process.
            the_watcher._exc_info = sys.exc_info()

            # If it hasn't been stopped, we need to make sure its
            # memory stays valid so we can stop it at the native level if needed.
            # If its loop is gone, it has already been stopped,
            # see https://github.com/gevent/gevent/issues/1295 for a case where
            # that happened, as well as issue #1482
            if (
                    # The last thing it does. Full successful close.
                    the_watcher.loop is None
                    # Only a partial close. We could leak memory and even crash later.
                    or the_watcher._handle is None
            ):
                # Prevent unhandled_onerror from using the invalid handle
                handle = None
                exc_info = the_watcher._exc_info
                del the_watcher._exc_info
                try:
                    if orig_loop is not None:
                        orig_loop.handle_error(the_watcher, *exc_info)
                    else:
                        self.unhandled_onerror(*exc_info)
                except:
                    print("WARNING: gevent: Error when handling error",
                          file=sys.stderr)
                    traceback.print_exc()
                # Signal that we're closed, no need to do more.
                return 2

            # Keep it around so we can close it later.
            the_watcher.loop._keepaliveset.add(the_watcher)
            return -1

        if (the_watcher.loop is not None
                and the_watcher in the_watcher.loop._keepaliveset
                and the_watcher._watcher is orig_ffi_watcher):
            # The watcher did not stop itself, did not get closed, and did
            # not reset/replace its native watcher. libuv's io watchers
            # multiplex and may do this.

            # The normal, expected scenario when we find the watcher still
            # in the keepaliveset is that it is still active at the event loop
            # level, so we don't expect that python_stop gets called.
            #_dbg("The watcher has not stopped itself, possibly still active", the_watcher)
            return 1
        return 2 # it stopped itself
|
||||
|
||||
    def python_handle_error(self, handle, _revents):
        """
        Called from C after :meth:`python_callback` returned -1: deliver the
        exception saved on the watcher (``_exc_info``) to the loop's error
        handler, then stop the watcher.
        """
        _dbg("Handling error for handle", handle)
        if not handle:
            # NULL handle from C; nothing we can safely dereference.
            return
        try:
            watcher = self.from_handle(handle)
            exc_info = watcher._exc_info
            del watcher._exc_info
            # In the past, we passed the ``watcher`` itself as the context,
            # which typically meant that the Hub would just print
            # the exception. This is a problem because sometimes we can't
            # detect signals until late in ``python_callback``; specifically,
            # test_selectors.py:DefaultSelectorTest.test_select_interrupt_exc
            # installs a SIGALRM handler that raises an exception. That exception can happen
            # before we enter ``python_callback`` or at any point within it because of the way
            # libuv swallows signals. By passing None, we get the exception propagated into
            # the main greenlet (which is probably *also* not what we always want, but
            # I see no way to distinguish the cases).
            watcher.loop.handle_error(None, *exc_info)
        finally:
            # XXX Since we're here on an error condition, and we
            # made sure that the watcher object was put in loop._keepaliveset,
            # what about not stopping the watcher? Looks like a possible
            # memory leak?
            # XXX: This used to do "if revents & (libev.EV_READ | libev.EV_WRITE)"
            # before stopping. Why?
            try:
                watcher.stop()
            except: # pylint:disable=bare-except
                watcher.loop.handle_error(watcher, *sys.exc_info())
            # Deliberately swallow any in-flight exception: letting it
            # escape back into the C/CFFI caller would be unsafe.
            return # pylint:disable=lost-exception,return-in-finally
|
||||
|
||||
    def unhandled_onerror(self, t, v, tb):
        """
        CFFI ``onerror=`` hook of last resort for the standard callbacks.

        Tries to recover the watcher from the raising frame's ``handle``
        local and route the exception to its loop; otherwise prints it.
        """
        # This is supposed to be called for signals, etc.
        # This is the onerror= value for CFFI.
        # If we return None, C will get a value of 0/NULL;
        # if we raise, CFFI will print the exception and then
        # return 0/NULL; (unless error= was configured)
        # If things go as planned, we return the value that asks
        # C to call back and check on if the watcher needs to be closed or
        # not.

        # XXX: TODO: Could this cause events to be lost? Maybe we need to return
        # a value that causes the C loop to try the callback again?
        # at least for signals under libuv, which are delivered at very odd times.
        # Hopefully the event still shows up when we poll the next time.
        watcher = None
        handle = tb.tb_frame.f_locals.get('handle') if tb is not None else None
        if handle: # handle could be NULL
            watcher = self.from_handle(handle)
        if watcher is not None:
            watcher.loop.handle_error(None, t, v, tb)
            return 1

        # Raising it causes a lot of noise from CFFI
        print("WARNING: gevent: Unhandled error with no watcher",
              file=sys.stderr)
        traceback.print_exception(t, v, tb)
|
||||
|
||||
def python_stop(self, handle):
|
||||
if not handle: # pragma: no cover
|
||||
print(
|
||||
"WARNING: gevent: Unable to dereference handle; not stopping watcher. "
|
||||
"Native resources may leak. This is most likely a bug in gevent.",
|
||||
file=sys.stderr)
|
||||
# The alternative is to crash with no helpful information
|
||||
# NOTE: Raising exceptions here does nothing, they're swallowed by CFFI.
|
||||
# Since the C level passed in a null pointer, even dereferencing the handle
|
||||
# will just produce some exceptions.
|
||||
return
|
||||
watcher = self.from_handle(handle)
|
||||
watcher.stop()
|
||||
|
||||
    if not PYPY:
        def python_check_callback(self, watcher_ptr): # pylint:disable=unused-argument
            # If we have the onerror callback, this is a no-op; all the real
            # work to rethrow the exception is done by the onerror callback

            # NOTE: Unlike the rest of the functions, this is called with a pointer
            # to the C level structure, *not* a pointer to the void* that represents a
            # <cdata> for the Python Watcher object.
            pass
    else: # PyPy
        # On PyPy, we need the function to have some sort of body, otherwise
        # the signal exceptions don't always get caught, *especially* with
        # libuv (however, there's no reason to expect this to only be a libuv
        # issue; it's just that we don't depend on the periodic signal timer
        # under libev, so the issue is much more pronounced under libuv)
        # test_socket's test_sendall_interrupted can hang.
        # See https://github.com/gevent/gevent/issues/1112

        def python_check_callback(self, watcher_ptr): # pylint:disable=unused-argument
            # Things we've tried that *don't* work:
            # greenlet.getcurrent()
            # 1 + 1
            # Raising and catching an exception forces the interpreter to
            # process pending signal exceptions here.
            try:
                raise MemoryError()
            except MemoryError:
                pass
|
||||
|
||||
    def python_prepare_callback(self, watcher_ptr):
        """
        Run the owning loop's queued callbacks from the C prepare watcher.
        """
        loop = self._find_loop_from_c_watcher(watcher_ptr)
        if loop is None: # pragma: no cover
            # The native watcher fired for a loop we can no longer locate
            # (presumably destroyed); nothing safe to do but warn.
            print("WARNING: gevent: running prepare callbacks from a destroyed handle: ",
                  watcher_ptr)
            return
        loop._run_callbacks()
|
||||
|
||||
    def check_callback_onerror(self, t, v, tb):
        """
        CFFI ``onerror=`` hook for the check/prepare callbacks: route the
        exception to the owning loop if the raising frame lets us find it.
        """
        loop = None
        watcher_ptr = self._find_watcher_ptr_in_traceback(tb)
        if watcher_ptr:
            loop = self._find_loop_from_c_watcher(watcher_ptr)
        if loop is not None:
            # None as the context argument causes the exception to be raised
            # in the main greenlet.
            loop.handle_error(None, t, v, tb)
            return None
        raise v # Let CFFI print

    def _find_loop_from_c_watcher(self, watcher_ptr):
        # Subclass responsibility: map a C watcher pointer back to the
        # Python loop object that owns it.
        raise NotImplementedError()
|
||||
|
||||
def _find_watcher_ptr_in_traceback(self, tb):
|
||||
return tb.tb_frame.f_locals['watcher_ptr'] if tb is not None else None
|
||||
|
||||
|
||||
def assign_standard_callbacks(ffi, lib, callbacks_class, extras=()): # pylint:disable=unused-argument
    """
    Given the typical *ffi* and *lib* arguments, and a subclass of :class:`AbstractCallbacks`
    in *callbacks_class*, set up the ``def_extern`` Python callbacks from C
    into an instance of *callbacks_class*.

    :param tuple extras: If given, this is a sequence of ``(name, error_function)``
       additional callbacks to register. Each *name* is an attribute of
       the *callbacks_class* instance. (Each element can also be just a *name*.)
    :return: The *callbacks_class* instance. This object must be kept alive,
       typically at module scope.
    """
    # callbacks keeps these cdata objects alive at the python level
    callbacks = callbacks_class(ffi)
    # Normalize *extras*: a bare attribute name (a string) means "no custom
    # error handler". Checking the type instead of ``len(extra) == 2`` keeps
    # a two-character bare name like "io" from being misread as a
    # (name, error) pair and unpacked character-by-character.
    extras = [(extra, None) if isinstance(extra, str) else extra for extra in extras]
    extras = tuple((getattr(callbacks, name), error) for name, error in extras)
    for (func, error_func) in (
            (callbacks.python_callback, None),
            (callbacks.python_handle_error, None),
            (callbacks.python_stop, None),
            (callbacks.python_check_callback, callbacks.check_callback_onerror),
            (callbacks.python_prepare_callback, callbacks.check_callback_onerror)
    ) + extras:
        # The name of the callback function matches the 'extern Python' declaration.
        error_func = error_func or callbacks.unhandled_onerror
        callback = ffi.def_extern(onerror=error_func)(func)
        # keep alive the cdata
        # (def_extern returns the original function, and it requests that
        # the function be "global", so maybe it keeps a hard reference to it somewhere now
        # unlike ffi.callback(), and we don't need to do this?)
        callbacks.callbacks.append(callback)

    # At this point, the library C variable (static function, actually)
    # is filled in.

    return callbacks
|
||||
|
||||
|
||||
|
||||
# Python 2/3 compatibility aliases used for isinstance() checks.
basestring = (bytes, str)
integer_types = (int,)


# Shared empty-argument tuple; python_callback substitutes this when a
# watcher's args is None.
_NOARGS = ()
|
||||
|
||||
|
||||
class AbstractLoop(object):
    """
    Shared implementation of the CFFI-based event loops; backends
    subclass this and supply the native pieces.
    """
    # pylint:disable=too-many-public-methods,too-many-instance-attributes

    # How many callbacks we should run between checking against the
    # switch interval.
    CALLBACK_CHECK_COUNT = 50

    # Optional error-handler object (or callable); see handle_error().
    error_handler = None

    # CFFI pointer-type strings for the auxiliary watchers; subclasses
    # must fill these in.
    _CHECK_POINTER = None

    _TIMER_POINTER = None
    _TIMER_CALLBACK_SIG = None

    _PREPARE_POINTER = None

    # Whether starting a timer may refresh the loop's cached time;
    # toggled on while _run_callbacks() is draining the queue.
    starting_timer_may_update_loop_time = False

    # Subclasses should set this in __init__ to reflect
    # whether they were the default loop.
    _default = None

    # Class-level fallback for after destroy() deletes the instance
    # attribute (presumably a set that discards additions — see destroy()).
    _keepaliveset = _DiscardedSet()

    _threadsafe_async = None
|
||||
|
||||
    def __init__(self, ffi, lib, watchers, flags=None, default=None):
        """
        :param ffi: The backend's CFFI FFI object.
        :param lib: The backend's compiled CFFI library.
        :param watchers: Module/object providing the watcher factories
            (``io``, ``timer``, ``async_``, ...).
        :param flags: Backend-specific loop-creation flags.
        :param default: Whether to create/attach the default loop.
        """
        self._ffi = ffi
        self._lib = lib
        self._ptr = None
        # A cdata handle pointing back at self; assigned as the ``data``
        # of the auxiliary C watchers so callbacks can find this loop.
        self._handle_to_self = self._ffi.new_handle(self) # XXX: Reference cycle?
        self._watchers = watchers
        self._in_callback = False
        # FIFO of pending callback objects queued by run_callback() and
        # drained by _run_callbacks().
        self._callbacks = deque()
        # Stores python watcher objects while they are started
        self._keepaliveset = set()
        self._init_loop_and_aux_watchers(flags, default)
|
||||
|
||||
    def _init_loop_and_aux_watchers(self, flags=None, default=None):
        # Create the native loop plus the check/prepare/timer helpers that
        # keep signal handling and queued callbacks working.
        self._ptr = self._init_loop(flags, default)

        # self._check is a watcher that runs in each iteration of the
        # mainloop, just after the blocking call. Its point is to handle
        # signals. It doesn't run watchers or callbacks, it just exists to give
        # CFFI a chance to raise signal exceptions so we can handle them.
        self._check = self._ffi.new(self._CHECK_POINTER)
        self._check.data = self._handle_to_self
        self._init_and_start_check()

        # self._prepare is a watcher that runs in each iteration of the mainloop,
        # just before the blocking call. It's where we run deferred callbacks
        # from self.run_callback. This cooperates with _setup_for_run_callback()
        # to schedule self._timer0 if needed.
        self._prepare = self._ffi.new(self._PREPARE_POINTER)
        self._prepare.data = self._handle_to_self
        self._init_and_start_prepare()

        # A timer we start and stop on demand. If we have callbacks,
        # too many to run in one iteration of _run_callbacks, we turn this
        # on so as to have the next iteration of the run loop return to us
        # as quickly as possible.
        # TODO: There may be a more efficient way to do this using ev_timer_again;
        # see the "ev_timer" section of the ev manpage (http://linux.die.net/man/3/ev)
        # Alternatively, setting the ev maximum block time may also work.
        self._timer0 = self._ffi.new(self._TIMER_POINTER)
        self._timer0.data = self._handle_to_self
        self._init_callback_timer()

        self._threadsafe_async = self.async_(ref=False)
        # No need to do anything with this on ``fork()``, both libev and libuv
        # take care of creating a new pipe in their respective ``loop_fork()`` methods.
        self._threadsafe_async.start(lambda: None)
        # TODO: We may be able to do something nicer and use the existing python_callback
        # combined with onerror and the class check/timer/prepare to simplify things
        # and unify our handling
|
||||
|
||||
    def _init_loop(self, flags, default):
        """
        Called by __init__ to create or find the loop. The return value
        is assigned to self._ptr.
        """
        raise NotImplementedError()

    def _init_and_start_check(self):
        # Initialize and start self._check; subclass responsibility.
        raise NotImplementedError()

    def _init_and_start_prepare(self):
        # Initialize and start self._prepare; subclass responsibility.
        raise NotImplementedError()

    def _init_callback_timer(self):
        # Initialize (but do not start) self._timer0; subclass responsibility.
        raise NotImplementedError()

    def _stop_callback_timer(self):
        raise NotImplementedError()

    def _start_callback_timer(self):
        raise NotImplementedError()

    def _check_callback_handle_error(self, t, v, tb):
        # Error hook for the check watcher: None context raises into the
        # main greenlet via handle_error.
        self.handle_error(None, t, v, tb)
|
||||
|
||||
    def _run_callbacks(self): # pylint:disable=too-many-branches
        # Drain self._callbacks FIFO, bounded by the switch interval so a
        # storm of queued callbacks cannot starve the rest of the loop.

        # When we're running callbacks, its safe for timers to
        # update the notion of the current time (because if we're here,
        # we're not running in a timer callback that may let other timers
        # run; this is mostly an issue for libuv).

        # That's actually a bit of a lie: on libev, self._timer0 really is
        # a timer, and so sometimes this is running in a timer callback, not
        # a prepare callback. But that's OK, libev doesn't suffer from cascading
        # timer expiration and its safe to update the loop time at any
        # moment there.
        self.starting_timer_may_update_loop_time = True
        try:
            count = self.CALLBACK_CHECK_COUNT
            now = self.now()
            expiration = now + getswitchinterval()
            self._stop_callback_timer()
            while self._callbacks:
                cb = self._callbacks.popleft() # pylint:disable=assignment-from-no-return
                count -= 1
                self.unref() # XXX: libuv doesn't have a global ref count!
                callback = cb.callback
                cb.callback = None
                args = cb.args
                if callback is None or args is None:
                    # it's been stopped
                    continue

                try:
                    callback(*args)
                except: # pylint:disable=bare-except
                    # If we allow an exception to escape this method (while we are running the ev callback),
                    # then CFFI will print the error and libev will continue executing.
                    # There are two problems with this. The first is that the code after
                    # the loop won't run. The second is that any remaining callbacks scheduled
                    # for this loop iteration will be silently dropped; they won't run, but they'll
                    # also not be *stopped* (which is not a huge deal unless you're looking for
                    # consistency or checking the boolean/pending status; the loop doesn't keep
                    # a reference to them like it does to watchers...*UNLESS* the callback itself had
                    # a reference to a watcher; then I don't know what would happen, it depends on
                    # the state of the watcher---a leak or crash is not totally inconceivable).
                    # The Cython implementation in core.ppyx uses gevent_call from callbacks.c
                    # to run the callback, which uses gevent_handle_error to handle any errors the
                    # Python callback raises...it unconditionally simply prints any error raised
                    # by loop.handle_error and clears it, so callback handling continues.
                    # We take a similar approach (but are extra careful about printing)
                    try:
                        self.handle_error(cb, *sys.exc_info())
                    except: # pylint:disable=bare-except
                        try:
                            print("Exception while handling another error", file=sys.stderr)
                            traceback.print_exc()
                        except: # pylint:disable=bare-except
                            pass # Nothing we can do here
                finally:
                    # NOTE: this must be reset here, because cb.args is used as a flag in
                    # the callback class so that bool(cb) of a callback that has been run
                    # becomes False
                    cb.args = None

                # We've finished running one group of callbacks
                # but we may have more, so before looping check our
                # switch interval.
                if count == 0 and self._callbacks:
                    count = self.CALLBACK_CHECK_COUNT
                    self.update_now()
                    if self.now() >= expiration:
                        # Flag (now == 0) that we already refreshed the time.
                        now = 0
                        break

            # Update the time before we start going again, if we didn't
            # just do so.
            if now != 0:
                self.update_now()

            if self._callbacks:
                # Still work queued: arm the timer so the next loop
                # iteration returns to us promptly.
                self._start_callback_timer()
        finally:
            self.starting_timer_may_update_loop_time = False
|
||||
|
||||
    def _stop_aux_watchers(self):
        # Tear down the helper watchers created by _init_loop_and_aux_watchers.
        if self._threadsafe_async is not None:
            self._threadsafe_async.close()
            self._threadsafe_async = None

    def destroy(self):
        """
        Destroy the native loop and release its resources.

        :return: False if ``_can_destroy_loop`` vetoed destruction;
            True otherwise (including when the loop was already gone).
        """
        ptr = self.ptr
        if ptr:
            try:
                if not self._can_destroy_loop(ptr):
                    return False
                self._stop_aux_watchers()
                self._destroy_loop(ptr)
            finally:
                # not ffi.NULL, we don't want something that can be
                # passed to C and crash later. This will create nice friendly
                # TypeError from CFFI.
                self._ptr = None
                # Drop our references so the handle cycle and any pending
                # callbacks/watchers can be collected.
                del self._handle_to_self
                del self._callbacks
                del self._keepaliveset

        return True
|
||||
|
||||
    def _can_destroy_loop(self, ptr):
        # Subclass responsibility: whether destroying *ptr* is safe/allowed.
        raise NotImplementedError()

    def _destroy_loop(self, ptr):
        # Subclass responsibility: free the native loop.
        raise NotImplementedError()

    @property
    def ptr(self):
        # Use this when you need to be sure the pointer is valid.
        return self._ptr

    @property
    def WatcherType(self):
        # The base watcher class for this backend.
        return self._watchers.watcher

    @property
    def MAXPRI(self):
        # Priority bounds; this base implementation has a single priority.
        return 1

    @property
    def MINPRI(self):
        return 1
|
||||
|
||||
    def _handle_syserr(self, message, errno):
        # Backend hook for C-level system errors: fold errno's text into
        # the message, then report it as a SystemError. Printing instead of
        # raising on formatting failure keeps error reporting best-effort.
        try:
            errno = os.strerror(errno)
        except: # pylint:disable=bare-except
            traceback.print_exc()
        try:
            message = '%s: %s' % (message, errno)
        except: # pylint:disable=bare-except
            traceback.print_exc()
        self.handle_error(None, SystemError, SystemError(message), None)

    def handle_error(self, context, type, value, tb):
        """
        Dispatch an exception raised in a watcher/callback to the installed
        error handler (typically the Hub), or print it by default.
        """
        if type is HubDestroyed:
            # The hub is going away: discard remaining callbacks and stop
            # the loop instead of reporting.
            self._callbacks.clear()
            self.break_()
            return

        handle_error = None
        error_handler = self.error_handler
        if error_handler is not None:
            # we do want to do getattr every time so that setting Hub.handle_error property just works
            handle_error = getattr(error_handler, 'handle_error', error_handler)
            handle_error(context, type, value, tb)
        else:
            self._default_handle_error(context, type, value, tb)

    def _default_handle_error(self, context, type, value, tb): # pylint:disable=unused-argument
        # note: Hub sets its own error handler so this is not used by gevent
        # this is here to make core.loop usable without the rest of gevent
        # Should cause the loop to stop running.
        traceback.print_exception(type, value, tb)
|
||||
|
||||
|
||||
    def run(self, nowait=False, once=False):
        # Run the event loop; subclass responsibility.
        raise NotImplementedError()

    def reinit(self):
        # Re-initialize the loop (e.g., after fork); subclass responsibility.
        raise NotImplementedError()

    def ref(self):
        # XXX: libuv doesn't do it this way
        raise NotImplementedError()

    def unref(self):
        raise NotImplementedError()

    def break_(self, how=None):
        # Ask the loop to stop iterating; subclass responsibility.
        raise NotImplementedError()

    def verify(self):
        # Optional backend consistency check; no-op by default.
        pass

    def now(self):
        # The loop's cached current time; subclass responsibility.
        raise NotImplementedError()

    def update_now(self):
        # Refresh the loop's cached time; subclass responsibility.
        raise NotImplementedError()

    def update(self):
        # Deprecated alias for update_now().
        import warnings
        warnings.warn("'update' is deprecated; use 'update_now'",
                      DeprecationWarning,
                      stacklevel=2)
        self.update_now()

    def __repr__(self):
        return '<%s.%s at 0x%x %s>' % (
            self.__class__.__module__,
            self.__class__.__name__,
            id(self),
            self._format()
        )
|
||||
|
||||
    @property
    def default(self):
        # Whether this is the default loop; always False once destroyed.
        return self._default if self.ptr else False

    @property
    def iteration(self):
        # Loop iteration count; -1 when the backend doesn't track it.
        return -1

    @property
    def depth(self):
        # Loop re-entrancy depth; -1 when the backend doesn't track it.
        return -1

    @property
    def backend_int(self):
        # Numeric backend identifier; 0 when not applicable.
        return 0

    @property
    def backend(self):
        # Human-readable backend name.
        return "default"

    @property
    def pendingcnt(self):
        # Number of pending native events; 0 when the backend doesn't track it.
        return 0
|
||||
|
||||
    def io(self, fd, events, ref=True, priority=None):
        # Factory: readiness watcher for file descriptor *fd*.
        return self._watchers.io(self, fd, events, ref, priority)

    def closing_fd(self, fd): # pylint:disable=unused-argument
        # Hook invoked when *fd* is being closed; returns whether the loop
        # did anything with it. Base implementation: nothing to do.
        return False

    def timer(self, after, repeat=0.0, ref=True, priority=None):
        # Factory: timer watcher firing after *after* seconds
        # (repeating every *repeat* seconds if non-zero).
        return self._watchers.timer(self, after, repeat, ref, priority)

    def signal(self, signum, ref=True, priority=None):
        # Factory: signal watcher for *signum*.
        return self._watchers.signal(self, signum, ref, priority)

    def idle(self, ref=True, priority=None):
        return self._watchers.idle(self, ref, priority)

    def prepare(self, ref=True, priority=None):
        return self._watchers.prepare(self, ref, priority)

    def check(self, ref=True, priority=None):
        return self._watchers.check(self, ref, priority)

    def fork(self, ref=True, priority=None):
        return self._watchers.fork(self, ref, priority)

    def async_(self, ref=True, priority=None):
        # Factory: async watcher (cross-thread wakeup).
        return self._watchers.async_(self, ref, priority)

    # Provide BWC for those that can use 'async' as is
    locals()['async'] = async_

    if sys.platform != "win32":

        def child(self, pid, trace=0, ref=True):
            # Factory: child-process watcher (POSIX only).
            return self._watchers.child(self, pid, trace, ref)

        def install_sigchld(self):
            # Hook for backends needing explicit SIGCHLD setup; no-op here.
            pass

    def stat(self, path, interval=0.0, ref=True, priority=None):
        # Factory: file-system stat watcher for *path*.
        return self._watchers.stat(self, path, interval, ref, priority)

    def callback(self, priority=None):
        # Create an (unscheduled) callback object bound to this loop.
        return callback(self, priority)
|
||||
|
||||
    def _setup_for_run_callback(self):
        # Arrange for the loop to wake and run the just-queued callback;
        # subclass responsibility.
        raise NotImplementedError()

    def run_callback(self, func, *args):
        """
        Queue ``func(*args)`` to run on the next loop iteration and return
        the callback object (which can be stopped before it runs).
        """
        # If we happen to already be running callbacks (inside
        # _run_callbacks), this could happen almost immediately,
        # without the loop cycling.
        cb = callback(func, args)
        self._callbacks.append(cb) # Relying on the GIL for this to be threadsafe
        self._setup_for_run_callback() # XXX: This may not be threadsafe.
        return cb

    def run_callback_threadsafe(self, func, *args):
        # Like run_callback, but additionally wakes the loop through the
        # async watcher so it works from another thread.
        cb = self.run_callback(func, *args)
        self._threadsafe_async.send()
        return cb
|
||||
|
||||
    def _format(self):
        # One-line state summary used by __repr__.
        ptr = self.ptr
        if not ptr:
            return 'destroyed'
        msg = "backend=" + self.backend
        msg += ' ptr=' + str(ptr)
        if self.default:
            msg += ' default'
        msg += ' pending=%s' % self.pendingcnt
        msg += self._format_details()
        return msg

    def _format_details(self):
        # Extra repr details that only some backends provide.
        msg = ''
        fileno = self.fileno() # pylint:disable=assignment-from-none
        try:
            activecnt = self.activecnt
        except AttributeError:
            activecnt = None
        if activecnt is not None:
            msg += ' ref=' + repr(activecnt)
        if fileno is not None:
            msg += ' fileno=' + repr(fileno)
        #if sigfd is not None and sigfd != -1:
        #    msg += ' sigfd=' + repr(sigfd)
        msg += ' callbacks=' + str(len(self._callbacks))
        return msg

    def fileno(self):
        # The loop's backing file descriptor, if the backend exposes one.
        return None

    @property
    def activecnt(self):
        # Count of active references keeping the loop alive; base value 0.
        if not self.ptr:
            raise ValueError('operation on destroyed loop')
        return 0
|
||||
644
venv3_12/Lib/site-packages/gevent/_ffi/watcher.py
Normal file
644
venv3_12/Lib/site-packages/gevent/_ffi/watcher.py
Normal file
@@ -0,0 +1,644 @@
|
||||
"""
|
||||
Useful base classes for watchers. The available
|
||||
watchers will depend on the specific event loop.
|
||||
"""
|
||||
# pylint:disable=not-callable
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
import signal as signalmodule
|
||||
import functools
|
||||
import warnings
|
||||
|
||||
from gevent._config import config
|
||||
from gevent._util import LazyOnClass
|
||||
|
||||
try:
    from tracemalloc import get_object_traceback

    def tracemalloc(init):
        # PYTHONTRACEMALLOC env var controls this on Python 3.
        return init
except ImportError: # Python < 3.4

    if config.trace_malloc:
        # Use the same env var to turn this on for Python 2
        import traceback

        class _TB(object):
            # Minimal stand-in for a tracemalloc traceback object.
            __slots__ = ('lines',)

            def __init__(self, lines):
                # These end in newlines, which we don't want for consistency
                self.lines = [x.rstrip() for x in lines]

            def format(self):
                return self.lines

        def tracemalloc(init):
            # Decorator for __init__: capture the allocating stack on the
            # instance so get_object_traceback() can report it.
            @functools.wraps(init)
            def traces(self, *args, **kwargs):
                init(self, *args, **kwargs)
                self._captured_malloc = _TB(traceback.format_stack())
            return traces

        def get_object_traceback(obj):
            return obj._captured_malloc

    else:
        # Tracing disabled: both helpers become no-ops.
        def get_object_traceback(_obj):
            return None

        def tracemalloc(init):
            return init
|
||||
|
||||
from gevent._compat import fsencode
|
||||
|
||||
from gevent._ffi import _dbg # pylint:disable=unused-import
|
||||
from gevent._ffi import GEVENT_DEBUG_LEVEL
|
||||
from gevent._ffi import DEBUG
|
||||
from gevent._ffi.loop import GEVENT_CORE_EVENTS
|
||||
from gevent._ffi.loop import _NOARGS
|
||||
|
||||
# Watcher classes may only define __del__ when running at DEBUG level or
# higher (enforced by AbstractWatcherType.__new__).
ALLOW_WATCHER_DEL = GEVENT_DEBUG_LEVEL >= DEBUG

# This helper module deliberately exports nothing.
__all__ = [

]

try:
    ResourceWarning # pylint:disable=used-before-assignment
except NameError:
    class ResourceWarning(Warning):
        "Python 2 fallback"

class _NoWatcherResult(int):
    # Falsey sentinel returned by @only_if_watcher-guarded methods when
    # the native watcher no longer exists.

    def __repr__(self):
        return "<NoWatcher>"

# Replace the class with its singleton instance.
_NoWatcherResult = _NoWatcherResult(0)
|
||||
|
||||
def events_to_str(event_field, all_events):
    """
    Render the bitmask *event_field* as a ``|``-joined string, using the
    ``(flag, name)`` pairs in *all_events*. Bits not covered by any known
    flag are appended as a single hex value.
    """
    names = []
    remaining = event_field
    for flag, name in all_events:
        if not remaining & flag:
            continue
        names.append(name)
        remaining &= ~flag
        if not remaining:
            # All bits accounted for; no need to scan further flags.
            break
    if remaining:
        names.append(hex(remaining))
    return '|'.join(names)
|
||||
|
||||
|
||||
def not_while_active(func):
    """
    Decorator: raise :exc:`ValueError` when the watcher is active at call
    time; otherwise invoke *func* (its return value is discarded).
    """
    @functools.wraps(func)
    def guarded(self, *args, **kwargs):
        if not self.active:
            func(self, *args, **kwargs)
            return
        raise ValueError("not while active")
    return guarded
|
||||
|
||||
def only_if_watcher(func):
    """
    Decorator for nullary methods: call *func* only when ``self._watcher``
    still exists; otherwise return the falsey ``_NoWatcherResult`` sentinel.
    """
    @functools.wraps(func)
    def guarded(self):
        if not self._watcher:
            return _NoWatcherResult
        return func(self)
    return guarded
|
||||
|
||||
|
||||
class AbstractWatcherType(type):
|
||||
"""
|
||||
Base metaclass for watchers.
|
||||
|
||||
To use, you will:
|
||||
|
||||
- subclass the watcher class defined from this type.
|
||||
- optionally subclass this type
|
||||
"""
|
||||
# pylint:disable=bad-mcs-classmethod-argument
|
||||
|
||||
_FFI = None
|
||||
_LIB = None
|
||||
|
||||
def __new__(cls, name, bases, cls_dict):
|
||||
if name != 'watcher' and not cls_dict.get('_watcher_skip_ffi'):
|
||||
cls._fill_watcher(name, bases, cls_dict)
|
||||
if '__del__' in cls_dict and not ALLOW_WATCHER_DEL: # pragma: no cover
|
||||
raise TypeError("CFFI watchers are not allowed to have __del__")
|
||||
return type.__new__(cls, name, bases, cls_dict)
|
||||
|
||||
    @classmethod
    def _fill_watcher(cls, name, bases, cls_dict):
        """
        Populate *cls_dict* with lazily-resolved FFI plumbing for the
        watcher class named *name*.

        From the class name and the ``_watcher_prefix`` this derives:

        - ``_watcher_type`` (e.g. ``ev_io``),
        - lazy ``_watcher_is_active`` / ``_watcher_struct_pointer_type`` /
          ``_watcher_callback`` class attributes, and
        - lazy ``_watcher_start`` / ``_watcher_stop`` / ``_watcher_init``
          accessors resolving the corresponding C library functions.
        """
        # TODO: refactor smaller
        # pylint:disable=too-many-locals
        if name.endswith('_'):
            # Strip trailing _ added to avoid keyword duplications
            # e.g., async_
            name = name[:-1]

        def _mro_get(attr, bases, error=True):
            # Manual MRO walk: *bases* are plain classes while cls_dict is
            # not a class yet, so checking each base in order stands in for
            # normal attribute resolution.
            for b in bases:
                try:
                    return getattr(b, attr)
                except AttributeError:
                    continue
            if error: # pragma: no cover
                raise AttributeError(attr)
        _watcher_prefix = cls_dict.get('_watcher_prefix') or _mro_get('_watcher_prefix', bases)

        if '_watcher_type' not in cls_dict:
            watcher_type = _watcher_prefix + '_' + name
            cls_dict['_watcher_type'] = watcher_type
        elif not cls_dict['_watcher_type'].startswith(_watcher_prefix):
            watcher_type = _watcher_prefix + '_' + cls_dict['_watcher_type']
            cls_dict['_watcher_type'] = watcher_type
        # NOTE(review): when ``_watcher_type`` is given and already carries the
        # prefix, ``watcher_type`` stays unbound; computing
        # ``watcher_struct_name`` below would then raise NameError unless
        # ``_watcher_struct_name`` is also supplied. Presumably every such
        # subclass supplies it — confirm before relying on this path.

        active_name = _watcher_prefix + '_is_active'

        def _watcher_is_active(self):
            return getattr(self._LIB, active_name)

        LazyOnClass.lazy(cls_dict, _watcher_is_active)

        watcher_struct_name = cls_dict.get('_watcher_struct_name')
        if not watcher_struct_name:
            watcher_struct_pattern = (cls_dict.get('_watcher_struct_pattern')
                                      or _mro_get('_watcher_struct_pattern', bases, False)
                                      or 'struct %s')
            watcher_struct_name = watcher_struct_pattern % (watcher_type,)

        def _watcher_struct_pointer_type(self):
            # The cffi ctype for a pointer to this watcher's C struct.
            return self._FFI.typeof(watcher_struct_name + ' *')

        LazyOnClass.lazy(cls_dict, _watcher_struct_pointer_type)

        callback_name = (cls_dict.get('_watcher_callback_name')
                         or _mro_get('_watcher_callback_name', bases, False)
                         or '_gevent_generic_callback')

        def _watcher_callback(self):
            # Address of the C-level callback the library invokes.
            return self._FFI.addressof(self._LIB, callback_name)

        LazyOnClass.lazy(cls_dict, _watcher_callback)

        def _make_meth(name, watcher_name):
            # Build a lazy accessor (installed under *watcher_name*, e.g.
            # ``_watcher_start``) resolving e.g. ``LIB.ev_io_start``.
            def meth(self):
                lib_name = self._watcher_type + '_' + name
                return getattr(self._LIB, lib_name)
            meth.__name__ = watcher_name
            return meth

        for meth_name in 'start', 'stop', 'init':
            watcher_name = '_watcher' + '_' + meth_name
            if watcher_name not in cls_dict:
                LazyOnClass.lazy(cls_dict, _make_meth(meth_name, watcher_name))
|
||||
|
||||
    def new_handle(cls, obj):
        """Return a new ``ffi`` handle (opaque cdata) wrapping *obj*."""
        return cls._FFI.new_handle(obj)
|
||||
|
||||
    def new(cls, kind):
        """Allocate new cdata of ctype *kind* via this class's FFI."""
        return cls._FFI.new(kind)
|
||||
|
||||
class watcher(object):
    """
    Abstract base for CFFI-backed event watchers.

    Couples a native watcher struct (``self._watcher``) with the Python
    ``callback`` and ``args`` to run when the event fires. The
    ``_watcher_*`` plumbing is supplied by :class:`AbstractWatcherType`.
    Instances must be explicitly :meth:`close`\\d to release the native
    object.
    """

    _callback = None
    _args = None
    _watcher = None
    # self._handle has a reference to self, keeping it alive.
    # We must keep self._handle alive for ffi.from_handle() to be
    # able to work. We only fill this in when we are started,
    # and when we are stopped we destroy it.
    # NOTE: This is a GC cycle, so we keep it around for as short
    # as possible.
    _handle = None

    @tracemalloc
    def __init__(self, _loop, ref=True, priority=None, args=_NOARGS):
        self.loop = _loop
        # Save the construction arguments so the native watcher can be
        # rebuilt later via _watcher_full_init().
        self.__init_priority = priority
        self.__init_args = args
        self.__init_ref = ref
        self._watcher_full_init()


    def _watcher_full_init(self):
        # (Re)create and initialize the underlying C watcher from the
        # saved construction arguments.
        priority = self.__init_priority
        ref = self.__init_ref
        args = self.__init_args

        self._watcher_create(ref)

        if priority is not None:
            self._watcher_ffi_set_priority(priority)

        try:
            self._watcher_ffi_init(args)
        except:
            # Let these be GC'd immediately.
            # If we keep them around to when *we* are gc'd,
            # they're probably invalid, meaning any native calls
            # we do then to close() them are likely to fail
            self._watcher = None
            raise
        self._watcher_ffi_set_init_ref(ref)

    @classmethod
    def _watcher_ffi_close(cls, ffi_watcher):
        # Hook for subclasses to release native resources of *ffi_watcher*.
        pass

    def _watcher_create(self, ref): # pylint:disable=unused-argument
        self._watcher = self._watcher_new()

    def _watcher_new(self):
        return type(self).new(self._watcher_struct_pointer_type) # pylint:disable=no-member

    def _watcher_ffi_set_init_ref(self, ref):
        pass

    def _watcher_ffi_set_priority(self, priority):
        pass

    def _watcher_ffi_init(self, args):
        raise NotImplementedError()

    def _watcher_ffi_start(self):
        raise NotImplementedError()

    def _watcher_ffi_stop(self):
        self._watcher_stop(self.loop.ptr, self._watcher)

    def _watcher_ffi_ref(self):
        raise NotImplementedError()

    def _watcher_ffi_unref(self):
        raise NotImplementedError()

    def _watcher_ffi_start_unref(self):
        # While a watcher is active, we don't keep it
        # referenced. This allows a timer, for example, to be started,
        # and still allow the loop to end if there is nothing
        # else to do. see test__order.TestSleep0 for one example.
        self._watcher_ffi_unref()

    def _watcher_ffi_stop_ref(self):
        self._watcher_ffi_ref()

    # A string identifying the type of libev object we watch, e.g., 'ev_io'
    # This should be a class attribute.
    _watcher_type = None
    # A class attribute that is the callback on the libev object that init's the C struct,
    # e.g., libev.ev_io_init. If None, will be set by _init_subclasses.
    _watcher_init = None
    # A class attribute that is the callback on the libev object that starts the C watcher,
    # e.g., libev.ev_io_start. If None, will be set by _init_subclasses.
    _watcher_start = None
    # A class attribute that is the callback on the libev object that stops the C watcher,
    # e.g., libev.ev_io_stop. If None, will be set by _init_subclasses.
    _watcher_stop = None
    # A cffi ctype object identifying the struct pointer we create.
    # This is a class attribute set based on the _watcher_type
    _watcher_struct_pointer_type = None
    # The attribute of the libev object identifying the custom
    # callback function for this type of watcher. This is a class
    # attribute set based on the _watcher_type in _init_subclasses.
    _watcher_callback = None
    _watcher_is_active = None

    def close(self):
        # Idempotently stop the watcher, clear its native data pointer,
        # release the native object and break the link to the loop.
        if self._watcher is None:
            return

        self.stop()
        _watcher = self._watcher
        self._watcher = None
        self._watcher_set_data(_watcher, self._FFI.NULL) # pylint: disable=no-member
        self._watcher_ffi_close(_watcher)
        self.loop = None

    def _watcher_set_data(self, the_watcher, data):
        # This abstraction exists for the sole benefit of
        # libuv.watcher.stat, which "subclasses" uv_handle_t.
        # Can we do something to avoid this extra function call?
        the_watcher.data = data
        return data

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    if ALLOW_WATCHER_DEL:
        def __del__(self):
            # A watcher collected without close() leaks the native object;
            # warn loudly (with an allocation traceback when available).
            if self._watcher:
                tb = get_object_traceback(self)
                tb_msg = ''
                if tb is not None:
                    tb_msg = '\n'.join(tb.format())
                    tb_msg = '\nTraceback:\n' + tb_msg
                warnings.warn("Failed to close watcher %r%s" % (self, tb_msg),
                              ResourceWarning)

                # may fail if __init__ did; will be harmlessly printed
                self.close()

    # Re-entrancy guard for __repr__ (see comment below).
    __in_repr = False

    def __repr__(self):
        basic = "<%s at 0x%x" % (self.__class__.__name__, id(self))
        if self.__in_repr:
            return basic + '>'
        # Running child watchers have been seen to have a
        # recursive repr in ``self.args``, thanks to ``gevent.os.fork_and_watch``
        # passing the watcher as an argument to its callback.
        self.__in_repr = True
        try:
            result = '%s%s' % (basic, self._format())
            if self.pending:
                result += " pending"
            if self.callback is not None:
                fself = getattr(self.callback, '__self__', None)
                if fself is self:
                    result += " callback=<bound method %s of self>" % (self.callback.__name__)
                else:
                    result += " callback=%r" % (self.callback, )
            if self.args is not None:
                result += " args=%r" % (self.args, )
            if self.callback is None and self.args is None:
                result += " stopped"
            result += " watcher=%s" % (self._watcher)
            result += " handle=%s" % (self._watcher_handle)
            result += " ref=%s" % (self.ref)
            return result + ">"
        finally:
            self.__in_repr = False

    @property
    def _watcher_handle(self):
        # The handle stored on the native struct, or None when closed.
        if self._watcher:
            return self._watcher.data

    def _format(self):
        # Extra per-type details for __repr__; subclasses override.
        return ''

    @property
    def ref(self):
        raise NotImplementedError()

    def _get_callback(self):
        return self._callback if '_callback' in self.__dict__ else None

    def _set_callback(self, cb):
        if not callable(cb) and cb is not None:
            raise TypeError("Expected callable, not %r" % (cb, ))
        if cb is None:
            # Remove the instance attribute so the class-level default
            # (None) shows through again.
            if '_callback' in self.__dict__:
                del self._callback
        else:
            self._callback = cb
    callback = property(_get_callback, _set_callback)

    def _get_args(self):
        return self._args

    def _set_args(self, args):
        if not isinstance(args, tuple) and args is not None:
            raise TypeError("args must be a tuple or None")
        if args is None:
            if '_args' in self.__dict__:
                del self._args
        else:
            self._args = args

    args = property(_get_args, _set_args)

    def start(self, callback, *args):
        if callback is None:
            raise TypeError('callback must be callable, not None')
        self.callback = callback
        self.args = args or _NOARGS
        # While started, the loop's keepalive set (plus the handle stored
        # on the native struct) keeps us alive.
        self.loop._keepaliveset.add(self)
        self._handle = self._watcher_set_data(self._watcher, type(self).new_handle(self)) # pylint:disable=no-member
        self._watcher_ffi_start()
        self._watcher_ffi_start_unref()

    def stop(self):
        if self.callback is None:
            assert self.loop is None or self not in self.loop._keepaliveset
            return
        self.callback = None
        # Only after setting the signal to make this idempotent do
        # we move ahead.
        self._watcher_ffi_stop_ref()
        self._watcher_ffi_stop()
        self.loop._keepaliveset.discard(self)
        self._handle = None
        self._watcher_set_data(self._watcher, self._FFI.NULL) # pylint:disable=no-member

        self.args = None

    def _get_priority(self):
        return None

    @not_while_active
    def _set_priority(self, priority):
        pass

    priority = property(_get_priority, _set_priority)


    @property
    def active(self):
        if self._watcher is not None and self._watcher_is_active(self._watcher):
            return True
        return False

    @property
    def pending(self):
        return False

# Rebuild the class through the metaclass so the FFI plumbing applies to
# subclasses defined from it.
watcher = AbstractWatcherType('watcher', (object,), dict(watcher.__dict__))
|
||||
|
||||
class IoMixin(object):
    """Shared logic for file-descriptor IO watchers."""

    # Bitmask of events a concrete implementation accepts; subclasses
    # override this.
    EVENT_MASK = 0

    def __init__(self, loop, fd, events, ref=True, priority=None, _args=None):
        # Win32 only works with sockets, and only when we use libuv, because
        # we don't use _open_osfhandle. See libuv/watchers.py:io for a description.
        if fd < 0:
            raise ValueError('fd must be non-negative: %r' % fd)
        if events & ~self.EVENT_MASK:
            raise ValueError('illegal event mask: %r' % events)
        self._fd = fd
        init_args = _args if _args else (fd, events)
        super(IoMixin, self).__init__(loop, ref=ref, priority=priority,
                                      args=init_args)

    def start(self, callback, *args, **kw):
        call_args = args if args else _NOARGS
        if kw.get('pass_events'):
            # Prepend the sentinel telling the callback which events fired.
            call_args = (GEVENT_CORE_EVENTS,) + call_args
        super(IoMixin, self).start(callback, *call_args)

    def _format(self):
        return ' fd=%d' % self._fd
|
||||
|
||||
class TimerMixin(object):
    """Shared logic for one-shot/repeating timer watchers."""

    _watcher_type = 'timer'

    def __init__(self, loop, after=0.0, repeat=0.0, ref=True, priority=None):
        if repeat < 0.0:
            raise ValueError("repeat must be positive or zero: %r" % repeat)
        self._after, self._repeat = after, repeat
        super(TimerMixin, self).__init__(loop, ref=ref, priority=priority, args=(after, repeat))

    def start(self, callback, *args, **kw):
        update = kw.get("update", self.loop.starting_timer_may_update_loop_time)
        if update:
            # Quoth the libev doc: "This is a costly operation and is
            # usually done automatically within ev_run(). This
            # function is rarely useful, but when some event callback
            # runs for a very long time without entering the event
            # loop, updating libev's idea of the current time is a
            # good idea."
            #
            # 1.3 changed the default for this to False *unless* the loop is
            # running a callback; see libuv for details. Note that
            # starting Timeout objects still sets this to true.
            self.loop.update_now()
        super(TimerMixin, self).start(callback, *args)

    def again(self, callback, *args, **kw):
        # Restarting a repeating timer is implementation-specific.
        raise NotImplementedError()
|
||||
|
||||
|
||||
class SignalMixin(object):
    """Shared logic for signal watchers."""

    _watcher_type = 'signal'

    def __init__(self, loop, signalnum, ref=True, priority=None):
        if not 1 <= signalnum < signalmodule.NSIG:
            raise ValueError('illegal signal number: %r' % signalnum)
        # still possible to crash on one of libev's asserts:
        # 1) "libev: ev_signal_start called with illegal signal number"
        #    EV_NSIG might be different from signal.NSIG on some platforms
        # 2) "libev: a signal must not be attached to two different loops"
        #    we probably could check that in LIBEV_EMBED mode, but not in general
        self._signalnum = signalnum
        super(SignalMixin, self).__init__(loop, ref=ref, priority=priority, args=(signalnum, ))
|
||||
|
||||
|
||||
class IdleMixin(object):
    """Marker mixin identifying the underlying C watcher type as ``idle``."""

    _watcher_type = 'idle'
|
||||
|
||||
|
||||
class PrepareMixin(object):
    """Marker mixin identifying the underlying C watcher type as ``prepare``."""

    _watcher_type = 'prepare'
|
||||
|
||||
|
||||
class CheckMixin(object):
    """Marker mixin identifying the underlying C watcher type as ``check``."""

    _watcher_type = 'check'
|
||||
|
||||
|
||||
class ForkMixin(object):
    """Marker mixin identifying the underlying C watcher type as ``fork``."""

    _watcher_type = 'fork'
|
||||
|
||||
|
||||
class AsyncMixin(object):
    """Shared logic for cross-thread 'async' wakeup watchers."""

    _watcher_type = 'async'

    def send(self):
        # Concrete loop implementations must provide the actual wakeup.
        raise NotImplementedError()

    def send_ignoring_arg(self, _ignored):
        """
        Accept (and discard) a single argument so this method is
        call-compatible with ``greenlet.switch(arg)``, as used by waiters
        that have ``rawlink``.

        This is an advanced method, not usually needed.
        """
        return self.send()

    @property
    def pending(self):
        raise NotImplementedError()
|
||||
|
||||
|
||||
class ChildMixin(object):
    """Shared logic for child-process (SIGCHLD) watchers."""

    # hack for libuv which doesn't extend watcher
    _CALL_SUPER_INIT = True

    # The pid reported by waitpid(), once the child has been reaped.
    _rpid = None
    # The exit status reported by waitpid().
    _rstatus = 0

    def __init__(self, loop, pid, trace=0, ref=True):
        if not loop.default:
            raise TypeError('child watchers are only available on the default loop')
        loop.install_sigchld()
        self._pid = pid
        if self._CALL_SUPER_INIT:
            super(ChildMixin, self).__init__(loop, ref=ref, args=(pid, trace))

    def _format(self):
        return ' pid=%r rstatus=%r' % (self.pid, self.rstatus)

    @property
    def pid(self):
        """The pid this watcher was created to watch."""
        return self._pid

    @property
    def rpid(self):
        # The received pid, the result of the waitpid() call.
        return self._rpid

    @property
    def rstatus(self):
        return self._rstatus
|
||||
|
||||
class StatMixin(object):
    # Shared logic for file-change (stat) watchers.

    @staticmethod
    def _encode_path(path):
        # Encode *path* to filesystem bytes for the C layer.
        return fsencode(path)

    def __init__(self, _loop, path, interval=0.0, ref=True, priority=None):
        # Store the encoded path in the same attribute that corecext does
        self._paths = self._encode_path(path)

        # Keep the original path to avoid re-encoding, especially on Python 3
        self._path = path

        # Although CFFI would automatically convert a bytes object into a char* when
        # calling ev_stat_init(..., char*, ...), on PyPy the char* pointer is not
        # guaranteed to live past the function call. On CPython, only with a constant/interned
        # bytes object is the pointer guaranteed to last past the function call. (And since
        # Python 3 is pretty much guaranteed to produce a newly-encoded bytes object above, that's
        # rarely the case.) Therefore, we must keep a reference to the produced cdata object
        # so that the struct ev_stat_watcher's `path` pointer doesn't become invalid/deallocated
        self._cpath = self._FFI.new('char[]', self._paths)

        self._interval = interval
        super(StatMixin, self).__init__(_loop, ref=ref, priority=priority,
                                        args=(self._cpath,
                                              interval))

    @property
    def path(self):
        # The path exactly as given at construction (not the encoded bytes).
        return self._path

    @property
    def attr(self):
        # Current stat result; concrete loop implementations provide this.
        raise NotImplementedError

    @property
    def prev(self):
        # Previous stat result; concrete loop implementations provide this.
        raise NotImplementedError

    @property
    def interval(self):
        # The polling interval (seconds) given at construction.
        return self._interval
|
||||
697
venv3_12/Lib/site-packages/gevent/_fileobjectcommon.py
Normal file
697
venv3_12/Lib/site-packages/gevent/_fileobjectcommon.py
Normal file
@@ -0,0 +1,697 @@
|
||||
"""
|
||||
gevent internals.
|
||||
"""
|
||||
from __future__ import absolute_import, print_function, division
|
||||
|
||||
try:
|
||||
from errno import EBADF
|
||||
except ImportError:
|
||||
EBADF = 9
|
||||
|
||||
import io
|
||||
import functools
|
||||
import sys
|
||||
import os
|
||||
|
||||
from gevent.hub import _get_hub_noargs as get_hub
|
||||
from gevent._compat import integer_types
|
||||
from gevent._compat import reraise
|
||||
from gevent._compat import fspath
|
||||
from gevent.lock import Semaphore, DummySemaphore
|
||||
|
||||
class cancel_wait_ex(IOError):
    """Raised on a blocked operation when its descriptor is closed from another greenlet."""

    def __init__(self):
        super(cancel_wait_ex, self).__init__(
            EBADF,
            'File descriptor was closed in another greenlet')
|
||||
|
||||
class FileObjectClosed(IOError):
    """Raised when an operation is attempted on a closed FileObject."""

    def __init__(self):
        super(FileObjectClosed, self).__init__(
            EBADF,
            'Bad file descriptor (FileObject was closed)')
|
||||
|
||||
class UniversalNewlineBytesWrapper(io.TextIOWrapper):
    """
    Decode universal newlines through TextIOWrapper, but hand the results
    back as bytes.

    This is for Python 2 where the 'rU' mode did that.
    """

    mode = None

    def __init__(self, fobj, line_buffering):
        # latin-1 round-trips every byte value 0-255 unchanged.
        super(UniversalNewlineBytesWrapper, self).__init__(
            fobj, encoding='latin-1', newline=None,
            line_buffering=line_buffering)

    def read(self, *args, **kwargs):
        text = super(UniversalNewlineBytesWrapper, self).read(*args, **kwargs)
        return text.encode('latin-1')

    def readline(self, limit=-1):
        text = super(UniversalNewlineBytesWrapper, self).readline(limit)
        return text.encode('latin-1')

    def __iter__(self):
        # readlines() is implemented in terms of __iter__, and
        # TextIOWrapper.__iter__ checks that readline returns a unicode
        # object — ours returns bytes — so iterate through our readline.
        return self

    def __next__(self):
        line = self.readline()
        if line:
            return line
        raise StopIteration

    next = __next__
|
||||
|
||||
|
||||
class FlushingBufferedWriter(io.BufferedWriter):
    """A ``BufferedWriter`` that flushes after every write."""

    def write(self, b):
        count = super(FlushingBufferedWriter, self).write(b)
        self.flush()
        return count
|
||||
|
||||
|
||||
class WriteallMixin(object):
    """Mixin adding a sendall-like ``writeall`` on top of a possibly-short ``write``."""

    def writeall(self, value):
        """
        Similar to :meth:`socket.socket.sendall`, ensures that all the contents of
        *value* have been written (though not necessarily flushed) before returning.

        Returns the length of *value*.

        .. versionadded:: 20.12.0
        """
        # Do we need to play the same get_memory games we do with sockets?
        # And what about chunking for large values? See _socketcommon.py
        write = super(WriteallMixin, self).write

        total = len(value)
        remaining = value
        while remaining:
            size = len(remaining)
            written = write(remaining)
            if written == size:
                break
            remaining = remaining[written:]
        return total
|
||||
|
||||
|
||||
class FileIO(io.FileIO):
    """An ``io.FileIO`` whose ``__class__`` may be reassigned dynamically."""
    __slots__ = ()
|
||||
|
||||
|
||||
class WriteIsWriteallMixin(WriteallMixin):
    """A :class:`WriteallMixin` whose plain ``write`` always writes everything."""

    def write(self, value):
        return self.writeall(value)
|
||||
|
||||
|
||||
class WriteallFileIO(WriteIsWriteallMixin, io.FileIO):
    """A ``FileIO`` whose ``write`` guarantees the entire value is written."""
|
||||
|
||||
|
||||
class OpenDescriptor(object): # pylint:disable=too-many-instance-attributes
    """
    Interprets the arguments to `open`. Internal use only.

    Originally based on code in the stdlib's _pyio.py (Python implementation of
    the :mod:`io` module), but modified for gevent:

    - Native strings are returned on Python 2 when neither
      'b' nor 't' are in the mode string and no encoding is specified.
    - Universal newlines work in that mode.
    - Allows externally unbuffered text IO.

    :keyword bool atomic_write: If true, then if the opened, wrapped, stream
        is unbuffered (meaning that ``write`` can produce short writes and the return
        value needs to be checked), then the implementation will be adjusted so that
        ``write`` behaves like Python 2 on a built-in file object and writes the
        entire value. Only set this on Python 2; the only intended user is
        :class:`gevent.subprocess.Popen`.
    """

    @staticmethod
    def _collapse_arg(pref_name, preferred_val, old_name, old_val, default):
        # Merge a new-style keyword with its legacy alias: at most one of the
        # two may be supplied; fall back to *default* when neither is.
        # We could play tricks with the callers ``locals()`` to avoid having to specify
        # the name (which we only use for error handling) but ``locals()`` may be slow and
        # inhibit JIT (on PyPy), so we just write it out long hand.
        if preferred_val is not None and old_val is not None:
            raise TypeError("Cannot specify both %s=%s and %s=%s" % (
                pref_name, preferred_val,
                old_name, old_val
            ))
        if preferred_val is None and old_val is None:
            return default
        return preferred_val if preferred_val is not None else old_val

    def __init__(self, fobj, mode='r', bufsize=None, close=None,
                 encoding=None, errors=None, newline=None,
                 buffering=None, closefd=None,
                 atomic_write=False):
        # Based on code in the stdlib's _pyio.py from 3.8.
        # pylint:disable=too-many-locals,too-many-branches,too-many-statements

        # Legacy aliases: ``close`` -> ``closefd``, ``bufsize`` -> ``buffering``.
        closefd = self._collapse_arg('closefd', closefd, 'close', close, True)
        del close
        buffering = self._collapse_arg('buffering', buffering, 'bufsize', bufsize, -1)
        del bufsize

        if not hasattr(fobj, 'fileno'):
            if not isinstance(fobj, integer_types):
                # Not a fd. Support PathLike on Python 2 and Python <= 3.5.
                fobj = fspath(fobj)
            if not isinstance(fobj, (str, bytes) + integer_types): # pragma: no cover
                raise TypeError("invalid file: %r" % fobj)
            if isinstance(fobj, (str, bytes)):
                # We will be opening the path ourselves, so we own the fd.
                closefd = True

        if not isinstance(mode, str):
            raise TypeError("invalid mode: %r" % mode)
        if not isinstance(buffering, integer_types):
            raise TypeError("invalid buffering: %r" % buffering)
        if encoding is not None and not isinstance(encoding, str):
            raise TypeError("invalid encoding: %r" % encoding)
        if errors is not None and not isinstance(errors, str):
            raise TypeError("invalid errors: %r" % errors)

        # Reject unknown mode characters and duplicates.
        modes = set(mode)
        if modes - set("axrwb+tU") or len(mode) > len(modes):
            raise ValueError("invalid mode: %r" % mode)

        creating = "x" in modes
        reading = "r" in modes
        writing = "w" in modes
        appending = "a" in modes
        updating = "+" in modes
        text = "t" in modes
        binary = "b" in modes
        universal = 'U' in modes

        can_write = creating or writing or appending or updating

        if universal:
            if can_write:
                raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'")
            # Just because the stdlib deprecates this, no need for us to do so as well.
            # Especially not while we still support Python 2.
            # import warnings
            # warnings.warn("'U' mode is deprecated",
            #               DeprecationWarning, 4)
            reading = True
        if text and binary:
            raise ValueError("can't have text and binary mode at once")
        if creating + reading + writing + appending > 1:
            raise ValueError("can't have read/write/append mode at once")
        if not (creating or reading or writing or appending):
            raise ValueError("must have exactly one of read/write/append mode")
        if binary and encoding is not None:
            raise ValueError("binary mode doesn't take an encoding argument")
        if binary and errors is not None:
            raise ValueError("binary mode doesn't take an errors argument")
        if binary and newline is not None:
            raise ValueError("binary mode doesn't take a newline argument")
        if binary and buffering == 1:
            import warnings
            warnings.warn("line buffering (buffering=1) isn't supported in binary "
                          "mode, the default buffer size will be used",
                          RuntimeWarning, 4)

        self._fobj = fobj
        # The mode string for the underlying FileIO: no 't'/'b'/'U'.
        self.fileio_mode = (
            (creating and "x" or "")
            + (reading and "r" or "")
            + (writing and "w" or "")
            + (appending and "a" or "")
            + (updating and "+" or "")
        )
        self.mode = self.fileio_mode + ('t' if text else '') + ('b' if binary else '')

        self.creating = creating
        self.reading = reading
        self.writing = writing
        self.appending = appending
        self.updating = updating
        self.text = text
        self.binary = binary
        self.can_write = can_write
        self.can_read = reading or updating
        # "Native" mode: neither 't' nor 'b' requested and no codec options,
        # i.e., the Python-2-style default behavior described in the class doc.
        self.native = (
            not self.text and not self.binary # Neither t nor b given.
            and not encoding and not errors # And no encoding or error handling either.
        )
        self.universal = universal

        self.buffering = buffering
        self.encoding = encoding
        self.errors = errors
        self.newline = newline
        self.closefd = closefd
        self.atomic_write = atomic_write

    default_buffer_size = io.DEFAULT_BUFFER_SIZE

    # Caches for opened()/opened_raw().
    _opened = None
    _opened_raw = None

    def is_fd(self):
        # True when we were handed a raw integer file descriptor.
        return isinstance(self._fobj, integer_types)

    def opened(self):
        """
        Return the :meth:`wrapped` file object.
        """
        if self._opened is None:
            raw = self.opened_raw()
            try:
                self._opened = self.__wrapped(raw)
            except:
                # XXX: This might be a bug? Could we wind up closing
                # something we shouldn't close?
                raw.close()
                raise
        return self._opened

    def _raw_object_is_new(self, raw):
        # Did we create *raw* ourselves, as opposed to being handed it?
        return self._fobj is not raw

    def opened_raw(self):
        if self._opened_raw is None:
            self._opened_raw = self._do_open_raw()
        return self._opened_raw

    def _do_open_raw(self):
        if hasattr(self._fobj, 'fileno'):
            return self._fobj
        # io.FileIO doesn't allow assigning to its __class__,
        # and we can't know for sure here whether we need the atomic write()
        # method or not (it depends on the layers on top of us),
        # so we use a subclass that *does* allow assigning.
        return FileIO(self._fobj, self.fileio_mode, self.closefd)

    @staticmethod
    def is_buffered(stream):
        return (
            # buffering happens internally in the text codecs
            isinstance(stream, (io.BufferedIOBase, io.TextIOBase))
            or (hasattr(stream, 'buffer') and stream.buffer is not None)
        )

    @classmethod
    def buffer_size_for_stream(cls, stream):
        # Prefer the OS block size of the underlying file, when available.
        result = cls.default_buffer_size
        try:
            bs = os.fstat(stream.fileno()).st_blksize
        except (OSError, AttributeError):
            pass
        else:
            if bs > 1:
                result = bs
        return result

    def __buffered(self, stream, buffering):
        # Pick the buffer class matching the IO direction.
        if self.updating:
            Buffer = io.BufferedRandom
        elif self.creating or self.writing or self.appending:
            Buffer = io.BufferedWriter
        elif self.reading:
            Buffer = io.BufferedReader
        else: # pragma: no cover
            raise ValueError("unknown mode: %r" % self.mode)

        try:
            result = Buffer(stream, buffering)
        except AttributeError:
            # Python 2 file() objects don't have the readable/writable
            # attributes. But they handle their own buffering.
            result = stream

        return result

    def _make_atomic_write(self, result, raw):
        # The idea was to swizzle the class with one that defines
        # write() to call writeall(). This avoids setting any
        # attribute on the return object, avoids an additional layer
        # of proxying, and avoids any reference cycles (if setting a
        # method on the object).
        #
        # However, this is not possible with the built-in io classes
        # (static types defined in C cannot have __class__ assigned).
        # Fortunately, we need this only for the specific case of
        # opening a file descriptor (subprocess.py) on Python 2, in
        # which we fully control the types involved.
        #
        # So rather than attempt that, we only implement exactly what we need.
        if result is not raw or self._raw_object_is_new(raw):
            if result.__class__ is FileIO:
                result.__class__ = WriteallFileIO
            else: # pragma: no cover
                raise NotImplementedError(
                    "Don't know how to make %s have atomic write. "
                    "Please open a gevent issue with your use-case." % (
                        result
                    )
                )
        return result

    def __wrapped(self, raw):
        """
        Wraps the raw IO object (`RawIOBase` or `io.TextIOBase`) in
        buffers, text decoding, and newline handling.
        """
        if self.binary and isinstance(raw, io.TextIOBase):
            # Can't do it. The TextIO object will have its own buffer, and
            # trying to read from the raw stream or the buffer without going through
            # the TextIO object is likely to lead to problems with the codec.
            raise ValueError("Unable to perform binary IO on top of text IO stream")

        result = raw
        buffering = self.buffering

        line_buffering = False
        if buffering == 1 or buffering < 0 and raw.isatty():
            buffering = -1
            line_buffering = True
        if buffering < 0:
            buffering = self.buffer_size_for_stream(result)

        if buffering < 0: # pragma: no cover
            raise ValueError("invalid buffering size")

        if buffering != 0 and not self.is_buffered(result):
            # Need to wrap our own buffering around it. If it
            # is already buffered, don't do so.
            result = self.__buffered(result, buffering)

        if not self.binary:
            # Either native or text at this point.
            # Python 2 and text mode, or Python 3 and either text or native (both are the same)
            if not isinstance(raw, io.TextIOBase):
                # Avoid double-wrapping a TextIOBase in another TextIOWrapper.
                # That tends not to work. See https://github.com/gevent/gevent/issues/1542
                result = io.TextIOWrapper(result, self.encoding, self.errors, self.newline,
                                          line_buffering)

        if result is not raw or self._raw_object_is_new(raw):
            # Set the mode, if possible, but only if we created a new
            # object.
            try:
                result.mode = self.mode
            except (AttributeError, TypeError):
                # AttributeError: No such attribute
                # TypeError: Readonly attribute (py2)
                pass

        if (
                self.atomic_write
                and not self.is_buffered(result)
                and not isinstance(result, WriteIsWriteallMixin)
        ):
            # Let subclasses have a say in how they make this atomic, and
            # whether or not they do so even if we're actually returning the raw object.
            result = self._make_atomic_write(result, raw)

        return result
|
||||
|
||||
|
||||
class _ClosedIO(object):
|
||||
# Used for FileObjectBase._io when FOB.close()
|
||||
# is called. Lets us drop references to ``_io``
|
||||
# for GC/resource cleanup reasons, but keeps some useful
|
||||
# information around.
|
||||
__slots__ = ('name',)
|
||||
|
||||
def __init__(self, io_obj):
|
||||
try:
|
||||
self.name = io_obj.name
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name == 'name':
|
||||
# We didn't set it in __init__ because there wasn't one
|
||||
raise AttributeError
|
||||
raise FileObjectClosed
|
||||
|
||||
def __bool__(self):
|
||||
return False
|
||||
__nonzero__ = __bool__
|
||||
|
||||
|
||||
class FileObjectBase(object):
    """
    Internal base class to ensure a level of consistency
    between :class:`~.FileObjectPosix`, :class:`~.FileObjectThread`
    and :class:`~.FileObjectBlock`.
    """

    # List of methods we delegate to the wrapping IO object, if they
    # implement them and we do not.
    _delegate_methods = (
        # General methods
        'flush',
        'fileno',
        'writable',
        'readable',
        'seek',
        'seekable',
        'tell',

        # Read
        'read',
        'readline',
        'readlines',
        'read1',
        'readinto',

        # Write.
        # Note that we do not extend WriteallMixin,
        # so writeall will be copied, if it exists, and
        # wrapped.
        'write',
        'writeall',
        'writelines',
        'truncate',
    )


    # The wrapped io object. Replaced with a _ClosedIO instance when
    # close() runs, so the ``closed`` property and __getattr__ behave sanely.
    _io = None

    def __init__(self, descriptor):
        # type: (OpenDescriptor) -> None
        """Open *descriptor* and copy its delegate methods onto this instance."""
        self._io = descriptor.opened()
        # We don't actually use this property ourself, but we save it (and
        # pass it along) for compatibility.
        self._close = descriptor.closefd
        self._do_delegate_methods()


    io = property(lambda s: s._io,
                  # Historically we either hand-wrote all the delegation methods
                  # to use self.io, or we simply used __getattr__ to look them up at
                  # runtime. This meant people could change the io attribute on the fly
                  # and it would mostly work (subprocess.py used to do that). We don't recommend
                  # that, but we still support it.
                  lambda s, nv: setattr(s, '_io', nv) or s._do_delegate_methods())

    def _do_delegate_methods(self):
        # Copy each delegated method from the io object into our instance
        # dict (unless the subclass defines it), removing stale copies for
        # methods the new io object no longer provides.
        for meth_name in self._delegate_methods:
            meth = getattr(self._io, meth_name, None)
            implemented_by_class = hasattr(type(self), meth_name)
            if meth and not implemented_by_class:
                setattr(self, meth_name, self._wrap_method(meth))
            elif hasattr(self, meth_name) and not implemented_by_class:
                delattr(self, meth_name)

    def _wrap_method(self, method):
        """
        Wrap a method we're copying into our dictionary from the underlying
        io object to do something special or different, if necessary.
        """
        return method

    @property
    def closed(self):
        """True if the file is closed"""
        return isinstance(self._io, _ClosedIO)

    def close(self):
        """Close this object (idempotent), delegating the real work to ``_do_close``."""
        if isinstance(self._io, _ClosedIO):
            return

        fobj = self._io
        # Swap in the placeholder *before* closing so concurrent users see
        # ``closed`` immediately and attribute access raises FileObjectClosed.
        self._io = _ClosedIO(self._io)
        try:
            self._do_close(fobj, self._close)
        finally:
            fobj = None
            # Remove delegate methods to drop remaining references to
            # _io.
            d = self.__dict__
            for meth_name in self._delegate_methods:
                d.pop(meth_name, None)

    def _do_close(self, fobj, closefd):
        # Subclasses implement the actual closing strategy.
        raise NotImplementedError()

    def __getattr__(self, name):
        # Fall through to the wrapped io object (or the _ClosedIO
        # placeholder, which raises FileObjectClosed).
        return getattr(self._io, name)

    def __repr__(self):
        return '<%s at 0x%x %s_fobj=%r%s>' % (
            self.__class__.__name__,
            id(self),
            'closed' if self.closed else '',
            self.io,
            self._extra_repr()
        )

    def _extra_repr(self):
        # Hook for subclasses to append extra info to __repr__.
        return ''

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def __iter__(self):
        return self

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    next = __next__

    def __bool__(self):
        return True

    __nonzero__ = __bool__
|
||||
|
||||
|
||||
class FileObjectBlock(FileObjectBase):
    """
    FileObjectBlock()

    A simple synchronous wrapper around a file object.

    Adds no concurrency or gevent compatibility.
    """

    def __init__(self, fobj, *args, **kwargs):
        # Describe how *fobj* should be opened, then let the base class
        # open it and wire up the delegate methods.
        FileObjectBase.__init__(self, OpenDescriptor(fobj, *args, **kwargs))

    def _do_close(self, fobj, closefd):
        # Plain, blocking close of the underlying object.
        fobj.close()
|
||||
|
||||
|
||||
class FileObjectThread(FileObjectBase):
    """
    FileObjectThread()

    A file-like object wrapping another file-like object, performing all blocking
    operations on that object in a background thread.

    .. caution::
        Attempting to change the threadpool or lock of an existing FileObjectThread
        has undefined consequences.

    .. versionchanged:: 1.1b1
       The file object is closed using the threadpool. Note that whether or
       not this action is synchronous or asynchronous is not documented.
    """

    def __init__(self, *args, **kwargs):
        """
        :keyword bool lock: If True (the default) then all operations will
           be performed one-by-one. Note that this does not guarantee that, if using
           this file object from multiple threads/greenlets, operations will be performed
           in any particular order, only that no two operations will be attempted at the
           same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize
           file operations with an external resource.
        :keyword bool closefd: If True (the default) then when this object is closed,
           the underlying object is closed as well. If *fobj* is a path, then
           *closefd* must be True.
        """
        lock = kwargs.pop('lock', True)
        threadpool = kwargs.pop('threadpool', None)
        descriptor = OpenDescriptor(*args, **kwargs)

        self.threadpool = threadpool or get_hub().threadpool
        self.lock = lock
        if self.lock is True:
            self.lock = Semaphore()
        elif not self.lock:
            self.lock = DummySemaphore()
        if not hasattr(self.lock, '__enter__'):
            raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))

        self.__io_holder = [descriptor.opened()] # signal for _wrap_method
        FileObjectBase.__init__(self, descriptor)

    def _do_close(self, fobj, closefd):
        """Flush (under the lock) and close *fobj* using the threadpool."""
        self.__io_holder[0] = None # for _wrap_method
        try:
            with self.lock:
                self.threadpool.apply(fobj.flush)
        finally:
            if closefd:
                # Note that we're not taking the lock; older code
                # did fobj.close() without going through the threadpool at all,
                # so acquiring the lock could potentially introduce deadlocks
                # that weren't present before. Avoiding the lock doesn't make
                # the existing race condition any worse.
                # We wrap the close in an exception handler and re-raise directly
                # to avoid the (common, expected) IOError from being logged by the pool
                def close(_fobj=fobj):
                    try:
                        _fobj.close()
                    except: # pylint:disable=bare-except
                        # pylint:disable-next=return-in-finally
                        return sys.exc_info()
                    finally:
                        _fobj = None
                del fobj

                exc_info = self.threadpool.apply(close)
                del close

                if exc_info:
                    reraise(*exc_info)

    def _do_delegate_methods(self):
        FileObjectBase._do_delegate_methods(self)
        # Keep the holder in sync so wrapped methods see the current io object.
        self.__io_holder[0] = self._io

    def _extra_repr(self):
        return ' threadpool=%r' % (self.threadpool,)

    def _wrap_method(self, method):
        """Wrap *method* so it runs in the threadpool, serialized by our lock."""
        # NOTE: We are careful to avoid introducing a refcycle
        # within self. Our wrapper cannot refer to self.
        io_holder = self.__io_holder
        lock = self.lock
        threadpool = self.threadpool

        @functools.wraps(method)
        def thread_method(*args, **kwargs):
            if io_holder[0] is None:
                # This is different than FileObjectPosix, etc,
                # because we want to save the expensive trip through
                # the threadpool.
                raise FileObjectClosed
            with lock:
                return threadpool.apply(method, args, kwargs)

        return thread_method
|
||||
343
venv3_12/Lib/site-packages/gevent/_fileobjectposix.py
Normal file
343
venv3_12/Lib/site-packages/gevent/_fileobjectposix.py
Normal file
@@ -0,0 +1,343 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import print_function
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
from io import BytesIO
|
||||
from io import DEFAULT_BUFFER_SIZE
|
||||
from io import FileIO
|
||||
from io import RawIOBase
|
||||
from io import UnsupportedOperation
|
||||
|
||||
from gevent._compat import reraise
|
||||
from gevent._fileobjectcommon import cancel_wait_ex
|
||||
from gevent._fileobjectcommon import FileObjectBase
|
||||
from gevent._fileobjectcommon import OpenDescriptor
|
||||
from gevent._fileobjectcommon import WriteIsWriteallMixin
|
||||
from gevent._hub_primitives import wait_on_watcher
|
||||
from gevent.hub import get_hub
|
||||
from gevent.os import _read
|
||||
from gevent.os import _write
|
||||
from gevent.os import ignored_errors
|
||||
from gevent.os import make_nonblocking
|
||||
|
||||
|
||||
class GreenFileDescriptorIO(RawIOBase):
    # Internal, undocumented, class. All that's documented is that this
    # is a IOBase object. Constructor is private.

    # Note that RawIOBase has a __del__ method that calls
    # self.close(). (In C implementations like CPython, this is
    # the type's tp_dealloc slot; prior to Python 3, the object doesn't
    # appear to have a __del__ method, even though it functionally does)

    _read_watcher = None
    _write_watcher = None
    _closed = False
    _seekable = None
    _keep_alive = None # An object that needs to live as long as we do.

    def __init__(self, fileno, open_descriptor, closefd=True):
        """Make *fileno* non-blocking and create loop watchers for the allowed directions."""
        RawIOBase.__init__(self)

        self._closefd = closefd
        self._fileno = fileno
        self.name = fileno
        self.mode = open_descriptor.fileio_mode
        make_nonblocking(fileno)
        readable = open_descriptor.can_read
        writable = open_descriptor.can_write

        self.hub = get_hub()
        io_watcher = self.hub.loop.io
        try:
            if readable:
                self._read_watcher = io_watcher(fileno, 1)

            if writable:
                self._write_watcher = io_watcher(fileno, 2)
        except:
            # If anything goes wrong, it's important to go ahead and
            # close these watchers *now*, especially under libuv, so
            # that they don't get eventually reclaimed by the garbage
            # collector at some random time, thanks to the C level
            # slot (even though we don't seem to have any actual references
            # at the Python level). Previously, if we didn't close now,
            # that random close in the future would cause issues if we had duplicated
            # the fileno (if a wrapping with statement had closed an open fileobject,
            # for example)

            # test__fileobject can show a failure if this doesn't happen
            # TRAVIS=true GEVENT_LOOP=libuv python -m gevent.tests.test__fileobject \
            #    TestFileObjectPosix.test_seek TestFileObjectThread.test_bufsize_0
            self.close()
            raise

    def isatty(self):
        # TODO: Couldn't we just subclass FileIO?
        f = FileIO(self._fileno, 'r', False)
        try:
            return f.isatty()
        finally:
            f.close()

    def readable(self):
        return self._read_watcher is not None

    def writable(self):
        return self._write_watcher is not None

    def seekable(self):
        # Lazily probe with a no-op lseek; cache the answer.
        if self._seekable is None:
            try:
                os.lseek(self._fileno, 0, os.SEEK_CUR)
            except OSError:
                self._seekable = False
            else:
                self._seekable = True
        return self._seekable

    def fileno(self):
        return self._fileno

    @property
    def closed(self):
        return self._closed

    def __destroy_events(self):
        # Detach the watchers and hub, then let the hub cancel outstanding
        # waits, close the watchers, and finally run __finish_close.
        read_event = self._read_watcher
        write_event = self._write_watcher
        hub = self.hub
        self.hub = self._read_watcher = self._write_watcher = None

        hub.cancel_waits_close_and_then(
            (read_event, write_event),
            cancel_wait_ex,
            self.__finish_close,
            self._closefd,
            self._fileno,
            self._keep_alive
        )

    def close(self):
        """Flush, destroy watchers, and (eventually) close the descriptor. Idempotent."""
        if self._closed:
            return
        self.flush()
        # TODO: Can we use 'read_event is not None and write_event is
        # not None' to mean _closed?
        self._closed = True
        try:
            self.__destroy_events()
        finally:
            self._fileno = self._keep_alive = None

    @staticmethod
    def __finish_close(closefd, fileno, keep_alive):
        # Runs after the watchers are dealt with: close the fd if we own it,
        # then close the keep-alive object if it has a close().
        try:
            if closefd:
                os.close(fileno)
        finally:
            if hasattr(keep_alive, 'close'):
                keep_alive.close()

    # RawIOBase provides a 'read' method that will call readall() if
    # the `size` was missing or -1 and otherwise call readinto(). We
    # want to take advantage of this to avoid single byte reads when
    # possible. This is highlighted by a bug in BufferedIOReader that
    # calls read() in a loop when its readall() method is invoked;
    # this was fixed in Python 3.3, but we still need our workaround for 2.7. See
    # https://github.com/gevent/gevent/issues/675)
    def __read(self, n):
        # Cooperative read: retry on EAGAIN-style errors, waiting on the
        # read watcher between attempts.
        if self._read_watcher is None:
            raise UnsupportedOperation('read')
        while 1:
            try:
                return _read(self._fileno, n)
            except OSError as ex:
                if ex.args[0] not in ignored_errors:
                    raise
            wait_on_watcher(self._read_watcher, None, None, self.hub)

    def readall(self):
        ret = BytesIO()
        while True:
            try:
                data = self.__read(DEFAULT_BUFFER_SIZE)
            except cancel_wait_ex:
                # We were closed while reading. A buffered reader
                # just returns what it has handy at that point,
                # so we do too.
                data = None
            if not data:
                break
            ret.write(data)
        return ret.getvalue()

    def readinto(self, b):
        data = self.__read(len(b))
        n = len(data)
        try:
            b[:n] = data
        except TypeError as err:
            import array
            if not isinstance(b, array.array):
                raise err
            # Python 2 array.array couldn't take bytes in a slice assignment.
            b[:n] = array.array(b'b', data)
        return n

    def write(self, b):
        # Cooperative write: retry on EAGAIN-style errors, waiting on the
        # write watcher between attempts. May write fewer bytes than len(b).
        if self._write_watcher is None:
            raise UnsupportedOperation('write')
        while True:
            try:
                return _write(self._fileno, b)
            except OSError as ex:
                if ex.args[0] not in ignored_errors:
                    raise
            wait_on_watcher(self._write_watcher, None, None, self.hub)

    def seek(self, offset, whence=0):
        try:
            return os.lseek(self._fileno, offset, whence)
        except IOError: # pylint:disable=try-except-raise
            raise
        except OSError as ex: # pylint:disable=duplicate-except
            # Python 2.x
            # make sure on Python 2.x we raise an IOError
            # as documented for RawIOBase.
            # See https://github.com/gevent/gevent/issues/1323
            reraise(IOError, IOError(*ex.args), sys.exc_info()[2])

    def __repr__(self):
        return "<%s at 0x%x fileno=%s mode=%r>" % (
            type(self).__name__, id(self), self._fileno, self.mode
        )
|
||||
|
||||
|
||||
class GreenFileDescriptorIOWriteall(WriteIsWriteallMixin,
                                    GreenFileDescriptorIO):
    """
    A :class:`GreenFileDescriptorIO` whose ``write`` method (via
    :class:`WriteIsWriteallMixin`) writes the entire buffer before
    returning, instead of possibly writing only part of it.
    """
|
||||
|
||||
|
||||
class GreenOpenDescriptor(OpenDescriptor):
    # An OpenDescriptor whose raw layer is a cooperative
    # GreenFileDescriptorIO instead of a blocking FileIO.

    def _do_open_raw(self):
        if self.is_fd():
            # We were given the descriptor directly; ownership follows
            # our configured ``closefd``.
            fileio = GreenFileDescriptorIO(self._fobj, self, closefd=self.closefd)
        else:
            # Either an existing file object or a path string (which
            # we open to get a file object). In either case, the other object
            # owns the descriptor and we must not close it.
            closefd = False

            raw = OpenDescriptor._do_open_raw(self)

            fileno = raw.fileno()
            fileio = GreenFileDescriptorIO(fileno, self, closefd=closefd)
            # Keep *raw* alive as long as the green wrapper; it owns the fd.
            fileio._keep_alive = raw
            # We can usually do better for a name, though.
            try:
                fileio.name = raw.name
            except AttributeError:
                del fileio.name
        return fileio

    def _make_atomic_write(self, result, raw):
        # Our return value from _do_open_raw is always a new
        # object that we own, so we're always free to change
        # the class.
        assert result is not raw or self._raw_object_is_new(raw)
        if result.__class__ is GreenFileDescriptorIO:
            result.__class__ = GreenFileDescriptorIOWriteall
        else:
            result = OpenDescriptor._make_atomic_write(self, result, raw)
        return result
|
||||
|
||||
|
||||
class FileObjectPosix(FileObjectBase):
    """
    FileObjectPosix()

    A file-like object that operates on non-blocking files but
    provides a synchronous, cooperative interface.

    .. caution::
         This object is only effective wrapping files that can be used meaningfully
         with :func:`select.select` such as sockets and pipes.

         In general, on most platforms, operations on regular files
         (e.g., ``open('a_file.txt')``) are considered non-blocking
         already, even though they can take some time to complete as
         data is copied to the kernel and flushed to disk: this time
         is relatively bounded compared to sockets or pipes, though.
         A :func:`~os.read` or :func:`~os.write` call on such a file
         will still effectively block for some small period of time.
         Therefore, wrapping this class around a regular file is
         unlikely to make IO gevent-friendly: reading or writing large
         amounts of data could still block the event loop.

         If you'll be working with regular files and doing IO in large
         chunks, you may consider using
         :class:`~gevent.fileobject.FileObjectThread` or
         :func:`~gevent.os.tp_read` and :func:`~gevent.os.tp_write` to bypass this
         concern.

    .. tip::
         Although this object provides a :meth:`fileno` method and so
         can itself be passed to :func:`fcntl.fcntl`, setting the
         :data:`os.O_NONBLOCK` flag will have no effect (reads will
         still block the greenlet, although other greenlets can run).
         However, removing that flag *will cause this object to no
         longer be cooperative* (other greenlets will no longer run).

         You can use the internal ``fileio`` attribute of this object
         (a :class:`io.RawIOBase`) to perform non-blocking byte reads.
         Note, however, that once you begin directly using this
         attribute, the results from using methods of *this* object
         are undefined, especially in text mode. (See :issue:`222`.)

    .. versionchanged:: 1.1
       Now uses the :mod:`io` package internally. Under Python 2, previously
       used the undocumented class :class:`socket._fileobject`. This provides
       better file-like semantics (and portability to Python 3).
    .. versionchanged:: 1.2a1
       Document the ``fileio`` attribute for non-blocking reads.
    .. versionchanged:: 1.2a1

       A bufsize of 0 in write mode is no longer forced to be 1.
       Instead, the underlying buffer is flushed after every write
       operation to simulate a bufsize of 0. In gevent 1.0, a
       bufsize of 0 was flushed when a newline was written, while
       in gevent 1.1 it was flushed when more than one byte was
       written. Note that this may have performance impacts.
    .. versionchanged:: 1.3a1
       On Python 2, enabling universal newlines no longer forces unicode
       IO.
    .. versionchanged:: 1.5
       The default value for *mode* was changed from ``rb`` to ``r``. This is consistent
       with :func:`open`, :func:`io.open`, and :class:`~.FileObjectThread`, which is the
       default ``FileObject`` on some platforms.
    .. versionchanged:: 1.5
       Stop forcing buffering. Previously, given a ``buffering=0`` argument,
       *buffering* would be set to 1, and ``buffering=1`` would be forced to
       the default buffer size. This was a workaround for a long-standing concurrency
       issue. Now the *buffering* argument is interpreted as intended.
    """

    default_bufsize = DEFAULT_BUFFER_SIZE

    def __init__(self, *args, **kwargs):
        """Open via a GreenOpenDescriptor so the raw layer is cooperative."""
        descriptor = GreenOpenDescriptor(*args, **kwargs)
        FileObjectBase.__init__(self, descriptor)
        # This attribute is documented as available for non-blocking reads.
        self.fileio = descriptor.opened_raw()

    def _do_close(self, fobj, closefd):
        try:
            fobj.close()
            # self.fileio already knows whether or not to close the
            # file descriptor
            self.fileio.close()
        finally:
            self.fileio = None
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
132
venv3_12/Lib/site-packages/gevent/_greenlet_primitives.py
Normal file
132
venv3_12/Lib/site-packages/gevent/_greenlet_primitives.py
Normal file
@@ -0,0 +1,132 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# copyright (c) 2018 gevent. See LICENSE.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
"""
|
||||
A collection of primitives used by the hub, and suitable for
|
||||
compilation with Cython because of their frequency of use.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
from weakref import ref as wref
|
||||
from gc import get_objects
|
||||
|
||||
from greenlet import greenlet
|
||||
|
||||
from gevent.exceptions import BlockingSwitchOutError
|
||||
|
||||
|
||||
# In Cython, we define these as 'cdef inline' functions. The
|
||||
# compilation unit cannot have a direct assignment to them (import
|
||||
# is assignment) without generating a 'lvalue is not valid target'
|
||||
# error.
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
locals()['_greenlet_switch'] = greenlet.switch
|
||||
|
||||
|
||||
__all__ = [
|
||||
'TrackedRawGreenlet',
|
||||
'SwitchOutGreenletWithLoop',
|
||||
]
|
||||
|
||||
class TrackedRawGreenlet(greenlet):
    """
    A raw greenlet that records just enough spawning information to
    participate in gevent's greenlet-tree tracking, without the cost
    of capturing a stack trace.
    """

    def __init__(self, function, parent):
        greenlet.__init__(self, function, parent)
        # See greenlet.py's Greenlet class. We capture the cheap
        # parts to maintain the tree structure, but we do not capture
        # the stack because that's too expensive for 'spawn_raw'.

        current = getcurrent() # pylint:disable=undefined-variable
        self.spawning_greenlet = wref(current)
        # See Greenlet for how trees are maintained.
        try:
            self.spawn_tree_locals = current.spawn_tree_locals
        except AttributeError:
            # The spawner had no tree locals yet; start a fresh dict and
            # share it back with the spawner (unless it is the root).
            self.spawn_tree_locals = {}
            if current.parent:
                current.spawn_tree_locals = self.spawn_tree_locals
|
||||
|
||||
|
||||
class SwitchOutGreenletWithLoop(TrackedRawGreenlet):
    # Subclasses must define:
    # - self.loop

    # This class defines loop in its .pxd for Cython. This lets us avoid
    # circular dependencies with the hub.

    def switch(self):
        """Switch into this greenlet, first calling the current greenlet's ``switch_out`` hook, if any."""
        switch_out = getattr(getcurrent(), 'switch_out', None) # pylint:disable=undefined-variable
        if switch_out is not None:
            switch_out()
        return _greenlet_switch(self) # pylint:disable=undefined-variable

    def switch_out(self):
        # Switching *out of* this greenlet (i.e., blocking inside a loop
        # callback) is always a bug.
        raise BlockingSwitchOutError('Impossible to call blocking function in the event loop callback')
|
||||
|
||||
|
||||
def get_reachable_greenlets():
    """
    Return a list of all greenlet objects the GC can currently reach,
    skipping those marked with ``greenlet_tree_is_ignored``.
    """
    # We compile this loop with Cython so that it's faster, and so that
    # the GIL isn't dropped at unpredictable times during the loop.
    # Dropping the GIL could lead to accessing partly constructed objects
    # in undefined states (particularly, tuples). This helps close a hole
    # where a `SystemError: Objects/tupleobject.c bad argument to internal function`
    # could get raised. (Note that this probably doesn't completely close the hole,
    # if other threads have dropped the GIL, but hopefully the speed makes that
    # more rare.) See https://github.com/gevent/gevent/issues/1302
    def _is_tracked_greenlet(obj):
        return (
            isinstance(obj, greenlet)
            and not getattr(obj, 'greenlet_tree_is_ignored', False)
        )

    return [obj for obj in get_objects() if _is_tracked_greenlet(obj)]
|
||||
|
||||
# Bind the builtin memoryview to a module-level name so Cython can
# optimize lookups of it.
_memoryview = memoryview
try:
    # ``buffer`` only exists on Python 2. How we reach it depends on
    # whether __builtins__ is the module's dict (pure-Python CPython)
    # or the builtins module itself (Cythonized mode, or PyPy).
    if isinstance(__builtins__, dict):
        _buffer = __builtins__['buffer']
    else:
        _buffer = __builtins__.buffer
except (AttributeError, KeyError):
    # Python 3: no ``buffer`` builtin; memoryview plays its role.
    _buffer = memoryview


def get_memory(data):
    """
    Return a buffer-like view of *data* suitable for I/O.

    Existing memoryviews are returned unchanged (on Python 2,
    memoryview(memoryview()) can leak in some cases, notably when an
    io.BufferedWriter produced the view -- see
    https://github.com/gevent/gevent/issues/1318). Shapeless views
    (e.g. from some ctypes objects) are copied out to bytes. On
    Python 2, objects that reject memoryview (like array.array; see
    http://code.google.com/p/gevent/issues/detail?id=94) fall back to
    ``buffer``. This is compiled with Cython to mitigate the cost of
    the extra checks.
    """
    # We don't specifically test for the Python 2 leak.
    try:
        view = data if isinstance(data, _memoryview) else _memoryview(data)
        return view if view.shape else view.tobytes()
    except TypeError:
        if _buffer is _memoryview:
            # Python 3: nothing else to fall back to.
            raise
        return _buffer(data)
|
||||
|
||||
|
||||
|
||||
def _init():
    # ``greenlet_init`` is one of the names injected via ``locals()`` above:
    # a no-op in pure-Python mode, replaced with real initialization when
    # the Cython-compiled module is used.
    greenlet_init() # pylint:disable=undefined-variable

_init()

# Replace this module's pure-Python definitions with the compiled
# versions from the C accelerator module, when it is available.
from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent.__greenlet_primitives')
|
||||
150
venv3_12/Lib/site-packages/gevent/_hub_local.py
Normal file
150
venv3_12/Lib/site-packages/gevent/_hub_local.py
Normal file
@@ -0,0 +1,150 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# copyright 2018 gevent. See LICENSE
|
||||
"""
|
||||
Maintains the thread local hub.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import _thread
|
||||
|
||||
__all__ = [
|
||||
'get_hub',
|
||||
'get_hub_noargs',
|
||||
'get_hub_if_exists',
|
||||
]
|
||||
|
||||
# These must be the "real" native thread versions,
|
||||
# not monkey-patched.
|
||||
# We are imported early enough (by gevent/__init__) that
|
||||
# we can rely on not being monkey-patched in any way yet.
|
||||
assert 'gevent' not in str(_thread._local)
|
||||
class _Threadlocal(_thread._local):
|
||||
|
||||
def __init__(self):
|
||||
# Use a class with an initializer so that we can test
|
||||
# for 'is None' instead of catching AttributeError, making
|
||||
# the code cleaner and possibly solving some corner cases
|
||||
# (like #687).
|
||||
#
|
||||
# However, under some weird circumstances, it _seems_ like the
|
||||
# __init__ method doesn't get called properly ("seems" is the
|
||||
# keyword). We've seen at least one instance
|
||||
# (https://github.com/gevent/gevent/issues/1961) of
|
||||
# ``AttributeError: '_Threadlocal' object has no attribute # 'hub'``
|
||||
# which should be impossible unless:
|
||||
#
|
||||
# - Someone manually deletes the attribute
|
||||
# - The _threadlocal object itself is in the process of being
|
||||
# deleted. The C ``tp_clear`` slot for it deletes the ``__dict__``
|
||||
# of each instance in each thread (and/or the ``tp_clear`` of ``dict`` itself
|
||||
# clears the instance). Now, how we could be getting
|
||||
# cleared while still being used is unclear, but clearing is part of
|
||||
# circular garbage collection, and in the bug report it looks like we're inside a
|
||||
# weakref finalizer or ``__del__`` method, which could suggest that
|
||||
# garbage collection is happening.
|
||||
#
|
||||
# See https://github.com/gevent/gevent/issues/1961
|
||||
# and ``get_hub_if_exists()``
|
||||
super(_Threadlocal, self).__init__()
|
||||
self.Hub = None
|
||||
self.loop = None
|
||||
self.hub = None
|
||||
|
||||
_threadlocal = _Threadlocal()
|
||||
|
||||
Hub = None # Set when gevent.hub is imported
|
||||
|
||||
def get_hub_class():
    """Return the type of hub to use for the current thread.

    If there's no type of hub for the current thread yet, 'gevent.hub.Hub' is used.
    """
    # Cache the module-level default in the threadlocal on first use.
    if _threadlocal.Hub is None:
        _threadlocal.Hub = Hub
    return _threadlocal.Hub
|
||||
|
||||
def set_default_hub_class(hubtype):
    """Set the module-global default hub type used when a thread first creates its hub."""
    global Hub
    Hub = hubtype
|
||||
|
||||
def get_hub():
    """
    Return the hub for the current thread.

    If a hub does not exist in the current thread, a new one is
    created of the type returned by :func:`get_hub_class`.

    .. deprecated:: 1.3b1
       The ``*args`` and ``**kwargs`` arguments are deprecated. They were
       only used when the hub was created, and so were non-deterministic---to be
       sure they were used, *all* callers had to pass them, or they were order-dependent.
       Use ``set_hub`` instead.

    .. versionchanged:: 1.5a3
       The *args* and *kwargs* arguments are now completely ignored.

    .. versionchanged:: 23.7.0
       The long-deprecated ``args`` and ``kwargs`` parameters are no
       longer accepted.
    """
    # The attribute itself can be missing in rare teardown scenarios;
    # see get_hub_if_exists.
    hub = getattr(_threadlocal, 'hub', None)
    if hub is None:
        hub = _threadlocal.hub = get_hub_class()()
    return hub
|
||||
|
||||
# For Cython purposes, we need to duplicate get_hub into this function so it
# can be directly called.
def get_hub_noargs():
    """Return (creating if needed) the hub for the current thread."""
    # Same logic as get_hub(); see get_hub_if_exists for the defensive
    # attribute access.
    hub = getattr(_threadlocal, 'hub', None)
    if hub is None:
        hub = _threadlocal.hub = get_hub_class()()
    return hub
|
||||
|
||||
def get_hub_if_exists():
    """
    Return the hub for the current thread.

    Return ``None`` if no hub has been created yet.
    """
    # Band-aid for the poorly-understood behaviour seen in
    # https://github.com/gevent/gevent/issues/1961 where the ``hub``
    # attribute has gone missing entirely. XXX: We'd like to report
    # that case, but it isn't clear how to do so safely (we may be in
    # interpreter/thread shutdown, or somewhere imports are unsafe).
    return getattr(_threadlocal, 'hub', None)
|
||||
|
||||
|
||||
|
||||
|
||||
def set_hub(hub):
    """Install *hub* as the current thread's hub."""
    _threadlocal.hub = hub
|
||||
|
||||
def get_loop():
    """Return the current thread's event loop (may be None if not set)."""
    return _threadlocal.loop
|
||||
|
||||
def set_loop(loop):
    # Store *loop* as the event loop for the current thread.
    _threadlocal.loop = loop
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__hub_local')
|
||||
427
venv3_12/Lib/site-packages/gevent/_hub_primitives.py
Normal file
427
venv3_12/Lib/site-packages/gevent/_hub_primitives.py
Normal file
@@ -0,0 +1,427 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# copyright (c) 2018 gevent. See LICENSE.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False,binding=True
|
||||
"""
|
||||
A collection of primitives used by the hub, and suitable for
|
||||
compilation with Cython because of their frequency of use.
|
||||
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import traceback
|
||||
|
||||
from gevent.exceptions import InvalidSwitchError
|
||||
from gevent.exceptions import ConcurrentObjectUseError
|
||||
|
||||
from gevent import _greenlet_primitives
|
||||
from gevent import _waiter
|
||||
from gevent._util import _NONE
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
# In Cython, we define these as 'cdef inline' functions. The
# compilation unit cannot have a direct assignment to them (import
# is assignment) without generating a 'lvalue is not valid target'
# error.  Going through locals() hides the assignment from Cython.
locals()['getcurrent'] = __import__('greenlet').getcurrent
locals()['greenlet_init'] = lambda: None
locals()['Waiter'] = _waiter.Waiter
locals()['MultipleWaiter'] = _waiter.MultipleWaiter
locals()['SwitchOutGreenletWithLoop'] = _greenlet_primitives.SwitchOutGreenletWithLoop

__all__ = [
    'WaitOperationsGreenlet',
    'iwait_on_objects',
    'wait_on_objects',
    'wait_read',
    'wait_write',
    'wait_readwrite',
]
|
||||
|
||||
class WaitOperationsGreenlet(SwitchOutGreenletWithLoop): # pylint:disable=undefined-variable
    """
    A greenlet (the hub) that can block other greenlets on event-loop
    watchers and later cancel those waits by throwing exceptions into
    the blocked greenlets.
    """

    def wait(self, watcher):
        """
        Wait until the *watcher* (which must not be started) is ready.

        The current greenlet will be unscheduled during this time.
        """
        waiter = Waiter(self) # pylint:disable=undefined-variable
        # The watcher will switch back into the waiter when it fires,
        # passing ``waiter`` itself as the value we expect to receive.
        watcher.start(waiter.switch, waiter)
        try:
            result = waiter.get()
            # If we were switched to with anything else, someone switched
            # into this greenlet out-of-band; that's a bug in the caller.
            if result is not waiter:
                raise InvalidSwitchError(
                    'Invalid switch into %s: got %r (expected %r; waiting on %r with %r)' % (
                        getcurrent(), # pylint:disable=undefined-variable
                        result,
                        waiter,
                        self,
                        watcher
                    )
                )
        finally:
            # Always stop the watcher, even on error, so it cannot fire
            # again into a waiter that no longer exists.
            watcher.stop()

    def cancel_waits_close_and_then(self, watchers, exc_kind, then, *then_args):
        # Cancel any active waits on *watchers*, close them, and then
        # invoke ``then(*then_args)``.  Watchers with no pending callback
        # are closed immediately; the rest are cancelled from a loop
        # callback so the throws happen in the hub.
        deferred = []
        for watcher in watchers:
            if watcher is None:
                continue
            if watcher.callback is None:
                # Nothing waiting; safe to close right now.
                watcher.close()
            else:
                deferred.append(watcher)
        if deferred:
            self.loop.run_callback(self._cancel_waits_then, deferred, exc_kind, then, then_args)
        else:
            then(*then_args)

    def _cancel_waits_then(self, watchers, exc_kind, then, then_args):
        # Runs in the hub: cancel (and close) each deferred watcher,
        # then run the continuation.
        for watcher in watchers:
            self._cancel_wait(watcher, exc_kind, True)
        then(*then_args)

    def cancel_wait(self, watcher, error, close_watcher=False):
        """
        Cancel an in-progress call to :meth:`wait` by throwing the given *error*
        in the waiting greenlet.

        .. versionchanged:: 1.3a1
           Added the *close_watcher* parameter. If true, the watcher
           will be closed after the exception is thrown. The watcher should then
           be discarded. Closing the watcher is important to release native resources.
        .. versionchanged:: 1.3a2
           Allow the *watcher* to be ``None``. No action is taken in that case.

        """
        if watcher is None:
            # Presumably already closed.
            # See https://github.com/gevent/gevent/issues/1089
            return

        if watcher.callback is not None:
            # Someone is waiting: do the actual cancellation from a loop
            # callback so the throw happens in the hub greenlet.
            self.loop.run_callback(self._cancel_wait, watcher, error, close_watcher)
            return

        if close_watcher:
            watcher.close()

    def _cancel_wait(self, watcher, error, close_watcher):
        # Running in the hub. Switches to the waiting greenlet to raise
        # the error; assuming the waiting greenlet dies, switches back
        # to this (because the waiting greenlet's parent is the hub.)

        # We have to check again to see if it was still active by the time
        # our callback actually runs.
        active = watcher.active
        cb = watcher.callback
        if close_watcher:
            watcher.close()
        if active:
            # The callback should be greenlet.switch(). It may or may not be None.
            glet = getattr(cb, '__self__', None)
            if glet is not None:
                glet.throw(error)
|
||||
|
||||
|
||||
class _WaitIterator(object):
    """
    Iterator (and context manager) backing :func:`iwait_on_objects`:
    rawlinks all *objects* to a single waiter and yields them as they
    become ready, up to *count* items or until *timeout* expires.
    """

    def __init__(self, objects, hub, timeout, count):
        self._hub = hub
        self._waiter = MultipleWaiter(hub) # pylint:disable=undefined-variable
        self._switch = self._waiter.switch
        self._timeout = timeout
        self._objects = objects

        self._timer = None
        # Links/timer are installed lazily on first __next__().
        self._begun = False

        # Even if we're only going to return 1 object,
        # we must still rawlink() *all* of them, so that no
        # matter which one finishes first we find it.
        self._count = len(objects) if count is None else min(count, len(objects))

    def _begin(self):
        if self._begun:
            return

        self._begun = True

        # XXX: If iteration doesn't actually happen, we
        # could leave these links around!
        for obj in self._objects:
            obj.rawlink(self._switch)

        if self._timeout is not None:
            # The timer switches back with ``self`` as the value,
            # which __next__ uses as the expiration sentinel.
            self._timer = self._hub.loop.timer(self._timeout, priority=-1)
            self._timer.start(self._switch, self)

    def __iter__(self):
        return self

    def __next__(self):
        self._begin()

        if self._count == 0:
            # Exhausted
            self._cleanup()
            raise StopIteration()

        self._count -= 1
        try:
            item = self._waiter.get()
            # Reset the waiter so the next ready object can switch in.
            self._waiter.clear()
            if item is self:
                # Timer expired, no more
                self._cleanup()
                raise StopIteration()
            return item
        except:
            self._cleanup()
            raise

    next = __next__

    def _cleanup(self):
        # Idempotent: close the timer and unlink everything we linked.
        if self._timer is not None:
            self._timer.close()
            self._timer = None

        objs = self._objects
        self._objects = ()
        for aobj in objs:
            unlink = getattr(aobj, 'unlink', None)
            if unlink is not None:
                try:
                    unlink(self._switch)
                except: # pylint:disable=bare-except
                    traceback.print_exc()

    def __enter__(self):
        return self

    def __exit__(self, typ, value, tb):
        self._cleanup()
|
||||
|
||||
|
||||
def iwait_on_objects(objects, timeout=None, count=None):
    """
    Iteratively yield *objects* as they are ready, until all (or *count*) are ready
    or *timeout* expired.

    If you will only be consuming a portion of the *objects*, you should
    do so inside a ``with`` block on this object to avoid leaking resources::

        with gevent.iwait((a, b, c)) as it:
            for i in it:
                if i is a:
                    break

    :param objects: A sequence (supporting :func:`len`) containing objects
        implementing the wait protocol (rawlink() and unlink()).
        If ``None``, block until the hub's :meth:`join` returns and yield
        that single result.
    :keyword int count: If not `None`, then a number specifying the maximum number
        of objects to wait for. If ``None`` (the default), all objects
        are waited for.
    :keyword float timeout: If given, specifies a maximum number of seconds
        to wait. If the timeout expires before the desired waited-for objects
        are available, then this method returns immediately.

    .. seealso:: :func:`wait`

    .. versionchanged:: 1.1a1
       Add the *count* parameter.
    .. versionchanged:: 1.1a2
       No longer raise :exc:`LoopExit` if our caller switches greenlets
       in between items yielded by this function.
    .. versionchanged:: 1.4
       Add support to use the returned object as a context manager.
    """
    # QQQ would be nice to support iterable here that can be generated slowly (why?)
    hub = get_hub()
    if objects is None:
        # NOTE: eagerly blocks (not lazy) and returns a one-item list,
        # matching wait_on_objects' behaviour for objects=None.
        return [hub.join(timeout=timeout)]
    return _WaitIterator(objects, hub, timeout, count)
|
||||
|
||||
|
||||
def wait_on_objects(objects=None, timeout=None, count=None):
    """
    Wait for *objects* to become ready or for event loop to finish.

    If *objects* is provided, it must be a list containing objects
    implementing the wait protocol (rawlink() and unlink() methods):

    - :class:`gevent.Greenlet` instance
    - :class:`gevent.event.Event` instance
    - :class:`gevent.lock.Semaphore` instance
    - :class:`gevent.subprocess.Popen` instance

    If *objects* is ``None`` (the default), ``wait()`` blocks until
    the current event loop has nothing to do (or until *timeout* passes):

    - all greenlets have finished
    - all servers were stopped
    - all event loop watchers were stopped.

    If *count* is ``None`` (the default), wait for all *objects*
    to become ready.

    If *count* is a number, wait for (up to) *count* objects to become
    ready. (For example, if count is ``1`` then the function exits
    when any object in the list is ready).

    If *timeout* is provided, it specifies the maximum number of
    seconds ``wait()`` will block.

    Returns the list of ready objects, in the order in which they were
    ready.

    .. seealso:: :func:`iwait`
    """
    if objects is None:
        hub = get_hub()
        # Delegates to the hub; returns its join() result rather than a list.
        return hub.join(timeout=timeout)
    return list(iwait_on_objects(objects, timeout, count))
|
||||
|
||||
# The exception class raised by _primitive_wait when a timeout expires
# and the caller supplied no explicit timeout_exc.  NOTE(review):
# presumably replaced with socket.timeout at import time elsewhere (see
# wait_on_watcher's docstring) — confirm against the caller of
# set_default_timeout_error.
_timeout_error = Exception


def set_default_timeout_error(e):
    # Install *e* as the default timeout exception class used by
    # _primitive_wait.
    global _timeout_error
    _timeout_error = e
|
||||
|
||||
def _primitive_wait(watcher, timeout, timeout_exc, hub):
    # Block the current greenlet on *watcher*, optionally bounded by
    # *timeout* seconds.  *timeout_exc* may be the _NONE sentinel
    # (meaning "use the default _timeout_error"), an exception, or any
    # value gevent.Timeout accepts.
    if watcher.callback is not None:
        # A watcher can serve only one waiter at a time.
        raise ConcurrentObjectUseError('This socket is already used by another greenlet: %r'
                                       % (watcher.callback, ))

    if hub is None:
        hub = get_hub()

    if timeout is None:
        # Unbounded wait; no Timeout object needed.
        hub.wait(watcher)
        return

    # Choose the exception to raise on expiration.  (NOTE(review): the
    # ``or timeout is None`` arm is unreachable here — we returned above
    # when timeout is None — kept as-is.)
    timeout = Timeout._start_new_or_dummy(
        timeout,
        (timeout_exc
         if timeout_exc is not _NONE or timeout is None
         else _timeout_error('timed out')))

    with timeout:
        hub.wait(watcher)
|
||||
|
||||
# Suitable to be bound as an instance method
def wait_on_socket(socket, watcher, timeout_exc=None):
    # Block until *watcher* fires, using the socket's own timeout and hub.
    # ``timeout_exc=None`` maps to the _NONE sentinel, i.e. "use the
    # module default timeout exception".
    if socket is None or watcher is None:
        # test__hub TestCloseSocketWhilePolling, on Python 2; Python 3
        # catches the EBADF differently.
        raise ConcurrentObjectUseError("The socket has already been closed by another greenlet")
    _primitive_wait(watcher, socket.timeout,
                    timeout_exc if timeout_exc is not None else _NONE,
                    socket.hub)
|
||||
|
||||
def wait_on_watcher(watcher, timeout=None, timeout_exc=_NONE, hub=None):
    """
    wait(watcher, timeout=None, [timeout_exc=None]) -> None

    Block the current greenlet until *watcher* is ready.

    If *timeout* is non-negative, then *timeout_exc* is raised after
    *timeout* second has passed.

    If :func:`cancel_wait` is called on *watcher* by another greenlet,
    raise an exception in this blocking greenlet
    (``socket.error(EBADF, 'File descriptor was closed in another
    greenlet')`` by default).

    :param watcher: An event loop watcher, most commonly an IO watcher obtained from
        :meth:`gevent.core.loop.io`
    :keyword timeout_exc: The exception to raise if the timeout expires.
        By default, a :class:`socket.timeout` exception is raised.
        If you pass a value for this keyword, it is interpreted as for
        :class:`gevent.timeout.Timeout`.

    :raises ~gevent.hub.ConcurrentObjectUseError: If the *watcher* is
        already started.
    """
    # Thin public wrapper over _primitive_wait.
    _primitive_wait(watcher, timeout, timeout_exc, hub)
|
||||
|
||||
|
||||
def wait_read(fileno, timeout=None, timeout_exc=_NONE):
    """
    wait_read(fileno, timeout=None, [timeout_exc=None]) -> None

    Block the current greenlet until *fileno* is ready to read.

    For the meaning of the other parameters and possible exceptions,
    see :func:`wait`.

    .. seealso:: :func:`cancel_wait`
    """
    hub = get_hub()
    # Event mask 1 == read (see ILoop.io).
    watcher = hub.loop.io(fileno, 1)
    try:
        return wait_on_watcher(watcher, timeout, timeout_exc, hub)
    finally:
        # Always release the native watcher resources.
        watcher.close()
|
||||
|
||||
|
||||
def wait_write(fileno, timeout=None, timeout_exc=_NONE, event=_NONE):
    """
    wait_write(fileno, timeout=None, [timeout_exc=None]) -> None

    Block the current greenlet until *fileno* is ready to write.

    For the meaning of the other parameters and possible exceptions,
    see :func:`wait`.

    .. deprecated:: 1.1
       The keyword argument *event* is ignored. Applications should not pass this parameter.
       In the future, doing so will become an error.

    .. seealso:: :func:`cancel_wait`
    """
    # pylint:disable=unused-argument
    hub = get_hub()
    # Event mask 2 == write (see ILoop.io).
    watcher = hub.loop.io(fileno, 2)
    try:
        return wait_on_watcher(watcher, timeout, timeout_exc, hub)
    finally:
        # Always release the native watcher resources.
        watcher.close()
|
||||
|
||||
|
||||
def wait_readwrite(fileno, timeout=None, timeout_exc=_NONE, event=_NONE):
    """
    wait_readwrite(fileno, timeout=None, [timeout_exc=None]) -> None

    Block the current greenlet until *fileno* is ready to read or
    write.

    For the meaning of the other parameters and possible exceptions,
    see :func:`wait`.

    .. deprecated:: 1.1
       The keyword argument *event* is ignored. Applications should not pass this parameter.
       In the future, doing so will become an error.

    .. seealso:: :func:`cancel_wait`
    """
    # pylint:disable=unused-argument
    hub = get_hub()
    # Event mask 3 == read | write (see ILoop.io).
    watcher = hub.loop.io(fileno, 3)
    try:
        return wait_on_watcher(watcher, timeout, timeout_exc, hub)
    finally:
        # Always release the native watcher resources.
        watcher.close()
|
||||
|
||||
|
||||
def _init():
    # Run the one-time module initialization.  In the pure-Python build
    # greenlet_init is a no-op lambda (assigned above); presumably the
    # C-accelerated build replaces it with a real initializer — see the
    # locals() assignments at the top of this module.
    greenlet_init() # pylint:disable=undefined-variable

_init()
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__hub_primitives')
|
||||
82
venv3_12/Lib/site-packages/gevent/_ident.py
Normal file
82
venv3_12/Lib/site-packages/gevent/_ident.py
Normal file
@@ -0,0 +1,82 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2018 gevent contributors. See LICENSE for details.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
from weakref import WeakKeyDictionary
|
||||
from weakref import ref
|
||||
|
||||
from heapq import heappop
|
||||
from heapq import heappush
|
||||
|
||||
__all__ = [
|
||||
'IdentRegistry',
|
||||
]
|
||||
|
||||
class ValuedWeakRef(ref):
    """
    A weak ref with an associated value.
    """

    # Only the extra ``value`` slot; weakref.ref itself has no __dict__.
    __slots__ = ('value',)
|
||||
|
||||
|
||||
class IdentRegistry(object):
    """
    Maintains a unique mapping of (small) non-negative integer identifiers
    to objects that can be weakly referenced.

    It is guaranteed that no two objects will have the same
    identifier at the same time, as long as those objects are
    also uniquely hashable.
    """

    def __init__(self):
        # {obj -> (ident, wref(obj))}
        self._registry = WeakKeyDictionary()

        # A heap of numbers that have been used and returned
        self._available_idents = []

    def get_ident(self, obj):
        """
        Retrieve the identifier for *obj*, creating one
        if necessary.
        """

        try:
            return self._registry[obj][0]
        except KeyError:
            pass

        if self._available_idents:
            # Take the smallest free number
            ident = heappop(self._available_idents)
        else:
            # Allocate a bigger one
            ident = len(self._registry)

        # The weakref callback returns the ident to the free heap when
        # *obj* is collected.
        vref = ValuedWeakRef(obj, self._return_ident)
        vref.value = ident # pylint:disable=assigning-non-slot,attribute-defined-outside-init
        self._registry[obj] = (ident, vref)
        return ident

    def _return_ident(self, vref):
        # By the time this is called, self._registry has been
        # updated
        if heappush is not None:
            # Under some circumstances we can get called
            # when the interpreter is shutting down, and globals
            # aren't available any more.
            heappush(self._available_idents, vref.value)

    def __len__(self):
        # Number of live (still-referenced) registered objects.
        return len(self._registry)
|
||||
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__ident')
|
||||
226
venv3_12/Lib/site-packages/gevent/_imap.py
Normal file
226
venv3_12/Lib/site-packages/gevent/_imap.py
Normal file
@@ -0,0 +1,226 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018 gevent
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False,infer_types=True
|
||||
|
||||
"""
|
||||
Iterators across greenlets or AsyncResult objects.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
from gevent import lock
|
||||
from gevent import queue
|
||||
|
||||
|
||||
__all__ = [
    'IMapUnordered',
    'IMap',
]

# Assigning through locals() hides these from Cython, which would
# otherwise reject direct assignment to cdef names in the accelerated
# build (same pattern as gevent._hub_primitives).
locals()['Greenlet'] = __import__('gevent').Greenlet
locals()['Semaphore'] = lock.Semaphore
locals()['UnboundQueue'] = queue.UnboundQueue
|
||||
|
||||
|
||||
class Failure(object):
    """
    Carries an exception (and, optionally, a callable that re-raises it)
    through the result queue so the consuming greenlet can re-raise.
    """

    __slots__ = ('exc', 'raise_exception')

    def __init__(self, exc, raise_exception=None):
        self.raise_exception = raise_exception
        self.exc = exc
|
||||
|
||||
|
||||
def _raise_exc(failure):
|
||||
# For cython.
|
||||
if failure.raise_exception:
|
||||
failure.raise_exception()
|
||||
else:
|
||||
raise failure.exc
|
||||
|
||||
class IMapUnordered(Greenlet): # pylint:disable=undefined-variable
    """
    An iterator of map results, yielded in completion order.
    """

    def __init__(self, func, iterable, spawn, maxsize=None, _zipped=False):
        """
        An iterator that spawns ``func(item)`` for each item of *iterable*
        and yields results as they finish.

        :param callable spawn: The function we use to create new greenlets.
        :keyword int maxsize: If given and not-None, specifies the maximum number of
            finished results that will be allowed to accumulate awaiting the reader;
            more than that number of results will cause map function greenlets to begin
            to block. This is most useful if there is a great disparity in the speed of
            the mapping code and the consumer and the results consume a great deal of resources.
            Using a bound is more computationally expensive than not using a bound.

        .. versionchanged:: 1.1b3
           Added the *maxsize* parameter.
        """
        Greenlet.__init__(self) # pylint:disable=undefined-variable
        self.spawn = spawn
        self._zipped = _zipped
        self.func = func
        self.iterable = iterable
        self.queue = UnboundQueue() # pylint:disable=undefined-variable


        if maxsize:
            # Bounding the queue is not enough if we want to keep from
            # accumulating objects; the result value will be around as
            # the greenlet's result, blocked on self.queue.put(), and
            # we'll go on to spawn another greenlet, which in turn can
            # create the result. So we need a semaphore to prevent a
            # greenlet from exiting while the queue is full so that we
            # don't spawn the next greenlet (assuming that self.spawn
            # is of course bounded). (Alternatively we could have the
            # greenlet itself do the insert into the pool, but that
            # takes some rework).
            #
            # Given the use of a semaphore at this level, sizing the queue becomes
            # redundant, and that lets us avoid having to use self.link() instead
            # of self.rawlink() to avoid having blocking methods called in the
            # hub greenlet.
            self._result_semaphore = Semaphore(maxsize) # pylint:disable=undefined-variable
        else:
            self._result_semaphore = None

        # Number of worker greenlets spawned but not yet reported back.
        self._outstanding_tasks = 0
        # The index (zero based) of the maximum number of
        # results we will have.
        self._max_index = -1
        self.finished = False


    # We're iterating in a different greenlet than we're running.
    def __iter__(self):
        return self

    def __next__(self):
        if self._result_semaphore is not None:
            # Consuming a result frees a slot for a blocked worker.
            self._result_semaphore.release()
        value = self._inext()
        if isinstance(value, Failure):
            _raise_exc(value)
        return value

    next = __next__ # Py2

    def _inext(self):
        return self.queue.get()

    def _ispawn(self, func, item, item_index):
        if self._result_semaphore is not None:
            # Blocks when maxsize results are pending consumption.
            self._result_semaphore.acquire()
        self._outstanding_tasks += 1
        g = self.spawn(func, item) if not self._zipped else self.spawn(func, *item)
        # Remember the source position so IMap can re-order results.
        g._imap_task_index = item_index
        g.rawlink(self._on_result)
        return g

    def _run(self): # pylint:disable=method-hidden
        try:
            func = self.func
            for item in self.iterable:
                self._max_index += 1
                self._ispawn(func, item, self._max_index)
            self._on_finish(None)
        except BaseException as e:
            self._on_finish(e)
            raise
        finally:
            # Drop references so completed work can be collected.
            self.spawn = None
            self.func = None
            self.iterable = None
            self._result_semaphore = None

    def _on_result(self, greenlet):
        # This method will be called in the hub greenlet (we rawlink)
        self._outstanding_tasks -= 1
        count = self._outstanding_tasks
        finished = self.finished
        ready = self.ready()
        put_finished = False

        # Only mark finished once all spawning is done (ready) and no
        # workers remain outstanding.
        if ready and count <= 0 and not finished:
            finished = self.finished = True
            put_finished = True

        if greenlet.successful():
            self.queue.put(self._iqueue_value_for_success(greenlet))
        else:
            self.queue.put(self._iqueue_value_for_failure(greenlet))

        if put_finished:
            # Sentinel telling the consumer iteration is complete.
            self.queue.put(self._iqueue_value_for_self_finished())

    def _on_finish(self, exception):
        # Called in this greenlet.
        if self.finished:
            return

        if exception is not None:
            self.finished = True
            self.queue.put(self._iqueue_value_for_self_failure(exception))
            return

        if self._outstanding_tasks <= 0:
            self.finished = True
            self.queue.put(self._iqueue_value_for_self_finished())

    def _iqueue_value_for_success(self, greenlet):
        return greenlet.value

    def _iqueue_value_for_failure(self, greenlet):
        return Failure(greenlet.exception, getattr(greenlet, '_raise_exception'))

    def _iqueue_value_for_self_finished(self):
        # Failure(StopIteration) makes __next__ raise StopIteration.
        return Failure(StopIteration())

    def _iqueue_value_for_self_failure(self, exception):
        return Failure(exception, self._raise_exception)
|
||||
|
||||
|
||||
class IMap(IMapUnordered):
    # A specialization of IMapUnordered that returns items
    # in the order in which they were generated, not
    # the order in which they finish.
    #
    # Queue entries become (task_index, value) tuples; out-of-order
    # arrivals are parked in self._results until their turn.

    def __init__(self, *args, **kwargs):
        # The result dictionary: {index: value}
        self._results = {}

        # The index of the result to return next.
        self.index = 0
        IMapUnordered.__init__(self, *args, **kwargs)

    def _inext(self):
        try:
            value = self._results.pop(self.index)
        except KeyError:
            # Wait for our index to finish.
            while 1:
                index, value = self.queue.get()
                if index == self.index:
                    break
                self._results[index] = value
        self.index += 1
        return value

    def _iqueue_value_for_success(self, greenlet):
        return (greenlet._imap_task_index, IMapUnordered._iqueue_value_for_success(self, greenlet))

    def _iqueue_value_for_failure(self, greenlet):
        return (greenlet._imap_task_index, IMapUnordered._iqueue_value_for_failure(self, greenlet))

    def _iqueue_value_for_self_finished(self):
        # _max_index + 1 sorts the finished sentinel after every real result.
        return (self._max_index + 1, IMapUnordered._iqueue_value_for_self_finished(self))

    def _iqueue_value_for_self_failure(self, exception):
        return (self._max_index + 1, IMapUnordered._iqueue_value_for_self_failure(self, exception))
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__imap')
|
||||
318
venv3_12/Lib/site-packages/gevent/_interfaces.py
Normal file
318
venv3_12/Lib/site-packages/gevent/_interfaces.py
Normal file
@@ -0,0 +1,318 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018 gevent contributors. See LICENSE for details.
|
||||
"""
|
||||
Interfaces gevent uses that don't belong any one place.
|
||||
|
||||
This is not a public module, these interfaces are not
|
||||
currently exposed to the public, they mostly exist for
|
||||
documentation and testing purposes.
|
||||
|
||||
.. versionadded:: 1.3b2
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
|
||||
from zope.interface import Interface
|
||||
from zope.interface import Attribute
|
||||
|
||||
# Alias for the text (unicode) string type, spelled to work on both
# Python 2 and Python 3.
_text_type = type(u'')

try:
    from zope import schema
except ImportError: # pragma: no cover
    # Fallback stand-ins used when zope.schema is unavailable: fields
    # degrade to plain zope.interface Attributes with the description
    # (plus required-ness) folded into the attribute docstring.
    class _Field(Attribute):
        # Keyword arguments silently accepted-and-dropped for signature
        # compatibility with the real zope.schema fields.
        __allowed_kw__ = ('readonly', 'min',)
        def __init__(self, description, required=False, **kwargs):
            description = u"%s (required? %s)" % (description, required)
            assert isinstance(description, _text_type)
            for k in self.__allowed_kw__:
                kwargs.pop(k, None)
            if kwargs:
                raise TypeError("Unexpected keyword arguments: %r" % (kwargs,))
            Attribute.__init__(self, description)

    class schema(object):
        # Only the field kinds this module actually uses.
        Bool = _Field
        Float = _Field
|
||||
|
||||
|
||||
# pylint:disable=no-method-argument, unused-argument, no-self-argument
|
||||
# pylint:disable=inherit-non-class
|
||||
|
||||
__all__ = [
|
||||
'ILoop',
|
||||
'IWatcher',
|
||||
'ICallback',
|
||||
]
|
||||
|
||||
class ILoop(Interface):
|
||||
"""
|
||||
The common interface expected for all event loops.
|
||||
|
||||
.. caution::
|
||||
This is an internal, low-level interface. It may change
|
||||
between minor versions of gevent.
|
||||
|
||||
.. rubric:: Watchers
|
||||
|
||||
The methods that create event loop watchers are `io`, `timer`,
|
||||
`signal`, `idle`, `prepare`, `check`, `fork`, `async_`, `child`,
|
||||
`stat`. These all return various types of :class:`IWatcher`.
|
||||
|
||||
All of those methods have one or two common arguments. *ref* is a
|
||||
boolean saying whether the event loop is allowed to exit even if
|
||||
this watcher is still started. *priority* is event loop specific.
|
||||
"""
|
||||
|
||||
default = schema.Bool(
|
||||
description=u"Boolean indicating whether this is the default loop",
|
||||
required=True,
|
||||
readonly=True,
|
||||
)
|
||||
|
||||
approx_timer_resolution = schema.Float(
|
||||
description=u"Floating point number of seconds giving (approximately) the minimum "
|
||||
"resolution of a timer (and hence the minimun value the sleep can sleep for). "
|
||||
"On libuv, this is fixed by the library, but on libev it is just a guess "
|
||||
"and the actual value is system dependent.",
|
||||
required=True,
|
||||
min=0.0,
|
||||
readonly=True,
|
||||
)
|
||||
|
||||
def run(nowait=False, once=False):
|
||||
"""
|
||||
Run the event loop.
|
||||
|
||||
This is usually called automatically by the hub greenlet, but
|
||||
in special cases (when the hub is *not* running) you can use
|
||||
this to control how the event loop runs (for example, to integrate
|
||||
it with another event loop).
|
||||
"""
|
||||
|
||||
def now():
|
||||
"""
|
||||
now() -> float
|
||||
|
||||
Return the loop's notion of the current time.
|
||||
|
||||
This may not necessarily be related to :func:`time.time` (it
|
||||
may have a different starting point), but it must be expressed
|
||||
in fractional seconds (the same *units* used by :func:`time.time`).
|
||||
"""
|
||||
|
||||
def update_now():
|
||||
"""
|
||||
Update the loop's notion of the current time.
|
||||
|
||||
.. versionadded:: 1.3
|
||||
In the past, this available as ``update``. This is still available as
|
||||
an alias but will be removed in the future.
|
||||
"""
|
||||
|
||||
def destroy():
|
||||
"""
|
||||
Clean up resources used by this loop.
|
||||
|
||||
If you create loops
|
||||
(especially loops that are not the default) you *should* call
|
||||
this method when you are done with the loop.
|
||||
|
||||
.. caution::
|
||||
|
||||
As an implementation note, the libev C loop implementation has a
|
||||
finalizer (``__del__``) that destroys the object, but the libuv
|
||||
and libev CFFI implementations do not. The C implementation may change.
|
||||
|
||||
"""
|
||||
|
||||
def io(fd, events, ref=True, priority=None):
|
||||
"""
|
||||
Create and return a new IO watcher for the given *fd*.
|
||||
|
||||
*events* is a bitmask specifying which events to watch
|
||||
for. 1 means read, and 2 means write.
|
||||
"""
|
||||
|
||||
def closing_fd(fd):
|
||||
"""
|
||||
Inform the loop that the file descriptor *fd* is about to be closed.
|
||||
|
||||
The loop may choose to schedule events to be delivered to any active
|
||||
IO watchers for the fd. libev does this so that the active watchers
|
||||
can be closed.
|
||||
|
||||
:return: A boolean value that's true if active IO watchers were
|
||||
queued to run. Closing the FD should be deferred until the next
|
||||
run of the eventloop with a callback.
|
||||
"""
|
||||
|
||||
def timer(after, repeat=0.0, ref=True, priority=None):
|
||||
"""
|
||||
Create and return a timer watcher that will fire after *after* seconds.
|
||||
|
||||
If *repeat* is given, the timer will continue to fire every *repeat* seconds.
|
||||
"""
|
||||
|
||||
def signal(signum, ref=True, priority=None):
|
||||
"""
|
||||
Create and return a signal watcher for the signal *signum*,
|
||||
one of the constants defined in :mod:`signal`.
|
||||
|
||||
This is platform and event loop specific.
|
||||
"""
|
||||
|
||||
def idle(ref=True, priority=None):
|
||||
"""
|
||||
Create and return a watcher that fires when the event loop is idle.
|
||||
"""
|
||||
|
||||
def prepare(ref=True, priority=None):
|
||||
"""
|
||||
Create and return a watcher that fires before the event loop
|
||||
polls for IO.
|
||||
|
||||
.. caution:: This method is not supported by libuv.
|
||||
"""
|
||||
|
||||
def check(ref=True, priority=None):
|
||||
"""
|
||||
Create and return a watcher that fires after the event loop
|
||||
polls for IO.
|
||||
"""
|
||||
|
||||
def fork(ref=True, priority=None):
    """
    Create a watcher that fires when the process forks.

    Availability: Unix.
    """
|
||||
|
||||
def async_(ref=True, priority=None):
    """
    Create a watcher that fires when triggered, possibly
    from another thread.

    .. versionchanged:: 1.3
        This was previously just named ``async``; for compatibility
        with Python 3.7 where ``async`` is a keyword it was renamed.
        On older versions of Python the old name is still around, but
        it will be removed in the future.
    """
|
||||
|
||||
# These watcher factories are declared only on platforms that can
# support them (not Windows).
if sys.platform != "win32":

    def child(pid, trace=0, ref=True):
        """
        Create a watcher that fires for events on the child with process ID *pid*.

        This is platform specific and not available on Windows.

        Availability: Unix.
        """

    def stat(path, interval=0.0, ref=True, priority=None):
        """
        Create a watcher that monitors the filesystem item at *path*.

        If the operating system doesn't support event notifications
        from the filesystem, poll for changes every *interval* seconds.
        """
|
||||
|
||||
def run_callback(func, *args):
    """
    Run the *func* passing it *args* at the next opportune moment.

    The next opportune moment may be the next iteration of the event loop,
    the current iteration, or some other time in the future.

    Returns a :class:`ICallback` object. See that documentation for
    important caveats.

    .. seealso:: :meth:`asyncio.loop.call_soon`
        The :mod:`asyncio` equivalent.
    """
|
||||
|
||||
def run_callback_threadsafe(func, *args):
    """
    Like :meth:`run_callback`, but for use from *outside* the
    thread that is running this loop.

    This not only schedules the *func* to run, it also causes the
    loop to notice that the *func* has been scheduled (e.g., it causes
    the loop to wake up).

    .. versionadded:: 21.1.0

    .. seealso:: :meth:`asyncio.loop.call_soon_threadsafe`
        The :mod:`asyncio` equivalent.
    """
|
||||
|
||||
class IWatcher(Interface):
    """
    An event loop watcher.

    These objects call their *callback* function when the event
    loop detects the event has happened.

    .. important:: You *must* call :meth:`close` when you are
        done with this object to avoid leaking native resources.
    """

    def start(callback, *args, **kwargs):
        """
        Have the event loop begin watching for this event.

        When the event is detected, *callback* will be called with
        *args*.

        .. caution::

            Not all watchers accept ``**kwargs``,
            and some watchers define special meanings for certain keyword args.
        """

    def stop():
        """
        Have the event loop stop watching this event.

        In the future you may call :meth:`start` to begin watching
        again.
        """

    def close():
        """
        Dispose of any native resources associated with the watcher.

        If we were active, stop.

        Attempting to operate on this object after calling close is
        undefined. You should dispose of any references you have to it
        after calling this method.
        """
|
||||
|
||||
class ICallback(Interface):
    """
    Represents a function that will be run some time in the future.

    Callback functions run in the hub, and as such they cannot use
    gevent's blocking API; any exception they raise cannot be caught.
    """

    pending = schema.Bool(description=u"Has this callback run yet?",
                          readonly=True)

    def stop():
        """
        If this object is still `pending`, cause it to
        no longer be `pending`; the function will not be run.
        """

    def close():
        """
        An alias of `stop`.
        """
|
||||
314
venv3_12/Lib/site-packages/gevent/_monitor.py
Normal file
314
venv3_12/Lib/site-packages/gevent/_monitor.py
Normal file
@@ -0,0 +1,314 @@
|
||||
# Copyright (c) 2018 gevent. See LICENSE for details.
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from weakref import ref as wref
|
||||
|
||||
from greenlet import getcurrent
|
||||
|
||||
from gevent import config as GEVENT_CONFIG
|
||||
from gevent.monkey import get_original
|
||||
from gevent.events import notify
|
||||
from gevent.events import EventLoopBlocked
|
||||
from gevent.events import MemoryUsageThresholdExceeded
|
||||
from gevent.events import MemoryUsageUnderThreshold
|
||||
from gevent.events import IPeriodicMonitorThread
|
||||
from gevent.events import implementer
|
||||
|
||||
from gevent._tracer import GreenletTracer
|
||||
from gevent._compat import thread_mod_name
|
||||
from gevent._compat import perf_counter
|
||||
from gevent._compat import get_this_psutil_process
|
||||
|
||||
|
||||
|
||||
__all__ = [
|
||||
'PeriodicMonitoringThread',
|
||||
]
|
||||
|
||||
get_thread_ident = get_original(thread_mod_name, 'get_ident')
|
||||
start_new_thread = get_original(thread_mod_name, 'start_new_thread')
|
||||
thread_sleep = get_original('time', 'sleep')
|
||||
|
||||
|
||||
|
||||
class MonitorWarning(RuntimeWarning):
    """The type of warnings we emit."""
|
||||
|
||||
|
||||
class _MonitorEntry(object):
|
||||
|
||||
__slots__ = ('function', 'period', 'last_run_time')
|
||||
|
||||
def __init__(self, function, period):
|
||||
self.function = function
|
||||
self.period = period
|
||||
self.last_run_time = 0
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.function == other.function and self.period == other.period
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.function, self.period))
|
||||
|
||||
def __repr__(self):
|
||||
return repr((self.function, self.period, self.last_run_time))
|
||||
|
||||
|
||||
@implementer(IPeriodicMonitorThread)
class PeriodicMonitoringThread(object):
    """
    A background native thread that periodically runs monitoring
    functions against a hub: detecting a blocked event loop and
    (optionally) watching memory usage.
    """
    # This doesn't extend threading.Thread because that gets monkey-patched.
    # We use the low-level 'start_new_thread' primitive instead.

    # The amount of seconds we will sleep when we think we have nothing
    # to do.
    inactive_sleep_time = 2.0

    # The absolute minimum we will sleep, regardless of
    # what particular monitoring functions want to say.
    min_sleep_time = 0.005

    # The minimum period in seconds at which we will check memory usage.
    # Getting memory usage is fairly expensive.
    min_memory_monitor_period = 2

    # A list of _MonitorEntry objects: [(function(hub), period, last_run_time))]
    # The first entry is always our entry for self.monitor_blocking
    _monitoring_functions = None

    # The calculated min sleep time for the monitoring functions list.
    _calculated_sleep_time = None

    # A boolean value that also happens to capture the
    # memory usage at the time we exceeded the threshold. Reset
    # to 0 when we go back below.
    _memory_exceeded = 0

    # The instance of GreenletTracer we're using
    _greenlet_tracer = None

    def __init__(self, hub):
        # Weak reference so this thread doesn't keep the hub alive; when
        # the hub is collected, _on_hub_gc stops us.
        self._hub_wref = wref(hub, self._on_hub_gc)
        self.should_run = True

        # Must be installed in the thread that the hub is running in;
        # the trace function is threadlocal
        assert get_thread_ident() == hub.thread_ident
        self._greenlet_tracer = GreenletTracer()

        self._monitoring_functions = [_MonitorEntry(self.monitor_blocking,
                                                    GEVENT_CONFIG.max_blocking_time)]
        self._calculated_sleep_time = GEVENT_CONFIG.max_blocking_time
        # Create the actual monitoring thread. This is effectively a "daemon"
        # thread.
        self.monitor_thread_ident = start_new_thread(self, ())

        # We must track the PID to know if your thread has died after a fork
        self.pid = os.getpid()

    def _on_fork(self):
        # Pseudo-standard method that resolver_ares and threadpool
        # also have, called by hub.reinit()
        pid = os.getpid()
        if pid != self.pid:
            # We're in the child: the monitor thread did not survive
            # the fork, so start a fresh one.
            self.pid = pid
            self.monitor_thread_ident = start_new_thread(self, ())

    @property
    def hub(self):
        # Dereference the weak ref; may return None once the hub is gone.
        return self._hub_wref()


    def monitoring_functions(self):
        """
        Return the current list of _MonitorEntry objects, refreshing
        the blocking-monitor period from the (possibly changed) config.
        """
        # Return a list of _MonitorEntry objects

        # Update max_blocking_time each time.
        mbt = GEVENT_CONFIG.max_blocking_time # XXX: Events so we know when this changes.
        if mbt != self._monitoring_functions[0].period:
            self._monitoring_functions[0].period = mbt
            self._calculated_sleep_time = min(x.period for x in self._monitoring_functions)
        return self._monitoring_functions

    def add_monitoring_function(self, function, period):
        """
        Add, update, or (when *period* is None) remove a monitoring
        function; recalculates the sleep interval afterwards.
        """
        if not callable(function):
            raise ValueError("function must be callable")

        if period is None:
            # Remove.
            self._monitoring_functions = [
                x for x in self._monitoring_functions
                if x.function != function
            ]
        elif period <= 0:
            raise ValueError("Period must be positive.")
        else:
            # Add or update period
            entry = _MonitorEntry(function, period)
            self._monitoring_functions = [
                x if x.function != function else entry
                for x in self._monitoring_functions
            ]
            if entry not in self._monitoring_functions:
                self._monitoring_functions.append(entry)
        self._calculated_sleep_time = min(x.period for x in self._monitoring_functions)

    def calculate_sleep_time(self):
        """
        Return how long (seconds) the monitor thread should sleep
        before its next pass.
        """
        min_sleep = self._calculated_sleep_time
        if min_sleep <= 0:
            # Everyone wants to be disabled. Sleep for a longer period of
            # time than usual so we don't spin unnecessarily. We might be
            # enabled again in the future.
            return self.inactive_sleep_time
        return max((min_sleep, self.min_sleep_time))

    def kill(self):
        """Stop the monitoring thread and uninstall the trace function."""
        if not self.should_run:
            # Prevent overwriting trace functions.
            return
        # Stop this monitoring thread from running.
        self.should_run = False
        # Uninstall our tracing hook
        self._greenlet_tracer.kill()

    def _on_hub_gc(self, _):
        # Weakref callback: the hub was garbage collected.
        self.kill()

    def __call__(self):
        # The function that runs in the monitoring thread.
        # We cannot use threading.current_thread because it would
        # create an immortal DummyThread object.
        getcurrent().gevent_monitoring_thread = wref(self)

        try:
            while self.should_run:
                functions = self.monitoring_functions()
                assert functions
                sleep_time = self.calculate_sleep_time()

                thread_sleep(sleep_time)

                # Make sure the hub is still around, and still active,
                # and keep it around while we are here.
                hub = self.hub
                if not hub:
                    self.kill()

                if self.should_run:
                    this_run = perf_counter()
                    for entry in functions:
                        f = entry.function
                        period = entry.period
                        last_run = entry.last_run_time
                        if period and last_run + period <= this_run:
                            entry.last_run_time = this_run
                            f(hub)
                    del hub # break our reference to hub while we sleep

        except SystemExit:
            pass
        except: # pylint:disable=bare-except
            # We're a daemon thread, so swallow any exceptions that get here
            # during interpreter shutdown.
            if not sys or not sys.stderr: # pragma: no cover
                # Interpreter is shutting down
                pass
            else:
                hub = self.hub
                if hub is not None:
                    # XXX: This tends to do bad things like end the process, because we
                    # try to switch *threads*, which can't happen. Need something better.
                    hub.handle_error(self, *sys.exc_info())

    def monitor_blocking(self, hub):
        """
        Check whether the hub's event loop appears blocked; if so,
        print a report and notify an EventLoopBlocked event.
        """
        # Called periodically to see if the trace function has
        # fired to switch greenlets. If not, we will print
        # the greenlet tree.

        # For tests, we return a true value when we think we found something
        # blocking

        did_block = self._greenlet_tracer.did_block_hub(hub)
        if not did_block:
            return

        active_greenlet = did_block[1] # pylint:disable=unsubscriptable-object
        report = self._greenlet_tracer.did_block_hub_report(
            hub, active_greenlet,
            dict(greenlet_stacks=False, current_thread_ident=self.monitor_thread_ident))

        stream = hub.exception_stream
        for line in report:
            # Printing line by line may interleave with other things,
            # but it should also prevent a "reentrant call to print"
            # when the report is large.
            print(line, file=stream)

        notify(EventLoopBlocked(active_greenlet, GEVENT_CONFIG.max_blocking_time, report))
        return (active_greenlet, report)

    def ignore_current_greenlet_blocking(self):
        # Delegate to the tracer: exclude the current greenlet from
        # blocking detection.
        self._greenlet_tracer.ignore_current_greenlet_blocking()

    def monitor_current_greenlet_blocking(self):
        # Delegate to the tracer: re-include the current greenlet in
        # blocking detection.
        self._greenlet_tracer.monitor_current_greenlet_blocking()

    def _get_process(self): # pylint:disable=method-hidden
        # Lazily resolve the psutil Process (or None) once, then replace
        # this method on the instance with a cheap closure.
        proc = get_this_psutil_process()
        self._get_process = lambda: proc
        return proc

    def can_monitor_memory_usage(self):
        """Return whether psutil is available to read memory usage."""
        return self._get_process() is not None

    def install_monitor_memory_usage(self):
        # Start monitoring memory usage, if possible.
        # If not possible, emit a warning.
        if not self.can_monitor_memory_usage():
            import warnings
            warnings.warn("Unable to monitor memory usage. Install psutil.",
                          MonitorWarning)
            return

        self.add_monitoring_function(self.monitor_memory_usage,
                                     max(GEVENT_CONFIG.memory_monitor_period,
                                         self.min_memory_monitor_period))

    def monitor_memory_usage(self, _hub):
        """
        Compare current process memory usage to the configured maximum,
        notifying threshold-exceeded/under-threshold events on edge
        transitions. Returns the event (or -1 if disabled) for tests.
        """
        max_allowed = GEVENT_CONFIG.max_memory_usage
        if not max_allowed:
            # They disabled it.
            return -1 # value for tests

        rusage = self._get_process().memory_full_info()
        # uss only documented available on Windows, Linux, and OS X.
        # If not available, fall back to rss as an aproximation.
        mem_usage = getattr(rusage, 'uss', 0) or rusage.rss

        event = None # Return value for tests

        if mem_usage > max_allowed:
            if mem_usage > self._memory_exceeded:
                # We're still growing
                event = MemoryUsageThresholdExceeded(
                    mem_usage, max_allowed, rusage)
                notify(event)
            self._memory_exceeded = mem_usage
        else:
            # we're below. Were we above it last time?
            if self._memory_exceeded:
                event = MemoryUsageUnderThreshold(
                    mem_usage, max_allowed, rusage, self._memory_exceeded)
                notify(event)
            self._memory_exceeded = 0

        return event

    def __repr__(self):
        return '<%s at %s in thread %s greenlet %r for %r>' % (
            self.__class__.__name__,
            hex(id(self)),
            hex(self.monitor_thread_ident),
            getcurrent(),
            self._hub_wref())
|
||||
252
venv3_12/Lib/site-packages/gevent/_patcher.py
Normal file
252
venv3_12/Lib/site-packages/gevent/_patcher.py
Normal file
@@ -0,0 +1,252 @@
|
||||
# Copyright 2018 gevent. See LICENSE for details.
|
||||
|
||||
# Portions of the following are inspired by code from eventlet. I
|
||||
# believe they are distinct enough that no eventlet copyright would
|
||||
# apply (they are not a copy or substantial portion of the eventlot
|
||||
# code).
|
||||
|
||||
# Added in gevent 1.3a2. Not public in that release.
|
||||
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
import importlib
|
||||
import sys
|
||||
|
||||
|
||||
from gevent._compat import iteritems
|
||||
from gevent._compat import imp_acquire_lock
|
||||
from gevent._compat import imp_release_lock
|
||||
|
||||
|
||||
from gevent.builtins import __import__ as g_import
|
||||
|
||||
|
||||
#: Map from gevent module name to the name of the standard-library
#: module whose API it provides; used to cover stdlib names with the
#: green versions while importing a patched module tree.
MAPPING = {
    'gevent.local': '_threading_local',
    'gevent.socket': 'socket',
    'gevent.select': 'select',
    'gevent.selectors': 'selectors',
    'gevent.ssl': 'ssl',
    'gevent.thread': '_thread',
    'gevent.subprocess': 'subprocess',
    'gevent.os': 'os',
    'gevent.threading': 'threading',
    'gevent.builtins': 'builtins',
    'gevent.signal': 'signal',
    'gevent.time': 'time',
    'gevent.queue': 'queue',
    'gevent.contextvars': 'contextvars',
}

# Standard-library names from MAPPING whose gevent counterpart may
# legitimately fail to import; currently empty.
OPTIONAL_STDLIB_MODULES = frozenset()

# Prefix under which patched module trees are recorded in sys.modules.
_PATCH_PREFIX = '__g_patched_module_'
|
||||
|
||||
def _collect_stdlib_gevent_modules():
    """
    Import every gevent module listed in `MAPPING` and return a dict
    keyed by the standard-library name it replaces, valued by the
    imported gevent module.

    An ImportError is fatal unless the stdlib name appears in
    `OPTIONAL_STDLIB_MODULES`, in which case that entry is simply
    left out of the result.
    """
    green_by_stdlib_name = {}
    for green_name, std_name in iteritems(MAPPING):
        try:
            green_module = importlib.import_module(green_name)
        except ImportError:
            if std_name not in OPTIONAL_STDLIB_MODULES:
                raise
            # Optional module missing; skip it.
        else:
            green_by_stdlib_name[std_name] = green_module
    return green_by_stdlib_name
|
||||
|
||||
|
||||
class _SysModulesPatcher(object):
    """
    Context manager and callable that imports one module tree
    (*importing*) with ``sys.modules`` temporarily covered by gevent's
    green modules, then restores the previous state on exit while
    remembering the green versions of everything imported.
    """

    def __init__(self, importing, extra_all=lambda mod_name: ()):
        # Permanent state.
        self.extra_all = extra_all
        self.importing = importing
        # green modules, replacing regularly imported modules.
        # This begins as the gevent list of modules, and
        # then gets extended with green things from the tree we import.
        self._green_modules = _collect_stdlib_gevent_modules()

        ## Transient, reset each time we're called.
        # The set of things imported before we began.
        self._t_modules_to_restore = {}

    def _save(self):
        # Snapshot sys.modules state we are about to change, then
        # install the green modules.
        self._t_modules_to_restore = {}

        # Copy all the things we know we are going to overwrite.
        for modname in self._green_modules:
            self._t_modules_to_restore[modname] = sys.modules.get(modname, None)

        # Copy anything else in the import tree.
        for modname, mod in list(iteritems(sys.modules)):
            if modname.startswith(self.importing):
                self._t_modules_to_restore[modname] = mod
                # And remove it. If it had been imported green, it will
                # be put right back. Otherwise, it was imported "manually"
                # outside this process and isn't green.
                del sys.modules[modname]

        # Cover the target modules so that when you import the module it
        # sees only the patched versions
        for name, mod in iteritems(self._green_modules):
            sys.modules[name] = mod

    def _restore(self):
        # Anything from the same package tree we imported this time
        # needs to be saved so we can restore it later, and so it doesn't
        # leak into the namespace.

        for modname, mod in list(iteritems(sys.modules)):
            if modname.startswith(self.importing):
                self._green_modules[modname] = mod
                del sys.modules[modname]

        # Now, what we saved at the beginning needs to be restored.
        for modname, mod in iteritems(self._t_modules_to_restore):
            if mod is not None:
                sys.modules[modname] = mod
            else:
                # The name did not exist before we started; remove it.
                try:
                    del sys.modules[modname]
                except KeyError:
                    pass

    def __exit__(self, t, v, tb):
        try:
            self._restore()
        finally:
            imp_release_lock()
        self._t_modules_to_restore = None


    def __enter__(self):
        imp_acquire_lock()
        self._save()
        return self

    # The patched module tree's root module, once imported; None until
    # this patcher has been called.
    module = None

    def __call__(self, after_import_hook):
        """
        Import the tree (once) with green modules in place; idempotent.
        Returns self.
        """
        if self.module is None:
            with self:
                self.module = self.import_one(self.importing, after_import_hook)
            # Circular reference. Someone must keep a reference to this module alive
            # for it to be visible. We record it in sys.modules to be that someone, and
            # to aid debugging. In the past, we worked with multiple completely separate
            # invocations of `import_patched`, but we no longer do.
            self.module.__gevent_patcher__ = self
            sys.modules[_PATCH_PREFIX + self.importing] = self.module
        return self

    def import_one(self, module_name, after_import_hook):
        """
        Import a single module (and, via ``__all__``, its tree) while
        patched; must be called under ``with self``.
        """
        patched_name = _PATCH_PREFIX + module_name
        if patched_name in sys.modules:
            # Already imported patched previously; reuse it.
            return sys.modules[patched_name]

        assert module_name.startswith(self.importing)
        sys.modules.pop(module_name, None)

        module = g_import(module_name, {}, {}, module_name.split('.')[:-1])
        self.module = module
        # On Python 3, we could probably do something much nicer with the
        # import machinery? Set the __loader__ or __finder__ or something like that?
        self._import_all([module])
        after_import_hook(module)
        return module

    def _import_all(self, queue):
        # Called while monitoring for patch changes.
        # Breadth-first import of every name in each module's __all__
        # (plus extra_all) that isn't already an attribute, treating
        # missing attributes as submodules.
        while queue:
            module = queue.pop(0)
            name = module.__name__
            mod_all = tuple(getattr(module, '__all__', ())) + self.extra_all(name)
            for attr_name in mod_all:
                try:
                    getattr(module, attr_name)
                except AttributeError:
                    module_name = module.__name__ + '.' + attr_name
                    new_module = g_import(module_name, {}, {}, attr_name)
                    setattr(module, attr_name, new_module)
                    queue.append(new_module)
|
||||
|
||||
|
||||
def import_patched(module_name,
                   extra_all=lambda mod_name: (),
                   after_import_hook=lambda module: None):
    """
    Import *module_name* with gevent monkey-patches active,
    and return an object holding the greened module as *module*.

    Any sub-modules that were imported by the package are also
    saved.

    .. versionchanged:: 1.5a4
       If the module defines ``__all__``, then each of those
       attributes/modules is also imported as part of the same transaction,
       recursively. The order of ``__all__`` is respected. Anything passed in
       *extra_all* (which must be in the same namespace tree) is also imported.

    .. versionchanged:: 1.5a4
       You must now do all patching for a given module tree
       with one call to this method, or at least by using the returned
       object.
    """
    arch_cache = cached_platform_architecture()
    with arch_cache:
        # Save the current module state, and restore on exit,
        # capturing desirable changes in the modules package.
        patcher = _SysModulesPatcher(module_name, extra_all)
        patcher(after_import_hook)
    return patcher
|
||||
|
||||
|
||||
class cached_platform_architecture(object):
    """
    Context manager that caches ``platform.architecture``.

    Some things that load shared libraries (like Cryptodome, via
    dnspython) invoke ``platform.architecture()`` once per library.
    That call can fork and run external commands, which in turn wants
    to call ``threading._after_fork`` if the GIL has been initialized
    — meaning early imports may try to initialize parts of gevent
    (like the hub) far sooner than desired.

    Part of the fix is to observe when that happens and delay
    initializing parts of gevent until as late as possible (e.g., we
    delay importing and creating the resolver until the hub needs it,
    unless explicitly configured).

    The rest of the fix — this class — avoids the ``_after_fork``
    issues by computing ``platform.architecture()`` once up front and
    serving the cached result for the duration of the ``with`` block.

    (See events.py for similar issues with platform, and
    test__threading_2.py for notes about threading._after_fork if the
    GIL has been initialized)
    """

    _arch_result = None
    _orig_arch = None
    _platform = None

    def __enter__(self):
        import platform
        self._platform = platform
        self._orig_arch = platform.architecture
        self._arch_result = self._orig_arch()

        def _cached_architecture(*args, **kwargs):
            # Only the plain no-argument call is served from the cache;
            # anything else goes to the real implementation.
            if args or kwargs:
                return self._orig_arch(*args, **kwargs)
            return self._arch_result

        platform.architecture = _cached_architecture
        return self

    def __exit__(self, *_args):
        # Put the real function back. _orig_arch/_arch_result are
        # deliberately left in place (matching prior behavior).
        self._platform.architecture = self._orig_arch
        self._platform = None
|
||||
self._platform = None
|
||||
527
venv3_12/Lib/site-packages/gevent/_semaphore.py
Normal file
527
venv3_12/Lib/site-packages/gevent/_semaphore.py
Normal file
@@ -0,0 +1,527 @@
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
###
|
||||
# This file is ``gevent._semaphore`` so that it can be compiled by Cython
|
||||
# individually. However, this is not the place to import from. Everyone,
|
||||
# gevent internal code included, must import from ``gevent.lock``.
|
||||
# The only exception are .pxd files which need access to the
|
||||
# C code; the PURE_PYTHON things that have to happen and which are
|
||||
# handled in ``gevent.lock``, do not apply to them.
|
||||
###
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
__all__ = [
|
||||
'Semaphore',
|
||||
'BoundedSemaphore',
|
||||
]
|
||||
|
||||
from time import sleep as _native_sleep
|
||||
|
||||
from gevent._compat import monotonic
|
||||
from gevent.exceptions import InvalidThreadUseError
|
||||
from gevent.exceptions import LoopExit
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
def _get_linkable():
    # Import AbstractLinkable indirectly.
    x = __import__('gevent._abstract_linkable')
    return x._abstract_linkable.AbstractLinkable
# Bind the name via locals() rather than a plain assignment —
# presumably so the Cython compilation of this module does not treat
# AbstractLinkable as a regular module-level binding; confirm against
# the accompanying .pxd file.
locals()['AbstractLinkable'] = _get_linkable()
del _get_linkable
|
||||
|
||||
from gevent._hub_local import get_hub_if_exists
|
||||
from gevent._hub_local import get_hub
|
||||
from gevent.hub import spawn_raw
|
||||
|
||||
class _LockReleaseLink(object):
|
||||
__slots__ = (
|
||||
'lock',
|
||||
)
|
||||
|
||||
def __init__(self, lock):
|
||||
self.lock = lock
|
||||
|
||||
def __call__(self, _):
|
||||
self.lock.release()
|
||||
|
||||
# Sentinel: no thread has acquired the semaphore yet.
_UNSET = object()
# Sentinel: the semaphore has been used from more than one thread.
_MULTI = object()
|
||||
|
||||
class Semaphore(AbstractLinkable): # pylint:disable=undefined-variable
|
||||
"""
|
||||
Semaphore(value=1) -> Semaphore
|
||||
|
||||
.. seealso:: :class:`BoundedSemaphore` for a safer version that prevents
|
||||
some classes of bugs. If unsure, most users should opt for `BoundedSemaphore`.
|
||||
|
||||
A semaphore manages a counter representing the number of `release`
|
||||
calls minus the number of `acquire` calls, plus an initial value.
|
||||
The `acquire` method blocks if necessary until it can return
|
||||
without making the counter negative. A semaphore does not track ownership
|
||||
by greenlets; any greenlet can call `release`, whether or not it has previously
|
||||
called `acquire`.
|
||||
|
||||
If not given, ``value`` defaults to 1.
|
||||
|
||||
The semaphore is a context manager and can be used in ``with`` statements.
|
||||
|
||||
This Semaphore's ``__exit__`` method does not call the trace function
|
||||
on CPython, but does under PyPy.
|
||||
|
||||
.. versionchanged:: 1.4.0
|
||||
Document that the order in which waiters are awakened is not specified. It was not
|
||||
specified previously, but due to CPython implementation quirks usually went in FIFO order.
|
||||
.. versionchanged:: 1.5a3
|
||||
Waiting greenlets are now awakened in the order in which they waited.
|
||||
.. versionchanged:: 1.5a3
|
||||
The low-level ``rawlink`` method (most users won't use this) now automatically
|
||||
unlinks waiters before calling them.
|
||||
.. versionchanged:: 20.12.0
|
||||
Improved support for multi-threaded usage. When multi-threaded usage is detected,
|
||||
instances will no longer create the thread's hub if it's not present.
|
||||
|
||||
.. versionchanged:: 24.2.1
|
||||
Uses Python 3 native lock timeouts for cross-thread operations instead
|
||||
of spinning.
|
||||
"""
|
||||
|
||||
__slots__ = (
|
||||
'counter',
|
||||
# long integer, signed (Py2) or unsigned (Py3); see comments
|
||||
# in the .pxd file for why we store as Python object. Set to ``_UNSET``
|
||||
# initially. Set to the ident of the first thread that
|
||||
# acquires us. If we later see a different thread ident, set
|
||||
# to ``_MULTI``.
|
||||
'_multithreaded',
|
||||
)
|
||||
|
||||
def __init__(self, value=1, hub=None):
|
||||
self.counter = value
|
||||
if self.counter < 0: # Do the check after Cython native int conversion
|
||||
raise ValueError("semaphore initial value must be >= 0")
|
||||
super(Semaphore, self).__init__(hub)
|
||||
self._notify_all = False
|
||||
self._multithreaded = _UNSET
|
||||
|
||||
def __str__(self):
|
||||
return '<%s at 0x%x counter=%s _links[%s]>' % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
self.counter,
|
||||
self.linkcount()
|
||||
)
|
||||
|
||||
def locked(self):
|
||||
"""
|
||||
Return a boolean indicating whether the semaphore can be
|
||||
acquired (`False` if the semaphore *can* be acquired). Most
|
||||
useful with binary semaphores (those with an initial value of 1).
|
||||
|
||||
:rtype: bool
|
||||
"""
|
||||
return self.counter <= 0
|
||||
|
||||
def release(self):
|
||||
"""
|
||||
Release the semaphore, notifying any waiters if needed. There
|
||||
is no return value.
|
||||
|
||||
.. note::
|
||||
|
||||
This can be used to over-release the semaphore.
|
||||
(Release more times than it has been acquired or was initially
|
||||
created with.)
|
||||
|
||||
This is usually a sign of a bug, but under some circumstances it can be
|
||||
used deliberately, for example, to model the arrival of additional
|
||||
resources.
|
||||
|
||||
:rtype: None
|
||||
"""
|
||||
self.counter += 1
|
||||
self._check_and_notify()
|
||||
return self.counter
|
||||
|
||||
def ready(self):
|
||||
"""
|
||||
Return a boolean indicating whether the semaphore can be
|
||||
acquired (`True` if the semaphore can be acquired).
|
||||
|
||||
:rtype: bool
|
||||
"""
|
||||
return self.counter > 0
|
||||
|
||||
def _start_notify(self):
|
||||
self._check_and_notify()
|
||||
|
||||
def _wait_return_value(self, waited, wait_success):
|
||||
if waited:
|
||||
return wait_success
|
||||
# We didn't even wait, we must be good to go.
|
||||
# XXX: This is probably dead code, we're careful not to go into the wait
|
||||
# state if we don't expect to need to
|
||||
return True
|
||||
|
||||
def wait(self, timeout=None):
|
||||
"""
|
||||
Wait until it is possible to acquire this semaphore, or until the optional
|
||||
*timeout* elapses.
|
||||
|
||||
.. note:: If this semaphore was initialized with a *value* of 0,
|
||||
this method will block forever if no timeout is given.
|
||||
|
||||
:keyword float timeout: If given, specifies the maximum amount of seconds
|
||||
this method will block.
|
||||
:return: A number indicating how many times the semaphore can be acquired
|
||||
before blocking. *This could be 0,* if other waiters acquired
|
||||
the semaphore.
|
||||
:rtype: int
|
||||
"""
|
||||
if self.counter > 0:
|
||||
return self.counter
|
||||
|
||||
self._wait(timeout) # return value irrelevant, whether we got it or got a timeout
|
||||
return self.counter
|
||||
|
||||
def acquire(self, blocking=True, timeout=None):
    """
    acquire(blocking=True, timeout=None) -> bool

    Acquire the semaphore.

    .. note:: If this semaphore was initialized with a *value* of 0,
       this method will block forever (unless a timeout is given or blocking is
       set to false).

    :keyword bool blocking: If True (the default), this function will block
       until the semaphore is acquired.
    :keyword float timeout: If given, and *blocking* is true,
       specifies the maximum amount of seconds
       this method will block.
    :return: A `bool` indicating whether the semaphore was acquired.
       If ``blocking`` is True and ``timeout`` is None (the default), then
       (so long as this semaphore was initialized with a size greater than 0)
       this will always return True. If a timeout was given, and it expired before
       the semaphore was acquired, False will be returned. (Note that this can still
       raise a ``Timeout`` exception, if some other caller had already started a timer.)
    """
    # pylint:disable=too-many-return-statements,too-many-branches
    # Sadly, the body of this method is rather complicated.

    # Track whether we've ever been used from more than one thread;
    # _UNSET -> first use, _MULTI -> confirmed multi-threaded use.
    if self._multithreaded is _UNSET:
        self._multithreaded = self._get_thread_ident()
    elif self._multithreaded != self._get_thread_ident():
        self._multithreaded = _MULTI

    # We conceptually now belong to the hub of the thread that
    # called this, whether or not we have to block. Note that we
    # cannot force it to be created yet, because Semaphore is used
    # by importlib.ModuleLock which is used when importing the hub
    # itself! This also checks for cross-thread issues.
    invalid_thread_use = None
    try:
        self._capture_hub(False)
    except InvalidThreadUseError as e:
        # My hub belongs to some other thread. We didn't release the GIL/object lock
        # by raising the exception, so we know this is still true.
        invalid_thread_use = e.args
        e = None
        if not self.counter and blocking:
            # We would need to block. So coordinate with the main hub.
            return self.__acquire_from_other_thread(invalid_thread_use, blocking, timeout)

    # Fast path: a token is available, take it without blocking.
    if self.counter > 0:
        self.counter -= 1
        return True

    if not blocking:
        return False

    if self._multithreaded is not _MULTI and self.hub is None: # pylint:disable=access-member-before-definition
        self.hub = get_hub() # pylint:disable=attribute-defined-outside-init

    if self.hub is None and not invalid_thread_use:
        # Someone else is holding us. There's not a hub here,
        # nor is there a hub in that thread. We'll need to use regular locks.
        # This will be unfair to yet a third thread that tries to use us with greenlets.
        return self.__acquire_from_other_thread(
            (None, None, self._getcurrent(), "NoHubs"),
            blocking,
            timeout
        )

    # self._wait may drop both the GIL and the _lock_lock.
    # By the time we regain control, both have been reacquired.
    try:
        success = self._wait(timeout)
    except LoopExit as ex:
        args = ex.args
        ex = None
        if self.counter:
            # A token appeared while the loop was exiting; treat as success.
            success = True
        else:
            # Avoid using ex.hub property to keep holding the GIL
            if len(args) == 3 and args[1].main_hub:
                # The main hub, meaning the main thread. We probably can do nothing with this.
                raise
            return self.__acquire_from_other_thread(
                (self.hub, get_hub_if_exists(), self._getcurrent(), "LoopExit"),
                blocking,
                timeout)

    if not success:
        assert timeout is not None
        # Our timer expired.
        return False

    # Neither our timer or another one expired, so we blocked until
    # awoke. Therefore, the counter is ours
    assert self.counter > 0, (self.counter, blocking, timeout, success,)
    self.counter -= 1
    return True
|
||||
|
||||
_py3k_acquire = acquire # PyPy needs this; it must be static for Cython
|
||||
|
||||
def __enter__(self):
    # Context-manager protocol: acquire on entry. Note this returns
    # None (not self), as this class has historically done.
    self.acquire()

def __exit__(self, t, v, tb):
    # Context-manager protocol: always release, even on exception.
    self.release()
|
||||
|
||||
def _handle_unswitched_notifications(self, unswitched):
    # A switch into a waiter in another thread failed. Rather than
    # doing what the ``super()`` does (scheduling the notification to
    # run later as a callback in that thread), just put the links back
    # on our queue and hope the owning thread eventually runs its own
    # notifications.
    #
    # Running these as deferred cross-thread callbacks is NOT safe
    # here: the cross-thread locking/unlocking algorithm predates the
    # schedule-a-callback mechanism, and a late switch can arrive
    # *after* the destination thread has already acquired the lock,
    # producing ``InvalidSwitchError`` (or worse, silent corruption).
    #
    # This manifests as a fairly reliable failure of
    # ``gevent.tests.test__semaphore``
    # ``TestSemaphoreMultiThread.test_dueling_threads_with_hub``,
    # but ONLY in PURE_PYTHON mode.
    #
    # TODO: Maybe rewrite that part of the algorithm to be friendly to
    # running the callbacks?
    for pending_link in unswitched:
        self._links.append(pending_link)
|
||||
|
||||
def __add_link(self, link):
    # Register *link* to be notified. If a notifier callback is already
    # pending, append to its argument list so this link is picked up in
    # the current notification cycle; otherwise register a fresh rawlink.
    if not self._notifier:
        self.rawlink(link)
    else:
        self._notifier.args[0].append(link)
|
||||
|
||||
def __acquire_from_other_thread(self, ex_args, blocking, timeout):
    # Dispatch a blocking acquire that must coordinate across threads.
    # *ex_args* is the (owning_hub, hub_for_this_thread, current_greenlet,
    # reason) tuple captured from InvalidThreadUseError (or built by the
    # caller).
    assert blocking
    # Some other hub owns this object. We must ask it to wake us
    # up. In general, we can't use a Python-level ``Lock`` because
    #
    # (1) it doesn't support a timeout on all platforms; and
    # (2) we don't want to block this hub from running.
    #
    # So we need to do so in a way that cooperates with *two*
    # hubs. That's what an async watcher is built for.
    #
    # Of course, if we don't actually have two hubs, then we must find some other
    # solution. That involves using a lock.

    # We have to take an action that drops the GIL and drops the object lock
    # to allow the main thread (the thread for our hub) to advance.
    owning_hub = ex_args[0]
    hub_for_this_thread = ex_args[1]
    current_greenlet = ex_args[2]

    if owning_hub is None and hub_for_this_thread is None:
        # No event loop anywhere: fall back to native locks.
        return self.__acquire_without_hubs(timeout)

    if hub_for_this_thread is None:
        # Probably a background worker thread. We don't want to create
        # the hub if not needed, and since it didn't exist there are no
        # other greenlets that we could yield to anyway, so there's nothing
        # to block and no reason to try to avoid blocking, so using a native
        # lock is the simplest way to go.
        return self.__acquire_using_other_hub(owning_hub, timeout)

    # We have a hub we don't want to block. Use an async watcher
    # and ask the next releaser of this object to wake us up.
    return self.__acquire_using_two_hubs(hub_for_this_thread,
                                         current_greenlet,
                                         timeout)
|
||||
|
||||
def __acquire_using_two_hubs(self,
                             hub_for_this_thread,
                             current_greenlet,
                             timeout):
    # Block this greenlet on an async watcher in *its own* hub; the
    # thread that releases the semaphore fires the watcher to wake us.
    #
    # Allocating and starting the watcher *could* release the GIL.
    # with the libev corcext, allocating won't, but starting briefly will.
    # With other backends, allocating might, and starting might also.
    # So...
    watcher = hub_for_this_thread.loop.async_()
    send = watcher.send_ignoring_arg
    watcher.start(current_greenlet.switch, self)
    try:
        with Timeout._start_new_or_dummy(timeout) as timer:
            # ... now that we're back holding the GIL, we need to verify our
            # state.
            try:
                while 1:
                    if self.counter > 0:
                        self.counter -= 1
                        assert self.counter >= 0, (self,)
                        return True

                    self.__add_link(send)

                    # Releases the object lock
                    self._switch_to_hub(hub_for_this_thread)
                    # We waited and got notified. We should be ready now, so a non-blocking
                    # acquire() should succeed. But sometimes we get spurious notifications?
                    # It's not entirely clear how. So we need to loop until we get it, or until
                    # the timer expires
                    result = self.acquire(0)
                    if result:
                        return result
            except Timeout as tex:
                # Only swallow our own timer; re-raise timers started
                # by other callers.
                if tex is not timer:
                    raise
                return False
    finally:
        # Always detach: unlink the send callback and free the watcher.
        self._quiet_unlink_all(send)
        watcher.stop()
        watcher.close()
|
||||
|
||||
def __acquire_from_other_thread_cb(self, results, blocking, timeout, thread_lock):
    # Runs as a greenlet in the *owning* hub's thread on behalf of a
    # foreign thread: performs the acquire there, stores the outcome in
    # *results*, and releases *thread_lock* to wake the waiting thread.
    try:
        result = self.acquire(blocking, timeout)
        results.append(result)
    finally:
        # Wake the foreign thread even if acquire() raised.
        thread_lock.release()
    return result
|
||||
|
||||
def __acquire_using_other_hub(self, owning_hub, timeout):
    # This thread has no hub of its own: ask the owning hub's thread to
    # acquire on our behalf, and block on a native lock until it does.
    assert owning_hub is not get_hub_if_exists()
    thread_lock = self._allocate_lock()
    thread_lock.acquire()
    results = []

    # Schedule the acquire as a greenlet in the owning hub's thread.
    owning_hub.loop.run_callback_threadsafe(
        spawn_raw,
        self.__acquire_from_other_thread_cb,
        results,
        1, # blocking,
        timeout, # timeout,
        thread_lock)

    # We MUST use a blocking acquire here, or at least be sure we keep going
    # until we acquire it. If we timed out waiting here,
    # just before the callback runs, then we would be out of sync.
    self.__spin_on_native_lock(thread_lock, None)
    return results[0]
|
||||
|
||||
def __acquire_without_hubs(self, timeout):
    # Neither thread has a hub: fall back entirely to a native lock
    # that the releasing side unlocks via a _LockReleaseLink.
    thread_lock = self._allocate_lock()
    thread_lock.acquire()
    absolute_expiration = 0
    begin = 0
    if timeout:
        absolute_expiration = monotonic() + timeout

    # Cython won't compile a lambda here
    link = _LockReleaseLink(thread_lock)
    while 1:
        self.__add_link(link)
        if absolute_expiration:
            begin = monotonic()

        got_native = self.__spin_on_native_lock(thread_lock, timeout)
        self._quiet_unlink_all(link)
        if got_native:
            # We were released; but the token may already be gone again,
            # so confirm with a non-blocking acquire before returning.
            if self.acquire(0):
                return True
        if absolute_expiration:
            # Shrink the remaining timeout by the time spent this round;
            # give up once the absolute deadline has passed.
            now = monotonic()
            if now >= absolute_expiration:
                return False
            duration = now - begin
            timeout -= duration
            if timeout <= 0:
                return False
|
||||
|
||||
def __spin_on_native_lock(self, thread_lock, timeout):
    # Block on a native lock while temporarily dropping this object's
    # own lock so other threads can make progress; returns the native
    # acquire's result (bool when a timeout is used).
    self._drop_lock_for_switch_out()
    try:
        # Unlike Python 2, Python 3 thread locks
        # can be interrupted when blocking, with or
        # without a timeout. Python 2 didn't even
        # support a timeout for non -blocking.
        if timeout:
            return thread_lock.acquire(True, timeout)

        return thread_lock.acquire()
    finally:
        self._acquire_lock_for_switch_in()
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
    """
    BoundedSemaphore(value=1) -> BoundedSemaphore

    A semaphore that refuses to be released above its initial value.

    Releasing more often than acquiring raises :class:`ValueError`.
    Since semaphores most often guard resources of fixed capacity,
    exceeding the initial value usually indicates a bug.

    If not given, *value* defaults to 1.
    """

    __slots__ = (
        '_initial_value',
    )

    #: For monkey-patching, allow changing the class of error we raise
    _OVER_RELEASE_ERROR = ValueError

    def __init__(self, *args, **kwargs):
        Semaphore.__init__(self, *args, **kwargs)
        # Remember the starting counter so release() can detect overflow.
        self._initial_value = self.counter

    def release(self):
        """
        Like :meth:`Semaphore.release`, but raises :class:`ValueError`
        if the semaphore is being over-released.
        """
        if self.counter >= self._initial_value:
            raise self._OVER_RELEASE_ERROR("Semaphore released too many times")
        new_counter = Semaphore.release(self)
        if new_counter == self._initial_value:
            # No one can possibly be holding this semaphore anymore:
            # detach from the hub and go back to floating, which helps
            # cross-thread use.
            self.hub = None # pylint:disable=attribute-defined-outside-init
        return new_counter

    def _at_fork_reinit(self):
        super(BoundedSemaphore, self)._at_fork_reinit()
        # The child process starts with a full complement of tokens.
        self.counter = self._initial_value
|
||||
|
||||
|
||||
# By building the semaphore with Cython under PyPy, we get
|
||||
# atomic operations (specifically, exiting/releasing), at the
|
||||
# cost of some speed (one trivial semaphore micro-benchmark put the pure-python version
|
||||
# at around 1s and the compiled version at around 4s). Some clever subclassing
|
||||
# and having only the bare minimum be in cython might help reduce that penalty.
|
||||
# NOTE: You must use version 0.23.4 or later to avoid a memory leak.
|
||||
# https://mail.python.org/pipermail/cython-devel/2015-October/004571.html
|
||||
# However, that's all for naught on up to and including PyPy 4.0.1 which
|
||||
# have some serious crashing bugs with GC interacting with cython.
|
||||
# It hasn't been tested since then, and PURE_PYTHON is assumed to be true
|
||||
# for PyPy in all cases anyway, so this does nothing.
|
||||
|
||||
# Swap the pure-Python definitions above for the compiled Cython
# accelerator module (gevent.__semaphore) when it is available and
# PURE_PYTHON is not in effect.
from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent.__semaphore')
|
||||
648
venv3_12/Lib/site-packages/gevent/_socket3.py
Normal file
648
venv3_12/Lib/site-packages/gevent/_socket3.py
Normal file
@@ -0,0 +1,648 @@
|
||||
# Port of Python 3.3's socket module to gevent
|
||||
"""
|
||||
Python 3 socket module.
|
||||
"""
|
||||
# Our import magic sadly makes this warning useless
|
||||
# pylint: disable=undefined-variable
|
||||
# pylint: disable=too-many-statements,too-many-branches
|
||||
# pylint: disable=too-many-public-methods,unused-argument
|
||||
from __future__ import absolute_import
|
||||
import io
|
||||
import os
|
||||
|
||||
|
||||
from gevent import _socketcommon
|
||||
from gevent._util import copy_globals
|
||||
from gevent._compat import PYPY
|
||||
import _socket
|
||||
from os import dup
|
||||
|
||||
|
||||
# Pull the shared stdlib-compatible names (AF_INET, SOCK_STREAM, error,
# EWOULDBLOCK, getdefaulttimeout, ...) into this module's namespace,
# skipping gevent's own extensions.
copy_globals(_socketcommon, globals(),
             names_to_ignore=_socketcommon.__extensions__,
             dunder_names_to_keep=())


# Re-export the partitioned name lists consumed by gevent.monkey.
__socket__ = _socketcommon.__socket__
__implements__ = _socketcommon._implements
__extensions__ = _socketcommon.__extensions__
__imports__ = _socketcommon.__imports__
__dns__ = _socketcommon.__dns__


# The stdlib raw-I/O adapter over a socket; reused unchanged by makefile().
SocketIO = __socket__.SocketIO # pylint:disable=no-member
|
||||
|
||||
|
||||
class _closedsocket(object):
    # Placeholder installed as ``socket._sock`` after close()/detach().
    # It preserves family/type/proto and the original fileno for repr
    # and error purposes while making every I/O operation fail with EBADF.
    __slots__ = ('family', 'type', 'proto', 'orig_fileno', 'description')

    def __init__(self, family, type, proto, orig_fileno, description):
        self.family = family
        self.type = type
        self.proto = proto
        self.orig_fileno = orig_fileno
        # Why the socket was replaced: 'closed' or 'detached'.
        self.description = description

    def fileno(self):
        # A closed socket reports an invalid descriptor.
        return -1

    def close(self):
        "No-op"

    detach = fileno

    def _dummy(*args, **kwargs): # pylint:disable=no-method-argument,unused-argument,no-self-argument
        raise OSError(EBADF, 'Bad file descriptor')
    # All _delegate_methods must also be initialized here.
    send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
    getsockname = _dummy

    def __bool__(self):
        # Falsy, so ``if self._sock:`` guards detect the closed state.
        return False

    # Any other attribute access also fails with EBADF.
    __getattr__ = _dummy

    def __repr__(self):
        return "<socket object [closed proxy at 0x%x fd=%s %s]>" % (
            id(self),
            self.orig_fileno,
            self.description,
        )
|
||||
|
||||
class _wrefsocket(_socket.socket):
    # Plain stdlib socket.socket objects subclass _socket.socket
    # and add weakref ability. The ssl module, for one, counts on this.
    # We don't create socket.socket objects (because they may have been
    # monkey patched to be the object from this module), but we still
    # need to make sure what we do create can be weakrefd.

    __slots__ = ("__weakref__", )

    if PYPY:
        # server.py unwraps the socket object to get the raw _sock;
        # it depends on having a timeout property alias, which PyPy does not
        # provide.
        timeout = property(lambda s: s.gettimeout(),
                           lambda s, nv: s.settimeout(nv))
|
||||
|
||||
|
||||
class socket(_socketcommon.SocketMixin):
|
||||
"""
|
||||
gevent `socket.socket <https://docs.python.org/3/library/socket.html#socket-objects>`_
|
||||
for Python 3.
|
||||
|
||||
This object should have the same API as the standard library socket linked to above. Not all
|
||||
methods are specifically documented here; when they are they may point out a difference
|
||||
to be aware of or may document a method the standard library does not.
|
||||
"""
|
||||
|
||||
# Subclasses can set this to customize the type of the
|
||||
# native _socket.socket we create. It MUST be a subclass
|
||||
# of _wrefsocket. (gevent internal usage only)
|
||||
_gevent_sock_class = _wrefsocket
|
||||
|
||||
__slots__ = (
|
||||
'_io_refs',
|
||||
'_closed',
|
||||
)
|
||||
|
||||
# Take the same approach as socket2: wrap a real socket object,
|
||||
# don't subclass it. This lets code that needs the raw _sock (not tied to the hub)
|
||||
# get it. This shows up in tests like test__example_udp_server.
|
||||
|
||||
# In 3.7, socket changed to auto-detecting family, type, and proto
|
||||
# when given a fileno.
|
||||
def __init__(self, family=-1, type=-1, proto=-1, fileno=None):
    # In 3.7, socket changed to auto-detecting family, type, and proto
    # when given a fileno; hence the -1 sentinels, resolved here only
    # when no fileno is supplied.
    super().__init__()
    self._closed = False
    if fileno is None:
        if family == -1:
            family = AddressFamily.AF_INET
        if type == -1:
            type = SOCK_STREAM
        if proto == -1:
            proto = 0
    self._sock = self._gevent_sock_class(family, type, proto, fileno)
    # Temporarily None so attribute exists even if the setup below fails;
    # the real default is applied at the end.
    self.timeout = None

    self._io_refs = 0
    # The underlying FD is always non-blocking; gevent simulates
    # blocking semantics via the event loop and self.timeout.
    _socket.socket.setblocking(self._sock, False)
    fileno = _socket.socket.fileno(self._sock)
    self.hub = get_hub()
    io_class = self.hub.loop.io
    # 1 == read interest, 2 == write interest for loop.io watchers.
    self._read_event = io_class(fileno, 1)
    self._write_event = io_class(fileno, 2)
    self.timeout = _socket.getdefaulttimeout()
|
||||
|
||||
def __getattr__(self, name):
    # Delegate everything not defined here to the wrapped _socket.socket.
    return getattr(self._sock, name)

def _accept(self):
    # Python 3.11 started checking for this method on the class object,
    # so we need to explicitly delegate.
    return self._sock._accept()
|
||||
|
||||
if hasattr(_socket, 'SOCK_NONBLOCK'):
    # Only defined under Linux
    @property
    def type(self):
        # Hide the SOCK_NONBLOCK bit from callers when we are in
        # (simulated) blocking mode.
        # See https://github.com/gevent/gevent/pull/399
        if self.timeout != 0.0:
            return self._sock.type & ~_socket.SOCK_NONBLOCK # pylint:disable=no-member
        return self._sock.type
|
||||
|
||||
def __enter__(self):
    # Context-manager support: the socket closes itself on exit.
    return self

def __exit__(self, *args):
    if not self._closed:
        self.close()

def __repr__(self):
    """Wrap __repr__() to reveal the real class name."""
    try:
        s = repr(self._sock)
    except Exception as ex: # pylint:disable=broad-except
        # Observed on Windows Py3.3, printing the repr of a socket
        # that just suffered a ConnectionResetError [WinError 10054]:
        # "OverflowError: no printf formatter to display the socket descriptor in decimal"
        # Not sure what the actual cause is or if there's a better way to handle this
        s = '<socket [%r]>' % ex

    if s.startswith("<socket object"):
        s = "<%s.%s%s at 0x%x%s%s" % (
            self.__class__.__module__,
            self.__class__.__name__,
            getattr(self, '_closed', False) and " [closed]" or "",
            id(self),
            self._extra_repr(),
            s[7:])
    return s

def _extra_repr(self):
    # Hook for subclasses to inject extra state into __repr__.
    return ''

def __getstate__(self):
    # Sockets wrap OS resources and cannot be pickled.
    raise TypeError("Cannot serialize socket object")
|
||||
|
||||
def dup(self):
    """dup() -> socket object

    Return a new socket object connected to the same system resource.

    The duplicate has its own (duplicated) file descriptor and inherits
    this socket's timeout.
    """
    fd = dup(self.fileno())
    sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
    sock.settimeout(self.gettimeout())
    return sock
|
||||
|
||||
def accept(self):
    """accept() -> (socket object, address info)

    Wait for an incoming connection. Return a new socket
    representing the connection, and the address of the client.
    For IP sockets, the address info is a pair (hostaddr, port).
    """
    # Retry on BlockingIOError, cooperatively waiting for readability,
    # unless we are explicitly non-blocking (timeout == 0.0).
    while True:
        try:
            fd, addr = self._accept()
            break
        except BlockingIOError:
            if self.timeout == 0.0:
                raise
            self._wait(self._read_event)
    sock = socket(self.family, self.type, self.proto, fileno=fd)
    # Python Issue #7995: if no default timeout is set and the listening
    # socket had a (non-zero) timeout, force the new socket in blocking
    # mode to override platform-specific socket flags inheritance.
    # XXX do we need to do this?
    if getdefaulttimeout() is None and self.gettimeout():
        sock.setblocking(True)
    return sock, addr
|
||||
|
||||
def makefile(self, mode="r", buffering=None, *,
             encoding=None, errors=None, newline=None):
    """Return an I/O stream connected to the socket

    The arguments are as for io.open() after the filename,
    except the only mode characters supported are 'r', 'w' and 'b'.
    The semantics are similar too.

    :raises ValueError: if *mode* contains characters other than
        ``r``/``w``/``b``, or if an unbuffered (``buffering=0``)
        stream is requested for a non-binary mode.
    """
    # XXX refactor to share code? We ought to be able to use our FileObject,
    # adding the appropriate amount of refcounting. At the very least we can use our
    # OpenDescriptor to handle the parsing.
    for c in mode:
        if c not in {"r", "w", "b"}:
            # Bug fix: the original raise was missing the ``% mode``
            # argument, so callers saw a literal '%r' in the message
            # instead of the offending mode string (the same defect
            # CPython fixed in bpo-25485).
            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
    writing = "w" in mode
    reading = "r" in mode or not writing
    assert reading or writing
    binary = "b" in mode
    # Build the raw-mode string for SocketIO from the parsed flags.
    rawmode = ""
    if reading:
        rawmode += "r"
    if writing:
        rawmode += "w"
    raw = SocketIO(self, rawmode)
    # Track outstanding streams; close() defers the real close until all
    # of them are gone (see _decref_socketios).
    self._io_refs += 1
    if buffering is None:
        buffering = -1
    if buffering < 0:
        buffering = io.DEFAULT_BUFFER_SIZE
    if buffering == 0:
        if not binary:
            raise ValueError("unbuffered streams must be binary")
        return raw
    # Wrap in the appropriate buffered layer, then (for text modes)
    # a TextIOWrapper, mirroring the stdlib socket.makefile.
    if reading and writing:
        buffer = io.BufferedRWPair(raw, raw, buffering)
    elif reading:
        buffer = io.BufferedReader(raw, buffering)
    else:
        assert writing
        buffer = io.BufferedWriter(raw, buffering)
    if binary:
        return buffer
    text = io.TextIOWrapper(buffer, encoding, errors, newline)
    text.mode = mode
    return text
|
||||
|
||||
def _decref_socketios(self):
    # Called by SocketIO when it is closed.
    # Once the last makefile() stream is gone and close() was already
    # requested, perform the real close.
    if self._io_refs > 0:
        self._io_refs -= 1
    if self._closed:
        self.close()

def _drop_ref_on_close(self, sock):
    # Send the close event to wake up any watchers we don't know about
    # so that (hopefully) they can be closed before we destroy
    # the FD and invalidate them. We may be in the hub running pending
    # callbacks now, or this may take until the next iteration.
    scheduled_new = self.hub.loop.closing_fd(sock.fileno())
    # Schedule the actual close to happen after that, but only if needed.
    # (If we always defer, we wind up closing things much later than expected.)
    if scheduled_new:
        self.hub.loop.run_callback(sock.close)
    else:
        sock.close()
|
||||
|
||||
|
||||
def _detach_socket(self, reason):
    # Replace self._sock with a _closedsocket proxy; *reason* is
    # 'closed' (also close the FD) or 'detached' (leave the FD alive).
    if not self._sock:
        return

    # Break any references to the underlying socket object. Tested
    # by test__refcount. (Why does this matter?). Be sure to
    # preserve our same family/type/proto if possible (if we
    # don't, we can get TypeError instead of OSError; see
    # test_socket.SendmsgUDP6Test.testSendmsgAfterClose)... but
    # this isn't always possible (see test_socket.test_unknown_socket_family_repr)
    sock = self._sock
    family = -1
    type = -1
    proto = -1
    fileno = None
    try:
        family = sock.family
        type = sock.type
        proto = sock.proto
        fileno = sock.fileno()
    except OSError:
        # The OS-level socket may already be dead; fall back to the
        # -1/None sentinels captured above.
        pass
    # Break any reference to the loop.io objects. Our fileno,
    # which they were tied to, is about to be free to be reused, so these
    # objects are no longer functional.
    # pylint:disable-next=superfluous-parens
    self._drop_events_and_close(closefd=(reason == 'closed'))

    self._sock = _closedsocket(family, type, proto, fileno, reason)

def _real_close(self, _ss=_socket.socket):
    # This function should not reference any globals. See Python issue #808164.
    if not self._sock:
        return

    self._detach_socket('closed')
|
||||
|
||||
|
||||
def close(self):
    # This function should not reference any globals. See Python issue #808164.
    self._closed = True
    # Defer the OS-level close while makefile() streams still hold
    # references; _decref_socketios finishes the job later.
    if self._io_refs <= 0:
        self._real_close()

@property
def closed(self):
    # True as soon as close() or detach() has been requested, even if
    # the real close is still deferred behind _io_refs.
    return self._closed
|
||||
|
||||
def detach(self):
    """
    detach() -> file descriptor

    Close the socket object without closing the underlying file
    descriptor. The object cannot be used after this call; when the
    real file descriptor is closed, the number that was previously
    used here may be reused. The fileno() method, after this call,
    will return an invalid socket id.

    The previous descriptor is returned.

    .. versionchanged:: 1.5

        Also immediately drop any native event loop resources.
    """
    self._closed = True
    # Keep a reference so we can still extract the FD after the
    # proxy replacement below.
    sock = self._sock
    self._detach_socket('detached')
    return sock.detach()
|
||||
|
||||
if hasattr(_socket.socket, 'recvmsg'):
    # Only on Unix; PyPy 3.5 5.10.0 provides sendmsg and recvmsg, but not
    # recvmsg_into (at least on os x)

    def recvmsg(self, *args):
        # Retry on EWOULDBLOCK, cooperatively waiting for readability,
        # unless the socket is explicitly non-blocking (timeout == 0.0).
        while True:
            try:
                return self._sock.recvmsg(*args)
            except error as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                self._wait(self._read_event)

    if hasattr(_socket.socket, 'recvmsg_into'):

        def recvmsg_into(self, buffers, *args):
            # Same retry pattern as recvmsg.
            while True:
                try:
                    if args:
                        # The C code is sensitive about whether extra arguments are
                        # passed or not.
                        return self._sock.recvmsg_into(buffers, *args)
                    return self._sock.recvmsg_into(buffers)
                except error as ex:
                    if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                        raise
                    self._wait(self._read_event)
|
||||
|
||||
if hasattr(_socket.socket, 'sendmsg'):
    # Only on Unix
    def sendmsg(self, buffers, ancdata=(), flags=0, address=None):
        # One immediate attempt; on EWOULDBLOCK, wait for writability
        # and retry exactly once (matching send() semantics).
        try:
            return self._sock.sendmsg(buffers, ancdata, flags, address)
        except error as ex:
            if flags & getattr(_socket, 'MSG_DONTWAIT', 0):
                # Enable non-blocking behaviour
                # XXX: Do all platforms that have sendmsg have MSG_DONTWAIT?
                raise

            if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                raise
            self._wait(self._write_event)
            try:
                return self._sock.sendmsg(buffers, ancdata, flags, address)
            except error as ex2:
                # A second EWOULDBLOCK is reported as "0 bytes sent".
                if ex2.args[0] == EWOULDBLOCK:
                    return 0
                raise
|
||||
|
||||
|
||||
# sendfile: new in 3.5. But there's no real reason to not
# support it everywhere. Note that we can't use os.sendfile()
# because it's not cooperative.
def _sendfile_use_sendfile(self, file, offset=0, count=None):
    # This is called directly by tests
    # Always give up so sendfile() falls back to the cooperative
    # send()-based path.
    raise __socket__._GiveupOnSendfile() # pylint:disable=no-member
|
||||
|
||||
def _sendfile_use_send(self, file, offset=0, count=None):
    # Cooperative sendfile fallback: read *file* in blocks and push
    # them through self.send(), retrying partial sends.
    self._check_sendfile_params(file, offset, count)
    if self.gettimeout() == 0:
        raise ValueError("non-blocking sockets are not supported")
    if offset:
        file.seek(offset)
    blocksize = min(count, 8192) if count else 8192
    total_sent = 0
    # localize variable access to minimize overhead
    file_read = file.read
    sock_send = self.send
    try:
        while True:
            if count:
                # Never read past the requested byte count.
                blocksize = min(count - total_sent, blocksize)
                if blocksize <= 0:
                    break
            data = memoryview(file_read(blocksize))
            if not data:
                break # EOF
            while True:
                try:
                    sent = sock_send(data)
                except BlockingIOError:
                    continue
                else:
                    total_sent += sent
                    if sent < len(data):
                        # Partial send: retry with the unsent tail.
                        data = data[sent:]
                    else:
                        break
        return total_sent
    finally:
        # Leave the file positioned just past the last byte actually
        # sent, so callers can use file.tell() after an error.
        if total_sent > 0 and hasattr(file, 'seek'):
            file.seek(offset + total_sent)
|
||||
|
||||
def _check_sendfile_params(self, file, offset, count):
    # Validate sendfile() arguments; mirrors the stdlib checks.
    if 'b' not in getattr(file, 'mode', 'b'):
        raise ValueError("file should be opened in binary mode")
    if not self.type & SOCK_STREAM:
        raise ValueError("only SOCK_STREAM type sockets are supported")
    if count is not None:
        if not isinstance(count, int):
            raise TypeError(
                "count must be a positive integer (got {!r})".format(count))
        if count <= 0:
            raise ValueError(
                "count must be a positive integer (got {!r})".format(count))
|
||||
|
||||
def sendfile(self, file, offset=0, count=None):
    """sendfile(file[, offset[, count]]) -> sent

    Send a file until EOF is reached by using high-performance
    os.sendfile() and return the total number of bytes which
    were sent.
    *file* must be a regular file object opened in binary mode.
    If os.sendfile() is not available (e.g. Windows) or file is
    not a regular file socket.send() will be used instead.
    *offset* tells from where to start reading the file.
    If specified, *count* is the total number of bytes to transmit
    as opposed to sending the file until EOF is reached.
    File position is updated on return or also in case of error in
    which case file.tell() can be used to figure out the number of
    bytes which were sent.
    The socket must be of SOCK_STREAM type.
    Non-blocking sockets are not supported.

    .. versionadded:: 1.1rc4
       Added in Python 3.5, but available under all Python 3 versions in
       gevent.
    """
    # gevent always uses the cooperative send()-based implementation;
    # os.sendfile() would block the event loop.
    return self._sendfile_use_send(file, offset, count)
|
||||
|
||||
|
||||
# Windows manages inheritability on the handle; POSIX on the descriptor.
if os.name == 'nt':
    def get_inheritable(self):
        return os.get_handle_inheritable(self.fileno())

    def set_inheritable(self, inheritable):
        os.set_handle_inheritable(self.fileno(), inheritable)
else:
    def get_inheritable(self):
        return os.get_inheritable(self.fileno())

    def set_inheritable(self, inheritable):
        os.set_inheritable(self.fileno(), inheritable)

get_inheritable.__doc__ = "Get the inheritable flag of the socket"
set_inheritable.__doc__ = "Set the inheritable flag of the socket"
|
||||
|
||||
|
||||
|
||||
SocketType = socket
|
||||
|
||||
|
||||
def fromfd(fd, family, type, proto=0):
    """ fromfd(fd, family, type[, proto]) -> socket object

    Duplicate the given file descriptor and wrap the duplicate in a new
    socket object. The remaining arguments are the same as for socket().
    """
    duplicated_fd = dup(fd)
    return socket(family, type, proto, duplicated_fd)
|
||||
|
||||
|
||||
# socket.share()/fromshare() exist only on Windows; mirror the stdlib
# and only define (and advertise) fromshare when the platform has it.
if hasattr(_socket.socket, "share"):
    def fromshare(info):
        """ fromshare(info) -> socket object

        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        return socket(0, 0, 0, info)

    __implements__.append('fromshare')
|
||||
|
||||
|
||||
def _fallback_socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
    """
    Pure-Python emulation of :func:`socket.socketpair` for platforms
    whose C library lacks it: creates a listening socket on the
    loopback interface, connects a client to it, and returns the
    resulting (server-side, client-side) pair of connected sockets.
    """
    # We originally used https://gist.github.com/4325783, by Geert Jansen. (Public domain.)
    # We took it from 3.6 release, confirmed unchanged in 3.7 and
    # 3.8a1. Expected to be used only on Win. Added to Win/3.5.
    # It is always available as `socket._fallback_socketpair` from at least 3.9,
    # We would like to stop carrying around our own implementation, but
    # using _fallback_socketpair directly would only work if we are monkey patched.

    # Current version taken from 3.13rc2

    # PyPy doesn't name its fallback `_fallback_socketpair`, it uses
    # an older copy of socket.py.
    _LOCALHOST = '127.0.0.1'
    _LOCALHOST_V6 = '::1'

    if family == AF_INET:
        host = _LOCALHOST
    elif family == AF_INET6:
        host = _LOCALHOST_V6
    else:
        raise ValueError("Only AF_INET and AF_INET6 socket address families "
                         "are supported")
    if type != SOCK_STREAM:
        raise ValueError("Only SOCK_STREAM socket type is supported")
    if proto != 0:
        raise ValueError("Only protocol zero is supported")

    # We create a connected TCP socket. Note the trick with
    # setblocking(False) that prevents us from having to create a thread.
    lsock = socket(family, type, proto)
    try:
        lsock.bind((host, 0))
        lsock.listen()
        # On IPv6, ignore flow_info and scope_id
        addr, port = lsock.getsockname()[:2]
        csock = socket(family, type, proto)
        try:
            # Non-blocking connect: it will raise BlockingIOError (in
            # progress) and complete once we accept() on the listener.
            csock.setblocking(False)
            try:
                csock.connect((addr, port))
            except (BlockingIOError, InterruptedError):
                pass
            csock.setblocking(True)
            ssock, _ = lsock.accept()
        except:
            csock.close()
            raise
    finally:
        # The listener is only needed to establish the pair.
        lsock.close()

    # Authenticating avoids using a connection from something else
    # able to connect to {host}:{port} instead of us.
    # We expect only AF_INET and AF_INET6 families.
    try:
        if (
                ssock.getsockname() != csock.getpeername()
                or csock.getsockname() != ssock.getpeername()
        ):
            raise ConnectionError("Unexpected peer connection")
    except:
        # getsockname() and getpeername() can fail
        # if either socket isn't connected.
        ssock.close()
        csock.close()
        raise

    return (ssock, csock)
|
||||
|
||||
# Advertise our fallback as an implementation of the stdlib's
# ``_fallback_socketpair`` only when the stdlib actually defines one
# (it does from Python 3.9 on).
if hasattr(__socket__, _fallback_socketpair.__name__):
    __implements__.append(_fallback_socketpair.__name__)
|
||||
|
||||
if hasattr(_socket, "socketpair"):

    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)

        Create a pair of socket objects from the sockets returned by the platform
        socketpair() function.
        The arguments are the same as for socket() except the default family is
        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.

        .. versionchanged:: 1.2
            All Python 3 versions on Windows supply this function (natively
            supplied by Python 3.5 and above).
        """
        if family is None:
            # AF_UNIX may not exist on this platform (e.g. Windows);
            # fall back to AF_INET in that case.
            try:
                family = AF_UNIX
            except NameError:
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        # Re-wrap the raw _socket sockets as gevent sockets, taking
        # ownership of their file descriptors via detach().
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b

else:  # pragma: no cover
    # No native socketpair (very old/unusual platform): use the
    # pure-Python loopback-based emulation.
    socketpair = _fallback_socketpair
|
||||
|
||||
|
||||
|
||||
# Public API of this module: everything we re-implement, our gevent
# extensions, and the names re-imported verbatim from the stdlib.
__all__ = __implements__ + __extensions__ + __imports__
# The socketpair fallback is an implementation detail; keep it out of
# the public namespace even if it was registered in __implements__.
if _fallback_socketpair.__name__ in __all__:
    __all__.remove(_fallback_socketpair.__name__)

# Names that exist only on sufficiently new Pythons; copied over (and
# exported) only when the running stdlib provides them.
__version_specific__ = (
    # Python 3.7b1+
    'close',
    # Python 3.10rc1+
    'TCP_KEEPALIVE',
    'TCP_KEEPCNT',
)
for _x in __version_specific__:
    if hasattr(__socket__, _x):
        vars()[_x] = getattr(__socket__, _x)
        if _x not in __all__:
            __all__.append(_x)
del _x
|
||||
754
venv3_12/Lib/site-packages/gevent/_socketcommon.py
Normal file
754
venv3_12/Lib/site-packages/gevent/_socketcommon.py
Normal file
@@ -0,0 +1,754 @@
|
||||
# Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
from __future__ import absolute_import

# standard functions and classes that this module re-implements in a gevent-aware way:
_implements = [
    'create_connection',
    'socket',
    'SocketType',
    'fromfd',
    'socketpair',
]

# DNS-related stdlib functions; these are routed through the hub's
# resolver instead of making blocking libc calls.
__dns__ = [
    'getaddrinfo',
    'gethostbyname',
    'gethostbyname_ex',
    'gethostbyaddr',
    'getnameinfo',
    'getfqdn',
]

_implements += __dns__
|
||||
|
||||
# non-standard functions that this module provides:
__extensions__ = [
    'cancel_wait',
    'wait_read',
    'wait_write',
    'wait_readwrite',
]

# standard functions and classes that this module re-imports
# (copied unchanged from the stdlib socket module; names missing on
# this platform/version are silently skipped later).
__imports__ = [
    'error',
    'gaierror',
    'herror',
    'htonl',
    'htons',
    'ntohl',
    'ntohs',
    'inet_aton',
    'inet_ntoa',
    'inet_pton',
    'inet_ntop',
    'timeout',
    'gethostname',
    'getprotobyname',
    'getservbyname',
    'getservbyport',
    'getdefaulttimeout',
    'setdefaulttimeout',
    # Windows:
    'errorTab',
    # Python 3
    'AddressFamily',
    'SocketKind',
    'CMSG_LEN',
    'CMSG_SPACE',
    'dup',
    'if_indextoname',
    'if_nameindex',
    'if_nametoindex',
    'sethostname',
    'create_server',
    'has_dualstack_ipv6',
]
|
||||
|
||||
|
||||
import time
|
||||
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
from gevent._compat import string_types, integer_types
|
||||
from gevent._compat import PY39
|
||||
from gevent._compat import WIN as is_windows
|
||||
from gevent._compat import OSX as is_macos
|
||||
from gevent._compat import exc_clear
|
||||
from gevent._util import copy_globals
|
||||
from gevent._greenlet_primitives import get_memory as _get_memory
|
||||
from gevent._hub_primitives import wait_on_socket as _wait_on_socket
|
||||
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
|
||||
# recv_fds/send_fds were added to the stdlib socket module in Python 3.9.
if PY39:
    __imports__.extend([
        'recv_fds',
        'send_fds',
    ])
|
||||
|
||||
# pylint:disable=no-name-in-module,unused-import
|
||||
if is_windows:
|
||||
# no such thing as WSAEPERM or error code 10001 according to winsock.h or MSDN
|
||||
from errno import WSAEINVAL as EINVAL
|
||||
from errno import WSAEWOULDBLOCK as EWOULDBLOCK
|
||||
from errno import WSAEINPROGRESS as EINPROGRESS
|
||||
from errno import WSAEALREADY as EALREADY
|
||||
from errno import WSAEISCONN as EISCONN
|
||||
from gevent.win32util import formatError as strerror
|
||||
EAGAIN = EWOULDBLOCK
|
||||
else:
|
||||
from errno import EINVAL
|
||||
from errno import EWOULDBLOCK
|
||||
from errno import EINPROGRESS
|
||||
from errno import EALREADY
|
||||
from errno import EAGAIN
|
||||
from errno import EISCONN
|
||||
from os import strerror
|
||||
|
||||
try:
|
||||
from errno import EBADF
|
||||
except ImportError:
|
||||
EBADF = 9
|
||||
|
||||
try:
|
||||
from errno import EHOSTUNREACH
|
||||
except ImportError:
|
||||
EHOSTUNREACH = -1
|
||||
|
||||
try:
|
||||
from errno import ECONNREFUSED
|
||||
except ImportError:
|
||||
ECONNREFUSED = -1
|
||||
|
||||
# macOS can return EPROTOTYPE when writing to a socket that is shutting
# Down. Retrying the write should return the expected EPIPE error.
# Downstream classes (like pywsgi) know how to handle/ignore EPIPE.
# This set is used by socket.send() to decide whether the write should
# be retried. The default is to retry only on EWOULDBLOCK. Here we add
# EPROTOTYPE on macOS to handle this platform-specific race condition.
GSENDAGAIN = (EWOULDBLOCK,)
if is_macos:
    from errno import EPROTOTYPE
    GSENDAGAIN += (EPROTOTYPE,)
|
||||
|
||||
import _socket
|
||||
_realsocket = _socket.socket
|
||||
import socket as __socket__
|
||||
|
||||
|
||||
# Keep a private alias for the stdlib socket error class; on Python 3
# this is OSError.
_SocketError = __socket__.error

_name = _value = None
# Copy the re-imported names from the stdlib socket module into our
# namespace; names absent on this platform/version are skipped, and
# __imports__ is rebound to the list of names actually copied.
__imports__ = copy_globals(__socket__, globals(),
                           only_names=__imports__,
                           ignore_missing_names=True)

# Additionally copy every plain constant (ints and strings, e.g. the
# AF_*/SOCK_*/SO_* values) exported by the stdlib socket module.
for _name in __socket__.__all__:
    _value = getattr(__socket__, _name)
    if isinstance(_value, (integer_types, string_types)):
        globals()[_name] = _value
        __imports__.append(_name)

del _name, _value

# ``timeout`` was copied from the stdlib above, hence the pylint disable.
_timeout_error = timeout # pylint: disable=undefined-variable

from gevent import _hub_primitives
_hub_primitives.set_default_timeout_error(_timeout_error)

# Gevent-specific waiting primitives, exported as extensions.
wait = _hub_primitives.wait_on_watcher
wait_read = _hub_primitives.wait_read
wait_write = _hub_primitives.wait_write
wait_readwrite = _hub_primitives.wait_readwrite
|
||||
|
||||
#: The exception raised by default on a call to :func:`cancel_wait`
class cancel_wait_ex(error): # pylint: disable=undefined-variable
    # Raised in a greenlet that is blocked waiting on a socket when
    # another greenlet closes that socket's file descriptor; carries
    # EBADF like the error the OS would eventually produce.
    def __init__(self):
        super(cancel_wait_ex, self).__init__(
            EBADF,
            'File descriptor was closed in another greenlet')
|
||||
|
||||
|
||||
def cancel_wait(watcher, error=cancel_wait_ex):
    """See :meth:`gevent.hub.Hub.cancel_wait`"""
    hub = get_hub()
    hub.cancel_wait(watcher, error)
|
||||
|
||||
|
||||
def gethostbyname(hostname):
    """
    gethostbyname(host) -> address

    Return the IP address (a string of the form '255.255.255.255') for a host.

    Resolution is performed cooperatively through the hub's configured
    resolver rather than the blocking libc call.

    .. seealso:: :doc:`/dns`
    """
    resolver = get_hub().resolver
    return resolver.gethostbyname(hostname)
|
||||
|
||||
|
||||
def gethostbyname_ex(hostname):
    """
    gethostbyname_ex(host) -> (name, aliaslist, addresslist)

    Return the true host name, a list of aliases, and a list of IP addresses,
    for a host. The host argument is a string giving a host name or IP number.

    .. seealso:: :doc:`/dns`
    """
    # Delegates to the hub's resolver for cooperative (non-blocking) DNS.
    return get_hub().resolver.gethostbyname_ex(hostname)
|
||||
|
||||
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
    """
    Resolve host and port into list of address info entries.

    Translate the host/port argument into a sequence of 5-tuples that contain
    all the necessary arguments for creating a socket connected to that service.
    host is a domain name, a string representation of an IPv4/v6 address or
    None. port is a string service name such as 'http', a numeric port number or
    None. By passing None as the value of host and port, you can pass NULL to
    the underlying C API.

    The family, type and proto arguments can be optionally specified in order to
    narrow the list of addresses returned. Passing zero as a value for each of
    these arguments selects the full range of results.

    .. seealso:: :doc:`/dns`
    """
    # The lower-level resolvers (thread and blocking, which use _socket)
    # work with plain integers; translate the family/socktype fields of
    # each entry into the Python 3 enum types like the stdlib does.
    entries = get_hub().resolver.getaddrinfo(host, port, family, type, proto, flags)
    converted = []
    for entry in entries:
        af, socktype, entry_proto, canonname, sockaddr = entry
        converted.append((
            _intenum_converter(af, AddressFamily), # pylint:disable=undefined-variable
            _intenum_converter(socktype, SocketKind), # pylint:disable=undefined-variable
            entry_proto,
            canonname,
            sockaddr,
        ))
    return converted
|
||||
|
||||
def _intenum_converter(value, enum_klass):
|
||||
try:
|
||||
return enum_klass(value)
|
||||
except ValueError: # pragma: no cover
|
||||
return value
|
||||
|
||||
|
||||
def gethostbyaddr(ip_address):
    """
    gethostbyaddr(ip_address) -> (name, aliaslist, addresslist)

    Return the true host name, a list of aliases, and a list of IP addresses,
    for a host. The host argument is a string giving a host name or IP number.

    Resolution is performed cooperatively through the hub's resolver.

    .. seealso:: :doc:`/dns`
    """
    resolver = get_hub().resolver
    return resolver.gethostbyaddr(ip_address)
|
||||
|
||||
|
||||
def getnameinfo(sockaddr, flags):
    """
    getnameinfo(sockaddr, flags) -> (host, port)

    Get host and port for a sockaddr, resolved cooperatively through
    the hub's resolver.

    .. seealso:: :doc:`/dns`
    """
    resolver = get_hub().resolver
    return resolver.getnameinfo(sockaddr, flags)
|
||||
|
||||
|
||||
def getfqdn(name=''):
    """Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.

    First the hostname returned by gethostbyaddr() is checked, then
    possibly existing aliases. In case no FQDN is available, hostname
    from gethostname() is returned.

    .. versionchanged:: 23.7.0
       The IPv6 generic address '::' now returns the result of
       ``gethostname``, like the IPv4 address '0.0.0.0'.
    """
    # pylint: disable=undefined-variable
    name = name.strip()
    # IPv6 added in a late Python 3.10/3.11 patch release.
    # https://github.com/python/cpython/issues/100374
    if not name or name in ('0.0.0.0', '::'):
        name = gethostname()
    try:
        hostname, aliases, _ = gethostbyaddr(name)
    except error:
        # Reverse lookup failed: fall through and return *name* as-is.
        pass
    else:
        # Prefer the canonical hostname, then any alias, that contains
        # a dot (i.e. looks fully qualified).
        aliases.insert(0, hostname)
        for name in aliases: # EWW! pylint:disable=redefined-argument-from-local
            if isinstance(name, bytes):
                if b'.' in name:
                    break
            elif '.' in name:
                break
        else:
            # No dotted candidate found; fall back to the canonical name.
            name = hostname
    return name
|
||||
|
||||
def __send_chunk(socket, data_memory, flags, timeleft, end, timeout=_timeout_error):
    """
    Send the complete contents of ``data_memory`` before returning.
    This is the core loop around :meth:`send`.

    :param timeleft: Either ``None`` if there is no timeout involved,
        or a float indicating the timeout to use.
    :param end: Either ``None`` if there is no timeout involved, or
        a float giving the absolute end time.
    :return: An updated value for ``timeleft`` (or None)
    :raises timeout: If ``timeleft`` was given and elapsed while
        sending this chunk.
    """
    data_sent = 0
    len_data_memory = len(data_memory)
    started_timer = 0
    while data_sent < len_data_memory:
        chunk = data_memory[data_sent:]
        if timeleft is None:
            # No deadline: plain blocking send.
            data_sent += socket.send(chunk, flags)
        elif started_timer and timeleft <= 0:
            # Check before sending to guarantee a check
            # happens even if each chunk successfully sends its data
            # (especially important for SSL sockets since they have large
            # buffers). But only do this if we've actually tried to
            # send something once to avoid spurious timeouts on non-blocking
            # sockets.
            raise timeout('timed out')
        else:
            started_timer = 1
            data_sent += socket.send(chunk, flags, timeout=timeleft)
            # Recompute the remaining budget after each partial send.
            timeleft = end - time.time()

    return timeleft
|
||||
|
||||
def _sendall(socket, data_memory, flags,
             SOL_SOCKET=__socket__.SOL_SOCKET, # pylint:disable=no-member
             SO_SNDBUF=__socket__.SO_SNDBUF): # pylint:disable=no-member
    """
    Send the *data_memory* (which should be a memoryview)
    using the gevent *socket*, performing well on PyPy.

    :param flags: Passed through to each underlying ``send`` call.
    :return: 0 when *data_memory* is empty; otherwise None after all
        bytes have been sent.
    """

    # On PyPy up through 5.10.0, both PyPy2 and PyPy3, subviews
    # (slices) of a memoryview() object copy the underlying bytes the
    # first time the builtin socket.send() method is called. On a
    # non-blocking socket (that thus calls socket.send() many times)
    # with a large input, this results in many repeated copies of an
    # ever smaller string, depending on the networking buffering. For
    # example, if each send() can process 1MB of a 50MB input, and we
    # naively pass the entire remaining subview each time, we'd copy
    # 49MB, 48MB, 47MB, etc, thus completely killing performance. To
    # workaround this problem, we work in reasonable, fixed-size
    # chunks. This results in a 10x improvement to bench_sendall.py,
    # while having no measurable impact on CPython (since it doesn't
    # copy at all the only extra overhead is a few python function
    # calls, which is negligible for large inputs).

    # On one macOS machine, PyPy3 5.10.1 produced ~ 67.53 MB/s before this change,
    # and ~ 616.01 MB/s after.

    # See https://bitbucket.org/pypy/pypy/issues/2091/non-blocking-socketsend-slow-gevent

    # Too small of a chunk (the socket's buf size is usually too
    # small) results in reduced perf due to *too many* calls to send and too many
    # small copies. With a buffer of 143K (the default on my system), for
    # example, bench_sendall.py yields ~264MB/s, while using 1MB yields
    # ~653MB/s (matching CPython). 1MB is arbitrary and might be better
    # chosen, say, to match a page size?

    len_data_memory = len(data_memory)
    if not len_data_memory:
        # Don't try to send empty data at all, no point, and breaks ssl
        # See issue 719
        return 0


    chunk_size = max(socket.getsockopt(SOL_SOCKET, SO_SNDBUF), 1024 * 1024)

    data_sent = 0
    end = None
    timeleft = None
    if socket.timeout is not None:
        # Establish an absolute deadline shared by all chunks.
        timeleft = socket.timeout
        end = time.time() + timeleft

    while data_sent < len_data_memory:
        chunk_end = min(data_sent + chunk_size, len_data_memory)
        chunk = data_memory[data_sent:chunk_end]

        timeleft = __send_chunk(socket, chunk, flags, timeleft, end)
        data_sent += len(chunk) # Guaranteed it sent the whole thing
|
||||
|
||||
# pylint:disable=no-member
|
||||
# Address families whose tuple addresses we know how to resolve via
# getaddrinfo (IPv6 only when the platform supports it).
# pylint:disable=no-member
_RESOLVABLE_FAMILIES = (__socket__.AF_INET,)
if __socket__.has_ipv6:
    _RESOLVABLE_FAMILIES += (__socket__.AF_INET6,)
|
||||
|
||||
def _resolve_addr(sock, address):
    # Internal method: resolve the AF_INET[6] address using
    # getaddrinfo. Returns *address* unchanged for non-IP families or
    # non-tuple addresses.
    if sock.family not in _RESOLVABLE_FAMILIES or not isinstance(address, tuple):
        return address
    # address is (host, port) (ipv4) or (host, port, flowinfo, scopeid) (ipv6).
    # If it's already resolved, no need to go through getaddrinfo() again.
    # That can lose precision (e.g., on IPv6, it can lose scopeid). The standard library
    # does this in socketmodule.c:setipaddr. (This is only part of the logic, the real
    # thing is much more complex.)
    try:
        if __socket__.inet_pton(sock.family, address[0]):
            return address
    except AttributeError: # pragma: no cover
        # inet_pton might not be available.
        pass
    except _SocketError:
        # Not parseable, needs resolved.
        pass


    # We don't pass the port to getaddrinfo because the C
    # socket module doesn't either (on some systems its
    # illegal to do that without also passing socket type and
    # protocol). Instead we join the port back at the end.
    # See https://github.com/gevent/gevent/issues/1252
    host, port = address[:2]
    r = getaddrinfo(host, None, sock.family)
    address = r[0][-1]
    if len(address) == 2:
        # IPv4 sockaddr: (host, port)
        address = (address[0], port)
    else:
        # IPv6 sockaddr: keep flowinfo and scopeid from the resolved form.
        address = (address[0], port, address[2], address[3])
    return address
|
||||
|
||||
|
||||
# Unique sentinel used by send() to distinguish "no timeout argument
# given" from an explicit ``timeout=None``.
timeout_default = object()
|
||||
|
||||
class SocketMixin(object):
    """
    Shared implementation of gevent's cooperative socket wrapper.

    Holds the real underlying socket in ``_sock`` plus a read and a
    write event watcher; blocking operations catch EWOULDBLOCK from the
    non-blocking underlying socket and wait on the appropriate watcher
    via the hub before retrying.
    """
    # pylint:disable=too-many-public-methods
    __slots__ = (
        'hub',
        'timeout',
        '_read_event',
        '_write_event',
        '_sock',
        '__weakref__',
    )

    def __init__(self):
        # Writing:
        #   (self.a, self.b) = (None,) * 2
        # generates the fastest bytecode. But At least on PyPy,
        # where the SSLSocket subclass has a timeout property,
        # it results in the settimeout() method getting the tuple
        # as the value, not the unpacked None.
        self._read_event = None
        self._write_event = None
        self._sock = None
        self.hub = None
        self.timeout = None

    def _drop_events_and_close(self, closefd=True, _cancel_wait_ex=cancel_wait_ex):
        # Cancel any greenlets waiting on our watchers (raising
        # cancel_wait_ex in them), then close/release resources.
        hub = self.hub
        read_event = self._read_event
        write_event = self._write_event
        self._read_event = self._write_event = None
        hub.cancel_waits_close_and_then(
            (read_event, write_event),
            _cancel_wait_ex,
            # Pass the socket to keep it alive until such time as
            # the waiters are guaranteed to be closed.
            self._drop_ref_on_close if closefd else id,
            self._sock
        )

    def _drop_ref_on_close(self, sock):
        # Subclass responsibility: release the reference to *sock*
        # (and close the fd when appropriate).
        raise NotImplementedError

    def _get_ref(self):
        return self._read_event.ref or self._write_event.ref

    def _set_ref(self, value):
        self._read_event.ref = value
        self._write_event.ref = value

    # Whether the watchers keep the event loop alive.
    ref = property(_get_ref, _set_ref)

    _wait = _wait_on_socket

    ###
    # Common methods defined here need to be added to the
    # API documentation specifically.
    ###

    def settimeout(self, howlong):
        if howlong is not None:
            # Accept anything float()-able, mirroring the stdlib's
            # type/value checks.
            try:
                f = howlong.__float__
            except AttributeError:
                raise TypeError('a float is required', howlong, type(howlong))
            howlong = f()
            if howlong < 0.0:
                raise ValueError('Timeout value out of range')
        # avoid recursion with any property on self.timeout
        SocketMixin.timeout.__set__(self, howlong)

    def gettimeout(self):
        # avoid recursion with any property on self.timeout
        return SocketMixin.timeout.__get__(self, type(self))

    def setblocking(self, flag):
        # Beginning in 3.6.0b3 this is supposed to raise
        # if the file descriptor is closed, but the test for it
        # involves closing the fileno directly. Since we
        # don't touch the fileno here, it doesn't make sense for
        # us.
        if flag:
            self.timeout = None
        else:
            self.timeout = 0.0

    def shutdown(self, how):
        # Wake any greenlets blocked on the side(s) being shut down
        # before shutting down the underlying socket.
        if how == 0: # SHUT_RD
            self.hub.cancel_wait(self._read_event, cancel_wait_ex)
        elif how == 1: # SHUT_WR
            self.hub.cancel_wait(self._write_event, cancel_wait_ex)
        else:
            self.hub.cancel_wait(self._read_event, cancel_wait_ex)
            self.hub.cancel_wait(self._write_event, cancel_wait_ex)
        self._sock.shutdown(how)

    # pylint:disable-next=undefined-variable
    family = property(lambda self: _intenum_converter(self._sock.family, AddressFamily))
    # pylint:disable-next=undefined-variable
    type = property(lambda self: _intenum_converter(self._sock.type, SocketKind))
    proto = property(lambda self: self._sock.proto)

    def fileno(self):
        return self._sock.fileno()

    def getsockname(self):
        return self._sock.getsockname()

    def getpeername(self):
        return self._sock.getpeername()

    def bind(self, address):
        return self._sock.bind(address)

    def listen(self, *args):
        return self._sock.listen(*args)

    def getsockopt(self, *args):
        return self._sock.getsockopt(*args)

    def setsockopt(self, *args):
        return self._sock.setsockopt(*args)

    if hasattr(__socket__.socket, 'ioctl'): # os.name == 'nt'
        def ioctl(self, *args):
            return self._sock.ioctl(*args)
    if hasattr(__socket__.socket, 'sleeptaskw'): # os.name == 'riscos'
        def sleeptaskw(self, *args):
            return self._sock.sleeptaskw(*args)

    def getblocking(self):
        """
        Returns whether the socket will approximate blocking
        behaviour.

        .. versionadded:: 1.3a2
            Added in Python 3.7.
        """
        return self.timeout != 0.0

    def connect(self, address):
        """
        Connect to *address*.

        .. versionchanged:: 20.6.0
            If the host part of the address includes an IPv6 scope ID,
            it will be used instead of ignored, if the platform supplies
            :func:`socket.inet_pton`.
        """
        # In the standard library, ``connect`` and ``connect_ex`` are implemented
        # in C, and they both call a C function ``internal_connect`` to do the real
        # work. This means that it is a visible behaviour difference to have our
        # Python implementation of ``connect_ex`` simply call ``connect``:
        # it could be overridden in a subclass or at runtime! Because of our exception handling,
        # this can make a difference for known subclasses like SSLSocket.
        self._internal_connect(address)

    def connect_ex(self, address):
        """
        Connect to *address*, returning a result code.

        .. versionchanged:: 23.7.0
           No longer uses an overridden ``connect`` method on
           this object. Instead, like the standard library, this method always
           uses a non-replacable internal connection function.
        """
        try:
            return self._internal_connect(address) or 0
        except __socket__.timeout:
            return EAGAIN
        except __socket__.gaierror: # pylint:disable=try-except-raise
            # gaierror/overflowerror/typerror is not silenced by connect_ex;
            # gaierror extends error so catch it first
            raise
        except _SocketError as ex:
            # Python 3: error is now OSError and it has various subclasses.
            # Only those that apply to actually connecting are silenced by
            # connect_ex.
            # On Python 3, we want to check ex.errno; on Python 2
            # there is no such attribute, we need to look at the first
            # argument.
            try:
                err = ex.errno
            except AttributeError:
                err = ex.args[0]
            if err:
                return err
            raise

    def _internal_connect(self, address):
        # Like the C function ``internal_connect``, not meant to be overridden,
        # but exposed for testing.
        if self.timeout == 0.0:
            # Non-blocking: single attempt, no cooperative waiting.
            return self._sock.connect(address)
        address = _resolve_addr(self._sock, address)
        with Timeout._start_new_or_dummy(self.timeout, __socket__.timeout("timed out")):
            while 1:
                # Surface any pending asynchronous error first.
                err = self.getsockopt(__socket__.SOL_SOCKET, __socket__.SO_ERROR)
                if err:
                    raise _SocketError(err, strerror(err))
                result = self._sock.connect_ex(address)

                if not result or result == EISCONN:
                    break
                if (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                    # Connection in progress: yield until writable.
                    self._wait(self._write_event)
                else:
                    if (isinstance(address, tuple)
                            and address[0] == 'fe80::1'
                            and result == EHOSTUNREACH):
                        # On Python 3.7 on mac, we see EHOSTUNREACH
                        # returned for this link-local address, but it really is
                        # supposed to be ECONNREFUSED according to the standard library
                        # tests (test_socket.NetworkConnectionNoServer.test_create_connection)
                        # (On previous versions, that code passed the '127.0.0.1' IPv4 address, so
                        # ipv6 link locals were never a factor; 3.7 passes 'localhost'.)
                        # It is something of a mystery how the stdlib socket code doesn't
                        # produce EHOSTUNREACH---I (JAM) can't see how socketmodule.c would avoid
                        # that. The normal connect just calls connect_ex much like we do.
                        result = ECONNREFUSED
                    raise _SocketError(result, strerror(result))

    def recv(self, *args):
        while 1:
            try:
                return self._sock.recv(*args)
            except _SocketError as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                # QQQ without clearing exc_info test__refcount.test_clean_exit fails
                exc_clear() # Python 2
            self._wait(self._read_event)

    def recvfrom(self, *args):
        while 1:
            try:
                return self._sock.recvfrom(*args)
            except _SocketError as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                exc_clear() # Python 2
            self._wait(self._read_event)

    def recvfrom_into(self, *args):
        while 1:
            try:
                return self._sock.recvfrom_into(*args)
            except _SocketError as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                exc_clear() # Python 2
            self._wait(self._read_event)

    def recv_into(self, *args):
        while 1:
            try:
                return self._sock.recv_into(*args)
            except _SocketError as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                exc_clear() # Python 2
            self._wait(self._read_event)

    def sendall(self, data, flags=0):
        # this sendall is also reused by gevent.ssl.SSLSocket subclass,
        # so it should not call self._sock methods directly
        data_memory = _get_memory(data)
        return _sendall(self, data_memory, flags)

    def sendto(self, *args):
        try:
            return self._sock.sendto(*args)
        except _SocketError as ex:
            if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                raise
            exc_clear()
            self._wait(self._write_event)

            # One retry after waiting; a second EWOULDBLOCK reports
            # zero bytes sent instead of raising.
            try:
                return self._sock.sendto(*args)
            except _SocketError as ex2:
                if ex2.args[0] == EWOULDBLOCK:
                    exc_clear()
                    return 0
                raise

    def send(self, data, flags=0, timeout=timeout_default):
        if timeout is timeout_default:
            timeout = self.timeout
        try:
            return self._sock.send(data, flags)
        except _SocketError as ex:
            if ex.args[0] not in GSENDAGAIN or timeout == 0.0:
                raise
            exc_clear()
            self._wait(self._write_event)
            try:
                return self._sock.send(data, flags)
            except _SocketError as ex2:
                if ex2.args[0] == EWOULDBLOCK:
                    exc_clear()
                    return 0
                raise

    @classmethod
    def _fixup_docstrings(cls):
        # Copy docstrings from the stdlib socket class onto any public
        # method of ours that lacks one.
        for k, v in vars(cls).items():
            if k.startswith('_'):
                continue
            if not hasattr(v, '__doc__') or v.__doc__:
                continue
            smeth = getattr(__socket__.socket, k, None)
            if not smeth or not smeth.__doc__:
                continue

            try:
                v.__doc__ = smeth.__doc__
            except (AttributeError, TypeError):
                # slots can't have docs. Py2 raises TypeError,
                # Py3 raises AttributeError
                continue
|
||||
|
||||
# Populate missing docstrings from the stdlib, then remove the helper
# so it does not linger on the public class.
SocketMixin._fixup_docstrings()
del SocketMixin._fixup_docstrings
|
||||
376
venv3_12/Lib/site-packages/gevent/_tblib.py
Normal file
376
venv3_12/Lib/site-packages/gevent/_tblib.py
Normal file
@@ -0,0 +1,376 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# A vendored version of part of https://github.com/ionelmc/python-tblib
|
||||
# pylint:disable=redefined-outer-name,reimported,function-redefined,bare-except,no-else-return,broad-except
|
||||
####
|
||||
# Copyright (c) 2013-2016, Ionel Cristian Mărieș
|
||||
# All rights reserved.
|
||||
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
|
||||
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
####
|
||||
|
||||
|
||||
# __init__.py
|
||||
import re
|
||||
import sys
|
||||
from types import CodeType
|
||||
|
||||
# Version of the vendored tblib code (not gevent's own version).
__version__ = '2.0.0'
__all__ = 'Traceback', 'TracebackParseError', 'Frame', 'Code'

# Matches one `File "...", line N, in name` frame line of a rendered traceback.
FRAME_RE = re.compile(r'^\s*File "(?P<co_filename>.+)", line (?P<tb_lineno>\d+)(, in (?P<co_name>.+))?$')
class _AttrDict(dict):
|
||||
__slots__ = ()
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return self[name]
|
||||
except KeyError:
|
||||
raise AttributeError(name) from None
|
||||
|
||||
|
||||
# noinspection PyPep8Naming
class __traceback_maker(Exception):
    # Sentinel exception raised by the stub code that
    # Traceback.as_traceback() exec's, purely to manufacture a real
    # traceback object.
    pass
class TracebackParseError(Exception):
    """Raised by ``Traceback.from_string`` when no frame lines can be parsed."""
    pass
class Code:
    """
    Replicates just enough of a builtin code object to enable
    serialization and traceback rendering.
    """

    # Real bytecode is never carried over or serialized.
    co_code = None

    def __init__(self, code):
        # Only the filename and name matter when rendering a traceback
        # line; everything else is a fixed placeholder value.
        self.co_filename = code.co_filename
        self.co_name = code.co_name
        for placeholder in ('co_argcount', 'co_kwonlyargcount',
                            'co_nlocals', 'co_stacksize', 'co_firstlineno'):
            setattr(self, placeholder, 0)
        self.co_varnames = ()
        self.co_flags = 64
class Frame:
    """
    Replicates just enough of a builtin frame object to enable
    serialization and traceback rendering.
    """

    def __init__(self, frame):
        # Locals are dropped entirely; of the globals, keep only the two
        # entries that traceback rendering consults.
        self.f_globals = {
            key: value
            for key, value in frame.f_globals.items()
            if key in ('__file__', '__name__')
        }
        self.f_locals = {}
        self.f_code = Code(frame.f_code)
        self.f_lineno = frame.f_lineno

    def clear(self):
        """
        No-op, for compatibility with PyPy 3.5;
        clear() was added to frame in Python 3.4
        and is called by traceback.clear_frames(), which
        in turn is called by unittest.TestCase.assertRaises
        """
class Traceback:
    """
    Class that wraps builtin Traceback objects.

    Instances mirror the ``tb_frame`` / ``tb_lineno`` / ``tb_next``
    attributes of a real traceback, but hold only picklable data
    (see `Frame` and `Code`).
    """

    # Chain terminator; overwritten per-node while linking.
    tb_next = None

    def __init__(self, tb):
        # *tb* is a real (builtin) traceback object; copy the whole chain.
        self.tb_frame = Frame(tb.tb_frame)
        # noinspection SpellCheckingInspection
        self.tb_lineno = int(tb.tb_lineno)

        # Build in place to avoid exceeding the recursion limit
        tb = tb.tb_next
        prev_traceback = self
        cls = type(self)
        while tb is not None:
            traceback = object.__new__(cls)
            traceback.tb_frame = Frame(tb.tb_frame)
            traceback.tb_lineno = int(tb.tb_lineno)
            prev_traceback.tb_next = traceback
            prev_traceback = traceback
            tb = tb.tb_next

    def as_traceback(self):
        """
        Convert to a builtin Traceback object that is usable for raising or rendering a stacktrace.
        """
        current = self
        top_tb = None
        tb = None
        while current:
            f_code = current.tb_frame.f_code
            # Compile a stub whose raise-statement sits on the recorded
            # line number, then patch the resulting code object so the
            # rendered frame carries the original filename/name.
            code = compile('\n' * (current.tb_lineno - 1) + 'raise __traceback_maker', current.tb_frame.f_code.co_filename, 'exec')
            if hasattr(code, 'replace'):
                # Python 3.8 and newer
                code = code.replace(co_argcount=0, co_filename=f_code.co_filename, co_name=f_code.co_name, co_freevars=(), co_cellvars=())
            else:
                code = CodeType(
                    0,
                    code.co_kwonlyargcount,
                    code.co_nlocals,
                    code.co_stacksize,
                    code.co_flags,
                    code.co_code,
                    code.co_consts,
                    code.co_names,
                    code.co_varnames,
                    f_code.co_filename,
                    f_code.co_name,
                    code.co_firstlineno,
                    code.co_lnotab,
                    (),
                    (),
                )

            # noinspection PyBroadException
            try:
                # Executing the stub raises; the raised exception's
                # traceback (minus the exec frame itself) is the real
                # traceback object we want for this link.
                exec(code, dict(current.tb_frame.f_globals), {})  # noqa: S102
            except Exception:
                next_tb = sys.exc_info()[2].tb_next
                if top_tb is None:
                    top_tb = next_tb
                if tb is not None:
                    tb.tb_next = next_tb
                tb = next_tb
                del next_tb

            current = current.tb_next
        try:
            return top_tb
        finally:
            # Break local references to traceback objects promptly.
            del top_tb
            del tb

    to_traceback = as_traceback

    def as_dict(self):
        """
        Converts to a dictionary representation. You can serialize the result to JSON as it only has
        builtin objects like dicts, lists, ints or strings.
        """
        if self.tb_next is None:
            tb_next = None
        else:
            tb_next = self.tb_next.to_dict()

        code = {
            'co_filename': self.tb_frame.f_code.co_filename,
            'co_name': self.tb_frame.f_code.co_name,
        }
        frame = {
            'f_globals': self.tb_frame.f_globals,
            'f_code': code,
            'f_lineno': self.tb_frame.f_lineno,
        }
        return {
            'tb_frame': frame,
            'tb_lineno': self.tb_lineno,
            'tb_next': tb_next,
        }

    to_dict = as_dict

    @classmethod
    def from_dict(cls, dct):
        """
        Creates an instance from a dictionary with the same structure as ``.as_dict()`` returns.
        """
        if dct['tb_next']:
            tb_next = cls.from_dict(dct['tb_next'])
        else:
            tb_next = None

        # _AttrDict gives the plain dicts just enough attribute access to
        # look like real code/frame/traceback objects for __init__.
        code = _AttrDict(
            co_filename=dct['tb_frame']['f_code']['co_filename'],
            co_name=dct['tb_frame']['f_code']['co_name'],
        )
        frame = _AttrDict(
            f_globals=dct['tb_frame']['f_globals'],
            f_code=code,
            f_lineno=dct['tb_frame']['f_lineno'],
        )
        tb = _AttrDict(
            tb_frame=frame,
            tb_lineno=dct['tb_lineno'],
            tb_next=tb_next,
        )
        return cls(tb)

    @classmethod
    def from_string(cls, string, strict=True):
        """
        Creates an instance by parsing a stacktrace. Strict means that parsing stops when lines are not indented by at least two spaces
        anymore.
        """
        frames = []
        header = strict

        for line in string.splitlines():
            line = line.rstrip()
            if header:
                if line == 'Traceback (most recent call last):':
                    header = False
                continue
            frame_match = FRAME_RE.match(line)
            if frame_match:
                frames.append(frame_match.groupdict())
            elif line.startswith('  '):
                pass  # source line echoed under a frame; ignore it
            elif strict:
                break  # traceback ended

        if frames:
            previous = None
            # Link innermost-first so each node's tb_next is already built.
            for frame in reversed(frames):
                previous = _AttrDict(
                    frame,
                    tb_frame=_AttrDict(
                        frame,
                        f_globals=_AttrDict(
                            __file__=frame['co_filename'],
                            __name__='?',
                        ),
                        f_code=_AttrDict(frame),
                        f_lineno=int(frame['tb_lineno']),
                    ),
                    tb_next=previous,
                )
            return cls(previous)
        else:
            raise TracebackParseError('Could not find any frames in %r.' % string)
# pickling_support.py
|
||||
# gevent: Trying the dict support, so maybe we don't even need this
|
||||
# at all.
|
||||
|
||||
import sys
|
||||
from types import TracebackType
|
||||
#from . import Frame # gevent
|
||||
#from . import Traceback # gevent
|
||||
|
||||
# gevent: defer
|
||||
# if sys.version_info.major >= 3:
|
||||
# import copyreg
|
||||
# else:
|
||||
# import copy_reg as copyreg
|
||||
|
||||
|
||||
def unpickle_traceback(tb_frame, tb_lineno, tb_next):
    """Rebuild a real traceback object from pickled Traceback pieces."""
    # Bypass Traceback.__init__ (it expects a live builtin traceback);
    # populate only the three attributes that as_traceback() reads.
    shadow = object.__new__(Traceback)
    shadow.tb_frame = tb_frame
    shadow.tb_lineno = tb_lineno
    shadow.tb_next = tb_next
    return shadow.as_traceback()
def pickle_traceback(tb):
    # copyreg reducer: serialize a builtin traceback as
    # (unpickler, (frame, lineno, rest-of-chain-or-None)).
    return unpickle_traceback, (Frame(tb.tb_frame), tb.tb_lineno, tb.tb_next and Traceback(tb.tb_next))
def unpickle_exception(func, args, cause, tb):
    """Recreate a pickled exception, restoring its cause and traceback."""
    exc = func(*args)
    # These two attributes are not covered by the normal reduce protocol.
    exc.__cause__ = cause
    exc.__traceback__ = tb
    return exc
def pickle_exception(obj):
    """copyreg reducer for exceptions that preserves ``__cause__`` and ``__traceback__``."""
    # All exceptions, unlike generic Python objects, define __reduce_ex__
    # __reduce_ex__(4) should be no different from __reduce_ex__(3).
    # __reduce_ex__(5) could bring benefits in the unlikely case the exception
    # directly contains buffers, but PickleBuffer objects will cause a crash when
    # running on protocol=4, and there's no clean way to figure out the current
    # protocol from here. Note that any object returned by __reduce_ex__(3) will
    # still be pickled with protocol 5 if pickle.dump() is running with it.
    rv = obj.__reduce_ex__(3)
    if isinstance(rv, str):
        raise TypeError('str __reduce__ output is not supported')
    assert isinstance(rv, tuple)
    assert len(rv) >= 2

    # Splice cause/traceback into the reconstructor args (they become the
    # four arguments of unpickle_exception); keep any extra state items
    # (rv[2:]) so the pickle machinery applies them as usual.
    return (unpickle_exception, rv[:2] + (obj.__cause__, obj.__traceback__)) + rv[2:]
def _get_subclasses(cls):
|
||||
# Depth-first traversal of all direct and indirect subclasses of cls
|
||||
to_visit = [cls]
|
||||
while to_visit:
|
||||
this = to_visit.pop()
|
||||
yield this
|
||||
to_visit += list(this.__subclasses__())
|
||||
|
||||
|
||||
def install(*exc_classes_or_instances):
    """Register copyreg reducers so tracebacks (and optionally exceptions)
    survive pickling.

    With no arguments, every current subclass of BaseException is
    registered. Arguments may be exception classes or instances (for an
    instance, its whole ``__cause__`` chain is registered). May be used
    as a class decorator when given exactly one class.
    """
    import copyreg
    copyreg.pickle(TracebackType, pickle_traceback)

    if sys.version_info.major < 3:
        # Dummy decorator?
        # Python 2 gets no exception support; still return the class so
        # decorator usage stays harmless.
        if len(exc_classes_or_instances) == 1:
            exc = exc_classes_or_instances[0]
            if isinstance(exc, type) and issubclass(exc, BaseException):
                return exc
        return

    if not exc_classes_or_instances:
        # Blanket registration of everything currently defined.
        for exception_cls in _get_subclasses(BaseException):
            copyreg.pickle(exception_cls, pickle_exception)
        return

    for exc in exc_classes_or_instances:
        if isinstance(exc, BaseException):
            # Register the instance's type and every type in its
            # __cause__ chain.
            while exc is not None:
                copyreg.pickle(type(exc), pickle_exception)
                exc = exc.__cause__
        elif isinstance(exc, type) and issubclass(exc, BaseException):
            copyreg.pickle(exc, pickle_exception)
            # Allow using @install as a decorator for Exception classes
            if len(exc_classes_or_instances) == 1:
                return exc
        else:
            raise TypeError('Expected subclasses or instances of BaseException, got %s' % (type(exc)))
# gevent API
# NOTE(review): flag appears unused within this view; presumably consulted
# by gevent's wrappers around install() — confirm against callers.
_installed = False
def dump_traceback(tb):
    """Serialize a (possibly None) traceback to a pickle byte string."""
    from pickle import dumps  # deferred: keep module import side-effect free
    if tb is None:
        return dumps(None)
    # Go through the dict form so the payload contains only builtins.
    return dumps(Traceback(tb).to_dict())
def load_traceback(s):
    """Inverse of ``dump_traceback``: rebuild a real traceback (or None)."""
    from pickle import loads  # deferred: keep module import side-effect free
    payload = loads(s)
    if payload is None:
        return None
    return Traceback.from_dict(payload).as_traceback()
234
venv3_12/Lib/site-packages/gevent/_threading.py
Normal file
234
venv3_12/Lib/site-packages/gevent/_threading.py
Normal file
@@ -0,0 +1,234 @@
|
||||
"""
|
||||
A small selection of primitives that always work with
|
||||
native threads. This has very limited utility and is
|
||||
targeted only for the use of gevent's threadpool.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from collections import deque
|
||||
|
||||
from gevent import monkey
|
||||
from gevent._compat import thread_mod_name
|
||||
|
||||
__all__ = [
    'Lock',
    'Queue',
    'EmptyTimeout',
]


# Grab the *unpatched* thread primitives: this module must keep working
# with native threads even when gevent's monkey-patching is active.
start_new_thread, Lock, get_thread_ident, = monkey.get_original(thread_mod_name, [
    'start_new_thread', 'allocate_lock', 'get_ident',
])
# We want to support timeouts on locks. In this way, we can allow idle threads to
# expire from a thread pool. On Python 3, this is native behaviour; on Python 2,
# we have to emulate it. For Python 3, we want this to have the lowest possible overhead,
# so we'd prefer to use a direct call, rather than go through a wrapper. But we also
# don't want to allocate locks at import time because..., so we swizzle out the method
# at runtime.
#
#
# In all cases, a timeout value of -1 means "infinite". Sigh.
def acquire_with_timeout(lock, timeout=-1):
    # Self-replacing stub: on first use, rebind the module-level name
    # directly to the lock type's unbound ``acquire`` method, so every
    # subsequent call skips this wrapper entirely.
    globals()['acquire_with_timeout'] = type(lock).acquire
    return lock.acquire(timeout=timeout)
class _Condition(object):
    """A minimal condition variable built on native locks.

    Unlike :class:`threading.Condition`, each waiter supplies its own
    pre-allocated *wait_lock*, and only ``notify_one`` is supported.
    """
    # We could use libuv's ``uv_cond_wait`` to implement this whole
    # class and get native timeouts and native performance everywhere.

    # pylint:disable=method-hidden

    __slots__ = (
        '_lock',
        '_waiters',
    )

    def __init__(self, lock):
        # This lock is used to protect our own data structures;
        # calls to ``wait`` and ``notify_one`` *must* be holding this
        # lock.
        self._lock = lock
        # Stack of wait_locks, one per blocked thread (LIFO wake order).
        self._waiters = []

    # No need to special case for _release_save and
    # _acquire_restore; those are only used for RLock, and
    # we don't use those.

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, t, v, tb):
        return self._lock.__exit__(t, v, tb)

    def __repr__(self):
        return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))

    def wait(self, wait_lock, timeout=-1, _wait_for_notify=acquire_with_timeout):
        """Block until notified or *timeout* (seconds; -1 = forever) expires.

        Returns a true value if notified, false on timeout.
        """
        # This variable is for the monitoring utils to know that
        # this is an idle frame and shouldn't be counted.
        gevent_threadpool_worker_idle = True  # pylint:disable=unused-variable

        # The _lock must be held.
        # The ``wait_lock`` must be *un*owned, so the timeout doesn't apply there.
        # Take that lock now.
        wait_lock.acquire()
        self._waiters.append(wait_lock)

        self._lock.release()
        try:
            # We're already holding this native lock, so when we try to acquire it again,
            # that won't work and we'll block until someone calls notify_one() (which might
            # have already happened).
            notified = _wait_for_notify(wait_lock, timeout)
        finally:
            self._lock.acquire()

        # Now that we've acquired _lock again, no one can call notify_one(), or this
        # method.
        if not notified:
            # We need to come out of the waiters list. IF we're still there; it's
            # possible that between the call to _acquire() returning False,
            # and the time that we acquired _lock, someone did a ``notify_one``
            # and released the lock. For that reason, do a non-blocking acquire()
            notified = wait_lock.acquire(False)
        if not notified:
            # Well narf. No go. We must stil be in the waiters list, so take us out
            self._waiters.remove(wait_lock)
            # We didn't get notified, but we're still holding a lock that we
            # need to release.
            wait_lock.release()
        else:
            # We got notified, so we need to reset.
            wait_lock.release()
        return notified

    def notify_one(self):
        # The lock SHOULD be owned, but we don't check that.
        try:
            waiter = self._waiters.pop()
        except IndexError:
            # Nobody around
            pass
        else:
            # The owner of the ``waiter`` is blocked on
            # acquiring it again, so when we ``release`` it, it
            # is free to be scheduled and resume.
            waiter.release()
class EmptyTimeout(Exception):
    """Raised from :meth:`Queue.get` if no item is available in the timeout."""
    # NOTE(review): deliberately a distinct type (not queue.Empty, it
    # seems) so callers of this internal Queue catch it specifically —
    # confirm against gevent.threadpool usage.
class Queue(object):
    """
    Create a queue object.

    The queue is always infinite size.
    """

    __slots__ = ('_queue', '_mutex', '_not_empty', 'unfinished_tasks')

    def __init__(self):
        self._queue = deque()
        # mutex must be held whenever the queue is mutating. All methods
        # that acquire mutex must release it before returning. mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self._mutex = Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self._not_empty = _Condition(self._mutex)

        self.unfinished_tasks = 0

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        with self._mutex:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                # Only under-counting is an error; reaching exactly zero
                # simply records completion.
                if unfinished < 0:
                    raise ValueError(
                        'task_done() called too many times; %s remaining tasks' % (
                            self.unfinished_tasks
                        )
                    )
            self.unfinished_tasks = unfinished

    def qsize(self, len=len):
        """Return the approximate size of the queue (not reliable!)."""
        return len(self._queue)

    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        return not self.qsize()

    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        # The queue is unbounded, so it is never full.
        return False

    def put(self, item):
        """Put an item into the queue.
        """
        with self._mutex:
            self._queue.append(item)
            self.unfinished_tasks += 1
            self._not_empty.notify_one()

    def get(self, cookie, timeout=-1):
        """
        Remove and return an item from the queue.

        If *timeout* is given, and is not -1, then we will
        attempt to wait for only that many seconds to get an item.
        If those seconds elapse and no item has become available,
        raises :class:`EmptyTimeout`.
        """
        with self._mutex:
            while not self._queue:
                # Temporarily release our mutex and wait for someone
                # to wake us up. There *should* be an item in the queue
                # after that.
                notified = self._not_empty.wait(cookie, timeout)
                # Ok, we're holding the mutex again, so our state is guaranteed stable.
                # It's possible that in the brief window where we didn't hold the lock,
                # someone put something in the queue, and if so, we can take it.
                if not notified and not self._queue:
                    raise EmptyTimeout
            item = self._queue.popleft()
        return item

    def allocate_cookie(self):
        """
        Create and return the *cookie* to pass to `get()`.

        Each thread that will use `get` needs a distinct cookie.
        """
        return Lock()

    def kill(self):
        """
        Call to destroy this object.

        Use this when it's not possible to safely drain the queue, e.g.,
        after a fork when the locks are in an uncertain state.
        """
        self._queue = None
        self._mutex = None
        self._not_empty = None
        self.unfinished_tasks = None
182
venv3_12/Lib/site-packages/gevent/_tracer.py
Normal file
182
venv3_12/Lib/site-packages/gevent/_tracer.py
Normal file
@@ -0,0 +1,182 @@
|
||||
# Copyright (c) 2018 gevent. See LICENSE for details.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from greenlet import settrace
|
||||
from greenlet import getcurrent
|
||||
|
||||
from gevent.util import format_run_info
|
||||
|
||||
from gevent._compat import perf_counter
|
||||
from gevent._util import gmctime
|
||||
|
||||
|
||||
__all__ = [
|
||||
'GreenletTracer',
|
||||
'HubSwitchTracer',
|
||||
'MaxSwitchTracer',
|
||||
]
|
||||
|
||||
# Recall these classes are cython compiled, so
|
||||
# class variable declarations are bad.
|
||||
|
||||
|
||||
class GreenletTracer(object):
    """Installs itself as the greenlet trace function and records which
    greenlet is active and how often switches occur, for the monitoring
    thread to inspect.
    """

    def __init__(self):
        # A counter, incremented by the greenlet trace function
        # we install on every greenlet switch. This is reset when the
        # periodic monitoring thread runs.

        self.greenlet_switch_counter = 0

        # The greenlet last switched to.
        self.active_greenlet = None

        # The trace function that was previously installed,
        # if any.
        # NOTE: Calling a class instance is cheaper than
        # calling a bound method (at least when compiled with cython)
        # even when it redirects to another function.
        prev_trace = settrace(self)

        self.previous_trace_function = prev_trace

        self._killed = False

    def kill(self):
        # Must be called in the monitored thread.
        if not self._killed:
            self._killed = True
            # Restore whatever trace function we displaced.
            settrace(self.previous_trace_function)
            self.previous_trace_function = None

    def _trace(self, event, args):
        # This function runs in the thread we are monitoring.
        self.greenlet_switch_counter += 1
        if event in ('switch', 'throw'):
            # args is (origin, target). This is the only defined
            # case
            self.active_greenlet = args[1]
        else:
            self.active_greenlet = None
        if self.previous_trace_function is not None:
            # Chain to the displaced trace function.
            self.previous_trace_function(event, args)

    def __call__(self, event, args):
        # The instance itself is installed via settrace(); delegate.
        return self._trace(event, args)

    def did_block_hub(self, hub):
        # Check to see if we have blocked since the last call to this
        # method. Returns a true value if we blocked (not in the hub),
        # a false value if everything is fine.

        # This may be called in the same thread being traced or a
        # different thread; if a different thread, there is a race
        # condition with this being incremented in the thread we're
        # monitoring, but probably not often enough to lead to
        # annoying false positives.

        active_greenlet = self.active_greenlet
        did_switch = self.greenlet_switch_counter != 0
        self.greenlet_switch_counter = 0

        if did_switch or active_greenlet is None or active_greenlet is hub:
            # Either we switched, or nothing is running (we got a
            # trace event we don't know about or were requested to
            # ignore), or we spent the whole time in the hub, blocked
            # for IO. Nothing to report.
            return False
        return True, active_greenlet

    def ignore_current_greenlet_blocking(self):
        # Don't pay attention to the current greenlet.
        self.active_greenlet = None

    def monitor_current_greenlet_blocking(self):
        self.active_greenlet = getcurrent()

    def did_block_hub_report(self, hub, active_greenlet, format_kwargs):
        # Build and return the list of report lines describing a blockage.
        # XXX: On Python 2 with greenlet 1.0a1, '%s' formatting a greenlet
        # results in a unicode object. This is a bug in greenlet, I think.
        # https://github.com/python-greenlet/greenlet/issues/218
        report = ['=' * 80,
                  '\n%s : Greenlet %s appears to be blocked' %
                  (gmctime(), str(active_greenlet))]
        report.append(" Reported by %s" % (self,))
        try:
            frame = sys._current_frames()[hub.thread_ident]
        except KeyError:
            # The thread holding the hub has died. Perhaps we shouldn't
            # even report this?
            stack = ["Unknown: No thread found for hub %r\n" % (hub,)]
        else:
            stack = traceback.format_stack(frame)
        report.append('Blocked Stack (for thread id %s):' % (hex(hub.thread_ident),))
        report.append(''.join(stack))
        report.append("Info:")
        report.extend(format_run_info(**format_kwargs))

        return report
class _HubTracer(GreenletTracer):
    # Shared base for tracers that need a reference to the hub and a
    # maximum-allowed blocking time.

    def __init__(self, hub, max_blocking_time):
        GreenletTracer.__init__(self)
        self.max_blocking_time = max_blocking_time
        self.hub = hub

    def kill(self):
        # Drop the hub reference before uninstalling the trace function.
        self.hub = None
        GreenletTracer.kill(self)
class HubSwitchTracer(_HubTracer):
    # A greenlet tracer that records the last time we switched *into* the hub.

    def __init__(self, hub, max_blocking_time):
        _HubTracer.__init__(self, hub, max_blocking_time)
        self.last_entered_hub = 0

    def _trace(self, event, args):
        GreenletTracer._trace(self, event, args)
        if self.active_greenlet is self.hub:
            self.last_entered_hub = perf_counter()

    def did_block_hub(self, hub):
        # Returns (True, active_greenlet) when the hub hasn't been entered
        # within max_blocking_time; implicitly returns None (falsy) when
        # everything is fine.
        if perf_counter() - self.last_entered_hub > self.max_blocking_time:
            return True, self.active_greenlet
class MaxSwitchTracer(_HubTracer):
    # A greenlet tracer that records the maximum time between switches,
    # not including time spent in the hub.

    def __init__(self, hub, max_blocking_time):
        _HubTracer.__init__(self, hub, max_blocking_time)
        self.last_switch = perf_counter()
        self.max_blocking = 0

    def _trace(self, event, args):
        old_active = self.active_greenlet
        GreenletTracer._trace(self, event, args)
        if old_active is not self.hub and old_active is not None:
            # If we're switching out of the hub, the blocking
            # time doesn't count.
            switched_at = perf_counter()
            self.max_blocking = max(self.max_blocking,
                                    switched_at - self.last_switch)

    def did_block_hub(self, hub):
        if self.max_blocking == 0:
            # We never switched. Check the time now
            self.max_blocking = perf_counter() - self.last_switch

        # Implicitly returns None (falsy) when under the threshold.
        if self.max_blocking > self.max_blocking_time:
            return True, self.active_greenlet
|
||||
# Swap in the Cython-compiled versions of these classes when available.
from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent.__tracer')
351
venv3_12/Lib/site-packages/gevent/_util.py
Normal file
351
venv3_12/Lib/site-packages/gevent/_util.py
Normal file
@@ -0,0 +1,351 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
internal gevent utilities, not for external use.
|
||||
"""
|
||||
|
||||
# Be very careful not to import anything that would cause issues with
|
||||
# monkey-patching.
|
||||
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
from gevent._compat import iteritems
|
||||
|
||||
|
||||
class _NONE(object):
    """
    A special object you must never pass to any gevent API.
    Used as a marker object for keyword arguments that cannot have the
    builtin None (because that might be a valid value).
    """
    __slots__ = ()

    def __repr__(self):
        return '<default value>'

# Replace the class with its (only) instance: nothing else ever needs to
# instantiate it, and a singleton makes ``is _NONE`` checks valid.
_NONE = _NONE()
||||
# Attributes copied wholesale from the wrapped callable, and attributes
# merged (dict-update) into the wrapper, mirroring functools' constants.
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
                       '__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
                   wrapped,
                   assigned=WRAPPER_ASSIGNMENTS,
                   updated=WRAPPER_UPDATES):
    """
    Based on code from the standard library ``functools``, but
    doesn't perform any of the troublesome imports.

    functools imports RLock from _thread for purposes of the
    ``lru_cache``, making it problematic to use from gevent.

    The other imports are somewhat heavy: abc, collections, types.
    """
    missing = object()
    for attr in assigned:
        value = getattr(wrapped, attr, missing)
        if value is not missing:
            setattr(wrapper, attr, value)
    for attr in updated:
        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
    # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
    # from the wrapped function when updating __dict__
    wrapper.__wrapped__ = wrapped
    # Return the wrapper so this can be used as a decorator via partial()
    return wrapper
||||
def copy_globals(source,
                 globs,
                 only_names=None,
                 ignore_missing_names=False,
                 names_to_ignore=(),
                 dunder_names_to_keep=('__implements__', '__all__', '__imports__'),
                 cleanup_globs=True):
    """
    Copy attributes defined in ``source.__dict__`` to the dictionary
    in globs (which should be the caller's :func:`globals`).

    Names that start with ``__`` are ignored (unless they are in
    *dunder_names_to_keep*). Anything found in *names_to_ignore* is
    also ignored.

    If *only_names* is given, only those attributes will be
    considered. In this case, *ignore_missing_names* says whether or
    not to raise an :exc:`AttributeError` if one of those names can't
    be found.

    If *cleanup_globs* has a true value, then common things imported but
    not used at runtime are removed, including this function.

    Returns a list of the names copied; this should be assigned to ``__imports__``.
    """
    if only_names:
        # Missing names either fall through as the _NONE marker (and are
        # skipped below) or raise AttributeError, per ignore_missing_names.
        if ignore_missing_names:
            items = ((name, getattr(source, name, _NONE)) for name in only_names)
        else:
            items = ((name, getattr(source, name)) for name in only_names)
    else:
        items = iteritems(source.__dict__)

    copied = []
    for name, value in items:
        skip = (
            value is _NONE
            or name in names_to_ignore
            or (name.startswith("__") and name not in dunder_names_to_keep)
        )
        if skip:
            continue
        globs[name] = value
        copied.append(name)

    if cleanup_globs and 'copy_globals' in globs:
        del globs['copy_globals']

    return copied
||||
def import_c_accel(globs, cname):
    """
    Import the C-accelerator for the *cname*
    and copy its globals.

    The *cname* should be hardcoded to match the expected
    C accelerator module.

    Unless PURE_PYTHON is set (in the environment or automatically
    on PyPy), then the C-accelerator is required.
    """
    if not cname.startswith('gevent._gevent_c'):
        # Old module code that hasn't been updated yet.
        cname = cname.replace('gevent._',
                              'gevent._gevent_c')

    name = globs.get('__name__')

    if not name or name == cname:
        # Do nothing if we're being exec'd as a file (no name)
        # or we're running from the C extension
        return


    from gevent._compat import PURE_PYTHON
    if PURE_PYTHON:
        # Explicitly requested (or PyPy): keep the pure-Python version.
        return

    import importlib
    import warnings
    with warnings.catch_warnings():
        # Python 3.7 likes to produce
        # "ImportWarning: can't resolve
        # package from __spec__ or __package__, falling back on
        # __name__ and __path__"
        # when we load cython compiled files. This is probably a bug in
        # Cython, but it doesn't seem to have any consequences, it's
        # just annoying to see and can mess up our unittests.
        warnings.simplefilter('ignore', ImportWarning)
        mod = importlib.import_module(cname)

    # By adopting the entire __dict__, we get a more accurate
    # __file__ and module repr, plus we don't leak any imported
    # things we no longer need.
    globs.clear()
    globs.update(mod.__dict__)

    if 'import_c_accel' in globs:
        # Don't leave this helper behind in the accelerated module.
        del globs['import_c_accel']
|
||||
class Lazy(object):
    """
    A non-data descriptor used just like ``@property``. The
    difference is that the function's result is stored in the
    instance dict on first access, so the function is never called
    again for that instance.

    Contrast with `readproperty`.
    """
    def __init__(self, func):
        self.data = (func, func.__name__)
        update_wrapper(self, func)

    def __get__(self, inst, class_):
        if inst is None:
            return self

        fn, attr_name = self.data
        result = fn(inst)
        # Caching under the attribute name shadows this (non-data)
        # descriptor for all future lookups on *inst*.
        inst.__dict__[attr_name] = result
        return result
|
||||
|
||||
class readproperty(object):
    """
    A non-data descriptor similar to :class:`property`.

    Unlike :class:`property`, the attribute can be assigned to
    directly without invoking a setter function. Once assigned, the
    value lives in the instance dict and shadows this descriptor, so
    the getter function is not called on that instance again.

    Contrast with `Lazy`, which caches the getter's result the first
    time it is *read* and never calls the function on that instance
    again.
    """

    def __init__(self, func):
        self.func = func
        update_wrapper(self, func)

    def __get__(self, inst, class_):
        # Class-level access returns the descriptor itself, like property.
        return self if inst is None else self.func(inst)
|
||||
|
||||
class LazyOnClass(object):
    """
    Similar to `Lazy`, but stores the computed value on the *class*
    instead of the instance.

    This is useful when the getter is expensive and conceptually a
    shared class value, but we don't want import-time side-effects
    such as expensive imports because it may not always be used.

    Probably doesn't mix well with inheritance?
    """

    @classmethod
    def lazy(cls, cls_dict, func):
        "Put a LazyOnClass object in *cls_dict* with the same name as *func*"
        cls_dict[func.__name__] = cls(func)

    def __init__(self, func, name=None):
        self.name = name if name else func.__name__
        self.func = func

    def __get__(self, inst, klass):
        if inst is None: # pragma: no cover
            return self

        computed = self.func(inst)
        # Replace this descriptor on the class with the plain value,
        # so subsequent lookups (on any instance) skip the getter.
        setattr(klass, self.name, computed)
        return computed
|
||||
|
||||
|
||||
def gmctime():
    """
    Return the current UTC time as a string in RFC3339 format.
    """
    # Imported locally to keep module import cheap and side-effect free.
    from time import gmtime, strftime
    return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
|
||||
|
||||
|
||||
###
|
||||
# Release automation.
|
||||
#
|
||||
# Most of this is to integrate zest.releaser with towncrier. There is
|
||||
# a plugin package that can do the same:
|
||||
# https://github.com/collective/zestreleaser.towncrier
|
||||
###
|
||||
|
||||
def prereleaser_middle(data): # pragma: no cover
    """
    zest.releaser prerelease middle hook for gevent.

    The prerelease step:

        asks you for a version number
        updates the setup.py or version.txt and the
        CHANGES/HISTORY/CHANGELOG file (with either
        this new version
        number and offers to commit those changes to git

    The middle hook:

        All data dictionary items are available and some questions
        (like new version number) have been asked.
        No filesystem changes have been made yet.

    It is our job to finish up the filesystem changes needed, including:

    - Calling towncrier to handle CHANGES.rst
    - Add the version number to ``versionadded``, ``versionchanged`` and
      ``deprecated`` directives in Python source.
    """
    if data['name'] != 'gevent':
        # We are specified in ``setup.cfg``, not ``setup.py``, so we do not
        # come into play for other projects, only this one. We shouldn't
        # need this check, but there it is.
        return

    import re
    import os
    import subprocess
    from gevent.testing import modules

    new_version = data['new_version']

    # Generate CHANGES.rst, remove old news entries.
    subprocess.check_call([
        'towncrier',
        'build',
        '--version', data['new_version'],
        '--yes'
    ])

    data['update_history'] = False # Because towncrier already did.

    # But unstage it; we want it to show in the diff zest.releaser will do
    subprocess.check_call([
        'git',
        'restore',
        '--staged',
        'CHANGES.rst',
    ])

    # Put the version number in source files.
    regex = re.compile(b'.. (versionchanged|versionadded|deprecated):: NEXT')
    # BUGFIX: the original code re-encoded *new_version* unconditionally
    # after this isinstance() guard, which raises AttributeError when the
    # version is already ``bytes`` (bytes has no .encode() on Python 3).
    # Encode only when we actually have text.
    if not isinstance(new_version, bytes):
        new_version_bytes = new_version.encode('ascii')
    else:
        new_version_bytes = new_version
    replacement = br'.. \1:: %s' % (new_version_bytes,)
    # TODO: This should also look in the docs/ directory at
    # *.rst
    for path, _ in modules.walk_modules(
            # Start here
            basedir=os.path.join(data['reporoot'], 'src', 'gevent'),
            # Include sub-dirs
            recursive=True,
            # Include tests
            include_tests=True,
            # and other things usually excluded
            excluded_modules=(),
            # Don't return build binaries
            include_so=False,
            # Don't try to import things; we want all files.
            check_optional=False,
    ):
        with open(path, 'rb') as f:
            contents = f.read()
        new_contents, count = regex.subn(replacement, contents)
        if count:
            print("Replaced version NEXT in", path)
            with open(path, 'wb') as f:
                f.write(new_contents)
|
||||
|
||||
def postreleaser_before(data): # pragma: no cover
    """
    Prevents zest.releaser from modifying the CHANGES.rst to add the
    'no changes yet' section; towncrier is in charge of CHANGES.rst.

    Needs zest.releaser 6.15.0.
    """
    # We are specified in ``setup.cfg``, not ``setup.py``, so we only
    # come into play for this project, not others. We shouldn't need
    # this check, but there it is.
    if data['name'] == 'gevent':
        data['update_history'] = False
|
||||
207
venv3_12/Lib/site-packages/gevent/_waiter.py
Normal file
207
venv3_12/Lib/site-packages/gevent/_waiter.py
Normal file
@@ -0,0 +1,207 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# copyright 2018 gevent
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
"""
|
||||
Low-level waiting primitives.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
from gevent.exceptions import ConcurrentObjectUseError
|
||||
|
||||
__all__ = [
|
||||
'Waiter',
|
||||
]
|
||||
|
||||
_NONE = object()
|
||||
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
|
||||
|
||||
class Waiter(object):
    """
    A low level communication utility for greenlets.

    Waiter is a wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them somewhat safer:

    * switching will occur only if the waiting greenlet is executing :meth:`get` method currently;
    * any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw`
    * if :meth:`switch`/:meth:`throw` is called before the receiver calls :meth:`get`, then :class:`Waiter`
      will store the value/exception. The following :meth:`get` will return the value/raise the exception.

    The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet.
    The :meth:`get` method must be called from a greenlet other than :class:`Hub`.

    >>> from gevent.hub import Waiter
    >>> from gevent import get_hub
    >>> result = Waiter()
    >>> timer = get_hub().loop.timer(0.1)
    >>> timer.start(result.switch, 'hello from Waiter')
    >>> result.get() # blocks for 0.1 seconds
    'hello from Waiter'
    >>> timer.close()

    If switch is called before the greenlet gets a chance to call :meth:`get` then
    :class:`Waiter` stores the value.

    >>> from gevent.time import sleep
    >>> result = Waiter()
    >>> timer = get_hub().loop.timer(0.1)
    >>> timer.start(result.switch, 'hi from Waiter')
    >>> sleep(0.2)
    >>> result.get() # returns immediately without blocking
    'hi from Waiter'
    >>> timer.close()

    .. warning::

        This is a limited and dangerous way to communicate between
        greenlets. It can easily leave a greenlet unscheduled forever
        if used incorrectly. Consider using safer classes such as
        :class:`gevent.event.Event`, :class:`gevent.event.AsyncResult`,
        or :class:`gevent.queue.Queue`.
    """

    __slots__ = ['hub', 'greenlet', 'value', '_exception']

    def __init__(self, hub=None):
        # hub: the hub whose loop delivers switch()/throw(); defaults
        # to the calling greenlet's hub.
        self.hub = get_hub() if hub is None else hub
        # greenlet: the greenlet currently blocked in get(), if any.
        self.greenlet = None
        # value: the value passed to switch(), once stored.
        self.value = None
        # _exception: _NONE means "nothing stored yet"; None means a
        # value is stored; otherwise it is the args passed to throw().
        self._exception = _NONE

    def clear(self):
        # Reset to the freshly-constructed state (the hub is kept).
        self.greenlet = None
        self.value = None
        self._exception = _NONE

    def __str__(self):
        if self._exception is _NONE:
            return '<%s greenlet=%s>' % (type(self).__name__, self.greenlet)
        if self._exception is None:
            return '<%s greenlet=%s value=%r>' % (type(self).__name__, self.greenlet, self.value)
        return '<%s greenlet=%s exc_info=%r>' % (type(self).__name__, self.greenlet, self.exc_info)

    def ready(self):
        """Return true if and only if it holds a value or an exception"""
        return self._exception is not _NONE

    def successful(self):
        """Return true if and only if it is ready and holds a value"""
        return self._exception is None

    @property
    def exc_info(self):
        "Holds the exception info passed to :meth:`throw` if :meth:`throw` was called. Otherwise ``None``."
        if self._exception is not _NONE:
            return self._exception

    def switch(self, value):
        """
        Switch to the greenlet if one's available. Otherwise store the
        *value*.

        .. versionchanged:: 1.3b1
           The *value* is no longer optional.
        """
        greenlet = self.greenlet
        if greenlet is None:
            # Nobody is waiting yet: store the value for a later get().
            self.value = value
            self._exception = None
        else:
            if getcurrent() is not self.hub: # pylint:disable=undefined-variable
                raise AssertionError("Can only use Waiter.switch method from the Hub greenlet")
            switch = greenlet.switch
            try:
                switch(value)
            except: # pylint:disable=bare-except
                # An error raised by the woken greenlet must not
                # propagate into (and kill) the hub's loop; route it
                # through the hub's error handler instead.
                self.hub.handle_error(switch, *sys.exc_info())

    def switch_args(self, *args):
        # Convenience wrapper: deliver several values as one tuple.
        return self.switch(args)

    def throw(self, *throw_args):
        """Switch to the greenlet with the exception. If there's no greenlet, store the exception."""
        greenlet = self.greenlet
        if greenlet is None:
            # Nobody is waiting yet: store the exception info for get().
            self._exception = throw_args
        else:
            if getcurrent() is not self.hub: # pylint:disable=undefined-variable
                raise AssertionError("Can only use Waiter.switch method from the Hub greenlet")
            throw = greenlet.throw
            try:
                throw(*throw_args)
            except: # pylint:disable=bare-except
                # Same rationale as in switch(): keep the hub alive.
                self.hub.handle_error(throw, *sys.exc_info())

    def get(self):
        """If a value/an exception is stored, return/raise it. Otherwise block until switch() or throw() is called."""
        if self._exception is not _NONE:
            if self._exception is None:
                return self.value
            # Re-raise the stored exception in the calling greenlet.
            getcurrent().throw(*self._exception) # pylint:disable=undefined-variable
        else:
            if self.greenlet is not None:
                raise ConcurrentObjectUseError('This Waiter is already used by %r' % (self.greenlet, ))
            self.greenlet = getcurrent() # pylint:disable=undefined-variable
            try:
                # Yield to the hub; switch()/throw() will wake us.
                return self.hub.switch()
            finally:
                self.greenlet = None

    def __call__(self, source):
        # Link-style callback: forward *source*'s value or exception.
        if source.exception is None:
            self.switch(source.value)
        else:
            self.throw(source.exception)

    # can also have a debugging version, that wraps the value in a tuple (self, value) in switch()
    # and unwraps it in wait() thus checking that switch() was indeed called
|
||||
|
||||
|
||||
|
||||
class MultipleWaiter(Waiter):
    """
    An internal extension of Waiter that can be used if multiple objects
    must be waited on, and there is a chance that in between waits greenlets
    might be switched out. All greenlets that switch to this waiter
    will have their value returned.

    This does not handle exceptions or throw methods.
    """
    # _values: FIFO of values delivered by switch() and not yet
    # consumed by get().
    __slots__ = ['_values']

    def __init__(self, hub=None):
        Waiter.__init__(self, hub)
        # we typically expect a relatively small number of these to be outstanding.
        # since we pop from the left, a deque might be slightly
        # more efficient, but since we're in the hub we avoid imports if
        # we can help it to better support monkey-patching, and delaying the import
        # here can be impractical (see https://github.com/gevent/gevent/issues/652)
        self._values = []

    def switch(self, value):
        # Queue the real value; wake any waiter with a plain True
        # marker — get() hands out queued values, not the marker.
        self._values.append(value)
        Waiter.switch(self, True)

    def get(self):
        # Block only when nothing has been queued yet; afterwards
        # reset the base Waiter so it can be waited on again.
        if not self._values:
            Waiter.get(self)
            Waiter.clear(self)

        return self._values.pop(0)
|
||||
|
||||
def _init():
    # No-op in the pure-Python build; presumably the Cython-compiled
    # accelerator substitutes a real initializer for ``greenlet_init``
    # (it is deliberately assigned via locals() earlier in this file) —
    # TODO confirm against the compiled module.
    greenlet_init() # pylint:disable=undefined-variable

_init()


# Replace this module's globals with the C-accelerated implementation
# when available (no-op under PURE_PYTHON).
from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent.__waiter')
|
||||
10
venv3_12/Lib/site-packages/gevent/ares.py
Normal file
10
venv3_12/Lib/site-packages/gevent/ares.py
Normal file
@@ -0,0 +1,10 @@
|
||||
"""Backwards compatibility alias for :mod:`gevent.resolver.cares`.
|
||||
|
||||
.. deprecated:: 1.3
|
||||
Use :mod:`gevent.resolver.cares`
|
||||
"""
|
||||
# pylint:disable=no-name-in-module,import-error
|
||||
from gevent.resolver.cares import * # pylint:disable=wildcard-import,unused-wildcard-import,
|
||||
import gevent.resolver.cares as _cares
|
||||
__all__ = _cares.__all__ # pylint:disable=c-extension-no-member
|
||||
del _cares
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user