diff -pruN 0.36.1-12/.github/workflows/docs.yaml 0.39.0-0ubuntu1/.github/workflows/docs.yaml
--- 0.36.1-12/.github/workflows/docs.yaml	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/.github/workflows/docs.yaml	2020-02-02 00:00:00.000000000 +0000
@@ -11,9 +11,9 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
     - name: Set up Python
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v5
       with:
         python-version: '3.x'
     - name: Install dependencies
diff -pruN 0.36.1-12/.github/workflows/publish.yaml 0.39.0-0ubuntu1/.github/workflows/publish.yaml
--- 0.36.1-12/.github/workflows/publish.yaml	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/.github/workflows/publish.yaml	2020-02-02 00:00:00.000000000 +0000
@@ -19,9 +19,9 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
     - name: Set up Python
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v5
       with:
         python-version: '3.x'
     - name: Install dependencies
diff -pruN 0.36.1-12/.github/workflows/style.yaml 0.39.0-0ubuntu1/.github/workflows/style.yaml
--- 0.36.1-12/.github/workflows/style.yaml	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/.github/workflows/style.yaml	2020-02-02 00:00:00.000000000 +0000
@@ -10,10 +10,10 @@ jobs:
     timeout-minutes: 5
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: cache pip
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ hashFiles('.github/workflows/style.yaml') }}
@@ -21,7 +21,7 @@ jobs:
             ${{ runner.os }}-pip-
             ${{ runner.os }}-
       - name: cache tox
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: .tox
           key: ${{ runner.os }}-tox-style-${{ hashFiles('tox.ini') }}
@@ -31,7 +31,7 @@ jobs:
             ${{ runner.os }}-
 
       - name: setup python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v5
         with:
           python-version: 3.x
       - name: install tox
diff -pruN 0.36.1-12/.github/workflows/test.yaml 0.39.0-0ubuntu1/.github/workflows/test.yaml
--- 0.36.1-12/.github/workflows/test.yaml	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/.github/workflows/test.yaml	2020-02-02 00:00:00.000000000 +0000
@@ -29,36 +29,36 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - { py: 3.7, toxenv: py37-epolls, ignore-error: false, os: ubuntu-latest }
           - { py: 3.8, toxenv: py38-epolls, ignore-error: false, os: ubuntu-latest }
           - { py: 3.8, toxenv: py38-openssl, ignore-error: false, os: ubuntu-latest }
           - { py: 3.8, toxenv: py38-poll, ignore-error: false, os: ubuntu-latest }
           - { py: 3.8, toxenv: py38-selects, ignore-error: false, os: ubuntu-latest }
+          - { py: 3.8, toxenv: py38-asyncio, ignore-error: false, os: ubuntu-latest }
           - { py: 3.9, toxenv: py39-epolls, ignore-error: false, os: ubuntu-latest }
           - { py: 3.9, toxenv: py39-poll, ignore-error: false, os: ubuntu-latest }
           - { py: 3.9, toxenv: py39-selects, ignore-error: false, os: ubuntu-latest }
           - { py: 3.9, toxenv: py39-dnspython1, ignore-error: false, os: ubuntu-latest }
+          - { py: 3.9, toxenv: py39-asyncio, ignore-error: false, os: ubuntu-latest }
           - { py: "3.10", toxenv: py310-epolls, ignore-error: false, os: ubuntu-latest }
           - { py: "3.10", toxenv: py310-poll, ignore-error: false, os: ubuntu-latest }
           - { py: "3.10", toxenv: py310-selects, ignore-error: false, os: ubuntu-latest }
           - { py: "3.10", toxenv: ipv6, ignore-error: false, os: ubuntu-latest }
-          - { py: "3.11", toxenv: py311-epolls, ignore-error: false, os: ubuntu-latest }
-          - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
-          - { py: "3.7", toxenv: py37-asyncio, ignore-error: false, os: ubuntu-latest }
-          - { py: "3.8", toxenv: py38-asyncio, ignore-error: false, os: ubuntu-latest }
-          - { py: "3.9", toxenv: py39-asyncio, ignore-error: false, os: ubuntu-latest }
           - { py: "3.10", toxenv: py310-asyncio, ignore-error: false, os: ubuntu-latest }
+          - { py: "3.11", toxenv: py311-epolls, ignore-error: false, os: ubuntu-latest }
           - { py: "3.11", toxenv: py311-asyncio, ignore-error: false, os: ubuntu-latest }
+          - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: ubuntu-latest }
+          - { py: "3.13", toxenv: py313-epolls, ignore-error: false, os: ubuntu-24.04 }
+          - { py: "3.13", toxenv: py313-asyncio, ignore-error: false, os: ubuntu-24.04 }
           - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 }
 
     steps:
       - name: install system packages
         run: sudo apt install -y --no-install-recommends ccache libffi-dev default-libmysqlclient-dev libpq-dev libssl-dev libzmq3-dev
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: cache pip
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ matrix.toxenv }}-${{ hashFiles('.github/workflows/test.yaml', 'setup.py') }}
@@ -66,7 +66,7 @@ jobs:
             ${{ runner.os }}-pip-
             ${{ runner.os }}-
       - name: cache tox
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: .tox
           key: ${{ runner.os }}-tox-${{ matrix.toxenv }}-${{ hashFiles('tox.ini') }}
@@ -75,7 +75,7 @@ jobs:
             ${{ runner.os }}-
 
       - name: setup python ${{ matrix.py }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.py }}
       - name: install codecov, tox
@@ -98,13 +98,14 @@ jobs:
       matrix:
         include:
           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: macos-latest }
+          - { py: "3.13", toxenv: py313-asyncio, ignore-error: false, os: macos-latest }
           # This isn't working very well at the moment, but that might just be
           # tox config? In any case main focus is on asyncio so someone can
           # revisit separately.
           #- { py: "3.12", toxenv: py312-kqueue, ignore-error: false, os: macos-latest }
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: install codecov, tox
         run: pip install codecov tox
       - run: env
diff -pruN 0.36.1-12/AUTHORS 0.39.0-0ubuntu1/AUTHORS
--- 0.36.1-12/AUTHORS	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/AUTHORS	2020-02-02 00:00:00.000000000 +0000
@@ -1,12 +1,24 @@
 Maintainer (i.e., Who To Hassle If You Find Bugs)
 -------------------------------------------------
-Jakub Stasiak
-Nat Goodspeed
 
 The current maintainer(s) are volunteers with unrelated jobs.
 We can only pay sporadic attention to responding to your issue and pull request submissions.
 Your patience is greatly appreciated!
 
+Active maintainers
+~~~~~~~~~~~~~~~~~~
+
+* Itamar Turner-Trauring https://github.com/itamarst
+* Tim Burke https://github.com/tipabu
+* Hervé Beraud https://github.com/4383
+
+Less active maintainers
+~~~~~~~~~~~~~~~~~~~~~~~
+
+* Sergey Shepelev https://github.com/temoto
+* Jakub Stasiak https://github.com/jstasiak
+* Nat Goodspeed https://github.com/nat-goodspeed
+
 Original Authors
 ----------------
 * Bob Ippolito
diff -pruN 0.36.1-12/NEWS 0.39.0-0ubuntu1/NEWS
--- 0.36.1-12/NEWS	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/NEWS	2020-02-02 00:00:00.000000000 +0000
@@ -1,6 +1,45 @@
 Unreleased
 ==========
 
+0.39.0
+======
+
+* [fix] Remove monotonic from requirements (#1018)
+* [fix] wsgi: Clean up some override logic (#999)
+* [fix] Correct line lookup from inspect.getsourcelines() (#990)
+* Drop support of Python 3.7 (#967)
+* [fix] Calling eventlet.sleep(0) isn't really blocking, so don't blow up (#1015)
+
+
+0.38.2
+======
+
+* [fix] fix the monkey patching with the asyncio hub
+* [feature] introduce the unmonkeypatching feature
+
+0.38.1
+======
+
+* [fix] Python 3.13: Use greenthread's dead state where possible (#1000)
+* [env] bump github Actions (#996)
+* [fix] Fix bug where asyncio hub didn't support multiple os threads (#995)
+
+0.38.0
+======
+
+* Python 3.13 Support (#988)
+* [fix] wsgi: server MUST NOT send Content-Length/Transfer-Encoding header in response with a status code of 1xx, 204 or (2xx to CONNECT request) (#747)
+* [fix] wsgi: No request Content-Length nor Transfer-Encoding implies no body (#985)
+
+
+0.37.0
+======
+
+* [fix] os.read/write waits until file descriptor is ready. https://github.com/eventlet/eventlet/pull/975
+* [fix] Upgrade RLocks as last thing we do https://github.com/eventlet/eventlet/pull/970
+* [security] drop header keys with underscores https://github.com/eventlet/eventlet/pull/959
+* [doc] Various doc updates (Migration Guide, repair links, warns...)
+
 0.36.1
 ======
 
diff -pruN 0.36.1-12/PKG-INFO 0.39.0-0ubuntu1/PKG-INFO
--- 0.36.1-12/PKG-INFO	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/PKG-INFO	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,130 @@
+Metadata-Version: 2.4
+Name: eventlet
+Version: 0.39.0
+Summary: Highly concurrent networking library
+Project-URL: Homepage, https://github.com/eventlet/eventlet
+Project-URL: History, https://github.com/eventlet/eventlet/blob/master/NEWS
+Project-URL: Tracker, https://github.com/eventlet/eventlet/issues
+Project-URL: Source, https://github.com/eventlet/eventlet
+Project-URL: Documentation, https://eventlet.readthedocs.io/
+Author-email: Sergey Shepelev <temotor@gmail.com>, Jakub Stasiak <jakub@stasiak.at>, Tim Burke <tim.burke@gmail.com>, Nat Goodspeed <nat@lindenlab.com>, Itamar Turner-Trauring <itamar@itamarst.org>, Hervé Beraud <hberaud@redhat.com>
+License: MIT
+License-File: AUTHORS
+License-File: LICENSE
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.8
+Requires-Dist: dnspython>=1.15.0
+Requires-Dist: greenlet>=1.0
+Provides-Extra: dev
+Requires-Dist: black; extra == 'dev'
+Requires-Dist: build; extra == 'dev'
+Requires-Dist: commitizen; extra == 'dev'
+Requires-Dist: isort; extra == 'dev'
+Requires-Dist: pip-tools; extra == 'dev'
+Requires-Dist: pre-commit; extra == 'dev'
+Requires-Dist: twine; extra == 'dev'
+Description-Content-Type: text/x-rst
+
+Warning
+=======
+
+**New usages of eventlet are now heavily discouraged! Please read the
+following.**
+
+Eventlet was created almost 18 years ago, at a time when async
+features were absent from the CPython stdlib. Over time both eventlet and
+CPython evolved, but for several years the maintenance activity of eventlet
+decreased, leading to a growing gap between eventlet and the CPython
+implementation.
+
+This gap is now too wide and can lead to unexpected side effects and bugs
+in your applications.
+
+Eventlet now follows a new maintenance policy. **Only maintenance for
+stability and bug fixing** will be provided. **No new features will be
+accepted**, except those related to the asyncio migration. **Usages in new
+projects are discouraged**. **Our goal is to plan the retirement of eventlet**
+and to give you ways to move away from eventlet.
+
+If you are looking for a library to manage async network programming,
+and if you do not yet use eventlet, we encourage you to use `asyncio`_,
+which is the official async library of the CPython stdlib.
+
+If you already use eventlet, we hope to enable migration to asyncio for some use
+cases; see `Migrating off of Eventlet`_. Only new features related to the migration
+solution will be accepted.
+
+If you have questions concerning maintenance goals or concerning
+the migration, do not hesitate to `open a new issue`_; we will be happy to
+answer them.
+
+.. _asyncio: https://docs.python.org/3/library/asyncio.html
+.. _open a new issue: https://github.com/eventlet/eventlet/issues/new
+.. _Migrating off of Eventlet: https://eventlet.readthedocs.io/en/latest/asyncio/migration.html#migration-guide
+
+Eventlet
+========
+
+.. image:: https://img.shields.io/pypi/v/eventlet
+    :target: https://pypi.org/project/eventlet/
+
+.. image:: https://img.shields.io/github/actions/workflow/status/eventlet/eventlet/test.yaml?branch=master
+    :target: https://github.com/eventlet/eventlet/actions?query=workflow%3Atest+branch%3Amaster
+
+.. image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg
+    :target: https://codecov.io/gh/eventlet/eventlet
+
+
+Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it.
+
+It uses epoll or libevent for highly scalable non-blocking I/O.  Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.  The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
+
+It's easy to get started using Eventlet, and easy to convert existing
+applications to use it.  Start off by looking at the `examples`_,
+`common design patterns`_, and the list of `basic API primitives`_.
+
+.. _examples: https://eventlet.readthedocs.io/en/latest/examples.html
+.. _common design patterns: https://eventlet.readthedocs.io/en/latest/design_patterns.html
+.. _basic API primitives: https://eventlet.readthedocs.io/en/latest/basic_usage.html
+
+
+Getting Eventlet
+================
+
+The easiest way to get Eventlet is to use pip::
+
+  pip install -U eventlet
+
+To install the latest development version once::
+
+  pip install -U https://github.com/eventlet/eventlet/archive/master.zip
+
+
+Building the Docs Locally
+=========================
+
+To build a complete set of HTML documentation::
+
+  tox -e docs
+
+The built html files can be found in doc/build/html afterward.
+
+Supported Python versions
+=========================
+
+Python 3.8-3.13 are currently supported.
diff -pruN 0.36.1-12/README.rst 0.39.0-0ubuntu1/README.rst
--- 0.36.1-12/README.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/README.rst	2020-02-02 00:00:00.000000000 +0000
@@ -33,7 +33,7 @@ answer them.
 
 .. _asyncio: https://docs.python.org/3/library/asyncio.html
 .. _open a new issue: https://github.com/eventlet/eventlet/issues/new
-.. _Migrating off of Eventlet: https://eventlet.readthedocs.io/en/latest/migration.html#migration-guide
+.. _Migrating off of Eventlet: https://eventlet.readthedocs.io/en/latest/asyncio/migration.html#migration-guide
 
 Eventlet
 ========
@@ -85,4 +85,4 @@ The built html files can be found in doc
 Supported Python versions
 =========================
 
-Python 3.7-3.12 are currently supported.
+Python 3.8-3.13 are currently supported.
diff -pruN 0.36.1-12/asyncio_compat.md 0.39.0-0ubuntu1/asyncio_compat.md
--- 0.36.1-12/asyncio_compat.md	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/asyncio_compat.md	1970-01-01 00:00:00.000000000 +0000
@@ -1,96 +0,0 @@
-# Asyncio compatibility in eventlet
-
-It should be possible to:
-
-* Run eventlet and asyncio in the same thread.
-* Allow asyncio and eventlet to interact: eventlet code can use asyncio-based libraries, asyncio-based code can get results out of eventlet.
-
-If this works, it would allow migrating from eventlet to asyncio in a gradual manner both within and across projects:
-
-1. Within an OpenStack library, code could be a mixture of asyncio and eventlet code.
-   This means migration doesn't have to be done in one stop, neither in libraries nor in the applications that depend on them.
-2. Even when an OpenStack library fully migrates to asyncio, it will still be usable by anything that is still running on eventlet.
-
-## Prior art
-
-* Gevent has a similar model to eventlet.
-  There exists an integration between gevent and asyncio that follows model proposed below: https://pypi.org/project/asyncio-gevent/
-* Twisted can run on top of the asyncio event loop.
-  Separately, it includes utilities for mapping its `Deferred` objects (similar to a JavaScript Promise) to the async/await model introduced in newer versions in Python 3, and in the opposite direction it added support for turning async/await functions into `Deferred`s.
-  In an eventlet context, `GreenThread` would need a similar former of integration to Twisted's `Deferred`.
-
-## Part 1: Implementing asyncio/eventlet interoperability
-
-There are three different parts involved in integrating eventlet and asyncio for purposes
-
-### 1. Create a hub that runs on asyncio
-
-Like many networking frameworks, eventlet has pluggable event loops, in this case called a "hub". Typically hubs wrap system APIs like `select()` and `epoll()`, but there also used to be a hub that ran on Twisted.
-Creating a hub that runs on top of the asyncio event loop should be fairly straightforward.
-
-Once this is done, eventlet and asyncio code can run in the same process and the same thread, but they would still have difficulties talking to each other.
-This latter requirement requires additional work, as covered by the next two items.
-
-### 2. Calling `async def` functions from eventlet
-
-The goal is to allow something like this:
-
-```python
-import aiohttp
-from eventlet_asyncio import future_to_greenlet  # hypothetical API
-
-async def get_url_body(url):
-    async with aiohttp.ClientSession() as session:
-        async with session.get(url) as response:
-            return await response.text()
-
-def eventlet_code():
-    green_thread = future_to_greenlet(get_url_body("https://example.com"))
-    return green_thread.wait()
-```
-
-The code would presumably be similar to https://github.com/gfmio/asyncio-gevent/blob/main/asyncio_gevent/future_to_greenlet.py
-
-### 3. Calling eventlet code from asyncio
-
-The goal is to allow something like this:
-
-```python
-from urllib.request import urlopen
-from eventlet import spawn
-from eventlet_asyncio import greenlet_to_future  # hypothetical API
-
-def get_url_body(url):
-    # Looks blocking, but actually isn't
-    return urlopen(url).read()
-
-# This would likely be common pattern, so could be implemented as decorator...
-async def asyncio_code():
-    greenlet = eventlet.spawn(get_url_body, "https://example.com")
-    future = greenlet_to_future(greenlet)
-    return await future
-```
-
-The code would presumably be similar to https://github.com/gfmio/asyncio-gevent/blob/main/asyncio_gevent/future_to_greenlet.py
-
-### 4. Limitations and potential unexpected behavior
-
-``concurrent.futures.thread`` just uses normal threads, not Eventlet's special threads.
-Similarly, [``asyncio.to_thread()``](https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread) specifically requires regular blocking code, it won't work correctly with Eventlet code.
-
-## Part 2: How a port would work on a technical level
-
-### Porting a library
-
-1. Usage of eventlet-based APIs would be replaced with usage of asyncio APIs.
-   For example, `urllib` or `requests` might be replaced with [`aiohttp`](https://docs.aiohttp.org/en/stable/).
-   The interoperability above can be used to make sure this continues to work with eventlet-based APIs.
-2. Over time, APIs would need be migrated to be `async` function, but in the intermediate time frame a standard `def` can still be used, again using the interoperability layer above.
-3. Eventually all "blocking" APIs have been removed, at which point everything can be switched to `async def` and `await`, including external API, and the library will no longer depend on eventlet.
-
-### Porting an application
-
-An application would need to install the asyncio hub before kicking off eventlet.
-Beyond that porting would be the same as a library.
-
-Once all libraries are purely asyncio-based, eventlet usage can be removed and an asyncio loop run instead.
diff -pruN 0.36.1-12/debian/changelog 0.39.0-0ubuntu1/debian/changelog
--- 0.36.1-12/debian/changelog	2025-01-08 07:21:05.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/changelog	2025-02-13 12:59:01.000000000 +0000
@@ -1,8 +1,18 @@
-python-eventlet (0.36.1-12) unstable; urgency=medium
+python-eventlet (0.39.0-0ubuntu1) plucky; urgency=medium
 
-  * Add test_ssl_close to blacklist of tests (Closes: #1092379).
+  * New upstream release:
+    - d/p/*.patch: Drop misc patches included in release.
+    - d/p/openssl-3.4.0-compat.patch: Refresh to include BrokenPipeError
+      as a valid raises with OpenSSL >= 3.4.0.
 
- -- Thomas Goirand <zigo@debian.org>  Wed, 08 Jan 2025 08:21:05 +0100
+ -- James Page <james.page@ubuntu.com>  Thu, 13 Feb 2025 12:59:01 +0000
+
+python-eventlet (0.36.1-11ubuntu1) plucky; urgency=medium
+
+  * d/p/openssl-3.4.0-compat.patch: Tweak tests to be compatible
+    with new version of OpenSSL (LP: #2091540).
+
+ -- James Page <james.page@ubuntu.com>  Tue, 17 Dec 2024 14:07:00 +0000
 
 python-eventlet (0.36.1-11) unstable; urgency=medium
 
diff -pruN 0.36.1-12/debian/control 0.39.0-0ubuntu1/debian/control
--- 0.36.1-12/debian/control	2025-01-08 07:21:05.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/control	2024-12-17 14:05:43.000000000 +0000
@@ -1,7 +1,8 @@
 Source: python-eventlet
 Section: python
 Priority: optional
-Maintainer: Debian OpenStack <team+openstack@tracker.debian.org>
+Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
+XSBC-Original-Maintainer: Debian OpenStack <team+openstack@tracker.debian.org>
 Uploaders:
  Thomas Goirand <zigo@debian.org>,
 Build-Depends:
diff -pruN 0.36.1-12/debian/patches/Spew_Correct_line_lookup_from_inspect.getsourcelines.patch 0.39.0-0ubuntu1/debian/patches/Spew_Correct_line_lookup_from_inspect.getsourcelines.patch
--- 0.36.1-12/debian/patches/Spew_Correct_line_lookup_from_inspect.getsourcelines.patch	2025-01-08 07:21:05.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/patches/Spew_Correct_line_lookup_from_inspect.getsourcelines.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,53 +0,0 @@
-Description: Spew: Correct line lookup from inspect.getsourcelines()
- I can't write a test for this, as I don't know how to trigger is part of
- Spew, without also falling into the OSError branch (which
- test_line_nofile tests).
- .
- However, it has been observed to hit this code when running the tests
- under pytest-xdist (execnext), and the code is obviously wrong. This
- corrects it.
-Author: Stefano Rivera <stefano@rivera.za.net>
-Date: Fri, 15 Nov 2024 11:37:21 +0100
-Origin: upstream, https://patch-diff.githubusercontent.com/raw/eventlet/eventlet/pull/990.patch
-Last-Update: 2024-11-15
-
-diff --git a/eventlet/debug.py b/eventlet/debug.py
-index befceb9d2..f78e2f807 100644
---- a/eventlet/debug.py
-+++ b/eventlet/debug.py
-@@ -35,8 +35,12 @@ def __call__(self, frame, event, arg):
-             else:
-                 name = '[unknown]'
-                 try:
--                    src = inspect.getsourcelines(frame)
--                    line = src[lineno]
-+                    src, offset = inspect.getsourcelines(frame)
-+                    # The first line is line 1
-+                    # But 0 may be returned when executing module-level code
-+                    if offset == 0:
-+                        offset = 1
-+                    line = src[lineno - offset]
-                 except OSError:
-                     line = 'Unknown code named [%s].  VM instruction #%d' % (
-                         frame.f_code.co_name, frame.f_lasti)
-
-diff --git a/tests/debug_test.py b/tests/debug_test.py
-index f78a5283e..ca82aea5d 100644
---- a/tests/debug_test.py
-+++ b/tests/debug_test.py
-@@ -1,4 +1,5 @@
- import io
-+import os
- import sys
- 
- from eventlet import debug
-@@ -50,7 +51,8 @@ def test_line_nofile(self):
-         s(f, "line", None)
-         output = sys.stdout.getvalue()
-         assert "[unknown]:%i" % lineno in output, "Didn't find [unknown]:%i in %s" % (lineno, output)
--        assert "VM instruction #" in output, output
-+        if "PYTEST_XDIST_WORKER" not in os.environ:
-+            assert "VM instruction #" in output, output
- 
-     def test_line_global(self):
-         frame_str = "f=<frame at"
diff -pruN 0.36.1-12/debian/patches/Use_greenthread_s_dead_state_where_possible.patch 0.39.0-0ubuntu1/debian/patches/Use_greenthread_s_dead_state_where_possible.patch
--- 0.36.1-12/debian/patches/Use_greenthread_s_dead_state_where_possible.patch	2025-01-08 07:21:05.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/patches/Use_greenthread_s_dead_state_where_possible.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,77 +0,0 @@
-Author: Stefano Rivera <stefano@rivera.za.net>
-Date: Mon, 9 Dec 2024 13:33:42 -0400
-Description: Python 3.13: Use greenthread's dead state where possible
-Fixes: #998
-Origin: upstream, https://patch-diff.githubusercontent.com/raw/eventlet/eventlet/pull/1000.patch
-Last-Update: 2024-12-10
-
-Index: python-eventlet/eventlet/green/thread.py
-===================================================================
---- python-eventlet.orig/eventlet/green/thread.py
-+++ python-eventlet/eventlet/green/thread.py
-@@ -59,6 +59,8 @@ class _ThreadHandle:
-         self._done = True
- 
-     def is_done(self):
-+        if self._greenthread is not None:
-+            return self._greenthread.dead
-         return self._done
- 
-     @property
-Index: python-eventlet/tests/isolated/patcher_threading_subclass_done.py
-===================================================================
---- /dev/null
-+++ python-eventlet/tests/isolated/patcher_threading_subclass_done.py
-@@ -0,0 +1,40 @@
-+import queue
-+import threading
-+
-+
-+class Worker(threading.Thread):
-+    EXIT_SENTINEL = object()
-+
-+    def __init__(self, *args, **kwargs):
-+        super().__init__(*args, **kwargs)
-+        self.q = queue.Queue(maxsize=-1)
-+        self.daemon = True
-+
-+    def run(self):
-+        while True:
-+            task = self.q.get()
-+            if task == self.EXIT_SENTINEL:
-+                break
-+            print(f"Treating task {task}")
-+            # Pretend to work
-+
-+    def submit(self, job):
-+        self.q.put(job)
-+
-+    def terminate(self):
-+        self.q.put(self.EXIT_SENTINEL)
-+        self.join()
-+
-+
-+if __name__ == "__main__":
-+    import eventlet
-+    eventlet.patcher.monkey_patch()
-+
-+    worker = Worker()
-+    assert not worker.is_alive()
-+    worker.start()
-+    assert worker.is_alive()
-+    worker.submit(1)
-+    worker.terminate()
-+    assert not worker.is_alive()
-+    print("pass")
-Index: python-eventlet/tests/patcher_test.py
-===================================================================
---- python-eventlet.orig/tests/patcher_test.py
-+++ python-eventlet/tests/patcher_test.py
-@@ -529,3 +529,7 @@ def test_patcher_existing_locks():
- 
- def test_patcher_existing_locks_exception():
-     tests.run_isolated("patcher_existing_locks_exception.py")
-+
-+
-+def test_patcher_threading_subclass_done():
-+    tests.run_isolated("patcher_threading_subclass_done.py")
diff -pruN 0.36.1-12/debian/patches/openssl-3.4.0-compat.patch 0.39.0-0ubuntu1/debian/patches/openssl-3.4.0-compat.patch
--- 0.36.1-12/debian/patches/openssl-3.4.0-compat.patch	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/patches/openssl-3.4.0-compat.patch	2025-02-13 12:51:28.000000000 +0000
@@ -0,0 +1,20 @@
+Description: OpenSSL >= 3.4.0 compatibility
+ Resolve test compatibility issues with newer versions
+ of OpenSSL - as the server is shut down, OpenSSL returns
+ a [SYS] unknown error which is handled differently and
+ raised as an SSLError exception.
+Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/python-eventlet/+bug/2091540
+Forwarded: no
+
+--- a/tests/ssl_test.py
++++ b/tests/ssl_test.py
+@@ -80,7 +80,8 @@
+             sock.recv(8192)
+             try:
+                 self.assertEqual(b'', sock.recv(8192))
+-            except greenio.SSL.ZeroReturnError:
++            except (greenio.SSL.ZeroReturnError,
++                    ssl.SSLError, BrokenPipeError):
+                 pass
+ 
+         sock = listen_ssl_socket()
diff -pruN 0.36.1-12/debian/patches/python3.13-support.patch 0.39.0-0ubuntu1/debian/patches/python3.13-support.patch
--- 0.36.1-12/debian/patches/python3.13-support.patch	2025-01-08 07:21:05.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/patches/python3.13-support.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,419 +0,0 @@
-Description: [PATCH 1/9] reorder python jobs by python versions
-From a79594aaa5b63b3d1e3fd4bfcb2f7c6f41770dfe Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Herv=C3=A9=20Beraud?= <hberaud@redhat.com>
-Date: Mon, 10 Jun 2024 15:57:42 +0200
-Origin: upstream, https://github.com/eventlet/eventlet/pull/988/files
-Last-Update: 2024-11-15
-
-diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
-index c14cf29f4..bc0863609 100644
---- a/.github/workflows/test.yaml
-+++ b/.github/workflows/test.yaml
-@@ -30,25 +30,25 @@ jobs:
-       matrix:
-         include:
-           - { py: 3.7, toxenv: py37-epolls, ignore-error: false, os: ubuntu-latest }
-+          - { py: 3.7, toxenv: py37-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: 3.8, toxenv: py38-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: 3.8, toxenv: py38-openssl, ignore-error: false, os: ubuntu-latest }
-           - { py: 3.8, toxenv: py38-poll, ignore-error: false, os: ubuntu-latest }
-           - { py: 3.8, toxenv: py38-selects, ignore-error: false, os: ubuntu-latest }
-+          - { py: 3.8, toxenv: py38-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: 3.9, toxenv: py39-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: 3.9, toxenv: py39-poll, ignore-error: false, os: ubuntu-latest }
-           - { py: 3.9, toxenv: py39-selects, ignore-error: false, os: ubuntu-latest }
-           - { py: 3.9, toxenv: py39-dnspython1, ignore-error: false, os: ubuntu-latest }
-+          - { py: 3.9, toxenv: py39-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.10", toxenv: py310-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.10", toxenv: py310-poll, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.10", toxenv: py310-selects, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.10", toxenv: ipv6, ignore-error: false, os: ubuntu-latest }
--          - { py: "3.11", toxenv: py311-epolls, ignore-error: false, os: ubuntu-latest }
--          - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
--          - { py: "3.7", toxenv: py37-asyncio, ignore-error: false, os: ubuntu-latest }
--          - { py: "3.8", toxenv: py38-asyncio, ignore-error: false, os: ubuntu-latest }
--          - { py: "3.9", toxenv: py39-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.10", toxenv: py310-asyncio, ignore-error: false, os: ubuntu-latest }
-+          - { py: "3.11", toxenv: py311-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.11", toxenv: py311-asyncio, ignore-error: false, os: ubuntu-latest }
-+          - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 }
- 
-
-From 4928b947cfefee1bae1ecd5485029d3f332aa03d Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Herv=C3=A9=20Beraud?= <hberaud@redhat.com>
-Date: Mon, 10 Jun 2024 16:40:00 +0200
-Subject: [PATCH 2/9] Supporting Python 3.13
-
----
- .github/workflows/test.yaml | 3 +++
- README.rst                  | 2 +-
- pyproject.toml              | 1 +
- 3 files changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
-index bc0863609..06ddea38c 100644
---- a/.github/workflows/test.yaml
-+++ b/.github/workflows/test.yaml
-@@ -50,6 +50,8 @@ jobs:
-           - { py: "3.11", toxenv: py311-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: ubuntu-latest }
-+          - { py: "3.13", toxenv: py313-epolls, ignore-error: false, os:ubuntu-24.04 }
-+          - { py: "3.13", toxenv: py313-asyncio, ignore-error: false, os:ubuntu-24.04 }
-           - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 }
- 
-     steps:
-@@ -98,6 +100,7 @@ jobs:
-       matrix:
-         include:
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: macos-latest }
-+          - { py: "3.13", toxenv: py313-asyncio, ignore-error: false, os: macos-latest }
-           # This isn't working very well at the moment, but that might just be
-           # tox config? In any case main focus is on asyncio so someone can
-           # revisit separately.
-diff --git a/README.rst b/README.rst
-index 131255822..fad501597 100644
---- a/README.rst
-+++ b/README.rst
-@@ -85,4 +85,4 @@ The built html files can be found in doc/build/html afterward.
- Supported Python versions
- =========================
- 
--Python 3.7-3.12 are currently supported.
-+Python 3.7-3.13 are currently supported.
-diff --git a/pyproject.toml b/pyproject.toml
-index cabd66605..da72c8a12 100644
---- a/pyproject.toml
-+++ b/pyproject.toml
-@@ -33,6 +33,7 @@ classifiers = [
-     "Programming Language :: Python :: 3.10",
-     "Programming Language :: Python :: 3.11",
-     "Programming Language :: Python :: 3.12",
-+    "Programming Language :: Python :: 3.13",
-     "Programming Language :: Python",
-     "Topic :: Internet",
-     "Topic :: Software Development :: Libraries :: Python Modules",
-
-From 246724b8a883f94f096c72c205b430088385857d Mon Sep 17 00:00:00 2001
-From: Itamar Turner-Trauring <itamar@itamarst.org>
-Date: Tue, 11 Jun 2024 08:27:10 -0400
-Subject: [PATCH 3/9] Fix YAML syntax
-
----
- .github/workflows/test.yaml | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
-index 06ddea38c..350082646 100644
---- a/.github/workflows/test.yaml
-+++ b/.github/workflows/test.yaml
-@@ -50,8 +50,8 @@ jobs:
-           - { py: "3.11", toxenv: py311-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: ubuntu-latest }
--          - { py: "3.13", toxenv: py313-epolls, ignore-error: false, os:ubuntu-24.04 }
--          - { py: "3.13", toxenv: py313-asyncio, ignore-error: false, os:ubuntu-24.04 }
-+          - { py: "3.13", toxenv: py313-epolls, ignore-error: false, os: ubuntu-24.04 }
-+          - { py: "3.13", toxenv: py313-asyncio, ignore-error: false, os: ubuntu-24.04 }
-           - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 }
- 
-     steps:
-
-From b5d55a31c2e4e2d5e0061beb61714297625c09eb Mon Sep 17 00:00:00 2001
-From: Itamar Turner-Trauring <itamar@itamarst.org>
-Date: Tue, 11 Jun 2024 08:29:46 -0400
-Subject: [PATCH 4/9] Explicit Python 3.13 version until it's out
-
----
- .github/workflows/test.yaml | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
-index 350082646..743ad2f01 100644
---- a/.github/workflows/test.yaml
-+++ b/.github/workflows/test.yaml
-@@ -50,8 +50,8 @@ jobs:
-           - { py: "3.11", toxenv: py311-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: ubuntu-latest }
--          - { py: "3.13", toxenv: py313-epolls, ignore-error: false, os: ubuntu-24.04 }
--          - { py: "3.13", toxenv: py313-asyncio, ignore-error: false, os: ubuntu-24.04 }
-+          - { py: "3.13.0-beta.2", toxenv: py313-epolls, ignore-error: false, os: ubuntu-24.04 }
-+          - { py: " 3.13.0-beta.2", toxenv: py313-asyncio, ignore-error: false, os: ubuntu-24.04 }
-           - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 }
- 
-     steps:
-@@ -100,7 +100,7 @@ jobs:
-       matrix:
-         include:
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: macos-latest }
--          - { py: "3.13", toxenv: py313-asyncio, ignore-error: false, os: macos-latest }
-+          - { py: "3.13.0-beta.2", toxenv: py313-asyncio, ignore-error: false, os: macos-latest }
-           # This isn't working very well at the moment, but that might just be
-           # tox config? In any case main focus is on asyncio so someone can
-           # revisit separately.
-
-From c4725739c1f3873ad8132fc272c343a476590d02 Mon Sep 17 00:00:00 2001
-From: Itamar Turner-Trauring <itamar@itamarst.org>
-Date: Tue, 11 Jun 2024 08:31:12 -0400
-Subject: [PATCH 5/9] fix typo
-
----
- .github/workflows/test.yaml | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
-index 743ad2f01..e620bf30a 100644
---- a/.github/workflows/test.yaml
-+++ b/.github/workflows/test.yaml
-@@ -51,7 +51,7 @@ jobs:
-           - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.13.0-beta.2", toxenv: py313-epolls, ignore-error: false, os: ubuntu-24.04 }
--          - { py: " 3.13.0-beta.2", toxenv: py313-asyncio, ignore-error: false, os: ubuntu-24.04 }
-+          - { py: "3.13.0-beta.2", toxenv: py313-asyncio, ignore-error: false, os: ubuntu-24.04 }
-           - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 }
- 
-     steps:
-
-From 2b3838a11c49e7613fca12781192dc1986b4b89e Mon Sep 17 00:00:00 2001
-From: Itamar Turner-Trauring <itamar@itamarst.org>
-Date: Wed, 11 Sep 2024 08:11:49 -0400
-Subject: [PATCH 6/9] Use latest 3.13 prerelease
-
----
- .github/workflows/test.yaml | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
-index e620bf30a..af8c8cc0c 100644
---- a/.github/workflows/test.yaml
-+++ b/.github/workflows/test.yaml
-@@ -50,8 +50,8 @@ jobs:
-           - { py: "3.11", toxenv: py311-asyncio, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: ubuntu-latest }
--          - { py: "3.13.0-beta.2", toxenv: py313-epolls, ignore-error: false, os: ubuntu-24.04 }
--          - { py: "3.13.0-beta.2", toxenv: py313-asyncio, ignore-error: false, os: ubuntu-24.04 }
-+          - { py: "3.13-dev", toxenv: py313-epolls, ignore-error: false, os: ubuntu-24.04 }
-+          - { py: "3.13-dev", toxenv: py313-asyncio, ignore-error: false, os: ubuntu-24.04 }
-           - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 }
- 
-     steps:
-@@ -100,7 +100,7 @@ jobs:
-       matrix:
-         include:
-           - { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: macos-latest }
--          - { py: "3.13.0-beta.2", toxenv: py313-asyncio, ignore-error: false, os: macos-latest }
-+          - { py: "3.13-dev", toxenv: py313-asyncio, ignore-error: false, os: macos-latest }
-           # This isn't working very well at the moment, but that might just be
-           # tox config? In any case main focus is on asyncio so someone can
-           # revisit separately.
-
-From 59fae9d7e0607cfadb962eca80042f49ea8d53cc Mon Sep 17 00:00:00 2001
-From: Stefano Rivera <stefano@rivera.za.net>
-Date: Wed, 6 Nov 2024 21:30:29 -0800
-Subject: [PATCH 7/9] Python 3.13 support
-
-Emulate Python 3.13's start_joinable_thread API using greenthreads.
-
-We cut some corners, of course:
-* We aren't maintaining a table of green thread idents to threads, so we
-  can't wait for all threads on shutdown.
-* Our _make_thread_handle() can only make a handle for the current
-  thread (as we don't have a way to look up green threads by ident).
-* .join() on a non-GreenThread (e.g. the main thread) just returns
-  immediately.
-
-Fixes: #964
----
- eventlet/green/thread.py    | 66 ++++++++++++++++++++++++++++++++++---
- eventlet/green/threading.py |  7 ++--
- 2 files changed, 65 insertions(+), 8 deletions(-)
-
-diff --git a/eventlet/green/thread.py b/eventlet/green/thread.py
-index 053a1c3c6..e9c4f3830 100644
---- a/eventlet/green/thread.py
-+++ b/eventlet/green/thread.py
-@@ -2,13 +2,16 @@
- import _thread as __thread
- from eventlet.support import greenlets as greenlet
- from eventlet import greenthread
-+from eventlet.timeout import with_timeout
- from eventlet.lock import Lock
- import sys
- 
- 
--__patched__ = ['get_ident', 'start_new_thread', 'start_new', 'allocate_lock',
--               'allocate', 'exit', 'interrupt_main', 'stack_size', '_local',
--               'LockType', 'Lock', '_count']
-+__patched__ = ['Lock', 'LockType', '_ThreadHandle', '_count',
-+               '_get_main_thread_ident', '_local', '_make_thread_handle',
-+               'allocate', 'allocate_lock', 'exit', 'get_ident',
-+               'interrupt_main', 'stack_size', 'start_joinable_thread',
-+               'start_new', 'start_new_thread']
- 
- error = __thread.error
- LockType = Lock
-@@ -47,7 +50,36 @@ def __thread_body(func, args, kwargs):
-         __threadcount -= 1
- 
- 
--def start_new_thread(function, args=(), kwargs=None):
-+class _ThreadHandle:
-+    def __init__(self, greenthread=None):
-+        self._greenthread = greenthread
-+        self._done = False
-+
-+    def _set_done(self):
-+        self._done = True
-+
-+    def is_done(self):
-+        return self._done
-+
-+    @property
-+    def ident(self):
-+        return get_ident(self._greenthread)
-+
-+    def join(self, timeout=None):
-+        if not hasattr(self._greenthread, "wait"):
-+            return
-+        if timeout is not None:
-+            return with_timeout(timeout, self._greenthread.wait)
-+        return self._greenthread.wait()
-+
-+
-+def _make_thread_handle(ident):
-+    greenthread = greenlet.getcurrent()
-+    assert ident == get_ident(greenthread)
-+    return _ThreadHandle(greenthread=greenthread)
-+
-+
-+def __spawn_green(function, args=(), kwargs=None, joinable=False):
-     if (sys.version_info >= (3, 4)
-             and getattr(function, '__module__', '') == 'threading'
-             and hasattr(function, '__self__')):
-@@ -72,13 +104,34 @@ def wrap_bootstrap_inner():
-         thread._bootstrap_inner = wrap_bootstrap_inner
- 
-     kwargs = kwargs or {}
--    g = greenthread.spawn_n(__thread_body, function, args, kwargs)
-+    spawn_func = greenthread.spawn if joinable else greenthread.spawn_n
-+    return spawn_func(__thread_body, function, args, kwargs)
-+
-+
-+def start_joinable_thread(function, handle=None, daemon=True):
-+    g = __spawn_green(function, joinable=True)
-+    if handle is None:
-+        handle = _ThreadHandle(greenthread=g)
-+    else:
-+        handle._greenthread = g
-+    return handle
-+
-+
-+def start_new_thread(function, args=(), kwargs=None):
-+    g = __spawn_green(function, args=args, kwargs=kwargs)
-     return get_ident(g)
- 
- 
- start_new = start_new_thread
- 
- 
-+def _get_main_thread_ident():
-+    greenthread = greenlet.getcurrent()
-+    while greenthread.parent is not None:
-+        greenthread = greenthread.parent
-+    return get_ident(greenthread)
-+
-+
- def allocate_lock(*a):
-     return LockType(1)
- 
-@@ -118,3 +171,6 @@ def stack_size(size=None):
- 
- if hasattr(__thread, 'daemon_threads_allowed'):
-     daemon_threads_allowed = __thread.daemon_threads_allowed
-+
-+if hasattr(__thread, '_shutdown'):
-+    _shutdown = __thread._shutdown
-diff --git a/eventlet/green/threading.py b/eventlet/green/threading.py
-index 7ea20cdad..4be776682 100644
---- a/eventlet/green/threading.py
-+++ b/eventlet/green/threading.py
-@@ -4,9 +4,10 @@
- from eventlet.green import time
- from eventlet.support import greenlets as greenlet
- 
--__patched__ = ['_start_new_thread', '_allocate_lock',
--               '_sleep', 'local', 'stack_size', 'Lock', 'currentThread',
--               'current_thread', '_after_fork', '_shutdown']
-+__patched__ = ['Lock', '_after_fork', '_allocate_lock', '_get_main_thread_ident',
-+               '_make_thread_handle', '_shutdown', '_sleep',
-+               '_start_joinable_thread', '_start_new_thread', '_ThreadHandle',
-+               'currentThread', 'current_thread', 'local', 'stack_size']
- 
- __patched__ += ['get_ident', '_set_sentinel']
- 
-
-From 619fe81a15c3f3b8f2a36f210fcf902e0ee06643 Mon Sep 17 00:00:00 2001
-From: Stefano Rivera <stefano@rivera.za.net>
-Date: Thu, 7 Nov 2024 14:38:01 -0800
-Subject: [PATCH 8/9] _tstate_lock was removed in Python 3.13
-
-In python/cpython#114271, _tstate_lock was replaced with an event on
-PyThreadState.
----
- eventlet/green/thread.py | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/eventlet/green/thread.py b/eventlet/green/thread.py
-index e9c4f3830..ef723ff46 100644
---- a/eventlet/green/thread.py
-+++ b/eventlet/green/thread.py
-@@ -80,10 +80,10 @@ def _make_thread_handle(ident):
- 
- 
- def __spawn_green(function, args=(), kwargs=None, joinable=False):
--    if (sys.version_info >= (3, 4)
-+    if ((3, 4) <= sys.version_info < (3, 13)
-             and getattr(function, '__module__', '') == 'threading'
-             and hasattr(function, '__self__')):
--        # Since Python 3.4, threading.Thread uses an internal lock
-+        # In Python 3.4-3.12, threading.Thread uses an internal lock
-         # automatically released when the python thread state is deleted.
-         # With monkey patching, eventlet uses green threads without python
-         # thread state, so the lock is not automatically released.
-@@ -98,7 +98,7 @@ def wrap_bootstrap_inner():
-                 bootstrap_inner()
-             finally:
-                 # The lock can be cleared (ex: by a fork())
--                if thread._tstate_lock is not None:
-+                if getattr(thread, "_tstate_lock", None) is not None:
-                     thread._tstate_lock.release()
- 
-         thread._bootstrap_inner = wrap_bootstrap_inner
-
-From 53d2cde453218a2544a529176aeb5bbe34ad7b49 Mon Sep 17 00:00:00 2001
-From: Stefano Rivera <stefano@rivera.za.net>
-Date: Wed, 6 Nov 2024 21:37:49 -0800
-Subject: [PATCH 9/9] Add Python 3.13 to tox
-
----
- tox.ini | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/tox.ini b/tox.ini
-index d81a0db5e..3f1f84c9d 100644
---- a/tox.ini
-+++ b/tox.ini
-@@ -16,7 +16,7 @@ envlist =
-     py38-openssl
-     py39-dnspython1
-     pypy3-epolls
--    py{38,39,310,311,312}-{selects,poll,epolls,asyncio}
-+    py{38,39,310,311,312,313}-{selects,poll,epolls,asyncio}
- skipsdist = True
- 
- [testenv:ipv6]
diff -pruN 0.36.1-12/debian/patches/series 0.39.0-0ubuntu1/debian/patches/series
--- 0.36.1-12/debian/patches/series	2025-01-08 07:21:05.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/patches/series	2025-02-13 12:45:31.000000000 +0000
@@ -15,6 +15,4 @@ neutralize-test_017_ssl_zeroreturnerror.
 #use-raw-strings-to-avoid-warnings.patch
 install-all-files.patch
 fix-detecting-version.patch
-python3.13-support.patch
-Spew_Correct_line_lookup_from_inspect.getsourcelines.patch
-Use_greenthread_s_dead_state_where_possible.patch
+openssl-3.4.0-compat.patch
diff -pruN 0.36.1-12/debian/rules 0.39.0-0ubuntu1/debian/rules
--- 0.36.1-12/debian/rules	2025-01-08 07:21:05.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/rules	2024-12-17 07:19:23.000000000 +0000
@@ -50,6 +50,6 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD
 	# test_orig_thread
 	# See #1090270
 	set -e ; set -x ; for i in $(shell py3versions -vr 2>/dev/null) ; do \
-		PYTHONPATH=. PYTHON=python$$i python$$i -m pytest tests -v -n `nproc` -k ' not test_fork_after_monkey_patch and not test_cancel_proportion and not test_clear and not test_noraise_dns_tcp and not test_raise_dns_tcp and not test_dns_methods_are_green and not test_orig_thread and not test_send_1k_pub_sub and not test_ssl_close' ; \
+		PYTHONPATH=. PYTHON=python$$i python$$i -m pytest tests -v -n `nproc` -k ' not test_fork_after_monkey_patch and not test_cancel_proportion and not test_clear and not test_noraise_dns_tcp and not test_raise_dns_tcp and not test_dns_methods_are_green and not test_orig_thread and not test_send_1k_pub_sub' ; \
 	done
 endif
diff -pruN 0.36.1-12/debian/tests/unittests 0.39.0-0ubuntu1/debian/tests/unittests
--- 0.36.1-12/debian/tests/unittests	2025-01-08 07:21:05.000000000 +0000
+++ 0.39.0-0ubuntu1/debian/tests/unittests	2024-12-17 07:19:23.000000000 +0000
@@ -6,5 +6,5 @@ PYTHON3S=$(py3versions -vr 2>/dev/null)
 for i in ${PYTHON3S} ; do
 	python${i} setup.py install --install-layout=deb --root ${CWD}/debian/tmp
 	PYTHONPATH=${CWD}/debian/tmp/usr/lib/python3/dist-packages \
-		PYTHON=python${i} python${i} -m pytest tests -v -n `nproc` -k ' not test_fork_after_monkey_patch and not test_cancel_proportion and not test_clear and not test_noraise_dns_tcp and not test_raise_dns_tcp and not test_dns_methods_are_green and not test_orig_thread and not test_send_1k_pub_sub and not test_ssl_close'
+		PYTHON=python${i} python${i} -m pytest tests -v -n `nproc` -k ' not test_fork_after_monkey_patch and not test_cancel_proportion and not test_clear and not test_noraise_dns_tcp and not test_raise_dns_tcp and not test_dns_methods_are_green and not test_orig_thread and not test_send_1k_pub_sub'
 done
diff -pruN 0.36.1-12/doc/source/asyncio/asyncio.rst 0.39.0-0ubuntu1/doc/source/asyncio/asyncio.rst
--- 0.36.1-12/doc/source/asyncio/asyncio.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/asyncio/asyncio.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,70 @@
+.. _asyncio-index:
+
+Asyncio in Eventlet
+###################
+
+Asyncio Compatibility
+=====================
+
+Compatibility between Asyncio and Eventlet has been recently introduced.
+
+You may be interested in the current state of this compatibility and in
+its potential limitations, so please take a look at
+:ref:`asyncio-compatibility`.
+
+Asyncio Hub & Functions
+=======================
+
+Discover the :mod:`Asyncio Hub <eventlet.hubs.asyncio>`
+
+You may also want to take a look at the
+:mod:`Asyncio compatibility functions <eventlet.asyncio>`.
+
+Migrating from Eventlet to Asyncio
+==================================
+
+Why Migrate?
+------------
+
+Eventlet is a broken and outdated technology.
+
+Eventlet was created almost 20 years ago (see the :ref:`history` of Eventlet),
+at a time when Python did not provide non-blocking features.
+
+Time passed, and Python now provides AsyncIO.
+
+In parallel with the evolution of Python, the maintenance of Eventlet was
+discontinued across several versions of Python, increasing the gap between
+Eventlet's monkey patching and recent Python implementations.
+
+This gap is now unrecoverable. For this reason, we decided to officially
+abandon the maintenance of Eventlet in an incremental way.
+
+In a final effort, we want to lead Eventlet to a well-deserved rest.
+Our goal is to provide you with a guide to migrate off of Eventlet and then
+to properly retire Eventlet.
+
+For more details about the reasons that motivated this effort, we invite
+readers to see the discussions related to this planned retirement:
+
+https://review.opendev.org/c/openstack/governance/+/902585
+
+Getting Started
+---------------
+
+Want to use Asyncio and Eventlet together, or do you simply want to migrate
+off of Eventlet?
+
+Follow the :ref:`official migration guide <migration-guide>`.
+
+We encourage readers to first look at the :ref:`glossary_guide` to
+learn about the various terms that may be encountered during the migration.
+
+Alternatives & Tips
+-------------------
+
+Want to refactor your code to replace Eventlet usages? See the proposed
+alternatives and tips:
+
+- :ref:`awaitlet_alternative`
+- :ref:`manage-your-deprecations`
diff -pruN 0.36.1-12/doc/source/asyncio/compatibility.rst 0.39.0-0ubuntu1/doc/source/asyncio/compatibility.rst
--- 0.36.1-12/doc/source/asyncio/compatibility.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/asyncio/compatibility.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,115 @@
+.. _asyncio-compatibility:
+
+Asyncio compatibility in eventlet
+#################################
+
+It should be possible to:
+
+* Run eventlet and asyncio in the same thread.
+* Allow asyncio and eventlet to interact: eventlet code can use asyncio-based libraries, asyncio-based code can get results out of eventlet.
+
+If this works, it would allow migrating from eventlet to asyncio in a gradual manner both within and across projects:
+
+1. Within an OpenStack library, code could be a mixture of asyncio and eventlet code.
+   This means migration doesn't have to be done in one step, neither in libraries nor in the applications that depend on them.
+2. Even when an OpenStack library fully migrates to asyncio, it will still be usable by anything that is still running on eventlet.
+
+Prior art
+=========
+
+* Gevent has a similar model to eventlet.
+  There exists an integration between gevent and asyncio that follows the model proposed below: https://pypi.org/project/asyncio-gevent/
+* Twisted can run on top of the asyncio event loop.
+  Separately, it includes utilities for mapping its `Deferred` objects (similar to a JavaScript Promise) to the async/await model introduced in newer versions of Python 3, and in the opposite direction it added support for turning async/await functions into `Deferred`s.
+  In an eventlet context, `GreenThread` would need a similar form of integration to Twisted's `Deferred`.
+
+Part 1: Implementing asyncio/eventlet interoperability
+======================================================
+
+There are three different parts involved in integrating eventlet and asyncio for these purposes:
+
+1. Create a hub that runs on asyncio
+------------------------------------
+
+Like many networking frameworks, eventlet has pluggable event loops, in this case called a "hub". Typically hubs wrap system APIs like `select()` and `epoll()`, but there also used to be a hub that ran on Twisted.
+Creating a hub that runs on top of the asyncio event loop should be fairly straightforward.
+
+Once this is done, eventlet and asyncio code can run in the same process and the same thread, but they would still have difficulties talking to each other.
+Making them interact requires additional work, as covered by the next two items.
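+
+As a point of reference, hubs are already selectable at runtime; the sketch
+below uses the existing ``eventlet.hubs.use_hub()`` mechanism and assumes an
+asyncio hub would plug into it the same way:
+
+.. code::
+
+    import eventlet.hubs
+
+    # Pick a hub by name before other eventlet machinery starts; by default
+    # eventlet chooses one automatically (e.g. epolls on Linux).
+    eventlet.hubs.use_hub("poll")
+
+    # The active hub object drives green thread scheduling and I/O waits.
+    print(eventlet.hubs.get_hub())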
+
+2. Calling `async def` functions from eventlet
+----------------------------------------------
+
+The goal is to allow something like this:
+
+.. code::
+
+    import aiohttp
+    from eventlet_asyncio import future_to_greenlet  # hypothetical API
+    
+    async def get_url_body(url):
+        async with aiohttp.ClientSession() as session:
+            async with session.get(url) as response:
+                return await response.text()
+    
+    def eventlet_code():
+        green_thread = future_to_greenlet(get_url_body("https://example.com"))
+        return green_thread.wait()
+
+The code would presumably be similar to https://github.com/gfmio/asyncio-gevent/blob/main/asyncio_gevent/future_to_greenlet.py
+
+3. Calling eventlet code from asyncio
+-------------------------------------
+
+The goal is to allow something like this:
+
+.. code::
+
+    from urllib.request import urlopen
+    import eventlet
+    from eventlet_asyncio import greenlet_to_future  # hypothetical API
+    
+    def get_url_body(url):
+        # Looks blocking, but actually isn't
+        return urlopen(url).read()
+    
+    # This would likely be a common pattern, so it could be implemented as a decorator...
+    async def asyncio_code():
+        greenlet = eventlet.spawn(get_url_body, "https://example.com")
+        future = greenlet_to_future(greenlet)
+        return await future
+
+The code would presumably be similar to https://github.com/gfmio/asyncio-gevent/blob/main/asyncio_gevent/future_to_greenlet.py
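+
+In practice, with the asyncio hub installed, any
+``eventlet.greenthread.GreenThread`` can simply be ``await``-ed from ``async``
+code (see :ref:`migration-guide`). A minimal sketch reusing the blocking
+``get_url_body`` above::
+
+    import eventlet
+
+    async def asyncio_code():
+        # GreenThreads returned by eventlet.spawn() are directly awaitable.
+        green_thread = eventlet.spawn(get_url_body, "https://example.com")
+        return await green_thread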
+
+4. Limitations and potential unexpected behavior
+------------------------------------------------
+
+``concurrent.futures.thread`` just uses normal threads, not Eventlet's special threads.
+Similarly, `asyncio.to_thread() <https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread>`_
+specifically requires regular blocking code; it won't work correctly with Eventlet code.
+
+Multiple readers are not supported by the Asyncio hub.
+
+Part 2: How a port would work on a technical level
+==================================================
+
+Porting a library
+-----------------
+
+1. Usage of eventlet-based APIs would be replaced with usage of asyncio APIs.
+   For example, `urllib` or `requests` might be replaced with `aiohttp <https://docs.aiohttp.org/en/stable/>`_.
+   The interoperability above can be used to make sure this continues to work with eventlet-based APIs.
+
+   The `awesome-asyncio <https://github.com/timofurrer/awesome-asyncio>`_ github repository offers a curated list of
+   Python asyncio frameworks, libraries, software and resources. Do not hesitate to take a look at it. You may find
+   asyncio-compatible candidates that can replace some of your current underlying libraries.
+2. Over time, APIs would need to be migrated to `async` functions, but in the intermediate time frame a standard `def` can still be used, again relying on the interoperability layer above (see the sketch after this list).
+3. Eventually all "blocking" APIs will have been removed, at which point everything can be switched to `async def` and `await`, including the external API, and the library will no longer depend on eventlet.
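+
+As an illustration of the intermediate stage, a library function can already
+be implemented with ``aiohttp`` internally while still exposing a blocking
+signature to eventlet-based callers. This is a minimal sketch assuming the
+asyncio hub is installed; ``fetch_body`` and ``_fetch_body_async`` are
+illustrative names, not an existing API::
+
+    import aiohttp
+    from eventlet.asyncio import spawn_for_awaitable
+
+    async def _fetch_body_async(url):
+        # New asyncio-based implementation of the library internals.
+        async with aiohttp.ClientSession() as session:
+            async with session.get(url) as response:
+                return await response.text()
+
+    def fetch_body(url):
+        # Blocking facade kept during the transition so that eventlet-based
+        # callers keep working unchanged.
+        return spawn_for_awaitable(_fetch_body_async(url)).wait()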
+
+Porting an application
+----------------------
+
+An application would need to install the asyncio hub before kicking off eventlet.
+Beyond that, porting would be the same as for a library.
+
+Once all libraries are purely asyncio-based, eventlet usage can be removed and an asyncio loop run instead.
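+
+A minimal sketch of the startup order described above (select the asyncio hub
+first, then monkey patch, then run the application as usual)::
+
+    import eventlet.hubs
+    eventlet.hubs.use_hub("eventlet.hubs.asyncio")
+
+    import eventlet
+    eventlet.monkey_patch()
+
+    # ... start the application, mixing eventlet and asyncio code as needed.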
diff -pruN 0.36.1-12/doc/source/asyncio/guide/awaitlet.rst 0.39.0-0ubuntu1/doc/source/asyncio/guide/awaitlet.rst
--- 0.36.1-12/doc/source/asyncio/guide/awaitlet.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/asyncio/guide/awaitlet.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,47 @@
+.. _awaitlet_alternative:
+
+Awaitlet as an Alternative
+==========================
+
+Applications that have existed for several years may have seen their code
+base grow again and again; migrating such an existing code base toward
+AsyncIO can be painful or even unrealistic. For most of these applications,
+migrating to AsyncIO may mean a complete rewrite.
+
+`Awaitlet <https://awaitlet.sqlalchemy.org/en/latest/>`_ is an alternative
+which allows you to migrate this kind of existing code base without the
+headaches associated with migrating such deliverables.
+
+Awaitlet allows existing programs written to use threads and blocking APIs to
+be ported to asyncio, by replacing frontend and backend code with asyncio
+compatible approaches, but allowing intermediary code to remain completely
+unchanged, with no addition of ``async`` or ``await`` keywords throughout the
+entire codebase needed. Its primary use is to support code that is
+cross-compatible with asyncio and non-asyncio runtime environments.
+
+Awaitlet is a direct extract of `SQLAlchemy <https://www.sqlalchemy.org/>`_’s
+own asyncio mediation layer, with no dependencies on SQLAlchemy. This code has
+been in widespread production use in thousands of environments for several
+years.
+
+.. warning::
+
+    Using Awaitlet requires using the :mod:`Asyncio Hub
+    <eventlet.hubs.asyncio>`
+
+    See :ref:`understanding_hubs` for more details about hubs.
+
+Here is an example of Awaitlet usage::
+
+    import asyncio
+    import awaitlet
+
+    def asyncio_sleep():
+        return awaitlet.awaitlet(asyncio.sleep(5, result='hello'))
+
+    print(asyncio.run(awaitlet.async_def(asyncio_sleep)))
+
+We invite the reader to read the `Awaitlet synopsis
+<https://awaitlet.sqlalchemy.org/en/latest/synopsis.html>`_ to get a better
+overview of the opportunities offered by this library.
diff -pruN 0.36.1-12/doc/source/asyncio/guide/deprecation.rst 0.39.0-0ubuntu1/doc/source/asyncio/guide/deprecation.rst
--- 0.36.1-12/doc/source/asyncio/guide/deprecation.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/asyncio/guide/deprecation.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,36 @@
+.. _manage-your-deprecations:
+
+Manage Your Deprecations
+========================
+
+Libraries or applications may have specific features that are strongly related
+to Eventlet, like the ``heartbeat_in_pthread`` feature in
+the OpenStack `oslo.messaging
+<https://docs.openstack.org/oslo.messaging/latest/configuration/opts.html#oslo_messaging_rabbit.heartbeat_in_pthread>`_
+deliverable.
+
+Migrating off of Eventlet would make these features obsolete. As this kind of
+feature exposes configuration options, you would have to deprecate them so
+that your users can update their config files accordingly. However, the
+deprecation process could take several months or even several versions before
+these features can finally be removed, hence blocking the migration.
+
+The proposed solution is to mock these features with empty entry points
+that only raise deprecation warnings to inform your users that they have
+to update their config files. After one or two new versions these empty mocks
+can be safely removed without impacting anybody.
+
+In other words, these features will remain in the code, but they will do
+nothing. They will be empty features that allow you to migrate properly.
+
+Take the ``heartbeat_in_pthread`` feature as an example: with Asyncio
+there is no need to run heartbeats in a separate thread. This feature,
+the RabbitMQ heartbeat, would run in a coroutine executed in the main
+native thread. The config option will remain available, but it will only
+show a deprecation warning like the following one::
+
+    __main__:1: DeprecationWarning: Using heartbeat_in_pthread is
+    deprecated and will be removed in {SERIES}. Enabling that feature
+    has no functional effect due to recent changes applied in the
+    networking model used by oslo.messaging. Please plan an update of your
+    configuration.
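+
+A minimal sketch of such an empty, deprecated option handler, using only the
+standard ``warnings`` module (the function name and message are illustrative,
+not an existing oslo.messaging API)::
+
+    import warnings
+
+    def handle_heartbeat_in_pthread(enabled):
+        # The option is still accepted so existing config files keep working,
+        # but enabling it no longer has any functional effect.
+        if enabled:
+            warnings.warn(
+                "Using heartbeat_in_pthread is deprecated and will be removed "
+                "in a future release; enabling it has no functional effect. "
+                "Please plan an update of your configuration.",
+                DeprecationWarning,
+            )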
diff -pruN 0.36.1-12/doc/source/asyncio/guide/glossary.rst 0.39.0-0ubuntu1/doc/source/asyncio/guide/glossary.rst
--- 0.36.1-12/doc/source/asyncio/guide/glossary.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/asyncio/guide/glossary.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,158 @@
+.. _glossary_guide:
+
+Glossary
+========
+
+This glossary provides a brief description of some of the terms used within
+Eventlet in general, and more specifically in the migration context.
+The goal of this glossary is to ensure that everybody has the same
+understanding of the used terms.
+
+For more information about the migration, see the
+:ref:`migration-guide`.
+
+.. _glossary-concurrency:
+
+Concurrency
+-----------
+
+**Concurrency** is when two or more tasks can start, run, and complete in
+overlapping time **periods**. It doesn't necessarily mean they'll ever both be
+running **at the same instant**. For example, *multitasking* on a single-core
+machine.
+
+.. _glossary-cooperative-multitasking:
+
+Cooperative Multitasking
+------------------------
+
+Whenever a **thread** begins sleeping or awaiting network I/O, there is a
+chance for another thread to take the **GIL** and execute Python code.
+This is **cooperative multitasking**.
+
+.. _glossary-coro:
+
+Coro
+----
+
+Using the name **coro** is a common convention in the Python API
+documentation. It refers to a coroutine; i.e., strictly speaking, the result
+of calling an async def function, and not the function itself.
+
+.. _glossary-coroutine:
+
+Coroutine
+---------
+
+**Coroutines** are program components that generalize subroutines by allowing
+execution to be suspended and resumed. They have been described as "functions
+whose execution you can pause".
+
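+A minimal illustration with ``asyncio``, where ``await`` marks the points at
+which execution may be suspended and later resumed::
+
+    import asyncio
+
+    async def greet():
+        print("hello")
+        await asyncio.sleep(1)   # execution is suspended here ...
+        print("world")           # ... and resumed later
+
+    asyncio.run(greet())
+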
+.. _glossary-future:
+
+Future
+------
+
+A **future** represents a future completion state of some activity and is
+managed by the loop. A Future is a special low-level awaitable object that
+represents an eventual result of an asynchronous operation.
+
+.. _glossary-greenlet:
+
+Greenlet
+--------
+
+A **greenlet** is a lightweight **coroutine** for in-process sequential
+concurrent programming (see **concurrency**). You can usually think of
+greenlets as cooperatively scheduled **threads**. The major differences are
+that since they’re cooperatively scheduled, you are in control of when they
+execute, and since they are **coroutines**, many greenlets can exist in a
+single native **thread**.
+
+Greenlets are cooperative (see **cooperative multitasking**) and sequential.
+This means that when one greenlet is running, no other greenlet can be
+running; the programmer is fully in control of when execution switches between
+greenlets. In other words, one should not expect **preemptive** behavior when
+using greenlets.
+
+Greenlet is also the name of a `library
+<https://greenlet.readthedocs.io/en/latest/>`_ that provides the greenlet
+mechanism. Eventlet is based on the greenlet library.
+
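+A minimal illustration with eventlet, where execution only switches at
+cooperative points such as ``eventlet.sleep``::
+
+    import eventlet
+
+    def worker(name):
+        print(name, "start")
+        eventlet.sleep(0)   # cooperative yield: lets other greenlets run
+        print(name, "done")
+
+    threads = [eventlet.spawn(worker, name) for name in ("a", "b")]
+    for thread in threads:
+        thread.wait()
+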
+.. _glossary-green-thread:
+
+Green Thread
+------------
+
+A **green thread** is a **thread** that is scheduled by a runtime library
+or virtual machine (VM) instead of natively by the underlying operating system
+(OS). Green threads emulate multithreaded environments without relying on any
+native OS abilities, and they are managed in user space instead of kernel
+space, enabling them to work in environments that do not have native thread
+support.
+
+.. _glossary-gil:
+
+Global Interpreter Lock (GIL)
+-----------------------------
+
+A **global interpreter lock (GIL)** is a lock used internally by CPython to
+ensure that only one **thread** runs in the Python VM at a time. In general,
+CPython only switches among threads between bytecode instructions (see
+**preemptive multitasking** and **cooperative multitasking**).
+
+.. _glossary-parallelism:
+
+Parallelism
+-----------
+
+**Parallelism** is when tasks *literally* run at the same time, e.g., on a
+multicore processor. A condition that arises when at least two threads are
+executing simultaneously.
+
+.. _glossary-preemptive:
+
+Preemptive/Preemption
+---------------------
+
+**Preemption** is the act of temporarily interrupting an executing **task**,
+with the intention of resuming it at a later time. This interrupt is done by
+an external scheduler with no assistance or cooperation from the task.
+
+.. _glossary-preemptive-multitasking:
+
+Preemptive multitasking
+-----------------------
+
+**Preemptive multitasking** involves the use of an interrupt mechanism which
+suspends the currently executing process and invokes a scheduler to determine
+which process should execute next. As a result, every process gets some
+amount of CPU time over any given period.
+
+CPython also has *preemptive multitasking*: if a thread runs uninterrupted
+for 1000 bytecode instructions in Python 2, or for the configured switch
+interval (5 milliseconds by default) in Python 3, then it gives up the GIL
+and another thread may run.
+
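+The switch interval can be inspected and tuned at runtime via the standard
+library::
+
+    import sys
+
+    print(sys.getswitchinterval())   # 0.005 seconds (5 ms) by default
+    sys.setswitchinterval(0.01)      # ask CPython to switch threads less often
+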
+.. _glossary-task:
+
+Task
+----
+
+A **task** is a scheduled and independently managed **coroutine**. Tasks are
+awaitable objects used to schedule coroutines concurrently.
+
+.. _glossary-thread:
+
+Thread
+------
+
+**Threads** are a way for a program to divide (termed "split") itself into two
+or more simultaneously (or pseudo-simultaneously) running tasks. Threads and
+processes differ from one operating system to another but, in general, a
+thread is contained inside a process and different threads in the same process
+share the same resources, while different processes in the same multitasking
+operating system do not.
+
+When do threads switch in Python? The switch depends on the context. The
+threads may be interrupted (see **preemptive multitasking**) or behave
+cooperatively (see **cooperative multitasking**).
diff -pruN 0.36.1-12/doc/source/asyncio/migration.rst 0.39.0-0ubuntu1/doc/source/asyncio/migration.rst
--- 0.36.1-12/doc/source/asyncio/migration.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/asyncio/migration.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,155 @@
+.. _migration-guide:
+
+Migrating off of Eventlet
+=========================
+
+There are two main use cases for Eventlet:
+
+1. As a required networking framework, much like one would use ``asyncio``,
+   ``trio``, or older frameworks like ``Twisted`` and ``tornado``.
+
+2. As an optional, pluggable backend that allows swapping out blocking APIs
+   for an event loop, transparently, without changing any code.
+   This is how Celery and Gunicorn use eventlet.
+
+Pretending to look like a blocking API while actually using an event loop
+underneath requires exact emulation of an ever-changing and ever-increasing
+API footprint, which is fundamentally unsustainable for a volunteer-driven
+open source project.
+This is why Eventlet is discouraging new users.
+
+**Most of this document will focus on the first use case: Eventlet as the sole
+networking framework.**
+For this use case, we recommend migrating to Python's ``asyncio``, and we are
+providing infrastructure that will make this much easier, and allow for
+*gradual* migration.
+
+For the second use case, we believe this is a fundamentally unsustainable
+approach and encourage the upstream frameworks to come up with different
+solutions.
+
+Step 1. Switch to the ``asyncio`` Hub
+-------------------------------------
+
+Eventlet has different pluggable networking event loops.
+By switching the event loop to use ``asyncio``, you enable running ``asyncio``
+and Eventlet code in the same thread in the same process.
+
+To do so, set the ``EVENTLET_HUB`` environment variable to ``asyncio`` before
+starting your Eventlet program.
+For example, if you start your program with a shell script, you can do
+``export EVENTLET_HUB=asyncio``.
+
+Alternatively, you can explicitly specify the ``asyncio`` hub at startup,
+before monkey patching or any other setup work::
+
+  import eventlet.hubs
+  eventlet.hubs.use_hub("eventlet.hubs.asyncio")
+
+Step 2. Migrate code to ``asyncio``
+-----------------------------------
+
+Now that you're running Eventlet on top of ``asyncio``, you can use some new
+APIs to call from Eventlet code into ``asyncio``, and vice-versa.
+
+To call ``asyncio`` code from Eventlet code, you can wrap a coroutine (or
+anything you can ``await``) into an Eventlet ``GreenThread``.
+For example, if you want to make a HTTP request from Eventlet, you can use
+the ``asyncio``-based ``aiohttp`` library::
+
+    import aiohttp
+    from eventlet.asyncio import spawn_for_awaitable
+
+    async def request():
+        async with aiohttp.ClientSession() as session:
+            url = "https://example.com"
+            async with session.get(url) as response:
+                html = await response.text()
+                return html
+
+
+    # This makes a coroutine; typically you'd ``await`` it:
+    coro = request()
+
+    # You can wrap this coroutine with an Eventlet GreenThread, similar to
+    # ``eventlet.spawn()``:
+    gthread = spawn_for_awaitable(request())
+
+    # And then get its result, the body of https://example.com:
+    result = gthread.wait()
+
+In the other direction, any ``eventlet.greenthread.GreenThread`` can be
+``await``-ed in ``async`` functions.
+In other words ``async`` functions can call into Eventlet code::
+
+    def blocking_eventlet_api():
+        eventlet.sleep(1)
+        # do some other pseudo-blocking work
+        # ...
+        return 12
+
+    async def my_async_func():
+        gthread = eventlet.spawn(blocking_eventlet_api)
+        # In normal Eventlet code we'd call gthread.wait(), but since this is an
+        # async function we'll want to await instead:
+        result = await gthread
+        # result is now 12
+        # ...
+
+Cancellation of ``asyncio.Future`` and killing of ``eventlet.GreenThread``
+should propagate between the two.
+
+Using these two APIs, with more to come, you can gradually migrate portions of
+your application or library to ``asyncio``.
+Calls to blocking APIs like ``urlopen()`` or ``requests.get()`` can get
+replaced with calls to ``aiohttp``, for example.
+
+Depending on your Eventlet usage, during your migration you may have to
+deprecate CLI options that are related to Eventlet; we invite the reader
+to take a look at :ref:`manage-your-deprecations`.
+
+The `awesome-asyncio <https://github.com/timofurrer/awesome-asyncio>`_ github
+repository offers a curated list of awesome Python asyncio frameworks,
+libraries, software and resources. Do not hesitate to take a look at it.
+You may find asyncio-compatible candidates that can replace
+some of your current underlying libraries.
+
+Step 3. Drop Eventlet altogether
+--------------------------------
+
+Eventually you won't be relying on Eventlet at all: all your code will be
+``asyncio``-based.
+At this point you can drop Eventlet and switch to running the ``asyncio``
+loop directly.
+
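+For example, a minimal entry point once nothing depends on eventlet anymore,
+where ``main()`` stands in for your own top-level coroutine::
+
+    import asyncio
+
+    async def main():
+        ...  # purely asyncio-based application code
+
+    asyncio.run(main())
+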
+Known limitations and work in progress
+--------------------------------------
+
+In general, ``async`` functions and Eventlet green threads are two separate
+universes that just happen to be able to call each other.
+
+In ``async`` functions:
+
+* Eventlet thread locals probably won't work correctly.
+* ``eventlet.greenthread.getcurrent()`` won't give the result you expect.
+* ``eventlet`` locks and queues won't work if used directly.
+* Eventlet multiple readers are not supported, so
+  ``eventlet.debug.hub_prevent_multiple_readers`` does not work either.
+
+In Eventlet greenlets:
+
+* ``asyncio`` locks won't work if used directly.
+
+We expect to add more migration and integration APIs over time as we learn
+more about what works, common idioms, and requirements for migration.
+You can track progress in the
+`GitHub issue <https://github.com/eventlet/eventlet/issues/868>`_, and file
+new issues if you have problems.
+
+Alternatives
+------------
+
+If you really want to continue with Eventlet's pretend-to-be-blocking
+approach, you can use `gevent <https://www.gevent.org/>`_.
+But keep in mind that the same technical issues that make Eventlet maintenance
+unsustainable over the long term also apply to Gevent.
diff -pruN 0.36.1-12/doc/source/asyncio/warning.rst 0.39.0-0ubuntu1/doc/source/asyncio/warning.rst
--- 0.36.1-12/doc/source/asyncio/warning.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/asyncio/warning.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,10 @@
+.. warning::
+
+   Eventlet is now in maintenance mode, so only changes that fix a bug or
+   that are related to the new Asyncio hub will be accepted.
+   New features outside of the scope of the Asyncio hub won't be accepted.
+
+   We strongly encourage existing users to migrate to Asyncio. For further
+   details see the official :ref:`migration guide <migration-guide>`.
+
diff -pruN 0.36.1-12/doc/source/authors.rst 0.39.0-0ubuntu1/doc/source/authors.rst
--- 0.36.1-12/doc/source/authors.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/authors.rst	2020-02-02 00:00:00.000000000 +0000
@@ -1,3 +1,5 @@
+.. _authors:
+
 Authors
 =======
 
diff -pruN 0.36.1-12/doc/source/contribute.rst 0.39.0-0ubuntu1/doc/source/contribute.rst
--- 0.36.1-12/doc/source/contribute.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/contribute.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,46 @@
+.. _how-to-contribute:
+
+How to Contribute to Eventlet
+#############################
+
+.. include:: asyncio/warning.rst
+
+Contributions are welcome.
+
+You want to report something? Read :ref:`report-a-bug`.
+
+You want to propose changes? Read :ref:`propose-changes`.
+
+.. _report-a-bug:
+
+Report a Bug
+=============
+
+You found a bug and want to report it?
+
+Simply `create a new github issue <https://github.com/eventlet/eventlet/issues>`_
+where you describe your problem.
+
+Do not forget to provide technical details like:
+
+* the hub you use
+* the context of your bug
+* the error message you get
+* everything else that may help us to understand your problem.
+
+The more details you give, the better we will be able to help you.
+
+.. _propose-changes:
+
+Propose Changes
+===============
+
+You may want to propose changes to fix a bug, improve the documentation, etc.
+
+Feel free to open a pull request: https://github.com/eventlet/eventlet/pulls
+
+.. include:: asyncio/warning.rst
+
+We will be happy to review it.
+
+At this point you may also be interested in :ref:`how to test Eventlet <testing-eventlet>`.
diff -pruN 0.36.1-12/doc/source/history.rst 0.39.0-0ubuntu1/doc/source/history.rst
--- 0.36.1-12/doc/source/history.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/history.rst	2020-02-02 00:00:00.000000000 +0000
@@ -1,3 +1,5 @@
+.. _history:
+
 History
 -------
 
diff -pruN 0.36.1-12/doc/source/hubs.rst 0.39.0-0ubuntu1/doc/source/hubs.rst
--- 0.36.1-12/doc/source/hubs.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/hubs.rst	2020-02-02 00:00:00.000000000 +0000
@@ -13,7 +13,8 @@ Eventlet has multiple hub implementation
     | We discourage new Eventlet projects.
     | We encourage existing Eventlet projects to migrate from Eventlet to Asyncio.
     | This hub allow you incremental and smooth migration.
-    | See the `migration guide <https://eventlet.readthedocs.io/en/latest/migration.html>`_ for further details.
+    | See the :ref:`migration-guide` for further details.
+    | See the :ref:`asyncio-compatibility` for the current state of the art.
 **epolls**
     Linux. This is the fastest hub for Linux.
 **kqueue**
diff -pruN 0.36.1-12/doc/source/index.rst 0.39.0-0ubuntu1/doc/source/index.rst
--- 0.36.1-12/doc/source/index.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/index.rst	2020-02-02 00:00:00.000000000 +0000
@@ -1,3 +1,6 @@
+Eventlet Documentation
+######################
+
 Warning
 =======
 
@@ -34,8 +37,19 @@ answer them.
 .. _asyncio: https://docs.python.org/3/library/asyncio.html
 .. _open a new issue: https://github.com/eventlet/eventlet/issues/new
 
-Eventlet Documentation
-======================
+Installation
+============
+
+The easiest way to get Eventlet is to use pip::
+
+  pip install -U eventlet
+
+To install the latest development version::
+
+  pip install -U https://github.com/eventlet/eventlet/archive/master.zip
+
+Usage
+=====
 
 Code talks!  This is a simple web crawler that fetches a bunch of urls concurrently:
 
@@ -57,19 +71,19 @@ Code talks!  This is a simple web crawle
     for body in pool.imap(fetch, urls):
         print("got body", len(body))
 
-Supported Python versions
+Supported Python Versions
 =========================
 
-Currently supporting CPython 3.7+.
+Currently supporting CPython 3.8+.
 
 
-Contents
-=========
+Concepts & References
+=====================
 
 .. toctree::
    :maxdepth: 2
 
-   migration
+   asyncio/asyncio
    basic_usage
    design_patterns
    patching
@@ -78,22 +92,45 @@ Contents
    threading
    zeromq
    hubs
-   testing
    environment
-
    modules
 
-   process
-   authors
-   history
+Want to contribute?
+===================
+
+.. toctree::
+   :maxdepth: 2
+
+   contribute
+   testing
+   maintenance
 
 License
----------
+=======
 Eventlet is made available under the terms of the open source `MIT license <http://www.opensource.org/licenses/mit-license.php>`_
 
+Changelog
+=========
+
+For further details about released versions of Eventlet please take a
+look at the `changelog`_.
+
+Authors & History
+=================
+
+If you have questions, or you found a bug and want to contact the authors
+or maintainers, please take a look at :ref:`authors`.
+
+If you want to learn more about the history of Eventlet, please take a
+look at :ref:`history`.
+
 Indices and tables
 ==================
 
 * :ref:`genindex`
 * :ref:`modindex`
 * :ref:`search`
+* `changelog`_
+
+
+.. _changelog: https://github.com/eventlet/eventlet/blob/master/NEWS
diff -pruN 0.36.1-12/doc/source/maintenance/process.rst 0.39.0-0ubuntu1/doc/source/maintenance/process.rst
--- 0.36.1-12/doc/source/maintenance/process.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/maintenance/process.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,92 +0,0 @@
-Maintenance Process
-===================
-
-This section provide guidances and process to eventlet
-maintainers. They are useful to lead the life cycle of eventlet.
-
-Releases
---------
-
-Here we describe the process we usually follow to
-process a new release.
-
-1. Create a github issue to track the release
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The first step will be to `open a new github issue`_
-to warn other maintainers about our intention
-to produce a new release. They may want, or not,
-to land a specific patch to address a specific
-topic. This issue will allow them to raise their
-concerns.
-
-Here are some `previous examples of issues`_ specifically
-created to handle the release process. Usually we name this
-kind of issue with the following pattern "[release] eventlet <next-version-number>".
-
-Please add the `release` label to this issue. It would
-ease the tracking of works related to releases.
-
-2. Prepare the changelog
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-You now have to update the changelog by updating
-the `NEWS` file available at the root of eventlet the project.
-
-We would recommand to give the big picture of the changes
-landed by the coming version. The goal here is not to list
-each commit, but rather, to give a summarize of the significant
-changes made during this versions.
-
-Once your changes are done, then propose a pull request.
-
-Please add the `changelog` label to this pull request. It would
-ease the tracking of works related to releases.
-
-If you want, you can use the issue previously created to list
-each commits landed in this new version. Here is an example https://github.com/eventlet/eventlet/issues/897.
-
-3. Create the tag
-~~~~~~~~~~~~~~~~~
-
-Once the changelog patch is merged, then we are now
-able to produce the new corresponding tag, here are the
-commands we use to do that:
-
-```bash
-$ git fetch origin # get the latest updates from the remote repo
-$ git tag -s vX.Y.Z origin/master # create a signed tag where X.Y.Z correspond to the version you are eager to produce
-$ git push origin --tags
-```
-
-Do not hesitate to provide the list of changes in the tags message.
-Here is an example https://github.com/eventlet/eventlet/releases/tag/v0.34.3
-You can simply reuse the changelog you made previously.
-
-Alternatively, the Github UI also allow you creating tags.
-
-4. Final checks
-~~~~~~~~~~~~~~~
-
-Pushing the previous will produce a new build. This
-build will generate our release and will push this
-new version to Pypi.
-
-You should ensure that this new version is now
-well available on Pypi https://pypi.org/project/eventlet/#history.
-
-Your tag should be listed there https://github.com/eventlet/eventlet/tags.
-
-5. Close the issue
-~~~~~~~~~~~~~~~~~~
-
-If the previous steps were successful, then you can
-now update the Github issue that you previously created.
-
-I'd recommend to put a comment with the pypi link and the tag link
-like there https://github.com/eventlet/eventlet/issues/875#issuecomment-1887435752.
-
-You can now close this Github issue.
-
-.. _open a new github issue: https://github.com/eventlet/eventlet/issues/new
-.. _previous examples of issues: https://github.com/eventlet/eventlet/issues?q=label%3Arelease+is%3Aclosed
diff -pruN 0.36.1-12/doc/source/maintenance.rst 0.39.0-0ubuntu1/doc/source/maintenance.rst
--- 0.36.1-12/doc/source/maintenance.rst	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/maintenance.rst	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,95 @@
+.. _maintenance_process:
+
+Maintenance Process
+###################
+
+This section provides guidance and processes to eventlet
+maintainers. It is mostly dedicated to helping Eventlet's core maintainers
+lead the life cycle of eventlet.
+
+Releases
+========
+
+Here we describe the process we usually follow to
+produce a new release.
+
+1. Create a github issue to track the release
+---------------------------------------------
+
+The first step is to `open a new github issue`_
+to warn other maintainers about our intention
+to produce a new release. They may or may not want
+to land a specific patch to address a specific
+topic. This issue will allow them to raise their
+concerns.
+
+Here are some `previous examples of issues`_ specifically
+created to handle the release process. Usually we name this
+kind of issue with the following pattern "[release] eventlet <next-version-number>".
+
+Please add the `release` label to this issue. It
+eases the tracking of work related to releases.
+
+2. Prepare the changelog
+------------------------
+
+You now have to update the changelog by editing
+the `NEWS` file available at the root of the eventlet project.
+
+We recommend giving the big picture of the changes
+landed in the coming version. The goal here is not to list
+each commit, but rather to give a summary of the significant
+changes made in this version.
+
+Once your changes are done, then propose a pull request.
+
+Please add the `changelog` label to this pull request. It
+eases the tracking of work related to releases.
+
+If you want, you can use the issue previously created to list
+each commit landed in this new version. Here is an example: https://github.com/eventlet/eventlet/issues/897.
+
+3. Create the tag
+-----------------
+
+Once the changelog patch is merged, we are able to
+produce the corresponding tag. Here are the
+commands we use to do that::
+
+  $ git fetch origin                # get the latest updates from the remote repo
+  $ git tag -s vX.Y.Z origin/master # create a signed tag where X.Y.Z corresponds to the version you are about to release
+  $ git push origin --tags
+
+Do not hesitate to provide the list of changes in the tag's message.
+Here is an example: https://github.com/eventlet/eventlet/releases/tag/v0.34.3.
+You can simply reuse the changelog you made previously.
+
+Alternatively, the Github UI also allows you to create tags.
+
+4. Final checks
+---------------
+
+Pushing the tag will trigger a new build. This
+build will generate our release and will push this
+new version to PyPI.
+
+You should ensure that this new version is now
+available on PyPI: https://pypi.org/project/eventlet/#history.
+
+Your tag should be listed at https://github.com/eventlet/eventlet/tags.
+
+5. Close the issue
+------------------
+
+If the previous steps were successful, then you can
+now update the Github issue that you previously created.
+
+We recommend putting a comment with the PyPI link and the tag link,
+as in https://github.com/eventlet/eventlet/issues/875#issuecomment-1887435752.
+
+You can now close this Github issue.
+
+.. _open a new github issue: https://github.com/eventlet/eventlet/issues/new
+.. _previous examples of issues: https://github.com/eventlet/eventlet/issues?q=label%3Arelease+is%3Aclosed
diff -pruN 0.36.1-12/doc/source/migration.rst 0.39.0-0ubuntu1/doc/source/migration.rst
--- 0.36.1-12/doc/source/migration.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/migration.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,120 +0,0 @@
-.. _migration-guide:
-
-Migrating off of Eventlet
-=========================
-
-There are two main use cases for Eventlet:
-
-1. As a required networking framework, much like one would use ``asyncio``, ``trio``, or older frameworks like ``Twisted`` and ``tornado``.
-
-2. As an optional, pluggable backend that allows swapping out blocking APIs for an event loop, transparently, without changing any code.
-   This is how Celery and Gunicorn use eventlet.
-
-Pretending to look like a blocking API while actually using an event loop underneath requires exact emulation of an ever-changing and ever-increasing API footprint, which is fundamentally unsustainable for a volunteer-driven open source project.
-This is why Eventlet is discouraging new users.
-
-**Most of this document will focus on the first use case: Eventlet as the sole networking framework.**
-For this use case, we recommend migrating to Python's ``asyncio``, and we are providing infrastructure that will make this much easier, and allow for *gradual* migration.
-
-For the second use case, we believe this is a fundamentally unsustainable approach and encourage the upstream frameworks to come up with different solutions.
-
-Step 1. Switch to the ``asyncio`` Hub
--------------------------------------
-
-Eventlet has different pluggable networking event loops.
-By switching the event loop to use ``asyncio``, you enable running ``asyncio`` and Eventlet code in the same thread in the same process.
-
-To do so, set the ``EVENTLET_HUB`` environment variable to ``asyncio`` before starting your Eventlet program.
-For example, if you start your program with a shell script, you can do ``export EVENTLET_HUB=asyncio``.
-
-Alternatively, you can explicitly specify the ``asyncio`` hub at startup, before monkey patching or any other setup work::
-
-  import eventlet.hubs
-  eventlet.hubs.use_hub("eventlet.hubs.asyncio")
-
-Step 2. Migrate code to ``asyncio``
------------------------------------
-
-Now that you're running Eventlet on top of ``asyncio``, you can use some new APIs to call from Eventlet code into ``asyncio``, and vice-versa.
-
-To call ``asyncio`` code from Eventlet code, you can wrap a coroutine (or anything you can ``await``) into an Eventlet ``GreenThread``.
-For example, if you want to make a HTTP request from Eventlet, you can use the ``asyncio``-based ``aiohttp`` library::
-
-    import aiohttp
-    from eventlet.asyncio import spawn_for_awaitable
-
-    async def request():
-        async with aiohttp.ClientSession() as session:
-            url = "https://example.com"
-            async with session.get(url) as response:
-                html = await response.text()
-                return html
-
-
-    # This makes a coroutine; typically you'd ``await`` it:
-    coro = request()
-
-    # You can wrap this coroutine with an Eventlet GreenThread, similar to
-    # ``evenlet.spawn()``:
-    gthread = spawn_for_awaitable(request())
-
-    # And then get its result, the body of https://example.com:
-    result = gthread.wait()
-
-In the other direction, any ``eventlet.greenthread.GreenThread`` can be ``await``-ed in ``async`` functions.
-In other words ``async`` functions can call into Eventlet code::
-
-    def blocking_eventlet_api():
-        eventlet.sleep(1)
-        # do some other pseudo-blocking work
-        # ...
-        return 12
-
-    async def my_async_func():
-        gthread = eventlet.spawn(blocking_eventlet_api)
-        # In normal Eventlet code we'd call gthread.wait(), but since this is an
-        # async function we'll want to await instead:
-        result = await gthread
-        # result is now 12
-        # ...
-
-Cancellation of ``asyncio.Future`` and killing of ``eventlet.GreenThread`` should propagate between the two.
-
-Using these two APIs, with more to come, you can gradually migrate portions of your application or library to ``asyncio``.
-Calls to blocking APIs like ``urlopen()`` or ``requests.get()`` can get replaced with calls to ``aiohttp``, for example.
-
-The `awesome-asyncio <https://github.com/timofurrer/awesome-asyncio>`_ github repository propose a curated list of awesome
-Python asyncio frameworks, libraries, software and resources. Do not hesitate to take a look at it. You may find
-candidates compatible with asyncio that can allow you to replace some of your actual underlying libraries.
-
-
-Step 3. Drop Eventlet altogether
---------------------------------
-
-Eventually you won't be relying on Eventlet at all: all your code will be ``asyncio``-based.
-At this point you can drop Eventlet and switch to running the ``asyncio`` loop directly.
-
-
-Known limitations and work in progress
---------------------------------------
-
-In general, ``async`` functions and Eventlet green threads are two separate universes that just happen to be able to call each other.
-In ``async`` functions:
-
-* Eventlet thread locals probably won't work correctly.
-* ``evenlet.greenthread.getcurrent()`` won't give the result you expect.
-* ``eventlet`` locks and queues won't work if used directly.
-
-In Eventlet greenlets:
-
-* ``asyncio`` locks won't work if used directly.
-
-We expect to add more migration and integration APIs over time as we learn more about what works, common idioms, and requirements for migration.
-You can track progress in the `GitHub issue <https://github.com/eventlet/eventlet/issues/868>`_, and file new issues if you have problems.
-
-
-Alternatives
-------------
-
-If you really want to continue with Eventlet's pretend-to-be-blocking approach, you can use `gevent <https://www.gevent.org/>`_.
-But keep in mind that the same technical issues that make Eventlet maintenance unsustainable over the long term also apply to Gevent.
diff -pruN 0.36.1-12/doc/source/reference/api/eventlet.greenio.rst 0.39.0-0ubuntu1/doc/source/reference/api/eventlet.greenio.rst
--- 0.36.1-12/doc/source/reference/api/eventlet.greenio.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/reference/api/eventlet.greenio.rst	2020-02-02 00:00:00.000000000 +0000
@@ -12,14 +12,6 @@ eventlet.greenio.base module
    :undoc-members:
    :show-inheritance:
 
-eventlet.greenio.py2 module
----------------------------
-
-.. automodule:: eventlet.greenio.py2
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
 eventlet.greenio.py3 module
 ---------------------------
 
diff -pruN 0.36.1-12/doc/source/reference/api/eventlet.rst 0.39.0-0ubuntu1/doc/source/reference/api/eventlet.rst
--- 0.36.1-12/doc/source/reference/api/eventlet.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/reference/api/eventlet.rst	2020-02-02 00:00:00.000000000 +0000
@@ -144,14 +144,6 @@ eventlet.semaphore module
    :undoc-members:
    :show-inheritance:
 
-eventlet.temp module
---------------------
-
-.. automodule:: eventlet.temp
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
 eventlet.timeout module
 -----------------------
 
diff -pruN 0.36.1-12/doc/source/testing.rst 0.39.0-0ubuntu1/doc/source/testing.rst
--- 0.36.1-12/doc/source/testing.rst	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/doc/source/testing.rst	2020-02-02 00:00:00.000000000 +0000
@@ -1,3 +1,5 @@
+.. _testing-eventlet:
+
 Testing Eventlet
 ================
 
diff -pruN 0.36.1-12/eventlet/_version.py 0.39.0-0ubuntu1/eventlet/_version.py
--- 0.36.1-12/eventlet/_version.py	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/_version.py	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple, Union
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+    VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '0.39.0'
+__version_tuple__ = version_tuple = (0, 39, 0)
diff -pruN 0.36.1-12/eventlet/debug.py 0.39.0-0ubuntu1/eventlet/debug.py
--- 0.36.1-12/eventlet/debug.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/debug.py	2020-02-02 00:00:00.000000000 +0000
@@ -35,8 +35,12 @@ class Spew:
             else:
                 name = '[unknown]'
                 try:
-                    src = inspect.getsourcelines(frame)
-                    line = src[lineno]
+                    src, offset = inspect.getsourcelines(frame)
+                    # The first line is line 1
+                    # But 0 may be returned when executing module-level code
+                    if offset == 0:
+                        offset = 1
+                    line = src[lineno - offset]
                 except OSError:
                     line = 'Unknown code named [%s].  VM instruction #%d' % (
                         frame.f_code.co_name, frame.f_lasti)
@@ -150,8 +154,24 @@ def hub_prevent_multiple_readers(state=T
     to predict which greenlet will receive what data.  To achieve
     resource sharing consider using ``eventlet.pools.Pool`` instead.
 
-    But if you really know what you are doing you can change the state
-    to ``False`` to stop the hub from protecting against this mistake.
+    It is important to note that this feature is a debug
+    convenience. It is not a feature meant to be integrated into production
+    code of any sort.
+
+    **If you really know what you are doing** you can change the state
+    to ``False`` to stop the hub from protecting against this mistake.
+    Otherwise we strongly discourage using this feature, or at least you
+    should use it really carefully.
+
+    You should be aware that disabling this prevention applies to
+    your entire stack and not only to the context where you may find it useful,
+    meaning that using this debug feature may have significant
+    unexpected side effects on your process, such as race conditions
+    between your sockets and on all your I/O in general.
+
+    You should also note that this debug convenience is not supported
+    by the Asyncio hub, which is the official path for migrating off of
+    eventlet. Using this feature will block your migration path.
     """
     from eventlet.hubs import hub, get_hub
     from eventlet.hubs import asyncio
diff -pruN 0.36.1-12/eventlet/green/os.py 0.39.0-0ubuntu1/eventlet/green/os.py
--- 0.36.1-12/eventlet/green/os.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/green/os.py	2020-02-02 00:00:00.000000000 +0000
@@ -1,6 +1,7 @@
 os_orig = __import__("os")
 import errno
 socket = __import__("socket")
+from stat import S_ISREG
 
 from eventlet import greenio
 from eventlet.support import get_errno
@@ -38,6 +39,15 @@ def read(fd, n):
 
     Read a file descriptor."""
     while True:
+        # don't wait to read for regular files
+        # select/poll will always return True while epoll will simply crash
+        st_mode = os_orig.stat(fd).st_mode
+        if not S_ISREG(st_mode):
+            try:
+                hubs.trampoline(fd, read=True)
+            except hubs.IOClosed:
+                return ''
+
         try:
             return __original_read__(fd, n)
         except OSError as e:
@@ -45,10 +55,6 @@ def read(fd, n):
                 return ''
             if get_errno(e) != errno.EAGAIN:
                 raise
-        try:
-            hubs.trampoline(fd, read=True)
-        except hubs.IOClosed:
-            return ''
 
 
 __original_write__ = os_orig.write
@@ -60,12 +66,20 @@ def write(fd, st):
     Write a string to a file descriptor.
     """
     while True:
+        # don't wait to write for regular files
+        # select/poll will always return True while epoll will simply crash
+        st_mode = os_orig.stat(fd).st_mode
+        if not S_ISREG(st_mode):
+            try:
+                hubs.trampoline(fd, write=True)
+            except hubs.IOClosed:
+                return 0
+
         try:
             return __original_write__(fd, st)
         except OSError as e:
             if get_errno(e) not in [errno.EAGAIN, errno.EPIPE]:
                 raise
-        hubs.trampoline(fd, write=True)
 
 
 def wait():
diff -pruN 0.36.1-12/eventlet/green/thread.py 0.39.0-0ubuntu1/eventlet/green/thread.py
--- 0.36.1-12/eventlet/green/thread.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/green/thread.py	2020-02-02 00:00:00.000000000 +0000
@@ -2,13 +2,16 @@
 import _thread as __thread
 from eventlet.support import greenlets as greenlet
 from eventlet import greenthread
+from eventlet.timeout import with_timeout
 from eventlet.lock import Lock
 import sys
 
 
-__patched__ = ['get_ident', 'start_new_thread', 'start_new', 'allocate_lock',
-               'allocate', 'exit', 'interrupt_main', 'stack_size', '_local',
-               'LockType', 'Lock', '_count']
+__patched__ = ['Lock', 'LockType', '_ThreadHandle', '_count',
+               '_get_main_thread_ident', '_local', '_make_thread_handle',
+               'allocate', 'allocate_lock', 'exit', 'get_ident',
+               'interrupt_main', 'stack_size', 'start_joinable_thread',
+               'start_new', 'start_new_thread']
 
 error = __thread.error
 LockType = Lock
@@ -47,11 +50,42 @@ def __thread_body(func, args, kwargs):
         __threadcount -= 1
 
 
-def start_new_thread(function, args=(), kwargs=None):
-    if (sys.version_info >= (3, 4)
+class _ThreadHandle:
+    def __init__(self, greenthread=None):
+        self._greenthread = greenthread
+        self._done = False
+
+    def _set_done(self):
+        self._done = True
+
+    def is_done(self):
+        if self._greenthread is not None:
+            return self._greenthread.dead
+        return self._done
+
+    @property
+    def ident(self):
+        return get_ident(self._greenthread)
+
+    def join(self, timeout=None):
+        if not hasattr(self._greenthread, "wait"):
+            return
+        if timeout is not None:
+            return with_timeout(timeout, self._greenthread.wait)
+        return self._greenthread.wait()
+
+
+def _make_thread_handle(ident):
+    greenthread = greenlet.getcurrent()
+    assert ident == get_ident(greenthread)
+    return _ThreadHandle(greenthread=greenthread)
+
+
+def __spawn_green(function, args=(), kwargs=None, joinable=False):
+    if ((3, 4) <= sys.version_info < (3, 13)
             and getattr(function, '__module__', '') == 'threading'
             and hasattr(function, '__self__')):
-        # Since Python 3.4, threading.Thread uses an internal lock
+        # In Python 3.4-3.12, threading.Thread uses an internal lock
         # automatically released when the python thread state is deleted.
         # With monkey patching, eventlet uses green threads without python
         # thread state, so the lock is not automatically released.
@@ -66,19 +100,40 @@ def start_new_thread(function, args=(),
                 bootstrap_inner()
             finally:
                 # The lock can be cleared (ex: by a fork())
-                if thread._tstate_lock is not None:
+                if getattr(thread, "_tstate_lock", None) is not None:
                     thread._tstate_lock.release()
 
         thread._bootstrap_inner = wrap_bootstrap_inner
 
     kwargs = kwargs or {}
-    g = greenthread.spawn_n(__thread_body, function, args, kwargs)
+    spawn_func = greenthread.spawn if joinable else greenthread.spawn_n
+    return spawn_func(__thread_body, function, args, kwargs)
+
+
+def start_joinable_thread(function, handle=None, daemon=True):
+    g = __spawn_green(function, joinable=True)
+    if handle is None:
+        handle = _ThreadHandle(greenthread=g)
+    else:
+        handle._greenthread = g
+    return handle
+
+
+def start_new_thread(function, args=(), kwargs=None):
+    g = __spawn_green(function, args=args, kwargs=kwargs)
     return get_ident(g)
 
 
 start_new = start_new_thread
 
 
+def _get_main_thread_ident():
+    greenthread = greenlet.getcurrent()
+    while greenthread.parent is not None:
+        greenthread = greenthread.parent
+    return get_ident(greenthread)
+
+
 def allocate_lock(*a):
     return LockType(1)
 
@@ -118,3 +173,6 @@ from eventlet.corolocal import local as
 
 if hasattr(__thread, 'daemon_threads_allowed'):
     daemon_threads_allowed = __thread.daemon_threads_allowed
+
+if hasattr(__thread, '_shutdown'):
+    _shutdown = __thread._shutdown
diff -pruN 0.36.1-12/eventlet/green/threading.py 0.39.0-0ubuntu1/eventlet/green/threading.py
--- 0.36.1-12/eventlet/green/threading.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/green/threading.py	2020-02-02 00:00:00.000000000 +0000
@@ -4,9 +4,10 @@ from eventlet.green import thread
 from eventlet.green import time
 from eventlet.support import greenlets as greenlet
 
-__patched__ = ['_start_new_thread', '_allocate_lock',
-               '_sleep', 'local', 'stack_size', 'Lock', 'currentThread',
-               'current_thread', '_after_fork', '_shutdown']
+__patched__ = ['Lock', '_after_fork', '_allocate_lock', '_get_main_thread_ident',
+               '_make_thread_handle', '_shutdown', '_sleep',
+               '_start_joinable_thread', '_start_new_thread', '_ThreadHandle',
+               'currentThread', 'current_thread', 'local', 'stack_size']
 
 __patched__ += ['get_ident', '_set_sentinel']
 
diff -pruN 0.36.1-12/eventlet/greenthread.py 0.39.0-0ubuntu1/eventlet/greenthread.py
--- 0.36.1-12/eventlet/greenthread.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/greenthread.py	2020-02-02 00:00:00.000000000 +0000
@@ -32,6 +32,13 @@ def sleep(seconds=0):
     hub = hubs.get_hub()
     current = getcurrent()
     if hub.greenlet is current:
+        if seconds <= 0:
+            # In this case, sleep(0) got called in the event loop threadlet.
+            # This isn't blocking, so it's not harmful. And it will not be
+            # possible to switch in this situation. So not much we can do other
+            # than just keep running. This does get triggered in real code,
+            # unfortunately.
+            return
         raise RuntimeError('do not call blocking functions from the mainloop')
     timer = hub.schedule_call_global(seconds, current.switch)
     try:
diff -pruN 0.36.1-12/eventlet/hubs/asyncio.py 0.39.0-0ubuntu1/eventlet/hubs/asyncio.py
--- 0.36.1-12/eventlet/hubs/asyncio.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/hubs/asyncio.py	2020-02-02 00:00:00.000000000 +0000
@@ -2,19 +2,19 @@
 Asyncio-based hub, originally implemented by Miguel Grinberg.
 """
 
-import asyncio
-try:
-    import concurrent.futures.thread
-    concurrent_imported = True
-except RuntimeError:
-    # This happens in weird edge cases where asyncio hub is started at
-    # shutdown. Not much we can do if this happens.
-    concurrent_imported = False
+# The various modules involved in asyncio need to call the original, unpatched
+# standard library APIs to work: socket, select, threading, and so on. We
+# therefore don't import them on the module level, since that would involve
+# their imports getting patched, and instead delay importing them as much as
+# possible. Then, we do a little song and dance in Hub.__init__ below so that
+# when they're imported they import the original modules (select, socket, etc)
+# rather than the patched ones.
+
 import os
 import sys
 
 from eventlet.hubs import hub
-from eventlet.patcher import original
+from eventlet.patcher import _unmonkey_patch_asyncio_all
 
 
 def is_available():
@@ -32,22 +32,14 @@ class Hub(hub.BaseHub):
 
     def __init__(self):
         super().__init__()
-        # Make sure asyncio thread pools use real threads:
-        if concurrent_imported:
-            concurrent.futures.thread.threading = original("threading")
-            concurrent.futures.thread.queue = original("queue")
-
-        # Make sure select/poll/epoll/kqueue are usable by asyncio:
-        import selectors
-        selectors.select = original("select")
-
-        # Make sure DNS lookups use normal blocking API (which asyncio will run
-        # in a thread):
-        import asyncio.base_events
-        asyncio.base_events.socket = original("socket")
+
+        # Pre-emptively make sure we're using the right modules:
+        _unmonkey_patch_asyncio_all()
 
         # The presumption is that eventlet is driving the event loop, so we
         # want a new one we control.
+        import asyncio
+
         self.loop = asyncio.new_event_loop()
         asyncio.set_event_loop(self.loop)
         self.sleep_event = asyncio.Event()
@@ -83,7 +75,7 @@ class Hub(hub.BaseHub):
         try:
             os.fstat(fileno)
         except OSError:
-            raise ValueError('Invalid file descriptor')
+            raise ValueError("Invalid file descriptor")
         already_listening = self.listeners[evtype].get(fileno) is not None
         listener = super().add(evtype, fileno, cb, tb, mark_as_closed)
         if not already_listening:
@@ -126,6 +118,8 @@ class Hub(hub.BaseHub):
         """
         Start the ``Hub`` running. See the superclass for details.
         """
+        import asyncio
+
         async def async_run():
             if self.running:
                 raise RuntimeError("Already running!")
@@ -150,8 +144,7 @@ class Hub(hub.BaseHub):
                         sleep_time = wakeup_when - self.clock()
                     if sleep_time > 0:
                         try:
-                            await asyncio.wait_for(self.sleep_event.wait(),
-                                                   sleep_time)
+                            await asyncio.wait_for(self.sleep_event.wait(), sleep_time)
                         except asyncio.TimeoutError:
                             pass
                         self.sleep_event.clear()
diff -pruN 0.36.1-12/eventlet/patcher.py 0.39.0-0ubuntu1/eventlet/patcher.py
--- 0.36.1-12/eventlet/patcher.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/patcher.py	2020-02-02 00:00:00.000000000 +0000
@@ -1,9 +1,12 @@
 from __future__ import annotations
+
 try:
     import _imp as imp
 except ImportError:
     import imp
+import importlib
 import sys
+
 try:
     # Only for this purpose, it's irrelevant if `os` was already patched.
     # https://github.com/eventlet/eventlet/pull/661
@@ -14,9 +17,9 @@ except ImportError:
 import eventlet
 
 
-__all__ = ['inject', 'import_patched', 'monkey_patch', 'is_monkey_patched']
+__all__ = ["inject", "import_patched", "monkey_patch", "is_monkey_patched"]
 
-__exclude = {'__builtins__', '__file__', '__name__'}
+__exclude = {"__builtins__", "__file__", "__name__"}
 
 
 class SysModulesSaver:
@@ -70,7 +73,7 @@ def inject(module_name, new_globals, *ad
     name/module pairs is used, which should cover all use cases but may be
     slower because there are inevitably redundant or unnecessary imports.
     """
-    patched_name = '__patched_module_' + module_name
+    patched_name = "__patched_module_" + module_name
     if patched_name in sys.modules:
         # returning already-patched module so as not to destroy existing
         # references to patched modules
@@ -79,11 +82,12 @@ def inject(module_name, new_globals, *ad
     if not additional_modules:
         # supply some defaults
         additional_modules = (
-            _green_os_modules() +
-            _green_select_modules() +
-            _green_socket_modules() +
-            _green_thread_modules() +
-            _green_time_modules())
+            _green_os_modules()
+            + _green_select_modules()
+            + _green_socket_modules()
+            + _green_thread_modules()
+            + _green_time_modules()
+        )
         # _green_MySQLdb()) # enable this after a short baking-in period
 
     # after this we are gonna screw with sys.modules, so capture the
@@ -103,10 +107,10 @@ def inject(module_name, new_globals, *ad
     # because of the pop operations will change the content of sys.modules
     # within th loop
     for imported_module_name in list(sys.modules.keys()):
-        if imported_module_name.startswith(module_name + '.'):
+        if imported_module_name.startswith(module_name + "."):
             sys.modules.pop(imported_module_name, None)
     try:
-        module = __import__(module_name, {}, {}, module_name.split('.')[:-1])
+        module = __import__(module_name, {}, {}, module_name.split(".")[:-1])
 
         if new_globals is not None:
             # Update the given globals dictionary with everything from this new module
@@ -130,9 +134,8 @@ def import_patched(module_name, *additio
     The only required argument is the name of the module to be imported.
     """
     return inject(
-        module_name,
-        None,
-        *additional_modules + tuple(kw_additional_modules.items()))
+        module_name, None, *additional_modules + tuple(kw_additional_modules.items())
+    )
 
 
 def patch_function(func, *additional_modules):
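
import_patched() above is the public entry point for one-off green imports; a hedged usage sketch (ftplib is only an example of a socket-using stdlib module):

    # Hedged usage sketch: import a module so that its own imports of socket,
    # select, threading, etc. resolve to eventlet's green versions.
    from eventlet import patcher

    green_ftplib = patcher.import_patched("ftplib")
    # ftplib's module-level "import socket" now refers to eventlet.green.socket:
    print(green_ftplib.socket)
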
@@ -144,11 +147,12 @@ def patch_function(func, *additional_mod
     if not additional_modules:
         # supply some defaults
         additional_modules = (
-            _green_os_modules() +
-            _green_select_modules() +
-            _green_socket_modules() +
-            _green_thread_modules() +
-            _green_time_modules())
+            _green_os_modules()
+            + _green_select_modules()
+            + _green_socket_modules()
+            + _green_thread_modules()
+            + _green_time_modules()
+        )
 
     def patched(*args, **kw):
         saver = SysModulesSaver()
@@ -159,6 +163,7 @@ def patch_function(func, *additional_mod
             return func(*args, **kw)
         finally:
             saver.restore()
+
     return patched
 
 
@@ -169,6 +174,7 @@ def _original_patch_function(func, *modu
     patch_function, only the names of the modules need be supplied,
     and there are no defaults.  This is a gross hack; tell your kids not
     to import inside function bodies!"""
+
     def patched(*args, **kw):
         saver = SysModulesSaver(module_names)
         for name in module_names:
@@ -177,17 +183,18 @@ def _original_patch_function(func, *modu
             return func(*args, **kw)
         finally:
             saver.restore()
+
     return patched
 
 
 def original(modname):
-    """ This returns an unpatched version of a module; this is useful for
+    """This returns an unpatched version of a module; this is useful for
     Eventlet itself (i.e. tpool)."""
     # note that it's not necessary to temporarily install unpatched
     # versions of all patchable modules during the import of the
     # module; this is because none of them import each other, except
     # for threading which imports thread
-    original_name = '__original_module_' + modname
+    original_name = "__original_module_" + modname
     if original_name in sys.modules:
         return sys.modules.get(original_name)
 
@@ -198,20 +205,20 @@ def original(modname):
     # some rudimentary dependency checking -- fortunately the modules
     # we're working on don't have many dependencies so we can just do
     # some special-casing here
-    deps = {'threading': '_thread', 'queue': 'threading'}
+    deps = {"threading": "_thread", "queue": "threading"}
     if modname in deps:
         dependency = deps[modname]
         saver.save(dependency)
         sys.modules[dependency] = original(dependency)
     try:
-        real_mod = __import__(modname, {}, {}, modname.split('.')[:-1])
-        if modname in ('Queue', 'queue') and not hasattr(real_mod, '_threading'):
+        real_mod = __import__(modname, {}, {}, modname.split(".")[:-1])
+        if modname in ("Queue", "queue") and not hasattr(real_mod, "_threading"):
             # tricky hack: Queue's constructor in <2.7 imports
             # threading on every instantiation; therefore we wrap
             # it so that it always gets the original threading
             real_mod.Queue.__init__ = _original_patch_function(
-                real_mod.Queue.__init__,
-                'threading')
+                real_mod.Queue.__init__, "threading"
+            )
         # save a reference to the unpatched module so it doesn't get lost
         sys.modules[original_name] = real_mod
     finally:
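
original() above is the inverse of monkey-patching: it hands back a pristine, unpatched copy of a stdlib module. A hedged usage sketch:

    # Hedged usage sketch of patcher.original(): even after monkey-patching,
    # grab references to the real, unpatched stdlib modules.
    import eventlet
    eventlet.monkey_patch()

    from eventlet.patcher import original

    real_threading = original("threading")   # unpatched threading module
    real_time = original("time")             # unpatched time module

    # time.sleep() is now green, but real_time.sleep() blocks the OS thread,
    # and real_threading.Thread starts a genuine OS thread.
    real_time.sleep(0.01)
    t = real_threading.Thread(target=lambda: None)
    t.start()
    t.join()
    print("ran a real OS thread and a real blocking sleep")
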
@@ -223,6 +230,99 @@ def original(modname):
 already_patched = {}
 
 
+def _unmonkey_patch_asyncio(unmonkeypatch_refs_to_this_module):
+    """
+    When using the asyncio hub, we want the asyncio modules to use the original,
+    blocking APIs.  So un-monkeypatch references to the given module name, e.g.
+    "select".
+    """
+    to_unpatch = unmonkeypatch_refs_to_this_module
+    original_module = original(to_unpatch)
+
+    # Further down, for the asyncio modules, we switch their imported modules
+    # to the original ones instead of the green ones they probably have. This
+    # won't fix "from socket import whatever", but asyncio doesn't seem to do
+    # that in ways we care about for Python 3.8 to 3.13, with the one
+    # exception of get_ident() in some older versions.
+    if to_unpatch == "_thread":
+        import asyncio.base_futures
+
+        if hasattr(asyncio.base_futures, "get_ident"):
+            asyncio.base_futures.get_ident = original_module.get_ident
+
+    # Asyncio uses these for its blocking thread pool:
+    if to_unpatch in ("threading", "queue"):
+        try:
+            import concurrent.futures.thread
+        except RuntimeError:
+            # This happens in weird edge cases where asyncio hub is started at
+            # shutdown. Not much we can do if this happens.
+            pass
+        else:
+            if to_unpatch == "threading":
+                concurrent.futures.thread.threading = original_module
+            if to_unpatch == "queue":
+                concurrent.futures.thread.queue = original_module
+
+    # Patch asyncio modules:
+    for module_name in [
+        "asyncio.base_events",
+        "asyncio.base_futures",
+        "asyncio.base_subprocess",
+        "asyncio.base_tasks",
+        "asyncio.constants",
+        "asyncio.coroutines",
+        "asyncio.events",
+        "asyncio.exceptions",
+        "asyncio.format_helpers",
+        "asyncio.futures",
+        "asyncio",
+        "asyncio.locks",
+        "asyncio.log",
+        "asyncio.mixins",
+        "asyncio.protocols",
+        "asyncio.queues",
+        "asyncio.runners",
+        "asyncio.selector_events",
+        "asyncio.sslproto",
+        "asyncio.staggered",
+        "asyncio.streams",
+        "asyncio.subprocess",
+        "asyncio.taskgroups",
+        "asyncio.tasks",
+        "asyncio.threads",
+        "asyncio.timeouts",
+        "asyncio.transports",
+        "asyncio.trsock",
+        "asyncio.unix_events",
+    ]:
+        try:
+            module = importlib.import_module(module_name)
+        except ImportError:
+            # The list is from Python 3.13, so some modules may not be present
+            # in older versions of Python:
+            continue
+        if getattr(module, to_unpatch, None) is sys.modules[to_unpatch]:
+            setattr(module, to_unpatch, original_module)
+
+
+def _unmonkey_patch_asyncio_all():
+    """
+    Unmonkey-patch all referred-to modules in asyncio.
+    """
+    for module_name, _ in sum([
+        _green_os_modules(),
+        _green_select_modules(),
+        _green_socket_modules(),
+        _green_thread_modules(),
+        _green_time_modules(),
+        _green_builtins(),
+        _green_subprocess_modules(),
+    ], []):
+        _unmonkey_patch_asyncio(module_name)
+    original("selectors").select = original("select")
+
+
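
Taken together, _unmonkey_patch_asyncio_all() means that under the asyncio hub the event loop keeps the real, blocking primitives even after monkey_patch(). A hedged verification sketch, assuming the EVENTLET_HUB environment variable is honored for hub selection before eventlet initializes:

    # Hedged verification sketch: with the asyncio hub selected, monkey-patching
    # leaves asyncio's view of socket/select/selectors un-greened.
    import os
    os.environ["EVENTLET_HUB"] = "asyncio"   # assumption: set before eventlet runs

    import eventlet
    eventlet.monkey_patch()

    import asyncio.selector_events
    from eventlet.greenio.base import GreenSocket

    # asyncio still sees the original socket module, not the green one:
    assert asyncio.selector_events.socket.socket is not GreenSocket
    print("asyncio kept the original, blocking socket class")
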
 def monkey_patch(**on):
     """Globally patches certain system modules to be greenthread-friendly.
 
@@ -246,57 +346,68 @@ def monkey_patch(**on):
     # the hub calls into monkey-patched modules.
     eventlet.hubs.get_hub()
 
-    accepted_args = {'os', 'select', 'socket',
-                     'thread', 'time', 'psycopg', 'MySQLdb',
-                     'builtins', 'subprocess'}
+    accepted_args = {
+        "os",
+        "select",
+        "socket",
+        "thread",
+        "time",
+        "psycopg",
+        "MySQLdb",
+        "builtins",
+        "subprocess",
+    }
     # To make sure only one of them is passed here
-    assert not ('__builtin__' in on and 'builtins' in on)
+    assert not ("__builtin__" in on and "builtins" in on)
     try:
-        b = on.pop('__builtin__')
+        b = on.pop("__builtin__")
     except KeyError:
         pass
     else:
-        on['builtins'] = b
+        on["builtins"] = b
 
     default_on = on.pop("all", None)
 
     for k in on.keys():
         if k not in accepted_args:
-            raise TypeError("monkey_patch() got an unexpected "
-                            "keyword argument %r" % k)
+            raise TypeError(
+                "monkey_patch() got an unexpected " "keyword argument %r" % k
+            )
     if default_on is None:
         default_on = True not in on.values()
     for modname in accepted_args:
-        if modname == 'MySQLdb':
+        if modname == "MySQLdb":
             # MySQLdb is only on when explicitly patched for the moment
             on.setdefault(modname, False)
-        if modname == 'builtins':
+        if modname == "builtins":
             on.setdefault(modname, False)
         on.setdefault(modname, default_on)
 
-    if on['thread'] and not already_patched.get('thread'):
-        _green_existing_locks()
+    import threading
+
+    original_rlock_type = type(threading.RLock())
 
     modules_to_patch = []
     for name, modules_function in [
-        ('os', _green_os_modules),
-        ('select', _green_select_modules),
-        ('socket', _green_socket_modules),
-        ('thread', _green_thread_modules),
-        ('time', _green_time_modules),
-        ('MySQLdb', _green_MySQLdb),
-        ('builtins', _green_builtins),
-        ('subprocess', _green_subprocess_modules),
+        ("os", _green_os_modules),
+        ("select", _green_select_modules),
+        ("socket", _green_socket_modules),
+        ("thread", _green_thread_modules),
+        ("time", _green_time_modules),
+        ("MySQLdb", _green_MySQLdb),
+        ("builtins", _green_builtins),
+        ("subprocess", _green_subprocess_modules),
     ]:
         if on[name] and not already_patched.get(name):
             modules_to_patch += modules_function()
             already_patched[name] = True
 
-    if on['psycopg'] and not already_patched.get('psycopg'):
+    if on["psycopg"] and not already_patched.get("psycopg"):
         try:
             from eventlet.support import psycopg2_patcher
+
             psycopg2_patcher.make_psycopg_green()
-            already_patched['psycopg'] = True
+            already_patched["psycopg"] = True
         except ImportError:
             # note that if we get an importerror from trying to
             # monkeypatch psycopg, we will continually retry it
@@ -305,7 +416,7 @@ def monkey_patch(**on):
             # tell us whether or not we succeeded
             pass
 
-    _threading = original('threading')
+    _threading = original("threading")
     imp.acquire_lock()
     try:
         for name, mod in modules_to_patch:
@@ -316,13 +427,14 @@ def monkey_patch(**on):
                 patched_attr = getattr(mod, attr_name, None)
                 if patched_attr is not None:
                     setattr(orig_mod, attr_name, patched_attr)
-            deleted = getattr(mod, '__deleted__', [])
+            deleted = getattr(mod, "__deleted__", [])
             for attr_name in deleted:
                 if hasattr(orig_mod, attr_name):
                     delattr(orig_mod, attr_name)
 
             # https://github.com/eventlet/eventlet/issues/592
-            if name == 'threading' and register_at_fork:
+            if name == "threading" and register_at_fork:
+
                 def fix_threading_active(
                     _global_dict=_threading.current_thread.__globals__,
                     # alias orig_mod as patched to reflect its new state
@@ -332,21 +444,21 @@ def monkey_patch(**on):
                     _prefork_active = [None]
 
                     def before_fork():
-                        _prefork_active[0] = _global_dict['_active']
-                        _global_dict['_active'] = _patched._active
+                        _prefork_active[0] = _global_dict["_active"]
+                        _global_dict["_active"] = _patched._active
 
                     def after_fork():
-                        _global_dict['_active'] = _prefork_active[0]
+                        _global_dict["_active"] = _prefork_active[0]
+
+                    register_at_fork(before=before_fork, after_in_parent=after_fork)
 
-                    register_at_fork(
-                        before=before_fork,
-                        after_in_parent=after_fork)
                 fix_threading_active()
     finally:
         imp.release_lock()
 
     import importlib._bootstrap
-    thread = original('_thread')
+
+    thread = original("_thread")
     # importlib must use real thread locks, not eventlet.Semaphore
     importlib._bootstrap._thread = thread
 
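
The fix_threading_active() closure above swaps threading's module-global _active table around fork() using os.register_at_fork(); a minimal standalone sketch of that mechanism (names are illustrative, POSIX only):

    # Minimal sketch of the os.register_at_fork() mechanism used above to swap
    # a module-level global around fork(). Names are illustrative; POSIX only.
    import os

    STATE = {"value": "patched"}
    _saved = []

    def _before_fork():
        _saved.append(STATE["value"])
        STATE["value"] = "original"      # the child will see this value

    def _after_in_parent():
        STATE["value"] = _saved.pop()    # the parent restores the patched value

    os.register_at_fork(before=_before_fork, after_in_parent=_after_in_parent)

    pid = os.fork()
    if pid == 0:
        # Child: inherits the value installed by the before-fork hook.
        os._exit(0 if STATE["value"] == "original" else 1)
    os.waitpid(pid, 0)
    assert STATE["value"] == "patched"
    print("module global swapped for the child and restored in the parent")
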
@@ -355,13 +467,21 @@ def monkey_patch(**on):
     # threading.get_ident(). Force the Python implementation of RLock which
     # calls threading.get_ident() and so is compatible with eventlet.
     import threading
+
     threading.RLock = threading._PyRLock
 
     # Issue #508: Since Python 3.7 queue.SimpleQueue is implemented in C,
     # causing a deadlock.  Replace the C implementation with the Python one.
     import queue
+
     queue.SimpleQueue = queue._PySimpleQueue
 
+    # Green existing locks _after_ patching modules, since patching modules
+    # might involve imports that create new locks:
+    for name, _ in modules_to_patch:
+        if name == "threading":
+            _green_existing_locks(original_rlock_type)
+
 
 def is_monkey_patched(module):
     """Returns True if the given module is monkeypatched currently, False if
@@ -371,11 +491,13 @@ def is_monkey_patched(module):
     module some other way than with the import keyword (including
     import_patched), this might not be correct about that particular
     module."""
-    return module in already_patched or \
-        getattr(module, '__name__', None) in already_patched
+    return (
+        module in already_patched
+        or getattr(module, "__name__", None) in already_patched
+    )
 
 
-def _green_existing_locks():
+def _green_existing_locks(rlock_type):
     """Make locks created before monkey-patching safe.
 
     RLocks rely on a Lock and on Python 2, if an unpatched Lock blocks, it
@@ -384,9 +506,7 @@ def _green_existing_locks():
     This was originally noticed in the stdlib logging module."""
     import gc
     import os
-    import threading
     import eventlet.green.thread
-    rlock_type = type(threading.RLock())
 
     # We're monkey-patching so there can't be any greenlets yet, ergo our thread
     # ID is the only valid owner possible.
@@ -410,11 +530,29 @@ def _green_existing_locks():
     gc.collect()
     remaining_rlocks = len({o for o in gc.get_objects() if isinstance(o, rlock_type)})
     if remaining_rlocks:
+        try:
+            import _frozen_importlib
+        except ImportError:
+            pass
+        else:
+            for o in gc.get_objects():
+                # This can happen in Python 3.12, at least, if monkey patch
+                # happened as side-effect of importing a module.
+                if not isinstance(o, rlock_type):
+                    continue
+                if _frozen_importlib._ModuleLock in map(type, gc.get_referrers(o)):
+                    remaining_rlocks -= 1
+                del o
+
+    if remaining_rlocks:
         import logging
+
         logger = logging.Logger("eventlet")
-        logger.error("{} RLock(s) were not greened,".format(remaining_rlocks) +
-                     " to fix this error make sure you run eventlet.monkey_patch() " +
-                     "before importing any other modules.")
+        logger.error(
+            "{} RLock(s) were not greened,".format(remaining_rlocks)
+            + " to fix this error make sure you run eventlet.monkey_patch() "
+            + "before importing any other modules."
+        )
 
 
 def _upgrade_instances(container, klass, upgrade, visited=None, old_to_new=None):
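
The extra pass above discounts RLocks that are only held by _frozen_importlib module locks before warning. For reference, a standalone sketch of the gc-based inventory it performs (pure introspection, no eventlet involved):

    # Standalone sketch of the gc-based inventory used above: find every live
    # RLock and look at what refers to it.
    import gc
    import threading

    rlock_type = type(threading.RLock())
    sample = threading.RLock()           # make sure at least one instance exists

    gc.collect()
    rlocks = [o for o in gc.get_objects() if isinstance(o, rlock_type)]
    print(f"{len(rlocks)} RLock instance(s) currently alive")

    for lock in rlocks:
        referrer_types = {type(r).__name__ for r in gc.get_referrers(lock)}
        print(referrer_types)
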
@@ -475,10 +613,14 @@ def _upgrade_instances(container, klass,
                     setattr(container, k, new)
         except:
             import logging
+
             logger = logging.Logger("eventlet")
-            logger.exception("An exception was thrown while monkey_patching for eventlet. "
-                             "to fix this error make sure you run eventlet.monkey_patch() "
-                             "before importing any other modules.", exc_info=True)
+            logger.exception(
+                "An exception was thrown while monkey_patching for eventlet. "
+                "to fix this error make sure you run eventlet.monkey_patch() "
+                "before importing any other modules.",
+                exc_info=True,
+            )
 
 
 def _convert_py3_rlock(old, tid):
@@ -491,14 +633,16 @@ def _convert_py3_rlock(old, tid):
     """
     import threading
     from eventlet.green.thread import allocate_lock
+
     new = threading._PyRLock()
     if not hasattr(new, "_block") or not hasattr(new, "_owner"):
         # These will only fail if Python changes its internal implementation of
         # _PyRLock:
         raise RuntimeError(
-            "INTERNAL BUG. Perhaps you are using a major version " +
-            "of Python that is unsupported by eventlet? Please file a bug " +
-            "at https://github.com/eventlet/eventlet/issues/new")
+            "INTERNAL BUG. Perhaps you are using a major version "
+            + "of Python that is unsupported by eventlet? Please file a bug "
+            + "at https://github.com/eventlet/eventlet/issues/new"
+        )
     new._block = allocate_lock()
     acquired = False
     while old._is_owned():
@@ -515,49 +659,58 @@ def _convert_py3_rlock(old, tid):
 
 def _green_os_modules():
     from eventlet.green import os
-    return [('os', os)]
+
+    return [("os", os)]
 
 
 def _green_select_modules():
     from eventlet.green import select
-    modules = [('select', select)]
+
+    modules = [("select", select)]
 
     from eventlet.green import selectors
-    modules.append(('selectors', selectors))
+
+    modules.append(("selectors", selectors))
 
     return modules
 
 
 def _green_socket_modules():
     from eventlet.green import socket
+
     try:
         from eventlet.green import ssl
-        return [('socket', socket), ('ssl', ssl)]
+
+        return [("socket", socket), ("ssl", ssl)]
     except ImportError:
-        return [('socket', socket)]
+        return [("socket", socket)]
 
 
 def _green_subprocess_modules():
     from eventlet.green import subprocess
-    return [('subprocess', subprocess)]
+
+    return [("subprocess", subprocess)]
 
 
 def _green_thread_modules():
     from eventlet.green import Queue
     from eventlet.green import thread
     from eventlet.green import threading
-    return [('queue', Queue), ('_thread', thread), ('threading', threading)]
+
+    return [("queue", Queue), ("_thread", thread), ("threading", threading)]
 
 
 def _green_time_modules():
     from eventlet.green import time
-    return [('time', time)]
+
+    return [("time", time)]
 
 
 def _green_MySQLdb():
     try:
         from eventlet.green import MySQLdb
-        return [('MySQLdb', MySQLdb)]
+
+        return [("MySQLdb", MySQLdb)]
     except ImportError:
         return []
 
@@ -565,7 +718,8 @@ def _green_MySQLdb():
 def _green_builtins():
     try:
         from eventlet.green import builtin
-        return [('builtins', builtin)]
+
+        return [("builtins", builtin)]
     except ImportError:
         return []
 
@@ -580,16 +734,18 @@ def slurp_properties(source, destination
     """
     if srckeys is None:
         srckeys = source.__all__
-    destination.update({
-        name: getattr(source, name)
-        for name in srckeys
-        if not (name.startswith('__') or name in ignore)
-    })
+    destination.update(
+        {
+            name: getattr(source, name)
+            for name in srckeys
+            if not (name.startswith("__") or name in ignore)
+        }
+    )
 
 
 if __name__ == "__main__":
     sys.argv.pop(0)
     monkey_patch()
     with open(sys.argv[0]) as f:
-        code = compile(f.read(), sys.argv[0], 'exec')
+        code = compile(f.read(), sys.argv[0], "exec")
         exec(code)
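
slurp_properties() above copies the names listed in a source module's __all__ into a destination namespace, skipping dunders and ignored names. A small hedged sketch using a throwaway module (fake_source is built purely for illustration):

    # Hedged sketch of slurp_properties() behavior under those assumptions.
    import types
    from eventlet.patcher import slurp_properties

    source = types.ModuleType("fake_source")
    source.__all__ = ["greeting", "answer"]
    source.greeting = "hello"
    source.answer = 42

    destination = {}
    slurp_properties(source, destination, ignore=["answer"])
    assert destination == {"greeting": "hello"}
    print("copied:", destination)
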
diff -pruN 0.36.1-12/eventlet/wsgi.py 0.39.0-0ubuntu1/eventlet/wsgi.py
--- 0.36.1-12/eventlet/wsgi.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/eventlet/wsgi.py	2020-02-02 00:00:00.000000000 +0000
@@ -142,8 +142,7 @@ class Input:
             # 100 Continue response
             self.send_hundred_continue_response()
             self.is_hundred_continue_response_sent = True
-        if (self.content_length is not None) and (
-                length is None or length > self.content_length - self.position):
+        if length is None or length > self.content_length - self.position:
             length = self.content_length - self.position
         if not length:
             return b''
@@ -354,6 +353,11 @@ class HttpProtocol(BaseHTTPServer.BaseHT
         self.client_address = conn_state[0]
         self.conn_state = conn_state
         self.server = server
+        # Want to allow some overrides from the server before running setup
+        if server.minimum_chunk_size is not None:
+            self.minimum_chunk_size = server.minimum_chunk_size
+        self.capitalize_response_headers = server.capitalize_response_headers
+
         self.setup()
         try:
             self.handle()
@@ -508,6 +512,9 @@ class HttpProtocol(BaseHTTPServer.BaseHT
         use_chunked = [False]
         length = [0]
         status_code = [200]
+        # Responses with a 1xx, 204, or 304 status, responses to HEAD, and 2xx responses to CONNECT MUST NOT include a body or related framing headers
+        # https://httpwg.org/specs/rfc7230.html#rfc.section.3.3.1
+        bodyless = [False]
 
         def write(data):
             towrite = []
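
The bodyless flag added here is filled in by start_response() later in this file; a hedged standalone restatement of that predicate (the helper name is hypothetical, the real code stores the result in bodyless[0]):

    # Restatement of the "bodyless" rule: these responses must not carry a
    # message body, so chunked framing / auto Content-Length are skipped.
    def response_is_bodyless(status_code, request_method):
        return (
            status_code in (204, 304)
            or request_method == "HEAD"
            or 100 <= status_code < 200
            or (request_method == "CONNECT" and 200 <= status_code < 300)
        )

    assert response_is_bodyless(204, "DELETE")
    assert response_is_bodyless(304, "GET")
    assert response_is_bodyless(101, "GET")
    assert response_is_bodyless(200, "CONNECT")
    assert not response_is_bodyless(200, "GET")
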
@@ -539,10 +546,12 @@ class HttpProtocol(BaseHTTPServer.BaseHT
                     self.close_connection = 1
 
                 if 'content-length' not in header_list:
-                    if self.request_version == 'HTTP/1.1':
+                    if bodyless[0]:
+                        pass  # client didn't expect a body anyway
+                    elif self.request_version == 'HTTP/1.1':
                         use_chunked[0] = True
                         towrite.append(b'Transfer-Encoding: chunked\r\n')
-                    elif 'content-length' not in header_list:
+                    else:
                         # client is 1.0 and therefore must read to EOF
                         self.close_connection = 1
 
@@ -567,7 +576,7 @@ class HttpProtocol(BaseHTTPServer.BaseHT
             length[0] = length[0] + sum(map(len, towrite))
 
         def start_response(status, response_headers, exc_info=None):
-            status_code[0] = status.split()[0]
+            status_code[0] = int(status.split(" ", 1)[0])
             if exc_info:
                 try:
                     if headers_sent:
@@ -577,6 +586,13 @@ class HttpProtocol(BaseHTTPServer.BaseHT
                     # Avoid dangling circular ref
                     exc_info = None
 
+            bodyless[0] = (
+                status_code[0] in (204, 304)
+                or self.command == "HEAD"
+                or (100 <= status_code[0] < 200)
+                or (self.command == "CONNECT" and 200 <= status_code[0] < 300)
+            )
+
             # Response headers capitalization
             # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
             # Per HTTP RFC standard, header name is case-insensitive.
@@ -600,7 +616,7 @@ class HttpProtocol(BaseHTTPServer.BaseHT
                 # Set content-length if possible
                 if headers_set and not headers_sent and hasattr(result, '__len__'):
                     # We've got a complete final response
-                    if 'Content-Length' not in [h for h, _v in headers_set[1]]:
+                    if not bodyless[0] and 'Content-Length' not in [h for h, _v in headers_set[1]]:
                         headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
                     if request_input.should_send_hundred_continue:
                         # We've got a complete final response, and never sent a 100 Continue.
@@ -706,6 +722,23 @@ class HttpProtocol(BaseHTTPServer.BaseHT
                 host = forward + ',' + host
         return (host, port)
 
+    def formalize_key_naming(self, k):
+        """
+        Headers containing underscores are permitted by RFC 9110, but eventlet
+        would join headers with different names into the same environment
+        variable, dangerously confusing applications as to which is which.
+        Cf.
+            - Nginx: http://nginx.org/en/docs/http/ngx_http_core_module.html#underscores_in_headers
+            - Django: https://www.djangoproject.com/weblog/2015/jan/13/security/
+            - Gunicorn: https://github.com/benoitc/gunicorn/commit/72b8970dbf2bf3444eb2e8b12aeff1a3d5922a9a
+            - Werkzeug: https://github.com/pallets/werkzeug/commit/5ee439a692dc4474e0311de2496b567eed2d02cf
+            - ...
+        """
+        if "_" in k:
+            return
+
+        return k.replace('-', '_').upper()
+
     def get_environ(self):
         env = self.server.get_environ()
         env['REQUEST_METHOD'] = self.command
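
A quick standalone restatement of the normalization above, showing which header names survive into the WSGI environ (the free function mirrors the method for illustration):

    # Names containing an underscore are dropped; otherwise '-' becomes '_'
    # and the name is upper-cased for the WSGI environ.
    def formalize_key_naming(k):
        if "_" in k:
            return None
        return k.replace('-', '_').upper()

    assert formalize_key_naming("X-Forwarded-For") == "X_FORWARDED_FOR"
    assert formalize_key_naming("Content-Type") == "CONTENT_TYPE"
    assert formalize_key_naming("X_Forwarded_For") is None  # would collide
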
@@ -748,7 +781,10 @@ class HttpProtocol(BaseHTTPServer.BaseHT
 
         env['headers_raw'] = headers_raw = tuple((k, v.strip(' \t\n\r')) for k, v in headers)
         for k, v in headers_raw:
-            k = k.replace('-', '_').upper()
+            k = self.formalize_key_naming(k)
+            if not k:
+                continue
+
             if k in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
                 # These do not get the HTTP_ prefix and were handled above
                 continue
@@ -765,6 +801,11 @@ class HttpProtocol(BaseHTTPServer.BaseHT
             wfile = None
             wfile_line = None
         chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
+        if not chunked and length is None:
+            # https://www.rfc-editor.org/rfc/rfc9112#section-6.3-2.7
+            # "If this is a request message and none of the above are true, then
+            # the message body length is zero (no message body is present)."
+            length = '0'
         env['wsgi.input'] = env['eventlet.input'] = Input(
             self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line,
             chunked_input=chunked)
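
The fallback added here implements the RFC 9112 section 6.3 rule that a request with neither chunked Transfer-Encoding nor Content-Length has no body. A hedged restatement as a standalone helper (hypothetical name):

    # No chunked framing and no Content-Length means no request body, so the
    # effective length defaults to '0'; header values stay as strings here,
    # matching how the code above passes them to Input().
    def effective_request_body_length(content_length, transfer_encoding):
        chunked = (transfer_encoding or '').lower() == 'chunked'
        if chunked:
            return None       # length is determined by the chunked framing
        if content_length is None:
            return '0'        # no message body is present
        return content_length

    assert effective_request_body_length(None, None) == '0'
    assert effective_request_body_length('6', None) == '6'
    assert effective_request_body_length(None, 'chunked') is None
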
@@ -858,15 +899,10 @@ class Server(BaseHTTPServer.HTTPServer):
         return d
 
     def process_request(self, conn_state):
-        # The actual request handling takes place in __init__, so we need to
-        # set minimum_chunk_size before __init__ executes and we don't want to modify
-        # class variable
-        proto = new(self.protocol)
-        if self.minimum_chunk_size is not None:
-            proto.minimum_chunk_size = self.minimum_chunk_size
-        proto.capitalize_response_headers = self.capitalize_response_headers
         try:
-            proto.__init__(conn_state, self)
+            # protocol is responsible for pulling out any overrides it needs itself
+            # before it starts processing
+            self.protocol(conn_state, self)
         except socket.timeout:
             # Expected exceptions are not exceptional
             conn_state[1].close()
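
The overrides the protocol now copies in its __init__ (minimum_chunk_size, capitalize_response_headers) are the ones normally configured through eventlet.wsgi.server(); a hedged usage sketch:

    # Hedged usage sketch: server-level options forwarded to each HttpProtocol.
    import eventlet
    from eventlet import wsgi

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello\n']

    listener = eventlet.listen(('127.0.0.1', 8090))
    wsgi.server(
        listener,
        app,
        minimum_chunk_size=4096,           # picked up by each HttpProtocol
        capitalize_response_headers=True,  # the default, shown for completeness
    )  # serves until interrupted
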
@@ -880,12 +916,6 @@ Please use server.log.info instead.''')
 
 
 try:
-    new = types.InstanceType
-except AttributeError:
-    new = lambda cls: cls.__new__(cls)
-
-
-try:
     import ssl
     ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError)
     ACCEPT_ERRNO = {errno.EPIPE, errno.ECONNRESET,
diff -pruN 0.36.1-12/pyproject.toml 0.39.0-0ubuntu1/pyproject.toml
--- 0.36.1-12/pyproject.toml	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/pyproject.toml	2020-02-02 00:00:00.000000000 +0000
@@ -17,7 +17,7 @@ authors = [
 ]
 description = "Highly concurrent networking library"
 readme = "README.rst"
-requires-python = ">=3.7"
+requires-python = ">=3.8"
 license = {text = "MIT"}
 classifiers = [
     "Development Status :: 4 - Beta",
@@ -27,12 +27,12 @@ classifiers = [
     "Operating System :: Microsoft :: Windows",
     "Operating System :: POSIX",
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3.7",
     "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
     "Programming Language :: Python",
     "Topic :: Internet",
     "Topic :: Software Development :: Libraries :: Python Modules",
@@ -41,7 +41,6 @@ dynamic = ["version"]
 dependencies = [
     'dnspython >= 1.15.0',
     'greenlet >= 1.0',
-    'monotonic >= 1.4;python_version<"3.5"',
 ]
 
 [project.urls]
diff -pruN 0.36.1-12/tests/api_test.py 0.39.0-0ubuntu1/tests/api_test.py
--- 0.36.1-12/tests/api_test.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/api_test.py	2020-02-02 00:00:00.000000000 +0000
@@ -184,3 +184,7 @@ def test_timeouterror_deprecated():
     code = '''import eventlet; eventlet.Timeout(1).cancel(); print('pass')'''
     args = ['-Werror:eventlet.Timeout:DeprecationWarning', '-c', code]
     tests.run_python(path=None, args=args, expect_pass=True)
+
+
+def test_zero_second_sleep():
+    tests.run_isolated("zero_second_sleep.py")
diff -pruN 0.36.1-12/tests/asyncio_test.py 0.39.0-0ubuntu1/tests/asyncio_test.py
--- 0.36.1-12/tests/asyncio_test.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/asyncio_test.py	2020-02-02 00:00:00.000000000 +0000
@@ -1,17 +1,20 @@
 """Tests for asyncio integration."""
 
+import pytest
+
+import eventlet
+from eventlet.hubs import get_hub
+from eventlet.hubs.asyncio import Hub as AsyncioHub
+if not isinstance(get_hub(), AsyncioHub):
+    pytest.skip("Only works on asyncio hub", allow_module_level=True)
+
 import asyncio
 from time import time
 import socket
 import sys
 
-import pytest
-
 from greenlet import GreenletExit
 
-import eventlet
-from eventlet.hubs import get_hub
-from eventlet.hubs.asyncio import Hub as AsyncioHub
 from eventlet.asyncio import spawn_for_awaitable
 from eventlet.greenthread import getcurrent
 from eventlet.support import greendns
@@ -19,9 +22,6 @@ from .wsgi_test import _TestBase, Site
 
 import tests
 
-if not isinstance(get_hub(), AsyncioHub):
-    pytest.skip("Only works on asyncio hub", allow_module_level=True)
-
 
 class CallingAsyncFunctionsFromGreenletsHighLevelTests(_TestBase):
     """
@@ -298,9 +298,16 @@ def test_asyncio_to_thread():
     tests.run_isolated("asyncio_to_thread.py")
 
 
-def test_asyncio_does_not_use_greendns(monkeypatch):
+def test_asyncio_does_not_use_greendns():
     """
     ``asyncio`` loops' ``getaddrinfo()`` and ``getnameinfo()`` do not use green
     DNS.
     """
     tests.run_isolated("asyncio_dns.py")
+
+
+def test_make_sure_monkey_patching_asyncio_is_restricted():
+    """
+    ``asyncio`` continues to use the original, unpatched ``socket`` and related classes.
+    """
+    tests.run_isolated("asyncio_correct_patching.py")
diff -pruN 0.36.1-12/tests/debug_test.py 0.39.0-0ubuntu1/tests/debug_test.py
--- 0.36.1-12/tests/debug_test.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/debug_test.py	2020-02-02 00:00:00.000000000 +0000
@@ -1,4 +1,5 @@
 import io
+import os
 import sys
 
 from eventlet import debug
@@ -50,7 +51,8 @@ class TestSpew(tests.LimitedTestCase):
         s(f, "line", None)
         output = sys.stdout.getvalue()
         assert "[unknown]:%i" % lineno in output, "Didn't find [unknown]:%i in %s" % (lineno, output)
-        assert "VM instruction #" in output, output
+        if "PYTEST_XDIST_WORKER" not in os.environ:
+            assert "VM instruction #" in output, output
 
     def test_line_global(self):
         frame_str = "f=<frame at"
diff -pruN 0.36.1-12/tests/hub_test.py 0.39.0-0ubuntu1/tests/hub_test.py
--- 0.36.1-12/tests/hub_test.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/hub_test.py	2020-02-02 00:00:00.000000000 +0000
@@ -326,10 +326,12 @@ def test_repeated_select_bad_fd():
     once()
 
 
+@pytest.mark.skipif(sys.platform == "darwin", reason="on macOS using fork() is discouraged")
 def test_fork():
     tests.run_isolated('hub_fork.py')
 
 
+@pytest.mark.skipif(sys.platform == "darwin", reason="on macOS using fork() is discouraged")
 def test_fork_simple():
     tests.run_isolated('hub_fork_simple.py')
 
diff -pruN 0.36.1-12/tests/isolated/asyncio_correct_patching.py 0.39.0-0ubuntu1/tests/isolated/asyncio_correct_patching.py
--- 0.36.1-12/tests/isolated/asyncio_correct_patching.py	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/asyncio_correct_patching.py	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,32 @@
+"""
+asyncio submodules continue to have the original, real socket module even after
+monkeypatching.
+"""
+
+import sys
+
+
+def assert_correct_patching():
+    from eventlet.greenio.base import GreenSocket
+    import asyncio.selector_events
+    if asyncio.selector_events.socket.socket is GreenSocket:
+        raise RuntimeError("Wrong socket class, should've been normal socket.socket")
+
+    import asyncio.selector_events
+    if asyncio.selector_events.selectors is not sys.modules["__original_module_selectors"]:
+        raise RuntimeError("Wrong selectors")
+
+    if asyncio.selector_events.selectors.select is not sys.modules["__original_module_select"]:
+        raise RuntimeError("Wrong select")
+
+
+import eventlet.hubs
+eventlet.hubs.get_hub()
+assert_correct_patching()
+
+import eventlet
+eventlet.monkey_patch()
+assert_correct_patching()
+
+
+print("pass")
diff -pruN 0.36.1-12/tests/isolated/asyncio_dns.py 0.39.0-0ubuntu1/tests/isolated/asyncio_dns.py
--- 0.36.1-12/tests/isolated/asyncio_dns.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/asyncio_dns.py	2020-02-02 00:00:00.000000000 +0000
@@ -1,3 +1,6 @@
+import eventlet
+eventlet.monkey_patch()
+
 import asyncio
 import socket
 
@@ -12,10 +15,6 @@ def fail(*args, **kwargs):
 greendns.resolve = fail
 greendns.resolver.query = fail
 
-import eventlet
-
-eventlet.monkey_patch()
-
 
 async def lookups():
     loop = asyncio.get_running_loop()
diff -pruN 0.36.1-12/tests/isolated/asyncio_to_thread.py 0.39.0-0ubuntu1/tests/isolated/asyncio_to_thread.py
--- 0.36.1-12/tests/isolated/asyncio_to_thread.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/asyncio_to_thread.py	2020-02-02 00:00:00.000000000 +0000
@@ -1,8 +1,9 @@
 import eventlet
+eventlet.monkey_patch()
+
 from eventlet.patcher import original
 from eventlet.asyncio import spawn_for_awaitable
 
-eventlet.monkey_patch()
 import asyncio
 
 
diff -pruN 0.36.1-12/tests/isolated/os_read_nonblocking.py 0.39.0-0ubuntu1/tests/isolated/os_read_nonblocking.py
--- 0.36.1-12/tests/isolated/os_read_nonblocking.py	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/os_read_nonblocking.py	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,30 @@
+if __name__ == '__main__':
+    import eventlet
+    from eventlet.greenthread import sleep, spawn
+
+    eventlet.monkey_patch()
+
+    import signal
+    import os
+
+    thread = None
+    timed_out = False
+
+    def on_timeout(signum, frame):
+        global timed_out
+        timed_out = True
+        thread.kill()
+
+    def blocking_read(fd):
+        os.read(fd, 4096)
+
+    signal.signal(signal.SIGALRM, on_timeout)
+    signal.alarm(3)
+
+    read_fd, write_fd = os.pipe()
+    thread = spawn(blocking_read, read_fd)
+    sleep(0)
+
+    assert not timed_out
+
+    print('pass')
diff -pruN 0.36.1-12/tests/isolated/os_write_nonblocking.py 0.39.0-0ubuntu1/tests/isolated/os_write_nonblocking.py
--- 0.36.1-12/tests/isolated/os_write_nonblocking.py	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/os_write_nonblocking.py	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,35 @@
+if __name__ == '__main__':
+    import eventlet
+    from eventlet.greenthread import sleep, spawn
+
+    eventlet.monkey_patch()
+
+    import signal
+    import os
+
+    thread = None
+    timed_out = False
+
+    def on_timeout(signum, frame):
+        global timed_out
+        timed_out = True
+        thread.kill()
+
+    def blocking_write(fd):
+        # Once the write buffer fills up, the greenthread should go to sleep
+        # instead of blocking. Write 1 byte at a time, since writing large
+        # chunks will block even if select/poll claims otherwise.
+        for i in range(1, 1000000):
+            os.write(fd, b'\0')
+
+    signal.signal(signal.SIGALRM, on_timeout)
+    signal.alarm(5)
+
+    read_fd, write_fd = os.pipe()
+    thread = spawn(blocking_write, write_fd)
+    # 2 secs is enough time for write buffer to fill up
+    sleep(2)
+
+    assert not timed_out
+
+    print('pass')
diff -pruN 0.36.1-12/tests/isolated/osthreads.py 0.39.0-0ubuntu1/tests/isolated/osthreads.py
--- 0.36.1-12/tests/isolated/osthreads.py	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/osthreads.py	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,25 @@
+import eventlet
+import eventlet.patcher
+
+eventlet.monkey_patch()
+
+threading_orig = eventlet.patcher.original("threading")
+
+EVENTS = []
+
+
+def os_thread_2():
+    eventlet.sleep(0.1)
+    EVENTS.append(2)
+    eventlet.sleep(0.1)
+    EVENTS.append(2)
+
+
+threading_orig.Thread(target=os_thread_2).start()
+EVENTS.append(1)
+eventlet.sleep(0.05)
+EVENTS.append(1)
+eventlet.sleep(0.4)
+EVENTS.append(3)
+if EVENTS == [1, 1, 2, 2, 3]:
+    print("pass")
diff -pruN 0.36.1-12/tests/isolated/patcher_existing_locks_preexisting.py 0.39.0-0ubuntu1/tests/isolated/patcher_existing_locks_preexisting.py
--- 0.36.1-12/tests/isolated/patcher_existing_locks_preexisting.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/patcher_existing_locks_preexisting.py	2020-02-02 00:00:00.000000000 +0000
@@ -31,6 +31,10 @@ if __name__ == '__main__':
     if sys.version_info[:2] > (3, 9):
         print(unittest.mock.NonCallableMock._lock)
     print(NS.lock)
+    # unittest.mock imports asyncio, so clear out asyncio.
+    for name in list(sys.modules.keys()):
+        if name.startswith("asyncio"):
+            del sys.modules[name]
     eventlet.monkey_patch()
     ensure_upgraded(NS.lock)
     ensure_upgraded(NS.NS2.lock)
diff -pruN 0.36.1-12/tests/isolated/patcher_threading_subclass_done.py 0.39.0-0ubuntu1/tests/isolated/patcher_threading_subclass_done.py
--- 0.36.1-12/tests/isolated/patcher_threading_subclass_done.py	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/patcher_threading_subclass_done.py	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,40 @@
+import queue
+import threading
+
+
+class Worker(threading.Thread):
+    EXIT_SENTINEL = object()
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.q = queue.Queue(maxsize=-1)
+        self.daemon = True
+
+    def run(self):
+        while True:
+            task = self.q.get()
+            if task == self.EXIT_SENTINEL:
+                break
+            print(f"Treating task {task}")
+            # Pretend to work
+
+    def submit(self, job):
+        self.q.put(job)
+
+    def terminate(self):
+        self.q.put(self.EXIT_SENTINEL)
+        self.join()
+
+
+if __name__ == "__main__":
+    import eventlet
+    eventlet.patcher.monkey_patch()
+
+    worker = Worker()
+    assert not worker.is_alive()
+    worker.start()
+    assert worker.is_alive()
+    worker.submit(1)
+    worker.terminate()
+    assert not worker.is_alive()
+    print("pass")
diff -pruN 0.36.1-12/tests/isolated/zero_second_sleep.py 0.39.0-0ubuntu1/tests/isolated/zero_second_sleep.py
--- 0.36.1-12/tests/isolated/zero_second_sleep.py	1970-01-01 00:00:00.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/isolated/zero_second_sleep.py	2020-02-02 00:00:00.000000000 +0000
@@ -0,0 +1,31 @@
+import eventlet
+
+eventlet.sleep(0)
+eventlet.monkey_patch()
+
+from eventlet.hubs import get_hub
+import time
+
+FAILURES = []
+
+
+def zero_second_sleep():
+    try:
+        eventlet.sleep(0)
+        time.sleep(0)
+    except RuntimeError:
+        FAILURES.append(1)
+        raise
+
+
+# Simulate sleep(0) being called from a trampoline function. Or try to, anyway;
+# it is not clear this matches the originally reported bug, but without the
+# sleep(0) fix it does at least trigger the RuntimeError about calling
+# blocking functions.
+get_hub().schedule_call_local(0, zero_second_sleep)
+zero_second_sleep()
+
+if FAILURES:
+    raise RuntimeError("There were failures")
+
+print("pass")
diff -pruN 0.36.1-12/tests/os_test.py 0.39.0-0ubuntu1/tests/os_test.py
--- 0.36.1-12/tests/os_test.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/os_test.py	2020-02-02 00:00:00.000000000 +0000
@@ -1,4 +1,5 @@
 import eventlet
+import tests
 
 
 def test_pathlib_open_issue_534():
@@ -7,3 +8,11 @@ def test_pathlib_open_issue_534():
     with path.open():
         # should not raise
         pass
+
+
+def test_os_read_nonblocking():
+    tests.run_isolated('os_read_nonblocking.py')
+
+
+def test_os_write_nonblocking():
+    tests.run_isolated('os_write_nonblocking.py')
diff -pruN 0.36.1-12/tests/patcher_test.py 0.39.0-0ubuntu1/tests/patcher_test.py
--- 0.36.1-12/tests/patcher_test.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/patcher_test.py	2020-02-02 00:00:00.000000000 +0000
@@ -536,3 +536,7 @@ def test_patcher_existing_locks():
 
 def test_patcher_existing_locks_exception():
     tests.run_isolated("patcher_existing_locks_exception.py")
+
+
+def test_patcher_threading_subclass_done():
+    tests.run_isolated("patcher_threading_subclass_done.py")
diff -pruN 0.36.1-12/tests/thread_test.py 0.39.0-0ubuntu1/tests/thread_test.py
--- 0.36.1-12/tests/thread_test.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/thread_test.py	2020-02-02 00:00:00.000000000 +0000
@@ -8,7 +8,7 @@ from eventlet import greenthread
 from eventlet import patcher
 from eventlet.green import thread
 
-from tests import LimitedTestCase
+from tests import LimitedTestCase, run_isolated
 
 
 class Locals(LimitedTestCase):
@@ -122,3 +122,7 @@ def test_reinit():
     lk._at_fork_reinit()
     assert lk.acquire(blocking=False)
     assert not lk.acquire(blocking=False)
+
+
+def test_can_use_eventlet_in_os_threads():
+    run_isolated("osthreads.py")
diff -pruN 0.36.1-12/tests/wsgi_test.py 0.39.0-0ubuntu1/tests/wsgi_test.py
--- 0.36.1-12/tests/wsgi_test.py	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tests/wsgi_test.py	2020-02-02 00:00:00.000000000 +0000
@@ -217,6 +217,8 @@ def read_http(sock):
     if content_length_str:
         num = int(content_length_str)
         body = fd.read(num)
+    elif response_line.split()[1] in ('204', '304'):
+        body = ''
     else:
         # read until EOF
         body = fd.read()
@@ -1924,8 +1926,8 @@ class TestHttpd(_TestBase):
         result = read_http(sock)
         sock.close()
         assert result.status == 'HTTP/1.1 200 OK', 'Received status {!r}'.format(result.status)
-        assert result.body == (b'HTTP_HOST: localhost\nHTTP_HTTP_X_ANY_K: two\n'
-                               b'HTTP_PATH_INFO: foo\nHTTP_X_ANY_K: one\n')
+        assert result.body == (b'HTTP_HOST: localhost\n'
+                               b'HTTP_PATH_INFO: foo\n')
 
     def test_env_header_stripping(self):
         def app(environ, start_response):
@@ -1994,6 +1996,47 @@ class TestHttpd(_TestBase):
         except Exception:
             assert False, self.logfile.getvalue()
 
+    def test_no_transfer_encoding_in_empty_response(self):
+        def app(environ, start_response):
+            if environ["PATH_INFO"] == "/304":
+                status = "304 Not Modified"
+            else:
+                status = "204 OK"
+            write = start_response(status, [])
+            write(b"")
+            # "An application must return an iterable object, even if it uses
+            #  write() to produce all or part of its response body."
+            return []
+
+        self.spawn_server(site=app)
+        sock = eventlet.connect(self.server_addr)
+
+        sock.sendall(b"DELETE /foo HTTP/1.1\r\nConnection: keep-alive\r\n\r\n")
+        response = read_http(sock)
+        assert response.status == "HTTP/1.1 204 OK"
+        assert "transfer-encoding" not in response.headers_lower
+        assert "content-length" not in response.headers_lower
+        assert response.headers_lower.get("connection") == "keep-alive"
+
+        # Since it's HTTP/1.1 and clients know there's no body,
+        # we can continue using the connection
+        sock.sendall(b"GET /304 HTTP/1.1\r\n\r\n")
+        response = read_http(sock)
+        assert response.status == "HTTP/1.1 304 Not Modified"
+        assert "transfer-encoding" not in response.headers_lower
+        assert "content-length" not in response.headers_lower
+        assert "connection" not in response.headers_lower
+
+        # HTTP/1.1 defaults to persistent connections, even without an explicit
+        # Connection header, so we can keep going
+        sock.sendall(b"DELETE /foo HTTP/1.1\r\n\r\n")
+        response = read_http(sock)
+        assert "transfer-encoding" not in response.headers_lower
+        assert "content-length" not in response.headers_lower
+        assert "connection" not in response.headers_lower
+
+        sock.close()
+
     @pytest.mark.xfail(sys.platform == "darwin", reason="Fails on macOS for some reason")
     def test_close_idle_connections_listen_socket_closed(self):
         self.reset_timeout(4)
@@ -2138,6 +2181,32 @@ class TestHttpd(_TestBase):
         assert result.status == 'HTTP/1.1 200 OK', 'Received status {!r}'.format(result.status)
         sock.close()
 
+    def test_no_content_length_or_transfer_encoding(self):
+        def wsgi_app(environ, start_response):
+            start_response('200 OK', [])
+            return [environ['wsgi.input'].read(1024)]
+
+        self.site.application = wsgi_app
+        sock = eventlet.connect(self.server_addr)
+        sock.send(
+            b'GET / HTTP/1.1\r\n'
+            b'Host: localhost\r\n'
+            b'\r\n')
+        result = read_http(sock)
+        assert result.status == 'HTTP/1.1 200 OK', 'Received status {!r}'.format(result.status)
+        assert result.body == b''
+        # socket's still good
+        sock.send(
+            b'GET / HTTP/1.1\r\n'
+            b'Content-Length: 6\r\n'
+            b'Host: localhost\r\n'
+            b'\r\n'
+            b'hello\n')
+        result = read_http(sock)
+        assert result.status == 'HTTP/1.1 200 OK', 'Received status {!r}'.format(result.status)
+        assert result.body == b'hello\n'
+        sock.close()
+
 
 def read_headers(sock):
     fd = sock.makefile('rb')
diff -pruN 0.36.1-12/tox.ini 0.39.0-0ubuntu1/tox.ini
--- 0.36.1-12/tox.ini	2024-03-29 13:36:38.000000000 +0000
+++ 0.39.0-0ubuntu1/tox.ini	2020-02-02 00:00:00.000000000 +0000
@@ -16,7 +16,7 @@ envlist =
     py38-openssl
     py39-dnspython1
     pypy3-epolls
-    py{38,39,310,311,312}-{selects,poll,epolls,asyncio}
+    py{38,39,310,311,312,313}-{selects,poll,epolls,asyncio}
 skipsdist = True
 
 [testenv:ipv6]
