diff --git a/debian/changelog b/debian/changelog index 5e0c271..fbf6ba2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,15 @@ +python3.12 (3.12.11-0deepin4) unstable; urgency=medium + + * Fix: Documentation doesn't generate with docutils >= 0.22 + + -- lichenggang Thu, 29 Jan 2026 15:04:07 +0800 + +python3.12 (3.12.11-0deepin3) unstable; urgency=medium + + * Fix: CVE-2025-13836 CVE-2025-6069 CVE-2025-6075 CVE-2025-8194 CVE-2025-8291 + + -- lichenggang Wed, 28 Jan 2026 15:37:16 +0800 + python3.12 (3.12.11-0deepin2) unstable; urgency=medium * feat: add sw64 support. diff --git a/debian/patches/CVE-2025-13836.patch b/debian/patches/CVE-2025-13836.patch new file mode 100644 index 0000000..ad115cc --- /dev/null +++ b/debian/patches/CVE-2025-13836.patch @@ -0,0 +1,140 @@ +From 03336d7373be3eb6dcdd94a075089c417dda5df1 Mon Sep 17 00:00:00 2001 +From: Serhiy Storchaka +Date: Mon, 1 Dec 2025 17:26:07 +0200 +Subject: [PATCH] gh-119451: Fix a potential denial of service in http.client + (GH-119454) + +Reading the whole body of the HTTP response could cause OOM if +the Content-Length value is too large even if the server does not send +a large amount of data. Now the HTTP client reads large data by chunks, +therefore the amount of consumed memory is proportional to the amount +of sent data. +(cherry picked from commit 5a4c4a033a4a54481be6870aa1896fad732555b5) + +Co-authored-by: Serhiy Storchaka +--- + Lib/http/client.py | 28 ++++++-- + Lib/test/test_httplib.py | 66 +++++++++++++++++++ + ...-05-23-11-47-48.gh-issue-119451.qkJe9-.rst | 5 ++ + 3 files changed, 95 insertions(+), 4 deletions(-) + create mode 100644 Misc/NEWS.d/next/Security/2024-05-23-11-47-48.gh-issue-119451.qkJe9-.rst + +--- python3.12-3.12.3.orig/Lib/http/client.py ++++ python3.12-3.12.3/Lib/http/client.py +@@ -111,6 +111,11 @@ responses = {v: v.phrase for v in http.H + _MAXLINE = 65536 + _MAXHEADERS = 100 + ++# Data larger than this will be read in chunks, to prevent extreme ++# overallocation. ++_MIN_READ_BUF_SIZE = 1 << 20 ++ ++ + # Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2) + # + # VCHAR = %x21-7E +@@ -637,10 +642,25 @@ class HTTPResponse(io.BufferedIOBase): + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. + """ +- data = self.fp.read(amt) +- if len(data) < amt: +- raise IncompleteRead(data, amt-len(data)) +- return data ++ cursize = min(amt, _MIN_READ_BUF_SIZE) ++ data = self.fp.read(cursize) ++ if len(data) >= amt: ++ return data ++ if len(data) < cursize: ++ raise IncompleteRead(data, amt - len(data)) ++ ++ data = io.BytesIO(data) ++ data.seek(0, 2) ++ while True: ++ # This is a geometric increase in read size (never more than ++ # doubling out the current length of data per loop iteration). 
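
Editor's aside (illustrative sketch, not part of the upstream patch): the rewrite above caps the first read at 1 MiB (_MIN_READ_BUF_SIZE) and then lets the per-iteration read size grow geometrically, so memory consumption tracks the bytes the server actually sends rather than the advertised Content-Length. The same idea in isolation, using a hypothetical read_exact() helper over any binary file-like object:

    def read_exact(fp, amt, start=1 << 20):
        # Read exactly amt bytes without allocating amt up front.
        cursize = min(amt, start)
        chunks = [fp.read(cursize)]
        total = len(chunks[0])
        while total < amt:
            chunk = fp.read(min(cursize, amt - total))
            if not chunk:
                raise EOFError(f"expected {amt} bytes, got only {total}")
            chunks.append(chunk)
            total += len(chunk)
            cursize += len(chunk)   # never more than doubles per iteration
        return b"".join(chunks)

For a response whose Content-Length claims several gigabytes but whose sender stops after a few kilobytes, a loop of this shape allocates only a few kilobytes before failing, which is the denial-of-service the patch closes.
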
++ delta = min(cursize, amt - cursize) ++ data.write(self.fp.read(delta)) ++ if data.tell() >= amt: ++ return data.getvalue() ++ cursize += delta ++ if data.tell() < cursize: ++ raise IncompleteRead(data.getvalue(), amt - data.tell()) + + def _safe_readinto(self, b): + """Same as _safe_read, but for reading into a buffer.""" +--- python3.12-3.12.3.orig/Lib/test/test_httplib.py ++++ python3.12-3.12.3/Lib/test/test_httplib.py +@@ -1433,6 +1433,72 @@ class BasicTest(TestCase): + thread.join() + self.assertEqual(result, b"proxied data\n") + ++ def test_large_content_length(self): ++ serv = socket.create_server((HOST, 0)) ++ self.addCleanup(serv.close) ++ ++ def run_server(): ++ [conn, address] = serv.accept() ++ with conn: ++ while conn.recv(1024): ++ conn.sendall( ++ b"HTTP/1.1 200 Ok\r\n" ++ b"Content-Length: %d\r\n" ++ b"\r\n" % size) ++ conn.sendall(b'A' * (size//3)) ++ conn.sendall(b'B' * (size - size//3)) ++ ++ thread = threading.Thread(target=run_server) ++ thread.start() ++ self.addCleanup(thread.join, 1.0) ++ ++ conn = client.HTTPConnection(*serv.getsockname()) ++ try: ++ for w in range(15, 27): ++ size = 1 << w ++ conn.request("GET", "/") ++ with conn.getresponse() as response: ++ self.assertEqual(len(response.read()), size) ++ finally: ++ conn.close() ++ thread.join(1.0) ++ ++ def test_large_content_length_truncated(self): ++ serv = socket.create_server((HOST, 0)) ++ self.addCleanup(serv.close) ++ ++ def run_server(): ++ while True: ++ [conn, address] = serv.accept() ++ with conn: ++ conn.recv(1024) ++ if not size: ++ break ++ conn.sendall( ++ b"HTTP/1.1 200 Ok\r\n" ++ b"Content-Length: %d\r\n" ++ b"\r\n" ++ b"Text" % size) ++ ++ thread = threading.Thread(target=run_server) ++ thread.start() ++ self.addCleanup(thread.join, 1.0) ++ ++ conn = client.HTTPConnection(*serv.getsockname()) ++ try: ++ for w in range(18, 65): ++ size = 1 << w ++ conn.request("GET", "/") ++ with conn.getresponse() as response: ++ self.assertRaises(client.IncompleteRead, response.read) ++ conn.close() ++ finally: ++ conn.close() ++ size = 0 ++ conn.request("GET", "/") ++ conn.close() ++ thread.join(1.0) ++ + def test_putrequest_override_domain_validation(self): + """ + It should be possible to override the default validation diff --git a/debian/patches/CVE-2025-6069.patch b/debian/patches/CVE-2025-6069.patch new file mode 100644 index 0000000..e7c4b88 --- /dev/null +++ b/debian/patches/CVE-2025-6069.patch @@ -0,0 +1,235 @@ +From ab0893fd5c579d9cea30841680e6d35fc478afb5 Mon Sep 17 00:00:00 2001 +From: Serhiy Storchaka +Date: Fri, 4 Jul 2025 01:12:10 +0300 +Subject: [PATCH] [3.12] gh-135462: Fix quadratic complexity in processing + special input in HTMLParser (GH-135464) (GH-135483) + +End-of-file errors are now handled according to the HTML5 specs -- +comments and declarations are automatically closed, tags are ignored. 
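
Editor's aside (illustrative, not part of the patch): the practical effect is that feeding a large truncated construct, such as an unterminated comment, no longer triggers repeated rescanning of the buffered input, and close() finalizes the open construct according to the HTML5 end-of-file rules. A quick way to poke at the behaviour on a patched interpreter (the 120_000 figure mirrors the new test further down):

    import time
    from html.parser import HTMLParser

    class Collector(HTMLParser):
        def handle_comment(self, data):
            print("comment of length", len(data))

    payload = "<!--" + "x" * 120_000      # comment that is never closed
    parser = Collector()
    start = time.perf_counter()
    parser.feed(payload)
    parser.close()                        # EOF: the open comment is closed
    print(f"done in {time.perf_counter() - start:.3f}s")
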
+(cherry picked from commit 6eb6c5dbfb528bd07d77b60fd71fd05d81d45c41) +--- + Lib/html/parser.py | 41 +++++--- + Lib/test/test_htmlparser.py | 94 ++++++++++++++++--- + ...-06-13-15-55-22.gh-issue-135462.KBeJpc.rst | 4 + + 3 files changed, 116 insertions(+), 23 deletions(-) + create mode 100644 Misc/NEWS.d/next/Security/2025-06-13-15-55-22.gh-issue-135462.KBeJpc.rst + +Index: python3.12-3.12.3/Lib/html/parser.py +=================================================================== +--- python3.12-3.12.3.orig/Lib/html/parser.py ++++ python3.12-3.12.3/Lib/html/parser.py +@@ -25,6 +25,7 @@ entityref = re.compile('&([a-zA-Z][-.a-z + charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]') + + starttagopen = re.compile('<[a-zA-Z]') ++endtagopen = re.compile('') + commentclose = re.compile(r'--\s*>') + # Note: +@@ -177,7 +178,7 @@ class HTMLParser(_markupbase.ParserBase) + k = self.parse_pi(i) + elif startswith("', i + 1) +- if k < 0: +- k = rawdata.find('<', i + 1) +- if k < 0: +- k = i + 1 ++ if starttagopen.match(rawdata, i): # < + letter ++ pass ++ elif startswith("'), +- ('comment', '/img'), +- ('endtag', 'html<')]) ++ ('data', '\n')]) + + def test_starttag_junk_chars(self): ++ self._run_check("<", [('data', '<')]) ++ self._run_check("<>", [('data', '<>')]) ++ self._run_check("< >", [('data', '< >')]) ++ self._run_check("< ", [('data', '< ')]) + self._run_check("", []) ++ self._run_check("<$>", [('data', '<$>')]) + self._run_check("", [('comment', '$')]) + self._run_check("", [('endtag', 'a')]) ++ self._run_check("", [('starttag', 'a", [('endtag', 'a'", [('data', "'", []) ++ self._run_check("", [('starttag', 'a$b', [])]) + self._run_check("", [('startendtag', 'a$b', [])]) + self._run_check("", [('starttag', 'a$b', [])]) + self._run_check("", [('startendtag', 'a$b', [])]) ++ self._run_check("", [('endtag', 'a$b')]) + + def test_slashes_in_starttag(self): + self._run_check('', [('startendtag', 'a', [('foo', 'var')])]) +@@ -539,13 +546,56 @@ text + for html, expected in data: + self._run_check(html, expected) + +- def test_broken_comments(self): +- html = ('' ++ def test_eof_in_comments(self): ++ data = [ ++ ('', [('comment', '-!>')]), ++ ('' + '' + '' + '') + expected = [ ++ ('comment', 'ELEMENT br EMPTY'), + ('comment', ' not really a comment '), + ('comment', ' not a comment either --'), + ('comment', ' -- close enough --'), +@@ -600,6 +650,26 @@ text + ('endtag', 'a'), ('data', ' bar & baz')] + ) + ++ @support.requires_resource('cpu') ++ def test_eof_no_quadratic_complexity(self): ++ # Each of these examples used to take about an hour. ++ # Now they take a fraction of a second. 
++ def check(source): ++ parser = html.parser.HTMLParser() ++ parser.feed(source) ++ parser.close() ++ n = 120_000 ++ check(" +Date: Fri, 31 Oct 2025 17:50:42 +0100 +Subject: [PATCH] [3.12] gh-136065: Fix quadratic complexity in + os.path.expandvars() (GH-134952) (GH-140847) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +(cherry picked from commit f029e8db626ddc6e3a3beea4eff511a71aaceb5c) + +Co-authored-by: Serhiy Storchaka +Co-authored-by: Ɓukasz Langa +--- + Lib/ntpath.py | 126 ++++++------------ + Lib/posixpath.py | 43 +++--- + Lib/test/test_genericpath.py | 14 ++ + Lib/test/test_ntpath.py | 23 +++- + ...-05-30-22-33-27.gh-issue-136065.bu337o.rst | 1 + + 5 files changed, 94 insertions(+), 113 deletions(-) + create mode 100644 Misc/NEWS.d/next/Security/2025-05-30-22-33-27.gh-issue-136065.bu337o.rst + +--- python3.12-3.12.3.orig/Lib/ntpath.py ++++ python3.12-3.12.3/Lib/ntpath.py +@@ -409,17 +409,23 @@ def expanduser(path): + # XXX With COMMAND.COM you can use any characters in a variable name, + # XXX except '^|<>='. + ++_varpattern = r"'[^']*'?|%(%|[^%]*%?)|\$(\$|[-\w]+|\{[^}]*\}?)" ++_varsub = None ++_varsubb = None ++ + def expandvars(path): + """Expand shell variables of the forms $var, ${var} and %var%. + + Unknown variables are left unchanged.""" + path = os.fspath(path) ++ global _varsub, _varsubb + if isinstance(path, bytes): + if b'$' not in path and b'%' not in path: + return path +- import string +- varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii') +- quote = b'\'' ++ if not _varsubb: ++ import re ++ _varsubb = re.compile(_varpattern.encode(), re.ASCII).sub ++ sub = _varsubb + percent = b'%' + brace = b'{' + rbrace = b'}' +@@ -428,94 +434,44 @@ def expandvars(path): + else: + if '$' not in path and '%' not in path: + return path +- import string +- varchars = string.ascii_letters + string.digits + '_-' +- quote = '\'' ++ if not _varsub: ++ import re ++ _varsub = re.compile(_varpattern, re.ASCII).sub ++ sub = _varsub + percent = '%' + brace = '{' + rbrace = '}' + dollar = '$' + environ = os.environ +- res = path[:0] +- index = 0 +- pathlen = len(path) +- while index < pathlen: +- c = path[index:index+1] +- if c == quote: # no expansion within single quotes +- path = path[index + 1:] +- pathlen = len(path) +- try: +- index = path.index(c) +- res += c + path[:index + 1] +- except ValueError: +- res += c + path +- index = pathlen - 1 +- elif c == percent: # variable or '%' +- if path[index + 1:index + 2] == percent: +- res += c +- index += 1 +- else: +- path = path[index+1:] +- pathlen = len(path) +- try: +- index = path.index(percent) +- except ValueError: +- res += percent + path +- index = pathlen - 1 +- else: +- var = path[:index] +- try: +- if environ is None: +- value = os.fsencode(os.environ[os.fsdecode(var)]) +- else: +- value = environ[var] +- except KeyError: +- value = percent + var + percent +- res += value +- elif c == dollar: # variable or '$$' +- if path[index + 1:index + 2] == dollar: +- res += c +- index += 1 +- elif path[index + 1:index + 2] == brace: +- path = path[index+2:] +- pathlen = len(path) +- try: +- index = path.index(rbrace) +- except ValueError: +- res += dollar + brace + path +- index = pathlen - 1 +- else: +- var = path[:index] +- try: +- if environ is None: +- value = os.fsencode(os.environ[os.fsdecode(var)]) +- else: +- value = environ[var] +- except KeyError: +- value = dollar + brace + var + rbrace +- res += value +- else: +- var = path[:0] +- index += 1 +- c = path[index:index 
+ 1] +- while c and c in varchars: +- var += c +- index += 1 +- c = path[index:index + 1] +- try: +- if environ is None: +- value = os.fsencode(os.environ[os.fsdecode(var)]) +- else: +- value = environ[var] +- except KeyError: +- value = dollar + var +- res += value +- if c: +- index -= 1 ++ ++ def repl(m): ++ lastindex = m.lastindex ++ if lastindex is None: ++ return m[0] ++ name = m[lastindex] ++ if lastindex == 1: ++ if name == percent: ++ return name ++ if not name.endswith(percent): ++ return m[0] ++ name = name[:-1] + else: +- res += c +- index += 1 +- return res ++ if name == dollar: ++ return name ++ if name.startswith(brace): ++ if not name.endswith(rbrace): ++ return m[0] ++ name = name[1:-1] ++ ++ try: ++ if environ is None: ++ return os.fsencode(os.environ[os.fsdecode(name)]) ++ else: ++ return environ[name] ++ except KeyError: ++ return m[0] ++ ++ return sub(repl, path) + + + # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B. +--- python3.12-3.12.3.orig/Lib/posixpath.py ++++ python3.12-3.12.3/Lib/posixpath.py +@@ -314,42 +314,41 @@ def expanduser(path): + # This expands the forms $variable and ${variable} only. + # Non-existent variables are left unchanged. + +-_varprog = None +-_varprogb = None ++_varpattern = r'\$(\w+|\{[^}]*\}?)' ++_varsub = None ++_varsubb = None + + def expandvars(path): + """Expand shell variables of form $var and ${var}. Unknown variables + are left unchanged.""" + path = os.fspath(path) +- global _varprog, _varprogb ++ global _varsub, _varsubb + if isinstance(path, bytes): + if b'$' not in path: + return path +- if not _varprogb: ++ if not _varsubb: + import re +- _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII) +- search = _varprogb.search ++ _varsubb = re.compile(_varpattern.encode(), re.ASCII).sub ++ sub = _varsubb + start = b'{' + end = b'}' + environ = getattr(os, 'environb', None) + else: + if '$' not in path: + return path +- if not _varprog: ++ if not _varsub: + import re +- _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII) +- search = _varprog.search ++ _varsub = re.compile(_varpattern, re.ASCII).sub ++ sub = _varsub + start = '{' + end = '}' + environ = os.environ +- i = 0 +- while True: +- m = search(path, i) +- if not m: +- break +- i, j = m.span(0) +- name = m.group(1) +- if name.startswith(start) and name.endswith(end): ++ ++ def repl(m): ++ name = m[1] ++ if name.startswith(start): ++ if not name.endswith(end): ++ return m[0] + name = name[1:-1] + try: + if environ is None: +@@ -357,13 +356,11 @@ def expandvars(path): + else: + value = environ[name] + except KeyError: +- i = j ++ return m[0] + else: +- tail = path[j:] +- path = path[:i] + value +- i = len(path) +- path += tail +- return path ++ return value ++ ++ return sub(repl, path) + + + # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. 
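
Editor's aside (illustrative, not part of the patch): both rewrites above (ntpath and posixpath) drop the character-by-character scan in favour of a single precompiled regex substitution whose repl() callback resolves names against the environment, so a path containing many variable references is expanded in one linear pass. The intended behaviour is unchanged, as the tests added below spell out; for example:

    import os
    import os.path

    os.environ["A"] = "B"
    assert os.path.expandvars("$A!" * 1000) == "B!" * 1000
    assert os.path.expandvars("${A}x" * 1000) == "Bx" * 1000
    assert os.path.expandvars("${" * 1000) == "${" * 1000   # unterminated brace: left as-is
    # Unknown variables are left unchanged (assuming this one is unset):
    assert os.path.expandvars("$NO_SUCH_VARIABLE") == "$NO_SUCH_VARIABLE"
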
+--- python3.12-3.12.3.orig/Lib/test/test_genericpath.py ++++ python3.12-3.12.3/Lib/test/test_genericpath.py +@@ -7,6 +7,7 @@ import os + import sys + import unittest + import warnings ++from test import support + from test.support import is_emscripten + from test.support import os_helper + from test.support import warnings_helper +@@ -434,6 +435,19 @@ class CommonTest(GenericTest): + os.fsencode('$bar%s bar' % nonascii)) + check(b'$spam}bar', os.fsencode('%s}bar' % nonascii)) + ++ @support.requires_resource('cpu') ++ def test_expandvars_large(self): ++ expandvars = self.pathmodule.expandvars ++ with os_helper.EnvironmentVarGuard() as env: ++ env.clear() ++ env["A"] = "B" ++ n = 100_000 ++ self.assertEqual(expandvars('$A'*n), 'B'*n) ++ self.assertEqual(expandvars('${A}'*n), 'B'*n) ++ self.assertEqual(expandvars('$A!'*n), 'B!'*n) ++ self.assertEqual(expandvars('${A}A'*n), 'BA'*n) ++ self.assertEqual(expandvars('${'*10*n), '${'*10*n) ++ + def test_abspath(self): + self.assertIn("foo", self.pathmodule.abspath("foo")) + with warnings.catch_warnings(): +--- python3.12-3.12.3.orig/Lib/test/test_ntpath.py ++++ python3.12-3.12.3/Lib/test/test_ntpath.py +@@ -7,8 +7,8 @@ import sys + import unittest + import warnings + from ntpath import ALLOW_MISSING +-from test.support import cpython_only, os_helper +-from test.support import TestFailed, is_emscripten ++from test import support ++from test.support import os_helper, is_emscripten + from test.support.os_helper import FakePath + from test import test_genericpath + from tempfile import TemporaryFile +@@ -58,7 +58,7 @@ def tester(fn, wantResult): + fn = fn.replace("\\", "\\\\") + gotResult = eval(fn) + if wantResult != gotResult and _norm(wantResult) != _norm(gotResult): +- raise TestFailed("%s should return: %s but returned: %s" \ ++ raise support.TestFailed("%s should return: %s but returned: %s" \ + %(str(fn), str(wantResult), str(gotResult))) + + # then with bytes +@@ -74,7 +74,7 @@ def tester(fn, wantResult): + warnings.simplefilter("ignore", DeprecationWarning) + gotResult = eval(fn) + if _norm(wantResult) != _norm(gotResult): +- raise TestFailed("%s should return: %s but returned: %s" \ ++ raise support.TestFailed("%s should return: %s but returned: %s" \ + %(str(fn), str(wantResult), repr(gotResult))) + + +@@ -927,6 +927,19 @@ class TestNtpath(NtpathTestCase): + check('%spam%bar', '%sbar' % nonascii) + check('%{}%bar'.format(nonascii), 'ham%sbar' % nonascii) + ++ @support.requires_resource('cpu') ++ def test_expandvars_large(self): ++ expandvars = ntpath.expandvars ++ with os_helper.EnvironmentVarGuard() as env: ++ env.clear() ++ env["A"] = "B" ++ n = 100_000 ++ self.assertEqual(expandvars('%A%'*n), 'B'*n) ++ self.assertEqual(expandvars('%A%A'*n), 'BA'*n) ++ self.assertEqual(expandvars("''"*n + '%%'), "''"*n + '%') ++ self.assertEqual(expandvars("%%"*n), "%"*n) ++ self.assertEqual(expandvars("$$"*n), "$"*n) ++ + def test_expanduser(self): + tester('ntpath.expanduser("test")', 'test') + +@@ -1228,7 +1241,7 @@ class TestNtpath(NtpathTestCase): + self.assertTrue(os.path.exists(r"\\.\CON")) + + @unittest.skipIf(sys.platform != 'win32', "Fast paths are only for win32") +- @cpython_only ++ @support.cpython_only + def test_fast_paths_in_use(self): + # There are fast paths of these functions implemented in posixmodule.c. 
+ # Confirm that they are being used, and not the Python fallbacks in +--- /dev/null ++++ python3.12-3.12.3/Misc/NEWS.d/next/Security/2025-05-30-22-33-27.gh-issue-136065.bu337o.rst +@@ -0,0 +1 @@ ++Fix quadratic complexity in :func:`os.path.expandvars`. diff --git a/debian/patches/CVE-2025-8194.patch b/debian/patches/CVE-2025-8194.patch new file mode 100644 index 0000000..ee1699c --- /dev/null +++ b/debian/patches/CVE-2025-8194.patch @@ -0,0 +1,214 @@ +From c9d9f78feb1467e73fd29356c040bde1c104f29f Mon Sep 17 00:00:00 2001 +From: "Miss Islington (bot)" + <31488909+miss-islington@users.noreply.github.com> +Date: Mon, 4 Aug 2025 13:45:06 +0200 +Subject: [PATCH] [3.12] gh-130577: tarfile now validates archives to ensure + member offsets are non-negative (GH-137027) (#137171) + +(cherry picked from commit 7040aa54f14676938970e10c5f74ea93cd56aa38) + +Co-authored-by: Alexander Urieles +Co-authored-by: Gregory P. Smith +--- + Lib/tarfile.py | 3 + + Lib/test/test_tarfile.py | 156 ++++++++++++++++++ + ...-07-23-00-35-29.gh-issue-130577.c7EITy.rst | 3 + + 3 files changed, 162 insertions(+) + create mode 100644 Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst + +Index: python3.12-3.12.3/Lib/tarfile.py +=================================================================== +--- python3.12-3.12.3.orig/Lib/tarfile.py ++++ python3.12-3.12.3/Lib/tarfile.py +@@ -1615,6 +1615,9 @@ class TarInfo(object): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ ++ # Only non-negative offsets are allowed ++ if count < 0: ++ raise InvalidHeaderError("invalid offset") + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 +Index: python3.12-3.12.3/Lib/test/test_tarfile.py +=================================================================== +--- python3.12-3.12.3.orig/Lib/test/test_tarfile.py ++++ python3.12-3.12.3/Lib/test/test_tarfile.py +@@ -50,6 +50,7 @@ bz2name = os.path.join(TEMPDIR, "testtar + xzname = os.path.join(TEMPDIR, "testtar.tar.xz") + tmpname = os.path.join(TEMPDIR, "tmp.tar") + dotlessname = os.path.join(TEMPDIR, "testtar") ++SPACE = b" " + + sha256_regtype = ( + "e09e4bc8b3c9d9177e77256353b36c159f5f040531bbd4b024a8f9b9196c71ce" +@@ -4458,6 +4459,161 @@ class OverwriteTests(archiver_tests.Over + ar.extractall(self.testdir, filter='fully_trusted') + + ++class OffsetValidationTests(unittest.TestCase): ++ tarname = tmpname ++ invalid_posix_header = ( ++ # name: 100 bytes ++ tarfile.NUL * tarfile.LENGTH_NAME ++ # mode, space, null terminator: 8 bytes ++ + b"000755" + SPACE + tarfile.NUL ++ # uid, space, null terminator: 8 bytes ++ + b"000001" + SPACE + tarfile.NUL ++ # gid, space, null terminator: 8 bytes ++ + b"000001" + SPACE + tarfile.NUL ++ # size, space: 12 bytes ++ + b"\xff" * 11 + SPACE ++ # mtime, space: 12 bytes ++ + tarfile.NUL * 11 + SPACE ++ # chksum: 8 bytes ++ + b"0011407" + tarfile.NUL ++ # type: 1 byte ++ + tarfile.REGTYPE ++ # linkname: 100 bytes ++ + tarfile.NUL * tarfile.LENGTH_LINK ++ # magic: 6 bytes, version: 2 bytes ++ + tarfile.POSIX_MAGIC ++ # uname: 32 bytes ++ + tarfile.NUL * 32 ++ # gname: 32 bytes ++ + tarfile.NUL * 32 ++ # devmajor, space, null terminator: 8 bytes ++ + tarfile.NUL * 6 + SPACE + tarfile.NUL ++ # devminor, space, null terminator: 8 bytes ++ + tarfile.NUL * 6 + SPACE + tarfile.NUL ++ # prefix: 155 bytes ++ + tarfile.NUL * tarfile.LENGTH_PREFIX ++ # padding: 12 bytes ++ + tarfile.NUL * 12 ++ ) ++ invalid_gnu_header = ( ++ # name: 100 bytes ++ tarfile.NUL * tarfile.LENGTH_NAME ++ # mode, 
null terminator: 8 bytes ++ + b"0000755" + tarfile.NUL ++ # uid, null terminator: 8 bytes ++ + b"0000001" + tarfile.NUL ++ # gid, space, null terminator: 8 bytes ++ + b"0000001" + tarfile.NUL ++ # size, space: 12 bytes ++ + b"\xff" * 11 + SPACE ++ # mtime, space: 12 bytes ++ + tarfile.NUL * 11 + SPACE ++ # chksum: 8 bytes ++ + b"0011327" + tarfile.NUL ++ # type: 1 byte ++ + tarfile.REGTYPE ++ # linkname: 100 bytes ++ + tarfile.NUL * tarfile.LENGTH_LINK ++ # magic: 8 bytes ++ + tarfile.GNU_MAGIC ++ # uname: 32 bytes ++ + tarfile.NUL * 32 ++ # gname: 32 bytes ++ + tarfile.NUL * 32 ++ # devmajor, null terminator: 8 bytes ++ + tarfile.NUL * 8 ++ # devminor, null terminator: 8 bytes ++ + tarfile.NUL * 8 ++ # padding: 167 bytes ++ + tarfile.NUL * 167 ++ ) ++ invalid_v7_header = ( ++ # name: 100 bytes ++ tarfile.NUL * tarfile.LENGTH_NAME ++ # mode, space, null terminator: 8 bytes ++ + b"000755" + SPACE + tarfile.NUL ++ # uid, space, null terminator: 8 bytes ++ + b"000001" + SPACE + tarfile.NUL ++ # gid, space, null terminator: 8 bytes ++ + b"000001" + SPACE + tarfile.NUL ++ # size, space: 12 bytes ++ + b"\xff" * 11 + SPACE ++ # mtime, space: 12 bytes ++ + tarfile.NUL * 11 + SPACE ++ # chksum: 8 bytes ++ + b"0010070" + tarfile.NUL ++ # type: 1 byte ++ + tarfile.REGTYPE ++ # linkname: 100 bytes ++ + tarfile.NUL * tarfile.LENGTH_LINK ++ # padding: 255 bytes ++ + tarfile.NUL * 255 ++ ) ++ valid_gnu_header = tarfile.TarInfo("filename").tobuf(tarfile.GNU_FORMAT) ++ data_block = b"\xff" * tarfile.BLOCKSIZE ++ ++ def _write_buffer(self, buffer): ++ with open(self.tarname, "wb") as f: ++ f.write(buffer) ++ ++ def _get_members(self, ignore_zeros=None): ++ with open(self.tarname, "rb") as f: ++ with tarfile.open( ++ mode="r", fileobj=f, ignore_zeros=ignore_zeros ++ ) as tar: ++ return tar.getmembers() ++ ++ def _assert_raises_read_error_exception(self): ++ with self.assertRaisesRegex( ++ tarfile.ReadError, "file could not be opened successfully" ++ ): ++ self._get_members() ++ ++ def test_invalid_offset_header_validations(self): ++ for tar_format, invalid_header in ( ++ ("posix", self.invalid_posix_header), ++ ("gnu", self.invalid_gnu_header), ++ ("v7", self.invalid_v7_header), ++ ): ++ with self.subTest(format=tar_format): ++ self._write_buffer(invalid_header) ++ self._assert_raises_read_error_exception() ++ ++ def test_early_stop_at_invalid_offset_header(self): ++ buffer = self.valid_gnu_header + self.invalid_gnu_header + self.valid_gnu_header ++ self._write_buffer(buffer) ++ members = self._get_members() ++ self.assertEqual(len(members), 1) ++ self.assertEqual(members[0].name, "filename") ++ self.assertEqual(members[0].offset, 0) ++ ++ def test_ignore_invalid_archive(self): ++ # 3 invalid headers with their respective data ++ buffer = (self.invalid_gnu_header + self.data_block) * 3 ++ self._write_buffer(buffer) ++ members = self._get_members(ignore_zeros=True) ++ self.assertEqual(len(members), 0) ++ ++ def test_ignore_invalid_offset_headers(self): ++ for first_block, second_block, expected_offset in ( ++ ( ++ (self.valid_gnu_header), ++ (self.invalid_gnu_header + self.data_block), ++ 0, ++ ), ++ ( ++ (self.invalid_gnu_header + self.data_block), ++ (self.valid_gnu_header), ++ 1024, ++ ), ++ ): ++ self._write_buffer(first_block + second_block) ++ members = self._get_members(ignore_zeros=True) ++ self.assertEqual(len(members), 1) ++ self.assertEqual(members[0].name, "filename") ++ self.assertEqual(members[0].offset, expected_offset) ++ ++ + def setUpModule(): + os_helper.unlink(TEMPDIR) + 
os.makedirs(TEMPDIR) +Index: python3.12-3.12.3/Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst +=================================================================== +--- /dev/null ++++ python3.12-3.12.3/Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst +@@ -0,0 +1,3 @@ ++:mod:`tarfile` now validates archives to ensure member offsets are ++non-negative. (Contributed by Alexander Enrique Urieles Nieto in ++:gh:`130577`.) diff --git a/debian/patches/CVE-2025-8291.patch b/debian/patches/CVE-2025-8291.patch new file mode 100644 index 0000000..2ed526b --- /dev/null +++ b/debian/patches/CVE-2025-8291.patch @@ -0,0 +1,284 @@ +From 162997bb70e067668c039700141770687bc8f267 Mon Sep 17 00:00:00 2001 +From: Serhiy Storchaka +Date: Tue, 7 Oct 2025 20:15:26 +0300 +Subject: [PATCH] gh-139700: Check consistency of the zip64 end of central + directory record (GH-139702) + +Support records with "zip64 extensible data" if there are no bytes +prepended to the ZIP file. +--- python3.12-3.12.3.orig/Lib/test/test_zipfile/test_core.py ++++ python3.12-3.12.3/Lib/test/test_zipfile/test_core.py +@@ -884,6 +884,8 @@ class StoredTestZip64InSmallFiles(Abstra + self, file_size_64_set=False, file_size_extra=False, + compress_size_64_set=False, compress_size_extra=False, + header_offset_64_set=False, header_offset_extra=False, ++ extensible_data=b'', ++ end_of_central_dir_size=None, offset_to_end_of_central_dir=None, + ): + """Generate bytes sequence for a zip with (incomplete) zip64 data. + +@@ -937,6 +939,12 @@ class StoredTestZip64InSmallFiles(Abstra + + central_dir_size = struct.pack(' 1: + raise BadZipFile("zipfiles that span multiple disks are not supported") + +- # Assume no 'zip64 extensible data' +- fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2) ++ offset -= sizeEndCentDir64 ++ if reloff > offset: ++ raise BadZipFile("Corrupt zip64 end of central directory locator") ++ # First, check the assumption that there is no prepended data. ++ fpin.seek(reloff) ++ extrasz = offset - reloff + data = fpin.read(sizeEndCentDir64) + if len(data) != sizeEndCentDir64: +- return endrec ++ raise OSError("Unknown I/O error") ++ if not data.startswith(stringEndArchive64) and reloff != offset: ++ # Since we already have seen the Zip64 EOCD Locator, it's ++ # possible we got here because there is prepended data. 
++ # Assume no 'zip64 extensible data' ++ fpin.seek(offset) ++ extrasz = 0 ++ data = fpin.read(sizeEndCentDir64) ++ if len(data) != sizeEndCentDir64: ++ raise OSError("Unknown I/O error") ++ if not data.startswith(stringEndArchive64): ++ raise BadZipFile("Zip64 end of central directory record not found") ++ + sig, sz, create_version, read_version, disk_num, disk_dir, \ + dircount, dircount2, dirsize, diroffset = \ + struct.unpack(structEndArchive64, data) +- if sig != stringEndArchive64: +- return endrec ++ if (diroffset + dirsize != reloff or ++ sz + 12 != sizeEndCentDir64 + extrasz): ++ raise BadZipFile("Corrupt zip64 end of central directory record") + + # Update the original endrec using data from the ZIP64 record + endrec[_ECD_SIGNATURE] = sig +@@ -275,6 +291,7 @@ def _EndRecData64(fpin, offset, endrec): + endrec[_ECD_ENTRIES_TOTAL] = dircount2 + endrec[_ECD_SIZE] = dirsize + endrec[_ECD_OFFSET] = diroffset ++ endrec[_ECD_LOCATION] = offset - extrasz + return endrec + + +@@ -308,7 +325,7 @@ def _EndRecData(fpin): + endrec.append(filesize - sizeEndCentDir) + + # Try to read the "Zip64 end of central directory" structure +- return _EndRecData64(fpin, -sizeEndCentDir, endrec) ++ return _EndRecData64(fpin, filesize - sizeEndCentDir, endrec) + + # Either this is not a ZIP file, or it is a ZIP file with an archive + # comment. Search the end of the file for the "end of central directory" +@@ -332,8 +349,7 @@ def _EndRecData(fpin): + endrec.append(maxCommentStart + start) + + # Try to read the "Zip64 end of central directory" structure +- return _EndRecData64(fpin, maxCommentStart + start - filesize, +- endrec) ++ return _EndRecData64(fpin, maxCommentStart + start, endrec) + + # Unable to find a valid end of central directory structure + return None +@@ -1422,9 +1438,6 @@ class ZipFile: + + # "concat" is zero, unless zip was concatenated to another file + concat = endrec[_ECD_LOCATION] - size_cd - offset_cd +- if endrec[_ECD_SIGNATURE] == stringEndArchive64: +- # If Zip64 extension structures are present, account for them +- concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) + + if self.debug > 2: + inferred = concat + offset_cd +@@ -2034,7 +2047,7 @@ class ZipFile: + " would require ZIP64 extensions") + zip64endrec = struct.pack( + structEndArchive64, stringEndArchive64, +- 44, 45, 45, 0, 0, centDirCount, centDirCount, ++ sizeEndCentDir64 - 12, 45, 45, 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset) + self.fp.write(zip64endrec) + diff --git a/debian/patches/Remove-Docutils-list-monkeypatch.patch b/debian/patches/Remove-Docutils-list-monkeypatch.patch new file mode 100644 index 0000000..de71925 --- /dev/null +++ b/debian/patches/Remove-Docutils-list-monkeypatch.patch @@ -0,0 +1,202 @@ +GH-121970: Remove Docutils list monkeypatch +Frome: https://github.com/python/cpython/pull/142056 + +--- python3.12-3.12.11.orig/Doc/howto/functional.rst ++++ python3.12-3.12.11/Doc/howto/functional.rst +@@ -4,7 +4,7 @@ + Functional Programming HOWTO + ******************************** + +-:Author: A. M. Kuchling ++:Author: \A. M. Kuchling + :Release: 0.32 + + In this document, we'll take a tour of Python's features suitable for +--- python3.12-3.12.11.orig/Doc/library/decimal.rst ++++ python3.12-3.12.11/Doc/library/decimal.rst +@@ -2053,20 +2053,18 @@ to work with the :class:`Decimal` class: + Decimal FAQ + ----------- + +-Q. It is cumbersome to type ``decimal.Decimal('1234.5')``. Is there a way to +-minimize typing when using the interactive interpreter? +- +-A. 
Some users abbreviate the constructor to just a single letter: ++Q: It is cumbersome to type ``decimal.Decimal('1234.5')``. Is there a way to + ++A: Some users abbreviate the constructor to just a single letter:A: Some users abbreviate the constructor to just a single letter: + >>> D = decimal.Decimal + >>> D('1.23') + D('3.45') + Decimal('4.68') + +-Q. In a fixed-point application with two decimal places, some inputs have many ++Q: In a fixed-point application with two decimal places, some inputs have many + places and need to be rounded. Others are not supposed to have excess digits + and need to be validated. What methods should be used? + +-A. The :meth:`~Decimal.quantize` method rounds to a fixed number of decimal places. If ++A: The :meth:`~Decimal.quantize` method rounds to a fixed number of decimal places. If + the :const:`Inexact` trap is set, it is also useful for validation: + + >>> TWOPLACES = Decimal(10) ** -2 # same as Decimal('0.01') +@@ -2084,10 +2082,10 @@ the :const:`Inexact` trap is set, it is + ... + Inexact: None + +-Q. Once I have valid two place inputs, how do I maintain that invariant ++Q: Once I have valid two place inputs, how do I maintain that invariant + throughout an application? + +-A. Some operations like addition, subtraction, and multiplication by an integer ++A: Some operations like addition, subtraction, and multiplication by an integer + will automatically preserve fixed point. Others operations, like division and + non-integer multiplication, will change the number of decimal places and need to + be followed-up with a :meth:`~Decimal.quantize` step: +@@ -2119,21 +2117,21 @@ to handle the :meth:`~Decimal.quantize` + >>> div(b, a) + Decimal('0.03') + +-Q. There are many ways to express the same value. The numbers ``200``, ++Q: There are many ways to express the same value. The numbers ``200``, + ``200.000``, ``2E2``, and ``.02E+4`` all have the same value at + various precisions. Is there a way to transform them to a single recognizable + canonical value? + +-A. The :meth:`~Decimal.normalize` method maps all equivalent values to a single ++A: The :meth:`~Decimal.normalize` method maps all equivalent values to a single + representative: + + >>> values = map(Decimal, '200 200.000 2E2 .02E+4'.split()) + >>> [v.normalize() for v in values] + [Decimal('2E+2'), Decimal('2E+2'), Decimal('2E+2'), Decimal('2E+2')] + +-Q. When does rounding occur in a computation? ++Q: When does rounding occur in a computation? + +-A. It occurs *after* the computation. The philosophy of the decimal ++A: It occurs *after* the computation. The philosophy of the decimal + specification is that numbers are considered exact and are created + independent of the current context. They can even have greater + precision than current context. Computations process with those +@@ -2151,10 +2149,10 @@ applied to the *result* of the computati + >>> pi + 0 - Decimal('0.00005'). # Intermediate values are rounded + Decimal('3.1416') + +-Q. Some decimal values always print with exponential notation. Is there a way ++Q: Some decimal values always print with exponential notation. Is there a way + to get a non-exponential representation? + +-A. For some values, exponential notation is the only way to express the number ++A: For some values, exponential notation is the only way to express the number + of significant places in the coefficient. For example, expressing + ``5.0E+3`` as ``5000`` keeps the value constant but cannot show the + original's two-place significance. 
+@@ -2169,9 +2167,9 @@ value unchanged: + >>> remove_exponent(Decimal('5E+3')) + Decimal('5000') + +-Q. Is there a way to convert a regular float to a :class:`Decimal`? ++Q: Is there a way to convert a regular float to a :class:`Decimal`? + +-A. Yes, any binary floating-point number can be exactly expressed as a ++A: Yes, any binary floating-point number can be exactly expressed as a + Decimal though an exact conversion may take more precision than intuition would + suggest: + +@@ -2180,19 +2178,19 @@ suggest: + >>> Decimal(math.pi) + Decimal('3.141592653589793115997963468544185161590576171875') + +-Q. Within a complex calculation, how can I make sure that I haven't gotten a ++Q: Within a complex calculation, how can I make sure that I haven't gotten a + spurious result because of insufficient precision or rounding anomalies. + +-A. The decimal module makes it easy to test results. A best practice is to ++A: The decimal module makes it easy to test results. A best practice is to + re-run calculations using greater precision and with various rounding modes. + Widely differing results indicate insufficient precision, rounding mode issues, + ill-conditioned inputs, or a numerically unstable algorithm. + +-Q. I noticed that context precision is applied to the results of operations but ++Q: I noticed that context precision is applied to the results of operations but + not to the inputs. Is there anything to watch out for when mixing values of + different precisions? + +-A. Yes. The principle is that all values are considered to be exact and so is ++A: Yes. The principle is that all values are considered to be exact and so is + the arithmetic on those values. Only the results are rounded. The advantage + for inputs is that "what you type is what you get". A disadvantage is that the + results can look odd if you forget that the inputs haven't been rounded: +@@ -2220,16 +2218,16 @@ Alternatively, inputs can be rounded upo + >>> Context(prec=5, rounding=ROUND_DOWN).create_decimal('1.2345678') + Decimal('1.2345') + +-Q. Is the CPython implementation fast for large numbers? ++Q: Is the CPython implementation fast for large numbers? + +-A. Yes. In the CPython and PyPy3 implementations, the C/CFFI versions of ++A: Yes. In the CPython and PyPy3 implementations, the C/CFFI versions of + the decimal module integrate the high speed `libmpdec + `_ library for + arbitrary precision correctly rounded decimal floating-point arithmetic [#]_. + ``libmpdec`` uses `Karatsuba multiplication +-`_ ++`_ + for medium-sized numbers and the `Number Theoretic Transform +-`_ ++`_ + for very large numbers. + + The context must be adapted for exact arbitrary precision arithmetic. :attr:`~Context.Emin` +--- python3.12-3.12.11.orig/Doc/library/ssl.rst ++++ python3.12-3.12.11/Doc/library/ssl.rst +@@ -2672,16 +2672,16 @@ of TLS/SSL. Some new TLS 1.3 features ar + Steve Kent + + :rfc:`RFC 4086: Randomness Requirements for Security <4086>` +- Donald E., Jeffrey I. Schiller ++ Donald E. Eastlake, Jeffrey I. Schiller, Steve Crocker + + :rfc:`RFC 5280: Internet X.509 Public Key Infrastructure Certificate and Certificate Revocation List (CRL) Profile <5280>` +- D. Cooper ++ David Cooper et al. + + :rfc:`RFC 5246: The Transport Layer Security (TLS) Protocol Version 1.2 <5246>` +- T. Dierks et. al. ++ Tim Dierks and Eric Rescorla. + + :rfc:`RFC 6066: Transport Layer Security (TLS) Extensions <6066>` +- D. Eastlake ++ Donald E. 
Eastlake + + `IANA TLS: Transport Layer Security (TLS) Parameters `_ + IANA +--- python3.12-3.12.11.orig/Doc/tools/extensions/pyspecific.py ++++ python3.12-3.12.11/Doc/tools/extensions/pyspecific.py +@@ -23,15 +23,6 @@ from sphinx.util.docutils import SphinxD + + # Used in conf.py and updated here by python/release-tools/run_release.py + SOURCE_URI = 'https://github.com/python/cpython/tree/3.12/%s' +- +-# monkey-patch reST parser to disable alphabetic and roman enumerated lists +-from docutils.parsers.rst.states import Body +-Body.enum.converters['loweralpha'] = \ +- Body.enum.converters['upperalpha'] = \ +- Body.enum.converters['lowerroman'] = \ +- Body.enum.converters['upperroman'] = lambda x: None +- +- + class PyAwaitableMixin(object): + def handle_signature(self, sig, signode): + ret = super(PyAwaitableMixin, self).handle_signature(sig, signode) +--- python3.12-3.12.11.orig/Doc/whatsnew/3.4.rst ++++ python3.12-3.12.11/Doc/whatsnew/3.4.rst +@@ -2,7 +2,7 @@ + What's New In Python 3.4 + **************************** + +-:Author: R. David Murray (Editor) ++:Author: \R. David Murray (Editor) + + .. Rules for maintenance: + diff --git a/debian/patches/series b/debian/patches/series index 88dc3ba..9418687 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -34,3 +34,9 @@ issue108447.diff no-sphinx-8.2.diff Add-Debian-specific-documentation-path-to-IDLE-menu.diff 0001-deepin-sw_64-support.diff +#patchlevel-noplus.diff +CVE-2025-6069.patch +CVE-2025-8194.patch +CVE-2025-6075-12.patch +CVE-2025-8291.patch +Remove-Docutils-list-monkeypatch.patch
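
Editor's note (illustrative, not part of any patch above): the block removed from pyspecific.py had monkeypatched Docutils so that alphabetic and roman enumerators ("A.", "iv.", ...) were never recognised as list markers, and per the 0deepin4 changelog entry that hack stops the documentation from building with docutils >= 0.22. With it gone, the reST sources themselves must avoid accidental enumerated lists, which is what the documentation edits above do: author initials gain a backslash escape ("\A. M. Kuchling") and the decimal FAQ switches its "Q." / "A." labels to "Q:" / "A:". The difference is easy to inspect with Docutils directly, assuming it is installed:

    from docutils.core import publish_doctree

    for text in ("A. M. Kuchling", "\\A. M. Kuchling"):
        print(publish_doctree(text).pformat())

    # The unescaped line comes back as an enumerated_list node ("A." is taken
    # as an enumerator); the escaped line stays a plain paragraph.
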