diff --git a/CVE-2018-20852.patch b/CVE-2018-20852.patch
new file mode 100644
index 0000000000000000000000000000000000000000..bbcc2c8e9dc8844c5596792937f2c7d99506f783
--- /dev/null
+++ b/CVE-2018-20852.patch
@@ -0,0 +1,101 @@
+diff -uNrp a/Lib/cookielib.py b/Lib/cookielib.py
+--- a/Lib/cookielib.py 2019-12-21 16:06:12.476000000 +0800
++++ b/Lib/cookielib.py 2019-12-21 16:09:31.556000000 +0800
+@@ -1139,6 +1139,12 @@ class DefaultCookiePolicy(CookiePolicy):
+ req_host, erhn = eff_request_host(request)
+ domain = cookie.domain
+
++ if domain and not domain.startswith("."):
++ dotdomain = "." + domain
++ else:
++ dotdomain = domain
++
++
+ # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
+ if (cookie.version == 0 and
+ (self.strict_ns_domain & self.DomainStrictNonDomain) and
+@@ -1151,7 +1157,7 @@ class DefaultCookiePolicy(CookiePolicy):
+ _debug(" effective request-host name %s does not domain-match "
+ "RFC 2965 cookie domain %s", erhn, domain)
+ return False
+- if cookie.version == 0 and not ("."+erhn).endswith(domain):
++ if cookie.version == 0 and not ("."+erhn).endswith(dotdomain):
+ _debug(" request-host %s does not match Netscape cookie domain "
+ "%s", req_host, domain)
+ return False
+@@ -1165,7 +1171,11 @@ class DefaultCookiePolicy(CookiePolicy):
+ req_host = "."+req_host
+ if not erhn.startswith("."):
+ erhn = "."+erhn
+- if not (req_host.endswith(domain) or erhn.endswith(domain)):
++ if domain and not domain.startswith("."):
++ dotdomain = "." + domain
++ else:
++ dotdomain = domain
++ if not (req_host.endswith(dotdomain) or erhn.endswith(dotdomain)):
+ #_debug(" request domain %s does not match cookie domain %s",
+ # req_host, domain)
+ return False
+diff -uNrp a/Lib/test/test_cookielib.py b/Lib/test/test_cookielib.py
+--- a/Lib/test/test_cookielib.py 2019-12-21 16:06:12.640000000 +0800
++++ b/Lib/test/test_cookielib.py 2019-12-21 16:11:53.888000000 +0800
+@@ -368,6 +368,7 @@ class CookieTests(TestCase):
+ ("http://foo.bar.com/", ".foo.bar.com", True),
+ ("http://foo.bar.com/", "foo.bar.com", True),
+ ("http://foo.bar.com/", ".bar.com", True),
++ ("http://foo.bar.com/", "bar.com", True),
+ ("http://foo.bar.com/", "com", True),
+ ("http://foo.com/", "rhubarb.foo.com", False),
+ ("http://foo.com/", ".foo.com", True),
+@@ -378,6 +379,8 @@ class CookieTests(TestCase):
+ ("http://foo/", "foo", True),
+ ("http://foo/", "foo.local", True),
+ ("http://foo/", ".local", True),
++ ("http://barfoo.com", ".foo.com", False),
++ ("http://barfoo.com", "foo.com", False),
+ ]:
+ request = urllib2.Request(url)
+ r = pol.domain_return_ok(domain, request)
+@@ -938,6 +941,34 @@ class CookieTests(TestCase):
+ c.add_cookie_header(req)
+ self.assertFalse(req.has_header("Cookie"))
+
++ c.clear()
++
++ pol.set_blocked_domains([])
++ req = Request("http://acme.com/")
++ res = FakeResponse(headers, "http://acme.com/")
++ cookies = c.make_cookies(res, req)
++ c.extract_cookies(res, req)
++ self.assertEqual(len(c), 1)
++
++ req = Request("http://acme.com/")
++ c.add_cookie_header(req)
++ self.assertTrue(req.has_header("Cookie"))
++
++ req = Request("http://badacme.com/")
++ c.add_cookie_header(req)
++ self.assertFalse(pol.return_ok(cookies[0], req))
++ self.assertFalse(req.has_header("Cookie"))
++
++ p = pol.set_blocked_domains(["acme.com"])
++ req = Request("http://acme.com/")
++ c.add_cookie_header(req)
++ self.assertFalse(req.has_header("Cookie"))
++
++ req = Request("http://badacme.com/")
++ c.add_cookie_header(req)
++ self.assertFalse(req.has_header("Cookie"))
++
++
+ def test_secure(self):
+ from cookielib import CookieJar, DefaultCookiePolicy
+
+diff -uNrp a/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst b/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst
+--- a/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst 1970-01-01 08:00:00.000000000 +0800
++++ b/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst 2019-12-21 16:12:17.416000000 +0800
+@@ -0,0 +1,4 @@
++Don't send cookies of domain A without a Domain attribute to domain B when
++domain A is a suffix match of domain B while using a cookiejar with the
++:class:`cookielib.DefaultCookiePolicy` policy. Patch by Karthikeyan
++Singaravelan.
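
For illustration only (not part of the patch series): a minimal Python 2 sketch of the behavior the cookielib hunks above enforce, mirroring the new domain_return_ok() test rows, and assuming the patched cookielib is importable.

    import cookielib
    import urllib2

    pol = cookielib.DefaultCookiePolicy()

    # A cookie domain of "bar.com" still matches a genuine subdomain...
    print pol.domain_return_ok("bar.com", urllib2.Request("http://foo.bar.com/"))  # True
    # ...but before the fix ("." + "barfoo.com").endswith("foo.com") was also
    # True, so cookies stored for "foo.com" leaked to the unrelated host
    # "barfoo.com". With dotdomain the suffix match must cross a dot boundary.
    print pol.domain_return_ok("foo.com", urllib2.Request("http://barfoo.com/"))   # False once patched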
diff --git a/CVE-2019-10160-1.patch b/CVE-2019-10160-1.patch
new file mode 100644
index 0000000000000000000000000000000000000000..865f709c4f1da9f0cd1fdb41b5310186778c6d76
--- /dev/null
+++ b/CVE-2019-10160-1.patch
@@ -0,0 +1,45 @@
+diff -uNrp a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
+--- a/Lib/test/test_urlparse.py 2019-12-21 15:41:32.172000000 +0800
++++ b/Lib/test/test_urlparse.py 2019-12-21 15:44:28.316000000 +0800
+@@ -641,6 +641,12 @@ class UrlParseTestCase(unittest.TestCase
+ self.assertIn(u'\u2100', denorm_chars)
+ self.assertIn(u'\uFF03', denorm_chars)
+
++ # bpo-36742: Verify port separators are ignored when they
++ # existed prior to decomposition
++ urlparse.urlsplit(u'http://\u30d5\u309a:80')
++ with self.assertRaises(ValueError):
++ urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
++
+ for scheme in [u"http", u"https", u"ftp"]:
+ for c in denorm_chars:
+ url = u"{}://netloc{}false.netloc/path".format(scheme, c)
+diff -uNrp a/Lib/urlparse.py b/Lib/urlparse.py
+--- a/Lib/urlparse.py 2019-12-21 15:41:32.080000000 +0800
++++ b/Lib/urlparse.py 2019-12-21 15:46:11.480000000 +0800
+@@ -171,13 +171,17 @@ def _checknetloc(netloc):
+ # looking for characters like \u2100 that expand to 'a/c'
+ # IDNA uses NFKC equivalence, so normalize for this check
+ import unicodedata
+- netloc2 = unicodedata.normalize('NFKC', netloc)
+- if netloc == netloc2:
++ n = netloc.rpartition('@')[2] # ignore anything to the left of '@'
++ n = n.replace(':', '') # ignore characters already included
++ n = n.replace('#', '') # but not the surrounding text
++ n = n.replace('?', '')
++ netloc2 = unicodedata.normalize('NFKC', n)
++ if n == netloc2:
+ return
+ _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+ for c in '/?#@:':
+ if c in netloc2:
+- raise ValueError("netloc '" + netloc2 + "' contains invalid " +
++ raise ValueError("netloc '" + netloc + "' contains invalid " +
+ "characters under NFKC normalization")
+
+ def urlsplit(url, scheme='', allow_fragments=True):
+diff -uNrp a/Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst b/Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst
+--- a/Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst 1970-01-01 08:00:00.000000000 +0800
++++ b/Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst 2019-12-21 15:53:31.188000000 +0800
+@@ -0,0 +1 @@
++Fixes mishandling of pre-normalization characters in urlsplit().
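
For illustration only (assuming the patched urlparse module): the reworked _checknetloc() rejects netloc characters that NFKC-normalize into URL delimiters, while a port separator that already exists before decomposition stays legal, as the bpo-36742 test above checks.

    import urlparse

    # u'\uFF03' (FULLWIDTH NUMBER SIGN) normalizes to '#' under NFKC, which
    # would silently truncate the authority; the patched urlsplit() raises.
    try:
        urlparse.urlsplit(u'http://netloc\uff03false.netloc/path')
    except ValueError as exc:
        print exc

    # A literal ':' that exists before decomposition is still accepted.
    print urlparse.urlsplit(u'http://\u30d5\u309a:80')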
diff --git a/CVE-2019-10160-2.patch b/CVE-2019-10160-2.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f4e8ca7b0d7370ac3aad6d4ce7eb5f18288d9d2b
--- /dev/null
+++ b/CVE-2019-10160-2.patch
@@ -0,0 +1,50 @@
+diff -uNrp a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
+--- a/Lib/test/test_urlparse.py 2019-12-21 15:54:46.576000000 +0800
++++ b/Lib/test/test_urlparse.py 2019-12-21 15:56:22.440000000 +0800
+@@ -648,11 +648,13 @@ class UrlParseTestCase(unittest.TestCase
+ urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
+
+ for scheme in [u"http", u"https", u"ftp"]:
+- for c in denorm_chars:
+- url = u"{}://netloc{}false.netloc/path".format(scheme, c)
+- print "Checking %r" % url
+- with self.assertRaises(ValueError):
+- urlparse.urlsplit(url)
++ for netloc in [u"netloc{}false.netloc", u"n{}user@netloc"]:
++ for c in denorm_chars:
++ url = u"{}://{}/path".format(scheme, netloc.format(c))
++ if test_support.verbose:
++ print "Checking %r" % url
++ with self.assertRaises(ValueError):
++ urlparse.urlsplit(url)
+
+ def test_main():
+ test_support.run_unittest(UrlParseTestCase)
+diff -uNrp a/Lib/urlparse.py b/Lib/urlparse.py
+--- a/Lib/urlparse.py 2019-12-21 15:54:46.344000000 +0800
++++ b/Lib/urlparse.py 2019-12-21 15:57:41.260000000 +0800
+@@ -171,18 +171,17 @@ def _checknetloc(netloc):
+ # looking for characters like \u2100 that expand to 'a/c'
+ # IDNA uses NFKC equivalence, so normalize for this check
+ import unicodedata
+- n = netloc.rpartition('@')[2] # ignore anything to the left of '@'
+- n = n.replace(':', '') # ignore characters already included
+- n = n.replace('#', '') # but not the surrounding text
+- n = n.replace('?', '')
++ n = netloc.replace(u'@', u'') # ignore characters already included
++ n = n.replace(u':', u'') # but not the surrounding text
++ n = n.replace(u'#', u'')
++ n = n.replace(u'?', u'')
+ netloc2 = unicodedata.normalize('NFKC', n)
+ if n == netloc2:
+ return
+-    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+ for c in '/?#@:':
+ if c in netloc2:
+- raise ValueError("netloc '" + netloc + "' contains invalid " +
+- "characters under NFKC normalization")
++ raise ValueError(u"netloc '" + netloc + u"' contains invalid " +
++ u"characters under NFKC normalization")
+
+ def urlsplit(url, scheme='', allow_fragments=True):
+ """Parse a URL into 5 components:
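
For illustration only (again assuming the patched module): this second patch scrubs '@' itself before normalizing, so a decomposing character in the userinfo part, which slipped past the first fix, is now caught as well; the new u"n{}user@netloc" test row above exercises exactly this.

    import urlparse

    # u'\u2100' (ACCOUNT OF) normalizes to 'a/c' under NFKC. The first fix
    # ignored everything left of '@', so this URL was accepted; now it raises.
    try:
        urlparse.urlsplit(u'http://n\u2100user@netloc/path')
    except ValueError as exc:
        print exc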
diff --git a/CVE-2019-10160-3.patch b/CVE-2019-10160-3.patch
new file mode 100644
index 0000000000000000000000000000000000000000..78bfd3eb6041e4f82862a47a4d6c0b8c3c5bf8e2
--- /dev/null
+++ b/CVE-2019-10160-3.patch
@@ -0,0 +1,40 @@
+diff -uNrp a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
+--- a/Lib/test/test_urlparse.py 2019-12-21 15:58:00.556000000 +0800
++++ b/Lib/test/test_urlparse.py 2019-12-21 15:59:11.456000000 +0800
+@@ -656,6 +656,15 @@ class UrlParseTestCase(unittest.TestCase
+ with self.assertRaises(ValueError):
+ urlparse.urlsplit(url)
+
++    # check error message: invalid netloc must be formatted with repr()
++ # to get an ASCII error message
++ with self.assertRaises(ValueError) as cm:
++ urlparse.urlsplit(u'http://example.com\uFF03@bing.com')
++ self.assertEqual(str(cm.exception),
++ "netloc u'example.com\\uff03@bing.com' contains invalid characters "
++ "under NFKC normalization")
++ self.assertIsInstance(cm.exception.args[0], str)
++
+ def test_main():
+ test_support.run_unittest(UrlParseTestCase)
+
+diff -uNrp a/Lib/urlparse.py b/Lib/urlparse.py
+--- a/Lib/urlparse.py 2019-12-21 15:58:00.480000000 +0800
++++ b/Lib/urlparse.py 2019-12-21 15:59:55.128000000 +0800
+@@ -181,7 +181,8 @@ def _checknetloc(netloc):
+ for c in '/?#@:':
+ if c in netloc2:
+- raise ValueError(u"netloc '" + netloc + u"' contains invalid " +
+- u"characters under NFKC normalization")
++ raise ValueError("netloc %r contains invalid characters "
++ "under NFKC normalization"
++ % netloc)
+
+ def urlsplit(url, scheme='', allow_fragments=True):
+ """Parse a URL into 5 components:
+diff -uNrp a/Misc/NEWS.d/next/Library/2019-06-10-12-02-45.bpo-36742.UEdHXJ.rst b/Misc/NEWS.d/next/Library/2019-06-10-12-02-45.bpo-36742.UEdHXJ.rst
+--- a/Misc/NEWS.d/next/Library/2019-06-10-12-02-45.bpo-36742.UEdHXJ.rst 1970-01-01 08:00:00.000000000 +0800
++++ b/Misc/NEWS.d/next/Library/2019-06-10-12-02-45.bpo-36742.UEdHXJ.rst 2019-12-21 16:00:40.480000000 +0800
+@@ -0,0 +1,3 @@
++:func:`urlparse.urlsplit` error message for an invalid ``netloc`` under NFKC
++normalization is now a :class:`str` string, rather than a :class:`unicode`
++string, so that displaying the error message cannot itself fail.
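
For illustration only (assuming all three CVE-2019-10160 patches applied): the exception text is now a byte str built with %r, so printing it on an ASCII terminal cannot itself raise a UnicodeEncodeError, which is what the assertIsInstance() check above verifies.

    import urlparse

    try:
        urlparse.urlsplit(u'http://example.com\uff03@bing.com')
    except ValueError as exc:
        print isinstance(exc.args[0], str)  # True: %r yields ASCII-only text
        print exc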
diff --git a/CVE-2019-16056.patch b/CVE-2019-16056.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5a841437f3aadb61c09f1c8aa5bb93828169e0c9
--- /dev/null
+++ b/CVE-2019-16056.patch
@@ -0,0 +1,57 @@
+diff -uNrp a/Lib/email/_parseaddr.py b/Lib/email/_parseaddr.py
+--- a/Lib/email/_parseaddr.py 2019-12-21 16:01:21.340000000 +0800
++++ b/Lib/email/_parseaddr.py 2019-12-21 16:03:22.108000000 +0800
+@@ -336,7 +336,12 @@ class AddrlistClass:
+ aslist.append('@')
+ self.pos += 1
+ self.gotonext()
+- return EMPTYSTRING.join(aslist) + self.getdomain()
++ domain = self.getdomain()
++ if not domain:
++ # Invalid domain, return an empty address instead of returning a
++ # local part to denote failed parsing.
++ return EMPTYSTRING
++ return EMPTYSTRING.join(aslist) + domain
+
+ def getdomain(self):
+ """Get the complete domain name from an address."""
+@@ -351,6 +356,10 @@ class AddrlistClass:
+ elif self.field[self.pos] == '.':
+ self.pos += 1
+ sdlist.append('.')
++ elif self.field[self.pos] == '@':
++ # bpo-34155: Don't parse domains with two `@` like
++ # `a@malicious.org@important.com`.
++ return EMPTYSTRING
+ elif self.field[self.pos] in self.atomends:
+ break
+ else:
+diff -uNrp a/Lib/email/test/test_email.py b/Lib/email/test/test_email.py
+--- a/Lib/email/test/test_email.py 2019-12-21 16:01:21.344000000 +0800
++++ b/Lib/email/test/test_email.py 2019-12-21 16:04:40.564000000 +0800
+@@ -2306,6 +2306,20 @@ class TestMiscellaneous(TestEmailBase):
+ self.assertEqual(Utils.parseaddr('<>'), ('', ''))
+ self.assertEqual(Utils.formataddr(Utils.parseaddr('<>')), '')
+
++ def test_parseaddr_multiple_domains(self):
++ self.assertEqual(
++ Utils.parseaddr('a@b@c'),
++ ('', '')
++ )
++ self.assertEqual(
++ Utils.parseaddr('a@b.c@c'),
++ ('', '')
++ )
++ self.assertEqual(
++ Utils.parseaddr('a@172.17.0.1@c'),
++ ('', '')
++ )
++
+ def test_noquote_dump(self):
+ self.assertEqual(
+ Utils.formataddr(('A Silly Person', 'person@dom.ain')),
+diff -uNrp a/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst b/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
+--- a/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst 1970-01-01 08:00:00.000000000 +0800
++++ b/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst 2019-12-21 16:05:05.292000000 +0800
+@@ -0,0 +1 @@
++Fix parsing of invalid email addresses with more than one ``@`` (e.g. ``a@b@c.com``) so that the part before the second ``@`` is no longer returned as a valid email address. Patch by maxking & jpic.
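
For illustration only (parseaddr() is reachable as email.utils.parseaddr, or via the email.Utils alias used in the test above): with the fix, an address carrying a second '@' collapses to an empty result instead of yielding an attacker-chosen "valid" address.

    from email.utils import parseaddr

    # Unpatched, this returned ('', 'a@malicious.org'), i.e. the part before
    # the second '@' passed as a parsed address; patched, parsing fails clean.
    print parseaddr('a@malicious.org@important.com')  # ('', '')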
diff --git a/CVE-2019-16935.patch b/CVE-2019-16935.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b33f146e19c1f941be0eb8b051cd8b4406002148
--- /dev/null
+++ b/CVE-2019-16935.patch
@@ -0,0 +1,74 @@
+diff -uNrp a/Lib/DocXMLRPCServer.py b/Lib/DocXMLRPCServer.py
+--- a/Lib/DocXMLRPCServer.py 2019-12-21 16:13:25.240000000 +0800
++++ b/Lib/DocXMLRPCServer.py 2019-12-21 16:15:24.076000000 +0800
+@@ -20,6 +20,15 @@ from SimpleXMLRPCServer import (SimpleXM
+ CGIXMLRPCRequestHandler,
+ resolve_dotted_attribute)
+
++def _html_escape_quote(s):
++    s = s.replace("&", "&amp;") # Must be done first!
++    s = s.replace("<", "&lt;")
++    s = s.replace(">", "&gt;")
++    s = s.replace('"', "&quot;")
++    s = s.replace('\'', "&#x27;")
++ return s
++
++
+ class ServerHTMLDoc(pydoc.HTMLDoc):
+ """Class used to generate pydoc HTML document for a server"""
+
+@@ -210,7 +219,9 @@ class XMLRPCDocGenerator:
+ methods
+ )
+
+- return documenter.page(self.server_title, documentation)
++ title = _html_escape_quote(self.server_title)
++ return documenter.page(title, documentation)
++
+
+ class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+ """XML-RPC and documentation request handler class.
+diff -uNrp a/Lib/test/test_docxmlrpc.py b/Lib/test/test_docxmlrpc.py
+--- a/Lib/test/test_docxmlrpc.py 2019-12-21 16:13:25.340000000 +0800
++++ b/Lib/test/test_docxmlrpc.py 2019-12-21 16:16:49.828000000 +0800
+@@ -1,5 +1,6 @@
+ from DocXMLRPCServer import DocXMLRPCServer
+ import httplib
++import re
+ import sys
+ from test import test_support
+ threading = test_support.import_module('threading')
+@@ -176,6 +177,26 @@ class DocXMLRPCHTTPGETServer(unittest.Te
+        self.assertIn("""Try&nbsp;self.<strong>add</strong>,&nbsp;too.""",
+ response.read())
+
++ def test_server_title_escape(self):
++ """Test that the server title and documentation
++ are escaped for HTML.
++ """
++ self.serv.set_server_title('test_title