diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 77c2007..c6c54b0 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -885,6 +885,12 @@ class UrlParseTestCase(unittest.TestCase):
         self.assertIn('\u2100', denorm_chars)
         self.assertIn('\uFF03', denorm_chars)
 
+        # bpo-36742: Verify port separators are ignored when they
+        # existed prior to decomposition
+        urllib.parse.urlsplit('http://\u30d5\u309a:80')
+        with self.assertRaises(ValueError):
+            urllib.parse.urlsplit('http://\u30d5\u309a\ufe1380')
+
         for scheme in ["http", "https", "ftp"]:
             for c in denorm_chars:
                 url = "{}://netloc{}false.netloc/path".format(scheme, c)
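For context (not part of the patch): the two test URLs above differ only in whether the ':' port separator exists before NFKC normalization. '\u30d5\u309a' composes to the single harmless character '\u30d7', while '\ufe13' (PRESENTATION FORM FOR VERTICAL COLON) only becomes ':' after normalization, which is what the check must reject. A quick illustrative check in a Python REPL:

    >>> import unicodedata
    >>> unicodedata.normalize('NFKC', '\u30d5\u309a:80')      # ':' already present, allowed
    'プ:80'
    >>> unicodedata.normalize('NFKC', '\u30d5\u309a\ufe1380') # ':' introduced by NFKC, rejected
    'プ:80'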
diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
index 243f470..da3a40c 100644
--- a/Lib/urllib/parse.py
+++ b/Lib/urllib/parse.py
@@ -322,13 +322,16 @@ def _checknetloc(netloc):
     # looking for characters like \u2100 that expand to 'a/c'
     # IDNA uses NFKC equivalence, so normalize for this check
     import unicodedata
-    netloc2 = unicodedata.normalize('NFKC', netloc)
-    if netloc == netloc2:
+    n = netloc.rpartition('@')[2] # ignore anything to the left of '@'
+    n = n.replace(':', '')        # ignore characters already included
+    n = n.replace('#', '')        # but not the surrounding text
+    n = n.replace('?', '')
+    netloc2 = unicodedata.normalize('NFKC', n)
+    if n == netloc2:
         return
-    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
     for c in '/?#@:':
         if c in netloc2:
-            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+            raise ValueError("netloc '" + netloc + "' contains invalid " +
                              "characters under NFKC normalization")
 
 def urlsplit(url, scheme='', allow_fragments=True):
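For reference, a minimal sketch of the behaviour this change is meant to produce, assuming a Python build with the patch applied (the hostname is only an example; the exact error text may vary by version):

    import urllib.parse

    # a ':' introduced only by NFKC normalization ('\ufe13' -> ':') is rejected
    try:
        urllib.parse.urlsplit('http://example.com\ufe1380/')
    except ValueError as exc:
        print('rejected:', exc)

    # a ':' that was literally present before normalization still parses
    print(urllib.parse.urlsplit('http://\u30d5\u309a:80'))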