From e8d0874ca63d0ad02978d82741439841bc1a7905 Mon Sep 17 00:00:00 2001
From: Brendan Reilly <breilly@redhat.com>
Date: Jul 13 2022 16:09:21 +0000
Subject: Avoid libmodulemd symbol clash


Importing dnf in the MBS process causes a libmodulemd symbol clash on
RHEL 7. This is a temporary workaround to avoid that, and should not be
merged.

See https://pagure.io/releng/issue/10850
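
Concretely, the workaround takes two forms; a minimal sketch of both
follows (function names, URL, and argument values are illustrative, not
the exact MBS code):

    # Pattern 1: defer the dnf import into the function body, so dnf is
    # only loaded in the code paths that actually need it.
    def get_releasever():
        import dnf  # deferred import, see the issue above
        return dnf.Base().conf.releasever

    # Pattern 2: keep dnf out of the long-running process entirely by
    # running the dnf-based logic in a child process and parsing stdout.
    import subprocess
    output = subprocess.check_output(
        ["mbs-get-rpms-in-external-repo", "https://example.com/repo/$arch/",
         "external-repo-cache", "/var/cache/mbs", "120", "500", "x86_64"],
        universal_newlines=True)
    nevras = set(output.split())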

---

diff --git a/module_build_service/builder/MockModuleBuilder.py b/module_build_service/builder/MockModuleBuilder.py
index df9a1b8..831f149 100644
--- a/module_build_service/builder/MockModuleBuilder.py
+++ b/module_build_service/builder/MockModuleBuilder.py
@@ -8,7 +8,6 @@ import re
 import subprocess
 import threading
 
-import dnf
 import koji
 import kobo.rpmlib
 import platform
@@ -182,6 +181,10 @@ def get_local_releasever():
     """
     Returns the $releasever variable used in the system when expanding .repo files.
     """
+    # Import dnf inside the function to avoid a symbol name clash.
+    # See: https://pagure.io/releng/issue/10850
+    import dnf
+
     dnf_base = dnf.Base()
     return dnf_base.conf.releasever
 
@@ -196,6 +199,10 @@ def import_builds_from_local_dnf_repos(platform_id=None):
     :param str platform_id: The `name:stream` of a fake platform module to generate in this
         method. When not set, the /etc/os-release is parsed to get the PLATFORM_ID.
     """
+    # Import dnf inside the function to avoid a symbol name clash.
+    # See: https://pagure.io/releng/issue/10850
+    import dnf
+
     log.info("Loading available RPM repositories.")
     dnf_base = dnf.Base()
     dnf_base.read_all_repos()
diff --git a/module_build_service/scheduler/default_modules.py b/module_build_service/scheduler/default_modules.py
index 6846d13..46d73f0 100644
--- a/module_build_service/scheduler/default_modules.py
+++ b/module_build_service/scheduler/default_modules.py
@@ -5,8 +5,8 @@ import errno
 import os
 import shutil
+import subprocess
 import tempfile
 
-import dnf
 import kobo.rpmlib
 import koji
 import six.moves.xmlrpc_client as xmlrpclib
@@ -330,91 +330,13 @@ def _get_rpms_from_tags(koji_session, tags, arches):
 
     return nevras
 
 
 def _get_rpms_in_external_repo(repo_url, arches, cache_dir_name):
-    """
-    Get the available RPMs in the external repo for the provided arches.
-
-    :param str repo_url: the URL of the external repo with the "$arch" variable included
-    :param list arches: the list of arches to query the external repo for
-    :param str cache_dir_name: the cache directory name under f"{conf.cache_dir}/dnf"
-    :return: a set of the RPM NEVRAs
-    :rtype: set
-    :raise RuntimeError: if the cache is not writeable or the external repo couldn't be loaded
-    :raises ValueError: if there is no "$arch" variable in repo URL
-    """
-    if "$arch" not in repo_url:
-        raise ValueError(
-            "The external repo {} does not contain the $arch variable".format(repo_url)
-        )
-
-    base = dnf.Base()
-    try:
-        dnf_conf = base.conf
-        # Expire the metadata right away so that when a repo is loaded, it will always check to
-        # see if the external repo has been updated
-        dnf_conf.metadata_expire = 0
+    # Call an external helper script via subprocess to avoid importing dnf here.
+    # See: https://pagure.io/releng/issue/10850
+    output = subprocess.check_output(
+        ['mbs-get-rpms-in-external-repo', repo_url, cache_dir_name, conf.cache_dir,
+         str(conf.dnf_timeout), str(conf.dnf_minrate)] + arches,
+        universal_newlines=True)
 
-        cache_location = os.path.join(conf.cache_dir, "dnf", cache_dir_name)
-        try:
-            # exist_ok=True can't be used in Python 2
-            os.makedirs(cache_location, mode=0o0770)
-        except OSError as e:
-            # Don't fail if the directories already exist
-            if e.errno != errno.EEXIST:
-                log.exception("Failed to create the cache directory %s", cache_location)
-                raise RuntimeError("The MBS cache is not writeable.")
-
-        # Tell DNF to use the cache directory
-        dnf_conf.cachedir = cache_location
-        # Don't skip repos that can't be synchronized
-        dnf_conf.skip_if_unavailable = False
-        dnf_conf.timeout = conf.dnf_timeout
-        # Get rid of everything to be sure it's a blank slate. This doesn't delete the cached repo
-        # data.
-        base.reset(repos=True, goal=True, sack=True)
-
-        # Add a separate repo for each architecture
-        for arch in arches:
-            # Convert arch to canon_arch. This handles cases where Koji "i686" arch is mapped to
-            # "i386" when generating RPM repository.
-            canon_arch = koji.canonArch(arch)
-            repo_name = "repo_{}".format(canon_arch)
-            repo_arch_url = repo_url.replace("$arch", canon_arch)
-            base.repos.add_new_repo(
-                repo_name, dnf_conf, baseurl=[repo_arch_url], minrate=conf.dnf_minrate,
-            )
-
-        try:
-            # Load the repos in parallel
-            base.update_cache()
-        except dnf.exceptions.RepoError:
-            msg = "Failed to load the external repos"
-            log.exception(msg)
-            raise RuntimeError(msg)
-
-        # dnf will not always raise an error on repo failures, so we check explicitly
-        for repo_name in base.repos:
-            if not base.repos[repo_name].metadata:
-                msg = "Failed to load metadata for repo %s" % repo_name
-                log.exception(msg)
-                raise RuntimeError(msg)
-
-        base.fill_sack(load_system_repo=False)
-
-        # Return all the available RPMs
-        nevras = set()
-        for rpm in base.sack.query().available():
-            rpm_dict = {
-                "arch": rpm.arch,
-                "epoch": rpm.epoch,
-                "name": rpm.name,
-                "release": rpm.release,
-                "version": rpm.version,
-            }
-            nevra = kobo.rpmlib.make_nvra(rpm_dict, force_epoch=True)
-            nevras.add(nevra)
-    finally:
-        base.close()
-
-    return nevras
+    return set(output.split())
diff --git a/module_build_service_workarounds/__init__.py b/module_build_service_workarounds/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/module_build_service_workarounds/__init__.py
diff --git a/module_build_service_workarounds/externalrepo.py b/module_build_service_workarounds/externalrepo.py
new file mode 100755
index 0000000..a3e234b
--- /dev/null
+++ b/module_build_service_workarounds/externalrepo.py
@@ -0,0 +1,112 @@
+import os
+import sys
+
+# Fail fast if gi has already been loaded: the whole point of this helper
+# is to import dnf in a fresh process (see the issue linked below).
+assert "gi" not in sys.modules
+
+import errno
+
+import koji
+import kobo.rpmlib
+import dnf
+
+# The _get_rpms_in_external_repo logic was moved into this separate script
+# to avoid a symbol clash with dnf.
+# See: https://pagure.io/releng/issue/10850
+
+def main(argv=sys.argv):
+    """
+    Print the available RPMs in the external repo for the provided arches.
+
+    Expects these positional command-line arguments, in order: repo_url
+    (the URL of the external repo with the "$arch" variable included),
+    cache_dir_name (the cache directory name under "<cache_dir>/dnf"),
+    cache_dir, dnf_timeout, dnf_minrate, and one or more arches to query
+    the external repo for.
+
+    The available RPM NEVRAs are printed to stdout as a space-separated list.
+
+    :raises RuntimeError: if the cache is not writeable or the external repo couldn't be loaded
+    :raises ValueError: if there is no "$arch" variable in repo URL
+    """
+
+    repo_url = argv[1]
+    cache_dir_name = argv[2]
+    cache_dir = argv[3]
+    dnf_timeout = argv[4]
+    dnf_minrate = argv[5]
+    arches = argv[6:]
+
+    if "$arch" not in repo_url:
+        raise ValueError(
+            "The external repo {} does not contain the $arch variable".format(repo_url)
+        )
+
+    base = dnf.Base()
+    try:
+        dnf_conf = base.conf
+        # Expire the metadata right away so that when a repo is loaded, it will always check to
+        # see if the external repo has been updated
+        dnf_conf.metadata_expire = 0
+
+        cache_location = os.path.join(cache_dir, "dnf", cache_dir_name)
+        try:
+            # exist_ok=True can't be used in Python 2
+            os.makedirs(cache_location, mode=0o0770)
+        except OSError as e:
+            # Don't fail if the directories already exist
+            if e.errno != errno.EEXIST:
+                raise RuntimeError("The MBS cache is not writeable.")
+
+        # Tell DNF to use the cache directory
+        dnf_conf.cachedir = cache_location
+        # Don't skip repos that can't be synchronized
+        dnf_conf.skip_if_unavailable = False
+        dnf_conf.timeout = int(dnf_timeout)
+        # Get rid of everything to be sure it's a blank slate. This doesn't delete the cached repo
+        # data.
+        base.reset(repos=True, goal=True, sack=True)
+
+        # Add a separate repo for each architecture
+        for arch in arches:
+            # Convert arch to canon_arch. This handles cases where Koji "i686" arch is mapped to
+            # "i386" when generating RPM repository.
+            canon_arch = koji.canonArch(arch)
+            repo_name = "repo_{}".format(canon_arch)
+            repo_arch_url = repo_url.replace("$arch", canon_arch)
+            base.repos.add_new_repo(
+                repo_name, dnf_conf, baseurl=[repo_arch_url], minrate=int(dnf_minrate),
+            )
+
+        try:
+            # Load the repos in parallel
+            base.update_cache()
+        except dnf.exceptions.RepoError:
+            msg = "Failed to load the external repos"
+            raise RuntimeError(msg)
+
+        # dnf will not always raise an error on repo failures, so we check explicitly
+        for repo_name in base.repos:
+            if not base.repos[repo_name].metadata:
+                msg = "Failed to load metadata for repo %s" % repo_name
+                raise RuntimeError(msg)
+
+        base.fill_sack(load_system_repo=False)
+
+        # Return all the available RPMs
+        nevras = set()
+        for rpm in base.sack.query().available():
+            rpm_dict = {
+                "arch": rpm.arch,
+                "epoch": rpm.epoch,
+                "name": rpm.name,
+                "release": rpm.release,
+                "version": rpm.version,
+            }
+            nevra = kobo.rpmlib.make_nvra(rpm_dict, force_epoch=True)
+            nevras.add(nevra)
+    finally:
+        base.close()
+
+    print(" ".join(nevras))
diff --git a/setup.py b/setup.py
index 36b8c2e..21b889c 100644
--- a/setup.py
+++ b/setup.py
@@ -42,6 +42,7 @@ setup(
             "mbs-upgradedb = module_build_service.manage:upgradedb",
             "mbs-frontend = module_build_service.manage:run",
             "mbs-manager = module_build_service.manage:manager_wrapper",
+            "mbs-get-rpms-in-external-repo = module_build_service_workarounds.externalrepo:main",
         ],
         "moksha.consumer": "mbsconsumer = module_build_service.scheduler.consumer:MBSConsumer",
         "mbs.messaging_backends": [