diff --git a/koji_containerbuild/plugins/builder_containerbuild.py b/koji_containerbuild/plugins/builder_containerbuild.py
index 677dae7..71f42f2 100644
--- a/koji_containerbuild/plugins/builder_containerbuild.py
+++ b/koji_containerbuild/plugins/builder_containerbuild.py
@@ -44,6 +44,11 @@ import osbs.api
 import osbs.http
 from osbs.api import OSBS
 from osbs.conf import Configuration
+from osbs.build.build_response import BuildResponse
+
+# subprocess.Popen patch
+import subprocess
+import json
 
 # We need kojid module which isn't proper python module and not even in
 # site-package path.
@@ -255,7 +260,6 @@ class CreateContainerTask(BaseTaskHandler):
                                  workdir)
         self._osbs = None
 
-
     def osbs(self):
         """Handler of OSBS object"""
         if not self._osbs:
@@ -365,66 +369,124 @@ class CreateContainerTask(BaseTaskHandler):
             'yum_repourls': yum_repourls,
             'scratch': scratch,
         }
+
+        # subprocess.Popen patch
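+        # Shell out to the osbs CLI instead of calling OSBS.create_build()
+        # directly.  The command string is split on whitespace below, so the
+        # argument values are assumed to contain no spaces.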
+        cmd = "osbs --output json build"
+        cmd += " --git-url %s" % create_build_args["git_uri"]
+        cmd += " --git-commit %s" % create_build_args["git_ref"]
+        cmd += " --user %s" % create_build_args["user"]
+        cmd += " --component %s" % create_build_args["component"]
+        cmd += " --target %s" % create_build_args["target"]
+        cmd += " --arch %s" % create_build_args["architecture"]
+        for repo in create_build_args["yum_repourls"]:
+            cmd += " --add-yum-repo %s" % create_build_args["yum_repourls"]
+
         if branch:
             create_build_args['git_branch'] = branch
+
+            # subprocess.Popen patch
+            cmd += " --git-branch %s" % create_build_args["git_branch"]
+        elif target_info["name"].split("-")[0] == "rawhide":
+            # subprocess.Popen patch
+            cmd += " --git-branch %s" % "master"
+        else:
+            # subprocess.Popen patch
+            cmd += " --git-branch %s" % target_info["name"].split("-")[0]
+
         if push_url:
             create_build_args['git_push_url'] = push_url
+
+            # subprocess.Popen patch
+            cmd += " --git-push-url %s" % create_build_args["git_push_url"]
-        build_response = self.osbs().create_build(
-            **create_build_args
+
+        # subprocess.Popen patch
+        # build_response = self.osbs().create_build(
+        #     **create_build_args
+        # )
+        # build_id = build_response.get_build_name()
+        self.logger.debug("OSBS subprocess.Popen cmd: %s" % cmd)
+        subproc = subprocess.Popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
         )
-        build_id = build_response.get_build_name()
+        sp_out, sp_err = subproc.communicate()
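+        # "osbs --output json build" prints the created build as JSON;
+        # its metadata.name field is used as the OSBS build id.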
+        build_id = json.loads(sp_out)['metadata']['name']
+
         self.logger.debug("OSBS build id: %r", build_id)
 
-        self.logger.debug("Waiting for osbs build_id: %s to be scheduled.",
-                          build_id)
-        # we need to wait for kubelet to schedule the build, otherwise it's 500
-        self.osbs().wait_for_build_to_get_scheduled(build_id)
+        # subprocess.Popen patch
+        # (this isn't needed: the osbs command doesn't return until the build is scheduled)
+        #
+        #self.logger.debug("Waiting for osbs build_id: %s to be scheduled.",
+        #                  build_id)
+        ## we need to wait for kubelet to schedule the build, otherwise it's 500
+        #self.osbs().wait_for_build_to_get_scheduled(build_id)
         self.logger.debug("Build was scheduled")
 
         osbs_logs_dir = self.resultdir()
         koji.ensuredir(osbs_logs_dir)
-        pid = os.fork()
-        if pid:
-            self._incremental_upload_logs(pid)
 
-        else:
-            full_output_name = os.path.join(osbs_logs_dir,
-                                            'openshift-incremental.log')
-
-            # Make sure curl is initialized again otherwise connections via SSL
-            # fails with NSS error -8023 and curl_multi.info_read()
-            # returns error code 35 (SSL CONNECT failed).
-            # See http://permalink.gmane.org/gmane.comp.web.curl.library/38759
-            self._osbs = None
-            self.logger.debug("Running pycurl global cleanup")
-            pycurl.global_cleanup()
-
-            # Following retry code is here mainly to workaround bug which causes
-            # connection drop while reading logs after about 5 minutes.
-            # OpenShift bug with description:
-            # https://github.com/openshift/origin/issues/2348
-            # and upstream bug in Kubernetes:
-            # https://github.com/GoogleCloudPlatform/kubernetes/issues/9013
-            retry = 0
-            max_retries = 30
-            while retry < max_retries:
-                try:
-                    self._write_incremental_logs(build_id,
-                                                 full_output_name)
-                except Exception, error:
-                    self.logger.info("Error while saving incremental logs "
-                                     "(retry #%d): %s", retry, error)
-                    retry += 1
-                    time.sleep(10)
-                    continue
-                break
-            else:
-                self.logger.info("Gave up trying to save incremental logs "
-                                 "after #%d retries.", retry)
-                os._exit(1)
-            os._exit(0)
+# subprocess.Popen patch
+#   This block isn't needed; incremental log uploads aren't handled in this
+#   temporary work-around.
+#       pid = os.fork()
+#       if pid:
+#           self._incremental_upload_logs(pid)
+#
+#        else:
+#            full_output_name = os.path.join(osbs_logs_dir,
+#                                            'openshift-incremental.log')
+#
+#            # Make sure curl is initialized again otherwise connections via SSL
+#            # fails with NSS error -8023 and curl_multi.info_read()
+#            # returns error code 35 (SSL CONNECT failed).
+#            # See http://permalink.gmane.org/gmane.comp.web.curl.library/38759
+#            self._osbs = None
+#            self.logger.debug("Running pycurl global cleanup")
+#            pycurl.global_cleanup()
+#
+#            # Following retry code is here mainly to workaround bug which causes
+#            # connection drop while reading logs after about 5 minutes.
+#            # OpenShift bug with description:
+#            # https://github.com/openshift/origin/issues/2348
+#            # and upstream bug in Kubernetes:
+#            # https://github.com/GoogleCloudPlatform/kubernetes/issues/9013
+#            retry = 0
+#            max_retries = 30
+#            while retry < max_retries:
+#                try:
+#                    self._write_incremental_logs(build_id,
+#                                                 full_output_name)
+#                except Exception, error:
+#                    self.logger.info("Error while saving incremental logs "
+#                                     "(retry #%d): %s", retry, error)
+#                    retry += 1
+#                    time.sleep(10)
+#                    continue
+#                break
+#            else:
+#                self.logger.info("Gave up trying to save incremental logs "
+#                                 "after #%d retries.", retry)
+#                os._exit(1)
+#            os._exit(0)
+#
+
+        # subprocess.Popen patch
+        #response = self.osbs().wait_for_build_to_finish(build_id)
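+        # "osbs watch-build" is expected to block until the build completes,
+        # so waiting on the subprocess stands in for wait_for_build_to_finish().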
+        subproc = subprocess.Popen(
+            "osbs --output json watch-build {0}".format(build_id).split()
+        )
+        subproc.wait()  # wait for the build to finish
+
+        subproc = subprocess.Popen(
+            "osbs --output json get-build {0}".format(build_id).split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
+        )
+        sp_out, sp_err = subproc.communicate()
+        response = BuildResponse(json.loads(sp_out))  # wrap the JSON build in a BuildResponse
 
-        response = self.osbs().wait_for_build_to_finish(build_id)
         self.logger.debug("OSBS build finished with status: %s. Build "
                           "response: %s.", response.status,
                           response.json)
@@ -630,7 +692,16 @@ class BuildContainerTask(BaseTaskHandler):
         return report
 
     def _raise_if_image_failed(self, osbs_build_id):
-        build = self.osbs().get_build(osbs_build_id)
+        # subprocess.Popen patch
+        #build = self.osbs().get_build(osbs_build_id)
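+        # Same CLI-based lookup as above: fetch the build as JSON and wrap it
+        # in a BuildResponse so is_failed() keeps working.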
+        subproc = subprocess.Popen(
+            "osbs --output json get-build {0}".format(osbs_build_id).split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
+        )
+        sp_out, sp_err = subproc.communicate()
+        build = BuildResponse(json.loads(sp_out))
+
         if build.is_failed():
             raise ContainerError('Image build failed. OSBS build id: %s' %
                                  osbs_build_id)