Blob Blame History Raw
From 1bd70037c40aaf8b5a230e9cca9707a4ba047115 Mon Sep 17 00:00:00 2001
From: serge-sans-paille <sguelton@redhat.com>
Date: Mon, 17 Dec 2018 08:51:41 +0000
Subject: [PATCH] Make Python scripts compatible with both Python2 and Python3

Mostly:

- Update shebang
- From __future__ import print_function
- Portable way of iterating on dictionaries
---
 CompareDebugInfo.py         | 36 ++++++++++++++++--------------------
 FindMissingLineNo.py        |  4 +---
 litsupport/modules/stats.py |  4 ++--
 utils/compare.py            | 26 ++++++++++++++------------
 utils/tdiff.py              |  6 +++---
 5 files changed, 36 insertions(+), 40 deletions(-)

diff --git a/CompareDebugInfo.py b/CompareDebugInfo.py
index be6fa2368..121014e74 100755
--- a/CompareDebugInfo.py
+++ b/CompareDebugInfo.py
@@ -1,4 +1,5 @@
 #!/usr/bin/python
+from __future__ import print_function
 
 import os
 import sys
@@ -45,25 +46,24 @@ class BreakPoint:
         self.values[arg_name] = value
         
     def __repr__(self):
-        print self.name
-        items = self.values.items()
-        for i in range(len(items)):
-            print items[i][0]," = ",items[i][1]
+        print(self.name)
+        for k, v in self.values.items():
+            print(k, "=", v)
         return ''
 
     def compare_args(self, other, file):
         myitems = self.values.items()
-        otheritems = other.values.items()
+        otheritems = list(other.values.items())
         match = False
-        for i in range(len(myitems)):
+        for i, myitem in enumerate(myitems):
             if i >= len(otheritems):
                 match = True
-                self.missing_args.append(myitems[i][0])
-            elif cmp(myitems[i][1], otheritems[i][1]):
+                self.missing_args.append(myitem[0])
+            elif myitem[1] != otheritems[i][1]:
                 match = True
-                self.notmatching_args.append(myitems[i][0])
+                self.notmatching_args.append(myitem[0])
             else:
-                self.matching_args.append(myitems[i][0])
+                self.matching_args.append(myitem[0])
 
         self.print_list(self.matching_args, " Matching arguments ", file)
         self.print_list(self.notmatching_args, " Not Matching arguments ", file)
@@ -108,9 +108,7 @@ f2_items = f2_breakpoints.items()
     
 f = open(LOG_FILE, "w")
 f.write("Log output\n")
-for f2bp in range(len(f2_items)):
-    id = f2_items[f2bp][0]
-    bp = f2_items[f2bp][1]
+for id, bp in f2_items:
     bp1 = f1_breakpoints.get(id)
     if bp1 is None:
         bp.setMissing()
@@ -127,9 +125,7 @@ read_input(NATIVE_OPT_DBG_OUTPUT_FILE, nf2_breakpoints)
 nf2_items = nf2_breakpoints.items()
     
 nfl = open(NATIVE_LOG_FILE, "w")
-for nf2bp in range(len(nf2_items)):
-    id = nf2_items[nf2bp][0]
-    bp = nf2_items[nf2bp][1]
+for id, bp in nf2_items:
     bp1 = nf1_breakpoints.get(id)
     if bp1 is None:
         bp.setMissing()
@@ -141,8 +137,8 @@ f1_arg_count = 0
 f1_matching_arg_count = 0
 f1_notmatching_arg_count = 0
 f1_missing_arg_count = 0
-for idx in range(len(f1_items)):
-    bp = f1_items[idx][1]
+for f1_item in f1_items:
+    bp = f1_item[1]
     f1_arg_count = f1_arg_count + bp.getArgCount()
     f1_matching_arg_count = f1_matching_arg_count + bp.getMatchingArgCount()
     f1_notmatching_arg_count = f1_notmatching_arg_count + bp.getNotMatchingArgCount()
@@ -152,8 +148,8 @@ nf1_arg_count = 0
 nf1_matching_arg_count = 0
 nf1_notmatching_arg_count = 0
 nf1_missing_arg_count = 0
-for idx in range(len(nf1_items)):
-    bp = nf1_items[idx][1]
+for nf1_item in nf1_items:
+    bp = nf1_item[1]
     nf1_arg_count = nf1_arg_count + bp.getArgCount()
     nf1_matching_arg_count = nf1_matching_arg_count + bp.getMatchingArgCount()
     nf1_notmatching_arg_count = nf1_notmatching_arg_count + bp.getNotMatchingArgCount()
diff --git a/FindMissingLineNo.py b/FindMissingLineNo.py
index c92b5ed41..ee25a1f86 100755
--- a/FindMissingLineNo.py
+++ b/FindMissingLineNo.py
@@ -40,9 +40,7 @@ xfailed_lines = {}
 read_inputfile(XFAIL_FILE, xfailed_lines)
 
 dbg_line_items = dbg_lines.items()
-for f in range(len(dbg_line_items)):
-    fname = dbg_line_items[f][0]
-    fset = dbg_line_items[f][1]
+for fname, fset in dbg_line_items:
     optset = dbg_opt_lines.get(fname)
     nativeoptset = native_dbg_opt_lines.get(fname)
     xfailedset = xfailed_lines.get(os.path.basename(fname))
diff --git a/litsupport/modules/stats.py b/litsupport/modules/stats.py
index 4cba3e8a1..125342c15 100644
--- a/litsupport/modules/stats.py
+++ b/litsupport/modules/stats.py
@@ -14,7 +14,7 @@ def _mergeStats(global_stats, statsfilename):
     except Exception as e:
         logging.warning("Could not read '%s'", statsfilename, exc_info=e)
         return
-    for name, value in stats.iteritems():
+    for name, value in stats.items():
         global_stats[name] += value
 
 
@@ -37,7 +37,7 @@ def _getStats(context):
         logging.warning("No stats for '%s'", context.test.getFullName())
 
     result = dict()
-    for key, value in stats.iteritems():
+    for key, value in stats.items():
         result[key] = value
     return result
 
diff --git a/utils/compare.py b/utils/compare.py
index b65787707..fa93b24a6 100755
--- a/utils/compare.py
+++ b/utils/compare.py
@@ -1,8 +1,10 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 """Tool to filter, organize, compare and display benchmarking results. Usefull
 for smaller datasets. It works great with a few dozen runs it is not designed to
 deal with hundreds.
 Requires the pandas library to be installed."""
+from __future__ import print_function
+
 import pandas as pd
 import sys
 import os.path
@@ -19,7 +21,7 @@ def read_lit_json(filename):
     info_columns = ['hash']
     # Pass1: Figure out metrics (= the column index)
     if 'tests' not in jsondata:
-        print "%s: Could not find toplevel 'tests' key"
+        print("%s: Could not find toplevel 'tests' key" % filename)
         sys.exit(1)
     for test in jsondata['tests']:
         name = test.get("name")
@@ -31,7 +33,7 @@ def read_lit_json(filename):
             sys.exit(1)
         names.add(name)
         if "metrics" not in test:
-            print "Warning: '%s' has No metrics!" % test['name']
+            print("Warning: '%s' has No metrics!" % test['name'])
             continue
         for name in test["metrics"].keys():
             if name not in columnindexes:
@@ -54,9 +56,9 @@ def read_lit_json(filename):
 
         datarow = [nan] * len(columns)
         if "metrics" in test:
-            for (metricname, value) in test['metrics'].iteritems():
+            for (metricname, value) in test['metrics'].items():
                 datarow[columnindexes[metricname]] = value
-        for (name, value) in test.iteritems():
+        for (name, value) in test.items():
             index = columnindexes.get(name)
             if index is not None:
                 datarow[index] = test[name]
@@ -148,7 +150,7 @@ def print_filter_stats(reason, before, after):
     n_after = len(after.groupby(level=1))
     n_filtered = n_before - n_after
     if n_filtered != 0:
-        print "%s: %s (filtered out)" % (reason, n_filtered)
+        print("%s: %s (filtered out)" % (reason, n_filtered))
 
 # Truncate a string to a maximum length by keeping a prefix, a suffix and ...
 # in the middle
@@ -222,8 +224,8 @@ def print_result(d, limit_output=True, shorten_names=True,
     pd.set_option("display.max_colwidth", 0)
     out = dataout.to_string(index=False, justify='left',
                             float_format=float_format, formatters=formatters)
-    print out
-    print d.describe()
+    print(out)
+    print(d.describe())
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(prog='compare.py')
@@ -303,7 +305,7 @@ if __name__ == "__main__":
     # Filter data
     proggroup = data.groupby(level=1)
     initial_size = len(proggroup.indices)
-    print "Tests: %s" % (initial_size,)
+    print("Tests: %s" % (initial_size,))
     if config.filter_failed and hasattr(data, 'Exec'):
         newdata = filter_failed(data)
         print_filter_stats("Failed", data, newdata)
@@ -326,10 +328,10 @@ if __name__ == "__main__":
         data = newdata
     final_size = len(data.groupby(level=1))
     if final_size != initial_size:
-        print "Remaining: %d" % (final_size,)
+        print("Remaining: %d" % (final_size,))
 
     # Reduce / add columns
-    print "Metric: %s" % (",".join(metrics),)
+    print("Metric: %s" % (",".join(metrics),))
     if len(metrics) > 0:
         data = data[metrics]
     data = add_diff_column(data)
@@ -339,7 +341,7 @@ if __name__ == "__main__":
         sortkey = data.columns[0]
 
     # Print data
-    print ""
+    print("")
     shorten_names = not config.full
     limit_output = (not config.all) and (not config.full)
     print_result(data, limit_output, shorten_names, config.show_diff, sortkey)
diff --git a/utils/tdiff.py b/utils/tdiff.py
index 9f4cedbc8..dcfa167c2 100755
--- a/utils/tdiff.py
+++ b/utils/tdiff.py
@@ -95,7 +95,7 @@ def determine_max_commandline_len():
     if sc_arg_max <= 0:
         return 10000 # wild guess
     env_len = 0
-    for key,val in os.environ.iteritems():
+    for key,val in os.environ.items():
         env_len += len(key) + len(val) + 10
     return sc_arg_max - env_len
 
@@ -140,12 +140,12 @@ def filelist(mode, target, cwd, config):
 
     if config.mode == 'sources':
         # Take leafs in the dependency tree
-        for target, depnode in tree.iteritems():
+        for target, depnode in tree.items():
             if len(depnode.inputs) == 0:
                 yield target
     else:
         # Take files ending in '.o'
-        for target, depnode in tree.iteritems():
+        for target, depnode in tree.items():
             if target.endswith(".o"):
                 # Determine .s/.stats ending used by -save-temps=obj or
                 # -save-stats=obj
-- 
2.17.2