diff -ur nfsometer-1.9.orig/nfsometerlib/cmd.py nfsometer-1.9.wip/nfsometerlib/cmd.py --- nfsometer-1.9.orig/nfsometerlib/cmd.py 2017-06-29 04:39:18.000000000 +0100 +++ nfsometer-1.9.wip/nfsometerlib/cmd.py 2019-05-13 16:26:40.269894942 +0100 @@ -12,9 +12,9 @@ """ import os -import posix import sys import subprocess +import six # command wrappers def simplecmd(args): @@ -41,11 +41,11 @@ def cmd(args, raiseerrorcode=True, raiseerrorout=True, instr='', env=None, pass_output=False): - #print "command> %s" % args + #print("command> %s" % args) if env: - curenv = dict(posix.environ) - for k,v in env.iteritems(): + curenv = dict(os.environ) + for k,v in six.iteritems(env): curenv[k] = v env = curenv @@ -82,11 +82,13 @@ (args, errstr)) if outstr: - o_str = outstr.split('\n') + o_str = outstr.decode('utf-8').split('\n') else: o_str = '' if errstr: + if isinstance(errstr, six.binary_type): + errstr = errstr.decode('utf-8') e_str = errstr.split('\n') else: e_str = '' diff -ur nfsometer-1.9.orig/nfsometerlib/collection.py nfsometer-1.9.wip/nfsometerlib/collection.py --- nfsometer-1.9.orig/nfsometerlib/collection.py 2017-06-29 04:39:18.000000000 +0100 +++ nfsometer-1.9.wip/nfsometerlib/collection.py 2019-05-20 13:39:47.967197141 +0100 @@ -14,11 +14,12 @@ import os import numpy as np from subprocess import call +import six -from config import * -from selector import Selector -import parse -from trace import TraceAttrs +from .config import * +from .selector import Selector +from . 
import parse +from .trace import TraceAttrs class Stat: """ @@ -64,7 +65,7 @@ return "Stat(name=%r, values=%r, tracedirs=%r)" % \ (self.name, self._values, self._tracedirs) - def __nonzero__(self): + def __bool__(self): return not self.empty() def num_runs(self): @@ -75,7 +76,7 @@ """ return the value for the run associated with tracedir """ try: run = self._tracedirs.index(tracedir) - except ValueError, e: + except ValueError as e: if args: assert len(args) == 1 return args[0] @@ -83,7 +84,7 @@ try: return self._values[run] - except IndexError, e: + except IndexError as e: if args: assert len(args) == 1 return args[0] @@ -161,12 +162,12 @@ self._empty = None self._num_runs = None - def __nonzero__(self): + def __bool__(self): return not self.empty() def _sort(self): if not self._sorted: - self._stats.sort(lambda x,y: -1 * cmp(x.mean(), y.mean())) + self._stats.sort(key=lambda x: x.mean(), reverse=True) self._sorted = True def foreach(self): @@ -182,12 +183,12 @@ def mean(self): if self._mean == None: - self._mean = np.mean(self._sum_by_tracedir.values()) + self._mean = np.mean(list(self._sum_by_tracedir.values())) return self._mean def std(self): if self._std == None: - self._std = np.std(self._sum_by_tracedir.values()) + self._std = np.std(list(self._sum_by_tracedir.values())) return self._std def max(self): @@ -223,7 +224,7 @@ if not d in self._tracedirs: self._tracedirs.append(d) - if not self._sum_by_tracedir.has_key(d): + if d not in self._sum_by_tracedir: self._sum_by_tracedir[d] = 0.0 self._sum_by_tracedir[d] += vals[i] @@ -240,15 +241,16 @@ self._num_runs = None def add_attr(self, name, value): - if not self._attrs.has_key(name): - self._attrs[name] = set() - self._attrs[name].add(value) + if name not in self._attrs: + self._attrs[name] = set([value]) + else: + self._attrs[name].add(value) def get_attr(self, name): return self._attrs[name] def has_attr(self, name): - return self._attrs.has_key(name) + return name in self._attrs def merge_attrs(self, new): 
str_attrs = ['workload_command', 'workload_description'] @@ -264,7 +266,7 @@ """ add a value for the key. should be called once on each key for every workload result directory """ - if not self._values.has_key(key): + if key not in self._values: self._values[key] = Stat(key) self._values[key].add_value(float(value), filename, tracedir) @@ -293,7 +295,7 @@ every workload result directory """ assert isinstance(stat, Stat), repr(stat) - if not self._values.has_key(bucket_name): + if bucket_name not in self._values: self._values[bucket_name] = Bucket(bucket_name) self._values[bucket_name].add_stat_to_bucket(stat) @@ -337,7 +339,7 @@ # new elif ent.startswith(TRACE_DIR_PREFIX) and os.path.isdir(ent): self.load_tracedir(ent) - except IOError, e: + except IOError as e: self.warn(ent, str(e)) os.chdir(cwd) @@ -351,7 +353,7 @@ servers = set() paths = set() - for sel, tracestat in self._tracestats.iteritems(): + for sel, tracestat in six.iteritems(self._tracestats): parse.gather_buckets(self, tracestat) workloads.add(sel.workload) @@ -391,7 +393,7 @@ def notes_get(self): notes_file = os.path.join(self.resultsdir, NOTES_FILE) try: - return file(notes_file).readlines() + return open(notes_file).readlines() except IOError: return [] @@ -401,19 +403,19 @@ if msg.startswith('[Errno '): msg = msg[msg.find(']') + 1:] - if not self._warnings.has_key(tracedir): + if tracedir not in self._warnings: self._warnings[tracedir] = [] self._warnings[tracedir].append(msg.replace(tracedir, '[dir]/')) warn(tracedir + ': ' + msg) def warnings(self): - return [ (d, tuple(self._warnings[d])) for d in self._warnings.keys() ] + return [ (d, tuple(self._warnings[d])) for d in list(self._warnings.keys()) ] def empty(self): return len(self._tracestats) == 0 def set_stat_info(self, key, info): - if not self._stat_info.has_key(key): + if key not in self._stat_info: self._stat_info[key] = info else: assert self._stat_info[key] == info, \ @@ -450,7 +452,7 @@ assert sel.is_valid_key(), "Invalid key: %r" % 
sel - if not self._tracestats.has_key(sel): + if sel not in self._tracestats: self._tracestats[sel] = TraceStats(self) return self._tracestats[sel] @@ -462,7 +464,7 @@ """ return True if this collection has any traces matching 'selection', otherwise returns False """ for x in selection.foreach(): - if self._tracestats.has_key(x): + if x in self._tracestats: return True return False @@ -474,7 +476,7 @@ attr_file = os.path.join(tracedir, 'arguments') trace_attrs = TraceAttrs(filename=attr_file).to_dict() - for k, v in trace_attrs.iteritems(): + for k, v in six.iteritems(trace_attrs): attr[k] = v return attr @@ -485,7 +487,7 @@ returns empty string if nothing is found """ def _check_lines(f): - return '\n'.join([ x[2:] for x in file(f).readlines() + return '\n'.join([ x[2:] for x in open(f).readlines() if x.startswith('>') and x.lower().find('nfs:') >= 0 ]) diff = os.path.join(tracedir, 'dmesg.diff') @@ -528,14 +530,18 @@ for subsel in selection.foreach(): try: tracestat = self.get_trace(subsel) - except KeyError: + except KeyError as e: continue if tracestat.has_attr(attr_name): trace_attr = tracestat.get_attr(attr_name) attr = attr.union(trace_attr) - attr = list(attr) + if not attr: + attr = [] + else: + attr = list(attr) + attr.sort() return tuple(attr) @@ -568,9 +574,9 @@ if not mdt in map_order: map_order.append(mdt) - if not tmpmap.has_key(mdt): + if mdt not in tmpmap: tmpmap[mdt] = {} - if not tmpmap[mdt].has_key(nruns): + if nruns not in tmpmap[mdt]: tmpmap[mdt][nruns] = [] tmpmap[mdt][nruns].append(subsel.workload) @@ -578,10 +584,10 @@ wmap = {} worder = [] for mdt in map_order: - if not tmpmap.has_key(mdt): + if mdt not in tmpmap: continue - runs = tmpmap[mdt].keys() + runs = list(tmpmap[mdt].keys()) runs.sort() for r in runs: @@ -633,7 +639,7 @@ order = ['workload', 'client', 'server', 'mountopt', 'detect', 'tag', 'kernel', 'path'] for subsel in selection.foreach(order): - assert not vals.has_key(subsel) + assert subsel not in vals vals[subsel] = {} try: 
diff -ur nfsometer-1.9.orig/nfsometerlib/config.py nfsometer-1.9.wip/nfsometerlib/config.py --- nfsometer-1.9.orig/nfsometerlib/config.py 2017-06-29 04:41:45.000000000 +0100 +++ nfsometer-1.9.wip/nfsometerlib/config.py 2019-05-13 16:26:40.270894947 +0100 @@ -12,7 +12,7 @@ """ import re -import os, posix, stat, sys +import os, stat, sys import socket NFSOMETER_VERSION='1.9' @@ -20,7 +20,7 @@ NFSOMETER_MANPAGE='nfsometer.1' NFSOMETERLIB_DIR=os.path.split(__file__)[0] -NFSOMETER_DIR=os.path.join(posix.environ['HOME'], '.nfsometer') +NFSOMETER_DIR=os.path.join(os.environ['HOME'], '.nfsometer') # @@ -47,7 +47,7 @@ # MOUNTDIR=os.path.join(RUNNING_TRACE_DIR, 'mnt') WORKLOADFILES_ROOT=os.path.join(NFSOMETER_DIR, 'workload_files') -RESULTS_DIR=os.path.join(posix.environ['HOME'], 'nfsometer_results') +RESULTS_DIR=os.path.join(os.environ['HOME'], 'nfsometer_results') HOSTNAME=socket.getfqdn() RUNROOT='%s/nfsometer_runroot_%s' % (MOUNTDIR, HOSTNAME) HTML_DIR="%s/html" % NFSOMETERLIB_DIR @@ -139,7 +139,7 @@ _TEMPLATE_CACHE={} def html_template(filename): global _TEMPLATE_CACHE - if not _TEMPLATE_CACHE.has_key(filename): + if filename not in _TEMPLATE_CACHE: _TEMPLATE_CACHE[filename] = Template(filename=filename) return _TEMPLATE_CACHE[filename] @@ -244,7 +244,7 @@ gmap = {} for g in groups: vers = mountopts_version(g.mountopt) - if not gmap.has_key(vers): + if vers not in gmap: gmap[vers] = [] gmap[vers].append(g) return gmap diff -ur nfsometer-1.9.orig/nfsometerlib/graph.py nfsometer-1.9.wip/nfsometerlib/graph.py --- nfsometer-1.9.orig/nfsometerlib/graph.py 2017-06-29 04:39:18.000000000 +0100 +++ nfsometer-1.9.wip/nfsometerlib/graph.py 2019-05-21 10:06:41.797380624 +0100 @@ -11,15 +11,15 @@ FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
""" -#!/usr/bin/env python +#!/usr/bin/env python3 import multiprocessing -import cPickle -import os, sys, time +from six.moves import cPickle +import os, sys, time, errno, six -from collection import * -from config import * -import selector +from .collection import * +from .config import * +from . import selector _GRAPH_COLLECTION = None @@ -52,8 +52,8 @@ try: os.mkdir(self.imagedir) - except OSError, e: - assert e.errno == os.errno.EEXIST + except OSError as e: + assert e.errno == errno.EEXIST self._entries = set(os.listdir(imagedir)) @@ -104,8 +104,8 @@ if classes: other_attrs.append('class="%s"' % ' '.join(classes)) - if attrs.has_key('groups'): - if not attrs.has_key('gmap'): + if 'groups' in attrs: + if 'gmap' not in attrs: attrs['gmap'] = groups_by_nfsvers(attrs['groups']) gmap = attrs['gmap'] @@ -116,7 +116,7 @@ cur = 0 sub_src = [] for vers in NFS_VERSIONS: - if not gmap.has_key(vers): + if vers not in gmap: continue assert cur < num @@ -288,26 +288,26 @@ inform('\rGraph Summary: ') if self.gen_count: - print ' %u images generated' % self.gen_count + print(' %u images generated' % self.gen_count) if self.cached_count: - print ' %u cached images' % self.cached_count + print(' %u cached images' % self.cached_count) if self.prune_count: - print ' %u files pruned' % self.prune_count + print(' %u files pruned' % self.prune_count) def _fmt_data(x, scale): assert not isinstance(x, (list, tuple)) - if isinstance(x, Stat): + if isinstance(x, Stat) or type(x).__name__ == 'Stat': return x.mean() / scale, x.std() / scale # disallow?
- elif isinstance(x, (float, int, long)): + elif isinstance(x, (float,)+six.integer_types): return x, 0.0 elif x == None: # when graphing, no data can just be zero return 0.0, 0.0 - raise ValueError('Unexpected data type for %r' % (val,)) + raise ValueError('Unexpected data type for %r' % (x,)) def _graphize_units(units): if not units: @@ -321,7 +321,7 @@ graph_f(imgfile, attrs) except KeyboardInterrupt: return False - except Exception, e: + except Exception as e: return e return True @@ -365,7 +365,7 @@ ax1 = fig.add_subplot(111) ax1.set_autoscale_on(True) ax1.autoscale_view(True,True,True) - for i in ax1.spines.itervalues(): + for i in six.itervalues(ax1.spines): i.set_linewidth(0.0) # width of bars within a group @@ -409,14 +409,14 @@ val = vals[g].get(key, None) hidx = 0 # default hatch - if isinstance(val, Bucket): + if isinstance(val, Bucket) or type(val).__name__ == 'Bucket': for s in val.foreach(): x_v, x_s = _fmt_data(s, scale) hidx = hatch_map[s.name] - assert not valmap[key].has_key(hidx), \ + assert hidx not in valmap[key], \ '%u, %r' % (hidx, val) - assert not errmap[key].has_key(hidx), \ + assert hidx not in errmap[key], \ '%u, %r' % (hidx, val) valmap[key][hidx] = x_v errmap[key][hidx] = x_s @@ -496,7 +496,7 @@ ax1 = fig.add_subplot(111) ax1.set_autoscale_on(True) ax1.autoscale_view(True,True,True) - for i in ax1.spines.itervalues(): + for i in six.itervalues(ax1.spines): i.set_linewidth(0.0) ax1.get_xaxis().set_visible(False) diff -ur nfsometer-1.9.orig/nfsometerlib/options.py nfsometer-1.9.wip/nfsometerlib/options.py --- nfsometer-1.9.orig/nfsometerlib/options.py 2017-06-29 04:39:18.000000000 +0100 +++ nfsometer-1.9.wip/nfsometerlib/options.py 2019-05-17 12:58:31.852257799 +0100 @@ -14,8 +14,9 @@ import os, posix, sys import getopt import re +import six -from config import * +from .config import * _progname = sys.argv[0] @@ -356,7 +357,7 @@ try: opts, args = getopt.getopt(sys.argv[1:], shortstr, longlist) - except getopt.GetoptError, err: + except 
getopt.GetoptError as err: self.usage(str(err)) # parse options @@ -464,7 +465,7 @@ for x in mountopts: try: vers = mountopts_version(x) - except ValueError, e: + except ValueError as e: self.usage(str(e)) self.mountopts.append(x) @@ -490,7 +491,7 @@ err = False for name in ('NFSOMETER_CMD', 'NFSOMETER_NAME', 'NFSOMETER_DESC',): if not name in posix.environ: - print >>sys.stderr, "%s not set" % name + six.print_("%s not set" % name, file=sys.stderr) err = True if err: @@ -564,10 +565,9 @@ return lines def error(self, msg=''): - print >>sys.stderr, msg - print >>sys.stderr, \ - '\nrun "%s --help" and "%s examples" for more info' % \ - (_progname, _progname) + six.print_(msg, file=sys.stderr) + six.print_('\nrun "%s --help" and "%s examples" for more info' % \ + (_progname, _progname), file=sys.stderr) sys.stderr.flush() sys.exit(1) @@ -593,19 +593,19 @@ return self._synopsis_fmt % script def examples(self): - print >>sys.stdout, self._examples() + print(self._examples()) def usage(self, msg=''): - print >>sys.stderr, "usage: %s" % self._synopsis(_progname) - print >>sys.stderr, self._modes_description(_progname) + six.print_("usage: %s" % self._synopsis(_progname), file=sys.stderr) + six.print_(self._modes_description(_progname), file=sys.stderr) - print >>sys.stderr - print >>sys.stderr, "Options:" - print >>sys.stderr, ' %s' % '\n '.join(self._option_help()) + six.print_("", file=sys.stderr) + six.print_("Options:", file=sys.stderr) + six.print_(' %s' % '\n '.join(self._option_help()), file=sys.stderr) if msg: - print >>sys.stderr - print >>sys.stderr, "Error: " + msg + six.print_('', file=sys.stderr) + six.print_("Error: " + msg, file=sys.stderr); sys.exit(1) @@ -635,5 +635,6 @@ for i in range(len(o)): o[i] = o[i].strip().replace('-', '\\-') - file(output_path, 'w+').write('\n'.join(o)) + with open(output_path, 'w+') as f: + f.write('\n'.join(o)) diff -ur nfsometer-1.9.orig/nfsometerlib/parse.py nfsometer-1.9.wip/nfsometerlib/parse.py --- 
nfsometer-1.9.orig/nfsometerlib/parse.py 2017-06-29 04:39:18.000000000 +0100 +++ nfsometer-1.9.wip/nfsometerlib/parse.py 2019-05-20 13:40:30.733352852 +0100 @@ -13,8 +13,9 @@ import os import re +import six -from config import * +from .config import * # # Regular Expressions section @@ -113,7 +114,7 @@ return r def add_key(self, bucket_name, key, display): - if self._key2bucket.has_key(key) or key in self._other_keys: + if key in self._key2bucket or key in self._other_keys: return if display: @@ -189,7 +190,7 @@ ), } nfsstat_op_map = {} -for b, ops in nfsstat_op_map_def.iteritems(): +for b, ops in six.iteritems(nfsstat_op_map_def): for o in ops: nfsstat_op_map[o] = b @@ -245,7 +246,7 @@ } mountstat_op_map = {} -for b, ops in mountstat_op_map_def.iteritems(): +for b, ops in six.iteritems(mountstat_op_map_def): for o in ops: mountstat_op_map[o] = b @@ -299,7 +300,7 @@ try: p(tracestat, tracedir, attrs) - except Exception, e: + except Exception as e: collection.warn(tracedir, str(e)) @@ -310,7 +311,7 @@ path = os.path.join(tracedir, filename) - lines = [ x.strip() for x in file(path) if x.strip() ] + lines = [ x.strip() for x in open(path) if x.strip() ] assert len(lines) == 3 def _parse_time(minutes, seconds): @@ -376,8 +377,7 @@ path = os.path.join(tracedir, filename) - f = file(path) - + f = open(path) for line in f: found = False @@ -388,7 +388,7 @@ m = RE['ms_read_norm'].match(line) if m: - val = long(m.group(1)) + val = int(m.group(1)) tracestat.add_stat(prefix + 'read_normal', val, 'B', 'Bytes read through the read() syscall', @@ -400,7 +400,7 @@ m = RE['ms_write_norm'].match(line) if m: - val = long(m.group(1)) + val = int(m.group(1)) tracestat.add_stat(prefix + 'write_normal', val, 'B', 'Bytes written through write() syscall', @@ -412,7 +412,7 @@ m = RE['ms_read_odir'].match(line) if m: - val = long(m.group(1)) + val = int(m.group(1)) tracestat.add_stat(prefix + 'read_odirect', val, 'B', 'Bytes read through read(O_DIRECT) syscall', @@ -424,7 +424,7 @@ m = 
RE['ms_write_odir'].match(line) if m: - val = long(m.group(1)) + val = int(m.group(1)) tracestat.add_stat(prefix + 'write_odirect', val, 'B', 'Bytes written through write(O_DIRECT) syscall', @@ -436,7 +436,7 @@ m = RE['ms_read_nfs'].match(line) if m: - val = long(m.group(1)) + val = int(m.group(1)) tracestat.add_stat(prefix + 'read_nfs', val, 'B', 'Bytes read via NFS RPCs', @@ -448,7 +448,7 @@ m = RE['ms_write_nfs'].match(line) if m: - val = long(m.group(1)) + val = int(m.group(1)) tracestat.add_stat(prefix + 'write_nfs', val, 'B', 'Bytes written via NFS RPCs', @@ -461,21 +461,21 @@ m = RE['ms_rpc_line'].match(line) if m: tracestat.add_stat(prefix + 'rpc_requests', - long(m.group(1)), 'RPCs', + int(m.group(1)), 'RPCs', 'Count of RPC requests', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) tracestat.add_stat(prefix + 'rpc_replies', - long(m.group(2)), 'RPCs', + int(m.group(2)), 'RPCs', 'Count of RPC replies', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) tracestat.add_stat(prefix + 'xid_not_found', - long(m.group(3)), 'RPCs', + int(m.group(3)), 'RPCs', 'Count of RPC replies that couldn\'t be matched ' + 'with a request', BETTER_ALWAYS_LESS, @@ -487,7 +487,7 @@ m = RE['ms_rpc_backlog'].match(line) if m: tracestat.add_stat(prefix + 'backlog_queue_avg', - long(m.group(1)), 'RPCs', + int(m.group(1)), 'RPCs', 'Average number of outgoing requests on the backlog ' + 'queue', BETTER_ALWAYS_LESS, @@ -500,9 +500,10 @@ op = None oplineno = 0 for line in f: - m = RE['ms_ops_header'].match(line.strip()) + ls = line.strip() + m = RE['ms_ops_header'].match(ls) if m: - assert op == None + #assert op is None,"failed op==None, m==%s" % m op = m.group(1) op_bucket = mountstat_op_map.get(op, BUCKET_OTHER) oplineno = 1 @@ -511,7 +512,7 @@ if oplineno == 1: m = RE['ms_ops_line1'].match(line) if m: - assert op != None + assert op is not None,"failed op != None" oplineno += 1 continue @@ -563,6 +564,8 @@ elif op: raise ParseError("Didn't match line: %s" % line) + f.close() + def 
parse_nfsiostat(tracestat, tracedir, attrs): prefix = 'nfsiostat:' stat_desc = 'output of nfsiostat(1)' @@ -570,7 +573,7 @@ path = os.path.join(tracedir, filename) - lines = file(path).readlines() + lines = open(path).readlines() # skip until we find our mount name=None @@ -656,7 +659,7 @@ path = os.path.join(tracedir, filename) - lines = file(path).readlines() + lines = open(path).readlines() m = RE['ns_rpc_title'].match(lines[0]) @@ -675,7 +678,7 @@ raise ParseError("Can't find RPC call count") tracestat.add_stat(prefix + 'rpc_calls', - long(m.group(1)), 'Calls', + int(m.group(1)), 'Calls', 'Count of RPC calls', BETTER_LESS_IF_IO_BOUND, None, @@ -709,12 +712,12 @@ m = RE['ns_count_data'].match(line) if m: for i, t in enumerate(titles): - assert not op_counts.has_key(t), "dup op count %s" % t - op_counts[t] = long(m.group(i+1)) + assert t not in op_counts, "dup op count %s" % t + op_counts[t] = int(m.group(i+1)) titles = None - for op, count in op_counts.iteritems(): + for op, count in six.iteritems(op_counts): if count: op_bucket = nfsstat_op_map.get(op, BUCKET_OTHER) tracestat.add_stat(prefix + op.upper() + ' Count', @@ -734,7 +737,8 @@ # NOTE: BETTER_* based on fact that filebench output is only ever time bound found = False - for line in file(path): + with open(path) as f: + for line in f: m = RE['filebench_stats'].match(line) if m: tracestat.add_stat(prefix + 'op_count', @@ -784,16 +788,15 @@ path = os.path.join(tracedir, filename) - f = file(path) - - found = False - for line in f: + with open(path) as f: + found = False + for line in f: m = RE['pms_events'].match(line) if m: found = True tracestat.add_stat(prefix + 'inode_revalidate', - long(m.group(1)), 'events', + int(m.group(1)), 'events', 'Count of inode_revalidate events', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -801,7 +804,7 @@ tracedir) tracestat.add_stat(prefix + 'dentry_revalidate', - long(m.group(2)), 'events', + int(m.group(2)), 'events', 'Count of dentry_revalidate events', 
BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -809,7 +812,7 @@ tracedir) tracestat.add_stat(prefix + 'data_invalidate', - long(m.group(3)), 'events', + int(m.group(3)), 'events', 'Count of data_invalidate events', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -817,7 +820,7 @@ tracedir) tracestat.add_stat(prefix + 'attr_invalidate', - long(m.group(4)), 'events', + int(m.group(4)), 'events', 'Count of attr_invalidate events', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -825,7 +828,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_open', - long(m.group(5)), 'events', + int(m.group(5)), 'events', 'Count of file and directory opens', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -833,7 +836,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_lookup', - long(m.group(6)), 'events', + int(m.group(6)), 'events', 'Count of lookups', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -841,7 +844,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_access', - long(m.group(7)), 'events', + int(m.group(7)), 'events', 'Count of access calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -849,7 +852,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_updatepage', - long(m.group(8)), 'events', + int(m.group(8)), 'events', 'Count of updatepage calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -857,7 +860,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_readpage', - long(m.group(9)), 'events', + int(m.group(9)), 'events', 'Count of readpage calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -865,7 +868,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_readpages', - long(m.group(10)), 'events', + int(m.group(10)), 'events', 'Count of readpages calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -873,7 +876,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_writepage', - long(m.group(11)), 'events', + int(m.group(11)), 'events', 'Count of writepage calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -881,7 +884,7 @@ tracedir) tracestat.add_stat(prefix + 
'vfs_writepages', - long(m.group(12)), 'events', + int(m.group(12)), 'events', 'Count of writepages calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -889,7 +892,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_getdents', - long(m.group(13)), 'events', + int(m.group(13)), 'events', 'Count of getdents calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -897,7 +900,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_setattr', - long(m.group(14)), 'events', + int(m.group(14)), 'events', 'Count of setattr calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -905,7 +908,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_flush', - long(m.group(15)), 'events', + int(m.group(15)), 'events', 'Count of flush calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -913,7 +916,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_fsync', - long(m.group(16)), 'events', + int(m.group(16)), 'events', 'Count of fsync calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -921,7 +924,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_lock', - long(m.group(17)), 'events', + int(m.group(17)), 'events', 'Count of lock calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -929,7 +932,7 @@ tracedir) tracestat.add_stat(prefix + 'vfs_release', - long(m.group(18)), 'events', + int(m.group(18)), 'events', 'Count of release calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -937,7 +940,7 @@ tracedir) tracestat.add_stat(prefix + 'congestion_wait', - long(m.group(19)), 'events', + int(m.group(19)), 'events', 'Count of congestion_wait', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -945,7 +948,7 @@ tracedir) tracestat.add_stat(prefix + 'setattr_trunc', - long(m.group(20)), 'events', + int(m.group(20)), 'events', 'Count of setattr_trunc', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -953,7 +956,7 @@ tracedir) tracestat.add_stat(prefix + 'extend_write', - long(m.group(21)), 'events', + int(m.group(21)), 'events', 'Count of extend_write', BETTER_ALWAYS_LESS | 
BETTER_NO_VARIANCE, None, @@ -961,7 +964,7 @@ tracedir) tracestat.add_stat(prefix + 'silly_rename', - long(m.group(22)), 'events', + int(m.group(22)), 'events', 'Count of silly_rename', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -969,7 +972,7 @@ tracedir) tracestat.add_stat(prefix + 'short_read', - long(m.group(23)), 'events', + int(m.group(23)), 'events', 'Count of short_read', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -977,7 +980,7 @@ tracedir) tracestat.add_stat(prefix + 'short_write', - long(m.group(24)), 'events', + int(m.group(24)), 'events', 'Count of short_write', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -985,7 +988,7 @@ tracedir) tracestat.add_stat(prefix + 'delay', - long(m.group(25)), 'events', + int(m.group(25)), 'events', 'Count of delays (v3: JUKEBOX, v4: ERR_DELAY, grace period, key expired)', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -993,7 +996,7 @@ tracedir) tracestat.add_stat(prefix + 'pnfs_read', - long(m.group(26)), 'events', + int(m.group(26)), 'events', 'Count of pnfs_read calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -1001,7 +1004,7 @@ tracedir) tracestat.add_stat(prefix + 'pnfs_write', - long(m.group(27)), 'events', + int(m.group(27)), 'events', 'Count of pnfs_write calls', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, @@ -1016,7 +1019,7 @@ if len(values) > 10: # older mountstats don't have so many values tracestat.add_stat(prefix + 'xprt_max_slots', - long(values[10]), 'slots', + int(values[10]), 'slots', 'Max slots used by rpc transport', BETTER_ALWAYS_LESS, None, @@ -1031,7 +1034,7 @@ if len(values) > 8: # older mountstats don't have so many values tracestat.add_stat(prefix + 'xprt_max_slots', - long(values[8]), 'slots', + int(values[8]), 'slots', 'Max slots used by rpc transport', BETTER_ALWAYS_LESS, None, @@ -1049,7 +1052,6 @@ filename = 'test.log' path = os.path.join(tracedir, filename) - f = file(path) rpt_name = None rpt_col_hdr = [] @@ -1057,7 +1059,8 @@ # maps name -> (%u_%u) -> value 
newkeys = [] - for line in f: + with open(path) as f: + for line in f: line = line.strip() if rpt_name: if not line: @@ -1097,7 +1100,7 @@ y = int(skey[-1]) tracestat.add_stat(prefix + key + ' iozone', - long(value), 'KB/s', + int(value), 'KB/s', '%s: size kb: %u, reclen: %u' % (report, x, y), BETTER_ALWAYS_MORE, (iozone_bucket_def, report + ' iozone'), diff -ur nfsometer-1.9.orig/nfsometerlib/report.py nfsometer-1.9.wip/nfsometerlib/report.py --- nfsometer-1.9.orig/nfsometerlib/report.py 2017-06-29 04:39:18.000000000 +0100 +++ nfsometer-1.9.wip/nfsometerlib/report.py 2019-05-20 13:40:47.224411050 +0100 @@ -15,11 +15,11 @@ from math import sqrt, pow import time -import graph -from collection import * -from selector import Selector, SELECTOR_ORDER -from config import * -from workloads import * +from . import graph +from .collection import * +from .selector import Selector, SELECTOR_ORDER +from .config import * +from .workloads import * ENABLE_PIE_GRAPHS=False @@ -238,6 +238,8 @@ cell = val elif isinstance(val, (Stat, Bucket)): cell = html_fmt_value(val.mean(), val.std(), units=self.units) + elif type(val).__name__ == "Bucket" or type(val).__name__ == 'Stat': + cell = html_fmt_value(val.mean(), val.std(), units=self.units) else: assert val == None, "Not a string, Stat or Bucket: %r\ng = %s, k = %s" % (val, g, k) @@ -279,7 +281,7 @@ cur = 0 for vers in NFS_VERSIONS: - if not gmap.has_key(vers): + if vers not in gmap: continue assert cur < num @@ -392,7 +394,7 @@ value_map[g] = {} v = vals.get(g, {}).get(key, None) if v != None: - if isinstance(v, Bucket): + if isinstance(v, Bucket) or type(v).__name__ == 'Bucket': self.all_buckets = True for stat in v.foreach(): value_map[g][stat.name] = stat.mean() @@ -405,8 +407,9 @@ self.bucket_pie = '' # does ordering matter here? 
- bk_order = [ (k,v) for k, v in self.hatch_map.iteritems() ] - bk_order.sort(lambda x,y: cmp(x[1], y[1])) + bk_order = [ (k,v) for k, v in self.hatch_map.items() ] + if bk_order: + bk_order.sort(key=lambda x: x[1]) table_values = {} bucket_names = [] @@ -483,7 +486,7 @@ self.make_comparison_vals(vals, key, groups, select_order) self.gmap = groups_by_nfsvers(groups) - self.nfs_versions = [ v for v in NFS_VERSIONS if self.gmap.has_key(v) ] + self.nfs_versions = [ v for v in NFS_VERSIONS if v in self.gmap ] # ensure the order of groups is in nfs_version order groups = [] @@ -549,7 +552,7 @@ ' style="display: none;">%s' % (hits,) - for compare, compvals in self.comparison_vals_map.iteritems(): + for compare, compvals in self.comparison_vals_map.items(): if compvals: c += '