diff --git a/shell/src/dbck.py b/shell/src/dbck.py
index d01f24e0..796179a2 100644
--- a/shell/src/dbck.py
+++ b/shell/src/dbck.py
@@ -1170,11 +1170,12 @@ def fix_spacetokens_size(updatedb=False):
 
 
 
-def fill_missing_checksum(catalog, csumvalue, host, fs, checksumtype, updatedb=False):
+def fill_missing_checksum(executor, host, fs, checksumtype, timeout=900, updatedb=False):
     """Calculate and store missing checksums."""
-    _log.debug("fill_missing_checksum(%s, %s, %s, updatedb=%s)", host, fs, checksumtype, updatedb)
+    _log.debug("fill_missing_checksum(%s, %s, %s, %s, updatedb=%s)", host, fs, checksumtype, timeout, updatedb)
 
     updated = 0
+    sumsize = 0
     dry_run = '' if updatedb else 'dry-run: '
 
     csmap = {
@@ -1183,14 +1184,13 @@ def fill_missing_checksum(catalog, csumvalue, host, fs, checksumtype, updatedb=F
         'md5': 'MD',
     }
 
-    # fileid => fullpath
-    pathname = CachedFullPathTS(maxsize=100000)
-
-    cnt_rows = 0
-    conn = DBConn.new('cns_db')
-    cursor = conn.cursor(pymysql_cursors.SSDictCursor)
+    conn = None
+    cursor = None
+    fileid2size = {}
     try:
-        sql = "SELECT m.fileid, m.parent_fileid, m.name, m.csumtype, m.csumvalue, m.xattr FROM Cns_file_metadata m INNER JOIN Cns_file_replica r USING(fileid) WHERE r.host = %s AND r.fs = %s AND m.filemode & 32768 = 32768 AND r.status = '-' AND m.status = '-' AND m.csumtype != %s"
+        conn = DBConn.new('cns_db')
+        cursor = conn.cursor(pymysql_cursors.SSDictCursor)
+        sql = "SELECT m.fileid, m.filesize, m.csumtype, m.csumvalue, m.xattr FROM Cns_file_metadata m INNER JOIN Cns_file_replica r USING(fileid) WHERE r.host = %s AND r.fs = %s AND m.filemode & 32768 = 32768 AND r.status = '-' AND m.status = '-' AND m.csumtype != %s"
         _log.debug("query missing checksums '%s' (%s, %s, %s)", sql, host, fs, csmap[checksumtype])
         cursor.execute(sql, (host, fs, csmap[checksumtype]))
 
@@ -1203,31 +1203,20 @@ def fill_missing_checksum(catalog, csumvalue, host, fs, checksumtype, updatedb=F
             if _log.getEffectiveLevel() < logging.DEBUG:
                 _log.debug("fetched %i rows", len(rows))
 
-            fileids = [row['parent_fileid'] for row in rows]
-            pathnames = pathname.get_path_multi(fileids)
-
             for row in rows:
-                cnt_rows += 1
                 if _log.getEffectiveLevel() < logging.DEBUG:
-                    _log.debug("row %i: %s", cnt_rows, str(row))
+                    _log.debug("row: %s", str(row))
 
                 fileid = row['fileid']
-                parent_fileid = row['parent_fileid']
-                name = row['name']
-
-                prefix = pathnames.get(parent_fileid)
-                if prefix is None:
-                    _log.error("skipping fileid %i with name '%s', unable to reconstruct path of parent fileid %i", fileid, name, parent_fileid)
-                    continue
-                filename = "{0}/{1}".format(prefix, name)
+                size = row['filesize']
 
-                xattr = row.get('metadata_xattr')
+                xattr = row.get('xattr')
                 xattr_dict = {}
                 if xattr not in [None, '', '{}']:
                     try:
                         xattr_dict = json.loads(xattr)
                     except Exception as e:
-                        _log.error("unable to parse metadata_xattr for %s (fileid %i): %s", filename, fileid, str(xattr))
+                        _log.error("unable to parse metadata xattr for fileid %i: %s", fileid, str(xattr))
 
                 if "checksum.{0}".format(checksumtype) not in xattr_dict:
                     if row['csumtype'] == csmap[checksumtype]:
@@ -1236,31 +1225,88 @@ def fill_missing_checksum(catalog, csumvalue, host, fs, checksumtype, updatedb=F
                 if "checksum.{0}".format(checksumtype) in xattr_dict:
                     continue
 
-                _log.debug("%scalculate %s checksum on %s:%s for %s (fileid %i)", dry_run, checksumtype, host, fs, filename, fileid)
-                updated += 1
+                fileid2size[fileid] = size
+
+    except Exception as e:
+        # a query in progress that uses SSDictCursor can be killed only by terminating
+        # the DB connection (closing / deleting the cursor retrieves all selected rows)
+        if conn is not None:
+            conn.close()
+            conn = None
+        raise
+
+    finally:
+        # Close cursor and connection
+        if cursor is not None:
+            cursor.close()
+        if conn is not None:
+            conn.close()
+
+    _log.info("found %i files (size %i) on %s:%s without %s checksum", len(fileid2size), sum(fileid2size.values()), host, fs, checksumtype)
+
+    pathname = None
+    try:
+        # fileid => fullpath
+        pathname = CachedFullPathTS(maxsize=100000)
+
+        while len(fileid2size) > 0:
+            # process files in batches for more efficient full path DB reconstruction
+            fileid2size_batch = {}
+            for x in range(1000):
+                if len(fileid2size) == 0: break
+                fileid, size = fileid2size.popitem()
+                fileid2size_batch[fileid] = size
+
+            pathnames = pathname.get_path_multi(fileid2size_batch.keys())
+
+            for fileid, size in fileid2size_batch.items():
+                filename = pathnames.get(fileid)
+                if filename is None:
+                    _log.error("skipping fileid %i, unable to reconstruct parent directory path", fileid)
+                    continue
+
+                if _log.getEffectiveLevel() <= logging.DEBUG:
+                    _log.debug("%scalculate %s checksum on %s:%s for %s (fileid %i, size %i)", dry_run, checksumtype, host, fs, filename, fileid, size)
 
                 if dry_run:
+                    updated += 1
+                    sumsize += size
                     continue
 
                 try:
-                    #csumvalue = pydmlite.StringWrapper()
-                    catalog.getChecksum(filename, checksumtype, csumvalue, "", True, 0)
-                    _log.info("finished %s checksum calculation on %s:%s for %s (fileid %i): %s", checksumtype, host, fs, filename, fileid, str(csumvalue.s))
+                    csumvalue = ''
+                    sleep = 0.25
+                    start = time.monotonic()
+                    while start + timeout > time.monotonic():
+                        data, error = executor.chksum(checksumtype, filename)
+                        if error:
+                            raise Exception("chksum failed ({0})".format(error))
+                        if data.get('status') == 'found':
+                            csumvalue = data.get('checksum', 'unknown')
+                            break
+                        time.sleep(sleep)
+                        sleep = min(sleep*4, 5)
+                    if csumvalue != '':
+                        _log.debug("finished %s checksum calculation on %s:%s for %s (fileid %i, size %i): %s", checksumtype, host, fs, filename, fileid, size, csumvalue)
+                        updated += 1
+                        sumsize += size
+                    else:
+                        _log.warning("timeout during %s checksum calculation on %s:%s for %s (fileid %i, size %i): %ss", checksumtype, host, fs, filename, fileid, size, time.monotonic() - start)
                 except Exception as e:
-                    _log.error("failed %s checksum calculation on %s:%s for %s (fileid %i): %s", checksumtype, host, fs, filename, fileid, str(e))
+                    _log.error("failed %s checksum calculation on %s:%s for %s (fileid %i, size %i): %s", checksumtype, host, fs, filename, fileid, size, str(e).replace("\r", "  ").replace("\n", "  "))
 
     except Exception as e:
-        # query in progress that use SSDictCursor can be killed only by terminating DB connection
-        # (closing / deleting cursor lead to retreival of all selected entries from DB)
-        conn.close()
-        raise
+        _log.error("unexpected exception during %s checksum calculation on %s:%s: %s", checksumtype, host, fs, str(e))
+        if _log.getEffectiveLevel() <= logging.DEBUG:
+            import traceback
+            _log.debug(traceback.format_exc())
 
-    # Close cursor and connections
-    cursor.close()
-    conn.close()
-    del(pathname)
+    finally:
+        if pathname is not None:
+            pathname.close()
+            del(pathname)
 
-    _log.info("processed %i records on %s:%s (%s checksum recalculated for %i files)", cnt_rows, host, fs, checksumtype, updated)
+    _log.info("updated %s checksum for %i files (size %i) on %s:%s", checksumtype, updated, sumsize, host, fs)
 
     return updated
 
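The rewritten fill_missing_checksum() in dbck.py no longer calls pydmlite's
catalog.getChecksum(); it asks Dome for the checksum via executor.chksum() and
polls for the result with an exponential backoff capped at 5s, giving up after
"timeout" seconds. A minimal sketch of that polling pattern, with fetch()
standing in for the executor call (the helper name is illustrative, not from
the source):

    import time

    def poll_until_found(fetch, timeout=900):
        # fetch() stands in for DomeExecutor.chksum(); it returns (data, error)
        # where data is a dict like {'status': 'found', 'checksum': '...'}
        sleep = 0.25
        start = time.monotonic()
        while start + timeout > time.monotonic():
            data, error = fetch()
            if error:
                raise Exception("chksum failed ({0})".format(error))
            if data.get('status') == 'found':
                return data.get('checksum', 'unknown')
            time.sleep(sleep)
            sleep = min(sleep * 4, 5)  # 0.25, 1, 4, then 5s between polls
        return None  # caller logs a timeout warning
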
diff --git a/shell/src/dbutils.py b/shell/src/dbutils.py
index a4798773..b054f6eb 100644
--- a/shell/src/dbutils.py
+++ b/shell/src/dbutils.py
@@ -526,10 +526,16 @@ class CachedFullPathTS(CachedFullPath):
         CachedFullPath.__init__(self, conn, maxsize, table, fileid_only)
 
         if conn:
+            self._conn_new = False
             self._conn = conn
         else:
+            self._conn_new = True
             self._conn = DBConn.new('cns_db')
 
+    def close(self):
+        if self._conn_new and self._conn:
+            self._conn.close()
+
 
 
 class DPMDB(object):
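
CachedFullPathTS.close() tears down the DB connection only when the object
created it itself (tracked by self._conn_new); a caller-supplied connection is
left open. A hypothetical minimal analogue of this ownership pattern:

    class ConnHolder(object):
        # hypothetical stand-in for CachedFullPathTS's connection handling
        def __init__(self, conn=None, factory=None):
            self._conn_new = conn is None  # True if we create the connection
            self._conn = conn if conn is not None else factory()

        def close(self):
            # close only what we own, never a caller-supplied connection
            if self._conn_new and self._conn:
                self._conn.close()
                self._conn = None  # guard against double close
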
diff --git a/shell/src/executor.py b/shell/src/executor.py
index aac9e73d..55f986b2 100644
--- a/shell/src/executor.py
+++ b/shell/src/executor.py
@@ -2,6 +2,7 @@ import os
 import re
 import json
 import ssl
+import socket
 import subprocess # used by execute_old
 import logging
 
@@ -51,9 +52,10 @@ class DomeTalker(object):
         self.uri = uri
         self.verb = verb
         self.cmd = cmd
+        self._timeout = 60
 
     def execute_old(self, data):
-        _log.debug("talk with %s %s" % (self.cmd, data))
+        _log.debug("talk with %s %s", self.cmd, data)
 
         cmd = ["davix-http"]
         if self.creds.cert:
@@ -80,7 +82,7 @@ class DomeTalker(object):
         return (out, 200)
 
     def execute_new(self, params):
-        _log.debug("talk with %s %s" % (self.cmd, params))
+        _log.debug("talk with %s %s", self.cmd, params)
 
         url = urlparse(DomeTalker.build_url(self.uri, self.cmd))
 
@@ -116,6 +118,7 @@ class DomeTalker(object):
                 if conn.get('scheme') != scheme or conn.get('host') != host or conn.get('port') != port:
                     if conn.get('conn'):
                         conn['conn'].close()
+                        del(conn['conn'])
 
                     if url.scheme == 'https':
                         # configure SSL context
@@ -126,17 +129,17 @@ class DomeTalker(object):
                         if self.creds.capath:
                             context.load_verify_locations(capath=self.creds.capath)
 
-                        conn['conn'] = HTTPSConnection(host, port=port, context=context)
+                        conn['conn'] = HTTPSConnection(host, port=port, context=context, timeout=self._timeout)
 
                     else:
-                        conn['conn'] = HTTPConnection(host, port=port)
+                        conn['conn'] = HTTPConnection(host, port=port, timeout=self._timeout)
 
                     conn['scheme'] = scheme
                     conn['host'] = host
                     conn['port'] = port
 
                 # submit HTTPS request
-                _log.debug("New %s %s connection %s:%s%s", url.scheme.upper(), method, host, port, path)
+                _log.debug("new %s %s connection %s:%s%s", url.scheme.upper(), method, host, port, path)
                 conn['conn'].request(method=method, url=path, headers=hdrs, body=body)
                 response = conn['conn'].getresponse()
 
@@ -156,8 +159,12 @@ class DomeTalker(object):
             data = response.read().decode('utf-8')
             status = response.status
 
+        except socket.timeout as e:
+            _log.error("timeout to talk with %s %s", self.cmd, params)
+            data = str(e)
+
         except Exception as e:
-            _log.error("failed to talk with DOME: %s", str(e))
+            _log.error("failed to talk with DOME %s: %s", self.cmd, str(e))
             data = str(e)
 
         finally:
@@ -352,7 +359,7 @@ class DomeExecutor(object):
         return self._simple_response('POST', 'dome_putdone', {"server": server, "pfn": pfn, "size": size, "checksumtype": checksumtype, "checksumvalue": checksumvalue})
 
     def chksum(self, checksumtype, lfn, pfn=None, force=False):
-        return self._simple_response('GET', 'dome_chksum', {"checksum-type": checksumtype, "lfn": lfn, "pfn": pfn, "force-recalc": force})
+        return self._json_response('GET', 'dome_chksum', {"checksum-type": checksumtype, "lfn": lfn, "pfn": pfn, "force-recalc": force})
 
     def chksumstatus(self, checksumtype, lfn, pfn, status, checksum, update_lfn=False, reason=None):
         return self._simple_response('POST', 'dome_chksumstatus', {"checksum-type": checksumtype, "lfn": lfn, "pfn": pfn, "status": status, "checksum": checksum, "update-lfn-checksum": update_lfn, "reason": reason})
diff --git a/shell/src/interpreter.py b/shell/src/interpreter.py
index 2cb488eb..f98dc9af 100644
--- a/shell/src/interpreter.py
+++ b/shell/src/interpreter.py
@@ -4314,7 +4314,7 @@ Optional parameters:
         activities = []
         for activity in given.pop(0).split(','):
             if activity == 'dpm-dbck':
-                #activities.append('lost-and-dark')
+                #activities.append('lost-and-dark') # slow and manual cleanup
                 activities.append('namespace-continuity')
                 activities.append('wrong-replica')
                 activities.append('stuck-replica')
@@ -4327,8 +4327,8 @@ Optional parameters:
                 activities.append('spacetoken')
                 activities.append('dir-size')
                 activities.append('spacetoken-size')
-                activities.append('pool-file')
-                activities.append('fill-checksum')
+                #activities.append('pool-file') # slow
+                #activities.append('fill-checksum') # slow
             elif activity == 'legacy-to-dome-migration':
                 activities.append('zero-dir')
                 activities.append('spacetoken')
@@ -4539,9 +4539,9 @@ Optional parameters:
 
                 elif activity == 'fill-checksum':
 
-                    def fill_missing_checksum_with_result(result, catalog, csumvalue, host, fs, checksumtype, updatedb):
+                    def fill_missing_checksum_with_result(result, executor, host, fs, checksumtype, timeout, updatedb):
                         try:
-                            updates = dbck.fill_missing_checksum(catalog, csumvalue, host, fs, checksumtype, updatedb)
+                            updates = dbck.fill_missing_checksum(executor, host, fs, checksumtype, timeout, updatedb)
                             result[(host, fs)] = updates
                         except Exception as e:
                             _log.error("checksum recalc failed for %s:%s: %s", host, fs, str(e))
@@ -4561,14 +4561,14 @@ Optional parameters:
                     random.shuffle(hostfs)
                     filesystems = len(hostfs)
 
-                    logint = 60
+                    logint = 600
                     lastlog = time.monotonic()
                     tactive = []
                     results = {}
                     while len(hostfs) > 0 or len(tactive) > 0:
                         while len(hostfs) > 0 and len(tactive) < nthreads:
                             host, fs = hostfs.pop()
-                            t = threading.Thread(target=fill_missing_checksum_with_result, args=(results, self.interpreter.catalog, pydmlite.StringWrapper(), host, fs, csumtype, updatedb))
+                            t = threading.Thread(target=fill_missing_checksum_with_result, args=(results, self.interpreter.executor, host, fs, csumtype, 900, updatedb))
                             t.start()
                             _log.info("started checkum recalc thread for %s:%s", host, fs)
                             tactive.append((t, host, fs))
@@ -4585,8 +4585,8 @@ Optional parameters:
 
                         if len(tactive) == nthreads or (len(tactive) > 0 and len(hostfs) == 0):
                             currlog = time.monotonic()
-                            if int(lastlog) / logint != int(currlog) / logint:
-                                _log.debug("checksum recal %i done, %i running, %i waiting", filesystems-len(tactive)-len(hostfs), len(tactive), len(hostfs))
+                            if int(lastlog) // logint != int(currlog) // logint:
+                                _log.info("checksum recalc filesystems: %i done, %i running, %i waiting", filesystems-len(tactive)-len(hostfs), len(tactive), len(hostfs))
                                 lastlog = currlog
                             time.sleep(1.0)
 
@@ -4800,7 +4800,7 @@ Additional documentation:
         # parse optional parameters
         parameters = {
             'namespace': 'namespace.csv',
-            'topology': 'topology.csv',
+            'config': 'config.csv',
             'skip-acl': '0',
         }
         for param in given:
@@ -4809,19 +4809,23 @@ Additional documentation:
             k, v = param.split('=', 1)
             parameters[k] = v
 
+        if sys.version_info[:2] < (3, 6):
+            return self.error("Migration tools require python 3.6, please use `python3 /usr/bin/dmlite-shell ...`")
+
         _log.debug("start DPM data dump with parameters %s", parameters)
 
         conn_cns = DBConn.get('cns_db')
         conn_dpm = DBConn.get('dpm_db')
         namespace = parameters['namespace']
-        topology = parameters['topology']
+        config = parameters['config']
         skip_acl = parameters['skip-acl'].lower() in ['true', '1']
 
         try:
             from . import migrate
             m = migrate.dpm(conn_cns, conn_dpm)
-            m.export_csv(namespace, topology, skip_acl)
+            m.export_csv(namespace, config, skip_acl)
         except Exception as e:
-            return self.error("Failed to dump migration date: %s" % str(e))
+            _log.debug(traceback.format_exc())
+            return self.error("Failed to dump migration data: %s" % str(e))
 
         return self.ok("OK")
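
The interpreter change swaps '/' for '//' in the progress-log gate: under
python3 '/' is float division, so the two bucket values almost never compare
equal and the message fires every iteration instead of once per logint window.
A self-contained sketch of the corrected gate:

    import time

    def run_with_periodic_log(steps, logint=600):
        lastlog = time.monotonic()
        for _ in range(steps):
            currlog = time.monotonic()
            # floor division buckets timestamps into logint-second windows;
            # the message is emitted only when a window boundary is crossed
            if int(lastlog) // logint != int(currlog) // logint:
                print("checksum recalc progress ...")
                lastlog = currlog
            time.sleep(1.0)
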
diff --git a/shell/src/lost.py b/shell/src/lost.py
index f0844e16..221c5b32 100644
--- a/shell/src/lost.py
+++ b/shell/src/lost.py
@@ -365,10 +365,24 @@ class FileModule(BaseModule):
         self._write('DARK', self._diskserver, filename, data)
 
     def lost(self, filename, data):
-        if data['parent_dir_exists']:
-            self._write('LOST', self._diskserver, filename, data)
-        else:
-            self._write('LOSTNODIR', self._diskserver, filename, data)
+        status = 'LOST_UNKNOWN'
+        rstatus = data.get('db', {}).get('replica_status', '-')
+        if rstatus == '-':
+            if data['parent_dir_exists']:
+                status = 'LOST_FILE'
+            else:
+                status = 'LOST_NODIR'
+        elif rstatus == 'D':
+            status = 'LOST_DELETED'
+        elif rstatus == 'P':
+            currtime = time.time()
+            ctime = data.get('db', {}).get('replica_ctime', 0)
+            ptime = data.get('db', {}).get('replica_ptime', 0)
+            if (ptime == 0 and ctime+86400 < currtime) or (ptime != 0 and ptime+86400 < currtime):
+                status = 'LOST_UPLOADING'
+            else:
+                status = 'LOST_RECENT'
+        self._write(status, self._diskserver, filename, data)
 
     def _write(self, name, diskserver, filename, data):
         db = data.get('db', {})
@@ -414,14 +428,33 @@ class CleanupModule(FileModule):
         self._fh.write("ssh %s %s\n" % (self._diskserver, remote_command))
 
     def lost(self, filename, data):
-        if data['parent_dir_exists']:
-            self._write('# LOST', self._diskserver, filename.replace('\n', '\\n'), data)
-        else:
-            self._write('# LOSTNODIR', self._diskserver, filename.replace('\n', '\\n'), data)
+        cleanup = False
+        status = 'LOST_UNKNOWN'
+        rstatus = data.get('db', {}).get('replica_status', '-')
+        if rstatus == '-':
+            if data['parent_dir_exists']:
+                status = 'LOST_FILE'
+            else:
+                status = 'LOST_NODIR'
+            cleanup = True
+        elif rstatus == 'D':
+            status = 'LOST_DELETED'
+            cleanup = True
+        elif rstatus == 'P':
+            currtime = time.time()
+            ctime = data.get('db', {}).get('replica_ctime', 0)
+            ptime = data.get('db', {}).get('replica_ptime', 0)
+            if (ptime == 0 and ctime+86400 < currtime) or (ptime != 0 and ptime+86400 < currtime):
+                status = 'LOST_UPLOADING'
+                cleanup = True
+            else:
+                status = 'LOST_RECENT'
+
+        self._write("# {0}".format(status), self._diskserver, filename.replace('\n', '\\n'), data)
 
         db = data.get('db', {})
         rrowid = db.get('replica_rowid', '')
-        self._fh.write("mysql --host=%s --port=%i --user=%s --password='%s' --execute='DELETE FROM Cns_file_replica WHERE rowid = %i' %s\n" % (self._cns_db_config['host'], self._cns_db_config['port'], self._cns_db_config['user'], self._cns_db_config['pass'], rrowid, self._cns_db_config['db']))
+        self._fh.write("%smysql --host=%s --port=%i --user=%s --password='%s' --execute='DELETE FROM Cns_file_replica WHERE rowid = %i' %s\n" % ("" if cleanup else "#", self._cns_db_config['host'], self._cns_db_config['port'], self._cns_db_config['user'], self._cns_db_config['pass'], rrowid, self._cns_db_config['db']))
 
 
 
@@ -578,7 +611,7 @@ def diskserverfs_check(diskserver, diskserverfs, modules = []):
         #cursor.execute("select sfn, rowid, fileid, status from Cns_file_replica where host = %s and fs = %s", (diskserver, diskserverfs))
         # MySQL doesn't support FULL OUTER JOIN, use UNION and two separate LEFT+RIGHT exclusive joins
         cols = [
-            'replica.fileid', 'replica.rowid', 'replica.status', 'replica.sfn',
+            'replica.fileid', 'replica.rowid', 'replica.status', 'replica.ctime', 'replica.ptime', 'replica.sfn',
             'metadata.rowid', 'metadata.filesize', 'metadata.csumtype', 'metadata.csumvalue',
         ]
         colstr = ', '.join(["{0} AS `{1}`".format(x, x.replace('.', '_', 1)) for x in cols])
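
Both lost.py modules now classify a lost replica by its replica status ('-'
online, 'D' being deleted, 'P' still populating) with a one-day grace period
before a pending upload counts as lost. The same decision table as a
standalone sketch (key names follow the db dict used in the diff):

    import time

    GRACE = 86400  # one day, as in the diff

    def classify_lost(db, parent_dir_exists):
        rstatus = db.get('replica_status', '-')
        if rstatus == '-':
            return 'LOST_FILE' if parent_dir_exists else 'LOST_NODIR'
        if rstatus == 'D':
            return 'LOST_DELETED'
        if rstatus == 'P':
            # use ptime when set, otherwise fall back to the creation time
            reference = db.get('replica_ptime', 0) or db.get('replica_ctime', 0)
            if reference + GRACE < time.time():
                return 'LOST_UPLOADING'
            return 'LOST_RECENT'
        return 'LOST_UNKNOWN'
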
diff --git a/shell/src/migrate.py b/shell/src/migrate.py
index 6da6b822..f889c34c 100644
--- a/shell/src/migrate.py
+++ b/shell/src/migrate.py
@@ -28,7 +28,7 @@ except ImportError: import json
 
 __version__ = '0.0.1'
 
-_log = logging.getLogger('migration')
+_log = logging.getLogger('dmlite-shell')
 
 
 class dpm(object):
@@ -2699,8 +2699,8 @@ def main(argv):
     parser.add_option('--remove', dest='remove', default=False, action="store_true", help="Remove original data files, default: %default")
     parser.add_option('--remove-file', dest='remove_file', help="Filename with base, source and destination path in CSV format, default: %default")
     parser.add_option('--remove-reverse', dest='remove_reverse', default=False, action="store_true", help="Switch source and destination, default: %default")
-    parser.add_option('--fix-spacetokens', dest='fix_spacetokens', default=False, action="store_true", help="Use WriteToken to fix spacetokens, default: %default")
-    parser.add_option('--fix-spacetokens-dry-run', dest='fix_spacetokens_dry_run', default=False, action="store_true", help="Use WriteToken to fix spacetokens (dry-run only, no updates), default: %default")
+    parser.add_option('--dcache-fix-spacetokens', dest='dcache_fix_spacetokens', default=False, action="store_true", help="Use WriteToken to fix dCache spacetokens, default: %default")
+    parser.add_option('--dcache-fix-spacetokens-dry-run', dest='dcache_fix_spacetokens_dry_run', default=False, action="store_true", help="Use WriteToken to fix dCache spacetokens (dry-run only, no updates), default: %default")
 
     (options, args) = parser.parse_args(argv[1:])
 
@@ -2775,7 +2775,7 @@ def main(argv):
             m.write_config("{0}.default".format(options.config))
             m.generate_config()
 
-        if options.dcache_import or options.fix_spacetokens:
+        if options.dcache_import or options.dcache_fix_spacetokens or options.dcache_fix_spacetokens_dry_run:
             try:
                 import psycopg2
             except ImportError:
@@ -2806,8 +2806,8 @@ def main(argv):
                 m.write_config("{0}.dcache-after-import".format(options.config))
                 #m.generate_config()
 
-            if options.fix_spacetokens or options.fix_spacetokens_dry_run:
-                m.use_writetoken_for_spacetokens(not options.fix_spacetokens_dry_run)
+            if options.dcache_fix_spacetokens or options.dcache_fix_spacetokens_dry_run:
+                m.use_writetoken_for_spacetokens(not options.dcache_fix_spacetokens_dry_run)
 
             dcache_conn_chimera.close()
             dcache_conn_spacemanager.close()
diff --git a/shell/src/utils.py b/shell/src/utils.py
index b67db134..940c639d 100644
--- a/shell/src/utils.py
+++ b/shell/src/utils.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
 from __future__ import print_function
 from __future__ import division
 
+import os
 import time
 import ctypes
 import ctypes.util
@@ -28,7 +29,7 @@ class timespec(ctypes.Structure):
         return self.tv_sec + float(self.tv_nsec) / 1e9
 
 
-if not hasattr(time, 'monotonic'):
+def clock_gettime(clk_id):
     library = ctypes.util.find_library('rt')
     if not library:
         raise Exception("unable to find librt")
@@ -40,20 +41,30 @@ if not hasattr(time, 'monotonic'):
     except OSError as e:
         raise Exception("can't load %s: %s" % (library, str(e)))
 
-    def _monotonic():
-        CLOCK_MONOTONIC = 1
-        ts = timespec()
+    ts = timespec()
 
-        rc = _clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts))
-        if rc != 0:
-            errno = ctypes.get_errno()
-            raise OSError(errno, os.strerror(errno))
+    rc = _clock_gettime(clk_id, ctypes.pointer(ts))
+    if rc != 0:
+        errno = ctypes.get_errno()
+        raise OSError(errno, os.strerror(errno))
 
-        return float(ts)
+    return float(ts)
 
+
+if not hasattr(time, 'monotonic'):
+    def _monotonic():
+        CLOCK_MONOTONIC = 1
+        return clock_gettime(CLOCK_MONOTONIC)
     setattr(time, 'monotonic', _monotonic)
 
 
+if not hasattr(time, 'process_time'):
+    def _process_time():
+        CLOCK_PROCESS_CPUTIME_ID = 2
+        return clock_gettime(CLOCK_PROCESS_CPUTIME_ID)
+    setattr(time, 'process_time', _process_time)
+
+
 
 #########################################################################
 ######## other useful functions                                  ########
@@ -101,3 +112,6 @@ def prettyInputSize(prettysize):
 if __name__ == '__main__':
     for i in range(5):
         print("monotonic: %.06f" % time.monotonic())
+    for i in range(5):
+        for _ in range(1000000): pass  # busy loop so process_time advances
+        print("process_time: %.06f" % time.process_time())
diff --git a/src/dome/DomeCoreXeq.cpp b/src/dome/DomeCoreXeq.cpp
index c03a1550..f9cc9e86 100644
--- a/src/dome/DomeCoreXeq.cpp
+++ b/src/dome/DomeCoreXeq.cpp
@@ -1701,7 +1701,12 @@ int DomeCore::calculateChecksum(DomeReq &req, std::string lfn, Replica replica,
         else {
           std::string mmm = SSTR("Found a previous finished checksum calculation. namekey: '" << namekey << "'.\r\nTotal checksum calculations in queue right now: " << status.checksumq->nTotal());
           Log(Logger::Lvl2, domelogmask, domelogname, mmm);
-          return req.SendSimpleResp(202, mmm);
+          boost::property_tree::ptree jresp;
+          jresp.put("status", "enqueued");
+          jresp.put("server", replica.server);
+          jresp.put("pfn", DomeUtils::pfn_from_rfio_syntax(replica.rfn));
+          jresp.put("queue-size", status.checksumq->nTotal());
+          return req.SendSimpleResp(202, jresp, "DomeCore::calculateChecksum");
         }
 
       }
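
DomeCore::calculateChecksum now answers the "calculation already known /
enqueued" case with a JSON body instead of a plain-text 202, which is why
executor.py switches chksum() from _simple_response to _json_response. A
sketch of what a client can do with such a payload (_json_response itself is
not shown in this diff, so this helper is an assumption):

    import json

    def parse_chksum_reply(status, body):
        # 200/202 bodies are JSON, e.g.
        # {"status": "enqueued", "server": ..., "pfn": ..., "queue-size": 3}
        if status not in (200, 202):
            return None, body
        try:
            return json.loads(body), None
        except ValueError as e:
            return None, "unparsable response ({0}): {1}".format(e, body)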