diff --git a/dmlite-LCGDM-2940-fix_ignorereadonlyfs.patch b/dmlite-LCGDM-2940-fix_ignorereadonlyfs.patch
deleted file mode 100644
index 4a52618..0000000
--- a/dmlite-LCGDM-2940-fix_ignorereadonlyfs.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-commit c880347415ea76db336c9da0047efbc63b31027a
-Author: Fabrizio Furano
-Date: Tue Aug 11 10:10:27 2020 +0200
-
-    Honour the ignorereadonlyfs flag in a request (fix typo)
-    LCGDM-2940
-
-diff --git a/src/dome/DomeCoreXeq.cpp b/src/dome/DomeCoreXeq.cpp
-index 4b51413f..05b4383a 100644
---- a/src/dome/DomeCoreXeq.cpp
-+++ b/src/dome/DomeCoreXeq.cpp
-@@ -5860,7 +5860,7 @@ int DomeCore::dome_unlink(DomeReq &req) {
-
-   bool ignorebrokenfs = CFG->GetBool("head.unlink.ignorebrokenfs", false) || req.bodyfields.get("ignorebrokenfs", false);
-
--  bool ignorereadonlyfs = CFG->GetBool("head.unlink.ignorereadonlyfs", true) && req.bodyfields.get("ignorebrokenfs", true);
-+  bool ignorereadonlyfs = CFG->GetBool("head.unlink.ignorereadonlyfs", true) && req.bodyfields.get("ignorereadonlyfs", true);
-
-   bool ignorefiledeletionerr = req.bodyfields.get("ignorefiledeletionerr", false);
-
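Why a one-character key name mattered in LCGDM-2940: a `boost::property_tree` `get(path, default)` call silently returns the default when the path is absent, so querying `ignorebrokenfs` where the client actually sent `ignorereadonlyfs` compiled, ran, and always yielded the default `true`. A minimal sketch of that failure mode (plain Boost.PropertyTree, not the dmlite `DomeReq` type):

```cpp
#include <boost/property_tree/ptree.hpp>
#include <iostream>

int main() {
  boost::property_tree::ptree bodyfields;
  bodyfields.put("ignorereadonlyfs", false);  // client explicitly disables the flag

  // Typo: reads a key that was never set, so the default (true) always wins.
  bool wrong = bodyfields.get("ignorebrokenfs", true);
  // Fixed: reads the key the client actually sent.
  bool right = bodyfields.get("ignorereadonlyfs", true);

  std::cout << wrong << " vs " << right << std::endl;  // prints "1 vs 0"
}
```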
" (missing)" : "")); - - return 1; - } -diff --git a/src/dome/DomeMysql_cns.cpp b/src/dome/DomeMysql_cns.cpp -index 1ad747b9..a1dc88cc 100644 ---- a/src/dome/DomeMysql_cns.cpp -+++ b/src/dome/DomeMysql_cns.cpp -@@ -1026,6 +1026,9 @@ DmStatus DomeMySql::getStatbyFileid(dmlite::ExtendedStat& xstat, int64_t fileid) - return DmStatus(ENOENT, SSTR("fileid "<< fileid << " not found (timeout waiting for result)")); - } - if(done) { -+ if (dfi->status_statinfo == DomeFileInfo::NotFound) -+ return DmStatus(ENOENT, SSTR("fileid " << fileid << "' not found (wait+cached)")); -+ - xstat = dfi->statinfo; - } - -@@ -1396,6 +1399,9 @@ DmStatus DomeMySql::getStatbyParentFileid(dmlite::ExtendedStat& xstat, int64_t f - return DmStatus(ENOENT, SSTR("parent_fileid "<< fileid << " with name '" << name << "' not found (timeout waiting for result)")); - } - if(done) { -+ if (dfi->status_statinfo == DomeFileInfo::NotFound) -+ return DmStatus(ENOENT, SSTR("file " << fileid << ":'" << name << "' not found (wait+cached)")); -+ - xstat = dfi->statinfo; - } - diff --git a/dmlite-LCGDM-2943-avoid-db-congestion.patch b/dmlite-LCGDM-2943-avoid-db-congestion.patch deleted file mode 100644 index cf47dfa..0000000 --- a/dmlite-LCGDM-2943-avoid-db-congestion.patch +++ /dev/null @@ -1,354 +0,0 @@ -diff --git a/src/dome/DomeCore.cpp b/src/dome/DomeCore.cpp -index 93f44e14..0b3199c0 100644 ---- a/src/dome/DomeCore.cpp -+++ b/src/dome/DomeCore.cpp -@@ -652,16 +652,16 @@ void DomeCore::tick(int parm) { - - status.stats.db_reqrate = (float)DomeMySql::dbstats.dbqueries / tdiff; - status.stats.db_transrate = (float)DomeMySql::dbstats.dbtrans / tdiff; -+ status.stats.db_timerate = DomeMySql::dbstats.dbtrans > 0 ? (float)(DomeMySql::dbstats.dbtime / DomeMySql::dbstats.dbtrans / 1e6) / tdiff : 0.; - DomeMySql::dbstats.dbqueries = 0; - DomeMySql::dbstats.dbtrans = 0; -+ DomeMySql::dbstats.dbtime = 0; - } - - int ttot, trunning; - this->getTaskCounters(ttot, trunning); - -- Log(Logger::Lvl1, domelogmask, domelogname, "Request rate: " << status.stats.stats_reqrate << "Hz (Peak: " << status.stats.peak_reqrate << "Hz) -- DB queries: " << status.stats.db_reqrate << "Hz -- DB transactions: " << status.stats.db_transrate << "Hz -- Intercluster messages: " << status.stats.stats_intercluster << "Hz -- Known tasks: " << ttot << " -- Running tasks: " << trunning); -- -- -+ Log(Logger::Lvl1, domelogmask, domelogname, "Request rate: " << status.stats.stats_reqrate << "Hz (Peak: " << status.stats.peak_reqrate << "Hz) -- DB queries: " << status.stats.db_reqrate << "Hz -- DB transactions: " << status.stats.db_transrate << "Hz -- DB avg transaction time: " << status.stats.db_timerate << "ms -- Intercluster messages: " << status.stats.stats_intercluster << "Hz -- Known tasks: " << ttot << " -- Running tasks: " << trunning); - - } - -diff --git a/src/dome/DomeCoreXeq.cpp b/src/dome/DomeCoreXeq.cpp -index 05b4383a..ee030937 100644 ---- a/src/dome/DomeCoreXeq.cpp -+++ b/src/dome/DomeCoreXeq.cpp -@@ -1863,7 +1863,8 @@ int DomeCore::dome_info(DomeReq &req, int myidx, bool authorized) { - "Hz (Peak: " << status.stats.peak_reqrate << - "Hz) -- DB queries: " << status.stats.db_reqrate << - "Hz -- DB transactions: " << status.stats.db_transrate << -- "Hz -- Intercluster messages: " << status.stats.stats_intercluster << -+ "Hz -- DB avg transaction time: " << status.stats.db_timerate << -+ "ms -- Intercluster messages: " << status.stats.stats_intercluster << - "Hz\r\n"; - } - -diff --git a/src/dome/DomeMetadataCache.cpp b/src/dome/DomeMetadataCache.cpp -index 
diff --git a/dmlite-LCGDM-2943-avoid-db-congestion.patch b/dmlite-LCGDM-2943-avoid-db-congestion.patch
deleted file mode 100644
index cf47dfa..0000000
--- a/dmlite-LCGDM-2943-avoid-db-congestion.patch
+++ /dev/null
@@ -1,354 +0,0 @@
-diff --git a/src/dome/DomeCore.cpp b/src/dome/DomeCore.cpp
-index 93f44e14..0b3199c0 100644
---- a/src/dome/DomeCore.cpp
-+++ b/src/dome/DomeCore.cpp
-@@ -652,16 +652,16 @@ void DomeCore::tick(int parm) {
-
-     status.stats.db_reqrate = (float)DomeMySql::dbstats.dbqueries / tdiff;
-     status.stats.db_transrate = (float)DomeMySql::dbstats.dbtrans / tdiff;
-+    status.stats.db_timerate = DomeMySql::dbstats.dbtrans > 0 ? (float)(DomeMySql::dbstats.dbtime / DomeMySql::dbstats.dbtrans / 1e6) / tdiff : 0.;
-     DomeMySql::dbstats.dbqueries = 0;
-     DomeMySql::dbstats.dbtrans = 0;
-+    DomeMySql::dbstats.dbtime = 0;
-   }
-
-   int ttot, trunning;
-   this->getTaskCounters(ttot, trunning);
-
--  Log(Logger::Lvl1, domelogmask, domelogname, "Request rate: " << status.stats.stats_reqrate << "Hz (Peak: " << status.stats.peak_reqrate << "Hz) -- DB queries: " << status.stats.db_reqrate << "Hz -- DB transactions: " << status.stats.db_transrate << "Hz -- Intercluster messages: " << status.stats.stats_intercluster << "Hz -- Known tasks: " << ttot << " -- Running tasks: " << trunning);
-
-
-+  Log(Logger::Lvl1, domelogmask, domelogname, "Request rate: " << status.stats.stats_reqrate << "Hz (Peak: " << status.stats.peak_reqrate << "Hz) -- DB queries: " << status.stats.db_reqrate << "Hz -- DB transactions: " << status.stats.db_transrate << "Hz -- DB avg transaction time: " << status.stats.db_timerate << "ms -- Intercluster messages: " << status.stats.stats_intercluster << "Hz -- Known tasks: " << ttot << " -- Running tasks: " << trunning);
-
- }
-
-diff --git a/src/dome/DomeCoreXeq.cpp b/src/dome/DomeCoreXeq.cpp
-index 05b4383a..ee030937 100644
---- a/src/dome/DomeCoreXeq.cpp
-+++ b/src/dome/DomeCoreXeq.cpp
-@@ -1863,7 +1863,8 @@ int DomeCore::dome_info(DomeReq &req, int myidx, bool authorized) {
-       "Hz (Peak: " << status.stats.peak_reqrate <<
-       "Hz) -- DB queries: " << status.stats.db_reqrate <<
-       "Hz -- DB transactions: " << status.stats.db_transrate <<
--      "Hz -- Intercluster messages: " << status.stats.stats_intercluster <<
-+      "Hz -- DB avg transaction time: " << status.stats.db_timerate <<
-+      "ms -- Intercluster messages: " << status.stats.stats_intercluster <<
-       "Hz\r\n";
-   }
-
-diff --git a/src/dome/DomeMetadataCache.cpp b/src/dome/DomeMetadataCache.cpp
-index cda369f7..1a904947 100644
---- a/src/dome/DomeMetadataCache.cpp
-+++ b/src/dome/DomeMetadataCache.cpp
-@@ -218,7 +218,7 @@ int DomeFileInfo::waitLocations(boost::unique_lock &l, int sectmou
-   // By convention, if there is noinfo then it's our responsibility to fill it, hence it becomes pending
-   if (status_locations == NoInfo) {
-     Log(Logger::Lvl4, domelogmask, fname, "Shall fill locations info. Fileid: " << fileid <<
--      "parent_fileid: " << parentfileid << " locfilename: '" << locfilename <<
-+      " parent_fileid: " << parentfileid << " locfilename: '" << locfilename <<
-       "' status_statinfo: " << status_statinfo << " status_locations: " << status_locations);
-
-     status_locations = InProgress;
-diff --git a/src/dome/DomeMysql.cpp b/src/dome/DomeMysql.cpp
-index d9a0d9c7..375956f1 100644
---- a/src/dome/DomeMysql.cpp
-+++ b/src/dome/DomeMysql.cpp
-@@ -94,17 +94,21 @@ int DomeMySql::begin()
-
-   countTransaction();
-
--  if (this->transactionLevel_ == 0 && mysql_query(this->conn_->sqlinst, "BEGIN") != 0) {
--    unsigned int merrno = mysql_errno(this->conn_->sqlinst);
--    std::string merror = mysql_error(this->conn_->sqlinst);
--    MySqlHolder::getMySqlPool().release(conn_);
--    conn_ = 0;
--    Err(fname, "Cannot start transaction: " << DMLITE_DBERR(merrno) << " " << merror);
--    return -1;
-+  if (this->transactionLevel_ == 0) {
-+    if (mysql_query(this->conn_->sqlinst, "BEGIN") != 0) {
-+      unsigned int merrno = mysql_errno(this->conn_->sqlinst);
-+      std::string merror = mysql_error(this->conn_->sqlinst);
-+      MySqlHolder::getMySqlPool().release(conn_);
-+      conn_ = 0;
-+      Err(fname, "Cannot start transaction: " << DMLITE_DBERR(merrno) << " " << merror);
-+      return -1;
-+    }
-+
-+    clock_gettime(CLOCK_MONOTONIC, &tstart_);
-   }
-
-   this->transactionLevel_++;
--  Log(Logger::Lvl3, domelogmask, fname, "Transaction started");
-+  Log(Logger::Lvl3, domelogmask, fname, "Transaction started (level " << this->transactionLevel_ << ")");
-
-   return 0;
- }
-@@ -149,9 +153,19 @@ int DomeMySql::commit()
-       Err(fname, "Cannot commit: " << DMLITE_DBERR(merrno) << " " << merror);
-       return -1;
-     }
-+
-+    struct timespec tend_;
-+    clock_gettime(CLOCK_MONOTONIC, &tend_);
-+    int64_t duration = (int64_t) (tend_.tv_sec - tstart_.tv_sec) * 1e9 + (tend_.tv_nsec - tstart_.tv_nsec);
-+
-+    timeTransaction(duration);
-+    Log(Logger::Lvl3, domelogmask, domelogname, "Exiting after " << ((double) duration / 1e6) << "ms");
-+
-+  } else {
-+
-+    Log(Logger::Lvl3, domelogmask, domelogname, "Exiting level " << this->transactionLevel_);
-   }
-
--  Log(Logger::Lvl3, domelogmask, domelogname, "Exiting.");
-   return 0;
- }
-
-@@ -159,7 +173,17 @@ int DomeMySql::commit()
-
- int DomeMySql::rollback()
- {
--  Log(Logger::Lvl4, domelogmask, domelogname, "");
-+  if (this->transactionLevel_ > 0) {
-+    struct timespec tend_;
-+    clock_gettime(CLOCK_MONOTONIC, &tend_);
-+    int64_t duration = (int64_t) (tend_.tv_sec - tstart_.tv_sec) * 1e9 + (tend_.tv_nsec - tstart_.tv_nsec);
-+
-+    timeTransaction(duration);
-+    Log(Logger::Lvl3, domelogmask, domelogname, "Rollback transaction after " << ((double) duration / 1e6) << "ms");
-+
-+  } else {
-+    Log(Logger::Lvl4, domelogmask, domelogname, "Rollback transaction");
-+  }
-
-   this->transactionLevel_ = 0;
-
-diff --git a/src/dome/DomeMysql.h b/src/dome/DomeMysql.h
-index a22a3ed8..101653a4 100644
---- a/src/dome/DomeMysql.h
-+++ b/src/dome/DomeMysql.h
-@@ -27,6 +27,7 @@
- #define DOMEMYSQL_H
-
- #include
-+#include
- #include
- #include "status.h"
- #include "inode.h"
-@@ -47,11 +48,12 @@ class DomeMySqlDir;
- class DomeDbStats: public boost::mutex {
- public:
-   // Simple stats
--  int64_t dbqueries, dbtrans;
-+  int64_t dbqueries, dbtrans, dbtime;
-
-   DomeDbStats() {
-     dbqueries = 0;
-     dbtrans = 0;
-+    dbtime = 0;
-   };
- };
-
-@@ -246,6 +248,7 @@ protected:
-
-   /// Transaction level, so begins and commits can be nested.
-   unsigned transactionLevel_;
-+  struct timespec tstart_;
-
-   static char *dpmdb, *cnsdb;
-
-@@ -257,6 +260,10 @@ void countTransaction() {
-   boost::unique_lock l(dbstats);
-   dbstats.dbtrans++;
- };
-+void timeTransaction(int64_t dbtime) {
-+  boost::unique_lock l(dbstats);
-+  dbstats.dbtime += dbtime;
-+};
-
- private:
-
-diff --git a/src/dome/DomeMysql_cns.cpp b/src/dome/DomeMysql_cns.cpp
-index 1ad747b9..c1cbc0c9 100644
---- a/src/dome/DomeMysql_cns.cpp
-+++ b/src/dome/DomeMysql_cns.cpp
-@@ -268,53 +268,54 @@ DmStatus DomeMySql::create(ExtendedStat& nf)
-   if (!nf.stat.st_atim.tv_sec) nf.stat.st_atim = ts;
-   if (!nf.stat.st_mtim.tv_sec) nf.stat.st_mtim = ts;
-   if (!nf.stat.st_ctim.tv_sec) nf.stat.st_ctim = ts;
--
--
-+
-   // Fetch the new file ID
-   ino_t newFileId = 0;
-
--  // Start transaction
--  DomeMySqlTrans trans(this);
--
-   try {
-+    // Scope to make sure that the local objects that involve mysql
-+    // are destroyed before the transaction is closed
-
-+    // Start transaction
-+    DomeMySqlTrans trans(this);
-+    Statement uniqueId(*conn_, DomeMySql::cnsdb, "SELECT id FROM Cns_unique_id FOR UPDATE");
-
--    {
--      // Scope to make sure that the local objects that involve mysql
--      // are destroyed before the transaction is closed
--
--
--      Statement uniqueId(*conn_, DomeMySql::cnsdb, "SELECT id FROM Cns_unique_id FOR UPDATE");
--
--      uniqueId.execute();
--      uniqueId.bindResult(0, &newFileId);
--
--      // Update the unique ID
--      if (uniqueId.fetch()) {
--        Statement updateUnique(*conn_, DomeMySql::cnsdb, "UPDATE Cns_unique_id SET id = ?");
--        ++newFileId;
--        updateUnique.bindParam(0, newFileId);
--        updateUnique.execute();
--      }
--      // Couldn't get, so insert
--      else {
--        Statement insertUnique(*conn_, DomeMySql::cnsdb, "INSERT INTO Cns_unique_id (id) VALUES (?)");
--        newFileId = 1;
--        insertUnique.bindParam(0, newFileId);
--        insertUnique.execute();
--      }
--
-+    uniqueId.execute();
-+    uniqueId.bindResult(0, &newFileId);
-
--      // Closing the scope here makes sure that no local mysql-involving objects
--      // are still around when we close the transaction
-+    // Update the unique ID
-+    if (uniqueId.fetch()) {
-+      Statement updateUnique(*conn_, DomeMySql::cnsdb, "UPDATE Cns_unique_id SET id = ?");
-+      ++newFileId;
-+      updateUnique.bindParam(0, newFileId);
-+      updateUnique.execute();
-+    }
-+    // Couldn't get, so insert
-+    else {
-+      Statement insertUnique(*conn_, DomeMySql::cnsdb, "INSERT INTO Cns_unique_id (id) VALUES (?)");
-+      newFileId = 1;
-+      insertUnique.bindParam(0, newFileId);
-+      insertUnique.execute();
-     }
-
-+    // Closing the scope here makes sure that no local mysql-involving objects
-+    // are still around when we close the transaction
-+    trans.Commit();
-
-+  }
-+  catch ( DmException &e ) {
-+    return DmStatus(e.code(), SSTR("No unique fileid for file '" << nf.name << "' parent: " << nf.parent << ", err: " << e.what()));
-+  }
-+
-+  try {
-     // Regular files start with 1 link. Directories 0.
-     unsigned nlink = S_ISDIR(nf.stat.st_mode) ? 0 : 1;
-     std::string aclStr = nf.acl.serialize();
-     char cstatus = static_cast(nf.status);
-
-+    // Start transaction
-+    DomeMySqlTrans trans(this);
-+
-     // Create the entry
-     Statement fileStmt(*this->conn_, DomeMySql::cnsdb, "INSERT INTO Cns_file_metadata\
-       (fileid, parent_fileid, name, filemode, nlink, owner_uid, gid,\
-@@ -367,7 +368,6 @@ DmStatus DomeMySql::create(ExtendedStat& nf)
-     // This also releases the connection back to the pool
-     trans.Commit();
-
--
-   }
-   catch ( DmException &e ) {
-     if (e.code() | DMLITE_DATABASE_ERROR) {
-diff --git a/src/dome/DomeStatus.h b/src/dome/DomeStatus.h
-index 7eb256bf..13497183 100644
---- a/src/dome/DomeStatus.h
-+++ b/src/dome/DomeStatus.h
-@@ -249,7 +249,7 @@ public:
-   time_t lastcheck, lastpeakcheck;
-
-   // simple stats, last calculated values, ready to print
--  float db_reqrate, db_transrate;
-+  float db_reqrate, db_transrate, db_timerate;
-   float stats_intercluster, peak_reqrate, stats_reqrate;
-   DomeStats() {
-     cntrequests = 0;
-@@ -260,6 +260,7 @@ public:
-     stats_reqrate = 0;
-     db_reqrate = 0;
-     db_transrate = 0;
-+    db_timerate = 0;
-     stats_intercluster = 0;
-     peak_reqrate = 0;
-   };
-diff --git a/src/utils/MySqlWrapper.cpp b/src/utils/MySqlWrapper.cpp
-index 273fa521..dc429ee3 100644
---- a/src/utils/MySqlWrapper.cpp
-+++ b/src/utils/MySqlWrapper.cpp
-@@ -37,7 +37,7 @@ if (index > this->nFields_)\
-
-
- Statement::Statement(MYSQL* conn, const std::string& db, const char* query) :
--  nFields_(0), result_(NULL), result_null_(NULL), status_(STMT_CREATED)
-+  nFields_(0), result_(NULL), result_null_(NULL), status_(STMT_CREATED), query_(NULL)
- {
-   if (mysql_select_db(conn, db.c_str()) != 0)
-     throw DmException(DMLITE_DBERR(mysql_errno(conn)),
-@@ -50,6 +50,8 @@ Statement::Statement(MYSQL* conn, const std::string& db, const char* query) :
-   this->nParams_ = mysql_stmt_param_count(this->stmt_);
-   this->params_ = new MYSQL_BIND [this->nParams_];
-   std::memset(this->params_, 0, sizeof(MYSQL_BIND) * this->nParams_);
-+
-+  this->query_ = strdup(query);
- }
-
-
-@@ -79,6 +81,10 @@ Statement::~Statement() throw ()
-     delete [] this->result_null_;
-   }
-
-+  if (this->query_) {
-+    free(this->query_);
-+  }
-+
-   // Close statement
-   mysql_stmt_close(this->stmt_);
- }
-@@ -140,7 +146,17 @@ unsigned long Statement::execute(void)
-
-   mysql_stmt_bind_param(this->stmt_, this->params_);
-
--  if (mysql_stmt_execute(this->stmt_) != 0)
-+  struct timespec starttime;
-+  struct timespec endtime;
-+
-+  clock_gettime(CLOCK_MONOTONIC, &starttime);
-+  int eres = mysql_stmt_execute(this->stmt_);
-+  clock_gettime(CLOCK_MONOTONIC, &endtime);
-+
-+  double duration = ((endtime.tv_sec - starttime.tv_sec) * 1e3) + (double) (endtime.tv_nsec - starttime.tv_nsec) / 1e6;
-+  Log(Logger::Lvl4, Logger::unregistered, Logger::unregisteredname, "Executed: " << stmt_ << " result " << eres << " errno " << mysql_stmt_errno(this->stmt_) << " duration " << duration << "ms query " << this->query_);
-+
-+  if (eres != 0)
-     this->throwException();
-
-   // Count fields and reserve
-diff --git a/src/utils/MySqlWrapper.h b/src/utils/MySqlWrapper.h
-index f3df2e60..30bf0987 100644
---- a/src/utils/MySqlWrapper.h
-+++ b/src/utils/MySqlWrapper.h
-@@ -58,6 +58,7 @@ private:
-   MYSQL_BIND* result_;
-   my_bool*    result_null_;
-   Step        status_;
-+  char*       query_;
-
-   /// Throws the proper exception
-   void throwException() ;
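The LCGDM-2943 timing added above is a standard `CLOCK_MONOTONIC` delta: `begin()` stamps `tstart_`, then `commit()` and `rollback()` take the difference in nanoseconds, accumulate it via `timeTransaction()`, and print it as milliseconds (`duration / 1e6`). The same arithmetic in isolation, as a minimal sketch (POSIX `clock_gettime`, not the dmlite classes):

```cpp
#include <cstdint>
#include <cstdio>
#include <ctime>

int main() {
  struct timespec tstart, tend;
  clock_gettime(CLOCK_MONOTONIC, &tstart);   // monotonic: immune to wall-clock jumps

  // ... work being measured ...

  clock_gettime(CLOCK_MONOTONIC, &tend);
  int64_t duration_ns = (int64_t)(tend.tv_sec - tstart.tv_sec) * 1000000000LL
                      + (tend.tv_nsec - tstart.tv_nsec);
  std::printf("took %.3f ms\n", (double)duration_ns / 1e6);
}
```

One design note: multiplying by the integer constant `1000000000LL` keeps the whole computation in `int64_t`, while the patch's `* 1e9` detours through `double`; for realistic transaction durations both give the same result.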
diff --git a/dmlite-LCGDM-2946-davs_zero_size_files.patch b/dmlite-LCGDM-2946-davs_zero_size_files.patch
deleted file mode 100644
index ec27982..0000000
--- a/dmlite-LCGDM-2946-davs_zero_size_files.patch
+++ /dev/null
@@ -1,109 +0,0 @@
-commit a75a0002229f7d838b2011d57eaabec3bdbd2475
-Author: Petr Vokac
-Date: Tue Sep 8 08:54:59 2020 +0200
-
-    LCGDM-2946 Remove special case for creating zero size files with HTTP
-    Files uploaded with the HTTP protocol and Content-Length zero
-    were treated differently: instead of dome_put they
-    were created with dome_create. This led to issues with
-    downloads, checksums and probably other operations,
-    because these files did not register a replica in the DPM
-    database and did not have a corresponding stored file.
-
-    This modification reverts the 2012 lcgdm-dav update
-    * Empty PUT's are considered create calls
-
-diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_ns/repository.c b/src/plugins/apache-httpd/src/mod_lcgdm_ns/repository.c
-index 680e7ad7..f8f9131f 100644
---- a/src/plugins/apache-httpd/src/mod_lcgdm_ns/repository.c
-+++ b/src/plugins/apache-httpd/src/mod_lcgdm_ns/repository.c
-@@ -654,18 +654,6 @@ static dav_error *dav_ns_open_stream(const dav_resource *resource,
-     length = apr_table_get(info->request->headers_in, "content-length");
-     if (length != NULL ) {
-         contentLength = atol(length);
--        if (contentLength == 0) {
--            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, info->request,
--                    "PUT with content-length 0. Creating ns entry.");
--
--            if (dmlite_create(info->ctx, info->sfn, 0644) != 0)
--                return dav_shared_new_error(info->request, info->ctx, 0,
--                        "Could not create empty file %s", info->sfn);
--
--            *stream = (dav_stream*) calloc(1, sizeof(dav_stream));
--
--            return NULL ;
--        }
-     }
-
-     /* NS alone doesn't support PUTs with content! */
-
-commit dcd5418618774ec8ca91acc37fb07c10a5e601ce
-Author: Petr Vokac
-Date: Tue Sep 8 08:53:11 2020 +0200
-
-    Revert "Checksum calculation for zero size files"
-
-    This reverts commit 0925c98cc180bd2ef55530bf5dd4c79d69ba3328.
-
-diff --git a/src/dome/DomeCoreXeq.cpp b/src/dome/DomeCoreXeq.cpp
-index 05b4383a..4319e5f6 100644
---- a/src/dome/DomeCoreXeq.cpp
-+++ b/src/dome/DomeCoreXeq.cpp
-@@ -1917,41 +1917,6 @@ int DomeCore::dome_chksum(DomeReq &req) {
-       return req.DomeReq::SendSimpleResp(422, SSTR("unknown checksum type " << chksumtype));
-   }
-
--  // retrieve lfn metadata
--  ExtendedStat xstat;
--  {
--    DmStatus st = sql.getStatbyLFN(xstat, lfn);
--    if (!st.ok())
--      return req.DomeReq::SendSimpleResp(404, SSTR("Cannot stat lfn: '" << lfn << "'"));
--  }
--
--  // checksum for zero size files that doesn't have replica
--  if (xstat.status == ExtendedStat::kOnline && xstat.stat.st_size == 0) {
--    std::vector replicas;
--    DmStatus ret = sql.getReplicas(replicas, lfn);
--    if(ret.ok() && replicas.size() == 0) {
--      std::string lfnchecksum;
--      if(!forcerecalc && xstat.hasField(fullchecksum)) {
--        lfnchecksum = xstat.getString(fullchecksum);
--      }
--      if (lfnchecksum == "") {
--        if (chksumtype == "md5")
--          lfnchecksum = "d41d8cd98f00b204e9800998ecf8427e";
--        else if (chksumtype == "crc32")
--          lfnchecksum = "0";
--        else if (chksumtype == "adler32")
--          lfnchecksum = "00000001";
--        else
--          return req.DomeReq::SendSimpleResp(422, SSTR("unknown checksum type " << chksumtype << " for zero size files"));
--        sql.setChecksum(xstat.stat.st_ino, chksumtype, lfnchecksum);
--      }
--      boost::property_tree::ptree jresp;
--      jresp.put("status", "found");
--      jresp.put("checksum", lfnchecksum);
--      return req.DomeReq::SendSimpleResp(200, jresp);
--    }
--  }
--
-   if(forcerecalc) {
-     Replica replica = pickReplica(lfn, pfn, sql);
-     return calculateChecksum(req, lfn, replica, chksumtype, updateLfnChecksum, true);
-@@ -1962,6 +1927,15 @@ int DomeCore::dome_chksum(DomeReq &req) {
-   std::string pfnchecksum;
-   Replica replica;
-
-+  // retrieve lfn checksum
-+  ExtendedStat xstat;
-+  {
-+    DomeMySql sql;
-+    DmStatus st = sql.getStatbyLFN(xstat, lfn);
-+    if (!st.ok())
-+      return req.DomeReq::SendSimpleResp(404, SSTR("Cannot stat lfn: '" << lfn << "'"));
-+  }
-+
-   if(xstat.hasField(fullchecksum)) {
-     lfnchecksum = xstat.getString(fullchecksum);
-     if (!forcerecalc)
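The hard-coded constants that the reverted LCGDM-2946 special case returned are the standard digests of empty input: MD5 `d41d8cd98f00b204e9800998ecf8427e`, CRC32 `0`, and Adler32 `1` (printed as `00000001`). The two zlib values can be checked directly; a small sketch (requires zlib, not part of the patched code):

```cpp
#include <cstdio>
#include <zlib.h>

int main() {
  // Passing Z_NULL asks zlib for the initial value, i.e. the checksum of empty input.
  uLong crc = crc32(0L, Z_NULL, 0);      // 0
  uLong adler = adler32(0L, Z_NULL, 0);  // 1
  std::printf("crc32(\"\") = %lu, adler32(\"\") = %08lx\n", crc, adler);
}
```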
diff --git a/dmlite-LCGDM-2948-draining_last_transfer_wait.patch b/dmlite-LCGDM-2948-draining_last_transfer_wait.patch
deleted file mode 100644
index a6ee15c..0000000
--- a/dmlite-LCGDM-2948-draining_last_transfer_wait.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-diff --git a/shell/src/interpreter.py b/shell/src/interpreter.py
-index 011b9a23..625fce88 100644
---- a/shell/src/interpreter.py
-+++ b/shell/src/interpreter.py
-@@ -3218,11 +3218,13 @@ class DrainReplicas(object):
-         self.threadpool = []
-
-     def stopThreads(self):
--        self.interpreter.ok('Drain process Stopped, Waiting max 10 seconds for each running thread to end...')
-+        self.interpreter.ok('Drain process Stopped, Waiting max 60 seconds + 5 seconds for each running thread')
-         for t in self.threadpool:
-             t.stop()
-+        join_start = time.monotonic()
-         for t in self.threadpool:
--            t.join(10)
-+            wait = 60 - (time.monotonic() - join_start)
-+            t.join(wait if wait > 5 else 5)
-         self.printDrainErrors()
-
-     def printDrainErrors(self):
-@@ -3291,8 +3293,11 @@ class DrainReplicas(object):
-
-         for t in self.threadpool:
-             t.stop()
-+        self.interpreter.ok("Joining %i threads (wait max 3600s + 5s for each thread)\n" % len(self.threadpool))
-+        join_start = time.monotonic()
-         for t in self.threadpool:
--            t.join(10)
-+            wait = 3600 - (time.monotonic() - join_start)
-+            t.join(wait if wait > 5 else 5)
-         if self.parameters['move']:
-             self.interpreter.ok("Move Process completed\n")
-         else:
completed\n") - else: diff --git a/dmlite-LCGDM-2949-xrootd-signing-and-tls.patch b/dmlite-LCGDM-2949-xrootd-signing-and-tls.patch deleted file mode 100644 index 04112b9..0000000 --- a/dmlite-LCGDM-2949-xrootd-signing-and-tls.patch +++ /dev/null @@ -1,479 +0,0 @@ -diff --git a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -index 18fca76c..0881b9fa 100644 ---- a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -+++ b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -@@ -23,6 +23,10 @@ sec.protocol /usr/$(xrdlibdir) gsi -dlgpxy:1 -exppxy:=creds -crl:3 -key:/etc/gri - sec.protocol /usr/$(xrdlibdir) unix - xrootd.export / - xrd.port 1095 -+#xrd.tls /etc/grid-security/dpmmgr/dpmcert.pem /etc/grid-security/dpmmgr/dpmkey.pem -+#xrd.tlsca certdir /etc/grid-security/certificates -+#xrootd.tls none -+xrd.timeout idle 60m - ofs.osslib +cksio libXrdDPMOss.so.3 - ofs.authlib libXrdDPMDiskAcc.so.3 - ofs.ckslib = libXrdDPMCks.so.3 -diff --git a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmfedredir_atlas.cfg b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmfedredir_atlas.cfg -index 55b64de2..c8de0445 100644 ---- a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmfedredir_atlas.cfg -+++ b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmfedredir_atlas.cfg -@@ -25,6 +25,10 @@ xrootd.seclib libXrdSec.so - sec.protocol /usr/$(xrdlibdir) gsi -crl:3 -key:/etc/grid-security/dpmmgr/dpmkey.pem -cert:/etc/grid-security/dpmmgr/dpmcert.pem -md:sha256:sha1 -ca:2 -gmapopt:10 -vomsat:0 - sec.protocol /usr/$(xrdlibdir) unix - -+#xrd.tls /etc/grid-security/dpmmgr/dpmcert.pem /etc/grid-security/dpmmgr/dpmkey.pem -+#xrd.tlsca certdir /etc/grid-security/certificates -+#xrootd.tls none -+ - ofs.cmslib libXrdDPMFinder.so.3 - ofs.osslib libXrdDPMOss.so.3 - -diff --git a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -index 11ee7c9d..ded9bd7c 100644 ---- a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -+++ b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -@@ -25,6 +25,10 @@ xrootd.seclib libXrdSec.so - sec.protocol /usr/$(xrdlibdir) gsi -crl:3 -key:/etc/grid-security/dpmmgr/dpmkey.pem -cert:/etc/grid-security/dpmmgr/dpmcert.pem -md:sha256:sha1 -ca:2 -gmapopt:10 -vomsfun:/usr/$(xrdlibdir)/libXrdSecgsiVOMS.so - sec.protocol /usr/$(xrdlibdir) unix - xrootd.export / -+#xrd.tls /etc/grid-security/dpmmgr/dpmcert.pem /etc/grid-security/dpmmgr/dpmkey.pem -+#xrd.tlsca certdir /etc/grid-security/certificates -+#xrootd.tls none -+xrd.timeout idle 60m - ofs.cmslib libXrdDPMFinder.so.3 - ofs.osslib +cksio libXrdDPMOss.so.3 - ofs.authlib libXrdDPMRedirAcc.so.3 -diff --git a/src/puppet/dmlite/manifests/accounting.pp b/src/puppet/dmlite/manifests/accounting.pp -index 0fbf5330..c5404a6f 100644 ---- a/src/puppet/dmlite/manifests/accounting.pp -+++ b/src/puppet/dmlite/manifests/accounting.pp -@@ -7,7 +7,6 @@ - # site_name => 'praguelcg2', - # } - class dmlite::accounting ( -- Boolean $enabled = hiera('dmlite::accounting::enabled',true), - String $cron_interval = hiera('dmlite::accounting::cron_interval','daily'), - String $bdii_url = hiera('dmlite::accounting::bdii_url','ldap://lcg-bdii.cern.ch:2170'), - String $broker_network = hiera('dmlite::accounting::broker_network','PROD'), -@@ -44,13 +43,11 @@ class dmlite::accounting ( - } - - # install -- if $enabled { -- package {['python-daemon','python-ldap','python-lockfile','stomppy']: -- ensure => 'installed', -- } -+ package 
{['python-daemon','python-ldap','python-lockfile','stomppy']: -+ ensure => 'installed', - } - # apel-ssm also available in UMD repository -- if $enabled and $ssm_url == '' { -+ if $ssm_url == '' { - if $facts['os']['family'] == 'RedHat' { - $ssm_package_url = $facts['os']['release']['major'] ? { - '6' => 'https://github.com/apel/ssm/releases/download/2.3.0-2/apel-ssm-2.3.0-2.el6.noarch.rpm', -@@ -65,19 +62,13 @@ class dmlite::accounting ( - fail("missing ssm_url on unsupported os ${facts['os']['family']} (${facts['os']['name']} ${facts['os']['release']['major']})") - } - package { 'apel-ssm': -- ensure => $enabled ? { -- true => 'installed', -- false => absent, -- }, -+ ensure => 'installed', - source => $ssm_package_url, - provider => 'rpm' - } - - file {'/etc/apel/sender.cfg': -- ensure => $enabled ? { -- true => present, -- false => absent, -- }, -+ ensure => present, - owner => 'root', - group => 'root', - content => template('dmlite/ssm/sender.cfg.erb'), -@@ -96,17 +87,14 @@ class dmlite::accounting ( - } - - file {"/etc/cron.${cron_interval}/dmlite-StAR-accounting": -- ensure => $enabled ? { -- true => present, -- false => absent, -- }, -+ ensure => present, - owner => 'root', - group => 'root', - mode => '0755', - content => $cron_content, - require => Package['apel-ssm'] - } -- -+ - #purge old cron - cron { 'dmlite-star-accounting': - ensure => absent, -diff --git a/src/puppet/dmlite/manifests/xrootd.pp b/src/puppet/dmlite/manifests/xrootd.pp -index f5263865..ae5198a0 100644 ---- a/src/puppet/dmlite/manifests/xrootd.pp -+++ b/src/puppet/dmlite/manifests/xrootd.pp -@@ -8,7 +8,15 @@ class dmlite::xrootd ( - Boolean $xrootd_use_voms = true, - Boolean $xrootd_use_delegation = false, - Optional[String] $xrootd_tpc_options = '', -- Optional[String] $xrootd_sec_level = undef, -+ Optional[Array[String]] $xrootd_sec_level = undef, -+ Optional[String] $xrootd_tls = undef, -+ Optional[Stdlib::Unixpath] $xrootd_tls_cert = undef, -+ Optional[Stdlib::Unixpath] $xrootd_tls_key = undef, -+ Optional[Stdlib::Unixpath] $xrootd_tls_cafile = undef, -+ Optional[Stdlib::Unixpath] $xrootd_tls_capath = undef, -+ Optional[String] $xrootd_tls_caopts = undef, -+ Optional[String] $xrootd_tls_ciphers = undef, -+ Optional[String] $xrootd_tls_reuse = undef, - Optional[String] $xrootd_async = undef, - Optional[String] $xrootd_monitor = undef, - Boolean $xrootd_jemalloc = true, -@@ -142,6 +150,14 @@ class dmlite::xrootd ( - ofs_tpc => $ofs_tpc, - sec_protocol => [ $sec_protocol_disk, $sec_protocol_local ], - sec_level => $xrootd_sec_level, -+ tls => $xrootd_tls, -+ tls_cert => $xrootd_tls_cert, -+ tls_key => $xrootd_tls_key, -+ tls_cafile => $xrootd_tls_cafile, -+ tls_capath => $xrootd_tls_capath, -+ tls_caopts => $xrootd_tls_caopts, -+ tls_ciphers => $xrootd_tls_ciphers, -+ tls_reuse => $xrootd_tls_reuse, - dpm_listvoms => $dpm_listvoms, - use_dmlite_io => $enable_hdfs, - dpm_enable_dome => $dpm_enable_dome, -@@ -190,6 +206,14 @@ class dmlite::xrootd ( - xrd_debug => $dpm_xrootd_debug, - sec_protocol => [ $sec_protocol_redir, $sec_protocol_local ], - sec_level => $xrootd_sec_level, -+ tls => $xrootd_tls, -+ tls_cert => $xrootd_tls_cert, -+ tls_key => $xrootd_tls_key, -+ tls_cafile => $xrootd_tls_cafile, -+ tls_capath => $xrootd_tls_capath, -+ tls_caopts => $xrootd_tls_caopts, -+ tls_ciphers => $xrootd_tls_ciphers, -+ tls_reuse => $xrootd_tls_reuse, - dpm_listvoms => $dpm_listvoms, - dpm_mmreqhost => $dpm_mmreqhost, - dpm_defaultprefix => "${domainpath}/home", -@@ -242,6 +266,14 @@ class dmlite::xrootd ( - 
xrd_debug => $dpm_xrootd_debug, - sec_protocol => [$sec_protocol_redir, $sec_protocol_local], - sec_level => $xrootd_sec_level, -+ tls => $xrootd_tls, -+ tls_cert => $xrootd_tls_cert, -+ tls_key => $xrootd_tls_key, -+ tls_cafile => $xrootd_tls_cafile, -+ tls_capath => $xrootd_tls_capath, -+ tls_caopts => $xrootd_tls_caopts, -+ tls_ciphers => $xrootd_tls_ciphers, -+ tls_reuse => $xrootd_tls_reuse, - dpm_listvoms => $dpm_listvoms, - dpm_mmreqhost => $dpm_mmreqhost, - dpm_xrootd_serverport => $dpm_xrootd_serverport, -diff --git a/src/puppet/dmlite/manifests/xrootd/create_config.pp b/src/puppet/dmlite/manifests/xrootd/create_config.pp -index ecc42bc0..9fe6be97 100644 ---- a/src/puppet/dmlite/manifests/xrootd/create_config.pp -+++ b/src/puppet/dmlite/manifests/xrootd/create_config.pp -@@ -44,6 +44,15 @@ define dmlite::xrootd::create_config ( - $sec_protocol = $xrootd::config::sec_protocol, - $sec_level = $xrootd::config::sec_level, - -+ $tls = $xrootd::config::tls, -+ $tls_cert = $xrootd::config::tls_cert, -+ $tls_key = $xrootd::config::tls_key, -+ $tls_cafile = $xrootd::config::tls_cafile, -+ $tls_capath = $xrootd::config::tls_capath, -+ $tls_caopts = $xrootd::config::tls_caopts, -+ $tls_ciphers = $xrootd::config::tls_ciphers, -+ $tls_reuse = $xrootd::config::tls_reuse, -+ - $pss_origin = $xrootd::config::pss_origin, - - $dpm_listvoms = undef, -diff --git a/src/puppet/dmlite/manifests/xrootd/create_redir_config.pp b/src/puppet/dmlite/manifests/xrootd/create_redir_config.pp -index 35a84063..4df094fb 100644 ---- a/src/puppet/dmlite/manifests/xrootd/create_redir_config.pp -+++ b/src/puppet/dmlite/manifests/xrootd/create_redir_config.pp -@@ -47,6 +47,15 @@ define dmlite::xrootd::create_redir_config ( - $sec_protocol = $xrootd::config::sec_protocol, - $sec_level = $xrootd::config::sec_level, - -+ $tls = $xrootd::config::tls, -+ $tls_cert = $xrootd::config::tls_cert, -+ $tls_key = $xrootd::config::tls_key, -+ $tls_cafile = $xrootd::config::tls_cafile, -+ $tls_capath = $xrootd::config::tls_capath, -+ $tls_caopts = $xrootd::config::tls_caopts, -+ $tls_ciphers = $xrootd::config::tls_ciphers, -+ $tls_reuse = $xrootd::config::tls_reuse, -+ - $pss_setopt = $xrootd::config::pss_setopt, - $cms_cidtag = $xrootd::config::cms_cidtag, - $oss_statlib = $xrootd::config::oss_statlib, -diff --git a/src/puppet/dpm/manifests/disknode.pp b/src/puppet/dpm/manifests/disknode.pp -index fcb3230a..36cd4dfc 100644 ---- a/src/puppet/dpm/manifests/disknode.pp -+++ b/src/puppet/dpm/manifests/disknode.pp -@@ -54,9 +54,19 @@ class dpm::disknode ( - #xrootd tuning - Optional[String] $xrd_timeout = $dpm::params::xrd_timeout, - Boolean $xrootd_jemalloc = $dpm::params::xrootd_jemalloc, -- Optional[String] $xrootd_sec_level = $dpm::params::xrootd_sec_level, -+ Optional[Array[String]] $xrootd_sec_level = $dpm::params::xrootd_sec_level, - Optional[String] $xrootd_tpc_options = $dpm::params::xrootd_tpc_options, - -+ #xrootd TLS configuration -+ Optional[String] $xrootd_tls = $dpm::params::xrootd_tls, -+ Optional[Stdlib::Unixpath] $xrootd_tls_cert = $dpm::params::xrootd_tls_cert, -+ Optional[Stdlib::Unixpath] $xrootd_tls_key = $dpm::params::xrootd_tls_key, -+ Optional[Stdlib::Unixpath] $xrootd_tls_cafile = $dpm::params::xrootd_tls_cafile, -+ Optional[Stdlib::Unixpath] $xrootd_tls_capath = $dpm::params::xrootd_tls_capath, -+ Optional[String] $xrootd_tls_caopts = $dpm::params::xrootd_tls_caopts, -+ Optional[String] $xrootd_tls_ciphers = $dpm::params::xrootd_tls_ciphers, -+ Optional[String] $xrootd_tls_reuse = 
$dpm::params::xrootd_tls_reuse, -+ - #host dn - String $host_dn = $dpm::params::host_dn - -@@ -251,6 +261,14 @@ class dpm::disknode ( - xrootd_jemalloc => $xrootd_jemalloc, - xrootd_sec_level => $xrootd_sec_level, - xrootd_tpc_options => $xrootd_tpc_options, -+ xrootd_tls => $xrootd_tls, -+ xrootd_tls_cert => $xrootd_tls_cert, -+ xrootd_tls_key => $xrootd_tls_key, -+ xrootd_tls_cafile => $xrootd_tls_cafile, -+ xrootd_tls_capath => $xrootd_tls_capath, -+ xrootd_tls_caopts => $xrootd_tls_caopts, -+ xrootd_tls_ciphers => $xrootd_tls_ciphers, -+ xrootd_tls_reuse => $xrootd_tls_reuse, - legacy => $configure_legacy, - dpm_enable_dome => $configure_dome, - dpm_xrdhttp_secret_key => $token_password, -diff --git a/src/puppet/dpm/manifests/head_disknode.pp b/src/puppet/dpm/manifests/head_disknode.pp -index b13b8baa..b406ee86 100644 ---- a/src/puppet/dpm/manifests/head_disknode.pp -+++ b/src/puppet/dpm/manifests/head_disknode.pp -@@ -70,9 +70,19 @@ class dpm::head_disknode ( - #xrootd tuning - Optional[String] $xrd_timeout = $dpm::params::xrd_timeout, - Boolean $xrootd_jemalloc = $dpm::params::xrootd_jemalloc, -- Optional[String] $xrootd_sec_level = $dpm::params::xrootd_sec_level, -+ Optional[Array[String]] $xrootd_sec_level = $dpm::params::xrootd_sec_level, - Optional[String] $xrootd_tpc_options = $dpm::params::xrootd_tpc_options, - -+ #xrootd TLS configuration -+ Optional[String] $xrootd_tls = $dpm::params::xrootd_tls, -+ Optional[Stdlib::Unixpath] $xrootd_tls_cert = $dpm::params::xrootd_tls_cert, -+ Optional[Stdlib::Unixpath] $xrootd_tls_key = $dpm::params::xrootd_tls_key, -+ Optional[Stdlib::Unixpath] $xrootd_tls_cafile = $dpm::params::xrootd_tls_cafile, -+ Optional[Stdlib::Unixpath] $xrootd_tls_capath = $dpm::params::xrootd_tls_capath, -+ Optional[String] $xrootd_tls_caopts = $dpm::params::xrootd_tls_caopts, -+ Optional[String] $xrootd_tls_ciphers = $dpm::params::xrootd_tls_ciphers, -+ Optional[String] $xrootd_tls_reuse = $dpm::params::xrootd_tls_reuse, -+ - String $site_name = $dpm::params::site_name, - - #admin DN -@@ -297,6 +307,14 @@ class dpm::head_disknode ( - xrootd_jemalloc => $xrootd_jemalloc, - xrootd_sec_level => $xrootd_sec_level, - xrootd_tpc_options => $xrootd_tpc_options, -+ xrootd_tls => $xrootd_tls, -+ xrootd_tls_cert => $xrootd_tls_cert, -+ xrootd_tls_key => $xrootd_tls_key, -+ xrootd_tls_cafile => $xrootd_tls_cafile, -+ xrootd_tls_capath => $xrootd_tls_capath, -+ xrootd_tls_caopts => $xrootd_tls_caopts, -+ xrootd_tls_ciphers => $xrootd_tls_ciphers, -+ xrootd_tls_reuse => $xrootd_tls_reuse, - site_name => $site_name, - legacy => $configure_legacy, - dpm_enable_dome => $configure_dome, -@@ -350,14 +368,15 @@ class dpm::head_disknode ( - } - - -- class{'dmlite::accounting': -- enabled => $configure_star, -- site_name => $site_name, -- dbuser => $db_user, -- dbpwd => $db_pass, -- dbhost => $db_host, -- nsdbname => $ns_db, -- dpmdbname => $dpm_db, -+ if ($configure_star) { -+ class{'dmlite::accounting': -+ site_name => $site_name, -+ dbuser => $db_user, -+ dbpwd => $db_pass, -+ dbhost => $db_host, -+ nsdbname => $ns_db, -+ dpmdbname => $dpm_db, -+ } - } - - if $argus_banning { -diff --git a/src/puppet/dpm/manifests/headnode.pp b/src/puppet/dpm/manifests/headnode.pp -index 889439ff..93aba0e5 100644 ---- a/src/puppet/dpm/manifests/headnode.pp -+++ b/src/puppet/dpm/manifests/headnode.pp -@@ -72,9 +72,19 @@ class dpm::headnode ( - #xrootd tuning - Optional[String] $xrd_timeout = $dpm::params::xrd_timeout, - Boolean $xrootd_jemalloc = $dpm::params::xrootd_jemalloc, -- Optional[String] 
$xrootd_sec_level = $dpm::params::xrootd_sec_level, -+ Optional[Array[String]] $xrootd_sec_level = $dpm::params::xrootd_sec_level, - Optional[String] $xrootd_tpc_options = $dpm::params::xrootd_tpc_options, - -+ #xrootd TLS configuration -+ Optional[String] $xrootd_tls = $dpm::params::xrootd_tls, -+ Optional[Stdlib::Unixpath] $xrootd_tls_cert = $dpm::params::xrootd_tls_cert, -+ Optional[Stdlib::Unixpath] $xrootd_tls_key = $dpm::params::xrootd_tls_key, -+ Optional[Stdlib::Unixpath] $xrootd_tls_cafile = $dpm::params::xrootd_tls_cafile, -+ Optional[Stdlib::Unixpath] $xrootd_tls_capath = $dpm::params::xrootd_tls_capath, -+ Optional[String] $xrootd_tls_caopts = $dpm::params::xrootd_tls_caopts, -+ Optional[String] $xrootd_tls_ciphers = $dpm::params::xrootd_tls_ciphers, -+ Optional[String] $xrootd_tls_reuse = $dpm::params::xrootd_tls_reuse, -+ - String $site_name = $dpm::params::site_name, - - #admin DN -@@ -309,6 +319,14 @@ class dpm::headnode ( - xrootd_jemalloc => $xrootd_jemalloc, - xrootd_sec_level => $xrootd_sec_level, - xrootd_tpc_options => $xrootd_tpc_options, -+ xrootd_tls => $xrootd_tls, -+ xrootd_tls_cert => $xrootd_tls_cert, -+ xrootd_tls_key => $xrootd_tls_key, -+ xrootd_tls_cafile => $xrootd_tls_cafile, -+ xrootd_tls_capath => $xrootd_tls_capath, -+ xrootd_tls_caopts => $xrootd_tls_caopts, -+ xrootd_tls_ciphers => $xrootd_tls_ciphers, -+ xrootd_tls_reuse => $xrootd_tls_reuse, - site_name => $site_name, - legacy => $configure_legacy, - dpm_enable_dome => $configure_dome, -@@ -355,14 +373,15 @@ class dpm::headnode ( - } - } - -- class{'dmlite::accounting': -- enabled => $configure_star, -- site_name => $site_name, -- dbuser => $db_user, -- dbpwd => $db_pass, -- dbhost => $db_host, -- nsdbname => $ns_db, -- dpmdbname => $dpm_db, -+ if ($configure_star) { -+ class{'dmlite::accounting': -+ site_name => $site_name, -+ dbuser => $db_user, -+ dbpwd => $db_pass, -+ dbhost => $db_host, -+ nsdbname => $ns_db, -+ dpmdbname => $dpm_db, -+ } - } - - if $argus_banning { -diff --git a/src/puppet/dpm/manifests/params.pp b/src/puppet/dpm/manifests/params.pp -index 807496a7..a7861a22 100644 ---- a/src/puppet/dpm/manifests/params.pp -+++ b/src/puppet/dpm/manifests/params.pp -@@ -98,6 +98,16 @@ class dpm::params { - $xrootd_sec_level = hiera('dpm::params::xrootd_sec_level',undef) - $xrootd_tpc_options = hiera('dpm::params::xrootd_tpc_options','xfr 25') - -+ #xrootd TLS configuration -+ $xrootd_tls = hiera('dpm::params::xrootd_tls',undef) -+ $xrootd_tls_cert = hiera('dpm::params::xrootd_tls_cert',undef) -+ $xrootd_tls_key = hiera('dpm::params::xrootd_tls_key',undef) -+ $xrootd_tls_cafile = hiera('dpm::params::xrootd_tls_cafile',undef) -+ $xrootd_tls_capath = hiera('dpm::params::xrootd_tls_capath',undef) -+ $xrootd_tls_caopts = hiera('dpm::params::xrootd_tls_caopts',undef) -+ $xrootd_tls_ciphers = hiera('dpm::params::xrootd_tls_ciphers',undef) -+ $xrootd_tls_reuse = hiera('dpm::params::xrootd_tls_reuse',undef) -+ - $site_name = hiera('dpm::params::site_name',undef) - - $new_installation = hiera('dpm::params::new_installation',true) -diff --git a/src/puppet/xrootd/manifests/create_config.pp b/src/puppet/xrootd/manifests/create_config.pp -index fc84b1fa..32d603e1 100644 ---- a/src/puppet/xrootd/manifests/create_config.pp -+++ b/src/puppet/xrootd/manifests/create_config.pp -@@ -56,6 +56,15 @@ define xrootd::create_config ( - $sec_protocol = $xrootd::config::sec_protocol, - $sec_level = $xrootd::config::sec_level, - -+ $tls = $xrootd::config::tls, -+ $tls_cert = $xrootd::config::tls_cert, -+ $tls_key = 
$xrootd::config::tls_key, -+ $tls_cafile = $xrootd::config::tls_cafile, -+ $tls_capath = $xrootd::config::tls_capath, -+ $tls_caopts = $xrootd::config::tls_caopts, -+ $tls_ciphers = $xrootd::config::tls_ciphers, -+ $tls_reuse = $xrootd::config::tls_reuse, -+ - $pss_origin = $xrootd::config::pss_origin - ) { - include xrootd::config -diff --git a/src/puppet/xrootd/manifests/params.pp b/src/puppet/xrootd/manifests/params.pp -index 279c3a1a..55fda2b4 100644 ---- a/src/puppet/xrootd/manifests/params.pp -+++ b/src/puppet/xrootd/manifests/params.pp -@@ -47,6 +47,15 @@ class xrootd::params { - $sec_protocol = [ ] - $sec_level = [ ] - -+ $tls = undef -+ $tls_cert = undef -+ $tls_key = undef -+ $tls_cafile = undef -+ $tls_capath = undef -+ $tls_caopts = undef -+ $tls_ciphers = undef -+ $tls_reuse = undef -+ - $xrd_port = undef - #$xrd_network = "nodnr" - $xrd_network = undef -diff --git a/src/puppet/xrootd/templates/xrootd.cfg.erb b/src/puppet/xrootd/templates/xrootd.cfg.erb -index 5594ed6d..e57cc73e 100644 ---- a/src/puppet/xrootd/templates/xrootd.cfg.erb -+++ b/src/puppet/xrootd/templates/xrootd.cfg.erb -@@ -70,6 +70,23 @@ xrootd.export <%= File.join(export,"") %> - <% if @xrd_port -%> - xrd.port <%= @xrd_port %> - <% end -%> -+<% if @tls_cert -%> -+xrd.tls <%= @tls_cert -%><% if @tls_key -%> <%= @tls_key -%><% end %> -+<% if @tls_cafile or @tls_capath -%> -+xrd.tlsca<% if @tls_cafile -%> certfile <%= @tls_cafile -%><% end -%><% if @tls_capath -%> certdir <%= @tls_capath -%><% end -%><% if @tls_caopts -%> <%= @tls_caopts -%><% end %> -+<% else -%> -+xrd.tlsca noverify -+<% end -%> -+<% if @tls_ciphers -%> -+xrd.tlsciphers <%= @tls_ciphers %> -+<% end -%> -+<% if @tls -%> -+xrootd.tls <%= @tls %> -+<% end -%> -+<% if @tls_reuse -%> -+xrootd.tlsreuse <%= @tls_reuse %> -+<% end -%> -+<% end -%> - <% if @xrd_timeout -%> - xrd.timeout <%= @xrd_timeout %> - <% end -%> diff --git a/dmlite-LCGDM-2951-invalid_fd_formatting.patch b/dmlite-LCGDM-2951-invalid_fd_formatting.patch deleted file mode 100644 index 4618dc1..0000000 --- a/dmlite-LCGDM-2951-invalid_fd_formatting.patch +++ /dev/null @@ -1,153 +0,0 @@ -commit cced98651e9650ac4b05cca6a7c12d18a32b6afc -Author: Petr Vokac -Date: Fri Oct 16 10:01:39 2020 +0200 - - LCGDM-2951 fix file descriptor string formatting - -diff --git a/src/plugins/adapter/IO.cpp b/src/plugins/adapter/IO.cpp -index f3ab3fb0..4d1ebbb7 100644 ---- a/src/plugins/adapter/IO.cpp -+++ b/src/plugins/adapter/IO.cpp -@@ -242,7 +242,7 @@ size_t StdIOHandler::read(char* buffer, size_t count) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - eof_ = (static_cast(nbytes) < count); -@@ -260,7 +260,7 @@ size_t StdIOHandler::write(const char* buffer, size_t count) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - return static_cast(nbytes); -@@ -276,7 +276,7 @@ size_t StdIOHandler::readv(const struct iovec* vector, size_t count) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - return static_cast(nbytes); -@@ -292,7 +292,7 @@ size_t 
diff --git a/dmlite-LCGDM-2951-invalid_fd_formatting.patch b/dmlite-LCGDM-2951-invalid_fd_formatting.patch
deleted file mode 100644
index 4618dc1..0000000
--- a/dmlite-LCGDM-2951-invalid_fd_formatting.patch
+++ /dev/null
@@ -1,153 +0,0 @@
-commit cced98651e9650ac4b05cca6a7c12d18a32b6afc
-Author: Petr Vokac
-Date: Fri Oct 16 10:01:39 2020 +0200
-
-    LCGDM-2951 fix file descriptor string formatting
-
-diff --git a/src/plugins/adapter/IO.cpp b/src/plugins/adapter/IO.cpp
-index f3ab3fb0..4d1ebbb7 100644
---- a/src/plugins/adapter/IO.cpp
-+++ b/src/plugins/adapter/IO.cpp
-@@ -242,7 +242,7 @@ size_t StdIOHandler::read(char* buffer, size_t count)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   eof_ = (static_cast(nbytes) < count);
-@@ -260,7 +260,7 @@ size_t StdIOHandler::write(const char* buffer, size_t count)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -276,7 +276,7 @@ size_t StdIOHandler::readv(const struct iovec* vector, size_t count)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -292,7 +292,7 @@ size_t StdIOHandler::writev(const struct iovec* vector, size_t count)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -308,7 +308,7 @@ size_t StdIOHandler::pread(void* buffer, size_t count, off_t offset)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -324,7 +324,7 @@ size_t StdIOHandler::pwrite(const void* buffer, size_t count, off_t offset)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -336,7 +336,7 @@ void StdIOHandler::seek(off_t offset, Whence whence)
- {
-   Log(Logger::Lvl4, adapterlogmask, adapterlogname, " fd:" << this->fd_ << " offs:" << offset);
-   if (::lseek64(this->fd_, offset, whence) == ((off_t) - 1))
--    throw DmException(errno, "Could not seek on fd %s ", this->fd_);
-+    throw DmException(errno, "Could not seek on fd %d", this->fd_);
- }
-
-
-diff --git a/src/plugins/adapter/RFIO.cpp b/src/plugins/adapter/RFIO.cpp
-index 25568b79..3dab1566 100644
---- a/src/plugins/adapter/RFIO.cpp
-+++ b/src/plugins/adapter/RFIO.cpp
-@@ -303,7 +303,7 @@ void StdRFIOHandler::seek(off_t offset, Whence whence)
-
-   lk l(islocal_ ? 0 : &this->mtx_);
-   if (rfio_lseek64(this->fd_, offset, whence) == -1)
--    throw DmException(serrno, "Could not seek fd %s", this->fd_);
-+    throw DmException(serrno, "Could not seek fd %d", this->fd_);
-
-   Log(Logger::Lvl3, adapterRFIOlogmask, adapterRFIOlogname, "Exiting. offs:" << offset);
- }
-diff --git a/src/plugins/domeadapter/DomeAdapterIO.cpp b/src/plugins/domeadapter/DomeAdapterIO.cpp
-index f89c16f8..63a0b4ed 100644
---- a/src/plugins/domeadapter/DomeAdapterIO.cpp
-+++ b/src/plugins/domeadapter/DomeAdapterIO.cpp
-@@ -355,7 +355,7 @@ size_t DomeIOHandler::read(char* buffer, size_t count)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   eof_ = (static_cast(nbytes) < count);
-@@ -373,7 +373,7 @@ size_t DomeIOHandler::write(const char* buffer, size_t count)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -389,7 +389,7 @@ size_t DomeIOHandler::readv(const struct iovec* vector, size_t count)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -405,7 +405,7 @@ size_t DomeIOHandler::writev(const struct iovec* vector, size_t count)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -421,7 +421,7 @@ size_t DomeIOHandler::pread(void* buffer, size_t count, off_t offset)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -437,7 +437,7 @@ size_t DomeIOHandler::pwrite(const void* buffer, size_t count, off_t offset)
-   if (nbytes < 0) {
-     char errbuffer[128];
-     dpm_strerror_r(errno, errbuffer, sizeof(errbuffer));
--    throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_);
-+    throw DmException(errno, "%s on fd %d", errbuffer, this->fd_);
-   }
-
-   return static_cast(nbytes);
-@@ -449,7 +449,7 @@ void DomeIOHandler::seek(off_t offset, Whence whence)
- {
-   Log(Logger::Lvl4, domeadapterlogmask, domeadapterlogname, " fd:" << this->fd_ << " offs:" << offset);
-   if (::lseek64(this->fd_, offset, whence) == ((off_t) - 1))
--    throw DmException(errno, "Could not seek on fd %s ", this->fd_);
-+    throw DmException(errno, "Could not seek on fd %d", this->fd_);
- }
-
-
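The LCGDM-2951 bug class is worth a note: `this->fd_` is an `int`, but the messages used `%s`, so any error path that formatted the descriptor read an integer as a pointer, which is undefined behavior. Compilers can catch this when the variadic function is annotated; a hedged sketch using the GCC/Clang format attribute (not the dmlite `DmException` signature):

```cpp
#include <cstdarg>
#include <cstdio>

// The attribute tells the compiler that argument 1 is a printf-style format
// string and that checking of the variadic arguments starts at argument 2.
__attribute__((format(printf, 1, 2)))
void report(const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  std::vfprintf(stderr, fmt, ap);
  va_end(ap);
}

int main() {
  int fd = 7;
  report("error on fd %d\n", fd);    // ok
  // report("error on fd %s\n", fd); // -Wformat warning: %s expects char*, got int
}
```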
offs:" << offset); - } -diff --git a/src/plugins/domeadapter/DomeAdapterIO.cpp b/src/plugins/domeadapter/DomeAdapterIO.cpp -index f89c16f8..63a0b4ed 100644 ---- a/src/plugins/domeadapter/DomeAdapterIO.cpp -+++ b/src/plugins/domeadapter/DomeAdapterIO.cpp -@@ -355,7 +355,7 @@ size_t DomeIOHandler::read(char* buffer, size_t count) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - eof_ = (static_cast(nbytes) < count); -@@ -373,7 +373,7 @@ size_t DomeIOHandler::write(const char* buffer, size_t count) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - return static_cast(nbytes); -@@ -389,7 +389,7 @@ size_t DomeIOHandler::readv(const struct iovec* vector, size_t count) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - return static_cast(nbytes); -@@ -405,7 +405,7 @@ size_t DomeIOHandler::writev(const struct iovec* vector, size_t count) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - return static_cast(nbytes); -@@ -421,7 +421,7 @@ size_t DomeIOHandler::pread(void* buffer, size_t count, off_t offset) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - return static_cast(nbytes); -@@ -437,7 +437,7 @@ size_t DomeIOHandler::pwrite(const void* buffer, size_t count, off_t offset) - if (nbytes < 0) { - char errbuffer[128]; - dpm_strerror_r(errno, errbuffer, sizeof(errbuffer)); -- throw DmException(errno, "%s on fd %s ", errbuffer, this->fd_); -+ throw DmException(errno, "%s on fd %d", errbuffer, this->fd_); - } - - return static_cast(nbytes); -@@ -449,7 +449,7 @@ void DomeIOHandler::seek(off_t offset, Whence whence) - { - Log(Logger::Lvl4, domeadapterlogmask, domeadapterlogname, " fd:" << this->fd_ << " offs:" << offset); - if (::lseek64(this->fd_, offset, whence) == ((off_t) - 1)) -- throw DmException(errno, "Could not seek on fd %s ", this->fd_); -+ throw DmException(errno, "Could not seek on fd %d", this->fd_); - } - - diff --git a/dmlite-LCGDM-2953-http-double-close.patch b/dmlite-LCGDM-2953-http-double-close.patch deleted file mode 100644 index 532b4e6..0000000 --- a/dmlite-LCGDM-2953-http-double-close.patch +++ /dev/null @@ -1,103 +0,0 @@ -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.h b/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.h -index 42164d74..62c204d3 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.h -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.h -@@ -80,7 +80,6 @@ struct dav_resource_private - size_t fsize; - dmlite_location loc; - dmlite_fd *fd; -- int fileno; // Stores fd underlying system file descriptor - int copy_already_redirected; - char *namespace_path; - const char 
*content_type; -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_disk/repository.c b/src/plugins/apache-httpd/src/mod_lcgdm_disk/repository.c -index 14d90453..df7a7573 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_disk/repository.c -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_disk/repository.c -@@ -118,10 +118,10 @@ static int dav_finish_writing(dav_resource_private* info) - dmlite_put_abort(info->ctx, &info->loc); - - if (info->fd) { -+ apr_pool_cleanup_kill(info->request->connection->pool, info->fd, dav_shared_fclose); - dmlite_fclose(info->fd); -+ info->fd = NULL; - } -- apr_pool_cleanup_kill(info->request->connection->pool, info->fd, dav_shared_fclose); -- info->fd = NULL; - apr_table_unset(info->request->connection->notes, "dav_disk_info"); - return e; - } -@@ -333,9 +336,6 @@ static dav_error *dav_disk_internal_get_resource(request_rec *r, - dmlite_fstat(info->fd, &fstat); - info->fsize = fstat.st_size; - -- /* Store underlying file descriptor */ -- info->fileno = dmlite_fileno(info->fd); -- - /* Keep it open as long as the connection lives */ - apr_pool_pre_cleanup_register(r->connection->pool, info->fd, dav_shared_fclose); - } -@@ -726,31 +728,40 @@ static dav_error *dav_disk_deliver(const dav_resource *resource, - apr_bucket_brigade *bb; - apr_bucket *bkt; - dav_resource_private *info; -- -- info = resource->info; -- bb = apr_brigade_create(resource->pool, output->c->bucket_alloc); -+ int fileno; - - if (resource->collection) { - return dav_shared_new_error(resource->info->request, NULL, - HTTP_FORBIDDEN, 0, "Can not list the content of a disk"); - } - -+ info = resource->info; -+ bb = apr_brigade_create(resource->pool, output->c->bucket_alloc); -+ fileno = dmlite_fileno(info->fd); -+ - /* Apache core dir config */ - core_dir_config *coreconf = (core_dir_config *) ap_get_module_config( - info->request->per_dir_config, &core_module); - - /* Try to see if we can use sendfile */ -- if (info->fileno > -1 && coreconf->enable_sendfile == ENABLE_SENDFILE_ON) { -+ if (fileno > -1 && coreconf->enable_sendfile == ENABLE_SENDFILE_ON) { - apr_file_t* apr_file = NULL; -- if (apr_os_file_put(&apr_file, &info->fileno, -+ if (apr_os_file_put(&apr_file, &fileno, - APR_FOPEN_READ | APR_FOPEN_SENDFILE_ENABLED, - info->request->pool) != APR_SUCCESS) { - return dav_shared_new_error(resource->info->request, NULL, - HTTP_INTERNAL_SERVER_ERROR, 0, - "Could not bind the file descriptor to the apr_file"); - } -- apr_pool_pre_cleanup_register(info->request->pool, apr_file, -- (apr_status_t (*)(void *))apr_file_close); -+ // apr_file_close should be used to release apr_file, but this function -+ // also close associated filehandle which would be closed second time -+ // by dmlite_fclose (called by dav_shared_fclose registered in -+ // dav_disk_internal_get_resource). 
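The LCGDM-2953 crashes come from one descriptor being closed twice: once by the pool cleanup registered at open time and once explicitly, and in the old `dav_finish_writing` the `apr_pool_cleanup_kill()` even ran *after* the handle had been closed and nulled, so it was given the wrong pointer. The safe ordering is "unregister first, close once, null the handle"; a minimal close-once sketch in plain POSIX, not the APR/dmlite API:

```cpp
#include <unistd.h>

struct FdOwner {
  int fd = -1;

  // Close exactly once; any further call is a harmless no-op.
  void closeOnce() {
    if (fd >= 0) {
      ::close(fd);
      fd = -1;   // invalidate before anyone else can observe the stale value
    }
  }

  ~FdOwner() { closeOnce(); }
};
```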
Function apr_os_file_put is called -+ // with flags that doesn't currently allocate any additioanal resources -+ // and it is therefore safe (but still a bit danger) not to register -+ // apr_file_close clenup function -+ //apr_pool_pre_cleanup_register(info->request->pool, apr_file, -+ // (apr_status_t (*)(void *))apr_file_close); - - /* Split in chunks sendfile can handle - * Adapted from mod_xsendfile */ -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_ns/delivery.c b/src/plugins/apache-httpd/src/mod_lcgdm_ns/delivery.c -index a8e72b9d..613539c6 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_ns/delivery.c -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_ns/delivery.c -@@ -384,9 +385,10 @@ dav_error *dav_ns_deliver_virtual(const dav_resource *resource, - if (rbytes < 0) { - err = dav_shared_new_error(resource->info->request, NULL, HTTP_INTERNAL_SERVER_ERROR, - "Could not read: %s", dmlite_ferror(fd)); -- dmlite_fclose(fd); - } -+ dmlite_fclose(fd); - - dmlite_location_free(resource->info->virtual_location); -+ resource->info->virtual_location = NULL; - return err; - } diff --git a/dmlite-LCGDM-2954-pass-tpc-error.patch b/dmlite-LCGDM-2954-pass-tpc-error.patch deleted file mode 100644 index 87dad4c..0000000 --- a/dmlite-LCGDM-2954-pass-tpc-error.patch +++ /dev/null @@ -1,188 +0,0 @@ -diff --git a/src/plugins/apache-httpd/src/client/htext_api.c b/src/plugins/apache-httpd/src/client/htext_api.c -index 7ce0dc5f..1c3df75c 100644 ---- a/src/plugins/apache-httpd/src/client/htext_api.c -+++ b/src/plugins/apache-httpd/src/client/htext_api.c -@@ -319,7 +319,8 @@ int htext_perform(htext_handle *handle) - - /* Launch thread and return */ - handle->status = HTEXTS_STARTING; -- htext_log(handle, "Delegating to the proper handle (%s)", performer_str); -+ htext_log(handle, "Delegating to the proper handle (%s %s %s)", performer_str, -+ GETSTR(handle, HTEXTOP_SOURCEURL), GETSTR(handle, HTEXTOP_DESTINATIONURL)); - return pthread_create(&(handle->thread), NULL, performer, handle); - } - -diff --git a/src/plugins/apache-httpd/src/client/htext_common.c b/src/plugins/apache-httpd/src/client/htext_common.c -index 488e33a0..6a64381f 100644 ---- a/src/plugins/apache-httpd/src/client/htext_common.c -+++ b/src/plugins/apache-httpd/src/client/htext_common.c -@@ -42,6 +42,10 @@ void htext_partial_clean(htext_chunk *p) - GETIO(p->handle) ->close(p->fd); - if (p->chunk_rconn && *(p->chunk_rconn)) - free(*(p->chunk_rconn)); -+ if (p->error_string) -+ free(p->error_string); -+ if (p->http_response) -+ free(p->http_response); - if (p->location) - free(p->location); - if (p->headers) -@@ -108,6 +112,8 @@ int htext_error(htext_handle *handle, const char *fmt, ...) 
- n = vsnprintf(buffer, sizeof(buffer), fmt, args); - va_end(args); - -+ if (handle->error_string) -+ free(handle->error_string); - handle->error_string = strdup(buffer); - } - handle->status = HTEXTS_FAILED; -@@ -130,6 +136,9 @@ size_t htext_header_callback(void *buffer, size_t size, size_t nmemb, void *st) - if (strncasecmp("HTTP/", line, 5) == 0) { - p = index(line, ' ') + 1; - partial->http_status = atoi(p); -+ /* Only use first response in case multiple header lines starts with HTTP */ -+ if (!partial->http_response) -+ partial->http_response = trim(strdup(p+4)); - /* Check it is no redirect */ - if ((partial->http_status < 300 || partial->http_status >= 400)) { - sem_post(&(partial->final)); -@@ -150,6 +159,9 @@ size_t htext_header_callback(void *buffer, size_t size, size_t nmemb, void *st) - else if (strncasecmp("Location:", line, 9) == 0 - || strncasecmp("Content-Location:", line, 17) == 0) { - p = index(line, ':') + 1; -+ /* Just in case of multiple location headers */ -+ if (partial->location) -+ free(partial->location); - partial->location = trim(strdup(p)); - } - /* File size */ -@@ -297,4 +309,4 @@ struct curl_slist *htext_copy_slist(struct curl_slist *list) - list = list->next; - } - return copy; --} -\ No newline at end of file -+} -diff --git a/src/plugins/apache-httpd/src/client/htext_copy.c b/src/plugins/apache-httpd/src/client/htext_copy.c -index ae665e97..88c4aacd 100644 ---- a/src/plugins/apache-httpd/src/client/htext_copy.c -+++ b/src/plugins/apache-httpd/src/client/htext_copy.c -@@ -249,7 +249,7 @@ void *htext_copy_method(void *h) - htext_error(handle, err_buffer); - } - else if (control.http_status >= 400 || handle->status == HTEXTS_FAILED) { -- htext_error(handle, NULL ); -+ htext_error(handle, control.http_response); - } - else { - handle->status = HTEXTS_SUCCEEDED; -diff --git a/src/plugins/apache-httpd/src/client/htext_get.c b/src/plugins/apache-httpd/src/client/htext_get.c -index 42c0b0e3..0fb49bde 100644 ---- a/src/plugins/apache-httpd/src/client/htext_get.c -+++ b/src/plugins/apache-httpd/src/client/htext_get.c -@@ -61,7 +61,7 @@ static void* htext_get_subthread(void *pp) - - /* Perform */ - if (curl_easy_perform(partial->curl) != CURLE_OK) { -- partial->handle->error_string = strdup(err_buffer); -+ partial->error_string = strdup(err_buffer); - partial->handle->status = HTEXTS_FAILED; - } - -@@ -144,7 +144,8 @@ void *htext_get_method(void *h) - } - - if (head.http_status >= 400 || handle->status == HTEXTS_FAILED) { -- htext_log(handle, "Failed to get the file size, so no streaming code: %d", head.http_status); -+ htext_log(handle, "Failed to get the file size, so no streaming code: %d (%s)", -+ head.http_status, head.http_response ? head.http_response : "no response header"); - } - /* Did we get the location, and is it cacheable? 
*/ - else if (head.location && head.redirect_is_cacheable) { -@@ -256,13 +257,18 @@ void *htext_get_method(void *h) - if (handle->status != HTEXTS_FAILED) - handle->http_status = partial_array[i].http_status; - -- /* Clean partial */ -- htext_partial_clean(&(partial_array[i])); -+ if (!handle->error_string && partial_array[i].error_string) -+ handle->error_string = strdup(partial_array[i].error_string); - - /* Check code */ - if (handle->http_status < 200 || handle->http_status >= 300) { - handle->status = HTEXTS_FAILED; -+ if (!handle->error_string && partial_array[i].http_response) -+ handle->error_string = strdup(partial_array[i].http_response); - } -+ -+ /* Clean partial */ -+ htext_partial_clean(&(partial_array[i])); - } - - /* Done */ -diff --git a/src/plugins/apache-httpd/src/client/htext_private.h b/src/plugins/apache-httpd/src/client/htext_private.h -index eb872a72..0a406df7 100644 ---- a/src/plugins/apache-httpd/src/client/htext_private.h -+++ b/src/plugins/apache-httpd/src/client/htext_private.h -@@ -87,6 +87,8 @@ struct htext_chunk - - CURL *curl; /* The CURL handle to use */ - unsigned http_status; /* The HTTP status for this specific partial */ -+ char *http_response; /* The HTTP response line without code */ -+ char *error_string; /* When an error occurred, the description is set here */ - - short redirect_is_cacheable; /* Can cache the redirect location? */ - -diff --git a/src/plugins/apache-httpd/src/client/htext_put.c b/src/plugins/apache-httpd/src/client/htext_put.c -index d09763aa..0c3e4c1d 100644 ---- a/src/plugins/apache-httpd/src/client/htext_put.c -+++ b/src/plugins/apache-httpd/src/client/htext_put.c -@@ -115,7 +115,8 @@ static void* htext_put_subthread(void *pp) - - /* Perform */ - if (curl_easy_perform(partial->curl) != CURLE_OK) { -- htext_error(partial->handle, err_buffer); -+ partial->error_string = strdup(err_buffer); -+ partial->handle->status = HTEXTS_FAILED; - sem_post(&(partial->final)); - } - -@@ -304,9 +305,15 @@ void *htext_put_method(void *h) - if (handle->status != HTEXTS_FAILED) - handle->http_status = partial_array[i].http_status; - -+ if (!handle->error_string && partial_array[i].error_string) -+ handle->error_string = strdup(partial_array[i].error_string); -+ - /* Check code */ -- if (handle->http_status < 200 || handle->http_status >= 300) - handle->status = HTEXTS_FAILED; -+ if (handle->http_status < 200 || handle->http_status >= 300) { - handle->status = HTEXTS_FAILED; -+ if (!handle->error_string && partial_array[i].http_response) -+ handle->error_string = strdup(partial_array[i].http_response); -+ } - - htext_partial_clean(&(partial_array[i])); - -@@ -359,7 +366,8 @@ void *htext_put_method(void *h) - - handle->http_status = control.http_status; - if (control.http_status < 200 || control.http_status >= 300) -- htext_error(handle, "Final HEAD failed!"); -+ htext_error(handle, "Final HEAD failed with code %d (%s)", control.http_status, -+ control.http_response ? control.http_response : "no response header"); - - htext_partial_clean(&control); - } -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c b/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -index 4f1b1744..dd586da6 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -@@ -186,6 +186,7 @@ static void dav_disk_copy_log(htext_handle *handle, HTEXT_LOG_TYPE type, - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ddc->request, "| %s", msg); - break; - default: -+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ddc->request, "?%d? %s", type, msg); - break; - } - }
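The core of LCGDM-2954 is that each chunk now keeps the reason phrase of the first `HTTP/...` status line it sees and propagates it, together with per-chunk curl error strings, up to the handle instead of failing with a bare NULL. The parsing in `htext_header_callback` takes the reason phrase at `p+4`, which relies on the status code being exactly three digits. A standalone sketch of that status-line parse, under the same three-digit assumption (not the htext code itself):

```cpp
#include <cstdlib>
#include <iostream>
#include <string>

/* Parse "HTTP/1.1 403 Forbidden" into (403, "Forbidden"), mirroring the
 * p = index(line, ' ') + 1; atoi(p); trim(strdup(p + 4)) logic above:
 * the reason phrase starts 4 bytes past the space, i.e. after "403 ". */
static bool parse_status_line(const std::string &line, int &code, std::string &reason)
{
    if (line.compare(0, 5, "HTTP/") != 0)
        return false;
    std::string::size_type sp = line.find(' ');
    if (sp == std::string::npos)
        return false;
    code = std::atoi(line.c_str() + sp + 1);                 /* assumes a numeric code */
    reason = (sp + 4 <= line.size()) ? line.substr(sp + 4) : "";
    return true;
}

int main()
{
    int code = 0;
    std::string reason;
    if (parse_status_line("HTTP/1.1 403 Forbidden", code, reason))
        std::cout << code << " -> '" << reason << "'\n";     /* 403 -> 'Forbidden' */
}
```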
%s", type, msg); - break; - } - } diff --git a/dmlite-LCGDM-2955-skip-dpm2.patch b/dmlite-LCGDM-2955-skip-dpm2.patch deleted file mode 100644 index 86b4453..0000000 --- a/dmlite-LCGDM-2955-skip-dpm2.patch +++ /dev/null @@ -1,36 +0,0 @@ -diff --git a/shell/src/interpreter.py b/shell/src/interpreter.py -index d18f918b..8a2d79f7 100644 ---- a/shell/src/interpreter.py -+++ b/shell/src/interpreter.py -@@ -2549,6 +2549,22 @@ class Util(object): - - return adminUserName - -+ @staticmethod -+ def getlisteningports(): -+ """Check which ports are being listened on""" -+ ports = [] -+ try: -+ import subprocess -+ pipe_out_err = subprocess.Popen("ss -tln", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() -+ for listen in pipe_out_err[0].split('\n'): -+ m = re.search(r":([0-9]+)\s", listen) -+ if m != None: -+ ports.append(int(m.group(1))) -+ except: -+ pass -+ -+ return ports -+ - @staticmethod - def setFSReadonly(interpreter, sourceFS): - #check which implementations are loaded -@@ -2572,7 +2588,7 @@ class Util(object): - return 1 - else: - #if DPM python is there try to set to RO also via DPM but don't fail in case of errors -- if 'dpm2' in sys.modules: -+ if 'dpm2' in sys.modules and 5015 in Util.getlisteningports(): - dpm2.dpm_modifyfs(sourceFS.server, sourceFS.name, 2, sourceFS.weight) - return 0 - diff --git a/dmlite-LCGDM-2957-same-ciphers-for-all-protocols.patch b/dmlite-LCGDM-2957-same-ciphers-for-all-protocols.patch deleted file mode 100644 index cc9eebd..0000000 --- a/dmlite-LCGDM-2957-same-ciphers-for-all-protocols.patch +++ /dev/null @@ -1,263 +0,0 @@ -diff --git a/src/plugins/dpm-dsi/etc/dpm-gsiftp.init b/src/plugins/dpm-dsi/etc/dpm-gsiftp.init -index 8add9cf2..fd4da237 100755 ---- a/src/plugins/dpm-dsi/etc/dpm-gsiftp.init -+++ b/src/plugins/dpm-dsi/etc/dpm-gsiftp.init -@@ -21,6 +21,8 @@ export CSEC_MECH - FTPLOGFILE=/var/log/dpm-gsiftp/dpm-gsiftp.log - GLOBUS_TCP_PORT_RANGE="20000,25000" - export GLOBUS_TCP_PORT_RANGE -+GLOBUS_GSSAPI_CIPHERS='RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4' -+export GLOBUS_GSSAPI_CIPHERS - - sysname=`uname -s` - -@@ -70,6 +72,34 @@ if [ -n "$GLOBUS_UDP_PORT_RANGE" ]; then - export GLOBUS_UDP_PORT_RANGE - fi - -+if [ -n "$GLOBUS_THREAD_MODEL" ]; then -+ export GLOBUS_THREAD_MODEL -+fi -+ -+if [ -n "$GLOBUS_GSSAPI_FORCE_TLS" ]; then -+ export GLOBUS_GSSAPI_FORCE_TLS -+fi -+ -+if [ -n "$GLOBUS_GSSAPI_MIN_TLS_PROTOCOL" ]; then -+ export GLOBUS_GSSAPI_MIN_TLS_PROTOCOL -+fi -+ -+if [ -n "$GLOBUS_GSSAPI_MAX_TLS_PROTOCOL" ]; then -+ export GLOBUS_GSSAPI_MAX_TLS_PROTOCOL -+fi -+ -+if [ -n "$GLOBUS_GSSAPI_CIPHERS" ]; then -+ export GLOBUS_GSSAPI_CIPHERS -+fi -+ -+if [ -n "$GLOBUS_GSSAPI_SERVER_CIPHER_ORDER" ]; then -+ export GLOBUS_GSSAPI_SERVER_CIPHER_ORDER -+fi -+ -+if [ -n "$GLOBUS_GSSAPI_BACKWARD_COMPATIBLE_MIC" ]; then -+ export GLOBUS_GSSAPI_BACKWARD_COMPATIBLE_MIC -+fi -+ - if [ `uname -m` = "x86_64" ]; then - LD_LIBRARY_PATH=/opt/glite/lib64:/opt/lcg/lib64:$GLOBUS_LOCATION/lib - else -diff --git a/src/plugins/dpm-dsi/etc/dpm-gsiftp.service b/src/plugins/dpm-dsi/etc/dpm-gsiftp.service -index 1a0c728f..74287049 100644 ---- a/src/plugins/dpm-dsi/etc/dpm-gsiftp.service -+++ b/src/plugins/dpm-dsi/etc/dpm-gsiftp.service -@@ -15,9 +15,11 @@ Environment="FTPLOGFILE=/var/log/dpm-gsiftp/dpm-gsiftp.log" - Environment="GRIDFTPLOGFILE=/var/log/dpm-gsiftp/gridftp.log" - Environment="GLOBUS_TCP_PORT_RANGE=20000,25000" - Environment="GLOBUS_THREAD_MODEL=pthread" -+Environment="GLOBUS_GSSAPI_CIPHERS=RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4" - 
Environment="OPTIONS=-S -p 2811 -auth-level 0 -dsi dmlite:dome_checksum -disable-usage-stats" - EnvironmentFile=-/etc/sysconfig/dpm-gsiftp - EnvironmentFile=-/etc/sysconfig/globus -+EnvironmentFile=-/etc/sysconfig/dmlite - - ExecStart=/usr/sbin/globus-gridftp-server $OPTIONS -l $GRIDFTPLOGFILE -Z $FTPLOGFILE - Restart=on-failure -diff --git a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -index 7134516e..e7c27206 100644 ---- a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -+++ b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -@@ -54,7 +54,7 @@ if exec xrootd - #http.key /etc/grid-security/dpmmgr/dpmkey.pem - #http.cadir /etc/grid-security/certificates - #http.secretkey CHANGEMEkwpoMyfgthusgdbyyws6gxyzhntkLoh8jilwivnirtjki --#http.cipherfilter HIGH -+#http.cipherfilter RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4 - fi - - dpm.dmconf /etc/dmlite.conf -diff --git a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -index 0b28931e..19c0393d 100644 ---- a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -+++ b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -@@ -58,7 +58,7 @@ dpm.xrdserverport 1095 - #http.key /etc/grid-security/dpmmgr/dpmkey.pem - #http.cadir /etc/grid-security/certificates - #http.secretkey CHANGEMEkwpoMyfgthusgdbyyws6gxyzhntkLoh8jilwivnirtjki --#http.cipherfilter HIGH -+#http.cipherfilter RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4 - fi - - # for any federations setup provide a reirect to federation handler -diff --git a/src/puppet/dmlite/manifests/dav/config.pp b/src/puppet/dmlite/manifests/dav/config.pp -index adaa97e0..1d6eb596 100644 ---- a/src/puppet/dmlite/manifests/dav/config.pp -+++ b/src/puppet/dmlite/manifests/dav/config.pp -@@ -201,5 +201,18 @@ class dmlite::dav::config ( - line => 'LoadModule mpm_prefork_module modules/mod_mpm_prefork.so', - path => '/etc/httpd/conf.modules.d/00-mpm.conf', - } -+ -+ # dmlite global environment configuration -+ file {"/etc/systemd/system/httpd.service.d/": -+ ensure => directory, -+ owner => root, -+ group => root, -+ } -> -+ file {"/etc/systemd/system/httpd.service.d/override.conf": -+ ensure => file, -+ owner => root, -+ group => root, -+ content => template('dmlite/dav/override.erb'), -+ } - } - } -diff --git a/src/puppet/dmlite/manifests/xrootd.pp b/src/puppet/dmlite/manifests/xrootd.pp -index ae5198a0..cd7f3b76 100644 ---- a/src/puppet/dmlite/manifests/xrootd.pp -+++ b/src/puppet/dmlite/manifests/xrootd.pp -@@ -48,7 +48,7 @@ class dmlite::xrootd ( - Boolean $xrd_checksum_enabled = false, - String $xrd_checksum = 'max 100 adler32 md5 crc32', - String $dpm_xrd_packagename = 'dmlite-dpm-xrootd', -- String $dpm_xrdhttp_cipherlist = 'HIGH' -+ String $dpm_xrdhttp_cipherlist = 'RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4' - ) { - - Dmlite::Xrootd::Create_config <| |> ~> Class[xrootd::service] -diff --git a/src/puppet/dmlite/templates/dav/override.erb b/src/puppet/dmlite/templates/dav/override.erb -new file mode 100644 -index 00000000..e0ef44d4 ---- /dev/null -+++ b/src/puppet/dmlite/templates/dav/override.erb -@@ -0,0 +1,4 @@ -+[Service] -+LimitCORE=infinity -+EnvironmentFile=-/etc/sysconfig/dmlite -+ -diff --git a/src/puppet/dmlite/templates/xrootd/dpm-xrootd.cfg.erb b/src/puppet/dmlite/templates/xrootd/dpm-xrootd.cfg.erb -index cb88efd0..0d01358e 100644 ---- a/src/puppet/dmlite/templates/xrootd/dpm-xrootd.cfg.erb -+++ b/src/puppet/dmlite/templates/xrootd/dpm-xrootd.cfg.erb -@@ -23,12 +23,15 @@ 
dpm.allowvo <%= @dpm_allowvo %> - xrd.protocol XrdHttp /usr/lib64/libXrdHttp.so - http.exthandler dome /usr/lib64/libdome.so <%= @dpm_dome_conf_file %> - http.selfhttps2http yes -+http.secretkey <%= @dpm_xrdhttp_secret_key %> -+<% if !defined?(@tls_cert) -%> -+# XRootD 4.x configuration for HTTPS - http.cert /etc/grid-security/dpmmgr/dpmcert.pem - http.key /etc/grid-security/dpmmgr/dpmkey.pem - http.cadir /etc/grid-security/certificates --http.secretkey <%= @dpm_xrdhttp_secret_key %> - http.cipherfilter <%= @dpm_xrdhttp_cipherlist %> - <% end -%> -+<% end -%> - fi - <% if @dpm_namelib -%> - dpm.namelib <%= @dpm_namelib %> -diff --git a/src/puppet/dpm/manifests/params.pp b/src/puppet/dpm/manifests/params.pp -index a7861a22..23c5b1b7 100644 ---- a/src/puppet/dpm/manifests/params.pp -+++ b/src/puppet/dpm/manifests/params.pp -@@ -105,7 +105,7 @@ class dpm::params { - $xrootd_tls_cafile = hiera('dpm::params::xrootd_tls_cafile',undef) - $xrootd_tls_capath = hiera('dpm::params::xrootd_tls_capath',undef) - $xrootd_tls_caopts = hiera('dpm::params::xrootd_tls_caopts',undef) -- $xrootd_tls_ciphers = hiera('dpm::params::xrootd_tls_ciphers',undef) -+ $xrootd_tls_ciphers = hiera('dpm::params::xrootd_tls_ciphers','RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4') - $xrootd_tls_reuse = hiera('dpm::params::xrootd_tls_reuse',undef) - - $site_name = hiera('dpm::params::site_name',undef) -diff --git a/src/puppet/gridftp/manifests/config.pp b/src/puppet/gridftp/manifests/config.pp -index e6a34050..ac259653 100644 ---- a/src/puppet/gridftp/manifests/config.pp -+++ b/src/puppet/gridftp/manifests/config.pp -@@ -65,6 +65,11 @@ class gridftp::config ( - $globus_tcp_port_range = $gridftp::params::globus_tcp_port_range, - $globus_udp_port_range = $gridftp::params::globus_udp_port_range, - $force_tls = $gridftp::params::force_tls, -+ $globus_min_tls_protocol = $gridftp::params::globus_min_tls_protocol, -+ $globus_max_tls_protocol = $gridftp::params::globus_max_tls_protocol, -+ $globus_ciphers = $gridftp::params::globus_ciphers, -+ $globus_server_cipher_order = $gridftp::params::globus_server_cipher_order, -+ $globus_compatible_mic = $gridftp::params::globus_compatible_mic, - $service = $gridftp::params::service, - $udt = $gridftp::params::udt, - $epsv_ip = $gridftp::params::epsv_ip, -diff --git a/src/puppet/gridftp/manifests/params.pp b/src/puppet/gridftp/manifests/params.pp -index 73a30ad6..4f390fb8 100644 ---- a/src/puppet/gridftp/manifests/params.pp -+++ b/src/puppet/gridftp/manifests/params.pp -@@ -78,6 +78,11 @@ class gridftp::params ( - $globus_tcp_port_range = hiera('gridftp::params::globus_tcp_port_range',undef), - $globus_udp_port_range = hiera('gridftp::params::globus_udp_port_range',undef), - $force_tls = hiera('gridftp::params::force_tls',1), -+ $globus_min_tls_protocol = hiera('gridftp::params::globus_min_tls_protocol',undef), -+ $globus_max_tls_protocol = hiera('gridftp::params::globus_max_tls_protocol',undef), -+ $globus_ciphers = hiera('gridftp::params::globus_ciphers','RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4'), -+ $globus_server_cipher_order = hiera('gridftp::params::globus_server_cipher_order',undef), -+ $globus_compatible_mic = hiera('gridftp::params::globus_compatible_mic',undef), - $service = hiera('gridftp::params::service','globus-gridftp-server'), - - $udt = hiera('gridftp::params::udt',false), -diff --git a/src/puppet/gridftp/templates/sysconfig.erb b/src/puppet/gridftp/templates/sysconfig.erb -index f405386a..d420d942 100644 ---- a/src/puppet/gridftp/templates/sysconfig.erb -+++ 
b/src/puppet/gridftp/templates/sysconfig.erb -@@ -1,27 +1,30 @@ -+# -+# Globus environment -+# - <%if @configfile.to_s!='' -%>conf=<%= @configfile %><% end -%> - - <%if @configdir.to_s!='' -%>confdir=<%= @configdir %><% end -%> - --<%if scope['operatingsystemmajrelease'] == '6' -%> --export GLOBUS_THREAD_MODEL="<%= @thread_model %>" --export GLOBUS_GSSAPI_FORCE_TLS=<%= @force_tls %> --<%else -%> --GLOBUS_THREAD_MODEL="<%= @thread_model %>" --GLOBUS_GSSAPI_FORCE_TLS=<%= @force_tls %> --<% end -%> -- - <%if @globus_tcp_port_range.to_s!='' -%> --<%if scope['operatingsystemmajrelease'] == '6' -%> --export GLOBUS_TCP_PORT_RANGE="<%= @globus_tcp_port_range %>" --<%else -%> - GLOBUS_TCP_PORT_RANGE="<%= @globus_tcp_port_range %>" - <% end -%> --<% end -%> -- - <%if @globus_udp_port_range.to_s!='' -%> --<%if scope['operatingsystemmajrelease'] == '6' -%> --export GLOBUS_UDP_PORT_RANGE="<%= @globus_udp_port_range %>" --<%else -%> - GLOBUS_UDP_PORT_RANGE="<%= @globus_udp_port_range %>" - <% end -%> -+GLOBUS_THREAD_MODEL="<%= @thread_model %>" -+GLOBUS_GSSAPI_FORCE_TLS=<%= @force_tls %> -+<%if @globus_min_tls_protocol -%> -+GLOBUS_GSSAPI_MIN_TLS_PROTOCOL=<%= @globus_min_tls_protocol %> -+<% end -%> -+<%if @globus_max_tls_protocol -%> -+GLOBUS_GSSAPI_MAX_TLS_PROTOCOL=<%= @globus_max_tls_protocol %> -+<% end -%> -+<%if @globus_ciphers -%> -+GLOBUS_GSSAPI_CIPHERS='<%= @globus_ciphers %>' -+<% end -%> -+<%if defined?(@globus_server_cipher_order) -%> -+GLOBUS_GSSAPI_SERVER_CIPHER_ORDER=<% if @globus_server_cipher_order -%>true<% else -%>false<% end %> -+<% end -%> -+<%if defined?(@globus_compatible_mic) -%> -+GLOBUS_GSSAPI_BACKWARD_COMPATIBLE_MIC=<% if @globus_compatible_mic -%>true<% else -%>false<% end %> - <% end -%> -diff --git a/src/puppet/xrootd/templates/override.erb b/src/puppet/xrootd/templates/override.erb -index 7adfccb6..9b1d2524 100644 ---- a/src/puppet/xrootd/templates/override.erb -+++ b/src/puppet/xrootd/templates/override.erb -@@ -25,3 +25,5 @@ Environment=DAEMON_COREFILE_LIMIT=<%= @daemon_corefile_limit %> - <% if @enable_hdfs -%> - Environment=LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<%= @java_home %>/jre/lib/amd64/server/ - <% end -%> -+ -+EnvironmentFile=-/etc/sysconfig/dmlite
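LCGDM-2957 pins one OpenSSL cipher string, `RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4`, across gsiftp, xrootd and httpd so a client negotiates the same suites whichever door it enters. Before rolling such a string out to every protocol it helps to expand it locally; a small check program (OpenSSL 1.1+, not part of dmlite):

```cpp
#include <cstdio>
#include <openssl/ssl.h>

int main()
{
    const char *ciphers = "RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4";

    SSL_CTX *ctx = SSL_CTX_new(TLS_method());
    if (!ctx)
        return 1;

    /* SSL_CTX_set_cipher_list() fails if no cipher in the string is usable. */
    if (SSL_CTX_set_cipher_list(ctx, ciphers) != 1) {
        std::fprintf(stderr, "cipher string rejected\n");
        SSL_CTX_free(ctx);
        return 1;
    }

    /* Print what the string actually expands to on this OpenSSL build. */
    STACK_OF(SSL_CIPHER) *sk = SSL_CTX_get_ciphers(ctx);
    if (sk)
        for (int i = 0; i < sk_SSL_CIPHER_num(sk); i++)
            std::printf("%s\n", SSL_CIPHER_get_name(sk_SSL_CIPHER_value(sk, i)));

    SSL_CTX_free(ctx);
    return 0;
}
```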
diff --git a/dmlite-LCGDM-2958-filename-with-special-sequence.patch b/dmlite-LCGDM-2958-filename-with-special-sequence.patch deleted file mode 100644 index c619c4a..0000000 --- a/dmlite-LCGDM-2958-filename-with-special-sequence.patch +++ /dev/null @@ -1,94 +0,0 @@ -diff --git a/src/dome/DomeCoreXeq.cpp b/src/dome/DomeCoreXeq.cpp -index 29a8b7bd..af7a1238 100644 ---- a/src/dome/DomeCoreXeq.cpp -+++ b/src/dome/DomeCoreXeq.cpp -@@ -1649,7 +1649,10 @@ int DomeCore::dome_getspaceinfo(DomeReq &req) { - int DomeCore::calculateChecksum(DomeReq &req, std::string lfn, Replica replica, std::string checksumtype, bool updateLfnChecksum, bool forcerecalc = false) { - // create queue entry - GenPrioQueueItem::QStatus qstatus = GenPrioQueueItem::Waiting; -- std::string namekey = lfn + "[#]" + replica.rfn + "[#]" + checksumtype; -+ std::stringstream s; // encode checksum data in key -+ s << lfn << "[#]" << replica.rfn + "[#]" << checksumtype << "[#]" -+ << lfn.length() << "[#]" << replica.rfn.length() << "[#]" << checksumtype.length(); -+ std::string namekey = s.str(); - - - -diff --git a/src/dome/DomeStatus.cpp b/src/dome/DomeStatus.cpp -index cd174900..7969119a 100644 ---- a/src/dome/DomeStatus.cpp -+++ b/src/dome/DomeStatus.cpp -@@ -633,21 +633,28 @@ void DomeStatus::tickChecksums() { - - // parse queue item contents - std::vector<std::string> qualifiers = next->qualifiers; -- std::vector<std::string> namekey = DomeUtils::split(next->namekey, "[#]"); -+ std::vector<std::string> namekey = DomeUtils::rsplit(next->namekey, "[#]", 3); - -- if(namekey.size() != 3) { -+ if(namekey.size() != 4) { - Log(Logger::Lvl1, domelogmask, domelogname, "INCONSISTENCY in the internal checksum queue. Invalid namekey: " << next->namekey); - continue; - } -- -+ int lfn_length = atoi(namekey[1].c_str()); -+ int rfn_length = atoi(namekey[2].c_str()); -+ int checksumtype_length = atoi(namekey[3].c_str()); -+ if (lfn_length + 3 + rfn_length + 3 + checksumtype_length != (int) namekey[0].length()) { -+ Log(Logger::Lvl1, domelogmask, domelogname, "INCONSISTENCY in the internal checksum queue. Invalid namekey: " << next->namekey); -+ continue; -+ } -+ - if(qualifiers.size() != 5) { - Log(Logger::Lvl1, domelogmask, domelogname, "INCONSISTENCY in the internal checksum queue. Invalid size of qualifiers: " << qualifiers.size()); - continue; - } -- -- lfn = namekey[0]; -- rfn = namekey[1]; -- checksumtype = namekey[2]; -+ -+ lfn = namekey[0].substr(0, lfn_length); -+ rfn = namekey[0].substr(lfn_length+3, rfn_length); -+ checksumtype = namekey[0].substr(lfn_length+3+rfn_length+3, checksumtype_length); - - server = qualifiers[1]; - updateLfnChecksum = DomeUtils::str_to_bool(qualifiers[2]); -diff --git a/src/utils/DomeUtils.h b/src/utils/DomeUtils.h -index 9e1a9cd3..3e0e03b4 100644 ---- a/src/utils/DomeUtils.h -+++ b/src/utils/DomeUtils.h -@@ -78,6 +78,20 @@ inline std::vector<std::string> split(std::string data, std::string token) { - return output; - } - -+inline std::vector<std::string> rsplit(std::string data, std::string token, int max=-1) { -+ std::vector<std::string> output; -+ for (int cnt = 0; ; cnt++) { -+ size_t start = data.rfind(token); -+ if (start == std::string::npos || cnt == max) { -+ output.insert(output.begin(), data.substr(0, data.length())); -+ break; -+ } -+ output.insert(output.begin(), data.substr(start+token.length(), data.length()-start)); -+ data = data.substr(0, start); -+ } -+ return output; -+} -+ - inline void mkdirp(const std::string& path) { - std::vector<std::string> parts = split(path, "/"); - std::ostringstream tocreate(parts[0]); -diff --git a/tests/dpm/dpm-tester.py b/tests/dpm/dpm-tester.py -index 0d637b61..6d6e30c9 100755 ---- a/tests/dpm/dpm-tester.py -+++ b/tests/dpm/dpm-tester.py -@@ -1179,7 +1179,7 @@ def single_protocol_tests(args, scope): - orch.add_nested(play_with_file(scope, tester, "/etc/services", "{0}/services".format(target))) - - if scope != "srm": -- evil_filename = """evil filename-!@#%^_-+=:][}{><'" #$&*)(""" -+ evil_filename = """evil filename[#]-!@#%^_-+=:][}{><'" #$&*)(""" - orch.add_nested(play_with_file(scope, tester, "/etc/services", path_join(target, evil_filename))) - - if args.dir_accounting:
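Because an LFN may itself contain the `[#]` separator (the tester's new "evil filename" makes sure of it), LCGDM-2958 appends the three component lengths to the queue key and parses from the right with `rsplit`, then recovers the components by offset rather than by separator. A self-contained round-trip of that encoding (a sketch, not the DomeUtils code; `std::stoul` will throw on a malformed key):

```cpp
#include <cassert>
#include <sstream>
#include <string>

static const std::string SEP = "[#]";

/* Encode as DomeCore::calculateChecksum() does: payload plus the three lengths. */
static std::string encode(const std::string &lfn, const std::string &rfn,
                          const std::string &cks)
{
    std::ostringstream s;
    s << lfn << SEP << rfn << SEP << cks << SEP
      << lfn.length() << SEP << rfn.length() << SEP << cks.length();
    return s.str();
}

/* Decode by offsets, as DomeStatus::tickChecksums() does after rsplit():
 * the trailing length fields are unambiguous even if lfn/rfn contain "[#]". */
static bool decode(const std::string &key, std::string &lfn,
                   std::string &rfn, std::string &cks)
{
    size_t p3 = key.rfind(SEP);
    if (p3 == std::string::npos || p3 < 2 * SEP.size()) return false;
    size_t p2 = key.rfind(SEP, p3 - 1);
    if (p2 == std::string::npos || p2 < SEP.size()) return false;
    size_t p1 = key.rfind(SEP, p2 - 1);
    if (p1 == std::string::npos) return false;

    size_t llen = std::stoul(key.substr(p1 + SEP.size(), p2 - p1 - SEP.size()));
    size_t rlen = std::stoul(key.substr(p2 + SEP.size(), p3 - p2 - SEP.size()));
    size_t clen = std::stoul(key.substr(p3 + SEP.size()));

    /* Same consistency check as the patch: lengths must cover the payload. */
    if (llen + SEP.size() + rlen + SEP.size() + clen != p1) return false;
    lfn = key.substr(0, llen);
    rfn = key.substr(llen + SEP.size(), rlen);
    cks = key.substr(llen + SEP.size() + rlen + SEP.size(), clen);
    return true;
}

int main()
{
    std::string l, r, c;
    assert(decode(encode("/dpm/evil[#]name", "host:/fs/evil[#]name", "adler32"), l, r, c));
    assert(l == "/dpm/evil[#]name" && r == "host:/fs/evil[#]name" && c == "adler32");
}
```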
diff --git a/dmlite-LCGDM-2959-use-CRL-for-TPC.patch b/dmlite-LCGDM-2959-use-CRL-for-TPC.patch deleted file mode 100644 index 7213361..0000000 --- a/dmlite-LCGDM-2959-use-CRL-for-TPC.patch +++ /dev/null @@ -1,364 +0,0 @@ -diff --git a/src/plugins/apache-httpd/etc/httpd/conf.d/zlcgdm-dav.conf.in b/src/plugins/apache-httpd/etc/httpd/conf.d/zlcgdm-dav.conf.in -index 8d7d471b..062979ec 100644 ---- a/src/plugins/apache-httpd/etc/httpd/conf.d/zlcgdm-dav.conf.in -+++ b/src/plugins/apache-httpd/etc/httpd/conf.d/zlcgdm-dav.conf.in -@@ -175,7 +175,9 @@ DiskDMLite /etc/dmlite-disk.conf - # Trusted certificates for TPC connection to remote storage - #DiskSSLCACertificatePath /etc/grid-security/certificates - #DiskSSLCACertificateFile -+ #DiskSSLCARevocationPath /etc/grid-security/certificates - #DiskSSLCARevocationFile -+ #DiskSSLCARevocationCheck chain - - # Terminate slow (stuck) transfers if bytes transferred - # in given time window is smaller than configured thresholds -diff --git a/src/plugins/apache-httpd/src/client/htext.h b/src/plugins/apache-httpd/src/client/htext.h -index 1303e77e..fcf33f79 100644 ---- a/src/plugins/apache-httpd/src/client/htext.h -+++ b/src/plugins/apache-httpd/src/client/htext.h -@@ -44,6 +44,7 @@ typedef enum - - HTEXTOP_CAPATH, /* CA Path */ - HTEXTOP_CAFILE, /* CA File */ -+ HTEXTOP_CRLPATH, /* CRL Path */ - HTEXTOP_CRLFILE, /* CRL File */ - HTEXTOP_VERIFYPEER, /* Validate (!0) or not (0) the remote certificate */ - -diff --git a/src/plugins/apache-httpd/src/client/htext_api.c b/src/plugins/apache-httpd/src/client/htext_api.c -index 1c3df75c..09a3fa3f 100644 ---- a/src/plugins/apache-httpd/src/client/htext_api.c -+++ b/src/plugins/apache-httpd/src/client/htext_api.c -@@ -72,6 +72,7 @@ static option_entry option_definitions[] = { - - { OT_STRING, (option_value) NULL }, /* HTEXTOP_CAPATH */ - { OT_STRING, (option_value) NULL }, /* HTEXTOP_CAFILE */ -+ { OT_STRING, (option_value) NULL }, /* HTEXTOP_CRLPATH */ - { OT_STRING, (option_value) NULL }, /* HTEXTOP_CRLFILE */ - { OT_INT, (option_value) 1 }, /* HTEXTOP_VERIFYPEER */ - -diff --git a/src/plugins/apache-httpd/src/client/htext_common.c b/src/plugins/apache-httpd/src/client/htext_common.c -index 34a28353..4b7c9dc9 100644 ---- a/src/plugins/apache-httpd/src/client/htext_common.c -+++ b/src/plugins/apache-httpd/src/client/htext_common.c -@@ -21,6 +21,7 @@ - #define _GNU_SOURCE - #include - #include -+#include - #include - #include - #include -@@ -121,6 +122,32 @@ int htext_error(htext_handle *handle, const char *fmt, ...) - return n; - } - -+CURLcode htext_sslctx_callback(CURL *curl, void *ssl_ctx, void *pp) -+{ -+ X509_STORE *store = SSL_CTX_get_cert_store((SSL_CTX*)ssl_ctx); -+ htext_chunk *partial = (htext_chunk*) pp; -+ const char *capath = GETSTR(partial->handle, HTEXTOP_CAPATH); -+ const char *crlpath = GETSTR(partial->handle, HTEXTOP_CRLPATH); -+ -+ if (!crlpath) return CURLE_OK; -+ -+ if (!store) { -+ htext_log(partial->handle, "unable to get SSL storage"); -+ return CURLE_BAD_FUNCTION_ARGUMENT; -+ } -+ -+ if (!capath || strcmp(capath, crlpath)) { -+ if (!X509_STORE_load_locations(store, NULL, crlpath)) { -+ htext_log(partial->handle, "unable to load X.509 CRL from %s", crlpath); -+ return CURLE_BAD_FUNCTION_ARGUMENT; -+ } -+ } -+ -+ X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL); -+ -+ return CURLE_OK; -+} -+ - size_t htext_header_callback(void *buffer, size_t size, size_t nmemb, void *st) - { - htext_chunk *partial = (htext_chunk*) st; -diff --git a/src/plugins/apache-httpd/src/client/htext_copy.c b/src/plugins/apache-httpd/src/client/htext_copy.c -index 88c4aacd..36b04f75 100644 ---- a/src/plugins/apache-httpd/src/client/htext_copy.c -+++ b/src/plugins/apache-httpd/src/client/htext_copy.c -@@ -203,6 +203,9 @@ void *htext_copy_method(void *h) - curl_easy_setopt(curl, CURLOPT_USERAGENT, GETSTR(handle, HTEXTOP_CLIENTID)); - curl_easy_setopt(curl, CURLOPT_CAPATH, GETSTR(handle, HTEXTOP_CAPATH)); - curl_easy_setopt(curl, CURLOPT_CAINFO, GETSTR(handle, HTEXTOP_CAFILE)); -+ // CURL doesn't provide direct interface for "CRLPATH", use openssl context directly -+ curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, htext_sslctx_callback); -+ curl_easy_setopt(curl, CURLOPT_SSL_CTX_DATA, &control); - curl_easy_setopt(curl, CURLOPT_CRLFILE, GETSTR(handle, HTEXTOP_CRLFILE)); -
curl_easy_setopt(curl, CURLOPT_SSLCERT, - GETSTR(handle, HTEXTOP_USERCERTIFICATE)); -diff --git a/src/plugins/apache-httpd/src/client/htext_get.c b/src/plugins/apache-httpd/src/client/htext_get.c -index 0fb49bde..310c773b 100644 ---- a/src/plugins/apache-httpd/src/client/htext_get.c -+++ b/src/plugins/apache-httpd/src/client/htext_get.c -@@ -51,6 +51,7 @@ static void* htext_get_subthread(void *pp) - GETIO(partial->handle)->write); - curl_easy_setopt(partial->curl, CURLOPT_WRITEDATA, partial->fd); - curl_easy_setopt(partial->curl, CURLOPT_DEBUGDATA, partial); -+ curl_easy_setopt(partial->curl, CURLOPT_SSL_CTX_DATA, partial); - - /* Range */ - if (partial->nchunks > 1) { -@@ -89,6 +90,8 @@ void *htext_get_method(void *h) - curl_easy_setopt(curl, CURLOPT_USERAGENT, GETSTR(handle, HTEXTOP_CLIENTID)); - curl_easy_setopt(curl, CURLOPT_CAPATH, GETSTR(handle, HTEXTOP_CAPATH)); - curl_easy_setopt(curl, CURLOPT_CAINFO, GETSTR(handle, HTEXTOP_CAFILE)); -+ // CURL doesn't provide direct interface for "CRLPATH", use openssl context directly -+ curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, htext_sslctx_callback); - curl_easy_setopt(curl, CURLOPT_CRLFILE, GETSTR(handle, HTEXTOP_CRLFILE)); - curl_easy_setopt(curl, CURLOPT_SSLCERT, - GETSTR(handle, HTEXTOP_USERCERTIFICATE)); -@@ -136,6 +139,7 @@ void *htext_get_method(void *h) - curl_easy_setopt(curl, CURLOPT_HEADERDATA, &head); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, head.headers); - curl_easy_setopt(curl, CURLOPT_DEBUGDATA, &head); -+ curl_easy_setopt(curl, CURLOPT_SSL_CTX_DATA, &head); - - handle->status = HTEXTS_WAITING; - htext_log(handle, "Asking for the size"); -@@ -152,6 +156,12 @@ void *htext_get_method(void *h) - curl_easy_setopt(curl, CURLOPT_URL, head.location); - } - -+ /* Explicitly reset pointers to released head data structure */ -+ curl_easy_setopt(curl, CURLOPT_HEADERDATA, NULL); -+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, NULL); -+ curl_easy_setopt(curl, CURLOPT_DEBUGDATA, NULL); -+ curl_easy_setopt(curl, CURLOPT_SSL_CTX_DATA, NULL); -+ - htext_partial_clean(&head); - } - -diff --git a/src/plugins/apache-httpd/src/client/htext_private.h b/src/plugins/apache-httpd/src/client/htext_private.h -index 0a406df7..49c6c60a 100644 ---- a/src/plugins/apache-httpd/src/client/htext_private.h -+++ b/src/plugins/apache-httpd/src/client/htext_private.h -@@ -187,6 +187,15 @@ int htext_log(htext_handle *handle, const char *fmt, ...); - */ - int htext_error(htext_handle *handle, const char *fmt, ...); - -+/** -+ * Called by Curl to prepare the SSL context before the transfer starts -+ * @param curl The CURL handle calling -+ * @param ssl_ctx The SSL library context -+ * @param pp A pointer to a htext_partial structure -+ * @return Continue with SSL connection or return error -+ */ -+CURLcode htext_sslctx_callback(CURL *curl, void *ssl_ctx, void *pp); -+ - /** - * Called by Curl every time a line of the header needs to be processed - * @param buffer Where the data is -diff --git a/src/plugins/apache-httpd/src/client/htext_put.c b/src/plugins/apache-httpd/src/client/htext_put.c -index 0c3e4c1d..a23f0c0e 100644 ---- a/src/plugins/apache-httpd/src/client/htext_put.c -+++ b/src/plugins/apache-httpd/src/client/htext_put.c -@@ -101,6 +101,7 @@ static void* htext_put_subthread(void *pp) - curl_easy_setopt(partial->curl, CURLOPT_READDATA, partial); - curl_easy_setopt(partial->curl, CURLOPT_IOCTLDATA, partial); - curl_easy_setopt(partial->curl, CURLOPT_DEBUGDATA, partial); -+ curl_easy_setopt(partial->curl, CURLOPT_SSL_CTX_DATA, partial); - - /* Range
(not for last empty PUT) */ - if (partial->nchunks > 1 && partial->index >= 0) { -@@ -154,6 +155,8 @@ void *htext_put_method(void *h) - curl_easy_setopt(curl, CURLOPT_USERAGENT, GETSTR(handle, HTEXTOP_CLIENTID)); - curl_easy_setopt(curl, CURLOPT_CAPATH, GETSTR(handle, HTEXTOP_CAPATH)); - curl_easy_setopt(curl, CURLOPT_CAINFO, GETSTR(handle, HTEXTOP_CAFILE)); -+ // CURL doesn't provide direct interface for "CRLPATH", use openssl context directly -+ curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, htext_sslctx_callback); - curl_easy_setopt(curl, CURLOPT_CRLFILE, GETSTR(handle, HTEXTOP_CRLFILE)); - curl_easy_setopt(curl, CURLOPT_SSLCERT, GETSTR(handle, HTEXTOP_USERCERTIFICATE)); - curl_easy_setopt(curl, CURLOPT_SSLKEY, GETSTR(handle, HTEXTOP_USERPRIVKEY)); -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c b/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -index dd586da6..f3fd2caa 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -@@ -465,8 +465,14 @@ static dav_error *dav_disk_generic_copy(const dav_resource* res, const char* upr - if (d_conf->capath) { - htext_setopt(handle, HTEXTOP_CAPATH, d_conf->capath); - } -- if (d_conf->crlfile) { -- htext_setopt(handle, HTEXTOP_CRLFILE, d_conf->crlfile); -+ if (d_conf->crlcheck && strcmp("none", d_conf->crlcheck)) -+ { -+ if (d_conf->crlpath) { -+ htext_setopt(handle, HTEXTOP_CRLPATH, d_conf->crlpath); -+ } -+ if (d_conf->crlfile) { -+ htext_setopt(handle, HTEXTOP_CRLFILE, d_conf->crlfile); -+ } - } - if (uproxy || d_conf->capath || d_conf->cafile) { - htext_setopt(handle, HTEXTOP_VERIFYPEER, 1); -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.c b/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.c -index 9529fbc8..2b875028 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.c -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.c -@@ -84,7 +84,9 @@ static void *dav_disk_create_dir_config(apr_pool_t *p, char *dir) - conf->proxy_cache = "/var/proxycache"; - conf->capath = "/etc/grid-security/certificates"; - conf->cafile = NULL; -+ conf->crlpath = "/etc/grid-security/certificates"; - conf->crlfile = NULL; -+ conf->crlcheck = "chain"; - conf->low_speed_time = 2*60; - conf->low_speed_limit = 10*1024; - -@@ -261,6 +263,23 @@ static const char *dav_disk_cmd_cafile(cmd_parms *cmd, void *config, - return NULL ; - } - -+/** -+ * Set CRLPath with revoked certificates for TPC -+ * @param cmd Lots of information about the configuration contexts (as pool) -+ * @param config A pointer to the directory configuration -+ * @param arg The CRLPath -+ * @return NULL on success. An error string otherwise -+ */ -+static const char *dav_disk_cmd_crlpath(cmd_parms *cmd, void *config, -+ const char *arg) -+{ -+ (void) cmd; -+ -+ dav_disk_dir_conf *conf = (dav_disk_dir_conf*) config; -+ conf->crlpath = (!arg || !strlen(arg)) ? NULL : arg; -+ return NULL ; -+} -+ - /** - * Set CRLFile with revoked certificates for TPC - * @param cmd Lots of information about the configuration contexts (as pool) -@@ -278,6 +297,24 @@ static const char *dav_disk_cmd_crlfile(cmd_parms *cmd, void *config, - conf->crlfile = (!arg || !strlen(arg)) ? NULL : arg; - return NULL ; - } - -+/** -+ * Set CRL validation for revoked certificates used by TPC -+ * @param cmd Lots of information about the configuration contexts (as pool) -+ * @param config A pointer to the directory configuration -+ * @param arg The configuration -+ * @return NULL on success.
An error string otherwise -+ */ -+static const char *dav_disk_cmd_crlcheck(cmd_parms *cmd, void *config, -+ const char *arg) -+{ -+ if (strcmp("none", arg) && strcmp("chain", arg)) -+ return apr_psprintf(cmd->pool, "%s is not a recognised value", arg); -+ -+ dav_disk_dir_conf *conf = (dav_disk_dir_conf*) config; -+ conf->crlcheck = arg; -+ return NULL ; -+} -+ - /** Command list (configuration parameters) */ - static const command_rec dav_disk_cmds[] = { - AP_INIT_TAKE1 ("DiskDMLite", dav_disk_cmd_dmlite, NULL, ACCESS_CONF | RSRC_CONF, -@@ -294,10 +331,12 @@ static const command_rec dav_disk_cmds[] = { - "SSL CA Certificate path ('/path/to/dir' - contains PEM encoded files)"), - AP_INIT_TAKE1 ("DiskSSLCACertificateFile", dav_disk_cmd_cafile, NULL, ACCESS_CONF, - "SSL CA Certificate file ('/path/to/file' - PEM encoded)"), -- //AP_INIT_TAKE1 ("DiskSSLCARevocationPath", dav_disk_cmd_crlpath, NULL, ACCESS_CONF, -- // "SSL CA Certificate Revocation List (CRL) path ('/path/to/dir' - contains PEM encoded files)"), -+ AP_INIT_TAKE1 ("DiskSSLCARevocationPath", dav_disk_cmd_crlpath, NULL, ACCESS_CONF, -+ "SSL CA Certificate Revocation List (CRL) path ('/path/to/dir' - contains PEM encoded files)"), - AP_INIT_TAKE1 ("DiskSSLCARevocationFile", dav_disk_cmd_crlfile, NULL, ACCESS_CONF, - "SSL CA Certificate Revocation List (CRL) file ('/path/to/file' - PEM encoded)"), -+ AP_INIT_TAKE1 ("DiskSSLCARevocationCheck", dav_disk_cmd_crlcheck, NULL, ACCESS_CONF, -+ "SSL CA Certificate Revocation List checking (chain|none)"), - AP_INIT_TAKE1 ("DiskLowSpeedTime", ap_set_int_slot, - (void *)APR_OFFSETOF(dav_disk_dir_conf, low_speed_time), ACCESS_CONF, - "Low speed limit time period in seconds"), -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.h b/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.h -index 42164d74..f2bdaf57 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.h -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_disk/mod_lcgdm_disk.h -@@ -60,7 +60,9 @@ struct dav_disk_dir_conf - const char *delegation_service; - const char *capath; - const char *cafile; -+ const char *crlpath; - const char *crlfile; -+ const char *crlcheck; - int low_speed_time; - int low_speed_limit; - }; -diff --git a/src/puppet/dmlite/manifests/dav.pp b/src/puppet/dmlite/manifests/dav.pp -index 0518bbe5..9ff82e08 100644 ---- a/src/puppet/dmlite/manifests/dav.pp -+++ b/src/puppet/dmlite/manifests/dav.pp -@@ -16,6 +16,8 @@ class dmlite::dav ( - Stdlib::Unixpath $ssl_cert = $dmlite::dav::params::ssl_cert, - Stdlib::Unixpath $ssl_key = $dmlite::dav::params::ssl_key, - Stdlib::Unixpath $ssl_capath = $dmlite::dav::params::ssl_capath, -+ Stdlib::Unixpath $ssl_tpc_capath = $dmlite::dav::params::ssl_tpc_capath, -+ Optional[Stdlib::Unixpath] $ssl_tpc_crlpath = $dmlite::dav::params::ssl_tpc_crlpath, - String $ssl_options = $dmlite::dav::params::ssl_options, - String $log_error = $dmlite::dav::params::log_error, - String $log_transfer = $dmlite::dav::params::log_transfer, -diff --git a/src/puppet/dmlite/manifests/dav/config.pp b/src/puppet/dmlite/manifests/dav/config.pp -index 1d6eb596..94adea76 100644 ---- a/src/puppet/dmlite/manifests/dav/config.pp -+++ b/src/puppet/dmlite/manifests/dav/config.pp -@@ -18,6 +18,8 @@ class dmlite::dav::config ( - $ssl_options = $dmlite::dav::ssl_options, - $ssl_protocol = $dmlite::dav::ssl_protocol, - $ssl_ciphersuite = $dmlite::dav::ssl_ciphersuite, -+ $ssl_tpc_capath = $dmlite::dav::ssl_tpc_capath, -+ $ssl_tpc_crlpath = $dmlite::dav::ssl_tpc_crlpath, -
$log_error = $dmlite::dav::log_error, - $log_transfer = $dmlite::dav::log_transfer, - $log_level = $dmlite::dav::log_level, -diff --git a/src/puppet/dmlite/manifests/dav/params.pp b/src/puppet/dmlite/manifests/dav/params.pp -index f2dcee6e..15b3c449 100644 ---- a/src/puppet/dmlite/manifests/dav/params.pp -+++ b/src/puppet/dmlite/manifests/dav/params.pp -@@ -20,6 +20,8 @@ class dmlite::dav::params ( - $ssl_protocol = hiera('dmlite::dav::params::ssl_protocol', 'all -SSLv2 -SSLv3') - $ssl_ciphersuite = hiera('dmlite::dav::params::ssl_ciphersuite', 'RC4-SHA:AES128-SHA:HIGH:!aNULL:!MD5:!RC4') - $ssl_options = hiera('dmlite::dav::params::ssl_options','+StdEnvVars') -+ $ssl_tpc_capath = hiera('dmlite::dav::params::ssl_tpc_capath', '/etc/grid-security/certificates') -+ $ssl_tpc_crlpath = hiera('dmlite::dav::params::ssl_tpc_crlpath', '/etc/grid-security/certificates') - $log_error = hiera('dmlite::dav::params::log_error', 'logs/ssl_error_log') - $log_transfer = hiera('dmlite::dav::params::log_transfer', 'logs/ssl_access_log') - $log_level = hiera('dmlite::dav::params::log_level','warn') -diff --git a/src/puppet/dmlite/templates/dav/zlcgdm-dav.conf b/src/puppet/dmlite/templates/dav/zlcgdm-dav.conf -index 65d8e165..64565ccf 100644 ---- a/src/puppet/dmlite/templates/dav/zlcgdm-dav.conf -+++ b/src/puppet/dmlite/templates/dav/zlcgdm-dav.conf -@@ -206,14 +206,23 @@ DiskDMLite <%= @dmlite_disk_conf %> - # CGI, it always has to be DocumentRoot/../proxycache - DiskProxyCache /var/www/proxycache - -- <% if @dav_http_port.to_i != 80 or @dav_https_port.to_i != 443 -%> -+<% if @dav_http_port.to_i != 80 or @dav_https_port.to_i != 443 -%> - NSRedirectPort <%= @dav_http_port %> <%= @dav_https_port %> -- <% end -%> -+<% end -%> - - # Trusted certificates for TPC connection to remote storage -+<% if @ssl_tpc_capath -%> -+ DiskSSLCACertificatePath <%= @ssl_tpc_capath %> -+<% else -%> - #DiskSSLCACertificatePath /etc/grid-security/certificates -- #DiskSSLCACertificateFile -- #DiskSSLCARevocationFile -+<% end -%> -+<% if @ssl_tpc_crlpath -%> -+ DiskSSLCARevocationPath <%= @ssl_tpc_crlpath %> -+ DiskSSLCARevocationCheck chain -+<% else -%> -+ #DiskSSLCARevocationPath /etc/grid-security/certificates -+ DiskSSLCARevocationCheck none -+<% end -%> - - # Terminate slow (stuck) transfers if bytes transferred - # in given time window is smaller than configured thresholds
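libcurl exposes `CURLOPT_CRLFILE` but has no option for a CRL *directory*, so LCGDM-2959 reaches into the OpenSSL verify store from a `CURLOPT_SSL_CTX_FUNCTION` callback, loads the hashed directory itself, and demands CRL checks over the whole chain. The wiring in isolation (real libcurl/OpenSSL calls, OpenSSL-backed curl builds only; error handling trimmed):

```cpp
#include <curl/curl.h>
#include <openssl/ssl.h>
#include <openssl/x509_vfy.h>

/* Load CRLs from a hashed directory and require CRL checks for the
 * whole chain -- roughly what htext_sslctx_callback() does above. */
static CURLcode sslctx_cb(CURL *, void *sslctx, void *userdata)
{
    const char *crldir = static_cast<const char *>(userdata);
    X509_STORE *store = SSL_CTX_get_cert_store(static_cast<SSL_CTX *>(sslctx));
    if (!store || !X509_STORE_load_locations(store, NULL, crldir))
        return CURLE_BAD_FUNCTION_ARGUMENT;
    X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
    return CURLE_OK;
}

int main()
{
    curl_global_init(CURL_GLOBAL_ALL);
    CURL *h = curl_easy_init();
    static const char crldir[] = "/etc/grid-security/certificates";

    curl_easy_setopt(h, CURLOPT_URL, "https://example.org/");
    curl_easy_setopt(h, CURLOPT_CAPATH, crldir);
    /* No CURLOPT for a CRL directory, so hook the SSL context instead. */
    curl_easy_setopt(h, CURLOPT_SSL_CTX_FUNCTION, sslctx_cb);
    curl_easy_setopt(h, CURLOPT_SSL_CTX_DATA, (void *)crldir);

    CURLcode rc = curl_easy_perform(h);
    curl_easy_cleanup(h);
    curl_global_cleanup();
    return rc == CURLE_OK ? 0 : 1;
}
```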
diff --git a/dmlite-LCGDM-2961-remove-client-ip-from-token.patch b/dmlite-LCGDM-2961-remove-client-ip-from-token.patch deleted file mode 100644 index 443ac66..0000000 --- a/dmlite-LCGDM-2961-remove-client-ip-from-token.patch +++ /dev/null @@ -1,577 +0,0 @@ -diff --git a/src/plugins/adapter/Adapter.cpp b/src/plugins/adapter/Adapter.cpp -index e74c0923..f0bd0c40 100644 ---- a/src/plugins/adapter/Adapter.cpp -+++ b/src/plugins/adapter/Adapter.cpp -@@ -121,7 +121,7 @@ Authn* NsAdapterFactory::createAuthn(PluginManager*) - - - DpmAdapterFactory::DpmAdapterFactory() : -- retryLimit_(3), tokenPasswd_("default"), tokenUseIp_(true), tokenLife_(28800), -+ retryLimit_(3), tokenPasswd_("default"), tokenUseIp_(true), tokenUseDn_(false), tokenLife_(28800), - adminUsername_("root"), connectionPool_(&connectionFactory_, 10) - { - adapterlogmask = Logger::get()->getMask(adapterlogname); -@@ -155,10 +155,12 @@ void DpmAdapterFactory::configure(const std::string& key, const std::string& val - this->tokenPasswd_ = value; - } - else if (key == "TokenId") { -+ this->tokenUseIp_ = false; -+ this->tokenUseDn_ = false; - if (strcasecmp(value.c_str(), "ip") == 0) - this->tokenUseIp_ = true; -- else -+ else if (strcasecmp(value.c_str(), "dn") == 0) -+ this->tokenUseDn_ = true; - } - else if (key == "TokenLife") { - this->tokenLife_ = (unsigned)atoi(value.c_str()); - } -@@ -193,7 +195,7 @@ PoolManager* DpmAdapterFactory::createPoolManager(PluginManager*) - { - return new DpmAdapterPoolManager(this, this->retryLimit_, - this->tokenPasswd_, this->tokenUseIp_, -- this->tokenLife_); -+ this->tokenUseDn_, this->tokenLife_); - } - - -@@ -207,7 +209,7 @@ std::string DpmAdapterFactory::implementedPool() throw () - - PoolDriver* DpmAdapterFactory::createPoolDriver() - { -- return new FilesystemPoolDriver(tokenPasswd_, tokenUseIp_, tokenLife_, -+ return new FilesystemPoolDriver(tokenPasswd_, tokenUseIp_, tokenUseDn_, tokenLife_, - retryLimit_, adminUsername_, dirspacereportdepth); - } - -diff --git a/src/plugins/adapter/Adapter.h b/src/plugins/adapter/Adapter.h -index 0880c83f..43c3b3d3 100644 ---- a/src/plugins/adapter/Adapter.h -+++ b/src/plugins/adapter/Adapter.h -@@ -84,6 +84,7 @@ namespace dmlite { - - std::string tokenPasswd_; - bool tokenUseIp_; -+ bool tokenUseDn_; - unsigned tokenLife_; - - /// Admin username for replication. -diff --git a/src/plugins/adapter/DpmAdapter.cpp b/src/plugins/adapter/DpmAdapter.cpp -index 8cd93c37..3dd44b6e 100644 ---- a/src/plugins/adapter/DpmAdapter.cpp -+++ b/src/plugins/adapter/DpmAdapter.cpp -@@ -197,8 +197,9 @@ DpmAdapterPoolManager::DpmAdapterPoolManager(DpmAdapterFactory* factory, - unsigned retryLimit, - const std::string& passwd, - bool useIp, -+ bool useDn, - unsigned life) : -- si_(NULL), retryLimit_(retryLimit), tokenPasswd_(passwd), tokenUseIp_(useIp), -+ si_(NULL), retryLimit_(retryLimit), tokenPasswd_(passwd), tokenUseIp_(useIp), tokenUseDn_(useDn), - tokenLife_(life), userId_(""), fqans_(NULL), nFqans_(0), factory_(factory), secCtx_(NULL) - { - Log(Logger::Lvl3, adapterlogmask, adapterlogname, ""); -@@ -314,8 +315,10 @@ void DpmAdapterPoolManager::setSecurityContext(const SecurityContext* ctx) - - if (this->tokenUseIp_) - this->userId_ = ctx->credentials.remoteAddress; -- else -+ else if (this->tokenUseDn_) - this->userId_ = ctx->credentials.clientName; -+ else -+ this->userId_ = ""; - - Log(Logger::Lvl3, adapterlogmask, adapterlogname, "Exiting. uid=" << this->userId_ << - " gid=" << ( (ctx->groups.size() > 0) ?
ctx->groups[0].getUnsigned("gid"):-1) << -diff --git a/src/plugins/adapter/DpmAdapter.h b/src/plugins/adapter/DpmAdapter.h -index e19ccfa7..c956e59e 100644 ---- a/src/plugins/adapter/DpmAdapter.h -+++ b/src/plugins/adapter/DpmAdapter.h -@@ -44,7 +44,7 @@ namespace dmlite { - class DpmAdapterPoolManager: public PoolManager { - public: - DpmAdapterPoolManager(DpmAdapterFactory* factory, unsigned retryLimit, -- const std::string&, bool, unsigned) ; -+ const std::string&, bool, bool, unsigned) ; - ~DpmAdapterPoolManager(); - - std::string getImplId() const throw (); -@@ -75,6 +75,7 @@ namespace dmlite { - - std::string tokenPasswd_; - bool tokenUseIp_; -+ bool tokenUseDn_; - unsigned tokenLife_; - std::string userId_; - -diff --git a/src/plugins/adapter/FilesystemDriver.cpp b/src/plugins/adapter/FilesystemDriver.cpp -index 1d1eb86a..1a6eb7b8 100644 ---- a/src/plugins/adapter/FilesystemDriver.cpp -+++ b/src/plugins/adapter/FilesystemDriver.cpp -@@ -28,12 +28,12 @@ using namespace dmlite; - std::map< std::string, poolfsnfo > FilesystemPoolHandler::dpmfs_; - boost::mutex FilesystemPoolHandler::mtx; - --FilesystemPoolDriver::FilesystemPoolDriver(const std::string& passwd, bool useIp, -+FilesystemPoolDriver::FilesystemPoolDriver(const std::string& passwd, bool useIp, bool useDn, - unsigned life, unsigned retryLimit, - const std::string& adminUsername, - int ddepth - ): -- secCtx_(NULL), tokenPasswd_(passwd), tokenUseIp_(useIp), tokenLife_(life), -+ secCtx_(NULL), tokenPasswd_(passwd), tokenUseIp_(useIp), tokenUseDn_(useDn), tokenLife_(life), - retryLimit_(retryLimit), fqans_(NULL), nFqans_(0), adminUsername_(adminUsername) - { - -@@ -145,8 +145,10 @@ void FilesystemPoolDriver::setSecurityContext(const SecurityContext* ctx) - // Id mechanism - if (this->tokenUseIp_) - this->userId_ = this->secCtx_->credentials.remoteAddress; -- else -+ else if (this->tokenUseDn_) - this->userId_ = this->secCtx_->credentials.clientName; -+ else -+ this->userId_ = ""; - - Log(Logger::Lvl3, adapterlogmask, adapterlogname, "userid=" << userId_ << " fqan=" << ( (fqans_ && nFqans_) ? fqans_[0]:"none") ); - } -diff --git a/src/plugins/adapter/FilesystemDriver.h b/src/plugins/adapter/FilesystemDriver.h -index 7251632a..a1088fec 100644 ---- a/src/plugins/adapter/FilesystemDriver.h -+++ b/src/plugins/adapter/FilesystemDriver.h -@@ -25,7 +25,7 @@ namespace dmlite { - /// Filesystem driver. 
- class FilesystemPoolDriver: public PoolDriver { - public: -- FilesystemPoolDriver(const std::string&, bool, unsigned, unsigned, -+ FilesystemPoolDriver(const std::string&, bool, bool, unsigned, unsigned, - const std::string&, int); - ~FilesystemPoolDriver(); - -@@ -54,6 +54,7 @@ namespace dmlite { - - std::string tokenPasswd_; - bool tokenUseIp_; -+ bool tokenUseDn_; - unsigned tokenLife_; - std::string userId_; - unsigned retryLimit_; -diff --git a/src/plugins/adapter/IO.cpp b/src/plugins/adapter/IO.cpp -index 4d1ebbb7..7b094280 100644 ---- a/src/plugins/adapter/IO.cpp -+++ b/src/plugins/adapter/IO.cpp -@@ -17,7 +17,7 @@ - using namespace dmlite; - - --StdIOFactory::StdIOFactory(): passwd_("default"), useIp_(true) -+StdIOFactory::StdIOFactory(): passwd_("default"), useIp_(true), useDn_(false) - { - Log(Logger::Lvl4, adapterlogmask, adapterlogname, " Ctor"); - Cthread_init(); -@@ -43,10 +43,12 @@ void StdIOFactory::configure(const std::string& key, const std::string& value) - this->passwd_ = value; - } - else if (key == "TokenId") { -+ this->useIp_ = false; -+ this->useDn_ = false; - if (strcasecmp(value.c_str(), "ip") == 0) - this->useIp_ = true; -- else -- this->useIp_ = false; -+ else if (strcasecmp(value.c_str(), "dn") == 0) -+ this->useDn_ = true; - } - else if (key == "DpmHost" || key == "Host") { - setenv("DPM_HOST", value.c_str(), 1); -@@ -63,13 +65,13 @@ void StdIOFactory::configure(const std::string& key, const std::string& value) - - IODriver* StdIOFactory::createIODriver(PluginManager* pm) - { -- return new StdIODriver(this->passwd_, this->useIp_); -+ return new StdIODriver(this->passwd_, this->useIp_, this->useDn_); - } - - - --StdIODriver::StdIODriver(std::string passwd, bool useIp): -- secCtx_(0), passwd_(passwd), useIp_(useIp) -+StdIODriver::StdIODriver(std::string passwd, bool useIp, bool useDn): -+ secCtx_(0), passwd_(passwd), useIp_(useIp), useDn_(useDn) - { - // Nothing - Log(Logger::Lvl4, adapterlogmask, adapterlogname, " Ctor"); -@@ -113,7 +115,7 @@ IOHandler* StdIODriver::createIOHandler(const std::string& pfn, - std::string userId; - if (this->useIp_) - userId = this->secCtx_->credentials.remoteAddress; -- else -+ else if (this->useDn_) - userId = this->secCtx_->credentials.clientName; - - if (dmlite::validateToken(extras.getString("token"), -@@ -122,7 +124,7 @@ IOHandler* StdIODriver::createIOHandler(const std::string& pfn, - flags != O_RDONLY) != kTokenOK) - - throw DmException(EACCES, "Token does not validate (using %s) on pfn %s", -- this->useIp_?"IP":"DN", pfn.c_str()); -+ this->useIp_?"IP":this->useDn_?"DN":"None", pfn.c_str()); - - } - -diff --git a/src/plugins/adapter/IO.h b/src/plugins/adapter/IO.h -index 375c1e06..53cab89a 100644 ---- a/src/plugins/adapter/IO.h -+++ b/src/plugins/adapter/IO.h -@@ -21,11 +21,12 @@ namespace dmlite { - private: - std::string passwd_; - bool useIp_; -+ bool useDn_; - }; - - class StdIODriver: public IODriver { - public: -- StdIODriver(std::string passwd, bool useIp); -+ StdIODriver(std::string passwd, bool useIp, bool useDn); - virtual ~StdIODriver(); - - std::string getImplId() const throw(); -@@ -48,6 +49,7 @@ namespace dmlite { - - std::string passwd_; - bool useIp_; -+ bool useDn_; - }; - - class StdIOHandler: public IOHandler { -diff --git a/src/plugins/adapter/RFIO.cpp b/src/plugins/adapter/RFIO.cpp -index 3dab1566..942cb623 100644 ---- a/src/plugins/adapter/RFIO.cpp -+++ b/src/plugins/adapter/RFIO.cpp -@@ -27,7 +27,7 @@ int rfio_parse(char *, char **, char **); - - - --StdRFIOFactory::StdRFIOFactory(): 
passwd_("default"), useIp_(true) -+StdRFIOFactory::StdRFIOFactory(): passwd_("default"), useIp_(true), useDn_(false) - { - adapterRFIOlogmask = Logger::get()->getMask(adapterRFIOlogname); - -@@ -54,10 +54,12 @@ void StdRFIOFactory::configure(const std::string& key, const std::string& value) - this->passwd_ = value; - } - else if (key == "TokenId") { -+ this->useIp_ = false; -+ this->useDn_ = false; - if (strcasecmp(value.c_str(), "ip") == 0) - this->useIp_ = true; -- else -- this->useIp_ = false; -+ if (strcasecmp(value.c_str(), "dn") == 0) -+ this->useDn_ = true; - } - else if (key == "DpmHost" || key == "Host") { - setenv("DPM_HOST", value.c_str(), 1); -@@ -74,13 +76,13 @@ void StdRFIOFactory::configure(const std::string& key, const std::string& value) - - IODriver* StdRFIOFactory::createIODriver(PluginManager* pm) - { -- return new StdRFIODriver(this->passwd_, this->useIp_); -+ return new StdRFIODriver(this->passwd_, this->useIp_, this->useDn_); - } - - - --StdRFIODriver::StdRFIODriver(std::string passwd, bool useIp): -- si_(0), secCtx_(0), passwd_(passwd), useIp_(useIp) -+StdRFIODriver::StdRFIODriver(std::string passwd, bool useIp, bool useDn): -+ si_(0), secCtx_(0), passwd_(passwd), useIp_(useIp), useDn_(useDn) - { - // Nothing - } -@@ -129,7 +131,7 @@ IOHandler* StdRFIODriver::createIOHandler(const std::string& pfn, - std::string userId; - if (this->useIp_) - userId = this->secCtx_->credentials.remoteAddress; -- else -+ else if (this->useDn_) - userId = this->secCtx_->credentials.clientName; - - if (dmlite::validateToken(extras.getString("token"), -@@ -138,7 +140,7 @@ IOHandler* StdRFIODriver::createIOHandler(const std::string& pfn, - flags != O_RDONLY) != kTokenOK) - - throw DmException(EACCES, "Token does not validate (using %s) on pfn %s", -- this->useIp_?"IP":"DN", pfn.c_str()); -+ this->useIp_?"IP":this->useDn_?"DN":"None", pfn.c_str()); - - } - -diff --git a/src/plugins/adapter/RFIO.h b/src/plugins/adapter/RFIO.h -index bfe747df..a3e850af 100644 ---- a/src/plugins/adapter/RFIO.h -+++ b/src/plugins/adapter/RFIO.h -@@ -22,11 +22,12 @@ namespace dmlite { - private: - std::string passwd_; - bool useIp_; -+ bool useDn_; - }; - - class StdRFIODriver: public IODriver { - public: -- StdRFIODriver(std::string passwd, bool useIp); -+ StdRFIODriver(std::string passwd, bool useIp, bool useDn); - virtual ~StdRFIODriver(); - - std::string getImplId() const throw(); -@@ -45,6 +46,7 @@ namespace dmlite { - - std::string passwd_; - bool useIp_; -+ bool useDn_; - }; - - class StdRFIOHandler: public IOHandler { -diff --git a/src/plugins/adapter/adapter.conf.in b/src/plugins/adapter/adapter.conf.in -index a1ffaff3..e2b848d4 100644 ---- a/src/plugins/adapter/adapter.conf.in -+++ b/src/plugins/adapter/adapter.conf.in -@@ -29,5 +29,5 @@ RetryInterval 2 - - # Token generation - TokenPassword change-this --TokenId ip -+TokenId none - TokenLife 1000 -diff --git a/src/plugins/domeadapter/DomeAdapter.cpp b/src/plugins/domeadapter/DomeAdapter.cpp -index 20b6a265..674f6ccb 100644 ---- a/src/plugins/domeadapter/DomeAdapter.cpp -+++ b/src/plugins/domeadapter/DomeAdapter.cpp -@@ -41,10 +41,12 @@ void DomeAdapterFactory::configure(const std::string& key, const std::string& va - - } - else if (key == "TokenId") { -+ this->tokenUseIp_ = false; -+ this->tokenUseDn_ = false; - if (strcasecmp(value.c_str(), "ip") == 0) - this->tokenUseIp_ = true; -- else -- this->tokenUseIp_ = false; -+ else if (strcasecmp(value.c_str(), "dn") == 0) -+ this->tokenUseDn_ = true; - } - else if (key == "TokenLife") { - this->tokenLife_ 
= (unsigned)atoi(value.c_str()); -diff --git a/src/plugins/domeadapter/DomeAdapter.h b/src/plugins/domeadapter/DomeAdapter.h -index 7c195f5a..908b1e9f 100644 ---- a/src/plugins/domeadapter/DomeAdapter.h -+++ b/src/plugins/domeadapter/DomeAdapter.h -@@ -42,6 +42,7 @@ namespace dmlite { - - std::string domehead_; - bool tokenUseIp_; -+ bool tokenUseDn_; - std::string tokenPasswd_; - unsigned tokenLife_; - -diff --git a/src/plugins/domeadapter/DomeAdapterDriver.cpp b/src/plugins/domeadapter/DomeAdapterDriver.cpp -index c4e9233d..7b257550 100644 ---- a/src/plugins/domeadapter/DomeAdapterDriver.cpp -+++ b/src/plugins/domeadapter/DomeAdapterDriver.cpp -@@ -43,8 +43,10 @@ void DomeAdapterPoolDriver::setSecurityContext(const SecurityContext* secCtx) - // Id mechanism - if (factory_->tokenUseIp_) - userId_ = secCtx_->credentials.remoteAddress; -- else -+ else if (factory_->tokenUseDn_) - userId_ = secCtx_->credentials.clientName; -+ else -+ userId_ = ""; - } - - PoolHandler* DomeAdapterPoolDriver::createPoolHandler(const std::string& poolname) { -diff --git a/src/plugins/domeadapter/DomeAdapterIO.cpp b/src/plugins/domeadapter/DomeAdapterIO.cpp -index 63a0b4ed..baf50c64 100644 ---- a/src/plugins/domeadapter/DomeAdapterIO.cpp -+++ b/src/plugins/domeadapter/DomeAdapterIO.cpp -@@ -21,7 +21,7 @@ using namespace dmlite; - using namespace Davix; - - DomeIOFactory::DomeIOFactory() --: tunnelling_protocol_("http"), tunnelling_port_("80"), passwd_("default"), useIp_(true), davixPool_(&davixFactory_, 10) -+: tunnelling_protocol_("http"), tunnelling_port_("80"), passwd_("default"), useIp_(true), useDn_(false), davixPool_(&davixFactory_, 10) - { - domeadapterlogmask = Logger::get()->getMask(domeadapterlogname); - Log(Logger::Lvl4, domeadapterlogmask, domeadapterlogname, " Ctor"); -@@ -41,10 +41,12 @@ void DomeIOFactory::configure(const std::string& key, const std::string& value) - this->passwd_ = value; - } - else if (key == "TokenId") { -+ this->useIp_ = false; -+ this->useDn_ = false; - if (strcasecmp(value.c_str(), "ip") == 0) - this->useIp_ = true; -- else -- this->useIp_ = false; -+ else if (strcasecmp(value.c_str(), "dn") == 0) -+ this->useDn_ = true; - } - else if (key == "DomeHead") { - domehead_ = value; -@@ -82,13 +84,13 @@ void DomeIOFactory::configure(const std::string& key, const std::string& value) - - IODriver* DomeIOFactory::createIODriver(PluginManager* pm) - { -- return new DomeIODriver(tunnelling_protocol_, tunnelling_port_, passwd_, useIp_, domedisk_, davixPool_); -+ return new DomeIODriver(tunnelling_protocol_, tunnelling_port_, passwd_, useIp_, useDn_, domedisk_, davixPool_); - } - - DomeIODriver::DomeIODriver(std::string tunnelling_protocol, std::string tunnelling_port, -- std::string passwd, bool useIp, std::string domedisk, DavixCtxPool &davixPool) -+ std::string passwd, bool useIp, bool useDn, std::string domedisk, DavixCtxPool &davixPool) - : secCtx_(0), tunnelling_protocol_(tunnelling_protocol), tunnelling_port_(tunnelling_port), -- passwd_(passwd), useIp_(useIp), domedisk_(domedisk), davixPool_(davixPool) -+ passwd_(passwd), useIp_(useIp), useDn_(useDn), domedisk_(domedisk), davixPool_(davixPool) - { - // Nothing - Log(Logger::Lvl4, domeadapterlogmask, domeadapterlogname, " Ctor"); -@@ -128,15 +130,17 @@ IOHandler* DomeIODriver::createIOHandler(const std::string& pfn, - std::string userId; - if (this->useIp_) - userId = this->secCtx_->credentials.remoteAddress; -- else -+ else if (this->useDn_) - userId = this->secCtx_->credentials.clientName; -+ else -+ userId = ""; - - 
Log(Logger::Lvl4, domeadapterlogmask, domeadapterlogname, " Validating token: userid: '" << userId << "' pfn: '" << pfn << "'"); - - if(dmlite::validateToken(extras.getString("token"), userId, pfn, - this->passwd_, flags != O_RDONLY) != kTokenOK) { - throw DmException(EACCES, "Token does not validate (using %s) on pfn '%s' and userId '%s'", -- this->useIp_?"IP":"DN", pfn.c_str(), userId.c_str()); -+ this->useIp_?"IP":this->useDn_?"DN":"None", pfn.c_str(), userId.c_str()); - } - } - -diff --git a/src/plugins/domeadapter/DomeAdapterIO.h b/src/plugins/domeadapter/DomeAdapterIO.h -index 33981b0c..573cf461 100644 ---- a/src/plugins/domeadapter/DomeAdapterIO.h -+++ b/src/plugins/domeadapter/DomeAdapterIO.h -@@ -27,6 +27,7 @@ namespace dmlite { - - std::string passwd_; - bool useIp_; -+ bool useDn_; - std::string domedisk_; - std::string domehead_; - -@@ -37,7 +38,7 @@ namespace dmlite { - class DomeIODriver: public IODriver { - public: - DomeIODriver(std::string tunnelling_protocol, std::string tunnelling_port, -- std::string passwd, bool useIp, std::string domedisk, DavixCtxPool &davixPool); -+ std::string passwd, bool useIp, bool useDn, std::string domedisk, DavixCtxPool &davixPool); - virtual ~DomeIODriver(); - - std::string getImplId() const throw(); -@@ -59,6 +60,7 @@ namespace dmlite { - - std::string passwd_; - bool useIp_; -+ bool useDn_; - - std::string domedisk_; - DavixCtxPool &davixPool_; -diff --git a/src/plugins/domeadapter/DomeAdapterPools.cpp b/src/plugins/domeadapter/DomeAdapterPools.cpp -index dd789e52..ddcfda44 100644 ---- a/src/plugins/domeadapter/DomeAdapterPools.cpp -+++ b/src/plugins/domeadapter/DomeAdapterPools.cpp -@@ -47,8 +47,10 @@ void DomeAdapterPoolManager::setSecurityContext(const SecurityContext* secCtx) - // Id mechanism - if (factory_->tokenUseIp_) - userId_ = sec_->credentials.remoteAddress; -- else -+ else if (factory_->tokenUseDn_) - userId_ = sec_->credentials.clientName; -+ else -+ userId_ = ""; - } - - static PoolManager::PoolAvailability getAvailability(const Pool &p) { -diff --git a/src/plugins/domeadapter/domeadapter.conf.example b/src/plugins/domeadapter/domeadapter.conf.example -index 4e3fe38f..4d52387c 100644 ---- a/src/plugins/domeadapter/domeadapter.conf.example -+++ b/src/plugins/domeadapter/domeadapter.conf.example -@@ -14,7 +14,7 @@ DomeHead https://domehead-trunk.cern.ch/domehead - - # Token generation - TokenPassword change-this --TokenId ip -+TokenId none - TokenLife 1000 - - # Adminuser for replication and filesystem selection -diff --git a/src/puppet/dmlite/manifests/disk.pp b/src/puppet/dmlite/manifests/disk.pp -index ad06db33..121fd942 100644 ---- a/src/puppet/dmlite/manifests/disk.pp -+++ b/src/puppet/dmlite/manifests/disk.pp -@@ -1,6 +1,6 @@ - class dmlite::disk ( - String $token_password, -- Enum['ip','id'] $token_id = 'ip', -+ Enum['ip','dn','none'] $token_id = 'none', - Optional[String] $mysql_username = undef, - Optional[String] $mysql_password = undef, - Optional[Stdlib::Host] $mysql_host = undef, -diff --git a/src/puppet/dmlite/manifests/disk_hdfs.pp b/src/puppet/dmlite/manifests/disk_hdfs.pp -index de259fc6..cf7fc6a7 100644 ---- a/src/puppet/dmlite/manifests/disk_hdfs.pp -+++ b/src/puppet/dmlite/manifests/disk_hdfs.pp -@@ -1,6 +1,6 @@ - class dmlite::disk_hdfs ( - String $token_password, -- Enum['ip','id'] $token_id = 'ip', -+ Enum['ip','dn','none'] $token_id = 'none', - Integer $token_life = 1000, - String $mysql_username, - String $mysql_password, -diff --git a/src/puppet/dmlite/manifests/head.pp
b/src/puppet/dmlite/manifests/head.pp -index 16d678ea..ee735de6 100644 ---- a/src/puppet/dmlite/manifests/head.pp -+++ b/src/puppet/dmlite/manifests/head.pp -@@ -1,6 +1,6 @@ - class dmlite::head ( - String $token_password, -- Enum['ip','id'] $token_id = 'ip', -+ Enum['ip','dn','none'] $token_id = 'none', - String $mysql_username, - String $mysql_password, - Stdlib::Host $mysql_host = 'localhost', -diff --git a/src/puppet/dmlite/manifests/head_hdfs.pp b/src/puppet/dmlite/manifests/head_hdfs.pp -index efa1e023..705871f8 100644 ---- a/src/puppet/dmlite/manifests/head_hdfs.pp -+++ b/src/puppet/dmlite/manifests/head_hdfs.pp -@@ -1,6 +1,6 @@ - class dmlite::head_hdfs ( - String $token_password, -- Enum['ip','id'] $token_id = 'ip', -+ Enum['ip','dn','none'] $token_id = 'none', - Integer $token_life = 1000, - String $mysql_username, - String $mysql_password, -diff --git a/src/puppet/dmlite/manifests/plugins/hdfs/params.pp b/src/puppet/dmlite/manifests/plugins/hdfs/params.pp -index 11fc8fe7..9d3ef1e5 100644 ---- a/src/puppet/dmlite/manifests/plugins/hdfs/params.pp -+++ b/src/puppet/dmlite/manifests/plugins/hdfs/params.pp -@@ -17,6 +17,6 @@ class dmlite::plugins::hdfs::params ( - $java_home = hiera('dmlite::plugins::hdfs::params::java_home','/usr/lib/jvm/java/') - $token_password = hiera('dmlite::plugins::hdfs::params::token_password','change-this') - $map_file = hiera('dmlite::plugins::hdfs::params::map_file','/etc/lcgdm-mapfile') -- $token_id = hiera('dmlite::plugins::hdfs::params::token_id','ip') -+ $token_id = hiera('dmlite::plugins::hdfs::params::token_id','none') - $token_life = hiera('dmlite::plugins::hdfs::params::token_life',1000) - } diff --git a/dmlite-LCGDM-2962-improve-dmlite-tester.patch b/dmlite-LCGDM-2962-improve-dmlite-tester.patch deleted file mode 100644 index 02e8435..0000000 --- a/dmlite-LCGDM-2962-improve-dmlite-tester.patch +++ /dev/null @@ -1,151 +0,0 @@ -diff --git a/tests/dpm/dpm-tester.py b/tests/dpm/dpm-tester.py -index 0d637b61..5b8969fd 100755 ---- a/tests/dpm/dpm-tester.py -+++ b/tests/dpm/dpm-tester.py -@@ -101,7 +101,7 @@ def ctrlc(signal, frame): - elif IMPATIENCE < 5: - print(Color.red("\nhit ctrl-c {0} more times to terminate forcefully".format(5 - IMPATIENCE))) - elif IMPATIENCE == 5: -- print(Color.red("\nterminating forcefully".format(5 - IMPATIENCE))) -+ print(Color.red("\nterminating forcefully")) - sys.exit(1) - - def printable_outcome(outcome): -@@ -367,18 +367,6 @@ def run_gfal_expect_exception(function, arguments, extype=None, excode=None): - if not expect_exception(result.status, result, extype=extype, excode=excode): return result - return result.success() - --def run_expect_ok(result, function, args): -- status = run_gfal(function, args) -- if expect_ok(status, result): -- result.success() -- return result -- --def run_expect_exception(result, function, args, extype=None, excode=None): -- status = run_gfal(function, args) -- if expect_exception(status, result, extype, excode): -- result.success() -- return result -- - def get_caller_name(): - caller_function_name = inspect.stack()[2][3] - return caller_function_name.replace("_", " ") -@@ -821,6 +809,38 @@ class ProtocolTester(object): - return result - return result.success() - -+ def Third_party_copy(self, source, tpc_source, tpc_destination): -+ result = self.new_result() -+ -+ # skip TPC for root protocol in case of missing environment -+ # variable telling XRootD client libraries to delegate proxy -+ if tpc_destination.startswith("root://") and 'XrdSecGSIDELEGPROXY' not in os.environ: -+ return 
result.success() -+ -+ sourcechksum = calculate_checksum("adler32", source) -+ -+ res = self.Upload_testfile(source, tpc_source) -+ if not res.ok(): -+ result.absorb(res) -+ return result -+ -+ self.Verify_checksum(tpc_source, "adler32", sourcechksum) -+ -+ res = self.Upload_testfile(tpc_source, tpc_destination) -+ if not res.ok(): -+ result.absorb(res) -+ return result -+ -+ self.Verify_checksum(tpc_destination, "adler32", sourcechksum) -+ -+ for tpc_file in [tpc_source, tpc_destination]: -+ res = self.Remove_file(tpc_file) -+ if not res.ok(): -+ result.absorb(res) -+ return result -+ -+ return result.success() -+ - def getargs(): - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, - description="Verifies the correct operation of a DPM instance using gfal2.\n", -@@ -1035,11 +1055,9 @@ def play_with_file(scope, tester, source, destination): - descr = "Upload to testdir: " + extract_file(destination) - orch.add_initialization(tester.Upload_testfile, [add_file_prefix(source), destination], descr) - -- # root in gfal does not support checksums -- if not destination.startswith("root://"): -- sourcechksum = calculate_checksum("md5", source) -- descr = "Verify md5 checksum: " + sourcechksum -- orch.add(tester.Verify_checksum, [destination, "md5", sourcechksum], descr) -+ sourcechksum = calculate_checksum("md5", source) -+ descr = "Verify md5 checksum: " + sourcechksum -+ orch.add(tester.Verify_checksum, [destination, "md5", sourcechksum], descr) - - descr = "Download from testdir: " + extract_file(destination) - orch.add(tester.Download_testfile, [destination, add_file_prefix(local_testfile)], descr) -@@ -1050,11 +1068,9 @@ def play_with_file(scope, tester, source, destination): - descr = "Verify size is " + str(st.st_size) - orch.add(tester.Verify_size, [destination, st.st_size], descr) - -- # root in gfal does not support checksums -- if not destination.startswith("root://"): -- sourcechksum = calculate_checksum("md5", source) -- descr = "Verify md5 checksum: " + sourcechksum -- orch.add(tester.Verify_checksum, [destination, "md5", sourcechksum], descr) -+ sourcechksum = calculate_checksum("md5", source) -+ descr = "Verify md5 checksum: " + sourcechksum -+ orch.add(tester.Verify_checksum, [destination, "md5", sourcechksum], descr) - - descr = "Remove: " + extract_file(destination) - orch.add_cleanup(tester.Remove_file, [destination], descr) -@@ -1211,6 +1227,11 @@ def single_protocol_tests(args, scope): - descr = f("Upload and delete the same file {ntimes} times") - orch.add(tester.Upload_delete_loop, ["file:///etc/services", path_join(target, "upload-delete-loop"), ntimes], descr) - -+ tpc_source = path_join(target, "tpc.source") -+ tpc_destination = path_join(target, "tpc.destination") -+ descr = "Third-party-copy: " + extract_path(target) -+ orch.add(tester.Third_party_copy, ["file:///etc/services", tpc_source, tpc_destination], descr) -+ - descr = "Recursively remove contents: " + extract_path(target) - orch.add_cleanup(tester.Recursively_remove_files, [target], descr) - -@@ -1241,7 +1262,7 @@ def set_credentials(cert, key): - # only cert, set a proxy - unset_environ("X509_USER_CERT") - unset_environ("X509_USER_KEY") -- os.environ["X509_USER_PROXY"] = args.cert -+ os.environ["X509_USER_PROXY"] = cert - - def xml_start(): - print("""""") -@@ -1290,7 +1311,7 @@ def test_dome(args): - - target_path = extract_path(target) - -- orch.add_initialization(dometester.info, []) -+ orch.add_initialization(dometester.info, [], "Init") - 
orch.add(tester.Create_directory, [target], f("Create directory: {target_path}")) - - # checksum tests -@@ -1320,7 +1341,8 @@ def test_dome(args): - tokendir_path = extract_path(tokendir) - orch.add(tester.Create_directory, [tokendir], f("Create directory: {tokendir_path}")) - -- orch.add(dometester.setquotatoken, ["DPM_TESTER_QUOTATOKEN", tokendir_path, args.dome_poolname, 1024*1024]) -+ orch.add(dometester.setquotatoken, ["DPM_TESTER_QUOTATOKEN", tokendir_path, args.dome_poolname, 1024*1024], -+ "Set quotatoken") - orch.add(tester.Upload_testfile, [f("file://{locations[1]}"), path_join(tokendir, "1mb")], - "Upload file with size equal to quota") - -@@ -1369,7 +1391,8 @@ def test_dome(args): - orch.add(tester.Upload_testfile, [f("file://{locations[0]}"), path_join(tokendir, "1b-3"), None, True], - "Upload 1-byte file, expect it to fail") - -- orch.add_cleanup(dometester.delquotatoken, [tokendir_path, args.dome_poolname]) -+ orch.add_cleanup(dometester.delquotatoken, [tokendir_path, args.dome_poolname], -+ "Cleanup quotatoken") - - orch.add_cleanup(tester.Recursively_remove_files, [target], - f("Recursively remove contents: {target_path}")) diff --git a/dmlite-LCGDM-2963-macaroon-READ_METADATA.patch b/dmlite-LCGDM-2963-macaroon-READ_METADATA.patch deleted file mode 100644 index e32581c..0000000 --- a/dmlite-LCGDM-2963-macaroon-READ_METADATA.patch +++ /dev/null @@ -1,58 +0,0 @@ -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_ns/mymacaroons.h b/src/plugins/apache-httpd/src/mod_lcgdm_ns/mymacaroons.h -index 2ff0daf7..0b786312 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_ns/mymacaroons.h -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_ns/mymacaroons.h -@@ -27,6 +27,8 @@ typedef enum { - ACTIVITY_LIST = 1 << 2, - ACTIVITY_DELETE = 1 << 3, - ACTIVITY_MANAGE = 1 << 4, -+ ACTIVITY_READ_METADATA = 1 << 5, -+ ACTIVITY_UPDATE_METADATA = 1 << 6, - } activity_t; - - #define MACAROON_MECH "macaroon" -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_ns/mymacaroons.c b/src/plugins/apache-httpd/src/mod_lcgdm_ns/mymacaroons.c -index af395450..98fe5863 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_ns/mymacaroons.c -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_ns/mymacaroons.c -@@ -46,6 +46,8 @@ static const activity_map_t activity_map[] = { - {ACTIVITY_LIST, "LIST"}, - {ACTIVITY_DELETE, "DELETE"}, - {ACTIVITY_MANAGE, "MANAGE"}, -+ {ACTIVITY_READ_METADATA, "READ_METADATA"}, -+ {ACTIVITY_UPDATE_METADATA, "UPDATE_METADATA"}, - {ACTIVITY_NONE, NULL} - }; - -@@ -552,7 +554,7 @@ static int verify_caveat(void *data, const unsigned char *pred, size_t pred_sz) - break; - case M_GET: - if (info->request->header_only) { -- allowed = activities & ACTIVITY_LIST; -+ allowed = activities; // READ_METADATA is implied if any activity - } - else { - allowed = activities & ACTIVITY_DOWNLOAD; -@@ -566,12 +568,19 @@ static int verify_caveat(void *data, const unsigned char *pred, size_t pred_sz) - allowed = activities & ACTIVITY_UPLOAD; - } - break; -- case M_PROPFIND: -- allowed = activities & ACTIVITY_LIST; -+ case M_MOVE: -+ allowed = activities & ACTIVITY_MANAGE; - break; - case M_DELETE: - allowed = activities & ACTIVITY_DELETE; - break; -+ case M_PROPFIND: -+ // should be always allowed for files -+ { -+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, info->request, "M_PROPFIND stat name=%s ino=%i parent=%i mode=%i", info->stat.name, info->stat.stat.st_ino, info->stat.parent, info->stat.stat.st_mode); -+ } -+ allowed = activities & ACTIVITY_LIST; -+ break; - case M_PROPPATCH: - allowed = activities & 
ACTIVITY_MANAGE;
- break;
- 
diff --git a/dmlite-LCGDM-2964-dont-ignore-mapfile-for-https.patch b/dmlite-LCGDM-2964-dont-ignore-mapfile-for-https.patch
deleted file mode 100644
index e9cf7fe..0000000
--- a/dmlite-LCGDM-2964-dont-ignore-mapfile-for-https.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-diff --git a/src/dome/DomeStatus.cpp b/src/dome/DomeStatus.cpp
-index cd174900..209d6cd7 100644
---- a/src/dome/DomeStatus.cpp
-+++ b/src/dome/DomeStatus.cpp
-@@ -1406,9 +1406,12 @@ DmStatus DomeStatus::getIdMap(const std::string& userName,
- 
- 
- 
-+ // LCGDM-2964: since apache configuration update for macaroons https
-+ // user automatically comes with assigned nogroup. Treat such case
-+ // same as user without any assigned group
- 
- // No VO information, so use the mapping file to get the group
-- if (groupNames.empty()) {
-+ if (groupNames.empty() || (groupNames.size() == 1 && groupNames[0] == "nogroup")) {
- std::pair::iterator,
- std::multimap::iterator> ppp;
- 
diff --git a/dmlite-LCGDM-2967-disable-disk-on-headnode.patch b/dmlite-LCGDM-2967-disable-disk-on-headnode.patch
deleted file mode 100644
index 1db1d30..0000000
--- a/dmlite-LCGDM-2967-disable-disk-on-headnode.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/src/puppet/dpm/manifests/headnode.pp b/src/puppet/dpm/manifests/headnode.pp
-index 93aba0e5..537de95e 100644
---- a/src/puppet/dpm/manifests/headnode.pp
-+++ b/src/puppet/dpm/manifests/headnode.pp
-@@ -283,6 +283,7 @@ class dpm::headnode (
- Class[dmlite::install] ~> Class[dmlite::dav::config]
- 
- class{'dmlite::dav':
-+ enable_disk => false,
- ns_macaroon_secret => $http_macaroon_secret,
- enable_ns_oidc => $oidc_clientid != undef,
- ns_oidc_clientid => $oidc_clientid,
diff --git a/dmlite-complilation-dependencies.patch b/dmlite-complilation-dependencies.patch
deleted file mode 100644
index 33e7366..0000000
--- a/dmlite-complilation-dependencies.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-diff --git a/src/plugins/apache-httpd/src/client/CMakeLists.txt b/src/plugins/apache-httpd/src/client/CMakeLists.txt
-index 88daf71f..640de11c 100644
---- a/src/plugins/apache-httpd/src/client/CMakeLists.txt
-+++ b/src/plugins/apache-httpd/src/client/CMakeLists.txt
-@@ -21,17 +21,18 @@ find_package (Ldap REQUIRED)
- pkg_check_modules(GSOAP_PKG REQUIRED gsoapssl>=2.7)
- 
- # Compile the WSDL
---add_custom_command (OUTPUT typemap.dat
-+add_custom_command (OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/typemap.dat
- COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/typemap.dat ${CMAKE_CURRENT_BINARY_DIR}/typemap.dat)
---add_custom_command (OUTPUT delegation.h
-+add_custom_command (OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/delegation.h
- COMMAND wsdl2h -c -o${CMAKE_CURRENT_BINARY_DIR}/delegation.h ${DELEGATION_WSDL}
-- DEPENDS typemap.dat)
-+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/typemap.dat)
---add_custom_command (OUTPUT soapC.c soapClient.c
-+add_custom_command (OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/soapC.c ${CMAKE_CURRENT_BINARY_DIR}/soapClient.c
- COMMAND soapcpp2 -c ${CMAKE_CURRENT_BINARY_DIR}/delegation.h
-- DEPENDS delegation.h)
-+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/delegation.h)
---add_custom_command (OUTPUT DelegationSoapBinding.c
-+add_custom_command (OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/DelegationSoapBinding.c
- COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/DelegationSoapBinding.nsmap ${CMAKE_CURRENT_BINARY_DIR}/DelegationSoapBinding.c
-- DEPENDS soapC.c)
-+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/soapC.c)
-+add_custom_target(delegation2soap DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/delegation.h ${CMAKE_CURRENT_BINARY_DIR}/soapC.c
${CMAKE_CURRENT_BINARY_DIR}/soapClient.c ${CMAKE_CURRENT_BINARY_DIR}/DelegationSoapBinding.c) - - # Includes - include_directories (${CMAKE_CURRENT_BINARY_DIR} ${GRIDSITE_INCLUDE_DIR} -@@ -59,7 +60,7 @@ set_target_properties( lcgdmhtext PROPERTIES - SOVERSION ${lcgdm-dav_MAJOR} - ) - --add_dependencies (lcgdmhtext soapC.c DelegationSoapBinding.c) -+add_dependencies (lcgdmhtext delegation2soap) - message(STATUS "curl: ${CURL_LIBRARY}") - target_link_libraries (lcgdmhtext ${CURL_LIBRARY} - ${GRIDSITE_LIBRARIES} -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_disk/CMakeLists.txt b/src/plugins/apache-httpd/src/mod_lcgdm_disk/CMakeLists.txt -index d6e6fd0c..1a99edc7 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_disk/CMakeLists.txt -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_disk/CMakeLists.txt -@@ -32,7 +32,7 @@ else (BUILD_HTCOPY) - ) - endif (BUILD_HTCOPY) - --add_dependencies (mod_lcgdm_disk lcgdmhtext lcgdmdav) -+add_dependencies (mod_lcgdm_disk lcgdmhtext) - set_target_properties(mod_lcgdm_disk PROPERTIES PREFIX "") - - target_link_libraries(mod_lcgdm_disk dmlite -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_ns/CMakeLists.txt b/src/plugins/apache-httpd/src/mod_lcgdm_ns/CMakeLists.txt -index 54b98930..6626469c 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_ns/CMakeLists.txt -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_ns/CMakeLists.txt -@@ -29,6 +29,7 @@ add_library(mod_lcgdm_ns MODULE - ../shared/utils.c - ) - -+add_dependencies (mod_lcgdm_ns lcgdmhtext) - set_target_properties(mod_lcgdm_ns PROPERTIES PREFIX "") - target_link_libraries(mod_lcgdm_ns dmlite ${JANSSON_LIBRARIES} ${APR_LIBRARIES} ${APRUTIL_LIBRARIES} dmlitemacaroons) - diff --git a/dmlite-directory_listing_permissions.patch b/dmlite-directory_listing_permissions.patch deleted file mode 100644 index 64adef3..0000000 --- a/dmlite-directory_listing_permissions.patch +++ /dev/null @@ -1,40 +0,0 @@ -commit 3884f93208be5a621498dadecb24e10f3aebd8b1 -Author: Petr Vokac -Date: Sat Oct 31 01:35:38 2020 +0100 - - Check r-x permission on listed directory - -diff --git a/src/dome/DomeCoreXeq.cpp b/src/dome/DomeCoreXeq.cpp -index 65d67bbe..5fd87fbe 100644 ---- a/src/dome/DomeCoreXeq.cpp -+++ b/src/dome/DomeCoreXeq.cpp -@@ -4473,21 +4473,20 @@ int DomeCore::dome_getdir(DomeReq &req) { - fillSecurityContext(ctx, req); - - DomeMySql sql; -- ExtendedStat parent; -- std::string parentPath, name; -- DmStatus ret = sql.getParent(parent, path, parentPath, name); -+ dmlite::ExtendedStat xstat; -+ DmStatus ret = sql.getStatbyLFN(xstat, path); - if (!ret.ok()) -- return req.DomeReq::SendSimpleResp(404, SSTR("Cannot stat the parent '" << parentPath << "' of lfn: '" << path << "'")); -- -+ return req.DomeReq::SendSimpleResp(404, SSTR("Cannot stat lfn: '" << path << "'")); -+ - if (!req.creds.oidc_authorized) { - -- ret = sql.traverseBackwards(ctx, parent); -+ ret = sql.traverseBackwards(ctx, xstat); - if (!ret.ok()) { - return req.DomeReq::SendSimpleResp(403, SSTR("Permission denied on lfn: '" << path << "' err: " << ret.code() << " what: '" << ret.what() << "'")); - } -- // Need to be able to read the parent -- if (checkPermissions(&ctx, parent.acl, parent.stat, S_IREAD | S_IEXEC) != 0) -- return req.DomeReq::SendSimpleResp(403, SSTR("Need READ access on '" << parentPath << "'")); -+ // Need to be able to read the current directory -+ if (checkPermissions(&ctx, xstat.acl, xstat.stat, S_IREAD | S_IEXEC) != 0) -+ return req.DomeReq::SendSimpleResp(403, SSTR("Need READ&EXEC access on '" << path << "'")); - - } - diff --git 
a/dmlite-ellert.patch b/dmlite-ellert.patch deleted file mode 100644 index 0449d86..0000000 --- a/dmlite-ellert.patch +++ /dev/null @@ -1,1427 +0,0 @@ -diff --git a/python/sample.py b/python/sample.py -index 75666308..961214c0 100644 ---- a/python/sample.py -+++ b/python/sample.py -@@ -1,5 +1,6 @@ - #!/usr/bin/env python - -+from __future__ import print_function - import pydmlite - - def test(): -@@ -7,8 +8,8 @@ def test(): - try: - pluginManager = pydmlite.PluginManager() - pluginManager.loadConfiguration(configFile) -- except Exception, e: -- print e -+ except Exception as e: -+ print(e) - return - - try: -@@ -18,28 +19,28 @@ def test(): - group.setUnsigned("gid", 0) - securityContext.user.setUnsigned("uid", 0) - securityContext.groups.append(group) -- except Exception, e: -- print e -+ except Exception as e: -+ print(e) - return - - try: - stackInstance = pydmlite.StackInstance(pluginManager) - stackInstance.setSecurityContext(securityContext) -- except Exception, e: -- print e -+ except Exception as e: -+ print(e) - return - - try: - catalog = stackInstance.getCatalog() -- except Exception, e: -- print e -+ except Exception as e: -+ print(e) - return - try: - f = catalog.extendedStat("/", True) -- print f.stat.st_ino -- print f.stat.st_size -- except Exception, e: -- print e -+ print(f.stat.st_ino) -+ print(f.stat.st_size) -+ except Exception as e: -+ print(e) - return - - test() -diff --git a/shell/src/argus.py b/shell/src/argus.py -index 308dcaa6..b382145d 100644 ---- a/shell/src/argus.py -+++ b/shell/src/argus.py -@@ -200,8 +200,8 @@ class ArgusBan(object): - if [x.text for x in actions] != ['.*']: continue - # ignore policies with permit rules - rules = policy.findall("{%(xacml)s}Rule" % ArgusBan._PAP_NS) -- if len(filter(lambda x: x.get('Effect') == 'Permit', rules)) > 0: continue -- for rules in filter(lambda x: x.get('Effect') == 'Deny', rules): -+ if len([x for x in rules if x.get('Effect') == 'Permit']) > 0: continue -+ for rules in [x for x in rules if x.get('Effect') == 'Deny']: - for subject_match in rules.findall("{%(xacml)s}Target/{%(xacml)s}Subjects/{%(xacml)s}Subject/{%(xacml)s}SubjectMatch" % ArgusBan._PAP_NS): - subject_type = subject_match.find("{%(xacml)s}SubjectAttributeDesignator" % ArgusBan._PAP_NS) - subject_value = subject_match.find("{%(xacml)s}AttributeValue" % ArgusBan._PAP_NS) -diff --git a/shell/src/dmlite-mysql-dirspaces.py b/shell/src/dmlite-mysql-dirspaces.py -index 8a74bc60..e1df54e2 100755 ---- a/shell/src/dmlite-mysql-dirspaces.py -+++ b/shell/src/dmlite-mysql-dirspaces.py -@@ -305,7 +305,7 @@ class CachedFullPath(object): - elif currid in id2parent: - if currid in revids: - revids.reverse() -- fullpath = '/'.join(map(lambda x: id2name[x], revids)) -+ fullpath = '/'.join([id2name[x] for x in revids]) - _log.info("db inconsistency: detected directory loop for fileid %i parent %i %s", fileid, currid, fullpath) - for revid in revids: self._cache[revid] = None - revids = [] -@@ -318,7 +318,7 @@ class CachedFullPath(object): - if fileid != 0: - if fileid != currid: - revids.reverse() -- fullpath = '/'.join(map(lambda x: id2name[x], revids)) -+ fullpath = '/'.join([id2name[x] for x in revids]) - _log.info("db inconsistency: could not find path for fileid %i parent %i (most likely the entry is orphan, path %s)", fileid, currid, fullpath) - else: - _log.info("db inconsistency: could not find path for fileid %i (most likely the entry is orphan)", fileid) -@@ -337,7 +337,7 @@ class CachedFullPath(object): - self._cache[revid] = self._cache[currid] + 
revids[:i+1]
- if not self._fileid_only:
- if revid in self._cache_path: continue
-- pathsuffix = '/'.join(map(lambda x: id2name[x], revids[:i+1]))
-+ pathsuffix = '/'.join([id2name[x] for x in revids[:i+1]])
- if currid == 0:
- self._cache_path[revid] = pathsuffix
- elif currid in self._cache_path:
-@@ -449,7 +449,7 @@ def get_quotatoken_data(conn):
- 
- # sort quotatokens by path length with longest first
- # (this ordering is assumed by code calling this function)
-- for row in sorted(filter(lambda x: x[0] != None, rows), key=lambda x: len(x[0]), reverse=True):
-+ for row in sorted([x for x in rows if x[0] != None], key=lambda x: len(x[0]), reverse=True):
- path, poolname, u_token, s_token, t_space, g_space, u_space = row
- ret.append((path, poolname, u_token, s_token, t_space, g_space, u_space))
- 
-@@ -572,7 +572,7 @@ def fix_spacetokens_by_path(conn_data, skip=[], updatedb=False):
- _log.warn("skipping fileid %i, unable to reconstruct its parent %i path (dpm-dbck can correct this problem)", fileid, pfileid)
- continue
- 
-- qt_selected = filter(lambda x: path.startswith(x[0]), qt)
-+ qt_selected = [x for x in qt if path.startswith(x[0])]
- if len(qt_selected) == 0:
- _log.warn("skipping %s/%s with fileid %i, because it doesn't match any quotatoken path", path, name, fileid)
- continue
-@@ -904,9 +904,9 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False):
- 
- if len(curr_psize) > 0:
- # try to get real paths for updated directories
-- pathnames = pathname.get_ids_multi(curr_psize.keys())
-+ pathnames = pathname.get_ids_multi(list(curr_psize.keys()))
- pathrealname = CachedFullPath(conn_path)
-- pathrealnames = pathrealname.get_path_multi(curr_psize.keys())
-+ pathrealnames = pathrealname.get_path_multi(list(curr_psize.keys()))
- for fileid in pathnames.keys():
- if fileid in pathrealnames:
- pathnames[fileid] = pathrealnames[fileid]
-@@ -1000,7 +1000,7 @@ def fix_spacetokens_size(conn_data, updatedb=False):
- parent_already_accounted = True
- if parent_already_accounted: continue
- accounted_dirs.append(longer_path)
-- path2subid[path] = map(lambda x: path2id[x], accounted_dirs)
-+ path2subid[path] = [path2id[x] for x in accounted_dirs]
- 
- updated = 0
- conn.autocommit(False)
-@@ -1013,18 +1013,18 @@
- id2size = {}
- qtsubdirids = path2subid[path]
- cursor = conn.cursor()
-- cursor.execute("SELECT fileid, filesize FROM Cns_file_metadata WHERE fileid IN ({0}) FOR UPDATE".format(','.join(map(lambda x: str(x), [pathid] + qtsubdirids))))
-+ cursor.execute("SELECT fileid, filesize FROM Cns_file_metadata WHERE fileid IN ({0}) FOR UPDATE".format(','.join([str(x) for x in [pathid] + qtsubdirids])))
- for row in cursor:
- fileid, filesize = row
- id2size[fileid] = filesize
- cursor_dpm = conn_dpm.cursor()
- cursor_dpm.execute("SELECT t_space, u_space FROM dpm_space_reserv WHERE path = %s FOR UPDATE", (path, ))
- t_space, u_space = cursor_dpm.fetchone()
-- pathfreespace = t_space - (id2size[pathid] - sum(map(lambda x: id2size[x], qtsubdirids)))
-+ pathfreespace = t_space - (id2size[pathid] - sum([id2size[x] for x in qtsubdirids]))
- if u_space != pathfreespace:
- _log.info("%supdate spacetoken %s[%s] %.02f%% relative change %i from %i to %i = t_space(%i) - (dirsize(%i) - sum(%s))",
- dry_run, path2st[path], 100. * (u_space - pathfreespace) / pathfreespace, u_space - pathfreespace, u_space,
-- pathfreespace, t_space, id2size[pathid], ','.join(map(lambda x: "{0}({1})".format(id2path[x], id2size[x]), qtsubdirids)))
-+ pathfreespace, t_space, id2size[pathid], ','.join(["{0}({1})".format(id2path[x], id2size[x]) for x in qtsubdirids]))
- if updatedb:
- cursor_dpm.execute("BEGIN") # be explicit
- cursor_dpm.execute("UPDATE dpm_space_reserv SET u_space = %s WHERE path = %s", (pathfreespace, path))
-@@ -1241,7 +1241,7 @@ if __name__ == '__main__':
- _log.error("unknown fix \"%s\", use --help command line option to get more information", fix)
- sys.exit(1)
- 
-- if not options.force and len(filter(lambda x: x.endswith('offline'), fix_names)):
-+ if not options.force and len([x for x in fix_names if x.endswith('offline')]):
- _log.info('DPM must be offline for requested safe DB updates')
- 
- # try to open TCP connections to the DPM headnode service ports
-diff --git a/shell/src/dmlite-prom.py b/shell/src/dmlite-prom.py
-index 2629c3b4..ab9bf211 100755
---- a/shell/src/dmlite-prom.py
-+++ b/shell/src/dmlite-prom.py
-@@ -15,12 +15,13 @@ import os, sys, re
- import optparse
- import socket
- import logging, logging.handlers
--from io import StringIO
- try:
-- from urllib.parse import urlparse
-+ from io import StringIO
-+except ImportError:
-+ from StringIO import StringIO
-+try:
- from urllib.request import urlopen
- except ImportError:
-- from urlparse import urlparse
- from urllib2 import urlopen
- 
- __version__ = '0.0.1'
-diff --git a/shell/src/dmlite-shell b/shell/src/dmlite-shell
-index 2eec3ed8..26d3ec4b 100755
---- a/shell/src/dmlite-shell
-+++ b/shell/src/dmlite-shell
-@@ -16,6 +16,11 @@ import atexit
- import argparse
- import logging, logging.handlers
- 
-+try:
-+ input = raw_input # Redefine for Python 2
-+except NameError:
-+ pass
-+
- __version__ = '1.13.3'
- 
- _log = logging.getLogger('dmlite-shell')
-@@ -149,9 +154,9 @@ def main():
- # get next command
- try:
- if sys.__stdin__.isatty():
-- cmdline = raw_input('> ')
-+ cmdline = input('> ')
- else:
-- cmdline = raw_input()
-+ cmdline = input()
- cmdline = cmdline.strip()
- except EOFError:
- # all commands from input have been executed, exit...
-diff --git a/shell/src/dpm-storage-summary.cgi b/shell/src/dpm-storage-summary.cgi -index 10bc471a..208dbf55 100755 ---- a/shell/src/dpm-storage-summary.cgi -+++ b/shell/src/dpm-storage-summary.cgi -@@ -25,7 +25,10 @@ from __future__ import division - - import os, sys, time - import socket --import StringIO -+try: -+ import io as StringIO -+except ImportError: -+ import StringIO - import json - import logging - import logging.handlers -diff --git a/shell/src/dpm-storage-summary.py b/shell/src/dpm-storage-summary.py -index 5670dfef..ff932643 100755 ---- a/shell/src/dpm-storage-summary.py -+++ b/shell/src/dpm-storage-summary.py -@@ -23,7 +23,7 @@ _log = logging.getLogger('DPMSRR') - - - # The top level object --class storageService: -+class storageService(object): - - def __init__(self, config): - self.config = config -diff --git a/shell/src/infoutils.py b/shell/src/infoutils.py -index bfc1e3d9..7e00b196 100755 ---- a/shell/src/infoutils.py -+++ b/shell/src/infoutils.py -@@ -1,3 +1,4 @@ -+from __future__ import division - import json - import re - import subprocess -@@ -10,7 +11,7 @@ import rpm - from dmliteshell import executor - - --class SystemInfo: -+class SystemInfo(object): - """All necessary info on the DPM and the system""" - - def __init__(self, config=None): -@@ -36,7 +37,7 @@ class SystemInfo: - if mi.count() != 1: - return None - else: -- entry = mi.next() -+ entry = next(mi) - return str(entry['version']) - - def nameversion(self, packages): -@@ -107,20 +108,20 @@ class SystemInfo: - totalgroups = set([]) - for space, qt in jgqt.items(): - if isinstance(qt["groups"], dict): -- groups = qt["groups"].values() -+ groups = list(qt["groups"].values()) - if isinstance(qt["groups"], list): - groups = qt["groups"] - # don't show "root" qt group unless it is the only group asociated - # with this space, because ldif code rely on non-empty groups - if len(groups) > 1: groups.remove("root") - qt["groups"] = groups -- map(totalgroups.add, groups) -+ list(map(totalgroups.add, groups)) - totalcapacity += int(qt["quotatktotspace"]) - totalused += int(qt["pathusedspace"]) - return jgqt, totalcapacity, totalused, list(totalgroups) - - --class Entry: -+class Entry(object): - """Base class for all GLUE2 entries""" - - def __init__(self): -@@ -248,9 +249,9 @@ class StorageServiceCapacity(Entry): - self.name = "GLUE2StorageServiceCapacityID" - self.Attributes["GLUE2StorageServiceCapacityID"] = [hostname + "/StorageServiceCapacity"] - self.Attributes["GLUE2StorageServiceCapacityType"] = ["online"] -- self.Attributes["GLUE2StorageServiceCapacityFreeSize"] = [str((tot - used) / 1024**3)] -- self.Attributes["GLUE2StorageServiceCapacityTotalSize"] = [str(tot / 1024**3)] -- self.Attributes["GLUE2StorageServiceCapacityUsedSize"] = [str(used / 1024**3)] -+ self.Attributes["GLUE2StorageServiceCapacityFreeSize"] = [str((tot - used) // 1024**3)] -+ self.Attributes["GLUE2StorageServiceCapacityTotalSize"] = [str(tot // 1024**3)] -+ self.Attributes["GLUE2StorageServiceCapacityUsedSize"] = [str(used // 1024**3)] - self.Attributes["GLUE2StorageServiceCapacityStorageServiceForeignKey"] = ["Undefined"] - self.Attributes["ObjectClass"] = ["GLUE2StorageServiceCapacity"] - -@@ -265,9 +266,9 @@ class DataStore(Entry): - Entry.__init__(self) - self.name = "GLUE2ResourceID" - self.Attributes["GLUE2ResourceID"] = [hostname + "/DataStore"] -- self.Attributes["GLUE2DataStoreTotalSize"] = [str(tot / 1024**3)] -- self.Attributes["GLUE2DataStoreUsedSize"] = [str(used / 1024**3)] -- self.Attributes["GLUE2DataStoreFreeSize"] = 
[str((tot - used) / 1024**3)] -+ self.Attributes["GLUE2DataStoreTotalSize"] = [str(tot // 1024**3)] -+ self.Attributes["GLUE2DataStoreUsedSize"] = [str(used // 1024**3)] -+ self.Attributes["GLUE2DataStoreFreeSize"] = [str((tot - used) // 1024**3)] - self.Attributes["GLUE2DataStoreType"] = ["disk"] - self.Attributes["GLUE2DataStoreLatency"] = ["online"] - self.Attributes["GLUE2DataStoreStorageManagerForeignKey"] = ["Undefined"] -@@ -344,9 +345,9 @@ class ShareCapacity(Entry): - Entry.__init__(self) - self.name = "GLUE2StorageShareCapacityID" - self.Attributes["GLUE2StorageShareCapacityID"] = [hostname + "/Capacity/" + qtname] -- self.Attributes["GLUE2StorageShareCapacityTotalSize"] = [str(qtspace / 1024**3)] -- self.Attributes["GLUE2StorageShareCapacityUsedSize"] = [str(pathused / 1024**3)] -- self.Attributes["GLUE2StorageShareCapacityFreeSize"] = [str((qtspace - pathused) / 1024**3)] -+ self.Attributes["GLUE2StorageShareCapacityTotalSize"] = [str(qtspace // 1024**3)] -+ self.Attributes["GLUE2StorageShareCapacityUsedSize"] = [str(pathused // 1024**3)] -+ self.Attributes["GLUE2StorageShareCapacityFreeSize"] = [str((qtspace - pathused) // 1024**3)] - self.Attributes["GLUE2StorageShareCapacityType"] = ["online"] - self.Attributes["GLUE2StorageShareCapacityStorageShareForeignKey"] = ["Undefined"] - self.Attributes["ObjectClass"] = ["GLUE2StorageShareCapacity"] -diff --git a/shell/src/interpreter.py b/shell/src/interpreter.py -index d18f918b..94e86ba7 100644 ---- a/shell/src/interpreter.py -+++ b/shell/src/interpreter.py -@@ -1,3 +1,6 @@ -+from __future__ import print_function -+from __future__ import absolute_import -+from __future__ import division - # interpreter.py - - import pydmlite -@@ -10,19 +13,28 @@ import re - import time - import dateutil.parser - import pycurl --import urllib --from dbutils import DPMDB -+try: -+ from urllib.parse import unquote -+except ImportError: -+ from urllib import unquote -+from .dbutils import DPMDB - import threading --import Queue -+try: -+ import queue as Queue -+except ImportError: -+ import Queue - import signal - import socket --from executor import DomeExecutor -+from .executor import DomeExecutor - import json - import pprint --import StringIO -+try: -+ import io as StringIO -+except ImportError: -+ import StringIO - from M2Crypto import X509 --import utils --import argus -+from . import utils -+from . import argus - - try: - import dpm2 -@@ -38,7 +50,7 @@ activitystatus = {'UNKNOWN': 0, 'ONLINE': 1, 'DOWN': 2} - activitystatusbycode = dict((v, k) for k, v in activitystatus.items()) - - --class DMLiteInterpreter: -+class DMLiteInterpreter(object): - """ - A class taking commands as strings and passing them to DMLite via pydmlite. - """ -@@ -260,7 +272,7 @@ class DMLiteInterpreter: - return flist - - --class ShellCommand: -+class ShellCommand(object): - """ - An abstract class for deriving classes for supported shell commands. 
- """ -@@ -507,7 +519,7 @@ class ShellCommand: - if not lastgiven.startswith('/'): - lastgiven = './' + lastgiven - gfolder, gfilestart = os.path.split(lastgiven) -- groot, gdirs, gfiles = os.walk(gfolder).next() -+ groot, gdirs, gfiles = next(os.walk(gfolder)) - gfiles = gfiles + list((d + '/') for d in gdirs) - l = list(f for f in gfiles if f.startswith(gfilestart)) - elif ptype == 'd': # dmlite file or folder -@@ -1005,7 +1017,7 @@ class InfoCommand(ShellCommand): - - self.ok('GUID: ' + str(f.guid)) - self.ok('Ino: ' + str(f.stat.st_ino)) -- self.ok('Mode: ' + oct(f.stat.st_mode)) -+ self.ok('Mode: ' + '0%o' % f.stat.st_mode) - self.ok('# of Links: ' + str(f.stat.st_nlink)) - - try: -@@ -2631,7 +2643,7 @@ class Response(object): - for marker in self.markers: - if 'success' in marker.lower(): - return 0 -- elif any(map(lambda x: x in marker.lower(), ['failed', 'aborted', 'failure'])): -+ elif any([x in marker.lower() for x in ['failed', 'aborted', 'failure']]): - return marker - - def printMarkers(self): -@@ -2739,7 +2751,7 @@ class Replicate(object): - pass - - destination = loc[0].url.toString() -- destination = urllib.unquote(destination) -+ destination = unquote(destination) - #create correct destination url and SFN - sfn = destination[0:destination.index(':') + 1] + destination[destination.index(':') + 1:destination.index('?')] - destination = destination[0:destination.index(':') + 1] + str(http_port) + destination[destination.index(':') + 1:len(destination)] -@@ -3263,18 +3275,18 @@ class DrainReplicas(object): - fileSize = fileSize + file.size - if self.parameters['move']: - self.interpreter.ok("Total replicas to move: " + str(numFiles)) -- self.interpreter.ok("Total capacity to move: " + str(fileSize / 1024) + " KB") -+ self.interpreter.ok("Total capacity to move: " + str(fileSize // 1024) + " KB") - else: - self.interpreter.ok("Total replicas installed in the FS to drain: " + str(numFiles)) -- self.interpreter.ok("Total capacity installed in the FS to drain: " + str(fileSize / 1024) + " KB") -+ self.interpreter.ok("Total capacity installed in the FS to drain: " + str(fileSize // 1024) + " KB") - - #in case the size is != 100, we should limit the number of replicas to drain - sizeToDrain = fileSize - if self.parameters['size'] != 100: -- sizeToDrain = sizeToDrain * self.parameters['size'] / 100 -+ sizeToDrain = sizeToDrain * self.parameters['size'] // 100 - if not self.parameters['move']: - self.interpreter.ok("Percentage of capacity to drain: " + str(self.parameters['size']) + " %") -- self.interpreter.ok("Total capacity to drain: " + str(sizeToDrain / 1024) + " KB") -+ self.interpreter.ok("Total capacity to drain: " + str(sizeToDrain // 1024) + " KB") - - for file in self.fileReplicas: - if (self.parameters['group'] != "ALL"): -diff --git a/src/dome/cli/dome.py b/src/dome/cli/dome.py -index 56af6f63..95be9127 100755 ---- a/src/dome/cli/dome.py -+++ b/src/dome/cli/dome.py -@@ -4,11 +4,12 @@ - This file implements the Dome CLI. 
- The CLI provides an easy access to many features of the Dome Service - """ -+from __future__ import print_function -+from __future__ import absolute_import - import os --import ConfigParser - import sys - from optparse import OptionParser --from executor import DomeExecutor -+from .executor import DomeExecutor - - def main(): - # parse options -@@ -31,7 +32,7 @@ def main(): - - if options.execute: - if not options.url: -- print "Please specify the url via --url option" -+ print("Please specify the url via --url option") - sys.exit(1) - executor = DomeExecutor("/etc/grid-security/dpmmgr/dpmcert.pem", "/etc/grid-security/dpmmgr/dpmkey.pem", - "/etc/grid-security/certificates", options.clientDN, options.clientAddr) -@@ -40,81 +41,81 @@ def main(): - - if options.execute == 'get': - if not options.lfn: -- print "Please specify the LFN via --lfn option" -+ print("Please specify the LFN via --lfn option") - sys.exit(1) - if not options.pfn: -- print "Please specify the PFN via --pfn option" -+ print("Please specify the PFN via --pfn option") - sys.exit(1) - if not options.server: -- print "Please specify the Server via --server option" -+ print("Please specify the Server via --server option") - sys.exit(1) - if not options.fs: -- print "Please specify the Filesystem via --fs option" -+ print("Please specify the Filesystem via --fs option") - sys.exit(1) - executor.get(options.url,options.lfn, options.pfn,options.server,options.fs) - if options.execute == 'put': - if not options.lfn: -- print "Please specify the LFN via --lfn option" -+ print("Please specify the LFN via --lfn option") - sys.exit(1) - executor.put(options.url,options.lfn) - elif options.execute == 'putdone': - if not options.pfn: -- print "Please specify the PFN via --pfn option" -+ print("Please specify the PFN via --pfn option") - sys.exit(1) - if not options.size: -- print "Please specify the Server via --size option" -+ print("Please specify the Server via --size option") - sys.exit(1) - executor.putDone(options.url, options.pfn,options.size) - elif options.execute == 'getspaceinfo': - executor.getSpaceInfo(options.url) - elif options.execute == 'statpool': - if not options.pool: -- print "Please specify the Pool to stat via --pool option" -+ print("Please specify the Pool to stat via --pool option") - sys.exit(1) - executor.statPool(options.url,options.pool) - elif options.execute == 'getquotatoken': - if not options.lfn: -- print "Please specify the LFN via --lfn option" -+ print("Please specify the LFN via --lfn option") - sys.exit(1) - executor.getquotatoken(options.url,options.lfn) - elif options.execute == 'setquotatoken': - if not options.lfn: -- print "Please specify the LFN via --lfn option" -+ print("Please specify the LFN via --lfn option") - sys.exit(1) - if not options.pool: -- print "Please specify the Pool to set the quota token via --pool option" -+ print("Please specify the Pool to set the quota token via --pool option") - sys.exit(1) - if not options.space: -- print "Please specify the Space for the quota token via --space option" -+ print("Please specify the Space for the quota token via --space option") - sys.exit(1) - if not options.desc: -- print "Please specify the Space for the quota token description via --desc option" -+ print("Please specify the Space for the quota token description via --desc option") - sys.exit(1) - executor.setquotatoken(options.url,options.lfn,options.pool, options.space,options.desc) - elif options.execute == 'delquotatoken': - if not options.lfn: -- print "Please specify the 
LFN via --lfn option" -+ print("Please specify the LFN via --lfn option") - sys.exit(1) - if not options.pool: -- print "Please specify the Pool to set the quota token via --pool option" -+ print("Please specify the Pool to set the quota token via --pool option") - sys.exit(1) - executor.delquotatoken(options.url,options.lfn,options.pool) - elif options.execute == 'getdirspaces': - if not options.lfn: -- print "Please specify the LFN via --lfn option" -+ print("Please specify the LFN via --lfn option") - sys.exit(1) - executor.getdirspaces(options.url,options.lfn) - elif options.execute == "pfnrm": - if not options.pfn: -- print "Please specify the PFN via --pfn option" -+ print("Please specify the PFN via --pfn option") - sys.exit(1) - executor.pfnrm(options.url,options.pfn) - elif options.execute == "delreplica": - if not options.pfn: -- print "Please specify the PFN via --pfn option" -+ print("Please specify the PFN via --pfn option") - sys.exit(1) - if not options.server: -- print "Please specify the Server via --server option" -+ print("Please specify the Server via --server option") - sys.exit(1) - executor.delreplica(options.url,options.pfn,options.server) - -diff --git a/src/dome/cli/executor.py b/src/dome/cli/executor.py -index 53e541bc..46cdde32 100644 ---- a/src/dome/cli/executor.py -+++ b/src/dome/cli/executor.py -@@ -1,3 +1,4 @@ -+from __future__ import print_function - from subprocess import Popen, PIPE, STDOUT - from pipes import quote - import os -diff --git a/tests/dpm/dpm-tester.py b/tests/dpm/dpm-tester.py -index 7308b010..0d637b61 100755 ---- a/tests/dpm/dpm-tester.py -+++ b/tests/dpm/dpm-tester.py -@@ -2,7 +2,10 @@ - from __future__ import print_function, division, absolute_import - - import gfal2 --import StringIO -+try: -+ import io as StringIO -+except ImportError: -+ import StringIO - import traceback - import datetime - import time -@@ -13,7 +16,10 @@ import inspect - import os - import filecmp - import hashlib --import urllib -+try: -+ from urllib.parse import quote, unquote -+except ImportError: -+ from urllib import quote, unquote - import signal - import stat - import sys -@@ -22,7 +28,10 @@ import textwrap - import json - import subprocess - import random --from urlparse import urlparse -+try: -+ from urllib.parse import urlparse -+except ImportError: -+ from urlparse import urlparse - - EX_OK = 0 - EX_WARNING = 1 -@@ -164,7 +173,7 @@ def get_test_number(): - return TEST_NUMBER - - --class TestResult: -+class TestResult(object): - """A class to store and display the results from a single test run""" - def __init__(self, prefix, name): - self.prefix = prefix -@@ -221,7 +230,7 @@ class TestResult: - @staticmethod - def show_skipped(prefix, name, indent=""): - if USE_XML: -- print("""""".format(get_test_number(), urllib.quote(name), prefix)) -+ print("""""".format(get_test_number(), quote(name), prefix)) - print("""""") - return - -@@ -247,7 +256,7 @@ class TestResult: - return strio.getvalue() - - def show_xml(self): -- print("""""".format(get_test_number(), urllib.quote(self.name), self.prefix, -+ print("""""".format(get_test_number(), quote(self.name), self.prefix, - duration_in_sec(self.starttime, self.endtime))) - - if not self.ok(): -@@ -285,7 +294,7 @@ class ThreadExc(threading.Thread): - self.exception = e - - # Wraps the result from the execution of a gfal function --class GfalStatus: -+class GfalStatus(object): - def __init__(self, output, exception, traceback, timed_out): - self.output = output - self.traceback = traceback -@@ -404,7 +413,7 @@ class 
DomeCredentials(object): - self.clientDN = clientDN - self.clientAddress = clientAddress - --class DomeTalker: -+class DomeTalker(object): - """Issues requests to Dome""" - - @staticmethod -@@ -446,7 +455,7 @@ class DomeTalker: - result.write(f("Davix exit code: {proc.returncode}")) - return result - --class DomeTester: -+class DomeTester(object): - def __init__(self, uri): - self.uri = uri - self.ctx = gfal2.creat_context() -@@ -534,7 +543,7 @@ def ensure_safe_path(path): - # assumption: second will always contain a single chunk, ie don't pass - # "/dir1/" along with "/dir2/file" - def path_join(first, second): -- second = urllib.quote(second) -+ second = quote(second) - - if first.endswith("/"): - return first + second -@@ -568,7 +577,7 @@ def hammer_tester(functions, arguments): - if result.alive(): result.success() - return result - --class ProtocolTester: -+class ProtocolTester(object): - def __init__(self, testcase): - self.ctx = gfal2.creat_context() - -@@ -906,7 +915,7 @@ def calculate_checksum(checksumtype, filename): - if checksumtype == "md5": - return hashlib.md5(open(filename, 'rb').read()).hexdigest() - --class Runner: -+class Runner(object): - def __init__(self, function, args, name): - self.function = function - self.args = args -@@ -927,7 +936,7 @@ class Runner: - - # Run a series of tests, optionally with initialization and cleanup. - # You can also nest orchestrators together, but be careful. --class Orchestrator: -+class Orchestrator(object): - def __init__(self, prefix=""): - self.prefix = prefix - self.initialization = [] -@@ -1145,10 +1154,10 @@ def build_target_url(args, scheme): - return path_join(build_base_url(args, scheme), args.testdir) - - def extract_path(url): -- return urllib.unquote(urlparse(url).path) -+ return unquote(urlparse(url).path) - - def extract_file(url): -- return urllib.unquote(url.split("/")[-1]) -+ return unquote(url.split("/")[-1]) - - def single_protocol_tests(args, scope): - tester = ProtocolTester(scope) -diff --git a/tests/python/catalog.py b/tests/python/catalog.py -index e77e73d1..c329f4c5 100644 ---- a/tests/python/catalog.py -+++ b/tests/python/catalog.py -@@ -20,7 +20,7 @@ class TestDmliteCatalog(unittest.TestCase): - self.manager = pydmlite.PluginManager() - try : - self.manager.loadConfiguration(self.conf_file) -- except Exception, err: -+ except Exception as err: - self.fail("%s" % err) - self.stack = pydmlite.StackInstance(self.manager) - self.creds = pydmlite.SecurityCredentials() -@@ -30,7 +30,7 @@ class TestDmliteCatalog(unittest.TestCase): - - try: - self.stack.setSecurityCredentials(self.creds) -- except Exception, err: -+ except Exception as err: - self.fail("%s" % err) - - def tearDown(self): -@@ -72,17 +72,17 @@ class TestDmliteCatalog(unittest.TestCase): - catalog = self.stack.getCatalog() - try: - catalog.changeDir(self.path) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - try: -- catalog.makeDir(self.newdir,0775) # create a test dir -- except Exception, err: -+ catalog.makeDir(self.newdir,0o775) # create a test dir -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - try: -- catalog.setMode(self.newdir,0777) # change mode from 0775 to 0777 -- except Exception, err: -+ catalog.setMode(self.newdir,0o777) # change mode from 0775 to 0777 -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - return 0 -@@ -91,7 +91,7 @@ class TestDmliteCatalog(unittest.TestCase): - mydirpath = self.path + "/" + 
self.newdir - try: - xstat = catalog.extendedStat(mydirpath, True) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - self.assertEquals(xstat.name,self.newdir) # checking the directory name -@@ -106,18 +106,18 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertTrue(deltatimed < 4) # checking the time to create the directory - self.assertEquals(xstat.stat.st_blksize,0) - self.assertEquals(xstat.stat.st_size, 0) -- self.assertEquals(str(oct(xstat.stat.st_mode)),"040777") # checking the mode -+ self.assertEquals(xstat.stat.st_mode, 0o40777) # checking the mode - def test_rmdir(self): - catalog = self.stack.getCatalog() - try: - catalog.changeDir(self.path) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - arg = self.path + "/" + self.rmdir - try: - catalog.removeDir(arg) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - return 0 -@@ -125,22 +125,22 @@ class TestDmliteCatalog(unittest.TestCase): - catalog = self.stack.getCatalog() - try: - catalog.changeDir(self.path) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - try: -- catalog.create(self.newfile, 0775) # create a test file -- except Exception, err: -+ catalog.create(self.newfile, 0o775) # create a test file -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - try: - xstat = catalog.extendedStat(self.newfile, True) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(erra)) - return 1 - try: -- catalog.setMode(self.newfile,0777) # change mode from 0775 to 0777 -- except Exception, err: -+ catalog.setMode(self.newfile,0o777) # change mode from 0775 to 0777 -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - return 0 -@@ -149,7 +149,7 @@ class TestDmliteCatalog(unittest.TestCase): - filevername = self.path + "/" + self.newfile - try: - xstat = catalog.extendedStat(filevername,True) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(erra)) - return 1 - self.assertFalse(xstat.stat.isDir()) -@@ -159,7 +159,7 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertTrue(xstat.stat.isReg()) - self.assertTrue(stat.S_ISREG(xstat.stat.st_mode)) - self.assertEquals(xstat.name, self.newfile) # checking the file name -- self.assertEquals(str(oct(xstat.stat.st_mode)),"0100777") # checking the file mode -+ self.assertEquals(xstat.stat.st_mode, 0o100777) # checking the file mode - self.assertTrue(xstat.stat.st_ino > 0) - self.assertEquals(xstat.stat.st_nlink, 1) - deltatime = time.time() - xstat.stat.getMTime() -@@ -173,18 +173,18 @@ class TestDmliteCatalog(unittest.TestCase): - catalog = self.stack.getCatalog() - try: - catalog.changeDir(self.path) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - try: - catalog.symlink(self.newfile, self.newlink) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - name = self.path + "/" + self.newlink - try: - filename = catalog.readLink(name) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - return 0 -@@ -192,7 +192,7 @@ class TestDmliteCatalog(unittest.TestCase): - catalog = self.stack.getCatalog() - try: - xstat = 
catalog.extendedStat(self.newlink,False) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - self.assertFalse(xstat.stat.isDir()) -@@ -200,7 +200,7 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertTrue(xstat.stat.isLnk()) - self.assertEquals(xstat.name,self.newlink) # checking the link name - self.assertEquals(catalog.readLink(self.newlink),self.newfile) # checking of the link (newlink->newfile) -- self.assertEquals(str(oct(xstat.stat.st_mode)),"0120777") # checking the link mode -+ self.assertEquals(xstat.stat.st_mode, 0o120777) # checking the link mode - self.assertTrue(xstat.stat.st_ino > 0) - self.assertEquals(xstat.stat.st_nlink, 1) - deltatimel = time.time() - xstat.stat.getMTime() -@@ -212,12 +212,12 @@ class TestDmliteCatalog(unittest.TestCase): - catalog = self.stack.getCatalog() - try: - catalog.changeDir(self.path) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - try: - catalog.unlink (self.newlink) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - return 0 -@@ -226,17 +226,17 @@ class TestDmliteCatalog(unittest.TestCase): - catalog = self.stack.getCatalog() - try: - catalog.changeDir(self.path) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - try: - xstat = catalog.extendedStat(self.newfile,True) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 - try: - catalog.unlink(self.newfile) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('file is removed: %s\n' % str(err)) - return 1 - return 0 -diff --git a/tests/python/catalog_first.py b/tests/python/catalog_first.py -index eb75bb1f..4a544e5b 100755 ---- a/tests/python/catalog_first.py -+++ b/tests/python/catalog_first.py -@@ -13,7 +13,7 @@ class TestDmliteCatalog(unittest.TestCase): - self.manager = pydmlite.PluginManager() - try : - self.manager.loadConfiguration(self.conf_file) -- except Exception, err: -+ except Exception as err: - self.fail("%s" % err) - self.stack = pydmlite.StackInstance(self.manager) - self.creds = pydmlite.SecurityCredentials() -@@ -23,7 +23,7 @@ class TestDmliteCatalog(unittest.TestCase): - - try: - self.stack.setSecurityCredentials(self.creds) -- except Exception, err: -+ except Exception as err: - self.fail("%s" % err) - - def tearDown(self): -diff --git a/tests/python/dm-ls.py b/tests/python/dm-ls.py -index c0420283..c6a152fe 100755 ---- a/tests/python/dm-ls.py -+++ b/tests/python/dm-ls.py -@@ -1,4 +1,5 @@ - #!/usr/bin/python -+from __future__ import print_function - import pydmlite - import sys - import time -@@ -10,14 +11,14 @@ if __name__ == "__main__": - l=0 - # print len - if len < 3 or sys.argv[1] == "-h": -- print "Usage:", sys.argv[0], "[-l] " -+ print("Usage:", sys.argv[0], "[-l] ") - sys.exit(1) - # print sys.argv[1] - if sys.argv[1] == "-l" : - l=1 - try : - manager.loadConfiguration(sys.argv[l+1]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - stack = pydmlite.StackInstance(manager) -@@ -29,7 +30,7 @@ if __name__ == "__main__": - - try: - stack.setSecurityCredentials(creds) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - catalog = stack.getCatalog() -@@ -46,10 +47,10 @@ if __name__ == "__main__": - # print "\t%s" % 
xstat.name, " is link to ", filename - ltime = time.localtime(xstat.stat.getMTime()) - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) -- print "%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" %xstat.name, "->",filename -+ print("%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" %xstat.name, "->",filename) - - sys.exit(0) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - flag = xstat.stat.isDir() -@@ -59,7 +60,7 @@ if __name__ == "__main__": - catalog = stack.getCatalog() - try : - mydir = catalog.openDir(inputname) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - while True: -@@ -83,13 +84,13 @@ if __name__ == "__main__": - filename = catalog.readLink(name) - ltime = time.localtime(xstat.stat.getMTime()) - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) -- print "%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % f.name, "->",filename -+ print("%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % f.name, "->",filename) - continue - ltime = time.localtime(xstat.stat.getMTime()) - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) -- print "%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % f.name -+ print("%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % f.name) - else : -- print "\t%s" % f.name -+ print("\t%s" % f.name) - except: - catalog.closeDir(mydir) - break -@@ -97,8 +98,8 @@ if __name__ == "__main__": - if l>0 : - ltime = time.localtime(xstat.stat.getMTime()) - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) -- print "%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % xstat.name -+ print("%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % xstat.name) - else : -- print "\t%s" % xstat.name -+ print("\t%s" % xstat.name) - sys.exit(0) - -diff --git a/tests/python/dm-mkdir.py b/tests/python/dm-mkdir.py -index d5fe47da..c6d165b6 100755 ---- a/tests/python/dm-mkdir.py -+++ b/tests/python/dm-mkdir.py -@@ -1,4 +1,5 @@ - #!/usr/bin/python -+from __future__ import print_function - import pydmlite - import sys - -@@ -8,12 +9,12 @@ if __name__ == "__main__": - l=0 - # print len - if len < 3 or sys.argv[1] == "-h": -- print "Usage:", sys.argv[0], " []" -+ print("Usage:", sys.argv[0], " []") - sys.exit(1) - # print sys.argv[1] - try : - manager.loadConfiguration(sys.argv[1]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - -@@ -29,22 +30,22 @@ if __name__ == "__main__": - catalog = stack.getCatalog() - try: - catalog.changeDir(sys.argv[2]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - try: -- catalog.makeDir(sys.argv[3],0777) -- except Exception, err: -+ catalog.makeDir(sys.argv[3],0o777) -+ except Exception as err: - sys.stderr.write('ERROR: 
%s\n' % str(err)) - sys.exit(1) -- print sys.argv[3]," was created" -+ print(sys.argv[3]," was created") - if len == 5 : - try: - mode = int(sys.argv[4],8) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) -- print sys.argv[3],"%o" % mode -+ print(sys.argv[3],"%o" % mode) - catalog.setMode(sys.argv[3],mode) - # arg = sys.argv[2] + "/" + sys.argv[3] - # catalog.removeDir(arg) -diff --git a/tests/python/dm-mkfile.py b/tests/python/dm-mkfile.py -index 380c5931..607318f4 100755 ---- a/tests/python/dm-mkfile.py -+++ b/tests/python/dm-mkfile.py -@@ -1,4 +1,5 @@ - #!/usr/bin/python -+from __future__ import print_function - import pydmlite - import sys - import time -@@ -9,11 +10,11 @@ if __name__ == "__main__": - len = len(sys.argv) - l=0 - if len < 4 or sys.argv[1] == "-h": -- print "Usage:", sys.argv[0], " []" -+ print("Usage:", sys.argv[0], " []") - sys.exit(1) - try: - manager.loadConfiguration(sys.argv[1]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - stack = pydmlite.StackInstance(manager) -@@ -25,36 +26,36 @@ if __name__ == "__main__": - - try: - stack.setSecurityCredentials(creds) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - - catalog = stack.getCatalog() - try: - catalog.changeDir(sys.argv[2]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - try: -- catalog.create(sys.argv[3], 0775) # create a test file -- except Exception, err: -+ catalog.create(sys.argv[3], 0o775) # create a test file -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - try: - xstat = catalog.extendedStat(sys.argv[3], True) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - ltime = time.localtime(xstat.stat.getMTime()) -- print "create a test file", "\t%s" % xstat.name -+ print("create a test file", "\t%s" % xstat.name) - - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) -- print "%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % xstat.name -+ print("%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % xstat.name) - if len == 5 : - try : - mode = int(sys.argv[4],8) - # print "%o" % mode -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - - sys.exit(1) -@@ -64,6 +65,6 @@ if __name__ == "__main__": - # print "%o" % xstat.stat.st_mode - ltime = time.localtime(xstat.stat.getMTime()) - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) -- print "change mode from 775 to ", "%o" % mode -- print "%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % xstat.name -+ print("change mode from 775 to ", "%o" % mode) -+ print("%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % xstat.name) - sys.exit(0) -diff --git a/tests/python/dm-mklink.py b/tests/python/dm-mklink.py -index e54b9d37..2f776fe4 100755 ---- a/tests/python/dm-mklink.py -+++ b/tests/python/dm-mklink.py -@@ -1,4 +1,5 @@ - #!/usr/bin/python -+from __future__ import print_function - import pydmlite - import sys - -@@ -8,12 
+9,12 @@ if __name__ == "__main__": - l=0 - # print len - if len < 5 or sys.argv[1] == "-h": -- print "Usage:", sys.argv[0], " " -+ print("Usage:", sys.argv[0], " ") - sys.exit(1) - # print sys.argv[1] - try : - manager.loadConfiguration(sys.argv[1]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - -@@ -29,17 +30,17 @@ if __name__ == "__main__": - catalog = stack.getCatalog() - try: - catalog.changeDir(sys.argv[2]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - try: - catalog.symlink(sys.argv[3],sys.argv[4]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - name = sys.argv[2] + sys.argv[4] - filename = catalog.readLink(name) -- print sys.argv[4], "->",filename -+ print(sys.argv[4], "->",filename) - # catalog.setMode(sys.argv[4],120777) - # arg = sys.argv[2] + "/" + sys.argv[3] - # catalog.removeDir(arg) -diff --git a/tests/python/dm-physicallocation.py b/tests/python/dm-physicallocation.py -index 1b468bb1..a0133bee 100755 ---- a/tests/python/dm-physicallocation.py -+++ b/tests/python/dm-physicallocation.py -@@ -1,4 +1,5 @@ - #!/usr/bin/python -+from __future__ import print_function - import pydmlite - import sys - -@@ -6,11 +7,11 @@ if __name__ == "__main__": - manager = pydmlite.PluginManager() - - if len(sys.argv) < 3: -- print "Usage:", sys.argv[0], " " -+ print("Usage:", sys.argv[0], " ") - sys.exit(1) - try : - manager.loadConfiguration(sys.argv[1]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - stack = pydmlite.StackInstance(manager) -@@ -21,18 +22,18 @@ if __name__ == "__main__": - creds.fqans.append("dteam") - try : - stack.setSecurityCredentials(creds) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - poolManager = stack.getPoolManager() - try : - location = poolManager.whereToRead(sys.argv[2]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - for l in location: -- print "Chunk: %s:%s (%d-%d)" % (l.host, l.path, l.offset, l.offset + l.size) -+ print("Chunk: %s:%s (%d-%d)" % (l.host, l.path, l.offset, l.offset + l.size)) - for k in l.getKeys(): -- print "\t%s: %s" % (k, l.getString(k)) -+ print("\t%s: %s" % (k, l.getString(k))) - sys.exit(0) - -diff --git a/tests/python/dm-rmdir.py b/tests/python/dm-rmdir.py -index 59971fe8..6eb950a0 100755 ---- a/tests/python/dm-rmdir.py -+++ b/tests/python/dm-rmdir.py -@@ -1,4 +1,5 @@ - #!/usr/bin/python -+from __future__ import print_function - import pydmlite - import sys - -@@ -8,12 +9,12 @@ if __name__ == "__main__": - l=0 - # print len - if len < 3 or sys.argv[1] == "-h": -- print "Usage:", sys.argv[0], " " -+ print("Usage:", sys.argv[0], " ") - sys.exit(1) - # print sys.argv[1] - try : - manager.loadConfiguration(sys.argv[1]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - -@@ -29,14 +30,14 @@ if __name__ == "__main__": - catalog = stack.getCatalog() - try: - catalog.changeDir(sys.argv[2]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - arg = sys.argv[2] + "/" + sys.argv[3] - try : - catalog.removeDir(arg) -- print arg," was removed" -+ print(arg," was removed") - sys.exit(0) -- except Exception, err: -+ except 
Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) -diff --git a/tests/python/dm-rmfile.py b/tests/python/dm-rmfile.py -index 4a2f1dfc..de68ef2b 100755 ---- a/tests/python/dm-rmfile.py -+++ b/tests/python/dm-rmfile.py -@@ -1,4 +1,5 @@ - #!/usr/bin/python -+from __future__ import print_function - import pydmlite - import sys - import time -@@ -9,11 +10,11 @@ if __name__ == "__main__": - len = len(sys.argv) - l=0 - if len < 4 or sys.argv[1] == "-h": -- print "Usage:", sys.argv[0], " " -+ print("Usage:", sys.argv[0], " ") - sys.exit(1) - try: - manager.loadConfiguration(sys.argv[1]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - stack = pydmlite.StackInstance(manager) -@@ -25,31 +26,31 @@ if __name__ == "__main__": - - try: - stack.setSecurityCredentials(creds) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - - catalog = stack.getCatalog() - try: - catalog.changeDir(sys.argv[2]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - try: - xstat = catalog.extendedStat(sys.argv[3], True) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - ltime = time.localtime(xstat.stat.getMTime()) -- print "file exists: " -+ print("file exists: ") - - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) -- print "%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % xstat.name -+ print("%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" % xstat.name) - catalog.unlink(sys.argv[3]) - - try: - xstat = catalog.extendedStat(sys.argv[3], True) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('file is removed: %s\n' % str(err)) - sys.exit(0) - # sys.exit(0) -diff --git a/tests/python/dm-rmlink.py b/tests/python/dm-rmlink.py -index 1091ea7c..3104444a 100755 ---- a/tests/python/dm-rmlink.py -+++ b/tests/python/dm-rmlink.py -@@ -1,4 +1,5 @@ - #!/usr/bin/python -+from __future__ import print_function - import pydmlite - import sys - -@@ -8,12 +9,12 @@ if __name__ == "__main__": - l=0 - # print len - if len < 3 or sys.argv[1] == "-h": -- print "Usage:", sys.argv[0], " " -+ print("Usage:", sys.argv[0], " ") - sys.exit(1) - # print sys.argv[1] - try : - manager.loadConfiguration(sys.argv[1]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - -@@ -29,12 +30,12 @@ if __name__ == "__main__": - catalog = stack.getCatalog() - try: - catalog.changeDir(sys.argv[2]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - try : - catalog.unlink (sys.argv[3]) -- except Exception, err: -+ except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - # arg = sys.argv[2] + "/" + sys.argv[3] diff --git a/dmlite-fix-oob-memory-read.patch b/dmlite-fix-oob-memory-read.patch deleted file mode 100644 index 2a85782..0000000 --- a/dmlite-fix-oob-memory-read.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/src/utils/DomeTalker.cpp b/src/utils/DomeTalker.cpp -index 40bdd511..1a978a47 100644 ---- a/src/utils/DomeTalker.cpp -+++ b/src/utils/DomeTalker.cpp -@@ -276,7 +276,7 @@ int dmlite::http_status(const dmlite::DmStatus &e) { - 
return DOME_HTTP_INTERNAL_SERVER_ERROR; - } - int DomeTalker::dmlite_code() { -- for(size_t i = 0; i < sizeof(pairs); i++) { -+ for(size_t i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++) { - if(pairs[i].status == status_) { - return pairs[i].code; - } diff --git a/dmlite-fix_puppet_headdisk_node.patch b/dmlite-fix_puppet_headdisk_node.patch deleted file mode 100644 index 727fbd6..0000000 --- a/dmlite-fix_puppet_headdisk_node.patch +++ /dev/null @@ -1,20 +0,0 @@ -commit 59419870be82ca146cbded3cf697b1d700c4b516 -Author: Andrea Manzi -Date: Wed Nov 18 10:21:53 2020 +0000 - - Update head_disknode.pp - added missing parenthesis - reported at https://github.com/cern-it-sdc-id/lcgdm-puppet-examples/issues/1#issuecomment-729444230 - -diff --git a/src/puppet/dpm/manifests/head_disknode.pp b/src/puppet/dpm/manifests/head_disknode.pp -index b13b8baa..fcdf741e 100644 ---- a/src/puppet/dpm/manifests/head_disknode.pp -+++ b/src/puppet/dpm/manifests/head_disknode.pp -@@ -390,6 +390,8 @@ class dpm::head_disknode ( - ensure => 'absent'; - 'dpm-argus-banned-groups': - ensure => 'absent'; -+ } -+ } - } - - #pools configuration diff --git a/dmlite-initialize_class_variable.patch b/dmlite-initialize_class_variable.patch deleted file mode 100644 index d2bc034..0000000 --- a/dmlite-initialize_class_variable.patch +++ /dev/null @@ -1,19 +0,0 @@ -commit 6338e683a1486d23bde7c448bcb83e2cadb94736 -Author: Petr Vokac -Date: Wed Oct 28 18:21:09 2020 +0100 - - Constructor should initialize even unused variables - -diff --git a/src/utils/DomeTalker.h b/src/utils/DomeTalker.h -index f107b578..ff49d0ab 100644 ---- a/src/utils/DomeTalker.h -+++ b/src/utils/DomeTalker.h -@@ -55,7 +55,7 @@ struct DomeCredentials { - clientName(cn), remoteAddress(ra), groups(gr), oidc_authorized(false) {} - - DomeCredentials() : oidc_authorized(false) {} -- DomeCredentials(const SecurityContext *ctx) { -+ DomeCredentials(const SecurityContext *ctx) : oidc_authorized(false) { - if(ctx) { - - clientName = ctx->credentials.clientName; diff --git a/dmlite-no_lfn_draining_exception.patch b/dmlite-no_lfn_draining_exception.patch deleted file mode 100644 index c9528af..0000000 --- a/dmlite-no_lfn_draining_exception.patch +++ /dev/null @@ -1,30 +0,0 @@ -commit c3a2b521ca3433241f10302b2ce146e912b2249d -Author: Petr Vokac -Date: Tue Nov 10 03:02:40 2020 +0100 - - Handle exeption for draining SFN without LFN - -diff --git a/shell/src/interpreter.py b/shell/src/interpreter.py -index 48ff02fd..011b9a23 100644 ---- a/shell/src/interpreter.py -+++ b/shell/src/interpreter.py -@@ -3088,6 +3088,10 @@ class DrainFileReplica(object): - - def drain(self): - filename = self.fileReplica.lfn -+ if not filename: -+ self.logError("The file with replica sfn: " + self.fileReplica.sfn + " doesn't have LFN, ignored\n") -+ self.interpreter.drainErrors.append((filename, self.fileReplica.sfn, "The file doesn't have LFN")) -+ return 1 - #step 4 : check the status and see if they the replica can be drained - if self.fileReplica.status != "-": - if self.fileReplica.status == "P": -@@ -3225,7 +3229,7 @@ class DrainReplicas(object): - if len(self.interpreter.drainErrors) > 0: - self.interpreter.ok("List of Errors:\n") - for (file, sfn, error) in self.interpreter.drainErrors: -- self.interpreter.ok("File: " + file + "\tsfn: " + sfn + "\tError: " + error) -+ self.interpreter.ok("File: " + str(file) + "\tsfn: " + sfn + "\tError: " + error) - - def drain(self): - gid = None diff --git a/dmlite-perfmarker-sleep-cond.patch b/dmlite-perfmarker-sleep-cond.patch deleted file mode 100644 
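
Two of the one-line C++ fixes above are easy to misread: in DomeTalker::dmlite_code(), sizeof(pairs) is the array's size in bytes, so the loop ran far past the last element until the bound was divided by sizeof(pairs[0]) to get the element count; and the DomeCredentials(const SecurityContext*) constructor now initialises oidc_authorized in its initialiser list like the other constructors do, so the flag is never read uninitialised.

The dmlite-perfmarker-sleep-cond patch that follows reworks the performance-marker loop in dav_disk_pool_and_feedback(): instead of polling htext_status() once per second around sleep(1), the loop blocks in pthread_cond_timedwait() on a condition variable that the new copy-done callback signals, so the final marker is written as soon as the transfer ends while periodic markers stretch to five-second intervals. A minimal Python sketch of the same wait-with-timeout pattern (illustrative names only, not a dmlite API):

    import threading, time

    class MarkerLoop:
        """Emit periodic progress markers until the worker signals completion."""

        def __init__(self):
            self.cond = threading.Condition()
            self.done = False

        def finish(self):
            # Worker thread: the transfer has ended (any final state).
            with self.cond:
                self.done = True
                self.cond.notify()

        def run(self, emit, first=1.0, interval=5.0):
            wait = first
            with self.cond:
                while not self.done:
                    # Wakes early when finish() notifies; otherwise times out
                    # because the next periodic marker is due.
                    self.cond.wait(timeout=wait)
                    wait = interval
                    emit()  # periodic marker, or the final one right after finish()

    loop = MarkerLoop()
    threading.Thread(target=lambda: (time.sleep(3), loop.finish())).start()
    loop.run(lambda: print("perf marker"))  # last marker arrives as soon as finish() runs

Aborts follow the same route in the patch: if flushing a marker fails, the loop calls htext_abort(), and the progress callback returns non-zero so that libcurl eventually tears the transfer down.
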
index 55cd384..0000000 --- a/dmlite-perfmarker-sleep-cond.patch +++ /dev/null @@ -1,496 +0,0 @@ -diff --git a/src/plugins/apache-httpd/src/client/htext.h b/src/plugins/apache-httpd/src/client/htext.h -index 1303e77e..853a146a 100644 ---- a/src/plugins/apache-httpd/src/client/htext.h -+++ b/src/plugins/apache-httpd/src/client/htext.h -@@ -67,6 +67,8 @@ typedef enum - HTEXTOP_NOHEAD, /* Disable HEAD requests */ - - HTEXTOP_USE_COPY_FROM_SOURCE, /* Use Copy Source (fetching) instead of Copy Destination (pushing) */ -+ HTEXTOP_COPY_DONE, /* If set, called after COPY operation is done */ -+ HTEXTOP_COPY_DONE_DATA, /* Additional data to pass to transfer done function */ - - HTEXTOP_SENTINEL, /* To mark the last one */ - -diff --git a/src/plugins/apache-httpd/src/client/htext_api.c b/src/plugins/apache-httpd/src/client/htext_api.c -index 1c3df75c..201a5e37 100644 ---- a/src/plugins/apache-httpd/src/client/htext_api.c -+++ b/src/plugins/apache-httpd/src/client/htext_api.c -@@ -92,6 +92,8 @@ static option_entry option_definitions[] = { - - { OT_INT, (option_value) 0 }, /* HTEXTOP_NOHEAD */ - { OT_INT, (option_value) 0 }, /* HTEXTOP_USE_COPY_FROM_SOURCE */ -+ { OT_POINTER, (option_value) NULL }, /* HTEXTOP_COPY_DONE */ -+ { OT_POINTER, (option_value) NULL }, /* HTEXTOP_COPY_DONE_DATA */ - }; - - /** -@@ -156,6 +158,10 @@ void htext_destroy(htext_handle *handle) - free(handle->error_string); - free(handle->partial_total); - free(handle->partial_done); -+ for (i = 0; i < handle->partials; ++i) { -+ free(handle->partial_rconn[i]); -+ } -+ free(handle->partial_rconn); - - for (i = 0; i < HTEXTOP_SENTINEL; ++i) { - if (handle->options[i].type == OT_STRING) -@@ -249,6 +255,7 @@ int htext_perform(htext_handle *handle) - { - void* (*performer)(void *); - const char *performer_str; -+ int i; - - /* Check status */ - switch (handle->status) { -@@ -263,9 +270,14 @@ int htext_perform(htext_handle *handle) - free(handle->error_string); - free(handle->partial_total); - free(handle->partial_done); -+ for (i = 0; i < handle->partials; ++i) { -+ free(handle->partial_rconn[i]); -+ } -+ free(handle->partial_rconn); - handle->error_string = NULL; - handle->partial_done = NULL; - handle->partial_total = NULL; -+ handle->partial_rconn = NULL; - handle->partials = 0; - - /* Check we have source and destination */ -@@ -326,7 +338,9 @@ int htext_perform(htext_handle *handle) - - void htext_abort(htext_handle *handle) - { -- /* TODO: Really abort */ -+ /* TODO: Transfer is aborted by returning non-zero value from progress -+ function (this doesn't happen immediately and with curl easy interface -+ there is no better option except closing directly transfer socket) */ - handle->status = HTEXTS_ABORTED; - } - -diff --git a/src/plugins/apache-httpd/src/client/htext_common.c b/src/plugins/apache-httpd/src/client/htext_common.c -index 34a28353..219988dc 100644 ---- a/src/plugins/apache-httpd/src/client/htext_common.c -+++ b/src/plugins/apache-httpd/src/client/htext_common.c -@@ -40,8 +40,6 @@ void htext_partial_clean(htext_chunk *p) - curl_easy_cleanup(p->curl); - if (p->fd) - GETIO(p->handle) ->close(p->fd); -- if (p->chunk_rconn && *(p->chunk_rconn)) -- free(*(p->chunk_rconn)); - if (p->error_string) - free(p->error_string); - if (p->http_response) -@@ -219,28 +217,37 @@ int htext_progress_callback(void *pp, curl_off_t dltotal, curl_off_t dlnow, - *(partial->chunk_done) = dlnow; - } - -- CURLcode res = CURLE_OK; -- char *ip = NULL; -- long port = 0; -- -- if (res == CURLE_OK) -- res = curl_easy_getinfo(partial->curl, 
CURLINFO_PRIMARY_IP, &ip); -- if (res == CURLE_OK) -- res = curl_easy_getinfo(partial->curl, CURLINFO_PRIMARY_PORT, &port); -- if (res == CURLE_OK && ip) { -- int len = strlen(ip)+15; -- char *rconn = malloc(len); -- if (strchr(ip, ':')) { -- snprintf(rconn, len, "tcp:[%s]:%ld", ip, port); -- } else { -- snprintf(rconn, len, "tcp:%s:%ld", ip, port); -+ /* Add remote connection only after real data transfer starts -+ otherwise ip:port info could come previous (headnode) connection */ -+ if (dltotal > 0 || dlnow > 0 || ultotal > 0 || ulnow > 0) { -+ CURLcode res = CURLE_OK; -+ char *ip = NULL; -+ long port = 0; -+ -+ if (res == CURLE_OK) -+ res = curl_easy_getinfo(partial->curl, CURLINFO_PRIMARY_IP, &ip); -+ if (res == CURLE_OK) -+ res = curl_easy_getinfo(partial->curl, CURLINFO_PRIMARY_PORT, &port); -+ -+ if (res == CURLE_OK && ip && port) { -+ char rconn[HTEXT_PERF_MARKER_MAX_RCONN_SIZE]; -+ if (strchr(ip, ':')) { -+ snprintf(rconn, sizeof(rconn), "tcp:[%s]:%ld", ip, port); -+ } else { -+ snprintf(rconn, sizeof(rconn), "tcp:%s:%ld", ip, port); -+ } -+ htext_log(partial->handle, "connection %s, download %ld/%ld, upload %ld/%ld", rconn, dlnow, dltotal, ulnow, ultotal); -+ if (!*(partial->chunk_rconn)) -+ *(partial->chunk_rconn) = calloc(sizeof(char), HTEXT_PERF_MARKER_MAX_RCONN_SIZE); -+ if (strcmp(*(partial->chunk_rconn), rconn)) -+ strcpy(*(partial->chunk_rconn), rconn); - } -- htext_log(partial->handle, "connection %s, download %ld/%ld, upload %ld/%ld", rconn, dlnow, dltotal, ulnow, ultotal); -- char *tmp = *(partial->chunk_rconn); -- *(partial->chunk_rconn) = rconn; -- if (tmp) free(tmp); - } - -+ /* Let curl abort terminated transfer (doesn't happen immediately) */ -+ if (partial->handle->status == HTEXTS_ABORTED) -+ return 1; -+ - return 0; - } - -diff --git a/src/plugins/apache-httpd/src/client/htext_copy.c b/src/plugins/apache-httpd/src/client/htext_copy.c -index 88c4aacd..0be2f969 100644 ---- a/src/plugins/apache-httpd/src/client/htext_copy.c -+++ b/src/plugins/apache-httpd/src/client/htext_copy.c -@@ -83,6 +83,7 @@ static size_t htext_copy_write_callback(char *buffer, size_t size, size_t nmemb, - memset(perf_data, 0, sizeof(*perf_data)); - perf_data->index = -1; - perf_data->transferred = -1; -+ perf_data->rconn[0] = '\0'; - } - else if (strncasecmp("Timestamp:", p, 10) == 0) - { -@@ -94,15 +95,17 @@ static size_t htext_copy_write_callback(char *buffer, size_t size, size_t nmemb, - } - else if (strncasecmp("Stripe Bytes Transferred:", p, 25) == 0) - { -- perf_data->transferred = atol(p + 26); -+ perf_data->transferred = atol(p + 25); - } - else if (strncasecmp("Total Stripe Count:", p, 19) == 0) - { -- perf_data->count = atoi(p + 20); -+ perf_data->count = atoi(p + 19); - } - else if (strncasecmp("RemoteConnections:", p, 18) == 0) - { -- strcpy(perf_data->rconn, p + 19); -+ char *c = p+18; -+ while (isspace(*c)) c++; -+ snprintf(perf_data->rconn, sizeof(perf_data->rconn), "%s", c); - } - // skip unused dCache perf markers - else if (strncasecmp("State:", p, 6) == 0) {} -@@ -137,8 +140,12 @@ static size_t htext_copy_write_callback(char *buffer, size_t size, size_t nmemb, - - handle->partial_done[perf_data->index] = perf_data->transferred; - handle->partial_total[perf_data->index] = 0; -- handle->partial_rconn[perf_data->index] = perf_data->rconn[0] != '\0' ? 
strdup(perf_data->rconn) : NULL; -- -+ if (perf_data->rconn[0] != '\0') { -+ if (!handle->partial_rconn[perf_data->index]) -+ handle->partial_rconn[perf_data->index] = calloc(sizeof(char), HTEXT_PERF_MARKER_MAX_RCONN_SIZE); -+ if (strcmp(handle->partial_rconn[perf_data->index], perf_data->rconn)) -+ strcpy(handle->partial_rconn[perf_data->index], perf_data->rconn); -+ } - } - - } -@@ -257,8 +264,15 @@ void *htext_copy_method(void *h) - - handle->http_status = control.http_status; - -+ /* Call transfer done callback */ -+ if (GETPTR(handle, HTEXTOP_COPY_DONE)) { -+ void (*copy_done)(htext_handle *, int, void *) = GETPTR(handle, HTEXTOP_COPY_DONE); -+ copy_done(handle, handle->status, GETPTR(handle, HTEXTOP_COPY_DONE_DATA)); -+ } -+ - /* Clean up */ - htext_partial_clean(&control); - curl_easy_cleanup(curl); -+ - return NULL ; - } -diff --git a/src/plugins/apache-httpd/src/client/htext_get.c b/src/plugins/apache-httpd/src/client/htext_get.c -index 0fb49bde..ae9b686b 100644 ---- a/src/plugins/apache-httpd/src/client/htext_get.c -+++ b/src/plugins/apache-httpd/src/client/htext_get.c -@@ -276,6 +276,13 @@ void *htext_get_method(void *h) - handle->status = HTEXTS_SUCCEEDED; - } - -+ /* Call transfer done callback */ -+ if (GETPTR(handle, HTEXTOP_COPY_DONE)) { -+ void (*copy_done)(htext_handle *, int, void *) = GETPTR(handle, HTEXTOP_COPY_DONE); -+ copy_done(handle, handle->status, GETPTR(handle, HTEXTOP_COPY_DONE_DATA)); -+ } -+ -+ /* Clean up */ - curl_easy_cleanup(curl); - free(partial_array); - -diff --git a/src/plugins/apache-httpd/src/client/htext_private.h b/src/plugins/apache-httpd/src/client/htext_private.h -index 0a406df7..b4369c1e 100644 ---- a/src/plugins/apache-httpd/src/client/htext_private.h -+++ b/src/plugins/apache-httpd/src/client/htext_private.h -@@ -113,13 +113,14 @@ typedef struct htext_chunk htext_chunk; - * and unprocessed last line from input data - */ - #define HTEXT_PERF_MARKER_MAX_LINE_SIZE 1024 -+#define HTEXT_PERF_MARKER_MAX_RCONN_SIZE 256 - struct htext_perf_marker_parsed_data - { - time_t latest; /* Timestamp for received data */ - int index; /* Stripe index */ - off_t transferred; /* Stripe bytes transferred */ - int count; /* Total stripe count */ -- char rconn[HTEXT_PERF_MARKER_MAX_LINE_SIZE]; /* Remote connections */ -+ char rconn[HTEXT_PERF_MARKER_MAX_RCONN_SIZE]; /* Remote connections */ - - char remaining[HTEXT_PERF_MARKER_MAX_LINE_SIZE]; /* Remaining input data from unterminated last line */ - }; -diff --git a/src/plugins/apache-httpd/src/client/htext_put.c b/src/plugins/apache-httpd/src/client/htext_put.c -index 0c3e4c1d..94d8781a 100644 ---- a/src/plugins/apache-httpd/src/client/htext_put.c -+++ b/src/plugins/apache-httpd/src/client/htext_put.c -@@ -377,6 +377,12 @@ void *htext_put_method(void *h) - handle->status = HTEXTS_SUCCEEDED; - } - -+ /* Call transfer done callback */ -+ if (GETPTR(handle, HTEXTOP_COPY_DONE)) { -+ void (*copy_done)(htext_handle *, int, void *) = GETPTR(handle, HTEXTOP_COPY_DONE); -+ copy_done(handle, handle->status, GETPTR(handle, HTEXTOP_COPY_DONE_DATA)); -+ } -+ - curl_easy_cleanup(curl); - free(partial_array); - -diff --git a/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c b/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -index dd586da6..cdc22c30 100644 ---- a/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -+++ b/src/plugins/apache-httpd/src/mod_lcgdm_disk/copy.c -@@ -42,6 +42,8 @@ typedef struct - request_rec *request; - const char *source; - const char *destination; -+ pthread_mutex_t *done_lock; -+ pthread_cond_t 
*done_cond; - } dav_disk_copy_data; - - /* Custom I/O handler */ -@@ -191,6 +193,30 @@ static void dav_disk_copy_log(htext_handle *handle, HTEXT_LOG_TYPE type, - } - } - -+/** -+ * Called by htext when transfer is done -+ * @param handle The handle that triggers the call -+ * @param status The size of the message (it might not be NULL terminated) -+ * @param ud User defined data -+ */ -+static void dav_disk_copy_done(htext_handle *handle, int status, void *ud) -+{ -+ dav_disk_copy_data *ddc = (dav_disk_copy_data*) ud; -+ -+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ddc->request, -+ "Remote copy done (%d): %s => %s status %i", -+ htext_http_code(handle), ddc->source, ddc->destination, status); -+ -+ // send signal to finish perf marker loop -+ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ddc->request, "EEEE Remote copy done (%d): %s => %s status %i", htext_http_code(handle), ddc->source, ddc->destination, status); -+ pthread_mutex_lock(ddc->done_lock); -+ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ddc->request, "EEEE Remote copy done (%d): %s => %s status %i", htext_http_code(handle), ddc->source, ddc->destination, status); -+ pthread_cond_signal(ddc->done_cond); -+ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ddc->request, "EEEE Remote copy done (%d): %s => %s status %i", htext_http_code(handle), ddc->source, ddc->destination, status); -+ pthread_mutex_unlock(ddc->done_lock); -+ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ddc->request, "EEEE Remote copy done (%d): %s => %s status %i", htext_http_code(handle), ddc->source, ddc->destination, status); -+} -+ - /** - * Generates and sends the performance clients following GridFTP format - * http://www.ogf.org/documents/GFD.20.pdf (Appendix II), skipping the -@@ -201,14 +223,14 @@ static void dav_disk_send_performance_markers(dav_disk_copy_data *ddc, size_t n, - { - (void) total; - -- char buf[80]; -+ char buf[256+2]; // HTEXT_PERF_MARKER_MAX_RCONN_SIZE+2 - time_t timestamp = time(NULL ); - size_t i; - - for (i = 0; i < n; ++i) { - buf[0] = '\0'; - if (rconn && rconn[i]) { -- snprintf(buf, 78, "\tRemoteConnections: %s", rconn[i]); -+ snprintf(buf, sizeof(buf)-2, "\tRemoteConnections: %s", rconn[i]); - strcat(buf, "\n"); // we reserved space for endl - } - apr_brigade_printf(ddc->brigade, ap_filter_flush, ddc->output, -@@ -228,52 +250,81 @@ static void dav_disk_send_performance_markers(dav_disk_copy_data *ddc, size_t n, - static dav_error *dav_disk_pool_and_feedback(htext_handle *handle, - dav_disk_copy_data *ddc) - { -- int wait, status; -+ int status, interval = 1, done = 0; - dav_error *error = NULL; - const char *error_string; - -- do { -- size_t *total, *done, n, i; -- size_t globalDone, globalTotal; -- char **rconn; -+ pthread_mutex_lock(ddc->done_lock); - -+ do { - status = htext_status(handle); -- switch (status) { -- case HTEXTS_SUCCEEDED: -- case HTEXTS_FAILED: -- case HTEXTS_ABORTED: -- wait = 0; -- break; -- default: -- /* In the first go we need to set the reply */ -- if (ddc->request->status == 0) { -- ddc->request->status = HTTP_ACCEPTED; -- ap_set_content_type(ddc->request, "text/plain"); -- } -- /* Print progress */ -- htext_progress(handle, &n, &total, &done, &rconn); -- globalDone = globalTotal = 0; -- for (i = 0; i < n; ++i) { -- globalDone += done[i]; -- globalTotal += total[i]; -- } -- -- dav_disk_send_performance_markers(ddc, n, total, done, rconn); -- -- if (ap_fflush(ddc->output, ddc->brigade) == APR_SUCCESS) { -- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ddc->request, -- "COPY '%s' %lu/%lu", ddc->request->uri, -- (unsigned 
long) globalDone, -- (unsigned long) globalTotal); -- wait = 1; -- sleep(1); -- } -- else { -- wait = 0; -- htext_abort(handle); -- } -+ -+ if (status == HTEXTS_SUCCEEDED || status == HTEXTS_FAILED || status == HTEXTS_ABORTED) { -+ done = 1; -+ } -+ else { -+ struct timespec ts; -+ clock_gettime(CLOCK_REALTIME, &ts); -+ ts.tv_sec += interval; -+ interval = 5; // 5s between perf markers except for first one -+ -+ int rc = pthread_cond_timedwait(ddc->done_cond, ddc->done_lock, &ts); -+ if (rc == 0) { -+ // transfer done, continue to get new status -+ continue; -+ } -+ else if (rc == ETIMEDOUT) { -+ // ongoing transfer, reached time to print new perfmarker -+ } -+ else { -+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ddc->request, -+ "Waiting for perf marker condition failed for COPY '%s' rc %i", -+ ddc->request->uri, rc); -+ htext_abort(handle); -+ continue; -+ } -+ } -+ -+ /* Always write last performance marker and in defined intervals */ -+ { -+ size_t *total, *done, n, i; -+ size_t globalDone, globalTotal; -+ char **rconn; -+ -+ /* In the first go we need to set the reply */ -+ if (ddc->request->status == 0) { -+ ddc->request->status = HTTP_ACCEPTED; -+ ap_set_content_type(ddc->request, "text/plain"); -+ } -+ /* Print progress */ -+ htext_progress(handle, &n, &total, &done, &rconn); -+ dav_disk_send_performance_markers(ddc, n, total, done, rconn); -+ -+ globalDone = globalTotal = 0; -+ for (i = 0; i < n; ++i) { -+ globalDone += done[i]; -+ globalTotal += total[i]; -+ } -+ -+ if (ap_fflush(ddc->output, ddc->brigade) == APR_SUCCESS) { -+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ddc->request, -+ "COPY '%s' %lu/%lu", ddc->request->uri, -+ (unsigned long) globalDone, -+ (unsigned long) globalTotal); -+ } -+ else { -+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ddc->request, -+ "Failed to send perf marker for COPY '%s' %lu/%lu", -+ ddc->request->uri, -+ (unsigned long) globalDone, -+ (unsigned long) globalTotal); -+ htext_abort(handle); -+ } - } -- } while (wait); -+ -+ } while (!done); -+ -+ pthread_mutex_unlock(ddc->done_lock); - - error_string = htext_error_string(handle); - switch (status) { -@@ -439,6 +490,8 @@ static dav_error *dav_disk_generic_copy(const dav_resource* res, const char* upr - dav_disk_copy_data ddc; - request_rec* req = res->info->request; - dav_disk_dir_conf* d_conf = res->info->d_conf; -+ pthread_mutex_t done_lock; -+ pthread_cond_t done_cond; - - int oldcancel; - pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldcancel); -@@ -475,6 +528,9 @@ static dav_error *dav_disk_generic_copy(const dav_resource* res, const char* upr - htext_setopt(handle, HTEXTOP_LOW_SPEED_TIME, d_conf->low_speed_time); - htext_setopt(handle, HTEXTOP_LOW_SPEED_LIMIT, d_conf->low_speed_limit); - -+ htext_setopt(handle, HTEXTOP_COPY_DONE, dav_disk_copy_done); -+ htext_setopt(handle, HTEXTOP_COPY_DONE_DATA, &ddc); -+ - htext_setopt(handle, HTEXTOP_LOGCALLBACK, dav_disk_copy_log); - htext_setopt(handle, HTEXTOP_LOGCALLBACK_DATA, &ddc); - htext_setopt(handle, HTEXTOP_VERBOSITY, 2); -@@ -486,13 +542,23 @@ static dav_error *dav_disk_generic_copy(const dav_resource* res, const char* upr - ddc.request = req; - ddc.source = src; - ddc.destination = dst; -+ ddc.done_lock = &done_lock; -+ ddc.done_cond = &done_cond; -+ -+ pthread_mutex_init(&done_lock, NULL); -+ pthread_cond_init(&done_cond, NULL); - - /* Run */ - if (htext_perform(handle) != 0) { - error = dav_shared_new_error(req, NULL, - HTTP_INTERNAL_SERVER_ERROR, "Could not perform the action: %s", - htext_error_string(handle)); -+ -+ 
pthread_cond_destroy(&done_cond); -+ pthread_mutex_destroy(&done_lock); -+ - htext_destroy(handle); -+ - return error; - } - -@@ -504,6 +570,9 @@ static dav_error *dav_disk_generic_copy(const dav_resource* res, const char* upr - - /* Finish */ - htext_destroy(handle); -+ -+ pthread_cond_destroy(&done_cond); -+ pthread_mutex_destroy(&done_lock); - - if (!error) { - bkt = apr_bucket_eos_create(ddc.output->c->bucket_alloc); diff --git a/dmlite-prettyprint.patch b/dmlite-prettyprint.patch deleted file mode 100644 index a1f072a..0000000 --- a/dmlite-prettyprint.patch +++ /dev/null @@ -1,112 +0,0 @@ -diff --git a/shell/src/interpreter.py b/shell/src/interpreter.py -index a10ca4a8..64641a02 100644 ---- a/shell/src/interpreter.py -+++ b/shell/src/interpreter.py -@@ -13,6 +13,8 @@ import re - import time - import dateutil.parser - import pycurl -+import traceback -+ - try: - from urllib.parse import unquote - except ImportError: -@@ -159,20 +161,7 @@ class DMLiteInterpreter(object): - return None - - def prettySize(self, size): -- isize = int(size) # argument can be string -- if isize < 1024**1: -- prettySize = "%iB" % isize -- elif isize < 1024**2: -- prettySize = '%.2fkB' % (float(isize) / 1024**1) -- elif isize < 1024**3: -- prettySize = '%.2fMB' % (float(isize) / 1024**2) -- elif isize < 1024**4: -- prettySize = '%.2fGB' % (float(isize) / 1024**3) -- elif isize < 1024**5: -- prettySize = '%.2fTB' % (float(isize) / 1024**4) -- else: -- prettySize = '%.2fPB' % (float(isize) / 1024**5) -- return prettySize -+ return prettySize(size) - - def prettyInputSize(self, prettysize): - if 'PB' in prettysize: -@@ -1715,31 +1704,52 @@ The latter is the default. - except Exception as e: - return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) - -+def prettySize(size): -+ isize = int(size) # argument can be string -+ if isize < 1024**1: -+ prettySize = "%iB" % isize -+ elif isize < 1024**2: -+ prettySize = '%.2fkB' % (float(isize) / 1024**1) -+ elif isize < 1024**3: -+ prettySize = '%.2fMB' % (float(isize) / 1024**2) -+ elif isize < 1024**4: -+ prettySize = '%.2fGB' % (float(isize) / 1024**3) -+ elif isize < 1024**5: -+ prettySize = '%.2fTB' % (float(isize) / 1024**4) -+ else: -+ prettySize = '%.2fPB' % (float(isize) / 1024**5) -+ return prettySize -+ - - def pprint_dictionary(dpool, indent=4): -- ret = StringIO.StringIO() -+ ret = '' - for key, value in dpool.items(): -- ret.write(" " * indent) -- ret.write(key) -- ret.write(": ") -+ ret += (" " * indent) -+ ret += key -+ ret += ": " - if type(value) is dict: -- ret.write(pprint_dictionary(value, indent + 4)) -+ ret += pprint_dictionary(value, indent + 4) - elif type(value) is list and len(value) > 0 and type(value[0]) is dict: - for item in value: -- ret.write("\n") -- ret.write(pprint_dictionary(item, indent + 4)) -- ret.write("\n") -+ ret += "\n" -+ ret += pprint_dictionary(item, indent + 4) -+ ret += "\n" - #workaround to print the status and activitystatus as String - elif str(key) in ["status", "poolstatus"]: -- ret.write(fsstatusbycode[int(value)]) -- ret.write("\n") -+ ret += fsstatusbycode[int(value)] -+ ret += "\n" -+ elif "space" in key or "size" in key: -+ ret += str(value) -+ ret += " (" -+ ret += prettySize(value) -+ ret += ")\n" - elif str(key) in ["activitystatus"]: -- ret.write(activitystatusbycode[int(value)]) -- ret.write("\n") -+ ret += activitystatusbycode[int(value)] -+ ret += "\n" - else: -- ret.write(str(value)) -- ret.write("\n") -- return ret.getvalue() -+ ret += str(value) -+ ret += "\n" -+ return ret - - - class 
PoolInfoCommand(ShellCommand): -@@ -1773,7 +1783,7 @@ class PoolInfoCommand(ShellCommand): - self.ok("%s (%s)\n%s" % (pool.name, pool.type, pprint_dictionary(dpool))) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(e.__str__() + '\n' + traceback.format_exc() + '\nParameter(s): ' + ', '.join(given)) - - - class PoolModifyCommand(ShellCommand): diff --git a/dmlite-python3.patch b/dmlite-python3.patch index d89cf55..096dd6c 100644 --- a/dmlite-python3.patch +++ b/dmlite-python3.patch @@ -1,90 +1,205 @@ -diff -ur dmlite-1.14.1.orig/scripts/StAR-accounting/star-accounting.py dmlite-1.14.1/scripts/StAR-accounting/star-accounting.py ---- dmlite-1.14.1.orig/scripts/StAR-accounting/star-accounting.py 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/scripts/StAR-accounting/star-accounting.py 2020-11-01 21:21:03.885665263 +0100 +diff -NPurBb dmlite-1.15.0.orig/src/plugins/apache-httpd/buildcurl.sh dmlite-1.15.0/src/plugins/apache-httpd/buildcurl.sh +--- dmlite-1.15.0.orig/src/plugins/apache-httpd/buildcurl.sh 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/src/plugins/apache-httpd/buildcurl.sh 2021-06-14 15:01:56.333257596 +0200 +@@ -6,7 +6,12 @@ + cd src/plugins/apache-httpd/external/curl + echo "Current dir: `pwd`" + ls -l ++which cmake3 2> /dev/null ++if [ $? -eq 0 ]; then + cmake3 . -DCMAKE_INSTALL_PREFIX=/tmp/curl/bogusinstall -DBUILD_CURL_EXE=false -DBUILD_TESTING=false -DBUILD_SHARED_LIBS=false -DCMAKE_CXX_FLAGS=-fPIC -DCMAKE_C_FLAGS=-fPIC -DCMAKE_USE_LIBSSH2=false ++else ++cmake . -DCMAKE_INSTALL_PREFIX=/tmp/curl/bogusinstall -DBUILD_CURL_EXE=false -DBUILD_TESTING=false -DBUILD_SHARED_LIBS=false -DCMAKE_CXX_FLAGS=-fPIC -DCMAKE_C_FLAGS=-fPIC -DCMAKE_USE_LIBSSH2=false ++fi + make -j2 install + cd - + echo "Current dir: `pwd`" +diff -NPurBb dmlite-1.15.0.orig/python/sample.py dmlite-1.15.0/python/sample.py +--- dmlite-1.15.0.orig/python/sample.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/python/sample.py 2021-06-14 14:32:20.838231809 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 - ########################################################################### - # - # star-accounting -diff -ur dmlite-1.14.1.orig/shell/src/argus.py dmlite-1.14.1/shell/src/argus.py ---- dmlite-1.14.1.orig/shell/src/argus.py 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/shell/src/argus.py 2020-11-01 21:24:40.692135650 +0100 + + from __future__ import print_function + import pydmlite +diff -NPurBb dmlite-1.15.0.orig/scripts/bdii/dome-info-provider.py dmlite-1.15.0/scripts/bdii/dome-info-provider.py +--- dmlite-1.15.0.orig/scripts/bdii/dome-info-provider.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/scripts/bdii/dome-info-provider.py 2021-06-14 14:30:54.289208914 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 - # Simplified Argus banning data extracted from server configuration - # read directly by its WSDL interface. 
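
The dmlite-prettyprint hunks above do two things: pprint_dictionary() now builds its output by plain string concatenation instead of StringIO.StringIO() (a Python 2 only module spelling), and any key containing "space" or "size" is printed both as the raw byte count and through prettySize(), which was hoisted to module level so the interpreter method and the new call site share it. A compact equivalent of that formatter, for illustration only:

    def pretty_size(size):
        """Binary (1024-based) units, matching the shell's prettySize()."""
        isize = int(size)  # argument may be a string
        for exp, unit in ((5, 'PB'), (4, 'TB'), (3, 'GB'), (2, 'MB'), (1, 'kB')):
            if isize >= 1024 ** exp:
                return '%.2f%s' % (float(isize) / 1024 ** exp, unit)
        return '%iB' % isize

    assert pretty_size(512) == '512B'
    assert pretty_size('1536') == '1.50kB'  # accepts strings, like the original
    assert pretty_size(3 * 1024 ** 4) == '3.00TB'

With that change a size-like entry in pool output reads as the raw value followed by "(3.00TB)" rather than the bare integer.
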
- # -diff -ur dmlite-1.14.1.orig/shell/src/dmlite-mysql-dirspaces.py dmlite-1.14.1/shell/src/dmlite-mysql-dirspaces.py ---- dmlite-1.14.1.orig/shell/src/dmlite-mysql-dirspaces.py 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/shell/src/dmlite-mysql-dirspaces.py 2020-11-01 21:21:03.885665263 +0100 + + import argparse + import socket +diff -NPurBb dmlite-1.15.0.orig/scripts/SRR/dpm-storage-summary.py dmlite-1.15.0/scripts/SRR/dpm-storage-summary.py +--- dmlite-1.15.0.orig/scripts/SRR/dpm-storage-summary.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/scripts/SRR/dpm-storage-summary.py 2021-06-14 14:31:02.857112187 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 - # - # A script that calculates the space occupied by files in every directory - # and can set the metadata filesize field with it, for the first N levels -diff -ur dmlite-1.14.1.orig/shell/src/dmlite-prom.py dmlite-1.14.1/shell/src/dmlite-prom.py ---- dmlite-1.14.1.orig/shell/src/dmlite-prom.py 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/shell/src/dmlite-prom.py 2020-11-01 21:24:40.693135652 +0100 + ################################################################################ + ## Original script is now integrated in the dmliteshell which should be used ## + ## instead of calling this legacy but fully backward compatible CLI interface ## +diff -NPurBb dmlite-1.15.0.orig/scripts/StAR-accounting/star-accounting.py dmlite-1.15.0/scripts/StAR-accounting/star-accounting.py +--- dmlite-1.15.0.orig/scripts/StAR-accounting/star-accounting.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/scripts/StAR-accounting/star-accounting.py 2021-06-14 14:30:58.238164333 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + ################################################################################ + ## Original script is now integrated in the dmliteshell which should be used ## + ## instead of calling this legacy but fully backward compatible CLI interface ## +diff -NPurBb dmlite-1.15.0.orig/scripts/utils/dmlite-mysql-dirspaces.py dmlite-1.15.0/scripts/utils/dmlite-mysql-dirspaces.py +--- dmlite-1.15.0.orig/scripts/utils/dmlite-mysql-dirspaces.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/scripts/utils/dmlite-mysql-dirspaces.py 2021-06-14 14:30:46.657295078 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + ################################################################################ + ## Original script is now integrated in the dmliteshell which should be used ## + ## instead of calling this legacy but fully backward compatible CLI interface ## +diff -NPurBb dmlite-1.15.0.orig/scripts/utils/dmlite-prom.py dmlite-1.15.0/scripts/utils/dmlite-prom.py +--- dmlite-1.15.0.orig/scripts/utils/dmlite-prom.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/scripts/utils/dmlite-prom.py 2021-06-14 14:30:50.735249039 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 # # Dump DPM DOME information in a format that can be scraped # by prometheus Node exporter text collector -diff -ur dmlite-1.14.1.orig/shell/src/dmlite-shell dmlite-1.14.1/shell/src/dmlite-shell ---- dmlite-1.14.1.orig/shell/src/dmlite-shell 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/shell/src/dmlite-shell 2020-11-01 21:24:40.693135652 +0100 +diff -NPurBb dmlite-1.15.0.orig/scripts/utils/dpm-dump.py dmlite-1.15.0/scripts/utils/dpm-dump.py +--- dmlite-1.15.0.orig/scripts/utils/dpm-dump.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/scripts/utils/dpm-dump.py 
2021-06-14 14:30:02.736790919 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + ################################################################################ + ## Original script is now integrated in the dmliteshell which should be used ## + ## instead of calling this legacy but fully backward compatible CLI interface ## +diff -NPurBb dmlite-1.15.0.orig/scripts/utils/lost.py dmlite-1.15.0/scripts/utils/lost.py +--- dmlite-1.15.0.orig/scripts/utils/lost.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/scripts/utils/lost.py 2021-06-14 14:30:43.033335992 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + ################################################################################ + ## Original script is now integrated in the dmliteshell which should be used ## + ## instead of calling this legacy but fully backward compatible CLI interface ## +diff -NPurBb dmlite-1.15.0.orig/src/dome/cli/dome.py dmlite-1.15.0/src/dome/cli/dome.py +--- dmlite-1.15.0.orig/src/dome/cli/dome.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/src/dome/cli/dome.py 2021-06-14 14:32:24.161194292 +0200 @@ -1,4 +1,4 @@ --#! /usr/bin/python2 -+#! /usr/bin/python3 - # dmliteshell.py +-#!/usr/bin/python2 ++#!/usr/bin/python3 + # dome.py """ - This file implements the DMLite shell. -diff -ur dmlite-1.14.1.orig/shell/src/dome-info-provider.py dmlite-1.14.1/shell/src/dome-info-provider.py ---- dmlite-1.14.1.orig/shell/src/dome-info-provider.py 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/shell/src/dome-info-provider.py 2020-11-01 21:21:03.883665259 +0100 + This file implements the Dome CLI. +diff -NPurBb dmlite-1.15.0.orig/tests/dpm/dpm-tester.py dmlite-1.15.0/tests/dpm/dpm-tester.py +--- dmlite-1.15.0.orig/tests/dpm/dpm-tester.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/dpm/dpm-tester.py 2021-06-14 14:32:17.386270780 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 + from __future__ import print_function, division, absolute_import - import argparse - import socket -diff -ur dmlite-1.14.1.orig/shell/src/dpm-storage-summary.cgi dmlite-1.14.1/shell/src/dpm-storage-summary.cgi ---- dmlite-1.14.1.orig/shell/src/dpm-storage-summary.cgi 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/shell/src/dpm-storage-summary.cgi 2020-11-01 21:21:03.884665261 +0100 + import gfal2 +diff -NPurBb dmlite-1.15.0.orig/tests/python/catalog_first.py dmlite-1.15.0/tests/python/catalog_first.py +--- dmlite-1.15.0.orig/tests/python/catalog_first.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/catalog_first.py 2021-06-14 14:31:48.512596756 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 - # CGI wrapper for dpm-storage-summary.py - # - # Parameters read from environment: -diff -ur dmlite-1.14.1.orig/shell/src/dpm-storage-summary.py dmlite-1.14.1/shell/src/dpm-storage-summary.py ---- dmlite-1.14.1.orig/shell/src/dpm-storage-summary.py 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/shell/src/dpm-storage-summary.py 2020-11-01 21:21:03.885665263 +0100 + + import stat + import unittest +diff -NPurBb dmlite-1.15.0.orig/tests/python/catalog.py dmlite-1.15.0/tests/python/catalog.py +--- dmlite-1.15.0.orig/tests/python/catalog.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/catalog.py 2021-06-14 14:31:58.135488118 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 - # WLCG Storage Resource Reporting implementation for DPM - # * 
https://docs.google.com/document/d/1yzCvKpxsbcQC5K9MyvXc-vBF1HGPBk4vhjw3MEXoXf8/edit - # * https://twiki.cern.ch/twiki/bin/view/LCG/AccountingTaskForce -diff -ur dmlite-1.14.1.orig/shell/src/utils.py dmlite-1.14.1/shell/src/utils.py ---- dmlite-1.14.1.orig/shell/src/utils.py 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/shell/src/utils.py 2020-11-01 21:24:40.693135652 +0100 + + import sys + import time +diff -NPurBb dmlite-1.15.0.orig/tests/python/dm-ls.py dmlite-1.15.0/tests/python/dm-ls.py +--- dmlite-1.15.0.orig/tests/python/dm-ls.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/dm-ls.py 2021-06-14 14:31:40.647685538 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 - from __future__ import absolute_import from __future__ import print_function - from __future__ import division -diff -ur dmlite-1.14.1.orig/tests/dpm/dpm-tester.py dmlite-1.14.1/tests/dpm/dpm-tester.py ---- dmlite-1.14.1.orig/tests/dpm/dpm-tester.py 2020-09-24 12:12:28.000000000 +0200 -+++ dmlite-1.14.1/tests/dpm/dpm-tester.py 2020-11-01 21:21:03.886665265 +0100 + import pydmlite + import sys +diff -NPurBb dmlite-1.15.0.orig/tests/python/dm-mkdir.py dmlite-1.15.0/tests/python/dm-mkdir.py +--- dmlite-1.15.0.orig/tests/python/dm-mkdir.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/dm-mkdir.py 2021-06-14 14:31:44.521641806 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + from __future__ import print_function + import pydmlite + import sys +diff -NPurBb dmlite-1.15.0.orig/tests/python/dm-mkfile.py dmlite-1.15.0/tests/python/dm-mkfile.py +--- dmlite-1.15.0.orig/tests/python/dm-mkfile.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/dm-mkfile.py 2021-06-14 14:32:05.228408039 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + from __future__ import print_function + import pydmlite + import sys +diff -NPurBb dmlite-1.15.0.orig/tests/python/dm-mklink.py dmlite-1.15.0/tests/python/dm-mklink.py +--- dmlite-1.15.0.orig/tests/python/dm-mklink.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/dm-mklink.py 2021-06-14 14:31:36.127736566 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + from __future__ import print_function + import pydmlite + import sys +diff -NPurBb dmlite-1.15.0.orig/tests/python/dm-physicallocation.py dmlite-1.15.0/tests/python/dm-physicallocation.py +--- dmlite-1.15.0.orig/tests/python/dm-physicallocation.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/dm-physicallocation.py 2021-06-14 14:32:01.484450308 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + from __future__ import print_function + import pydmlite + import sys +diff -NPurBb dmlite-1.15.0.orig/tests/python/dm-rmdir.py dmlite-1.15.0/tests/python/dm-rmdir.py +--- dmlite-1.15.0.orig/tests/python/dm-rmdir.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/dm-rmdir.py 2021-06-14 14:31:51.578562144 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + from __future__ import print_function + import pydmlite + import sys +diff -NPurBb dmlite-1.15.0.orig/tests/python/dm-rmfile.py dmlite-1.15.0/tests/python/dm-rmfile.py +--- dmlite-1.15.0.orig/tests/python/dm-rmfile.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/dm-rmfile.py 2021-06-14 14:31:54.822525520 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + from __future__ import print_function + import pydmlite + import sys +diff -NPurBb 
dmlite-1.15.0.orig/tests/python/dm-rmlink.py dmlite-1.15.0/tests/python/dm-rmlink.py +--- dmlite-1.15.0.orig/tests/python/dm-rmlink.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/dm-rmlink.py 2021-06-14 14:32:11.916332534 +0200 +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/python3 + from __future__ import print_function + import pydmlite + import sys +diff -NPurBb dmlite-1.15.0.orig/tests/python/run-all.py dmlite-1.15.0/tests/python/run-all.py +--- dmlite-1.15.0.orig/tests/python/run-all.py 2021-06-07 10:22:42.000000000 +0200 ++++ dmlite-1.15.0/tests/python/run-all.py 2021-06-14 14:32:08.106375547 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 - from __future__ import print_function, division, absolute_import - import gfal2 + import unittest + diff --git a/dmlite-remove_explicit_xrootd_plugin_version.patch b/dmlite-remove_explicit_xrootd_plugin_version.patch deleted file mode 100644 index 07c50a3..0000000 --- a/dmlite-remove_explicit_xrootd_plugin_version.patch +++ /dev/null @@ -1,45 +0,0 @@ -commit f5aaf7baa63db959cbe7b81a0d780b135b95c0f0 -Author: Petr Vokac -Date: Mon Sep 28 21:37:01 2020 +0200 - - Remove explicity library version from XRootD configs - -diff --git a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -index 18fca76c..16559388 100644 ---- a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -+++ b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg -@@ -43,7 +43,7 @@ fi - dpm.nohv1 - - if exec xrootd --#xrd.protocol XrdHttp /usr/$(xrdlibdir)/libXrdHttp-4.so -+#xrd.protocol XrdHttp /usr/$(xrdlibdir)/libXrdHttp.so - #http.exthandler dome /usr/$(xrdlibdir)/libdome.so /etc/domedisk.conf - #http.selfhttps2http yes - #http.cert /etc/grid-security/dpmmgr/dpmcert.pem -diff --git a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -index 11ee7c9d..93149711 100644 ---- a/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -+++ b/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg -@@ -47,7 +47,7 @@ dpm.nohv1 - - if exec xrootd - dpm.xrdserverport 1095 --#xrd.protocol XrdHttp /usr/$(xrdlibdir)/libXrdHttp-4.so -+#xrd.protocol XrdHttp /usr/$(xrdlibdir)/libXrdHttp.so - #http.exthandler dome /usr/$(xrdlibdir)/libdome.so /etc/domehead.conf - #http.selfhttps2http yes - #http.cert /etc/grid-security/dpmmgr/dpmcert.pem -diff --git a/src/puppet/dmlite/templates/xrootd/dpm-xrootd.cfg.erb b/src/puppet/dmlite/templates/xrootd/dpm-xrootd.cfg.erb -index 46a3fa20..cb88efd0 100644 ---- a/src/puppet/dmlite/templates/xrootd/dpm-xrootd.cfg.erb -+++ b/src/puppet/dmlite/templates/xrootd/dpm-xrootd.cfg.erb -@@ -20,7 +20,7 @@ dpm.enablecmsclient - dpm.allowvo <%= @dpm_allowvo %> - <% end -%> - <% if @dpm_enable_dome -%> --xrd.protocol XrdHttp /usr/lib64/libXrdHttp-4.so -+xrd.protocol XrdHttp /usr/lib64/libXrdHttp.so - http.exthandler dome /usr/lib64/libdome.so <%= @dpm_dome_conf_file %> - http.selfhttps2http yes - http.cert /etc/grid-security/dpmmgr/dpmcert.pem diff --git a/dmlite-remove_explicit_xrootd_plugin_version2.patch b/dmlite-remove_explicit_xrootd_plugin_version2.patch deleted file mode 100644 index 54ee22c..0000000 --- a/dmlite-remove_explicit_xrootd_plugin_version2.patch +++ /dev/null @@ -1,131 +0,0 @@ -diff -NpurBb dmlite-1.14.2.orig/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg dmlite-1.14.2/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg ---- dmlite-1.14.2.orig/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg 
2020-12-06 20:38:19.362603416 +0100 -+++ dmlite-1.14.2/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmdisk.cfg 2020-12-06 20:46:46.143504042 +0100 -@@ -27,9 +27,9 @@ - #xrd.tlsca certdir /etc/grid-security/certificates - #xrootd.tls none - xrd.timeout idle 60m --ofs.osslib +cksio libXrdDPMOss.so.3 --ofs.authlib libXrdDPMDiskAcc.so.3 --ofs.ckslib = libXrdDPMCks.so.3 -+ofs.osslib +cksio libXrdDPMOss.so -+ofs.authlib libXrdDPMDiskAcc.so -+ofs.ckslib = libXrdDPMCks.so - ofs.authorize - ofs.tpc xfr 25 oids fcreds gsi =X509_USER_PROXY pgm /usr/bin/xrdcp --server - all.role server -diff -NpurBb dmlite-1.14.2.orig/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmfedredir_atlas.cfg dmlite-1.14.2/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmfedredir_atlas.cfg ---- dmlite-1.14.2.orig/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmfedredir_atlas.cfg 2020-11-05 12:25:07.000000000 +0100 -+++ dmlite-1.14.2/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmfedredir_atlas.cfg 2020-12-06 20:46:56.959373434 +0100 -@@ -29,10 +29,10 @@ - #xrd.tlsca certdir /etc/grid-security/certificates - #xrootd.tls none - --ofs.cmslib libXrdDPMFinder.so.3 --ofs.osslib libXrdDPMOss.so.3 -+ofs.cmslib libXrdDPMFinder.so -+ofs.osslib libXrdDPMOss.so - --ofs.authlib libXrdDPMRedirAcc.so.3 -+ofs.authlib libXrdDPMRedirAcc.so - ofs.authorize - - # access may be restricted by vo; but this option is only -diff -NpurBb dmlite-1.14.2.orig/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg dmlite-1.14.2/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg ---- dmlite-1.14.2.orig/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg 2020-12-06 20:38:19.362603416 +0100 -+++ dmlite-1.14.2/src/plugins/dpm-xrootd/etc/xrootd/xrootd-dpmredir.cfg 2020-12-06 20:47:20.567088353 +0100 -@@ -29,10 +29,10 @@ - #xrd.tlsca certdir /etc/grid-security/certificates - #xrootd.tls none - xrd.timeout idle 60m --ofs.cmslib libXrdDPMFinder.so.3 --ofs.osslib +cksio libXrdDPMOss.so.3 --ofs.authlib libXrdDPMRedirAcc.so.3 --ofs.ckslib = libXrdDPMCks.so.3 -+ofs.cmslib libXrdDPMFinder.so -+ofs.osslib +cksio libXrdDPMOss.so -+ofs.authlib libXrdDPMRedirAcc.so -+ofs.ckslib = libXrdDPMCks.so - ofs.authorize - ofs.forward all - all.role manager -@@ -81,7 +81,7 @@ dpm.mmreqhost localhost - # e.g. 
libXrdAliceTokenAcc - # - # setenv TTOKENAUTHZ_AUTHORIZATIONFILE=/etc/xrd.authz.cnf --# ofs.authlib libXrdDPMRedirAcc.so.3 libXrdAliceTokenAcc.so.0.0.0 -+# ofs.authlib libXrdDPMRedirAcc.so libXrdAliceTokenAcc.so.0.0.0 - # dpm.replacementprefix /alice /dpm/example.com/home/alice - # dpm.fixedidrestrict /dpm/example.com/home/alice - # -diff -NpurBb dmlite-1.14.2.orig/src/puppet/dmlite/manifests/xrootd.pp dmlite-1.14.2/src/puppet/dmlite/manifests/xrootd.pp ---- dmlite-1.14.2.orig/src/puppet/dmlite/manifests/xrootd.pp 2020-11-05 12:25:07.000000000 +0100 -+++ dmlite-1.14.2/src/puppet/dmlite/manifests/xrootd.pp 2020-12-06 20:47:37.838879784 +0100 -@@ -86,12 +86,12 @@ class dmlite::xrootd ( - #add possibility to disable xrd checksum - if $xrd_checksum_enabled { - $_xrd_checksum = $xrd_checksum -- $_xrd_ofsckslib = '= libXrdDPMCks.so.3' -- $_xrd_ofsosslib = '+cksio libXrdDPMOss.so.3' -+ $_xrd_ofsckslib = '= libXrdDPMCks.so' -+ $_xrd_ofsosslib = '+cksio libXrdDPMOss.so' - } else { - $_xrd_checksum = undef - $_xrd_ofsckslib = undef -- $_xrd_ofsosslib = 'libXrdDPMOss.so.3' -+ $_xrd_ofsosslib = 'libXrdDPMOss.so' - } - - if member($nodetype, 'disk') { -@@ -129,7 +129,7 @@ class dmlite::xrootd ( - xrootd_export => [ '/' ], - xrootd_async => $xrootd_async, - xrootd_monitor => $xrootd_monitor, -- ofs_authlib => 'libXrdDPMDiskAcc.so.3', -+ ofs_authlib => 'libXrdDPMDiskAcc.so', - ofs_authorize => true, - xrd_ofsosslib => $_xrd_ofsosslib, - xrd_ofsckslib => $_xrd_ofsckslib, -@@ -165,7 +165,7 @@ class dmlite::xrootd ( - 'redir' => "-l /var/log/xrootd/xrootd.log -c /etc/xrootd/xrootd-dpmredir.cfg ${log_style_param}" - } - -- $ofs_authlib = 'libXrdDPMRedirAcc.so.3' -+ $ofs_authlib = 'libXrdDPMRedirAcc.so' - - dmlite::xrootd::create_config{'/etc/xrootd/xrootd-dpmredir.cfg': - dmlite_conf => $dmlite_conf, -@@ -182,7 +182,7 @@ class dmlite::xrootd ( - xrd_ofsosslib => $_xrd_ofsosslib, - xrd_ofsckslib => $_xrd_ofsckslib, - xrootd_chksum => $_xrd_checksum, -- ofs_cmslib => 'libXrdDPMFinder.so.3', -+ ofs_cmslib => 'libXrdDPMFinder.so', - ofs_forward => 'all', - xrd_network => 'nodnr', - xrd_timeout => $xrd_timeout, -@@ -213,7 +213,7 @@ class dmlite::xrootd ( - $cms_cidtag = $::fqdn - } - -- $oss_statlib = '-2 libXrdDPMStatInfo.so.3' -+ $oss_statlib = '-2 libXrdDPMStatInfo.so' - - $federation_defaults = { - dmlite_conf => $dmlite_conf, -@@ -228,13 +228,13 @@ class dmlite::xrootd ( - xrootd_monitor => $xrootd_monitor, - ofs_authlib => $ofs_authlib, - ofs_authorize => true, -- xrd_ofsosslib => 'libXrdDPMOss.so.3', -+ xrd_ofsosslib => 'libXrdDPMOss.so', - cmsd_ofsosslib => 'libXrdPss.so', - pss_setopt => [ - 'ConnectTimeout 30', - 'RequestTimeout 30', - 'RedirectLimit 0'], -- ofs_cmslib => 'libXrdDPMFinder.so.3', -+ ofs_cmslib => 'libXrdDPMFinder.so', - ofs_forward => 'all', - xrd_network => 'nodnr', - xrd_timeout => $xrd_timeout, diff --git a/dmlite-shell-678-python3.patch b/dmlite-shell-678-python3.patch deleted file mode 100644 index 8b18dbe..0000000 --- a/dmlite-shell-678-python3.patch +++ /dev/null @@ -1,180 +0,0 @@ -diff -NPur dmlite-1.14.2.python2/python/sample.py dmlite-1.14.2/python/sample.py ---- dmlite-1.14.2.python2/python/sample.py 2021-04-03 02:52:42.638862976 +0200 -+++ dmlite-1.14.2/python/sample.py 2021-04-03 02:56:35.868219799 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - - from __future__ import print_function - import pydmlite -diff -NPur dmlite-1.14.2.python2/scripts/bdii/dome-info-provider.py dmlite-1.14.2/scripts/bdii/dome-info-provider.py ---- 
dmlite-1.14.2.python2/scripts/bdii/dome-info-provider.py 2021-04-03 02:52:42.639862964 +0200 -+++ dmlite-1.14.2/scripts/bdii/dome-info-provider.py 2021-04-03 02:54:26.786682669 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - - import argparse - import socket -diff -NPur dmlite-1.14.2.python2/scripts/SRR/dpm-storage-summary.py dmlite-1.14.2/scripts/SRR/dpm-storage-summary.py ---- dmlite-1.14.2.python2/scripts/SRR/dpm-storage-summary.py 2021-04-03 02:52:42.638862976 +0200 -+++ dmlite-1.14.2/scripts/SRR/dpm-storage-summary.py 2021-04-03 02:54:43.278495764 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - ################################################################################ - ## Original script is now integrated in the dmliteshell which should be used ## - ## instead of calling this legacy but fully backward compatible CLI interface ## -diff -NPur dmlite-1.14.2.python2/scripts/StAR-accounting/star-accounting.py dmlite-1.14.2/scripts/StAR-accounting/star-accounting.py ---- dmlite-1.14.2.python2/scripts/StAR-accounting/star-accounting.py 2021-04-03 02:52:42.639862964 +0200 -+++ dmlite-1.14.2/scripts/StAR-accounting/star-accounting.py 2021-04-03 02:54:35.450584479 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - ################################################################################ - ## Original script is now integrated in the dmliteshell which should be used ## - ## instead of calling this legacy but fully backward compatible CLI interface ## -diff -NPur dmlite-1.14.2.python2/scripts/utils/dmlite-mysql-dirspaces.py dmlite-1.14.2/scripts/utils/dmlite-mysql-dirspaces.py ---- dmlite-1.14.2.python2/scripts/utils/dmlite-mysql-dirspaces.py 2021-04-03 02:52:42.639862964 +0200 -+++ dmlite-1.14.2/scripts/utils/dmlite-mysql-dirspaces.py 2021-04-03 02:54:07.640899651 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - ################################################################################ - ## Original script is now integrated in the dmliteshell which should be used ## - ## instead of calling this legacy but fully backward compatible CLI interface ## -diff -NPur dmlite-1.14.2.python2/scripts/utils/dmlite-prom.py dmlite-1.14.2/scripts/utils/dmlite-prom.py ---- dmlite-1.14.2.python2/scripts/utils/dmlite-prom.py 2021-04-03 02:52:42.639862964 +0200 -+++ dmlite-1.14.2/scripts/utils/dmlite-prom.py 2021-04-03 02:54:18.043781752 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - # - # Dump DPM DOME information in a format that can be scraped - # by prometheus Node exporter text collector -diff -NPur dmlite-1.14.2.python2/scripts/utils/dpm-dump.py dmlite-1.14.2/scripts/utils/dpm-dump.py ---- dmlite-1.14.2.python2/scripts/utils/dpm-dump.py 2021-04-03 02:52:42.639862964 +0200 -+++ dmlite-1.14.2/scripts/utils/dpm-dump.py 2021-04-03 02:53:37.521240985 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - ################################################################################ - ## Original script is now integrated in the dmliteshell which should be used ## - ## instead of calling this legacy but fully backward compatible CLI interface ## -diff -NPur dmlite-1.14.2.python2/scripts/utils/lost.py dmlite-1.14.2/scripts/utils/lost.py ---- dmlite-1.14.2.python2/scripts/utils/lost.py 2021-04-03 02:52:42.639862964 +0200 -+++ dmlite-1.14.2/scripts/utils/lost.py 2021-04-03 02:53:55.531036894 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - 
################################################################################ - ## Original script is now integrated in the dmliteshell which should be used ## - ## instead of calling this legacy but fully backward compatible CLI interface ## -diff -NPur dmlite-1.14.2.python2/src/dome/cli/dome.py dmlite-1.14.2/src/dome/cli/dome.py ---- dmlite-1.14.2.python2/src/dome/cli/dome.py 2021-04-03 02:52:42.643862919 +0200 -+++ dmlite-1.14.2/src/dome/cli/dome.py 2021-04-03 02:56:44.721119470 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - # dome.py - """ - This file implements the Dome CLI. -diff -NPur dmlite-1.14.2.python2/tests/python/catalog_first.py dmlite-1.14.2/tests/python/catalog_first.py ---- dmlite-1.14.2.python2/tests/python/catalog_first.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/catalog_first.py 2021-04-03 02:55:35.700901680 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - - import unittest - import pydmlite -diff -NPur dmlite-1.14.2.python2/tests/python/catalog.py dmlite-1.14.2/tests/python/catalog.py ---- dmlite-1.14.2.python2/tests/python/catalog.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/catalog.py 2021-04-03 02:55:32.362939509 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - - import unittest - import pydmlite -diff -NPur dmlite-1.14.2.python2/tests/python/dm-ls.py dmlite-1.14.2/tests/python/dm-ls.py ---- dmlite-1.14.2.python2/tests/python/dm-ls.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/dm-ls.py 2021-04-03 02:55:38.999864294 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - from __future__ import print_function - import pydmlite - import sys -diff -NPur dmlite-1.14.2.python2/tests/python/dm-mkdir.py dmlite-1.14.2/tests/python/dm-mkdir.py ---- dmlite-1.14.2.python2/tests/python/dm-mkdir.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/dm-mkdir.py 2021-04-03 02:55:42.028829967 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - from __future__ import print_function - import pydmlite - import sys -diff -NPur dmlite-1.14.2.python2/tests/python/dm-mkfile.py dmlite-1.14.2/tests/python/dm-mkfile.py ---- dmlite-1.14.2.python2/tests/python/dm-mkfile.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/dm-mkfile.py 2021-04-03 02:55:45.542790142 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - from __future__ import print_function - import pydmlite - import sys -diff -NPur dmlite-1.14.2.python2/tests/python/dm-mklink.py dmlite-1.14.2/tests/python/dm-mklink.py ---- dmlite-1.14.2.python2/tests/python/dm-mklink.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/dm-mklink.py 2021-04-03 02:55:18.871092411 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - from __future__ import print_function - import pydmlite - import sys -diff -NPur dmlite-1.14.2.python2/tests/python/dm-physicallocation.py dmlite-1.14.2/tests/python/dm-physicallocation.py ---- dmlite-1.14.2.python2/tests/python/dm-physicallocation.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/dm-physicallocation.py 2021-04-03 02:55:51.576721759 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - from __future__ import print_function - import pydmlite - import sys -diff -NPur dmlite-1.14.2.python2/tests/python/dm-rmdir.py dmlite-1.14.2/tests/python/dm-rmdir.py ---- dmlite-1.14.2.python2/tests/python/dm-rmdir.py 2021-04-03 02:52:42.644862908 
+0200 -+++ dmlite-1.14.2/tests/python/dm-rmdir.py 2021-04-03 02:55:54.900684089 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - from __future__ import print_function - import pydmlite - import sys -diff -NPur dmlite-1.14.2.python2/tests/python/dm-rmfile.py dmlite-1.14.2/tests/python/dm-rmfile.py ---- dmlite-1.14.2.python2/tests/python/dm-rmfile.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/dm-rmfile.py 2021-04-03 02:55:58.703640988 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - from __future__ import print_function - import pydmlite - import sys -diff -NPur dmlite-1.14.2.python2/tests/python/dm-rmlink.py dmlite-1.14.2/tests/python/dm-rmlink.py ---- dmlite-1.14.2.python2/tests/python/dm-rmlink.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/dm-rmlink.py 2021-04-03 02:56:01.817605699 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - from __future__ import print_function - import pydmlite - import sys -diff -NPur dmlite-1.14.2.python2/tests/python/run-all.py dmlite-1.14.2/tests/python/run-all.py ---- dmlite-1.14.2.python2/tests/python/run-all.py 2021-04-03 02:52:42.644862908 +0200 -+++ dmlite-1.14.2/tests/python/run-all.py 2021-04-03 02:56:05.429564765 +0200 -@@ -1,4 +1,4 @@ --#!/usr/bin/python2 -+#!/usr/bin/python3 - - import unittest - diff --git a/dmlite-shell-678.patch b/dmlite-shell-678.patch deleted file mode 100644 index 755cef8..0000000 --- a/dmlite-shell-678.patch +++ /dev/null @@ -1,9648 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index e89154b0..488904c9 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -223,11 +223,6 @@ if (NOT APRUTIL_FOUND) - message("-- APR-utils not found... disabling the Apache modules (lcgdm-dav)") - set (BUILD_DAV OFF) - endif() --find_package(VOMS) --if (NOT VOMS_FOUND) -- message("-- LibVOMS not found... 
disabling the Apache modules (lcgdm-dav)") -- set (BUILD_DAV OFF) --endif() - - # Include directories for our newer version of curl - set(CURL_DIR /tmp/curl/bogusinstall) -diff --git a/dist/packaging/fedora/dmlite.spec b/dist/packaging/fedora/dmlite.spec -index c7a85cb6..742a5c2b 100644 ---- a/dist/packaging/fedora/dmlite.spec -+++ b/dist/packaging/fedora/dmlite.spec -@@ -186,6 +186,7 @@ data such as HTTP, Xrootd, GridFTP - %{_prefix}/share/dmlite/dbscripts - %{_prefix}/share/dmlite/filepull - %{_prefix}/share/dmlite/StAR-accounting -+%{_prefix}/share/dmlite/utils - %endif - - %if %{?fedora}%{!?fedora:0} <= 30 -@@ -792,6 +793,7 @@ install -p -d -m 755 %{buildroot}%{_localstatedir}/log/dpm-gsiftp - %{_prefix}/share/dmlite/dbscripts - %{_prefix}/share/dmlite/filepull - %{_prefix}/share/dmlite/StAR-accounting -+%{_prefix}/share/dmlite/utils - - %files dpmdisk-domeonly - %{_prefix}/share/dmlite/filepull -@@ -871,14 +873,9 @@ install -p -d -m 755 %{buildroot}%{_localstatedir}/log/dpm-gsiftp - %{_bindir}/dmlite-shell - %{_bindir}/dpm-storage-summary.py - %{_bindir}/dpm-storage-summary.cgi --%{_bindir}/dmlite-mysql-dirspaces.py - %{_bindir}/dome-info-provider.py - %{_sharedstatedir}/bdii/gip/provider/dome-info-exec - %config(noreplace) %{_sysconfdir}/sysconfig/dpminfo --%if %{?rhel}%{!?rhel:99} <= 5 --%{_bindir}/dmlite-mysql-dirspaces.pyc --%{_bindir}/dmlite-mysql-dirspaces.pyo --%endif - %{python2_sitelib}/dmliteshell - %doc LICENSE README RELEASE-NOTES - -diff --git a/dist/packaging/rpm/dmlite-python3.patch b/dist/packaging/rpm/dmlite-python3.patch -new file mode 100644 -index 00000000..279abf25 ---- /dev/null -+++ b/dist/packaging/rpm/dmlite-python3.patch -@@ -0,0 +1,189 @@ -+diff -NPurBb dmlite-1.14.0.orig/python/sample.py dmlite-1.14.0/python/sample.py -+--- dmlite-1.14.0.orig/python/sample.py 2020-07-19 13:06:01.170693812 +0200 -++++ dmlite-1.14.0/python/sample.py 2020-07-19 13:08:04.196592081 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ -+ import pydmlite -+ -+diff -NPurBb dmlite-1.14.0.orig/scripts/bdii/dome-info-provider.py dmlite-1.14.0/scripts/bdii/dome-info-provider.py -+--- dmlite-1.14.0.orig/scripts/bdii/dome-info-provider.py 2020-07-19 13:06:01.171693804 +0200 -++++ dmlite-1.14.0/scripts/bdii/dome-info-provider.py 2020-07-19 13:10:29.870285545 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ -+ import argparse -+ import socket -+diff -NPurBb dmlite-1.14.0.orig/scripts/SRR/dpm-storage-summary.py dmlite-1.14.0/scripts/SRR/dpm-storage-summary.py -+--- dmlite-1.14.0.orig/scripts/SRR/dpm-storage-summary.py 2020-07-19 13:06:01.170693812 +0200 -++++ dmlite-1.14.0/scripts/SRR/dpm-storage-summary.py 2020-07-19 13:09:42.911706378 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ ################################################################################ -+ ## Original script is now integrated in the dmliteshell which should be used ## -+ ## instead of calling this legacy but fully backward compatible CLI interface ## -+diff -NPurBb dmlite-1.14.0.orig/scripts/StAR-accounting/star-accounting.py dmlite-1.14.0/scripts/StAR-accounting/star-accounting.py -+--- dmlite-1.14.0.orig/scripts/StAR-accounting/star-accounting.py 2020-07-19 13:06:01.171693804 +0200 -++++ dmlite-1.14.0/scripts/StAR-accounting/star-accounting.py 2020-07-19 13:10:20.225371980 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ ################################################################################ -+ ## Original script is now 
integrated in the dmliteshell which should be used ## -+ ## instead of calling this legacy but fully backward compatible CLI interface ## -+diff -NPurBb dmlite-1.14.0.orig/scripts/utils/dmlite-mysql-dirspaces.py dmlite-1.14.0/scripts/utils/dmlite-mysql-dirspaces.py -+--- dmlite-1.14.0.orig/scripts/utils/dmlite-mysql-dirspaces.py 2020-07-19 13:06:01.171693804 +0200 -++++ dmlite-1.14.0/scripts/utils/dmlite-mysql-dirspaces.py 2020-07-19 13:10:13.095435878 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ ################################################################################ -+ ## Original script is now integrated in the dmliteshell which should be used ## -+ ## instead of calling this legacy but fully backward compatible CLI interface ## -+diff -NPurBb dmlite-1.14.0.orig/scripts/utils/dmlite-prom.py dmlite-1.14.0/scripts/utils/dmlite-prom.py -+--- dmlite-1.14.0.orig/scripts/utils/dmlite-prom.py 2020-07-19 13:06:01.171693804 +0200 -++++ dmlite-1.14.0/scripts/utils/dmlite-prom.py 2020-07-19 13:10:05.109507445 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ # -+ # Dump DPM DOME information in a format that can be scraped -+ # by prometheus Node exporter text collector -+diff -NPurBb dmlite-1.14.0.orig/scripts/utils/dpm-dump.py dmlite-1.14.0/scripts/utils/dpm-dump.py -+--- dmlite-1.14.0.orig/scripts/utils/dpm-dump.py 2020-07-19 13:06:01.171693804 +0200 -++++ dmlite-1.14.0/scripts/utils/dpm-dump.py 2020-07-19 13:09:57.590574828 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ ################################################################################ -+ ## Original script is now integrated in the dmliteshell which should be used ## -+ ## instead of calling this legacy but fully backward compatible CLI interface ## -+diff -NPurBb dmlite-1.14.0.orig/scripts/utils/lost.py dmlite-1.14.0/scripts/utils/lost.py -+--- dmlite-1.14.0.orig/scripts/utils/lost.py 2020-07-19 13:06:01.171693804 +0200 -++++ dmlite-1.14.0/scripts/utils/lost.py 2020-07-19 13:09:50.540638008 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ ################################################################################ -+ ## Original script is now integrated in the dmliteshell which should be used ## -+ ## instead of calling this legacy but fully backward compatible CLI interface ## -+diff -NPurBb dmlite-1.14.0.orig/shell/src/dmlite-shell dmlite-1.14.0/shell/src/dmlite-shell -+--- dmlite-1.14.0.orig/shell/src/dmlite-shell 2020-07-19 13:06:01.174693778 +0200 -++++ dmlite-1.14.0/shell/src/dmlite-shell 2020-07-19 13:11:36.803701621 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ # dmliteshell.py -+ """ -+ This file implements the DMLite shell. -+diff -NPurBb dmlite-1.14.0.orig/src/dome/cli/dome.py dmlite-1.14.0/src/dome/cli/dome.py -+--- dmlite-1.14.0.orig/src/dome/cli/dome.py 2020-07-19 13:06:01.179693736 +0200 -++++ dmlite-1.14.0/src/dome/cli/dome.py 2020-07-19 13:11:19.422852964 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ # dome.py -+ """ -+ This file implements the Dome CLI. 
-+diff -NPurBb dmlite-1.14.0.orig/tests/python/catalog_first.py dmlite-1.14.0/tests/python/catalog_first.py -+--- dmlite-1.14.0.orig/tests/python/catalog_first.py 2020-07-19 13:06:01.180693728 +0200 -++++ dmlite-1.14.0/tests/python/catalog_first.py 2020-07-19 13:08:43.184241643 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ -+ import unittest -+ import pydmlite -+diff -NPurBb dmlite-1.14.0.orig/tests/python/catalog.py dmlite-1.14.0/tests/python/catalog.py -+--- dmlite-1.14.0.orig/tests/python/catalog.py 2020-07-19 13:06:01.179693736 +0200 -++++ dmlite-1.14.0/tests/python/catalog.py 2020-07-19 13:07:58.017647769 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ -+ import unittest -+ import pydmlite -+diff -NPurBb dmlite-1.14.0.orig/tests/python/dm-ls.py dmlite-1.14.0/tests/python/dm-ls.py -+--- dmlite-1.14.0.orig/tests/python/dm-ls.py 2020-07-19 13:06:01.180693728 +0200 -++++ dmlite-1.14.0/tests/python/dm-ls.py 2020-07-19 13:09:27.132847785 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ import pydmlite -+ import sys -+ import time -+diff -NPurBb dmlite-1.14.0.orig/tests/python/dm-mkdir.py dmlite-1.14.0/tests/python/dm-mkdir.py -+--- dmlite-1.14.0.orig/tests/python/dm-mkdir.py 2020-07-19 13:06:01.180693728 +0200 -++++ dmlite-1.14.0/tests/python/dm-mkdir.py 2020-07-19 13:09:06.620031616 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ import pydmlite -+ import sys -+ -+diff -NPurBb dmlite-1.14.0.orig/tests/python/dm-mkfile.py dmlite-1.14.0/tests/python/dm-mkfile.py -+--- dmlite-1.14.0.orig/tests/python/dm-mkfile.py 2020-07-19 13:06:01.180693728 +0200 -++++ dmlite-1.14.0/tests/python/dm-mkfile.py 2020-07-19 13:09:13.979965658 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ import pydmlite -+ import sys -+ import time -+diff -NPurBb dmlite-1.14.0.orig/tests/python/dm-mklink.py dmlite-1.14.0/tests/python/dm-mklink.py -+--- dmlite-1.14.0.orig/tests/python/dm-mklink.py 2020-07-19 13:06:01.181693719 +0200 -++++ dmlite-1.14.0/tests/python/dm-mklink.py 2020-07-19 13:08:16.531480912 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ import pydmlite -+ import sys -+ -+diff -NPurBb dmlite-1.14.0.orig/tests/python/dm-physicallocation.py dmlite-1.14.0/tests/python/dm-physicallocation.py -+--- dmlite-1.14.0.orig/tests/python/dm-physicallocation.py 2020-07-19 13:06:01.181693719 +0200 -++++ dmlite-1.14.0/tests/python/dm-physicallocation.py 2020-07-19 13:08:53.349150547 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ import pydmlite -+ import sys -+ -+diff -NPurBb dmlite-1.14.0.orig/tests/python/dm-rmdir.py dmlite-1.14.0/tests/python/dm-rmdir.py -+--- dmlite-1.14.0.orig/tests/python/dm-rmdir.py 2020-07-19 13:06:01.181693719 +0200 -++++ dmlite-1.14.0/tests/python/dm-rmdir.py 2020-07-19 13:09:20.342908636 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ import pydmlite -+ import sys -+ -+diff -NPurBb dmlite-1.14.0.orig/tests/python/dm-rmfile.py dmlite-1.14.0/tests/python/dm-rmfile.py -+--- dmlite-1.14.0.orig/tests/python/dm-rmfile.py 2020-07-19 13:06:01.181693719 +0200 -++++ dmlite-1.14.0/tests/python/dm-rmfile.py 2020-07-19 13:08:33.952324377 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ import pydmlite -+ import sys -+ import time -+diff -NPurBb dmlite-1.14.0.orig/tests/python/dm-rmlink.py dmlite-1.14.0/tests/python/dm-rmlink.py -+--- dmlite-1.14.0.orig/tests/python/dm-rmlink.py 2020-07-19 13:06:01.181693719 
+0200 -++++ dmlite-1.14.0/tests/python/dm-rmlink.py 2020-07-19 13:09:00.642085189 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ import pydmlite -+ import sys -+ -+diff -NPurBb dmlite-1.14.0.orig/tests/python/run-all.py dmlite-1.14.0/tests/python/run-all.py -+--- dmlite-1.14.0.orig/tests/python/run-all.py 2020-07-19 13:06:01.181693719 +0200 -++++ dmlite-1.14.0/tests/python/run-all.py 2020-07-19 13:08:24.710407201 +0200 -+@@ -1,4 +1,4 @@ -+-#!/usr/bin/python2 -++#!/usr/bin/python3 -+ -+ import unittest -+ -diff --git a/dist/packaging/rpm/dmlite.spec b/dist/packaging/rpm/dmlite.spec -index 516d7303..2f55d83d 100644 ---- a/dist/packaging/rpm/dmlite.spec -+++ b/dist/packaging/rpm/dmlite.spec -@@ -827,6 +827,7 @@ rm -rf %{buildroot} - %{_prefix}/share/dmlite/dbscripts - %{_prefix}/share/dmlite/filepull - %{_prefix}/share/dmlite/StAR-accounting -+%{_prefix}/share/dmlite/utils - - %files dpmdisk-domeonly - %defattr(-,root,root,-) -@@ -839,6 +840,7 @@ rm -rf %{buildroot} - %{_prefix}/share/dmlite/dbscripts - %{_prefix}/share/dmlite/filepull - %{_prefix}/share/dmlite/StAR-accounting -+%{_prefix}/share/dmlite/utils - - %files dpmdisk - %defattr(-,root,root,-) -@@ -923,14 +925,9 @@ rm -rf %{buildroot} - %{_bindir}/dmlite-shell - %{_bindir}/dpm-storage-summary.py - %{_bindir}/dpm-storage-summary.cgi --%{_bindir}/dmlite-mysql-dirspaces.py - %{_bindir}/dome-info-provider.py - %{_sharedstatedir}/bdii/gip/provider/dome-info-exec - %config(noreplace) %{_sysconfdir}/sysconfig/dpminfo --%if %{?rhel}%{!?rhel:99} <= 5 --%{_bindir}/dmlite-mysql-dirspaces.pyc --%{_bindir}/dmlite-mysql-dirspaces.pyo --%endif - %{python_sitelib}/dmliteshell - %doc LICENSE README RELEASE-NOTES - -diff --git a/doc/dome/dome.tex b/doc/dome/dome.tex -index 5fbdc06c..6f4e2b17 100644 ---- a/doc/dome/dome.tex -+++ b/doc/dome/dome.tex -@@ -1668,6 +1668,28 @@ Returns: - Code: 200 or error - - -+\subsubsection{dome\_setutime} -+ -+Update the access and modification time of a file or directory. -+Only privileged DPM user can call this interface (e.g. dmlite-shell). -+ -+Command: -+\lstinline"GET /dome/command/dome_setutime"\\ -+ -+Request header:\\ -+no particular fields in the header\\ -+ -+Params: -+\begin{itemize} -+ \item path: absolute path to the file/directory -+ \item actime: access time -+ \item modtime: modification time -+\end{itemize} -+ -+Returns: -+Code: 200 or error -+ -+ - \subsubsection{dome\_setsize} - - Update the size of a file or directory. NB: changing the size of a directory -diff --git a/doc/tutorial/backend_python.html b/doc/tutorial/backend_python.html -index 8611b911..8c42baac 100644 ---- a/doc/tutorial/backend_python.html -+++ b/doc/tutorial/backend_python.html -@@ -24,7 +24,7 @@ These examples are the same as those explained using the C wrapper and the C++ A -

- We will start with a simple application skeleton which does nothing.
- 
--#!/usr/bin/env python
-+#!/usr/bin/env python2
- import pydmlite
- 
-@@ -59,7 +59,7 @@ or passing some credentials first.
- 
--#!/usr/bin/env python
-+#!/usr/bin/env python2
- import pydmlite
- import sys
- 
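
The two hunks above only retarget the tutorial's interpreter line from python to python2. For orientation, the skeleton they touch amounts to the following minimal sketch, assembled from the hunk context; the PluginManager construction and the /etc/dmlite.conf path are assumptions, not shown in the hunks themselves:

    #!/usr/bin/env python2
    # Minimal pydmlite skeleton: load the dmlite plugin configuration and exit.
    import sys
    import pydmlite

    pluginManager = pydmlite.PluginManager()  # assumed constructor, as in the dmlite samples
    try:
        pluginManager.loadConfiguration("/etc/dmlite.conf")  # assumed default path
    except Exception as e:
        print(e)
        sys.exit(1)
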
-diff --git a/etc/domehead.conf.example b/etc/domehead.conf.example
-index 4ae60c2f..825ff23c 100644
---- a/etc/domehead.conf.example
-+++ b/etc/domehead.conf.example
-@@ -19,6 +19,7 @@ head.checksum.maxtotal: 1000
- head.checksum.maxpernode: 40
- head.filepuller.stathook: /usr/share/dmlite/filepull/externalstat_example.sh
- 
-+# Database
- head.db.host: localhost
- head.db.user: dpmdbuser
- head.db.password: 
-@@ -32,3 +33,6 @@ head.db.dpmdbname: dpm_db
- #head.oidc.allowissuer[]: "/dpm" "https://wlcg.cloud.cnaf.infn.it/" wlcg
- #head.oidc.allowaudience[]: https://wlcg.cern.ch/jwt/v1/any
- 
-+# Telemetry (version, host, totalspace, freespace)
-+head.informer.mainurl: https://dpmhead-rc.cern.ch/dpminfo
-+
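
The hunk above documents the dome head configuration as plain "key: value" lines with #-comments. A minimal sketch of how such a file can be read, mirroring the parseConfig helper in scripts/utils/dmlite-prom.py further down in this patch (the function name and example path here are illustrative):

    import re

    def parse_dome_config(filename):
        """Return a dict of 'key: value' settings, skipping # comment lines."""
        ret = {}
        re_comment = re.compile(r'^ *#')
        re_keyvalue = re.compile(r'^(.*?):\s*(.*?)\s*$')
        with open(filename) as f:
            for line in f:
                if re_comment.match(line):
                    continue
                res = re_keyvalue.match(line)
                if res:
                    ret[res.group(1)] = res.group(2)
        return ret

    # e.g. parse_dome_config('/etc/domehead.conf').get('head.db.host')
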
-diff --git a/python/sample.py b/python/sample.py
-index 961214c0..ca03e810 100644
---- a/python/sample.py
-+++ b/python/sample.py
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/python2
- 
- from __future__ import print_function
- import pydmlite
-@@ -10,7 +10,7 @@ def test():
-       pluginManager.loadConfiguration(configFile)
-     except Exception as e:
-       print(e)
--      return 
-+      return
- 
-     try:
-       securityContext = pydmlite.SecurityContext()
-@@ -21,7 +21,7 @@ def test():
-       securityContext.groups.append(group)
-     except Exception as e:
-       print(e)
--      return 
-+      return
- 
-     try:
-       stackInstance = pydmlite.StackInstance(pluginManager)
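
The sample.py hunks above show only fragments of the script's test() flow. Condensed, the structure is roughly the sketch below; the GroupInfo type and the group name are assumptions, since the group construction falls outside the hunk context:

    import pydmlite

    def test():
        # Load the plugin configuration (first hunk above).
        pluginManager = pydmlite.PluginManager()
        try:
            pluginManager.loadConfiguration("/etc/dmlite.conf")
        except Exception as e:
            print(e)
            return

        # Build a security context with one group (second hunk above).
        try:
            securityContext = pydmlite.SecurityContext()
            group = pydmlite.GroupInfo()  # assumed type, not visible in the hunks
            group.name = "users"
            securityContext.groups.append(group)
        except Exception as e:
            print(e)
            return

        # Instantiate the plugin stack (third hunk above).
        return pydmlite.StackInstance(pluginManager)
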
-diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
-index afc59046..578760bb 100644
---- a/scripts/CMakeLists.txt
-+++ b/scripts/CMakeLists.txt
-@@ -9,10 +9,27 @@ install(DIRECTORY      filepull
-         FILE_PERMISSIONS       OWNER_EXECUTE OWNER_WRITE OWNER_READ
-                                GROUP_EXECUTE GROUP_READ
-                                WORLD_EXECUTE WORLD_READ)
--                               
--                               
-+
- install(DIRECTORY      StAR-accounting
-         DESTINATION   ${INSTALL_PFX_USR}/share/dmlite/
-         FILE_PERMISSIONS       OWNER_EXECUTE OWNER_WRITE OWNER_READ
-                                GROUP_EXECUTE GROUP_READ
--                               WORLD_EXECUTE WORLD_READ)                               
-+                               WORLD_EXECUTE WORLD_READ)
-+
-+install(DIRECTORY      utils
-+        DESTINATION   ${INSTALL_PFX_USR}/share/dmlite/
-+        FILE_PERMISSIONS       OWNER_EXECUTE OWNER_WRITE OWNER_READ
-+                               GROUP_EXECUTE GROUP_READ
-+                               WORLD_EXECUTE WORLD_READ)
-+
-+install(PROGRAMS      bdii/dome-info-provider.py
-+        DESTINATION   ${INSTALL_PFX_USR}/bin)
-+
-+install(PROGRAMS      bdii/dome-info-exec
-+        DESTINATION   ${INSTALL_PFX_VARLIB}/bdii/gip/provider)
-+
-+install(PROGRAMS      SRR/dpm-storage-summary.py
-+        DESTINATION   ${INSTALL_PFX_USR}/bin)
-+
-+install(PROGRAMS      SRR/dpm-storage-summary.cgi
-+        DESTINATION   ${INSTALL_PFX_USR}/bin)
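
The bdii, SRR and utils programs installed above are legacy entry points that this patch turns into thin wrappers around the corresponding dmliteshell modules. They all follow the same deprecation pattern; dpm-dump.py, added later in this patch, is representative (dump stands for whichever module is wrapped):

    #!/usr/bin/python2
    # Warn that the standalone script is deprecated, then delegate to the
    # dmliteshell module that now carries the implementation.
    import sys
    from dmliteshell import dump

    dump._log.warn("Calling directly %s is deprecated, use `dmlite-shell -e 'dump'`" % sys.argv[0])
    sys.exit(dump.main(sys.argv))
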
-diff --git a/shell/src/dpm-storage-summary.cgi b/scripts/SRR/dpm-storage-summary.cgi
-similarity index 87%
-rename from shell/src/dpm-storage-summary.cgi
-rename to scripts/SRR/dpm-storage-summary.cgi
-index 208dbf55..ca6fd579 100755
---- a/shell/src/dpm-storage-summary.cgi
-+++ b/scripts/SRR/dpm-storage-summary.cgi
-@@ -25,17 +25,11 @@ from __future__ import division
- 
- import os, sys, time
- import socket
--try:
--    import io as StringIO
--except ImportError:
--    import StringIO
- import json
- import logging
- import logging.handlers
--
--
--class Config(object):
--    pass
-+from io import StringIO
-+from dmliteshell import srr
- 
- 
- try:
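
The import hunk above drops the Python 2/3 StringIO fallback: io.StringIO exists on Python 2.6+ as well as Python 3 (accepting only unicode strings on Python 2), so the plain import suffices. A quick illustration of the replacement idiom:

    from io import StringIO

    buf = StringIO()
    buf.write(u"Content-type: application/json\r\n\r\n")  # unicode literal keeps Python 2 happy
    print(buf.getvalue())
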
-@@ -44,13 +38,10 @@ try:
-     _log.setLevel(logging.WARN)
-     _log.addHandler(handler)
- 
--    srr = __import__('dpm-storage-summary')
--
--    options = Config()
--    options.host = os.getenv('DPM_SRR_HOST', socket.getfqdn())
--    options.port = os.getenv('DPM_SRR_PORT', 1094)
--    options.cert = os.getenv('DPM_SRR_CERT', '/etc/grid-security/dpmmgr/dpmcert.pem')
--    options.key = os.getenv('DPM_SRR_KEY', '/etc/grid-security/dpmmgr/dpmkey.pem')
-+    host = os.getenv('DPM_SRR_HOST', socket.getfqdn())
-+    port = os.getenv('DPM_SRR_PORT', 1094)
-+    cert = os.getenv('DPM_SRR_CERT', '/etc/grid-security/dpmmgr/dpmcert.pem')
-+    key = os.getenv('DPM_SRR_KEY', '/etc/grid-security/dpmmgr/dpmkey.pem')
- 
-     cache_time = 0
-     cache_file = os.getenv('DPM_SRR_FILE', '/tmp/storagesummary.json')
-@@ -74,7 +65,7 @@ try:
- 
-     try:
-         # read new SRR info using DPM DOME interface
--        dpm = srr.storageService(options)
-+        dpm = srr.StorageService(host, cert, key)
-         dpm.addshares()
-         dpm.addendpoints()
-     except Exception as e:
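
Together with the publishing hunk below, the refactored CGI reduces to a short StorageService flow. In isolation it looks like this sketch, with host, cert and key taken from the DPM_SRR_* environment defaults above:

    import socket
    from dmliteshell import srr

    host = socket.getfqdn()
    cert = '/etc/grid-security/dpmmgr/dpmcert.pem'
    key = '/etc/grid-security/dpmmgr/dpmkey.pem'

    dpm = srr.StorageService(host, cert, key)  # read SRR info via DPM DOME
    dpm.addshares()                            # collect storage shares
    dpm.addendpoints()                         # collect protocol endpoints
    dpm.publish('stdout://')                   # write the JSON document to stdout
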
-@@ -102,10 +93,8 @@ try:
-         sys.exit(1)
- 
-     _log.debug("publish fresh SRR data")
--    out = StringIO.StringIO()
--    out.write("Content-type: application/json\r\n\r\n")
--    dpm.printjson(out)
--    sys.stdout.write(out.getvalue())
-+    sys.stdout.write("Content-type: application/json\r\n\r\n")
-+    dpm.publish('stdout://')
- 
-     if cache_time != 0:
-         # update cache file
-@@ -116,6 +105,7 @@ try:
-             _log.warn("unable to save SRR data in cache file %s: %s", cache_file, str(e))
- 
- except Exception as e:
-+    _log.error("unable to write SRR info from DPM DOME: %s", str(e))
-     sys.stdout.write("Status: 500 Internal Server Error\r\n")
-     sys.stdout.write("Content-Type: text/html\r\n\r\n")
-     sys.stdout.write("Unexpected exception occurred
\r\n") -diff --git a/scripts/SRR/dpm-storage-summary.py b/scripts/SRR/dpm-storage-summary.py -new file mode 100644 -index 00000000..c119fed9 ---- /dev/null -+++ b/scripts/SRR/dpm-storage-summary.py -@@ -0,0 +1,15 @@ -+#!/usr/bin/python2 -+################################################################################ -+## Original script is now integrated in the dmliteshell which should be used ## -+## instead of calling this legacy but fully backward compatible CLI interface ## -+################################################################################ -+# WLCG Storage Resource Reporting implementation for DPM -+# * https://docs.google.com/document/d/1yzCvKpxsbcQC5K9MyvXc-vBF1HGPBk4vhjw3MEXoXf8/edit -+# * https://twiki.cern.ch/twiki/bin/view/LCG/AccountingTaskForce -+# Configuration -+# * https://twiki.cern.ch/twiki/bin/view/DPM/DpmSetupManualInstallation#Publishing_space_usage -+import sys -+from dmliteshell import srr -+ -+srr._log.warn("Calling directly %s is deprecated, use `dmlite-shell -e 'srr'`" % sys.argv[0]) -+sys.exit(srr.main(sys.argv)) -diff --git a/scripts/StAR-accounting/star-accounting.py b/scripts/StAR-accounting/star-accounting.py -index 42960148..5a01c6b6 100755 ---- a/scripts/StAR-accounting/star-accounting.py -+++ b/scripts/StAR-accounting/star-accounting.py -@@ -1,5 +1,8 @@ - #!/usr/bin/python2 --########################################################################### -+################################################################################ -+## Original script is now integrated in the dmliteshell which should be used ## -+## instead of calling this legacy but fully backward compatible CLI interface ## -+################################################################################ - # - # star-accounting - # -@@ -12,7 +15,7 @@ - # .. 
to get the help screen - # - # Dependencies: --# yum install MySQL-python python-lxml python-uuid -+# MySQL-python or python2-PyMySQL/python3-PyMySQL, python-lxml, python-uuid - # - # v1.0.0 initial release - # v1.0.2 removed site debug printouts that were screwing up the output -@@ -22,347 +25,11 @@ - # v1.3.0 Petr Vokac (petr.vokac@cern.ch), February 7, 2019 - # * replace SQL join with simple queries to improve performance - # * compatibility with python 3 -+# v1.4.0 Petr Vokac (petr.vokac@cern.ch), February 7, 2019 -+# * integrated in dmliteshell - # --from __future__ import absolute_import --from __future__ import print_function --from __future__ import division -- --__version__ = "1.3.0" --__author__ = "Fabrizio Furano" -- - import sys --import os --import socket --import re --from optparse import OptionParser --import MySQLdb --import lxml.builder as lb --from lxml import etree --import uuid --import datetime --import logging -- -- --_log = logging.getLogger('DPMDUMP') -- --default_cns_db = 'cns_db' --default_dpm_db = 'dpm_db' -- -- --def get_conn_data(config, default_db): -- """ Returns connection data from {DPM,NS}CONFIG""" -- retval = {} -- -- _log.debug("getting connection info from %s", config) -- try: -- config_line = open(config).readline().strip() -- except Exception as e: -- _log.error("Cannot open DPM config file %s: %s", config, str(e)) -- sys.exit(-1) -- -- nsre = re.compile(r"(.*)/(.*)@([^/]*)(?:/(.*))?") -- m = nsre.match(config_line) -- if m == None: -- _log.error("Bad line in DPM config '%s', doesn't match re '%s'", config, nsre) -- sys.exit(-1) -- retval['user'] = m.group(1) -- retval['pass'] = m.group(2) -- retval['host'] = m.group(3) -- if m.group(4): -- retval['db'] = m.group(4) -- else: -- retval['db'] = default_db -- -- _log.debug("database connection: host=%s, database=%s, user=%s", retval['host'], retval['db'], retval['user']) -- -- return retval -- -- -- --def addrecord(xmlroot, hostname, group, user, site, filecount, resourcecapacityused, logicalcapacityused, validduration, recordid = None): -- # update XML -- rec = etree.SubElement(xmlroot, SR+'StorageUsageRecord') -- rid = etree.SubElement(rec, SR+'RecordIdentity') -- rid.set(SR+"createTime", datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")) -- -- if hostname: -- ssys = etree.SubElement(rec, SR+"StorageSystem") -- ssys.text = hostname -- -- recid = recordid -- if not recid: -- recid = hostname+"-"+str(uuid.uuid1()) -- rid.set(SR+"recordId", recid) -- -- subjid = etree.SubElement(rec, SR+'SubjectIdentity') -- -- if group: -- grouproles = group.split('/') -- -- # If the last token is Role=... 
then we fetch the role and add it to the record -- tmprl = grouproles[-1] -- if tmprl.find('Role=') != -1: -- splitroles = tmprl.split('=') -- if (len(splitroles) > 1): -- role = splitroles[1] -- grp = etree.SubElement(subjid, SR+"GroupAttribute" ) -- grp.set( SR+"attributeType", "role" ) -- grp.text = role -- # Now drop this last token, what remains is the vo identifier -- grouproles.pop() -- -- -- # The voname is the first token -- voname = grouproles.pop(0) -- grp = etree.SubElement(subjid, SR+"Group") -- grp.text = voname -- -- # If there are other tokens, they are a subgroup -- if len(grouproles) > 0: -- subgrp = '/'.join(grouproles) -- grp = etree.SubElement(subjid, SR+"GroupAttribute" ) -- grp.set( SR+"attributeType", "subgroup" ) -- grp.text = subgrp -- -- if user: -- usr = etree.SubElement(subjid, SR+"User") -- usr.text = user -- -- if site: -- st = etree.SubElement(subjid, SR+"Site") -- st.text = site -- -- e = etree.SubElement(rec, SR+"StorageMedia") -- e.text = "disk" -- -- if validduration: -- e = etree.SubElement(rec, SR+"StartTime") -- d = datetime.datetime.utcnow() - datetime.timedelta(seconds=validduration) -- e.text = d.strftime("%Y-%m-%dT%H:%M:%SZ") -- -- e = etree.SubElement(rec, SR+"EndTime") -- e.text = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") -- -- -- if filecount: -- e = etree.SubElement(rec, SR+"FileCount") -- e.text = str(filecount) -- -- -- if not resourcecapacityused: -- resourcecapacityused = 0 -- -- e1 = etree.SubElement(rec, SR+"ResourceCapacityUsed") -- e1.text = str(resourcecapacityused) -- -- e3 = etree.SubElement(rec, SR+"ResourceCapacityAllocated") -- e3.text = str(resourcecapacityused) -- -- if not logicalcapacityused: -- logicalcapacityused = 0 -- -- e2 = etree.SubElement(rec, SR+"LogicalCapacityUsed") -- e2.text = str(logicalcapacityused) -- -- -- --# --# Return dictionary with reserved space by given column (s_uid, s_gid) --# --def getreservedspace(dbconn, column): -- cursor = dbconn.cursor() -- cursor.execute('SELECT {0}, SUM(t_space) FROM dpm_space_reserv GROUP BY {0}'.format(column)) -- -- ret = {} -- for row in cursor: -- ret[row[0]] = row[1] -- -- cursor.close() -- -- return ret -- -- -- --# --# Return dictionary with key / value for given table --# --def getdbkv(dbconn, table, ckey, cval): -- cursor = dbconn.cursor() -- cursor.execute('SELECT {0}, {1} FROM {2}'.format(ckey, cval, table)) -- -- ret = {} -- for row in cursor: -- ret[row[0]] = row[1] -- -- cursor.close() -- -- return ret -- -- -- --def getdnsnames(name): -- d = socket.gethostbyaddr(name) -- names = [ d[0] ] + d[1] + d[2] -- return names -- --def resolve(name): -- names = getdnsnames(name) -- for dnsname in names: -- if '.' in dnsname: -- fullname = dnsname -- break -- else: -- fullname = name -- return fullname -- --def gethostname(): -- fullname = socket.gethostname() -- if '.' 
not in fullname: -- fullname = resolve(fullname) -- return fullname -- --############# --# Main code # --############# -- --# basic logging configuration --streamHandler = logging.StreamHandler(sys.stderr) --streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S")) --_log.addHandler(streamHandler) --_log.setLevel(logging.WARN) -- --parser = OptionParser() --parser.add_option('--reportgroups', dest='reportgroups', action='store_true', default=False, help="Report about all groups") --parser.add_option('--reportusers', dest='reportusers', action='store_true', default=False, help="Report about all users") --parser.add_option('-v', '--debug', dest='verbose', action='count', default=0, help='Increase verbosity level for debugging (on stderr)') --parser.add_option('--hostname', dest='hostname', default=gethostname(), help="The hostname string to use in the record. Default: this host.") --parser.add_option('--site', dest='site', default="", help="The site string to use in the record. Default: none.") --parser.add_option('--recordid', dest='recordid', default=None, help="The recordid string to use in the record. Default: a newly computed unique string.") --parser.add_option('--nsconfig', dest='nsconfig', default=None, help="Path to the NSCONFIG file where to take the db login info") --parser.add_option('--dpmconfig', dest='dpmconfig', default=None, help="Path to the DPMCONFIG file where to take the db login info") --parser.add_option('--dbhost', dest='dbhost', default=None, help="Database host, if no NSCONFIG given") --parser.add_option('--dbuser', dest='dbuser', default=None, help="Database user, if no NSCONFIG given") --parser.add_option('--dbpwd', dest='dbpwd', default=None, help="Database password, if no NSCONFIG given") --parser.add_option('--nsdbname', dest='nsdbname', default='cns_db', help="NS Database name, if no NSCONFIG given") --parser.add_option('--dpmdbname', dest='dpmdbname', default='dpm_db', help="DPM Database name, if no DPMCONFIG given") --parser.add_option('--validduration', dest='validduration', default=86400, help="Valid duration of this record, in seconds (default: 1 day)") --options, args = parser.parse_args() -- --if options.verbose == 0: _log.setLevel(logging.ERROR) --elif options.verbose == 1: _log.setLevel(logging.WARN) --elif options.verbose == 2: _log.setLevel(logging.INFO) --else: _log.setLevel(logging.DEBUG) -- --record_id = options.recordid --site = options.site --conn_data = { -- 'host': options.dbhost, -- 'user': options.dbuser, -- 'pass': options.dbpwd, -- 'cns_db': options.nsdbname, -- 'dpm_db': options.dpmdbname, --} -- --if options.nsconfig: -- # Parse the NSCONFIG line, extract the db login info from it -- for k, v in get_conn_data(options.nsconfig, default_cns_db).items(): -- if k == 'db': conn_data['cns_db'] = v -- else: conn_data[k] = v -- --if options.dpmconfig: -- # Parse the DPMCONFIG line, extract the db login info from it -- for k, v in get_conn_data(options.nsconfig, default_dpm_db).items(): -- if k == 'db': conn_data['dpm_db'] = v -- else: conn_data[k] = v -- -- --# --# Connect to the db --# --try: -- nsconn = MySQLdb.connect(host=conn_data['host'], user=conn_data['user'], passwd=conn_data['pass'], db=conn_data['cns_db']) --except MySQLdb.Error as e: -- _log.error("Error Connecting to mysql. 
%d: %s", e.args[0], e.args[1]) -- sys.exit (1) -- -- --try: -- dpmconn = MySQLdb.connect(host=conn_data['host'], user=conn_data['user'], passwd=conn_data['pass'], db=conn_data['dpm_db']) --except MySQLdb.Error as e: -- _log.error("Error Connecting to mysql. %d: %s", e.args[0], e.args[1]) -- sys.exit (1) -- -- -- --# --# Init the xml generator --# --SR_NAMESPACE = "http://eu-emi.eu/namespaces/2011/02/storagerecord" --SR = "{%s}" % SR_NAMESPACE --NSMAP = {"sr": SR_NAMESPACE} --xmlroot = etree.Element(SR+"StorageUsageRecords", nsmap=NSMAP) -- -- -- --if options.reportgroups: -- # -- # Report about groups -- # -- _log.debug("Groups reporting: starting") -- -- gid2space = getreservedspace(dpmconn, 's_gid') -- gid2name = getdbkv(nsconn, 'Cns_groupinfo', 'gid', 'groupname') -- -- cursor = nsconn.cursor() -- cursor.execute('SELECT gid, COUNT(*), SUM(filesize) FROM Cns_file_metadata WHERE filemode & 16384 = 0 GROUP BY gid') -- -- for row in cursor: -- _log.debug(row) -- if row[0] not in gid2name: continue -- -- # update XML -- addrecord(xmlroot, options.hostname, gid2name[row[0]], None, site, row[1], gid2space.get(row[0], 0), row[2], options.validduration, record_id) -- -- _log.debug("Groups reporting: number of rows returned: %d", cursor.rowcount) -- -- cursor.close() -- -- -- --if options.reportusers: -- # -- # Report about users -- # -- _log.debug("Users reporting: starting") -- -- uid2space = getreservedspace(dpmconn, 's_uid') -- uid2name = getdbkv(nsconn, 'Cns_userinfo', 'userid', 'username') -- -- cursor = nsconn.cursor() -- cursor.execute ('SELECT owner_uid, COUNT(*), SUM(filesize) FROM Cns_file_metadata WHERE filemode & 16384 = 0 GROUP BY owner_uid') -- -- for row in cursor: -- _log.debug(row) -- if row[0] not in uid2name: continue -- -- # update XML -- addrecord(xmlroot, options.hostname, None, uid2name[row[0]], site, row[1], uid2space.get(row[0], 0), row[2], options.validduration, record_id) -- -- _log.debug("Users reporting: number of rows returned: %d", cursor.rowcount) -- -- cursor.close() -- -- --nsconn.close() --dpmconn.close() -- -- --# pretty string --out = sys.stdout --if sys.version_info >= (3,): -- out = sys.stdout.buffer --et = etree.ElementTree(xmlroot) --et.write(out, pretty_print=True, encoding="utf-8") -- -+from dmliteshell import star - --_log.debug('done') -+star._log.warn("Calling directly %s is deprecated, use `dmlite-shell -e 'star'`" % sys.argv[0]) -+sys.exit(star.main(sys.argv)) -diff --git a/shell/src/dome-info-exec b/scripts/bdii/dome-info-exec -similarity index 100% -rename from shell/src/dome-info-exec -rename to scripts/bdii/dome-info-exec -diff --git a/shell/src/dome-info-provider.py b/scripts/bdii/dome-info-provider.py -similarity index 64% -rename from shell/src/dome-info-provider.py -rename to scripts/bdii/dome-info-provider.py -index 2aca35aa..cb16ece2 100755 ---- a/shell/src/dome-info-provider.py -+++ b/scripts/bdii/dome-info-provider.py -@@ -6,19 +6,18 @@ import socket - from dmliteshell.infoutils import * - - --def create_shares(config): -+def create_shares(sysinfo): - """Find all the StorageShares on this DPM - and return an array of them""" -- sysinfo = SystemInfo(config) - jgqt, totalcapacity, totalused, totalgroups = sysinfo.getspaces() - ret = [] - for space, qt in jgqt.items(): -- share = Share(qt["path"], qt["quotatkpoolname"], qt["groups"], qt["quotatkname"], config.host) -- scap = ShareCapacity(qt["quotatkname"], int(qt["quotatktotspace"]), int(qt["pathusedspace"]), config.host) -+ share = Share(qt["path"], qt["quotatkpoolname"], qt["groups"], 
qt["quotatkname"], sysinfo.host) -+ scap = ShareCapacity(qt["quotatkname"], int(qt["quotatktotspace"]), int(qt["pathusedspace"]), sysinfo.host) - scap.set_foreign_key(share.getname()) - share.add_child(scap) - -- mapp = MappingPolicy(qt["quotatkname"], qt["groups"], config.host) -+ mapp = MappingPolicy(qt["quotatkname"], qt["groups"], sysinfo.host) - mapp.set_foreign_key(share.getname()) - share.add_child(mapp) - -@@ -26,54 +25,50 @@ def create_shares(config): - return ret - - --def create_endpoints(config): -+def create_endpoints(sysinfo): - """Find all the StorageEndpoints on this DPM - and return an array of them""" -- sysinfo = SystemInfo() -- - cert_subject = None - try: - from M2Crypto import X509 -- x509 = X509.load_cert(config.cert, X509.FORMAT_PEM) -+ x509 = X509.load_cert(sysinfo.cert, X509.FORMAT_PEM) - cert_subject = "/%s" % '/'.join(x509.get_issuer().as_text().split(', ')) - except: - pass - - ret = [] - if 2811 in sysinfo.ports: -- ret.append(Endpoint("gsiftp", config.host, 2811, "", cert_subject, *sysinfo.getprotinfo("gsiftp"))) -+ ret.append(Endpoint("gsiftp", sysinfo.host, 2811, "", cert_subject, *sysinfo.getprotinfo("gsiftp"))) - if 443 in sysinfo.ports: -- ret.append(Endpoint("https", config.host, 443, "", cert_subject, *sysinfo.getprotinfo("https"))) -+ ret.append(Endpoint("https", sysinfo.host, 443, "", cert_subject, *sysinfo.getprotinfo("https"))) - if 1094 in sysinfo.ports: -- ret.append(Endpoint("xroot", config.host, 1094, "/", cert_subject, *sysinfo.getprotinfo("root"))) -+ ret.append(Endpoint("xroot", sysinfo.host, 1094, "/", cert_subject, *sysinfo.getprotinfo("root"))) - if 8446 in sysinfo.ports: -- ret.append(Endpoint("httpg", config.host, 8446, "/srm/managerv2?SFN=/", cert_subject, *sysinfo.getprotinfo("srm"))) -+ ret.append(Endpoint("httpg", sysinfo.host, 8446, "/srm/managerv2?SFN=/", cert_subject, *sysinfo.getprotinfo("srm"))) - - return ret - - --def create_accessprotocols(config): -+def create_accessprotocols(sysinfo): - """Find all the StorageEndpoints on this DPM - and return an array of them""" - ret = [] -- sysinfo = SystemInfo() - if 2811 in sysinfo.ports: -- ret.append(AccessProtocol(config.host, *sysinfo.getprotinfo("gsiftp"))) -+ ret.append(AccessProtocol(sysinfo.host, *sysinfo.getprotinfo("gsiftp"))) - if 443 in sysinfo.ports: -- ret.append(AccessProtocol(config.host, *sysinfo.getprotinfo("https"))) -+ ret.append(AccessProtocol(sysinfo.host, *sysinfo.getprotinfo("https"))) - if 1094 in sysinfo.ports: -- ret.append(AccessProtocol(config.host, *sysinfo.getprotinfo("root"))) -+ ret.append(AccessProtocol(sysinfo.host, *sysinfo.getprotinfo("root"))) - if 8446 in sysinfo.ports: -- ret.append(AccessProtocol(config.host, *sysinfo.getprotinfo("gsiftp"))) -+ ret.append(AccessProtocol(sysinfo.host, *sysinfo.getprotinfo("gsiftp"))) - return ret - - --def create_manager(config): -+def create_manager(sysinfo): - """Create a StorageManager""" -- sysinfo = SystemInfo(config) - jgqt, totalcapacity, totalused, totalgroups = sysinfo.getspaces() -- mgr = Manager(sysinfo.getsysinfo("dome"), sysinfo.getsysinfo("dmlite"), config.host) -- ds = DataStore(config.host, totalcapacity, totalused) -+ mgr = Manager(sysinfo.getsysinfo("dome"), sysinfo.getsysinfo("dmlite"), sysinfo.host) -+ ds = DataStore(sysinfo.host, totalcapacity, totalused) - ds.set_foreign_key(mgr.getname()) - mgr.add_child(ds) - return mgr -@@ -89,30 +84,30 @@ def main(): - args = parser.parse_args() - - # Create the top of the tree -- top = Service(args) -- sysinfo = SystemInfo(args) -+ top = 
Service(args.host, args.sitename) -+ sysinfo = SystemInfo(args.host, args.cert, args.key) - jgqt, totalcapacity, totalused, totalgroups = sysinfo.getspaces() - ssc = StorageServiceCapacity(args.host, totalcapacity, totalused) - ssc.set_foreign_key(top.getname()) - top.add_child(ssc) - - # Add entries underneath -- for share in create_shares(args): -+ for share in create_shares(sysinfo): - share.set_foreign_key(top.getname()) - top.add_child(share) - -- for endpoint in create_endpoints(args): -+ for endpoint in create_endpoints(sysinfo): - endpoint.set_foreign_key(top.getname()) - ap = AccessPolicy(totalgroups, args.host) - ap.set_foreign_key(endpoint.getname()) - endpoint.add_child(ap) - top.add_child(endpoint) - -- for accessprotocol in create_accessprotocols(args): -+ for accessprotocol in create_accessprotocols(sysinfo): - accessprotocol.set_foreign_key(top.getname()) - top.add_child(accessprotocol) - -- manager = create_manager(args) -+ manager = create_manager(sysinfo) - manager.set_foreign_key(top.getname()) - top.add_child(manager) - -diff --git a/scripts/utils/dmlite-mysql-dirspaces.py b/scripts/utils/dmlite-mysql-dirspaces.py -new file mode 100755 -index 00000000..57235b27 ---- /dev/null -+++ b/scripts/utils/dmlite-mysql-dirspaces.py -@@ -0,0 +1,26 @@ -+#!/usr/bin/python2 -+################################################################################ -+## Original script is now integrated in the dmliteshell which should be used ## -+## instead of calling this legacy but fully backward compatible CLI interface ## -+################################################################################ -+# -+# A script that calculates the space occupied by files in every directory -+# and can set the metadata filesize field with it, for the first N levels -+# -+# This script can directly update information in the database and not all -+# operation leads to consistent results if you don't stop all DPM daemons -+# before executing this script (see help screen for more details and examples) -+# -+# March 2015 - Fabrizio Furano - CERN IT/SDC - furano@cern.ch -+# January 2019 - Petr Vokac - petr.vokac@cern.ch -+# -+# -+# Usage: -+# * get the help screen with all available options -+# dmlite-mysql-dirspaces [-h] [--help] -+# -+import sys -+from dmliteshell import dbck -+ -+dbck._log.warn("Calling directly %s is deprecated, use `dmlite-shell -e 'dbck'`" % sys.argv[0]) -+sys.exit(dbck.main(sys.argv)) -diff --git a/shell/src/dmlite-prom.py b/scripts/utils/dmlite-prom.py -similarity index 90% -rename from shell/src/dmlite-prom.py -rename to scripts/utils/dmlite-prom.py -index ab9bf211..fe30ad10 100755 ---- a/shell/src/dmlite-prom.py -+++ b/scripts/utils/dmlite-prom.py -@@ -43,8 +43,8 @@ def parseConfig(filename): - _log.debug("parsing config file %s", filename) - ret = {} - with open(filename) as f: -- reComment = re.compile('^ *#') -- reKeyValue = re.compile('^(.*?):\s*(.*?)\s*$') -+ reComment = re.compile(r'^ *#') -+ reKeyValue = re.compile(r'^(.*?):\s*(.*?)\s*$') - for line in f.readlines(): - if reComment.match(line): continue - res = reKeyValue.match(line) -@@ -55,32 +55,37 @@ def parseConfig(filename): - return ret - - --def getDomeInfo(url): -+def getDomeInfo(url, capath=DEFAULT_CAPATH): - _log.debug("get info from DOME: %s", url) - - ret = {} - -- f = urlopen(url) -+ f = urlopen(url, capath=capath) - for line in f.readlines(): -- res = re.match(b'^dome \[(.*?)\] running as (\S+)', line) -+ res = re.match(r'^dome \[(.*?)\] running as (\S+)', line) - if res != None: - ret['version'] = 
res.group(1).decode('utf-8') - ret['flavor'] = res.group(2).decode('utf-8') -- res = re.match(b'^Total: (\d+) .*Free: (\d+)', line) -+ res = re.match(r'^Total: (\d+) .*Free: (\d+)', line) - if res != None: - ret['space_total'] = int(res.group(1)) - ret['space_free'] = int(res.group(2)) -- res = re.match(b'^Server PID: (\d+)', line) -+ res = re.match(r'^Server PID: (\d+)', line) - if res != None: - ret['pid'] = int(res.group(1)) -- res = re.match(b'^Request rate: (.*?)Hz \(Peak: (.*?)Hz\) .*DB queries: (.*?)Hz .*DB transactions: (.*?)Hz .*Intercluster messages: (.*?)Hz', line) -+ res = re.match(r'^Request rate: (.*?)Hz \(Peak: (.*?)Hz\)', line) - if res != None: - ret['request_rate'] = float(res.group(1)) - ret['request_rate_peak'] = float(res.group(2)) -- ret['db_query_rate'] = float(res.group(3)) -- ret['db_transaction_rate'] = float(res.group(4)) -- ret['intercluster_rate'] = float(res.group(5)) -- res = re.match(b'^Queue checksum: (\d+) .*Queue file pull: (\d+)', line) -+ res = re.search(r'DB queries: (.*?)Hz', line) -+ if res != None: ret['db_query_rate'] = float(res.group(1)) -+ res = re.search(r'DB transactions: (.*?)Hz', line) -+ if res != None: ret['db_transaction_rate'] = float(res.group(1)) -+ res = re.search(r'DB avg transaction time: (.*?)ms', line) -+ if res != None: ret['db_transaction_time_ms'] = float(res.group(1)) -+ res = re.search(r'Intercluster messages: (.*?)Hz', line) -+ if res != None: ret['intercluster_rate'] = float(res.group(1)) -+ res = re.match(r'^Queue checksum: (\d+) .*Queue file pull: (\d+)', line) - if res != None: - ret['queue_checksum'] = int(res.group(1)) - ret['queue_filepull'] = int(res.group(2)) -@@ -212,7 +217,7 @@ if __name__ == '__main__': - out.write(u'# TYPE dmlite_version gauge\n') - out.write(u'dmlite_version{version="%s",flavor="%s"} 1\n' % (data['version'], data['flavor'])) - -- for key in ['space_total', 'space_free', 'pid', 'request_rate', 'request_rate_peak', 'db_query_rate', 'db_transaction_rate', 'intercluster_rate', 'queue_checksum', 'queue_filepull']: -+ for key in ['space_total', 'space_free', 'pid', 'request_rate', 'request_rate_peak', 'db_query_rate', 'db_transaction_rate', 'db_transaction_time_ms', 'intercluster_rate', 'queue_checksum', 'queue_filepull']: - if key not in data: continue - out.write(u'# HELP dmlite_%s DPM DOME %s\n' % (key, key)) - out.write(u'# TYPE dmlite_%s gauge\n' % key) -@@ -229,14 +234,12 @@ if __name__ == '__main__': - import json - import dmliteshell.executor - -- dome_command_base = "https://{0}:{1}{2}command".format(dome_host, dome_port, dome_urlprefix) -- dshell = dmliteshell.executor.DomeExecutor(dome_cert, dome_key, dome_capath, '', '') -+ dome_command_base = "https://{0}:{1}{2}".format(dome_host, dome_port, dome_urlprefix) -+ executor = dmliteshell.executor.DomeExecutor(dome_command_base, dome_cert, dome_key, dome_capath, '', '') - -- si, err = dshell.getSpaceInfo(dome_command_base) -+ jsi, err = executor.getspaceinfo() - if err: -- raise Exception("Unable to get DPM DOME storage info ({0})".format(str(si))) -- -- jsi = json.loads(si) -+ raise Exception("Unable to get DPM DOME storage info ({0})".format(str(err))) - - out.write(u'# HELP dmlite_fsinfo_size Physical size of disknodes filesystems\n') - out.write(u'# TYPE dmlite_fsinfo_size gauge\n') -@@ -262,11 +265,9 @@ if __name__ == '__main__': - for poolname, pooldata in sorted(jsi.get('poolinfo', {}).items()): - out.write(u"dmlite_poolinfo_reserved{pool=\"%s\",status=\"%s\"} %s\n" % (poolname, pooldata['poolstatus'], pooldata['defsize'])) - -- qt, err 
= dshell.getquotatoken(dome_command_base, '/', 0, 1) -+ jqt, err = executor.getquotatoken('/', 0, 1) - if err: -- raise Exception("Unable to get DPM DOME quotatoken ({0})".format(str(qt))) -- -- jqt = json.loads(qt) -+ raise Exception("Unable to get DPM DOME quotatoken ({0})".format(str(err))) - - out.write(u'# HELP dmlite_quotatoken_size Quotatoken size\n') - out.write(u'# TYPE dmlite_quotatoken_size gauge\n') -@@ -286,10 +287,9 @@ if __name__ == '__main__': - for qtdata in sorted(jqt.values(), key=lambda x: x['path']): - path = qtdata['path'] - try: -- ds, err = dshell.getdirspaces(dome_command_base, path) -+ dirdata, err = executor.getdirspaces(path) - if err: - raise Exception("Unable to get DPM DOME dirspaces ({0})".format(str(ds))) -- dirdata = json.loads(ds) - out.write(u"dmlite_directory_used{name=\"%s\",pool=\"%s\",path=\"%s\"} %s\n" % (dirdata['quotatoken'], dirdata['poolname'], path, dirdata['dirusedspace'])) - except Exception as e: - _log.error("uanble to get DOME directory details for %s: %s", path, str(e)) -diff --git a/scripts/utils/dpm-dump.py b/scripts/utils/dpm-dump.py -new file mode 100755 -index 00000000..81e07b28 ---- /dev/null -+++ b/scripts/utils/dpm-dump.py -@@ -0,0 +1,22 @@ -+#!/usr/bin/python2 -+################################################################################ -+## Original script is now integrated in the dmliteshell which should be used ## -+## instead of calling this legacy but fully backward compatible CLI interface ## -+################################################################################ -+# -+# Script for extracting file information from dpm database and converting -+# the result into text, json or xml storage dump -+# -+# Erming Pei, 2009/11/13 -+# Tomas Kouba, 2012/11/16 -+# Dennis van Dok, 2015/07/03 -+# Alessandra Forti, 2015/10/14, 2015/11/18 -+# Eygene Ryabinkin, 2016 -+# Georgios Bitzes + Fabrizio Furano, 2016 -+# Petr Vokac, 2018/12/31 -+# -+import sys -+from dmliteshell import dump -+ -+dump._log.warn("Calling directly %s is deprecated, use `dmlite-shell -e 'dump'`" % sys.argv[0]) -+sys.exit(dump.main(sys.argv)) -diff --git a/scripts/utils/lost.py b/scripts/utils/lost.py -new file mode 100644 -index 00000000..8a4c3680 ---- /dev/null -+++ b/scripts/utils/lost.py -@@ -0,0 +1,18 @@ -+#!/usr/bin/python2 -+################################################################################ -+## Original script is now integrated in the dmliteshell which should be used ## -+## instead of calling this legacy but fully backward compatible CLI interface ## -+################################################################################ -+# -+# Find and deal with DPM lost and dark files (DB vs. 
filesystem inconsistencies) -+# -+# usage: -+# python lost.py --help -+# examples: -+# python lost.py --verbose --processes 10 --stat-types=dark --fix-dark --all --include-fs-rdonly &> dpm-cleaner.out -+# -+import sys -+from dmliteshell import lost -+ -+lost._log.warn("Calling directly %s is deprecated, use `dmlite-shell -e 'dbck lost'`" % sys.argv[0]) -+sys.exit(lost.main(sys.argv)) -diff --git a/shell/man/dmlite-shell.1 b/shell/man/dmlite-shell.1 -index dce1978c..b0f07509 100644 ---- a/shell/man/dmlite-shell.1 -+++ b/shell/man/dmlite-shell.1 -@@ -11,12 +11,13 @@ show this help message and exit - .IP "\fB-c , --config=\fP" - define the configuration file before launching the shell (default value is /etc/dmlite.conf) - .IP "\fB-e , --execute=\fP" --execute the given command and exit -+execute the given command (or multiple commands) and exit - .IP "\fB-s , --script=\fP" - execute the given script file line by line and exit - - .SH HELP - .IP "\fBacl\fP \fISet or reads the ACL of a file.\fP" -+.IP "\fBaccounting\fP \fIStorage space accounting for EGI and WLCG.\fP" - .IP "\fBcd\fP \fIChange the current directory.\fP" - .IP "\fBchrp\fP \fIChange the group of a file.\fP" - .IP "\fBchmod\fP \fIChange the mode of a file.\fP" -@@ -24,7 +25,9 @@ execute the given script file line by line and exit - .IP "\fBchecksum\fP \fISet or read file checksums.\fP" - .IP "\fBcomment\fP \fISet or read file comments.\fP" - .IP "\fBcreate\fP \fICreate a new file.\fP" -+.IP "\fBdbck\fP \fIConsistency checks and fixes.\fP" - .IP "\fBdu\fP \fIDetermine the disk usage of a file or a directory.\fP" -+.IP "\fBdump\fP \fIDump file / replica info from DPM database.\fP" - .IP "\fBexit\fP \fIExit the DMLite shell.\fP" - .IP "\fBfsadd\fP \fIAdd a filesystem.\fP" - .IP "\fBfsdel\fP \fIDelete a filesystem.\fP" -@@ -52,7 +55,6 @@ execute the given script file line by line and exit - .IP "\fBreplicadel\fP \fIDelete a replica for a file.\fP" - .IP "\fBreplicamodify\fP \fIUpdate the replica of a file.\fP" - .IP "\fBrmdir\fP \fIDelete a directory.\fP" --.IP "\fBsetguid\fP \fISet the GUID of a file.\fP" - .IP "\fBunlink\fP \fIRemove a file from the database.\fP" - .IP "\fBuseradd\fP \fIAdd a new user.\fP" - .IP "\fBuserban\fP \fIModify the ban status of a user.\fP" -diff --git a/shell/src/CMakeLists.txt b/shell/src/CMakeLists.txt -index b975327c..a95546a5 100644 ---- a/shell/src/CMakeLists.txt -+++ b/shell/src/CMakeLists.txt -@@ -11,21 +11,6 @@ execute_process (COMMAND ${PYTHON_EXECUTABLE} -c "from distutils.sysconfig impor - install(PROGRAMS dmlite-shell - DESTINATION ${INSTALL_PFX_USR}/bin) - --install(PROGRAMS dmlite-mysql-dirspaces.py -- DESTINATION ${INSTALL_PFX_USR}/bin) -- --install(PROGRAMS dpm-storage-summary.py -- DESTINATION ${INSTALL_PFX_USR}/bin) -- --install(PROGRAMS dpm-storage-summary.cgi -- DESTINATION ${INSTALL_PFX_USR}/bin) -- --install(PROGRAMS dome-info-provider.py -- DESTINATION ${INSTALL_PFX_USR}/bin) -- --install(PROGRAMS dome-info-exec -- DESTINATION ${INSTALL_PFX_VARLIB}/bdii/gip/provider) -- --install(FILES __init__.py interpreter.py dbutils.py executor.py infoutils.py argus.py utils.py -+install(FILES __init__.py argus.py dbck.py dbutils.py dump.py executor.py infoutils.py interpreter.py lost.py srr.py star.py utils.py - DESTINATION ${PYTHON_SITE_PACKAGES}/dmliteshell - ) -diff --git a/shell/src/argus.py b/shell/src/argus.py -index b382145d..b65bd845 100644 ---- a/shell/src/argus.py -+++ b/shell/src/argus.py -@@ -1,4 +1,3 @@ --#!/usr/bin/python2 - # Simplified Argus banning data extracted from server 
configuration - # read directly by its WSDL interface. - # -@@ -22,10 +21,14 @@ from __future__ import division - import sys - import pycurl - import io -+import logging - - import xml.etree.ElementTree as ET - - -+_log = logging.getLogger('dmlite-shell') -+ -+ - class ArgusBan(object): - - _PAP_NS = { -@@ -72,6 +75,8 @@ class ArgusBan(object): - - def pap_aliases(self): - # requires Argus PAP CONFIGURATION_READ privileges -+ _log.debug("get PAP aliases") -+ - aliases = [] - - PAP_GET_ALL = """ -@@ -136,6 +141,8 @@ class ArgusBan(object): - - def pap_simple_banlist(self, alias): - # requires Argus PAP POLICY_READ_LOCAL|POLICY_READ_REMOTE privileges -+ _log.debug("get PAP simple banlist for %s", alias) -+ - ret = {} - - PAP_LIST_POLICIES = """ -diff --git a/shell/src/dmlite-mysql-dirspaces.py b/shell/src/dbck.py -old mode 100755 -new mode 100644 -similarity index 71% -rename from shell/src/dmlite-mysql-dirspaces.py -rename to shell/src/dbck.py -index e1df54e2..684b656e ---- a/shell/src/dmlite-mysql-dirspaces.py -+++ b/shell/src/dbck.py -@@ -1,9 +1,7 @@ --#!/usr/bin/python2 --# --# A script that calculates the space occupied by files in every directory -+# A module that calculates the space occupied by files in every directory - # and can set the metadata filesize field with it, for the first N levels - # --# This script can directly update information in the database and not all -+# This module can directly update information in the database and not all - # operation leads to consistent results if you don't stop all DPM daemons - # before executing this script (see help screen for more details and examples) - # -@@ -31,18 +29,19 @@ import logging, logging.handlers - try: import simplejson as json - except ImportError: import json - -+from .dbutils import DBConn, CachedFullPath -+ - # compatibility for existing SLC6, CentOS7, CentOS8 packages - try: -- import pymysql - import pymysql.cursors as pymysql_cursors - except ImportError: -- import MySQLdb as pymysql - import MySQLdb.cursors as pymysql_cursors - -+ - __version__ = '1.2.0' - __author__ = 'Fabrizio Furano' - --_log = logging.getLogger('DPMDIRSPACES') -+_log = logging.getLogger('dmlite-shell') - - DEFAULT_CNS_DB = 'cns_db' - DEFAULT_DPM_DB = 'dpm_db' -@@ -51,64 +50,7 @@ DEFAULT_UPDATELEVELS = 6 - - - --def guess_config_files(): -- """ Guesses the location of DPM namespace configuration file """ -- try: -- possible_nsconfigs = ['/opt/lcg/etc/NSCONFIG', '/usr/etc/NSCONFIG'] -- if 'LCG_LOCATION' in os.environ: -- possible_nsconfigs.insert(0, os.environ['LCG_LOCATION'].rstrip('/') + '/etc/NSCONFIG') -- -- for f in possible_nsconfigs: -- if os.path.exists(f): -- return f -- -- except Exception as e: -- _log.warn("failed to guess DB config file location: %s", str(e)) -- -- return None -- -- -- --def get_conn_data(nsconfig): -- """ Returns connection data from NSCONFIG""" -- retval = {} -- -- _log.debug("getting connection info from %s", nsconfig) -- try: -- nsconfig_line = open(nsconfig).readline().strip() -- except Exception as e: -- _log.error("Cannot open DPM config file %s: %s", nsconfig, str(e)) -- sys.exit(-1) -- -- nsre = re.compile(r"(.*)/(.*)@([^/]*)(?:/(.*))?") -- m = nsre.match(nsconfig_line) -- if m == None: -- _log.error("Bad line in DPM config '%s', doesn't match re '%s'", nsconfig, nsre) -- sys.exit(-1) -- retval['user'] = m.group(1) -- retval['pass'] = m.group(2) -- retval['host'] = m.group(3) -- if m.group(4): -- retval['cns_db'] = m.group(4) -- else: -- retval['cns_db'] = DEFAULT_CNS_DB -- retval['dpm_db'] = DEFAULT_DPM_DB 
-- -- _log.debug("database connection: host=%s, user=%s, cns_db=%s, dpm_db=%s", retval['host'], retval['user'], retval['cns_db'], retval['dpm_db']) -- -- return retval -- -- -- --def get_connection(conn_data, db, cclass=pymysql.cursors.Cursor): -- return pymysql.connect( -- host=conn_data['host'], user=conn_data['user'], -- passwd=conn_data['pass'], db=conn_data[db], -- cursorclass=cclass) -- -- -- --def get_updatelivels(filename): -+def get_updatelevels(filename): - """ Guesses the location of DPM namespace configuration file """ - ret = DEFAULT_UPDATELEVELS - -@@ -130,265 +72,6 @@ def get_updatelivels(filename): - - - --class CachedFullPath(object): -- """DPM file metadata stored in database have pointer just -- to parent directory and to build whole path it is necessary -- to recursively query all parent directories to the root "/". -- -- Althought these queries are pretty cheap they are done for -- each file and even with small latencies (especially with -- database on dedicated server) they can take quite a time. -- -- This class not only caches past queries, but also limit -- size of cached data not to exhaust whole memory while -- dumping big DPM database with a lot of files. -- """ -- -- def __init__(self, conn, maxsize=1000000, table='Cns_file_metadata', fileid_only=False): -- self._cache = {} -- self._cache_path = {} -- self._conn = conn -- self._maxsize = maxsize -- self._table = table -- self._fileid_only = fileid_only -- self._ntotal = 0 -- self._ncached = 0 -- self._nqueries = 0 -- self._ncleanup = 0 -- -- def __del__(self): -- if _log: -- _log.info("path lookup cache usage: total %i, cached %i, queries %i, cleanup %i", self._ntotal, self._ncached, self._nqueries, self._ncleanup) -- -- def _fill_cache(self, fileid): -- """Recursively get full path for given fileid (simple - could be used to validate _fill_cache_multi)""" -- self._ntotal += 1 -- -- if fileid in self._cache: -- self._ncached += 1 -- return -- -- if len(self._cache) >= self._maxsize: -- _log.debug("fullpath cache too big (%i), dropping cached records...", len(self._cache)) -- self._ncleanup += 1 -- del(self._cache) -- del(self._cache_path) -- self._cache = {} -- self._cache_path = {} -- -- sql = "SELECT parent_fileid, name FROM %s WHERE fileid=%%s" % self._table -- if self._fileid_only: -- sql = "SELECT parent_fileid FROM %s WHERE fileid=%%s" % self._table -- -- cursor = self._conn.cursor() -- cursor.execute(sql, (fileid, )) -- res = cursor.fetchone() -- cursor.close() -- -- self._nqueries += 1 -- -- if _log.getEffectiveLevel() < logging.DEBUG: -- _log.debug("query parent directory '%s': %s", sql, res) -- -- if res == None: -- if fileid != 0: -- _log.info("db inconsistency: could not find path for fileid %i (most likely the entry is orphan)", fileid) -- else: -- _log.debug("no parent for top level directory 0") -- self._cache[fileid] = None -- return -- -- parentid = res[0] -- name = str(fileid) if self._fileid_only else res[1] -- -- if parentid == 0: # top level directory -- self._cache[fileid] = [fileid] -- self._cache_path[fileid] = '' -- return -- -- if name.find('/') != -1: # this script doesn't support '/' characters in metadata name -- raise Exception("fileid {0} contains slash character in its name '{1}'".format(fileid, name)) -- -- self._fill_cache(parentid) -- if parentid not in self._cache or self._cache[parentid] == None: -- # db inconsistency already detected and logged -- self._cache[fileid] = None -- return -- -- self._cache[fileid] = self._cache[parentid] + [fileid] -- if not self._fileid_only: 
-- fullpath = "{0}/{1}".format(self._cache_path[parentid], name) -- self._cache_path[fileid] = fullpath -- -- def _fill_cache_multi(self, fileids): -- """Reduce impact of query latency by resolving paths for multiple -- fileids. Max number of queries still corresponds to the max path -- depth, but all fileids are resolved at the same time.""" -- self._ntotal += len(fileids) -- -- if len(self._cache) + len(fileids) >= self._maxsize: -- _log.debug("fullpath cache too big (%i+%i), dropping cached records...", len(self._cache), len(fileids)) -- self._ncleanup += 1 -- del(self._cache) -- del(self._cache_path) -- self._cache = {} -- self._cache_path = {} -- -- tores = set() -- id2name = {} -- id2parent = {} -- for fileid in fileids: -- if fileid in self._cache: -- if self._cache[fileid] != None: -- self._ncached += len(self._cache[fileid]) -- else: -- self._ncached += 1 -- else: -- tores.add(fileid) -- -- if len(tores) > 0: -- cursor = self._conn.cursor() -- -- while len(tores) > 0: -- -- sql = "SELECT fileid, parent_fileid, name FROM Cns_file_metadata WHERE fileid IN ({0})".format(','.join([ str(x) for x in tores ])) -- if self._fileid_only: -- sql = "SELECT fileid, parent_fileid FROM Cns_file_metadata WHERE fileid IN ({0})".format(','.join([ str(x) for x in tores ])) -- -- cursor.execute(sql) -- -- self._nqueries += 1 -- tores = set() -- -- for row in cursor: -- fileid = row[0] -- parentid = row[1] -- name = str(row[0]) if self._fileid_only else row[2] -- -- if _log.getEffectiveLevel() < logging.DEBUG: -- _log.debug("query parent directory '%s': %s", sql, row) -- -- if parentid == 0: -- name = '' -- -- if name.find('/') != -1: # this script doesn't support '/' characters in metadata name -- raise Exception("fileid {0} contains slash character in its name '{1}'".format(fileid, name)) -- -- id2name[fileid] = name -- id2parent[fileid] = parentid -- -- if parentid == 0: -- pass -- elif parentid in self._cache: -- if self._cache[parentid] != None: -- self._ncached += len(self._cache[parentid]) -- else: -- self._ncached += 1 -- elif parentid not in id2parent: -- tores.add(parentid) -- -- cursor.close() -- -- for fileid in fileids: -- if fileid in self._cache: continue -- -- currid = fileid -- revids = [] -- -- while True: -- if currid in self._cache: -- if self._cache[currid] != None: -- self._ncached += len(self._cache[currid]) -- else: -- self._ncached += 1 -- break -- elif currid in id2parent: -- if currid in revids: -- revids.reverse() -- fullpath = '/'.join([id2name[x] for x in revids]) -- _log.info("db inconsistency: detected directory loop for fileid %i parent %i %s", fileid, currid, fullpath) -- for revid in revids: self._cache[revid] = None -- revids = [] -- break -- revids.append(currid) -- currid = id2parent[currid] -- if currid == 0: # root directory -- break -- else: -- if fileid != 0: -- if fileid != currid: -- revids.reverse() -- fullpath = '/'.join([id2name[x] for x in revids]) -- _log.info("db inconsistency: could not find path for fileid %i parent %i (most likely the entry is orphan, path %s)", fileid, currid, fullpath) -- else: -- _log.info("db inconsistency: could not find path for fileid %i (most likely the entry is orphan)", fileid) -- else: -- _log.debug("no parent for top level directory 0") -- for revid in revids: self._cache[revid] = None -- revids = [] -- break -- -- if len(revids) > 0: -- revids.reverse() -- for i, revid in enumerate(revids): -- if currid == 0: -- self._cache[revid] = revids[:i+1] -- else: -- self._cache[revid] = self._cache[currid] + revids[:i+1] -- if 
not self._fileid_only: -- if revid in self._cache_path: continue -- pathsuffix = '/'.join([id2name[x] for x in revids[:i+1]]) -- if currid == 0: -- self._cache_path[revid] = pathsuffix -- elif currid in self._cache_path: -- self._cache_path[revid] = "{0}/{1}".format(self._cache_path[currid], pathsuffix) -- -- -- def get_ids(self, fileid): -- self._fill_cache_multi(fileid) -- #self._fill_cache_multi([fileid]) -- -- return self._cache.get(fileid) -- -- -- def get_path(self, fileid): -- if self._fileid_only: -- raise Exception("can't get directory path with {0} instance in fileid_only mode".format(self.__class__.__name__)) -- -- self._fill_cache(fileid) -- #self._fill_cache_multi([fileid]) -- -- return self._cache_path.get(fileid) -- -- -- def get_ids_multi(self, fileids): -- ret = {} -- -- self._fill_cache_multi(fileids) -- -- for fileid in fileids: -- if fileid not in self._cache: continue # db inconsistency already reported -- ret[fileid] = self._cache[fileid] -- -- return ret -- -- -- def get_path_multi(self, fileids): -- if self._fileid_only: -- raise Exception("can't get directory path with {0} instance in fileid_only mode".format(self.__class__.__name__)) -- -- ret = {} -- -- self._fill_cache_multi(fileids) -- -- for fileid in fileids: -- if fileid not in self._cache_path: continue # db inconsistency already reported -- ret[fileid] = self._cache_path[fileid] -- -- return ret -- -- -- - def get_path_id(conn, path): - """Get fileid for given path""" - fileid = 0 -@@ -489,11 +172,204 @@ def get_accounted_dirs(conn, updatelevels): - - - --def fix_zero_dirs(conn_data, updatelevels, updatedb=False): -+def fix_namespace_continuity(updatedb=False): -+ """Check virtual namespace continuity and join dangling objects in lost+found""" -+ _log.debug("fix_namespace_continuity(updatedb=%s)", updatedb) -+ -+ dry_run = '' if updatedb else 'dry-run: ' -+ -+ conn = DBConn.get('cns_db') -+ cursor = conn.cursor() -+ if updatedb: -+ ucursor = conn.cursor() -+ try: -+ lostpdirid = 0 -+ lostdirids = [] -+ -+ # get lost+found directory fileid and content -+ if updatedb: -+ # assume that /dpm/lost+found directory already exists -+ for d in '/dpm/lost+found'.split('/'): -+ if d == '': d = '/' -+ cursor.execute('SELECT fileid FROM Cns_file_metadata WHERE (filemode & %s) = %s AND parent_fileid = %s AND name = %s', (stat.S_IFDIR, stat.S_IFDIR, lostpdirid, d)) -+ lostpdirid = cursor.fetchone()[0] -+ -+ cursor.execute('SELECT fileid, name FROM Cns_file_metadata WHERE parent_fileid = %s', (lostpdirid, )) -+ lostdirids = dict([(int(x[1][len('lost'):]), x[0]) for x in cursor.fetchall() if x[1].startswith('lost')]) -+ -+ sql = 'SELECT fileid, parent_fileid FROM Cns_file_metadata WHERE parent_fileid!=0 AND parent_fileid NOT IN (SELECT fileid FROM Cns_file_metadata)' -+ cursor.execute(sql) -+ -+ for row in cursor: -+ fileid, pfileid = row -+ _log.info("found disconnected fileid %i with missing parent %i", fileid, pfileid) -+ -+ if updatedb: -+ # prevent name conflicts within lost+found directory by joining -+ # disconnected data in new subdirectory named with original parentid -+ if pfileid not in lostdirids: -+ # create directory manually to allow consistency checks -+ # and updates without running DPM services -+ ucursor.execute('SELECT id FROM Cns_unique_id FOR UPDATE') -+ dirid = ucursor.fetchone()[0]+1 # assume non-empty Cns_unique_id -+ ucursor.execute('UPDATE Cns_unique_id SET id = %s', (dirid, )) -+ conn.commit() -+ sql = """INSERT INTO Cns_file_metadata ( -+ fileid, parent_fileid, name, filemode, -+ nlink, 
owner_uid, gid, filesize,
-+                        atime, mtime, ctime,
-+                        fileclass, status, csumtype, csumvalue, acl, xattr
-+                    ) VALUES (
-+                        %s, %s, %s, %s,
-+                        %s, %s, %s, %s,
-+                        UNIX_TIMESTAMP(), UNIX_TIMESTAMP(), UNIX_TIMESTAMP(),
-+                        %s, %s, %s, %s, %s, %s
-+                    )"""
-+                    ucursor.execute(sql, (dirid, pfileid, "lost{0}".format(pfileid), 16893, 0, 0, 0, 0, 0, '-', '', '', 'ACL', None))
-+                    ucursor.execute('UPDATE Cns_file_metadata SET nlink = nlink + 1 WHERE fileid = %s', (pfileid, ))
-+                    conn.commit()
-+                    lostdirids[pfileid] = dirid
-+                ucursor.execute('UPDATE Cns_file_metadata SET parent_fileid = %s WHERE fileid = %s', (lostdirids[pfileid], fileid))
-+                conn.commit()
-+
-+        _log.info("%supdated %i disconnected objects", dry_run, cursor.rowcount)
-+
-+    finally:
-+        if updatedb:
-+            ucursor.close()
-+        cursor.close()
-+
-+
-+
-+def fix_no_replica(updatedb=False):
-+    """Cleanup files with no replica from namespace"""
-+    _log.debug("fix_no_replica(updatedb=%s)", updatedb)
-+
-+    dry_run = '' if updatedb else 'dry-run: '
-+
-+    conn = DBConn.get('cns_db')
-+    cursor = conn.cursor()
-+    try:
-+        where = 'filemode&40960=32768 AND fileid NOT IN (SELECT DISTINCT fileid FROM Cns_file_replica)'
-+        if not updatedb or _log.getEffectiveLevel() <= logging.DEBUG:
-+            cursor.execute('SELECT fileid, status FROM Cns_file_metadata WHERE {0}'.format(where))
-+
-+            for row in cursor:
-+                fileid, status = row
-+                if status == 'D':
-+                    _log.info("found stuck deleted fileid %i", fileid)
-+                else:
-+                    _log.info("found fileid %i with no replica", fileid)
-+
-+        if updatedb:
-+            cursor.execute('DELETE FROM Cns_file_metadata WHERE {0}'.format(where))
-+            conn.commit()
-+
-+        _log.info("%supdated %i entries with no replica", dry_run, cursor.rowcount)
-+
-+    finally:
-+        cursor.close()
-+
-+
-+
-+def fix_replica_type(updatedb=False):
-+    """Replace missing replica type with primary"""
-+    _log.debug("fix_replica_type(updatedb=%s)", updatedb)
-+
-+    dry_run = '' if updatedb else 'dry-run: '
-+
-+    conn = DBConn.get('cns_db')
-+    cursor = conn.cursor()
-+    try:
-+        where = 'r_type IS NULL'
-+        if not updatedb or _log.getEffectiveLevel() <= logging.DEBUG:
-+            cursor.execute('SELECT rowid, fileid FROM Cns_file_replica WHERE {0}'.format(where))
-+
-+            for row in cursor:
-+                replicaid, fileid = row
-+                _log.info("found replica %i for fileid %s with undefined type", replicaid, fileid)
-+
-+        if updatedb:
-+            cursor.execute('UPDATE Cns_file_replica SET r_type = "P" WHERE {0}'.format(where))
-+            conn.commit()
-+
-+        _log.info("%supdated %i entries with missing replica type", dry_run, cursor.rowcount)
-+
-+    finally:
-+        cursor.close()
-+
-+
-+
-+def fix_symlink(updatedb=False):
-+    """Remove symlinks with a missing target"""
-+    _log.debug("fix_symlink(updatedb=%s)", updatedb)
-+
-+    dry_run = '' if updatedb else 'dry-run: '
-+
-+    conn = DBConn.get('cns_db')
-+    cursor = conn.cursor()
-+    try:
-+        where = 'filemode&8192=8192 AND fileid NOT IN (SELECT fileid FROM Cns_symlinks)'
-+        if not updatedb or _log.getEffectiveLevel() <= logging.DEBUG:
-+            cursor.execute('SELECT fileid FROM Cns_file_metadata WHERE {0}'.format(where))
-+
-+            for row in cursor:
-+                fileid = row[0]
-+                _log.info("found symlink %i with no target", fileid)
-+
-+        if updatedb:
-+            cursor.execute('DELETE FROM Cns_file_metadata WHERE {0}'.format(where))
-+            conn.commit()
-+
-+        _log.info("%supdated %i entries with missing symlink target", dry_run, cursor.rowcount)
-+
-+    finally:
-+        cursor.close()
-+
-+
-+
-+def fix_nlink(updatedb=False):
-+    """Recalculate number of objects in each directory"""
-+    _log.debug("fix_nlink(updatedb=%s)", updatedb)
-+
-+    
dry_run = '' if updatedb else 'dry-run: ' -+ -+ conn = DBConn.get('cns_db') -+ cursor = conn.cursor() -+ if updatedb: -+ ucursor = conn.cursor() -+ try: -+ sql = """SELECT fileid, a.nlink, b.nlink -+FROM Cns_file_metadata AS a -+JOIN (SELECT parent_fileid, count(*) AS nlink FROM Cns_file_metadata GROUP BY parent_fileid) AS b ON a.fileid=b.parent_fileid -+WHERE a.filemode&16384=16384 AND a.nlink!=b.nlink""" -+ cursor.execute(sql) -+ -+ for row in cursor: -+ fileid, snlink, cnlink = row -+ _log.info("found wrong nlink for directory %i (saved %i != counted %i)", fileid, snlink, cnlink) -+ -+ if updatedb: -+ sql = 'UPDATE Cns_file_metadata SET nlink = nlink + %s WHERE fileid = %s' -+ ucursor.execute(sql, (cnlink-snlink, fileid)) -+ conn.commit() -+ -+ _log.info("%supdated %i entries with wrong nlink", dry_run, cursor.rowcount) -+ -+ finally: -+ if updatedb: -+ ucursor.close() -+ cursor.close() -+ -+ -+ -+def fix_zero_dirs(updatelevels, updatedb=False): - """Set filesize to zero for directories below update level""" - _log.debug("fix_zero_dirs(updatelevels=%i, updatedb=%s)", updatelevels, updatedb) - -- conn = get_connection(conn_data, 'cns_db') -+ dry_run = '' if updatedb else 'dry-run: ' -+ -+ conn = DBConn.get('cns_db') - cursor = conn.cursor() - try: - currfileids = get_accounted_dirs(conn, updatelevels) -@@ -509,35 +385,35 @@ def fix_zero_dirs(conn_data, updatelevels, updatedb=False): - fileid, filesize = row - _log.info("dry-run: update directory %s with fileid %i (below %i levels) with size %i to zero", pathname.get_path(fileid), fileid, updatelevels, filesize) - -- _log.info("updated %i non-accounted directories to filesize zero (updatelevels=%i)", cursor.rowcount, updatelevels) -+ _log.info("%supdated %i non-accounted directories to filesize zero (updatelevels=%i)", dry_run, cursor.rowcount, updatelevels) - - finally: - cursor.close() - - - --def fix_spacetokens_by_path(conn_data, skip=[], updatedb=False): -+def fix_spacetokens_by_path(skip=[], updatedb=False): - # NOTE: this function doesn't check if storage pool associated with replica - # use right host:/fs defined in 'dpm_db'.'dpm_fs' table - _log.debug("fix_spacetokens_by_path(skip=%s, updatedb=%s)", skip, updatedb) - - dry_run = '' if updatedb else 'dry-run: ' - -- conn_dpm = get_connection(conn_data, 'dpm_db') -+ conn_dpm = DBConn.new('dpm_db') - qt = get_quotatoken_data(conn_dpm) - s2name = {} - for path, poolname, u_token, s_token, t_space, g_space, u_space in qt: - s2name[s_token] = u_token - conn_dpm.close() - -- conn = get_connection(conn_data, 'cns_db') -- conn_ss = get_connection(conn_data, 'cns_db', cclass=pymysql.cursors.SSCursor) -- conn_path = get_connection(conn_data, 'cns_db') -+ conn = DBConn.new('cns_db') -+ conn_ss = DBConn.new('cns_db') -+ conn_path = DBConn.new('cns_db') - pathname = CachedFullPath(conn_path) - - try: - # join metadata table with replicas to account size of all file replicas -- cursor = conn_ss.cursor() -+ cursor = conn_ss.cursor(pymysql_cursors.SSCursor) - ucursor = conn.cursor() - - sql = 'SELECT replica.rowid, replica.fileid, metadata.parent_fileid, metadata.name, replica.poolname, replica.setname, replica.xattr FROM Cns_file_metadata AS metadata JOIN Cns_file_replica AS replica ON metadata.fileid = replica.fileid' -@@ -636,7 +512,7 @@ def fix_spacetokens_by_path(conn_data, skip=[], updatedb=False): - ucursor.close() - cursor.close() - -- _log.info("processed %i records, updated %i", cnt_rows, updated) -+ _log.info("%sprocessed %i records, updated %i", dry_run, cnt_rows, updated) - - except 
Exception: - # query in progress that use SSCursor can be killed only by terminating DB connection -@@ -654,20 +530,20 @@ def fix_spacetokens_by_path(conn_data, skip=[], updatedb=False): - - - --def fix_dir_size_offline(conn_data, updatelevels, updatedb=False): -+def fix_dir_size_offline(updatelevels, updatedb=False): - _log.debug("fix_dir_size_offline(updatelevels=%i, updatedb=%s", updatelevels, updatedb) - - dry_run = '' if updatedb else 'dry-run: ' - -- conn_ss = get_connection(conn_data, 'cns_db', cclass=pymysql.cursors.SSCursor) -- conn_path = get_connection(conn_data, 'cns_db') -+ conn_ss = DBConn.new('cns_db') -+ conn_path = DBConn.new('cns_db') - pathname = CachedFullPath(conn_path) - - psize = {} - try: - # join metadata table with replicas to account size of all file replicas - sql = 'SELECT metadata.fileid, metadata.parent_fileid, metadata.filesize FROM Cns_file_metadata AS metadata JOIN Cns_file_replica AS replica ON metadata.fileid = replica.fileid WHERE replica.status = "-"' -- cursor = conn_ss.cursor() -+ cursor = conn_ss.cursor(pymysql_cursors.SSCursor) - cursor.execute(sql) - - cnt_rows = 0 -@@ -716,7 +592,7 @@ def fix_dir_size_offline(conn_data, updatelevels, updatedb=False): - conn_ss.close() - - # list of directories with updated "filesize" -- conn = get_connection(conn_data, 'cns_db') -+ conn = DBConn.new('cns_db') - dirids = get_accounted_dirs(conn, updatelevels) - - updated = 0 -@@ -749,11 +625,11 @@ def fix_dir_size_offline(conn_data, updatelevels, updatedb=False): - del(pathname) - conn_path.close() - -- _log.info("updated filesize for %i records", updated) -+ _log.info("%supdated filesize for %i records", dry_run, updated) - - - --def fix_dir_size(conn_data, updatelevels, updatedb=False): -+def fix_dir_size(updatelevels, updatedb=False): - _log.debug("fix_dir_size(updatelevels=%i, updatedb=%s", updatelevels, updatedb) - - dry_run = '' if updatedb else 'dry-run: ' -@@ -767,7 +643,7 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False): - - try: - -- conn = get_connection(conn_data, 'cns_db') -+ conn = DBConn.new('cns_db') - cursor = conn.cursor() - - # create temporary table -@@ -789,7 +665,7 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False): - """.format(tmptable)) - - _log.debug("create snapshot of required data from metadata and replica tables in %s", tmptable) -- conn_ss = get_connection(conn_data, 'cns_db', cclass=pymysql.cursors.SSCursor) -+ conn_ss = DBConn.new('cns_db') - try: - sql = """ - SELECT metadata.fileid as fileid, parent_fileid, filesize, -@@ -803,7 +679,7 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False): - WHERE metadata.filemode & {1} = {1} OR replica.status = "-" - """.format(stat.S_IFREG, stat.S_IFDIR, stat.S_IFLNK) - -- cursor_ss = conn_ss.cursor() -+ cursor_ss = conn_ss.cursor(pymysql_cursors.SSCursor) - cursor_ss.execute(sql) - - cnt_rows = 0 -@@ -831,8 +707,8 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False): - conn.close() - - _log.debug("read and caclulate directory size for %s", tmptable) -- conn_ss = get_connection(conn_data, 'cns_db', cclass=pymysql.cursors.SSCursor) -- conn_path = get_connection(conn_data, 'cns_db') -+ conn_ss = DBConn.new('cns_db') -+ conn_path = DBConn.new('cns_db') - pathname = CachedFullPath(conn_path, table=tmptable, fileid_only=True) - - psize = {} -@@ -840,7 +716,7 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False): - try: - # join metadata table with replicas to account size of all file replicas - sql = "SELECT fileid, parent_fileid, filesize, fileflags FROM 
{0}".format(tmptable) -- cursor = conn_ss.cursor() -+ cursor = conn_ss.cursor(pymysql_cursors.SSCursor) - cursor.execute(sql) - - cnt_rows = 0 -@@ -913,7 +789,7 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False): - else: - pathnames[fileid] = "fileidpath[{0}]".format(','.join([ str(x) for x in pathnames[fileid] ])) - -- conn = get_connection(conn_data, 'cns_db') -+ conn = DBConn.new('cns_db') - conn.autocommit(False) - cursor = conn.cursor() - -@@ -939,7 +815,7 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False): - conn_path.close() - - finally: -- conn = get_connection(conn_data, 'cns_db') -+ conn = DBConn.new('cns_db') - cursor = conn.cursor() - cursor.execute("SHOW TABLES") - for row in cursor: -@@ -949,11 +825,11 @@ def fix_dir_size(conn_data, updatelevels, updatedb=False): - cursor.close() - conn.close() - -- _log.info("updated filesize for %i records", updated) -+ _log.info("%supdated filesize for %i records", dry_run, updated) - - - --def fix_spacetokens_size(conn_data, updatedb=False): -+def fix_spacetokens_size(updatedb=False): - """Update spacetoken usable space according associated directory size. - Consistent results are not completely guaranteed with running DPM/dmlite - daemons, but this update is very fast and discrepancies should be very -@@ -963,8 +839,8 @@ def fix_spacetokens_size(conn_data, updatedb=False): - - dry_run = '' if updatedb else 'dry-run: ' - -- conn = get_connection(conn_data, 'cns_db') -- conn_dpm = get_connection(conn_data, 'dpm_db') -+ conn = DBConn.new('cns_db') -+ conn_dpm = DBConn.new('dpm_db') - - qt = get_quotatoken_data(conn_dpm) - -@@ -1039,15 +915,15 @@ def fix_spacetokens_size(conn_data, updatedb=False): - conn_dpm.close() - conn.close() - -- _log.info("updated size for %i spacetokens", updated) -+ _log.info("%supdated size for %i spacetokens", dry_run, updated) - - - - - #===================================================================== --# main -+# main - legacy interface - #===================================================================== --if __name__ == '__main__': -+def main(argv): - # basic logging configuration - streamHandler = logging.StreamHandler(sys.stderr) - streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S")) -@@ -1100,8 +976,7 @@ if __name__ == '__main__': - return "\n{0}\n".format("\n".join(sections)) - - # default config values - log level is not yet set while calling these functions -- guess_nsconfig = guess_config_files() -- guess_updatelevels = get_updatelivels(DEFAULT_DOME_CONFIG) -+ guess_updatelevels = get_updatelevels(DEFAULT_DOME_CONFIG) - - # command line arguments - usage = "usage: %prog [options]" -@@ -1149,14 +1024,14 @@ if __name__ == '__main__': - "more than a second." 
- ) - epilog.append("EXAMPLE # dry-run with default configuration (no DB updates)") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --fix=zero-dir") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --fix=spacetoken") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --fix=dir-size") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --fix=dir-size-offline") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --fix=spacetoken-size") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --fix=zero-dir") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --fix=spacetoken") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --fix=dir-size") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --fix=dir-size-offline") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --fix=spacetoken-size") - epilog.append("EXAMPLE ") - epilog.append("EXAMPLE # dry-run multiple DB checks") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --fix=zero-dir,spacetoken,dir-size,spacetoken-size") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --fix=zero-dir,spacetoken,dir-size,spacetoken-size") - epilog.append("EXAMPLE ") - epilog.append("EXAMPLE # database updates with --updatedb option must be executed") - epilog.append("EXAMPLE # while DPM is offline for fixes with \"offline\" suffix") -@@ -1169,7 +1044,7 @@ if __name__ == '__main__': - epilog.append("EXAMPLE # stop DPM services on headnode with systemd (e.g. CentOS7)") - epilog.append("EXAMPLE systemctl stop httpd rfiod srmv2.2 dpnsdaemon dpm dpm-gsiftp xrootd@dpmredir") - epilog.append("EXAMPLE # execute DB directory and spacetoken size update (can take long time)") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --log-file=/var/log/dmlite-mysql-dirspaces.log --fix=zero-dir,spacetoken,dir-size-offline,spacetoken-size --updatedb") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --log-file=/var/log/dmlite-mysql-dirspaces.log --fix=zero-dir,spacetoken,dir-size-offline,spacetoken-size --updatedb") - epilog.append("EXAMPLE # stop DPM services on headnode with init scripts (e.g. SL6)") - epilog.append("EXAMPLE service httpd start; service rfiod start; service srmv2.2 start; service dpnsdaemon start; service dpm start; service dpm-gsiftp start; service xrootd start") - epilog.append("EXAMPLE # stop DPM services on headnode with systemd (e.g. 
CentOS7)") -@@ -1177,11 +1052,11 @@ if __name__ == '__main__': - epilog.append("EXAMPLE ") - epilog.append("EXAMPLE # standard consistency checks and updates can be executed with running DPM") - epilog.append("EXAMPLE # this can be also used with online legacy DPM before migration to DPM DOME") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --log-file=/var/log/dmlite-mysql-dirspaces.log --updatedb") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --log-file=/var/log/dmlite-mysql-dirspaces.log --updatedb") - epilog.append("EXAMPLE ") - epilog.append("EXAMPLE # assigning quotatoken to existing directory with stored files") - epilog.append("EXAMPLE # must be followed by fixing spacetoken data in the database") -- epilog.append("EXAMPLE python /usr/bin/dmlite-mysql-dirspaces.py --log-file=/var/log/dmlite-mysql-dirspaces.log --fix=spacetoken --updatedb") -+ epilog.append("EXAMPLE python dmlite-mysql-dirspaces.py --log-file=/var/log/dmlite-mysql-dirspaces.log --fix=spacetoken --updatedb") - description = "Update and synchronize space and spacetoken data for DOME DPM." - parser = optparse.OptionParser(usage=usage, description=description, version="%prog", epilog=epilog, formatter=IndentedHelpFormatterWithEpilogExamples()) - parser.add_option("-v", "--verbose", dest="loglevel", action="callback", callback=opt_set_loglevel, default=-10, help="each \"v\" increases the verbosity level") -@@ -1192,7 +1067,7 @@ if __name__ == '__main__': - parser.add_option("--log-file", dest="logfile", metavar="FILE", help="set log file, default: %default") - parser.add_option("--log-size", dest="logsize", type="int", default=10*1024*1024, help="maximum size of log file, default: %default") - parser.add_option("--log-backup", dest="logbackup", type="int", default=4, help="number of log backup files, default: %default") -- parser.add_option("-c", "--nsconfig", dest="nsconfig", default=guess_nsconfig, help="NSCONFIG file with sql connection info, default: %default") -+ parser.add_option("-c", "--nsconfig", dest="nsconfig", default=None, help="LEGACY OPTION, NO LONGER IN USE (NSCONFIG file with sql connection info)") - parser.add_option('--headnode', dest='headnode', default=socket.getfqdn(), help="DPM headnode - verify offline for unsafe DB updates (default: %default)") - parser.add_option('--dbhost', dest='dbhost', default=None, help="database host, if no NSCONFIG given") - parser.add_option('--dbuser', dest='dbuser', default=None, help="database user, if no NSCONFIG given") -@@ -1206,7 +1081,7 @@ if __name__ == '__main__': - parser.add_option('--skip-spacetokens', dest='skip_spacetokens', action='append', default=[], help="skip fixes for spacetoken, default: %default") - - -- (options, args) = parser.parse_args() -+ (options, args) = parser.parse_args(argv[1:]) - - if options.logfile == '-': - _log.removeHandler(streamHandler) -@@ -1223,10 +1098,11 @@ if __name__ == '__main__': - import getpass - import inspect - import hashlib -- _log.info("command: %s", " ".join(sys.argv)) -- _log.info("script: %s", os.path.abspath(inspect.getfile(inspect.currentframe()))) -+ script_name = os.path.abspath(inspect.getfile(inspect.currentframe())) -+ _log.info("command: %s", " ".join(argv)) -+ _log.info("script: %s", script_name) - _log.info("version: %s", __version__) -- _log.info("sha256: %s", hashlib.sha256(open(__file__).read().encode('utf-8')).hexdigest()) -+ _log.info("sha256: %s", hashlib.sha256(open(script_name, "rb").read()).hexdigest()) - _log.info("python: %s", str(sys.version_info)) - 
_log.info("user: %s@%s", getpass.getuser(), socket.gethostname())
-     _log.info("system load: %s", str(os.getloadavg()))
-@@ -1235,11 +1111,11 @@
-     fix_names = options.fix.split(',')
-     if len(fix_names) == 0:
-         _log.error("no fix specified, please use the --help command line option to get more information")
--        sys.exit(1)
-+        return 1
-     for fix in fix_names:
-         if fix not in [ 'zero-dir', 'spacetoken', 'dir-size', 'dir-size-offline', 'spacetoken-size' ]:
-             _log.error("unknown fix \"%s\", use the --help command line option to get more information", fix)
--            sys.exit(1)
-+            return 1
- 
-     if not options.force and len([x for x in fix_names if x.endswith('offline')]):
-         _log.info('DPM must be offline for requested safe DB updates')
-@@ -1275,35 +1151,33 @@
- 
-         if len(results) > 0:
-             _log.error("DPM must be offline for requested DB updates %s (headnode %s open ports %s) or you can add --force which doesn't guarantee correct directory/spacetoken size updates", ','.join(fix_names), options.headnode, ','.join([ str(x) for x in sorted(results) ]))
--            sys.exit(1)
--
--    conn_data = {}
--    if options.dbhost == None:
--        if options.nsconfig == None:
--            _log.error("no database configuration specified, use either nsconfig or db* command line options")
--            sys.exit(1)
-+            return 1
- 
--        conn_data = get_conn_data(options.nsconfig)
--
--    else:
-+    if options.dbhost != None:
-+        # try to use database connection data from CLI
-         if options.dbuser == None or options.dbpwd == None:
-             _log.error("no database user or password defined as command line options")
--            sys.exit(1)
-+            return 1
- 
--        conn_data['host'] = options.dbhost
--        conn_data['user'] = options.dbuser
--        conn_data['pass'] = options.dbpwd
--        conn_data['cns_db'] = options.dbname
--        conn_data['dpm_db'] = options.dbdpm
-+        DBConn.configure('user', {
-+            'host': options.dbhost,
-+            #'port': 0,
-+            'user': options.dbuser,
-+            'pass': options.dbpwd,
-+            'cns_db': options.dbname,
-+            'dpm_db': options.dbdpm,
-+        })
- 
--    _log.info("%s %s for %i levels of the DPM database (host=%s, cns_db=%s, dpm_db=%s)", 'fix' if options.updatedb else 'dry-run', ', '.join(fix_names), options.updatelevels, conn_data['host'], conn_data['cns_db'], conn_data['dpm_db'])
-+    _log.info("%s %s for %i levels of the DPM database", 'fix' if options.updatedb else 'dry-run', ', '.join(fix_names), options.updatelevels)
- 
-     starttime = datetime.datetime.now()
--    if 'zero-dir' in fix_names: fix_zero_dirs(conn_data, options.updatelevels, updatedb=options.updatedb)
--    if 'spacetoken' in fix_names: fix_spacetokens_by_path(conn_data, skip=options.skip_spacetokens, updatedb=options.updatedb)
--    if 'dir-size-offline' in fix_names: fix_dir_size_offline(conn_data, options.updatelevels, updatedb=options.updatedb)
--    if 'dir-size' in fix_names: fix_dir_size(conn_data, options.updatelevels, updatedb=options.updatedb)
--    if 'spacetoken-size' in fix_names: fix_spacetokens_size(conn_data, updatedb=options.updatedb)
-+    if 'zero-dir' in fix_names: fix_zero_dirs(options.updatelevels, updatedb=options.updatedb)
-+    if 'spacetoken' in fix_names: fix_spacetokens_by_path(skip=options.skip_spacetokens, updatedb=options.updatedb)
-+    if 'dir-size-offline' in fix_names: fix_dir_size_offline(options.updatelevels, updatedb=options.updatedb)
-+    if 'dir-size' in fix_names: fix_dir_size(options.updatelevels, updatedb=options.updatedb)
-+    if 'spacetoken-size' in fix_names: fix_spacetokens_size(updatedb=options.updatedb)
-     endtime = datetime.datetime.now()
- 
-     _log.info("done (time: %ss)",
(endtime-starttime).seconds) -+ -+ return os.EX_OK -diff --git a/shell/src/dbutils.py b/shell/src/dbutils.py -index bb676acc..92d7de9d 100644 ---- a/shell/src/dbutils.py -+++ b/shell/src/dbutils.py -@@ -2,80 +2,511 @@ from __future__ import absolute_import - from __future__ import print_function - from __future__ import division - -+import os - import sys -+import re -+import threading - import logging -+ -+# compatibility with MySQL modules available on SLC6, CentOS7, CentOS8 - try: -- import pymysql as MySQLdb -+ import pymysql -+ import pymysql.cursors as pymysql_cursors - except ImportError: - try: -- import MySQLdb -+ import MySQLdb as pymysql -+ import MySQLdb.cursors as pymysql_cursors -+ # implement functions missing in MySQLdb -+ if not hasattr(pymysql_cursors.BaseCursor, '__enter__'): -+ def pymysql_cursors__enter__(self): -+ return self -+ pymysql_cursors.BaseCursor.__enter__ = pymysql_cursors__enter__ -+ if not hasattr(pymysql_cursors.BaseCursor, '__exit__'): -+ def pymysql_cursors__exit__(self, *exc_info): -+ del exc_info -+ self.close() -+ pymysql_cursors.BaseCursor.__exit__ = pymysql_cursors__exit__ - except ImportError: -- sys.exit("Could not import MySQLdb, please install the MySQL Python module.") -+ sys.exit("Could not import MySQL module, please install the MySQL Python module (python2-mysql/python3-mysql or MySQL-python).") -+ - - _log = logging.getLogger('dmlite-shell') - - -+ -+class DBConn(object): -+ """Simple class that provides access to the shared -+ or new database connections to cns_db and dpm_db""" -+# _instance = None -+ _conn = {} -+ _mode = None -+ _data = { -+ 'host': 'localhost', -+ 'port': 3306, -+ 'user': 'dpm', -+ 'pass': None, -+ 'cns_db': 'cns_db', -+ 'dpm_db': 'dpm_db', -+ } -+ _connections = [] -+ _lock = threading.Lock() -+ -+ _config_obsolete_cns_filename = '/usr/etc/NSCONFIG' -+ _config_obsolete_dpm_filename = '/usr/etc/DPMCONFIG' -+ _config_legacy_filename = '/etc/dmlite.conf.d/mysql.conf' -+ _config_dome_filename = '/etc/domehead.conf' -+ -+ _config_legacy_map = { -+ 'MySqlHost': 'host', -+ 'MySqlPort': 'port', -+ 'MySqlUsername': 'user', -+ 'MySqlPassword': 'pass', -+ 'NsDatabase': 'cns_db', -+ 'DpmDatabase': 'dpm_db', -+ } -+ _config_dome_map = { -+ 'head.db.host:': 'host', -+ 'head.db.port:': 'port', -+ 'head.db.user:': 'user', -+ 'head.db.password:': 'pass', -+ 'head.db.cnsdbname:': 'cns_db', -+ 'head.db.dpmdbname:': 'dpm_db', -+ } -+ -+ -+ @classmethod -+ def get(cls, db): -+ """return shared connection for given database""" -+ _log.debug("get database connection for %s", db) -+ -+ if not cls._mode: -+ cls.configure() -+ -+ if db not in cls._conn: -+ # no global connection exist, create first connection -+ with cls._lock: -+ if db not in cls._conn: -+ conn = pymysql.connect( -+ host=cls._data['host'], user=cls._data['user'], -+ passwd=cls._data['pass'], db=cls._data[db]) -+ cls._connections.append(conn) -+ cls._conn[db] = conn -+ -+ return cls._conn[db] -+ -+ -+ @classmethod -+ def new(cls, db): -+ """return new connection for given database""" -+ _log.debug("new database connection for %s", db) -+ -+ if not cls._mode: -+ cls.configure() -+ -+ conn = pymysql.connect( -+ host=cls._data['host'], user=cls._data['user'], -+ passwd=cls._data['pass'], db=cls._data[db]) -+ -+ with cls._lock: -+ cls._connections.append(conn) -+ -+ return conn -+ -+ -+ @classmethod -+ def close(cls, conn=None): -+ with cls._lock: -+ if conn: -+ if conn in cls._connections: -+ try: -+ if conn.open: -+ conn.close() -+ cls._connections.remove(conn) -+ except 
pymysql.Error as e:
-+                    _log.warn("failed to close database connection %s: %s", str(conn), str(e))
-+            else:
-+                _log.debug("unable to find db connection %s to close", str(conn))
-+        else:
-+            # close/remove all connections (iterate over a copy,
-+            # because the list is modified inside the loop)
-+            for conn in list(cls._connections):
-+                try:
-+                    if conn.open:
-+                        conn.close()
-+                    cls._connections.remove(conn)
-+                except pymysql.Error as e:
-+                    _log.warn("failed to close database connection %s: %s", str(conn), str(e))
-+
-+
-+    @classmethod
-+    def configure(cls, mode='auto', data=None):
-+        """Configure parameters for database connection"""
-+        if mode not in ['auto', 'dome', 'legacy', 'obsolete', 'user']:
-+            raise Exception("unknown database configuration mode '%s'" % mode)
-+
-+        with cls._lock:
-+            if not cls._mode:
-+                _log.debug("configure database in %s mode", mode)
-+            else:
-+                _log.debug("database connection already configured, skipping %s", mode)
-+
-+            if not cls._mode and mode in ['auto', 'user']:
-+                if data:
-+                    cls._data.update(data)
-+                    cls._mode = 'user'
-+                elif mode == 'user':
-+                    raise Exception("missing connection data parameter")
-+
-+            if not cls._mode and mode in ['auto', 'legacy']:
-+                if os.path.exists(DBConn._config_legacy_filename) and os.stat(DBConn._config_legacy_filename).st_size > 0:
-+                    cls._data.update(DBConn._read_config(DBConn._config_legacy_filename, DBConn._config_legacy_map))
-+                    cls._mode = 'legacy'
-+                elif mode == 'legacy':
-+                    raise Exception("missing or empty legacy configuration file %s" % DBConn._config_legacy_filename)
-+
-+            if not cls._mode and mode in ['auto', 'dome']:
-+                if os.path.exists(DBConn._config_dome_filename) and os.stat(DBConn._config_dome_filename).st_size > 0:
-+                    cls._data.update(DBConn._read_config(DBConn._config_dome_filename, DBConn._config_dome_map))
-+                    cls._mode = 'dome'
-+                elif mode == 'dome':
-+                    raise Exception("missing or empty dome configuration file %s" % DBConn._config_dome_filename)
-+
-+            if not cls._mode and mode in ['auto', 'obsolete']:
-+                if os.path.exists(DBConn._config_obsolete_cns_filename) and os.path.exists(DBConn._config_obsolete_dpm_filename):
-+                    cls._data.update(DBConn._read_dpm_config(DBConn._config_obsolete_cns_filename, 'cns_db'))
-+                    cls._data.update(DBConn._read_dpm_config(DBConn._config_obsolete_dpm_filename, 'dpm_db'))
-+                    cls._mode = 'obsolete'
-+                elif mode == 'obsolete':
-+                    raise Exception("missing or empty obsolete configuration files %s and %s" % (DBConn._config_obsolete_cns_filename, DBConn._config_obsolete_dpm_filename))
-+
-+            if not cls._mode and mode == 'auto':
-+                raise Exception("auto database configuration mode did not succeed")
-+
-+            _log.debug("database configured using %s config: %s", cls._mode, ", ".join(["%s=%s" % (k, v) for k, v in cls._data.items() if k != 'pass']))
-+
-+
-+    @staticmethod
-+    def _read_config(filename, cfgmap):
-+        _log.debug("reading database configuration from %s", filename)
-+
-+        ret = {}
-+
-+        with open(filename) as f:
-+            for line in f.readlines():
-+                for k, v in cfgmap.items():
-+                    if line.startswith(k):
-+                        ret[v] = line[len(k):].strip()
-+
-+        if 'port' in ret:
-+            ret['port'] = int(ret['port'])
-+            if ret['port'] == 0:
-+                ret['port'] = 3306
-+
-+        return ret
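For orientation, a minimal sketch of how callers are expected to use the DBConn helper above (it assumes a head node with /etc/domehead.conf in place; the SELECT is purely illustrative, the table name is the one used throughout this patch):

    from dmliteshell.dbutils import DBConn

    DBConn.configure('dome')         # read connection data from /etc/domehead.conf
    conn = DBConn.get('cns_db')      # shared connection, created on first use
    with conn.cursor() as cursor:    # works for pymysql and, via the shim above, MySQLdb
        cursor.execute('SELECT COUNT(*) FROM Cns_file_metadata')
        print(cursor.fetchone()[0])
    DBConn.close()                   # close all connections handed out by DBConn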
-+
-+
-+    @staticmethod
-+    def _read_dpm_config(filename, db):
-+        _log.debug("reading database configuration from %s", filename)
-+
-+        ret = {}
-+        res = None
-+
-+        with open(filename) as f:
-+            config_line = f.readline().strip()
-+            nsre = re.compile(r"(.*)/(.*)@([^/]*)(?:/(.*))?")
-+            res = nsre.match(config_line)
-+            if res == None:
-+                raise Exception("bad line in DPM config '%s', doesn't match re '%s'" % (filename, nsre))
-+
-+        ret['user'] = res.group(1)
-+        ret['pass'] = res.group(2)
-+        ret['host'] = res.group(3)
-+        if res.group(4):
-+            ret[db] = res.group(4)
-+
-+        return ret
-+
-+
-+
-+class CachedFullPath(object):
-+    """DPM file metadata stored in the database has a pointer only
-+    to the parent directory, so to build the whole path it is necessary
-+    to recursively query all parent directories up to the root "/".
-+
-+    Although these queries are pretty cheap, they are done for
-+    each file, and even with small latencies (especially with the
-+    database on a dedicated server) they can add up to quite some time.
-+
-+    This class not only caches past queries, but also limits the
-+    size of the cached data so that it does not exhaust all memory
-+    while dumping a big DPM database with a lot of files.
-+    """
-+
-+    def __init__(self, conn=None, maxsize=1000000, table='Cns_file_metadata', fileid_only=False):
-+        self._cache = {}
-+        self._cache_path = {}
-+        if conn:
-+            self._conn = conn
-+        else:
-+            self._conn = DBConn.get('cns_db')
-+        self._maxsize = maxsize
-+        self._table = table
-+        self._fileid_only = fileid_only
-+        self._ntotal = 0
-+        self._ncached = 0
-+        self._nqueries = 0
-+        self._ncleanup = 0
-+
-+    def __del__(self):
-+        if _log:
-+            _log.info("path lookup cache usage: total %i, cached %i, queries %i, cleanup %i", self._ntotal, self._ncached, self._nqueries, self._ncleanup)
-+
-+    def _fill_cache(self, fileid):
-+        """Recursively get full path for given fileid (simple - could be used to validate _fill_cache_multi)"""
-+        self._ntotal += 1
-+
-+        if fileid in self._cache:
-+            self._ncached += 1
-+            return
-+
-+        if len(self._cache) >= self._maxsize:
-+            _log.debug("fullpath cache too big (%i), dropping cached records...", len(self._cache))
-+            self._ncleanup += 1
-+            del(self._cache)
-+            del(self._cache_path)
-+            self._cache = {}
-+            self._cache_path = {}
-+
-+        sql = "SELECT parent_fileid, name FROM %s WHERE fileid=%%s" % self._table
-+        if self._fileid_only:
-+            sql = "SELECT parent_fileid FROM %s WHERE fileid=%%s" % self._table
-+
-+        cursor = self._conn.cursor()
-+        cursor.execute(sql, (fileid, ))
-+        res = cursor.fetchone()
-+        cursor.close()
-+
-+        self._nqueries += 1
-+
-+        if _log.getEffectiveLevel() < logging.DEBUG:
-+            _log.debug("query parent directory '%s': %s", sql, res)
-+
-+        if res == None:
-+            if fileid != 0:
-+                _log.info("db inconsistency: could not find path for fileid %i (most likely the entry is orphan)", fileid)
-+            else:
-+                _log.debug("no parent for top level directory 0")
-+            self._cache[fileid] = None
-+            return
-+
-+        parentid = res[0]
-+        name = str(fileid) if self._fileid_only else res[1]
-+
-+        if parentid == 0:  # top level directory
-+            self._cache[fileid] = [fileid]
-+            self._cache_path[fileid] = ''
-+            return
-+
-+        if name.find('/') != -1:  # this script doesn't support '/' characters in metadata name
-+            raise Exception("fileid {0} contains slash character in its name '{1}'".format(fileid, name))
-+
-+        self._fill_cache(parentid)
-+        if parentid not in self._cache or self._cache[parentid] == None:
-+            # db inconsistency already detected and logged
-+            self._cache[fileid] = None
-+            return
-+
-+        self._cache[fileid] = self._cache[parentid] + [fileid]
-+        if not self._fileid_only:
-+            fullpath = "{0}/{1}".format(self._cache_path[parentid], name)
-+            self._cache_path[fileid] = fullpath
-+
-+    def _fill_cache_multi(self, fileids):
-+        """Reduce impact of query latency by resolving paths for multiple
-+        fileids.
Max number of queries still corresponds to the max path -+ depth, but all fileids are resolved at the same time.""" -+ self._ntotal += len(fileids) -+ -+ if len(self._cache) + len(fileids) >= self._maxsize: -+ _log.debug("fullpath cache too big (%i+%i), dropping cached records...", len(self._cache), len(fileids)) -+ self._ncleanup += 1 -+ del(self._cache) -+ del(self._cache_path) -+ self._cache = {} -+ self._cache_path = {} -+ -+ tores = set() -+ id2name = {} -+ id2parent = {} -+ for fileid in fileids: -+ if fileid in self._cache: -+ if self._cache[fileid] != None: -+ self._ncached += len(self._cache[fileid]) -+ else: -+ self._ncached += 1 -+ else: -+ tores.add(fileid) -+ -+ if len(tores) > 0: -+ cursor = self._conn.cursor() -+ -+ while len(tores) > 0: -+ -+ sql = "SELECT fileid, parent_fileid, name FROM Cns_file_metadata WHERE fileid IN ({0})".format(','.join([ str(x) for x in tores ])) -+ if self._fileid_only: -+ sql = "SELECT fileid, parent_fileid FROM Cns_file_metadata WHERE fileid IN ({0})".format(','.join([ str(x) for x in tores ])) -+ -+ cursor.execute(sql) -+ -+ self._nqueries += 1 -+ tores = set() -+ -+ for row in cursor: -+ fileid = row[0] -+ parentid = row[1] -+ name = str(row[0]) if self._fileid_only else row[2] -+ -+ if _log.getEffectiveLevel() < logging.DEBUG: -+ _log.debug("query parent directory '%s': %s", sql, row) -+ -+ if parentid == 0: -+ name = '' -+ -+ if name.find('/') != -1: # this script doesn't support '/' characters in metadata name -+ raise Exception("fileid {0} contains slash character in its name '{1}'".format(fileid, name)) -+ -+ id2name[fileid] = name -+ id2parent[fileid] = parentid -+ -+ if parentid == 0: -+ pass -+ elif parentid in self._cache: -+ if self._cache[parentid] != None: -+ self._ncached += len(self._cache[parentid]) -+ else: -+ self._ncached += 1 -+ elif parentid not in id2parent: -+ tores.add(parentid) -+ -+ cursor.close() -+ -+ for fileid in fileids: -+ if fileid in self._cache: continue -+ -+ currid = fileid -+ revids = [] -+ -+ while True: -+ if currid in self._cache: -+ if self._cache[currid] != None: -+ self._ncached += len(self._cache[currid]) -+ else: -+ self._ncached += 1 -+ break -+ elif currid in id2parent: -+ if currid in revids: -+ revids.reverse() -+ fullpath = '/'.join([id2name[x] for x in revids]) -+ _log.info("db inconsistency: detected directory loop for fileid %i parent %i %s", fileid, currid, fullpath) -+ for revid in revids: self._cache[revid] = None -+ revids = [] -+ break -+ revids.append(currid) -+ currid = id2parent[currid] -+ if currid == 0: # root directory -+ break -+ else: -+ if fileid != 0: -+ if fileid != currid: -+ revids.reverse() -+ fullpath = '/'.join([id2name[x] for x in revids]) -+ _log.info("db inconsistency: could not find path for fileid %i parent %i (most likely the entry is orphan, path %s)", fileid, currid, fullpath) -+ else: -+ _log.info("db inconsistency: could not find path for fileid %i (most likely the entry is orphan)", fileid) -+ else: -+ _log.debug("no parent for top level directory 0") -+ for revid in revids: self._cache[revid] = None -+ revids = [] -+ break -+ -+ if len(revids) > 0: -+ revids.reverse() -+ for i, revid in enumerate(revids): -+ if currid == 0: -+ self._cache[revid] = revids[:i+1] -+ else: -+ self._cache[revid] = self._cache[currid] + revids[:i+1] -+ if not self._fileid_only: -+ if revid in self._cache_path: continue -+ pathsuffix = '/'.join([id2name[x] for x in revids[:i+1]]) -+ if currid == 0: -+ self._cache_path[revid] = pathsuffix -+ elif currid in self._cache_path: -+ 
self._cache_path[revid] = "{0}/{1}".format(self._cache_path[currid], pathsuffix) -+ -+ -+ def get_ids(self, fileid): -+ self._fill_cache_multi(fileid) -+ #self._fill_cache_multi([fileid]) -+ -+ return self._cache.get(fileid) -+ -+ -+ def get_path(self, fileid): -+ if self._fileid_only: -+ raise Exception("can't get directory path with {0} instance in fileid_only mode".format(self.__class__.__name__)) -+ -+ self._fill_cache(fileid) -+ #self._fill_cache_multi([fileid]) -+ -+ return self._cache_path.get(fileid) -+ -+ -+ def get_ids_multi(self, fileids): -+ ret = {} -+ -+ self._fill_cache_multi(fileids) -+ -+ for fileid in fileids: -+ if fileid not in self._cache: continue # db inconsistency already reported -+ ret[fileid] = self._cache[fileid] -+ -+ return ret -+ -+ -+ def get_path_multi(self, fileids): -+ if self._fileid_only: -+ raise Exception("can't get directory path with {0} instance in fileid_only mode".format(self.__class__.__name__)) -+ -+ ret = {} -+ -+ self._fill_cache_multi(fileids) -+ -+ for fileid in fileids: -+ if fileid not in self._cache_path: continue # db inconsistency already reported -+ ret[fileid] = self._cache_path[fileid] -+ -+ return ret -+ -+ -+ - class DPMDB(object): -+ """Legacy DPMDB object used by interpreter module""" - -- def __init__(self, interpreter): -- #check where to get the mysql conf from -- username = None -- password = None -- host = None -- port = 0 -- nsDBName = 'cns_db' -- dpmDBName = 'dpm_db' -+ def __init__(self, legacy=False): - self.dirhash = {} - -- catalogImpl = interpreter.catalog.getImplId() -- if 'DomeAdapterHeadCatalog' not in catalogImpl: -- conf = open('/etc/dmlite.conf.d/mysql.conf', 'r') -- for line in conf: -- if line.startswith("MySqlHost"): -- host = line[len("MySqlHost")+1:len(line)].strip() -- if line.startswith("MySqlUsername"): -- username = line[len("MySqlUsername")+1:len(line)].strip() -- if line.startswith("MySqlPassword"): -- password = line[len("MySqlPassword")+1:len(line)].strip() -- if line.startswith("MySqlPort"): -- port = line[len("MySqlPort")+1:len(line)].strip() -- if line.startswith("NsDatabase"): -- nsDBName = line[len("NsDatabase")+1:len(line)].strip() -- if line.startswith("DpmDatabase"): -- dpmDBName = line[len("DpmDatabase")+1:len(line)].strip() -- conf.close() -- else: -- conf = open('/etc/domehead.conf', 'r') -- for line in conf: -- if line.startswith("head.db.host:"): -- host = line[len("head.db.host:"):len(line)].strip() -- if line.startswith("head.db.user:"): -- username = line[len("head.db.user:"):len(line)].strip() -- if line.startswith("head.db.password:"): -- password = line[len("head.db.password:"):len(line)].strip() -- if line.startswith("head.db.port:"): -- port = line[len("head.db.port:"):len(line)].strip() -- if line.startswith("head.db.cnsdbname:"): -- nsDBName = line[len("head.db.cnsdbname:"):len(line)].strip() -- if line.startswith("head.db.dpmdbname:"): -- dpmDBName = line[len("head.db.dpmdbname:"):len(line)].strip() -- conf.close() -- -- if int(port) == 0: -- port = 3306 -- else: -- port = int(port) - try: -- dpmdb = MySQLdb.connect(host=host, user=username, passwd=password, db=dpmDBName, port=port) -- self.dpmdb_c = dpmdb.cursor() -- except MySQLdb.Error as e: -- _log.error("Database connection to %s failed, error %d: %s", dpmDBName, e.args[0], e.args[1]) -- raise e -- try: -- nsdb = MySQLdb.connect(host=host, user=username, passwd=password, db=nsDBName, port=port) -- self.nsdb_c = nsdb.cursor() -- except MySQLdb.Error as e: -- _log.error("Database connection to %s failed, error %d: %s", 
nsDBName, e.args[0], e.args[1]) -+ if legacy: -+ DBConn.configure('legacy') -+ else: -+ DBConn.configure('dome') -+ -+ # separate connections for dpm_db and cns_db -+ self.dpmdb_c = DBConn.new('dpm_db').cursor() -+ self.nsdb_c = DBConn.new('cns_db').cursor() -+ -+ except pymysql.Error as e: -+ _log.error("database connection failed, error %d: %s", e.args[0], e.args[1]) - raise e - - def getReplicasInServer(self, server): -@@ -89,9 +520,9 @@ class DPMDB(object): - ''' % {"host": server}) - ret = list() - for row in self.nsdb_c.fetchall(): -- ret.append(FileReplica(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])) -+ ret.append(FileReplica(row[0], row[1], row[2], row[3], row[4].decode('utf-8'), row[5], row[6], row[7], row[8], row[9], row[10])) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database server '%s' replica query failed, error %d: %s", server, e.args[0], e.args[1]) - raise e - -@@ -106,9 +537,9 @@ class DPMDB(object): - ''' % {"fs": fsname, "host": server}) - ret = list() - for row in self.nsdb_c.fetchall(): -- ret.append(FileReplica(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])) -+ ret.append(FileReplica(row[0], row[1], row[2], row[3], row[4].decode('utf-8'), row[5], row[6], row[7], row[8], row[9], row[10])) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database server '%s' filesystem '%s' replica query failed, error %d: %s", server, fsname, e.args[0], e.args[1]) - raise e - -@@ -124,9 +555,9 @@ class DPMDB(object): - ''' % {"fs": fsname, "host": server, "fold": folder}) - ret = list() - for row in self.nsdb_c.fetchall(): -- ret.append(FileReplica(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])) -+ ret.append(FileReplica(row[0], row[1], row[2], row[3], row[4].decode('utf-8'), row[5], row[6], row[7], row[8], row[9], row[10])) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database server '%s' filesystem '%s' folder '%s' replica query failed, error %d: %s", server, fsname, folder, e.args[0], e.args[1]) - raise e - -@@ -141,9 +572,9 @@ class DPMDB(object): - ''' % {"poolname": poolname}) - ret = list() - for row in self.nsdb_c.fetchall(): -- ret.append(FileReplica(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])) -+ ret.append(FileReplica(row[0], row[1], row[2], row[3], row[4].decode('utf-8'), row[5], row[6], row[7], row[8], row[9], row[10])) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database pool '%s' replica query failed, error %d: %s", poolname, e.args[0], e.args[1]) - raise e - -@@ -159,7 +590,7 @@ class DPMDB(object): - for row in self.nsdb_c.fetchall(): - ret.append(row[0]) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database pool from replica '%s' query failed, error %d: %s", sfn, e.args[0], e.args[1]) - raise e - -@@ -175,7 +606,7 @@ class DPMDB(object): - for row in self.dpmdb_c.fetchall(): - ret.append(FileSystem(row[0], row[1], row[2], row[3], row[4])) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for pool '%s' filesystems failed, error %d: %s", poolname, e.args[0], e.args[1]) - raise e - -@@ -191,7 +622,7 @@ class DPMDB(object): - for row in self.dpmdb_c.fetchall(): - ret.append(FileSystem(row[0], row[1], row[2], row[3], row[4])) - return ret -- 
except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for server '%s' filesystems failed, error %d: %s", server, e.args[0], e.args[1]) - raise e - -@@ -207,7 +638,7 @@ class DPMDB(object): - for row in self.dpmdb_c.fetchall(): - ret.append(row[0]) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for pool '%s' servers failed, error %d: %s", pool, e.args[0], e.args[1]) - raise e - -@@ -223,7 +654,7 @@ class DPMDB(object): - for row in self.dpmdb_c.fetchall(): - ret.append(row[0]) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for pool from server '%s' and filesystem '%s' failed, error %d: %s", server, fsname, e.args[0], e.args[1]) - raise e - -@@ -237,7 +668,7 @@ class DPMDB(object): - ''' % {"gid": gid}) - row = self.nsdb_c.fetchone() - return row[0] -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for group by gid '%i' failed, error %d: %s", gid, e.args[0], e.args[1]) - raise e - -@@ -251,7 +682,7 @@ class DPMDB(object): - ''' % {"groupname": groupname}) - row = self.nsdb_c.fetchone() - return row[0] -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for gid by group '%s' failed, error %d: %s", groupname, e.args[0], e.args[1]) - raise e - -@@ -272,7 +703,7 @@ class DPMDB(object): - namelist = self.getLFN(parent_fileid) + [str(name)] - ret.append('/'.join(namelist)) - return ret -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for file pattern '%s' failed, error %d: %s", pattern, e.args[0], e.args[1]) - raise e - except ValueError as v: -@@ -293,7 +724,7 @@ class DPMDB(object): - parent_fileid, name = row - namelist = self.getLFN(parent_fileid) + [str(name)] - return '/'.join(namelist) -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for sfn '%s' failed, error %d: %s", sfn, e.args[0], e.args[1]) - return None - except ValueError as e: -@@ -305,7 +736,7 @@ class DPMDB(object): - try: - namelist = self.getLFN(fileid) - return '/'.join(namelist) -- except MySQLdb.Error as e: -+ except pymysql.Error as e: - _log.error("Database query for fileid '%s' failed, error %d: %s", fileid, e.args[0], e.args[1]) - return None - except ValueError as e: -@@ -317,22 +748,22 @@ class DPMDB(object): - namelist = [] - parent_fileid = fileid - while parent_fileid > 0: -- key = str(parent_fileid) -- if key in self.dirhash: -- name, parent_fileid = self.dirhash[key] -- namelist.append(str(name)) -- else: -+ if parent_fileid not in self.dirhash: - self.nsdb_c.execute('''SELECT parent_fileid, name FROM Cns_file_metadata WHERE fileid = %s''' % parent_fileid) - row = self.nsdb_c.fetchone() - if row == None: - raise ValueError("no parent for %s" % parent_fileid) -- parent_fileid, name = row -- self.dirhash[key] = (name, parent_fileid) -- namelist.append(str(name)) -+ if len(self.dirhash) > 100000: -+ # prevent excessive size of cached entries -+ self.dirhash = {} -+ self.dirhash[parent_fileid] = list(row) -+ parent_fileid, name = self.dirhash[parent_fileid] -+ namelist.append(name) - namelist.reverse() #put entries in "right" order for joining together - return [''] + namelist[1:] #and sfn and print dpns name (minus srm bits) - - -+ - # Get a filesytem information from db - class FileReplica(object): - -@@ -354,6 +785,7 @@ class FileReplica(object): - return "FileReplica(name=" + self.name + ", poolname=" + self.poolname + ", 
server=" + self.host + ", fsname=" + self.fsname + ", sfn=" + self.sfn + ", size=" + str(self.size) + ", gid=" + str(self.gid) + ", status=" + self.status + ", replicastatus=" + self.replicastatus + ", setname=" + self.setname + ", pinnedtime=" + str(self.pinnedtime) + ")" - - -+ - # Get a filesytem information from db - class FileSystem(object): - -@@ -379,3 +811,39 @@ class FileSystem(object): - - def __repr__(self): - return "FileSystem(poolname=" + self.poolname + ", server=" + self.server + ", name=" + self.name + ", status=" + str(self.status) + ", weight=" + str(self.weight) + ", with " + str(len(self.files)) + " files and " + str(self.avail) + " 1k blocks avail)" -+ -+ -+ -+if __name__ == '__main__': -+ # basic logging configuration -+ streamHandler = logging.StreamHandler(sys.stderr) -+ streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S")) -+ _log.addHandler(streamHandler) -+ _log.setLevel(logging.DEBUG) -+ -+ # just simple module tests to make debugging easier -+ _log.debug("start") -+ -+ DBConn.configure('auto') -+ #DBConn.configure('obsolete') -+ #DBConn.configure('legacy') -+ #DBConn.configure('dome') -+ #DBConn.configure('user', {'pass': 'secret'}) -+ -+ _log.debug("test ping with cns_db") -+ DBConn.get('cns_db').ping() -+ _log.debug("test ping with dpm_db") -+ DBConn.get('dpm_db').ping() -+ -+ _log.debug("test select with cns_db") -+ with DBConn.get('cns_db').cursor() as cur: -+ cur.execute("SELECT 1") -+ res = cur.fetchall() -+ _log.debug("database SELECT from cns_db succeeded: %s", res[0][0]) -+ _log.debug("test select with dpm_db") -+ with DBConn.get('dpm_db').cursor() as cur: -+ cur.execute("SELECT 1") -+ res = cur.fetchall() -+ _log.debug("database SELECT from dpm_db succeeded: %s", res[0][0]) -+ -+ _log.debug("end") -diff --git a/shell/src/dmlite-shell b/shell/src/dmlite-shell -index 26d3ec4b..f5db6ff5 100755 ---- a/shell/src/dmlite-shell -+++ b/shell/src/dmlite-shell -@@ -1,4 +1,4 @@ --#! /usr/bin/python2 -+#!/usr/bin/python2 - # dmliteshell.py - """ - This file implements the DMLite shell. 
-@@ -21,7 +21,7 @@ try:
- except NameError:
-     pass
- 
--__version__ = '1.13.3'
-+__version__ = '1.14.0'
- 
- _log = logging.getLogger('dmlite-shell')
- 
-@@ -62,7 +62,7 @@ def main():
-     parser.add_argument("--log-backup", dest="logbackup", metavar="COUNT", type=int, default=4, help="number of log backup files, default: %(default)s")
-     # shell arguments
-     parser.add_argument("-c", "--config", dest="configfile", metavar="FILENAME", help="define the configuration file before launching the shell, default: %(default)s", default="/etc/dmlite.conf")
--    parser.add_argument("-e", "--execute", dest="command", help="execute the given command and exit", default='')
-+    parser.add_argument("-e", "--execute", dest="commands", action='append', help="execute the given command (or multiple commands) and exit", default=[])
-     parser.add_argument("-s", "--script", dest="scriptfile", metavar="FILENAME", help="execute the given script file line by line and exit", default='')
- 
-     options = parser.parse_args()
-@@ -109,7 +109,7 @@ def main():
-     _log.debug("python: %s", str(sys.version_info))
- 
-     # init interpreter
--    quietMode = (options.command) or (options.scriptfile) or (not sys.__stdin__.isatty())
-+    quietMode = (options.commands) or (options.scriptfile) or (not sys.__stdin__.isatty())
-     def log_write(msg):
-         streamHandler.setFormatter(log_format_empty)
-         _log.warning(msg[:-1])
-@@ -118,8 +118,13 @@
-     if interpreter2.failed:  # initialisation failed
-         return
- 
--    if options.command:
--        interpreter2.execute(options.command)
-+    if options.commands:
-+        for command in options.commands:
-+            if interpreter2.failed:
-+                break
-+
-+            interpreter2.execute(command)
-+
-         print()
- 
-     elif options.scriptfile:
-diff --git a/shell/src/dump.py b/shell/src/dump.py
-new file mode 100644
-index 00000000..1fb56214
---- /dev/null
-+++ b/shell/src/dump.py
-@@ -0,0 +1,1084 @@
-+# Script for extracting file information from dpm database and converting
-+# the result into text, json or xml storage dump
-+#
-+# Erming Pei, 2009/11/13
-+# Tomas Kouba, 2012/11/16
-+# Dennis van Dok, 2015/07/03
-+# Alessandra Forti, 2015/10/14, 2015/11/18
-+# Eygene Ryabinkin, 2016
-+# Georgios Bitzes + Fabrizio Furano, 2016
-+# Petr Vokac, 2018/12/31
-+#
-+from __future__ import absolute_import
-+from __future__ import print_function
-+from __future__ import division
-+
-+import os, sys
-+import string
-+import datetime
-+import time
-+import collections
-+import logging, logging.handlers
-+import re
-+import stat
-+import socket
-+import hashlib
-+import tempfile
-+import optparse
-+import getpass
-+import inspect
-+import atexit
-+
-+try: import simplejson as json
-+except ImportError: import json
-+
-+# compatibility with python 2 and 3
-+try: import urllib.parse as urlparse
-+except ImportError: import urlparse
-+try: import http.client as httplib
-+except ImportError: import httplib
-+
-+from .dbutils import DBConn, CachedFullPath
-+
-+# compatibility for existing SLC6, CentOS7, CentOS8 packages
-+try:
-+    import pymysql.cursors as pymysql_cursors
-+except ImportError:
-+    import MySQLdb.cursors as pymysql_cursors
-+
-+__version__ = '0.1.2'
-+
-+_log = logging.getLogger('dmlite-shell')
-+
-+# We could get column names directly from DB,
-+# but DPM schema is stable enough to just use predefined constants
-+# SHOW columns FROM Cns_file_metadata
-+METADATA_COLUMNS = [
-+    'metadata_rowid', 'metadata_fileid', 'metadata_parent_fileid',
-+    'metadata_guid', 'metadata_name', 'metadata_filemode',
-+    'metadata_nlink', 'metadata_owner_uid', 'metadata_gid',
-+
'metadata_filesize', 'metadata_atime', 'metadata_mtime', -+ 'metadata_ctime', 'metadata_fileclass', 'metadata_status', -+ 'metadata_csumtype', 'metadata_csumvalue', 'metadata_acl', -+ 'metadata_xattr', -+] -+# SHOW columns FROM Cns_file_replica -+REPLICA_COLUMNS = [ -+ 'replica_rowid', 'replica_fileid', 'replica_nbaccesses', -+ 'replica_atime', 'replica_ptime', 'replica_status', -+ 'replica_f_type', 'replica_poolname', 'replica_host', -+ 'replica_fs', 'replica_sfn', 'replica_ctime', 'replica_ltime', -+ 'replica_r_type', 'replica_setname', 'replica_xattr', -+] -+# global list of temporary files used by exit handler -+tempfiles = [] -+ -+ -+ -+@atexit.register -+def temp_files_cleanup(): -+ global tempfiles -+ for filename in tempfiles: -+ if not os.path.exists(filename): continue -+ os.unlink(filename) -+ -+ -+ -+class BaseFormatter(object): -+ """ Interface for all formatters """ -+ -+ def __init__(self, fp, base, opts): -+ """ Initializes formatter """ -+ self._fp = fp -+ self._base = base -+ self._opts = opts -+ self._columns = [] -+ -+ def __str__(self): -+ return "{0}[{1}]".format(self.__class__.__name__, self._base) -+ -+ def write(self, data): -+ """ Writes single record """ -+ raise NotImplementedError -+ -+ def finish(self): -+ """ Finalize formatter """ -+ raise NotImplementedError -+ -+ @property -+ def base(self): -+ """ Base path used to construct relative paths {rpath} """ -+ return self._base -+ -+ @property -+ def columns(self): -+ """ Database columns used by specific formatter """ -+ return self._columns -+ -+ -+class PlaintextFormatter(BaseFormatter): -+ -+ def __init__(self, fp, base, opts): -+ super(PlaintextFormatter, self).__init__(fp, base, opts) -+ self._header = opts.get('header') -+ self._footer = opts.get('footer') -+ self._format = opts.get('format', '{rpath}{nl}') -+ -+ self._records = 0 -+ self._sumsize = 0 -+ -+ # parse database columns necessary to format record data -+ self._columns = [] -+ fmt = string.Formatter() -+ for literal_text, field_name, format_spec, conversion in fmt.parse(self._format): -+ if field_name in self._columns: continue -+ if field_name.startswith('metadata_'): -+ self._columns.append(field_name) -+ if field_name.startswith('replica_'): -+ self._columns.append(field_name) -+ -+ if self._header == None: -+ return -+ -+ data = { -+ 'version': __version__, -+ 'sysversion': str(sys.version_info), -+ 'command': " ".join(sys.argv), -+ 'script': os.path.abspath(inspect.getfile(inspect.currentframe())), -+ 'sha256': hashlib.sha256(open(__file__, "rb").read()).hexdigest(), -+ 'user': getpass.getuser(), -+ 'host': socket.gethostname(), -+ 'filename': getattr(self._fp, 'name', ''), -+ 'base': base, -+ 'vo': opts.get('vo', ''), -+ 'curtime': opts.get('curtime'), -+ 'timestamp': int(time.time()), -+ 'datetime': datetime.datetime.now(), -+ 'time': time.ctime(), -+ 'format': self._format, -+ 'nl': '\n', # make it easy to specify newline on command line -+ 'tab': '\t', # make it easy to specify tabulator on command line -+ 'comma': ',', # make it easy to specify ',' on command line -+ 'space': ' ', # make it easy to specify ' ' on command line -+ 'hash': '#', # make it easy to specify '#' on command line -+ 'quot': '"', # make it easy to specify '"' on command line -+ 'apos': "'", # make it easy to specify "'" on command line -+ } -+ -+ #if _log.getEffectiveLevel() < logging.DEBUG: -+ # _log.debug("{0} {1}".format(self._header, str(data))) -+ # _log.debug(self._header.replace('{nl}', ';').format(**data)) -+ -+ 
self._fp.write(self._header.format(**data))
-+
-+    def write(self, data):
-+        self._records += 1
-+        self._sumsize += data.get('metadata_filesize', 0)
-+
-+        #if _log.getEffectiveLevel() < logging.DEBUG:
-+        #    _log.debug("{0} {1}".format(self._format, str(data)))
-+        #    _log.debug(self._format.replace('{nl}', ';').format(**data))
-+
-+        self._fp.write(self._format.format(**data))
-+
-+    def finish(self):
-+        if self._footer == None:
-+            return
-+
-+        data = {
-+            'timestamp': int(time.time()),
-+            'datetime': datetime.datetime.now(),
-+            'time': time.ctime(),
-+            'records': self._records,
-+            'sumsize': self._sumsize,
-+            'nl': '\n',    # make it easy to specify newline on command line
-+            'tab': '\t',   # make it easy to specify tabulator on command line
-+            'comma': ',',  # make it easy to specify ',' on command line
-+            'space': ' ',  # make it easy to specify ' ' on command line
-+            'hash': '#',   # make it easy to specify '#' on command line
-+            'quot': '"',   # make it easy to specify '"' on command line
-+            'apos': "'",   # make it easy to specify "'" on command line
-+        }
-+
-+        #if _log.getEffectiveLevel() < logging.DEBUG:
-+        #    _log.debug("{0} {1}".format(self._footer, str(data)))
-+        #    _log.debug(self._footer.replace('{nl}', ';').format(**data))
-+
-+        self._fp.write(self._footer.format(**data))
-+
-+
-+class XmlFormatter(BaseFormatter):
-+
-+    def __init__(self, fp, base, opts):
-+        super(XmlFormatter, self).__init__(fp, base, opts)
-+        self._columns = ['metadata_filesize', 'metadata_atime', 'metadata_mtime', 'metadata_csumtype', 'metadata_csumvalue', 'metadata_xattr']
-+        # XML skeleton mirrors the JSON formatter: recorded/for header, entries, dump_complete
-+        self._fp.write('<?xml version="1.0"?>\n')
-+        self._fp.write('<dump recorded="{0}"><for>vo:{1}</for>\n<entries>\n'.format(opts.get('curtime'), opts.get('vo', 'none')))
-+
-+    def _escape(self, astr):
-+        # avoid dependency on xml.sax.saxutils.escape
-+        if type(astr) != str:
-+            return astr
-+        astr = astr.replace("&", "&amp;")
-+        astr = astr.replace("<", "&lt;")
-+        astr = astr.replace(">", "&gt;")
-+        astr = astr.replace('"', "&quot;")
-+        astr = astr.replace("'", "&#39;")
-+        return astr
-+
-+    def write(self, data):
-+        rpath = self._escape(data['path'][len(self._base):])
-+        xattr = data.get('metadata_xattr', {})
-+        csumtype = ''
-+        checksum = ''
-+        if 'checksum_adler32' in xattr:
-+            # Cns_file_metadata csumtype & csumvalue columns are deprecated
-+            # use adler32 checksum from xattr column if available in json data
-+            csumtype = 'AD'
-+            checksum = xattr['checksum_adler32']
-+        elif data.get('metadata_csumtype') == 'AD':
-+            # don't use other than adler32 checksum, because DPM < 1.10.x
-+            # did not provide other algorithm and all tools that use these
-+            # dumps expect either adler32 or empty checksum
-+            csumtype = 'AD'
-+            checksum = data['metadata_csumvalue']
-+
-+        self._fp.write('<entry name="{0}" size="{1}" atime="{2}" mtime="{3}" csumtype="{4}" checksum="{5}"/>\n'.format(rpath, data['metadata_filesize'], data['metadata_atime'], data['metadata_mtime'], csumtype, checksum))
-+
-+    def finish(self):
-+        self._fp.write("</entries>\n")
-+        self._fp.write('<dump_complete>"{0}"</dump_complete>\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))
-+        self._fp.write("</dump>\n")
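-+
-+# Example (illustrative sketch, not part of the original patch): the formatter
-+# interface above only needs write()/finish() plus the `columns` property, so
-+# a hypothetical minimal CSV formatter could look like this:
-+#
-+#   class CsvFormatter(BaseFormatter):
-+#
-+#       def __init__(self, fp, base, opts):
-+#           super(CsvFormatter, self).__init__(fp, base, opts)
-+#           # database columns interpolated by write() below
-+#           self._columns = ['metadata_filesize', 'metadata_mtime']
-+#
-+#       def write(self, data):
-+#           # 'rpath' is filled in by dump_data() for every record
-+#           self._fp.write('{0},{1},{2}\n'.format(data['rpath'], data['metadata_filesize'], data['metadata_mtime']))
-+#
-+#       def finish(self):
-+#           pass  # nothing to finalize for plain CSV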
-+
-+
-+class JsonFormatter(BaseFormatter):
-+
-+    def __init__(self, fp, base, opts):
-+        super(JsonFormatter, self).__init__(fp, base, opts)
-+        self._columns = ['metadata_filesize', 'metadata_atime', 'metadata_mtime', 'metadata_csumtype', 'metadata_csumvalue', 'metadata_xattr']
-+        self._fp.write('{{ "recorded" : "{0}", "for" : "vo:{1}", "entries" : [\n'.format(opts.get('curtime'), opts.get('vo', 'none')))
-+        self.first_line = True
-+
-+    def _escape(self, astr):
-+        if type(astr) != str:
-+            return astr
-+        return astr.replace('"', '\\"')
-+
-+    def write(self, data):
-+        rpath = self._escape(data['path'][len(self._base):])
-+        xattr = data.get('metadata_xattr', {})
-+        csumtype = ''
-+        checksum = ''
-+        if 'checksum_adler32' in xattr:
-+            # Cns_file_metadata csumtype & csumvalue columns are deprecated
-+            # use adler32 checksum from xattr column if available in json data
-+            csumtype = 'AD'
-+            checksum = xattr['checksum_adler32']
-+        elif data.get('metadata_csumtype') == 'AD':
-+            # don't use other than adler32 checksum, because DPM < 1.10.x
-+            # did not provide other algorithm and all tools that use these
-+            # dumps expect either adler32 or empty checksum
-+            csumtype = 'AD'
-+            checksum = data['metadata_csumvalue']
-+
-+        if self.first_line:
-+            self.first_line = False
-+        else:
-+            self._fp.write(',\n')
-+
-+        self._fp.write(' {')
-+        self._fp.write(' "name": "{rpath}", "ctype": "{ctype}", "cs": "{cs}"'.format(rpath=rpath, ctype=csumtype, cs=checksum))
-+        self._fp.write(', "size": "{metadata_filesize}", "atime": "{metadata_atime}", "mtime": "{metadata_mtime}"'.format(**data))
-+        for k, v in sorted(xattr.items()):
-+            self._fp.write(', "{0}": "{1}"'.format(k, self._escape(v)))
-+        self._fp.write(' }')
-+
-+    def finish(self):
-+        if not self.first_line:
-+            self._fp.write('\n')
-+        self._fp.write(' ],\n')
-+        self._fp.write(' "dump_complete" : "{0}"\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))
-+        self._fp.write('}\n')
-+
-+
-+
-+def filterByDisknode(replica_hostfs, replica_table='replica'):
-+    where = []
-+
-+    for host, fss in replica_hostfs.items():
-+        if None in fss:
-+            where.append('{0}.host = "{1}"'.format(replica_table, host))
-+        else:
-+            for fs in fss:
-+                where.append('{0}.host = "{1}" AND {0}.fs = "{2}"'.format(replica_table, host, fs))
-+
-+    return "({0})".format(') OR ('.join(where))
-+
-+
-+
-+def dump_data(formatters, filters={}, timestamp=0):
-+    _log.debug("dump_data")
-+
-+    # mandatory columns
-+    cols = ['metadata.fileid', 'metadata.parent_fileid', 'metadata.name', 'metadata.mtime']
-+    ucols = []  # unknown columns
-+    AVAILABLE_COLUMNS = METADATA_COLUMNS + REPLICA_COLUMNS if filters.get('filemode') == stat.S_IFREG else METADATA_COLUMNS
-+
-+    # union of columns required by all formatters
-+    for formatter in formatters:
-+        for col in formatter.columns:
-+            dbcol = col if col.find('[') == -1 else col[:col.find('[')]
-+            if dbcol in AVAILABLE_COLUMNS:
-+                dbcol = dbcol.replace('_', '.', 1)
-+                if dbcol in cols: continue
-+                cols.append(dbcol)
-+            else:
-+                ucols.append(col)
-+                _log.error("%s uses unknown column %s (%s)", str(formatter), dbcol, col)
-+    if len(ucols) > 0:
-+        raise Exception("unknown columns: {0}".format(', '.join(ucols)))
-+
-+    has_replica_column = any([col.startswith('replica.') for col in cols])
-+
-+    where = []
-+    if 'filemode' in filters:
-+        where.append('(metadata.filemode & {0}) = {0}'.format(filters['filemode']))
-+    if 'replica_pool' in filters:
-+        where.append('replica.poolname IN ("{0}")'.format('","'.join(filters['replica_pool'])))
-+    if 'replica_spacetoken' in filters:
-+        st2uuid = {}
-+        conn_dpm = DBConn.get('dpm_db')
-+        cursor = conn_dpm.cursor()
-+        cursor.execute('SELECT s_token, u_token, poolname, path FROM dpm_space_reserv')
-+        while True:
-+            row = cursor.fetchone()
-+            if row == None: break
-+            s_token, u_token, poolname, path = row
-+            st2uuid[u_token.lower()] = s_token
-+        cursor.close()
-+        conn_dpm.close()
-+        replica_setnames = [st2uuid.get(x.lower(), x) for x in filters['replica_spacetoken']]
-+        where.append('replica.setname IN ("{0}")'.format('","'.join(replica_setnames)))
-+    if 'metadata_status' in filters:
-+        where.append('metadata.status = "{0}"'.format(filters['metadata_status']))
-+    if 'replica_status' in filters:
-+        where.append('replica.status = "{0}"'.format(filters['replica_status']))
-+    if len(filters.get('replica_hostfs', [])) > 0:
-+        where.append(filterByDisknode(filters['replica_hostfs']))
-+    if len(filters.get('only_replica_hostfs', [])) > 0:
-+        where.append(filterByDisknode(filters['only_replica_hostfs']))
-+        # exclude files that have multiple replicas where a replica with
-+        # "Available" status is on a diskserver or filesystem that is not
-+        # selected by the current hostfs filter
-+        onlyReplicaDisknodeFilter = filterByDisknode(filters['only_replica_hostfs'], replica_table='r')
-+        excludedReplicaIds = 'SELECT r.fileid FROM Cns_file_replica r WHERE r.status = "-" AND NOT ({0})'.format(onlyReplicaDisknodeFilter)
-+        #if filter_replica_status:  # commented out - we really want only "Available" replicas
-+        #    excludedReplicaIds = '{0} AND r.status = "{1}"'.format(excludedReplicaIds, filter_replica_status)
-+        where.append('replica.fileid NOT IN ({0})'.format(excludedReplicaIds))
-+
-+    colstr = ', '.join(["{0} AS `{1}`".format(x, x.replace('.', '_', 1)) for x in cols])
-+    sql = 'SELECT SQL_BIG_RESULT {0} FROM Cns_file_metadata AS metadata JOIN Cns_file_replica AS replica ON metadata.fileid = replica.fileid'.format(colstr)
-+    if filters.get('filemode') != stat.S_IFREG:
-+        # for non-file objects it doesn't make sense to join with replica table
-+        sql = 'SELECT {0} FROM Cns_file_metadata AS metadata'.format(colstr)
-+    if len(where) > 0:
-+        sql += ' WHERE ({0})'.format(') AND ('.join(where))
-+    if filters.get('filemode') == stat.S_IFREG and not has_replica_column:
-+        # don't produce duplicate replica records unless result contains column from replica table
-+        sql += ' GROUP BY replica.fileid'
-+
-+    # fileid => fullpath
-+    pathname = CachedFullPath()
-+
-+    cnt_rows = 0
-+    cnt_files = 0
-+    conn = DBConn.new('cns_db')
-+    cursor = conn.cursor(pymysql_cursors.SSDictCursor)
-+    try:
-+        _log.debug("query file metadata with '%s'", sql)
-+        cursor.execute(sql)
-+
-+        while True:
-+            # retrieving data in chunks dramatically increases performance,
-+            # mostly because of the latency associated with each DB query
-+            rows = cursor.fetchmany(1000)
-+            if len(rows) == 0: break
-+
-+            if _log.getEffectiveLevel() < logging.DEBUG:
-+                _log.debug("fetched %i rows", len(rows))
-+
-+            fileids = [row['metadata_parent_fileid'] for row in rows]
-+            pathnames = pathname.get_path_multi(fileids)
-+
-+            for row in rows:
-+                cnt_rows += 1
-+                if _log.getEffectiveLevel() < logging.DEBUG:
-+                    _log.debug("row %i: %s", cnt_rows, str(row))
-+
-+                fileid = row['metadata_fileid']
-+                parent_fileid = row['metadata_parent_fileid']
-+                name = row['metadata_name']
-+                mtime = row['metadata_mtime']
-+
-+                # NOTE: unfortunately mtime & ctime probably don't contain the
-+                # timestamp that you would expect, because at least DPM in legacy
-+                # mode updates both timestamps in case of checksum recalculation
-+                # (done on first SRM download when in legacy mode). That means
-+                # [cm]time can be set to a value that is much more recent than
-+                # the time when the file was uploaded to the DPM - more details in
-+                # https://groups.cern.ch/group/dpm-users-forum/Lists/Archive/Flat.aspx?RootFolder=%2Fgroup%2Fdpm-users-forum%2FLists%2FArchive%2FStorage%20dumps%20%20filter%20by%20mtime
-+                # In pure DPM DOME configuration (without legacy SRM support)
-+                # these timestamps should not be modified unless you modify file
-+                # content (e.g. adding checksum should not cause [cm]time update)
-+                if timestamp != 0 and mtime > timestamp:
-+                    continue
-+
-+                prefix = pathnames.get(parent_fileid)
-+                if prefix is None:
-+                    _log.error("skipping fileid %i with name '%s', unable to reconstruct path of parent fileid %i", fileid, name, parent_fileid)
-+                    continue
-+
-+                result_enhanced = False
-+                metadata_xattr_parsing_required = 'metadata_xattr' in row
-+                replica_xattr_parsing_required = 'replica_xattr' in row
-+                fullpath = prefix + "/" + name
-+                for formatter in formatters:
-+                    # filter by base path
-+                    if not fullpath.startswith(formatter.base):
-+                        continue
-+
-+                    # parse metadata xattr json value into dictionary
-+                    if metadata_xattr_parsing_required:
-+                        metadata_xattr_parsing_required = False
-+
-+                        xattr = row.get('metadata_xattr')
-+                        xattr_dict = collections.defaultdict(lambda: '')
-+                        if xattr not in [None, '', '{}']:
-+                            try:
-+                                # replace '.' in dictionary key to be able to easily access
-+                                # xattr values using string.Template formatting
-+                                xattr_dict = collections.defaultdict(lambda: '', [(k.replace('.', '_'), v) for k, v in json.loads(xattr).items()])
-+                            except Exception as e:
-+                                _log.error("unable to parse metadata_xattr for fileid %i: %s", fileid, str(xattr))
-+                        row['metadata_xattr'] = xattr_dict
-+
-+                    # parse replica xattr json value into dictionary
-+                    if replica_xattr_parsing_required:
-+                        replica_xattr_parsing_required = False
-+
-+                        xattr = row.get('replica_xattr')
-+                        xattr_dict = collections.defaultdict(lambda: '')
-+                        if xattr not in [None, '', '{}']:
-+                            try:
-+                                # replace '.' in dictionary key to be able to easily access
-+                                # xattr values using string.Template formatting
-+                                xattr_dict = collections.defaultdict(lambda: '', [(k.replace('.', '_'), v) for k, v in json.loads(xattr).items()])
-+                            except Exception as e:
-+                                _log.error("unable to parse replica_xattr for fileid %i: %s", fileid, str(xattr))
-+                        row['replica_xattr'] = xattr_dict
-+
-+                    # add necessary metadata and formatting strings in the 'row' dictionary
-+                    if not result_enhanced:
-+                        result_enhanced = True
-+
-+                        #row['bpath'] = prefix
-+                        row['path'] = fullpath
-+                        row['tab'] = '\t'
-+                        row['nl'] = '\n'
-+                        row['comma'] = ','
-+
-+                    # relative path with respect to formatter base
-+                    row['rpath'] = fullpath[len(formatter.base):]
-+
-+                    formatter.write(row)
-+
-+                # file info processed at least by one formatter
-+                if result_enhanced:
-+                    cnt_files += 1
-+
-+                if cnt_rows % 1000000 == 0:
-+                    _log.debug("processed %i records (%i files formatted)", cnt_rows, cnt_files)
-+
-+    except Exception as e:
-+        # query in progress that uses SSDictCursor can be killed only by terminating DB connection
-+        # (closing / deleting cursor leads to retrieval of all selected entries from DB)
-+        conn.close()
-+        del(pathname)
-+        raise
-+
-+    # Close cursor and connections
-+    cursor.close()
-+    conn.close()
-+    del(pathname)
-+
-+    _log.info("processed %i records (%i files)", cnt_rows, cnt_files)
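-+
-+
-+# Example (illustrative sketch, not part of the original patch): dump_data()
-+# can be driven directly once DBConn is configured. With the default filters
-+# the generated query is roughly
-+#   SELECT SQL_BIG_RESULT ... FROM Cns_file_metadata AS metadata
-+#   JOIN Cns_file_replica AS replica ON metadata.fileid = replica.fileid
-+#   WHERE ((metadata.filemode & 32768) = 32768) AND (metadata.status = "-")
-+#     AND (replica.status = "-") GROUP BY replica.fileid
-+# and a plain text dump of all regular files can be written to stdout
-+# (the base path below is hypothetical):
-+#
-+#   DBConn.configure('auto')
-+#   fmt = PlaintextFormatter(sys.stdout, '/dpm/example.com/home/', {'format': '{rpath}{nl}'})
-+#   dump_data([fmt], filters={'filemode': stat.S_IFREG, 'metadata_status': '-', 'replica_status': '-'})
-+#   fmt.finish()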
-+
-+
-+def dump_and_store(oconfig, filters, cert, key, timestamp=0, tmp_dir=None, keep_failed=False):
-+    _log.debug("dump_and_store(%s, %s, %s, %s, %s, %s, %s)", oconfig, filters, cert, key, timestamp, tmp_dir, keep_failed)
-+
-+    fclazz = {
-+        'xml': XmlFormatter,
-+        'json': JsonFormatter,
-+        'txt': PlaintextFormatter,
-+    }
-+
-+    formatters = []
-+    outputs = []
-+    for format, path, opts in oconfig:
-+        _log.debug("requested dump: format=%s, path=%s, opts=%s", format, path, opts)
-+        if path != '' and not path.endswith('/'):
-+            path = "{0}/".format(path)
-+        ourl = urlparse.urlparse(opts.get('output', 'stdout://'))
-+        if ourl.scheme not in ['stdout', 'file', 'davs', 'https', 'root']:
-+            _log.warn("skipping output with unsupported protocol scheme: %s", ourl.geturl())
-+            continue
-+
-+        # open output file
-+        fp = None
-+        ofile = None
-+        if ourl.scheme == 'stdout':
-+            fp = sys.stdout
-+
-+        else:
-+            if ourl.scheme == 'file':
-+                ofile = ourl.path
-+            else:
-+                bfile = os.path.basename(ourl.path)
-+                fp, ofile = tempfile.mkstemp(suffix=".{0}".format(bfile), prefix='dpm-dump.', dir=tmp_dir)
-+                # python < 3.3 doesn't support bz2 opened from file object - always use the filename directly
-+                os.close(fp)
-+                tempfiles.append(ofile)
-+
-+            if ofile.endswith('.gz'):
-+                import gzip
-+                fp = gzip.open(ofile, 'wb')
-+            elif ofile.endswith('.bz2'):
-+                import bz2
-+                fp = bz2.BZ2File(ofile, 'wb')
-+            elif ofile.endswith('.lzma'):
-+                import lzma
-+                fp = lzma.LZMAFile(ofile, "wb")
-+            else:
-+                fp = open(ofile, 'wb')
-+
-+            # allow writing strings (instead of bytes) also in python3
-+            if sys.version_info >= (3, ):
-+                import io
-+                fp = io.TextIOWrapper(fp)
-+
-+        formatter = fclazz[format](fp, path, opts)
-+        formatters.append(formatter)
-+        outputs.append((formatter, ourl, ofile, fp))
-+
-+    _log.info("dump data")
-+    dump_start = time.time()
-+    dump_data(formatters, filters=filters, timestamp=timestamp)
-+    dump_end = time.time()
-+    _log.info("all database entries processed")
-+
-+    publish_failed = 0
-+    xroot_uploads = []
-+    for formatter, ourl, ofile, fp in outputs:
-+        formatter.finish()
-+
-+        if ourl.scheme not in ['stdout']:
-+            # close all files (including temporary files used later for davs/root upload)
-+            fp.close()
-+
-+        if ourl.scheme in ['stdout', 'file']:
-+            pass
-+
-+        elif ourl.scheme in ['davs', 'https']:
-+            try:
-+                location = ourl.geturl()
-+                if ourl.scheme == 'davs':
-+                    location = ourl._replace(scheme='https').geturl()
-+                publish_https(ofile, location, cert, key)
-+            except Exception as e:
-+                _log.error("unable to upload %s to %s: %s", ofile, ourl.geturl(), str(e))
-+                publish_failed += 1
-+                if keep_failed and ofile in tempfiles:
-+                    _log.info("keep %s for manual upload (location: %s)", ofile, ourl.geturl())
-+                    tempfiles.remove(ofile)
-+
-+        elif ourl.scheme in ['root']:
-+            # XRootD can deal with parallel uploads
-+            xroot_uploads.append((ofile, ourl.geturl()))
-+
-+        else:
-+            _log.info("unknown output scheme '%s' for %s", ourl.scheme, ourl.geturl())
-+
-+    # XRootD parallel upload
-+    if len(xroot_uploads) > 0:
-+        try:
-+            publish_xrootd(xroot_uploads, cert, key)
-+        except Exception as e:
-+            _log.error("unable to upload files with xrootd protocol: %s", str(e))
-+            publish_failed += 1
-+            if keep_failed:
-+                for ofile, location in xroot_uploads:
-+                    if ofile in tempfiles:
-+                        _log.info("keep %s for manual upload (location: %s)", ofile, location)
-+                        tempfiles.remove(ofile)
-+
-+    _log.info("done (dump: %s, upload: %s)", int(dump_end - dump_start), int(time.time() - dump_end))
-+
-+    if publish_failed > 0:
-+        raise Exception("failed to publish %i files" % publish_failed)
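-+
-+
-+# Example (illustrative, hypothetical hosts and paths): dump_and_store()
-+# consumes a list of (format, base_path, opts) tuples, e.g. one compressed
-+# local json dump and one text dump uploaded over WebDAV, with curtime as
-+# produced by datetime.datetime.isoformat():
-+#
-+#   oconfig = [
-+#       ('json', '/dpm/example.com/home/atlas', {'output': 'file:///tmp/dump.json.gz', 'vo': 'atlas', 'curtime': curtime}),
-+#       ('txt', '/dpm/example.com/home/atlas', {'output': 'davs://head.example.com:443/dpm/example.com/home/atlas/dumps/dump_20200814', 'curtime': curtime}),
-+#   ]
-+#   dump_and_store(oconfig, {'filemode': stat.S_IFREG}, '/etc/grid-security/hostcert.pem', '/etc/grid-security/hostkey.pem')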
-+
-+
-+def publish_https(filename, location, cert, key):
-+    _log.debug("publish DPM dump %s to %s", filename, location)
-+    try:
-+        import pycurl
-+    except ImportError as e:
-+        raise Exception("unable to import pycurl module (install python-pycurl package): {0}".format(str(e)))
-+
-+    c = pycurl.Curl()
-+    if _log.getEffectiveLevel() < logging.DEBUG:
-+        c.setopt(pycurl.VERBOSE, True)
-+    c.setopt(c.SSLCERT, cert)
-+    c.setopt(c.SSLKEY, key)
-+    c.setopt(c.SSL_VERIFYPEER, 0)
-+    c.setopt(c.SSL_VERIFYHOST, 2)
-+    c.setopt(c.FOLLOWLOCATION, 1)
-+
-+    # check path exists
-+    lurl = urlparse.urlparse(location)
-+    burl = lurl._replace(path=os.path.dirname(lurl.path))
-+    c.setopt(c.URL, burl.geturl())
-+    c.setopt(c.NOBODY, True)
-+    c.setopt(c.CUSTOMREQUEST, "HEAD")
-+    c.perform()
-+    if (c.getinfo(c.HTTP_CODE) != 200):
-+        raise Exception("path {0} not found".format(burl.geturl()))
-+
-+    # delete existing file
-+    c.setopt(c.URL, location)
-+    c.setopt(c.NOBODY, True)
-+    c.setopt(c.CUSTOMREQUEST, "DELETE")
-+    c.perform()
-+    if (c.getinfo(c.HTTP_CODE) != 204 and c.getinfo(c.HTTP_CODE) != 404):
-+        raise Exception("can't delete {0}".format(location))
-+
-+    # put the new file
-+    c.setopt(c.CUSTOMREQUEST, "PUT")
-+    c.setopt(c.NOBODY, False)
-+    # suppress the response body
-+    c.setopt(c.WRITEFUNCTION, lambda x: None)
-+    c.setopt(pycurl.UPLOAD, 1)
-+    c.setopt(pycurl.READFUNCTION, open(filename, 'rb').read)
-+    c.setopt(pycurl.INFILESIZE, os.path.getsize(filename))
-+    c.perform()
-+    if (c.getinfo(c.HTTP_CODE) == 201):
-+        _log.info("uploaded %s", location)
-+    else:
-+        _log.error("upload %s error: %i", location, c.getinfo(c.HTTP_CODE))
-+
-+    c.close()
-+
-+
-+
-+def publish_xrootd(datalist, cert, key):
-+    _log.debug("publish %i DPM dumps with xrootd protocol", len(datalist))
-+
-+    # set environment for XRootD transfers
-+    # XRD_* env variables must be set before importing XRootD module
-+    if _log.getEffectiveLevel() < logging.DEBUG:
-+        os.putenv('XRD_LOGLEVEL', 'Dump')
-+        #os.putenv('XRD_LOGFILE', '/tmp/xrootd.debug')
-+        os.putenv('XRD_LOGMASK', 'All')
-+    os.putenv('XRD_CONNECTIONWINDOW', '10')  # by default connection timeouts after 300s
-+    #os.putenv('XRD_REQUESTTIMEOUT', '10')  # can be set individually for each operation
-+
-+    # set X509_* env variable used by XRootD authentication
-+    if os.getenv('X509_USER_CERT', cert) != cert:
-+        _log.info("overwriting X509_USER_CERT (%s) with %s", os.getenv('X509_USER_CERT'), cert)
-+    os.putenv('X509_USER_CERT', cert)
-+    if os.getenv('X509_USER_KEY', key) != key:
-+        _log.info("overwriting X509_USER_KEY (%s) with %s", os.getenv('X509_USER_KEY'), key)
-+    os.putenv('X509_USER_KEY', key)
-+
-+    try:
-+        import XRootD.client
-+    except ImportError as e:
-+        raise Exception("unable to import XRootD module (install python2-xrootd or python34-xrootd package): {0}".format(str(e)))
-+
-+    process = XRootD.client.CopyProcess()
-+    for filename, location in datalist:
-+        process.add_job(filename, location, force=True)
-+
-+    status = process.prepare()
-+    if not status.ok:
-+        raise Exception("unable to prepare XRootD transfer ({0})".format(str(status)))
-+
-+    status, details = process.run()
-+    if not status.ok:
-+        succeeded = 0
-+        failed = 0
-+        for i in range(len(datalist)):
-+            filename, location = datalist[i]
-+            tstatus = None
-+            if len(details) > i:
-+                tstatus = details[i].get('status')
-+            if tstatus == None:
-+                failed += 1
-+                _log.error("unknown transfer status from %s to %s", filename, location)
-+            else:
-+                if not tstatus.ok:
-+                    failed += 1
-+                    _log.error("transfer status from %s to %s: %s", filename, location, str(tstatus))
-+                else:
-+                    succeeded += 1
-+                    _log.debug("transfer succeeded from %s to %s", filename, location)
-+
-+        raise Exception("xrootd transfers failed (succeeded: %i, failed: %i)" % (succeeded, failed))
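-+
-+
-+# Example (illustrative, hypothetical endpoint): publish_xrootd() uploads a
-+# list of (local_file, destination_url) pairs in parallel, authenticating
-+# with the host certificate and key:
-+#
-+#   publish_xrootd([('/tmp/dump.atlas.txt', 'root://head.example.com:1094//dpm/example.com/home/atlas/dumps/dump_20200814')],
-+#                  '/etc/grid-security/hostcert.pem', '/etc/grid-security/hostkey.pem')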
-+
-+
-+#=====================================================================
-+# main - legacy interface
-+#=====================================================================
-+def main(argv):
-+    # basic logging configuration
-+    streamHandler = logging.StreamHandler(sys.stderr)
-+    streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S"))
-+    _log.addHandler(streamHandler)
-+    _log.setLevel(logging.WARN)
-+
-+    # parse options from command line
-+    def opt_set_loglevel(option, opt, value, parser):
-+        loglevel = option.default
-+        if value != None:
-+            loglevel = int({
-+                'CRITICAL': logging.CRITICAL,
-+                'DEBUG': logging.DEBUG,
-+                'ERROR': logging.ERROR,
-+                'FATAL': logging.FATAL,
-+                'INFO': logging.INFO,
-+                'NOTSET': logging.NOTSET,
-+                'WARN': logging.WARN,
-+                'WARNING': logging.WARNING,
-+            }.get(value, value))
-+
-+        _log.setLevel(loglevel)
-+        setattr(parser.values, option.dest, loglevel)
-+
-+    class IndentedHelpFormatterWithEpilogExamples(optparse.IndentedHelpFormatter):
-+
-+        def format_epilog(self, epilog):
-+            import textwrap
-+            if not epilog:
-+                return ""
-+            text_width = self.width - self.current_indent
-+            indent = " " * self.current_indent
-+            sections = []
-+            if type(epilog) == str:
-+                sections.append(textwrap.fill(epilog, text_width, initial_indent=indent, subsequent_indent=indent))
-+            else:
-+                example_section = False
-+                for epilog_section in epilog:
-+                    if not epilog_section.startswith('EXAMPLE '):
-+                        sections.append(textwrap.fill(epilog_section, text_width, initial_indent=indent, subsequent_indent=indent))
-+                        sections.append('')
-+                        example_section = False
-+                    else:
-+                        if not example_section:
-+                            sections.append('Examples:')
-+                            example_section = True
-+                        sections.append("  {0}{1}".format(indent, epilog_section[len('EXAMPLE '):].replace("\n", "\n{0}".format(indent))))
-+            return "\n{0}\n".format("\n".join(sections))
-+
-+    # command line arguments
-+    usage = "usage: %prog [options]"
-+    epilog = []
-+    epilog.append(
-+        "WARNING: be aware that the \"age\" and \"date\" configuration options don't produce expected results for DPM in legacy (SRM) mode. "
-+        "File modification time (ctime & mtime) is not handled consistently in the DPM implementation, "
-+        "e.g. both timestamps are updated on first file download during checksum calculation.")
-+    epilog.append(
-+        "DUMPPATH dpm_path[,output_path[,key=val]] must contain the lfn path that is used as a filter "
-+        "to retrieve only files within a specific subdirectory in the DPM namespace. "
-+        "Optionally you can specify output_path that points to the file or http URL "
-+        "where to store output data, otherwise everything is printed to the standard output. "
-+        "The output file is automatically compressed with gzip, bzip2 or lzma according to its "
-+        "suffix (.gz, .bz2, .lzma). Each formatter can have its own specific "
-+        "configuration options that can be passed as additional key=value arguments.")
-+    epilog.append(
-+        "HOSTFS diskserver_fqdn[:/filesystem] specifies a filter for replicas on a given diskserver "
-+        "and optionally filesystem (e.g. diskserver1.example.com or diskserver1.example.com:/mnt/fs1)")
-+    epilog.append(
-+        "POOL name of existing DPM pool")
-+    epilog.append(
-+        "SPACETOKEN name of existing spacetoken or its uuid")
-+    epilog.append(
-+        "Plain text formatter (txt) can use a customized header, footer and per file output format. "
-+        "The standard python str.format function is called and the formatting string can use all features provided by this function. "
-+        "The list of available attributes for header/footer formatting can be found in the PlaintextFormatter.__init__ and PlaintextFormatter.finish methods. "
-+        "File record formatting can use 'path', 'rpath' (relative path with respect to formatter base path) "
-+        "and all available columns from the file metadata and file replica table ({0}). "
-+        "Columns 'metadata_xattr' and 'replica_xattr' contain a dictionary; "
-+        "to access \"xattr\" data you have to use e.g. {{metadata_xattr[checksum_adler32]}} "
-+        "(all '.' characters in xattr dictionary keys are replaced with '_').".format(', '.join(METADATA_COLUMNS + REPLICA_COLUMNS)))
-+    epilog.append("EXAMPLE # dump everything in stdout in plain text format")
-+    epilog.append("EXAMPLE python dpm-dump.py --txt-path=")
-+    epilog.append("EXAMPLE # dump everything in compressed file in plain text format")
-+    epilog.append("EXAMPLE python dpm-dump.py --txt-path=,/tmp/dump.txt.gz")
-+    epilog.append("EXAMPLE # dump everything in compressed file in plain json format")
-+    epilog.append("EXAMPLE python dpm-dump.py --json-path=,/tmp/dump.json.bz2,vo=vo_name")
-+    epilog.append("EXAMPLE # dump everything in compressed file in plain xml format")
-+    epilog.append("EXAMPLE python dpm-dump.py --xml-path=,/tmp/dump.xml.lzma,vo=vo_name")
-+    epilog.append("EXAMPLE # dump several directories in different output files in different formats")
-+    epilog.append("EXAMPLE python dpm-dump.py --txt-path=/dpm/fqdn/home/atlas,/tmp/dump.atlas.txt --json-path=/dpm/fqdn/home/cms,/tmp/dump.cms.json,vo=cms")
-+    epilog.append("EXAMPLE # upload dump output to the storage directory using WebDAV")
-+    epilog.append("EXAMPLE python dpm-dump.py --txt-path=/dpm/fqdn/home/atlas,davs://dpmheadnode.fqdn:443/dpm/fqdn/home/atlas/dump.txt")
-+    epilog.append("EXAMPLE # upload dump output to the storage directory using XRootD")
-+    epilog.append("EXAMPLE python dpm-dump.py --txt-path=/dpm/fqdn/home/atlas,root://dpmheadnode.fqdn:1094//dpm/fqdn/home/atlas/dump.txt")
-+    epilog.append("EXAMPLE # plain text output format is configurable (header, line, footer)")
-+    epilog.append("EXAMPLE python dpm-dump.py --txt-header='# command: {command}{nl}# ...{nl}' --txt-format='{path}{nl}' --txt-footer='# records: {records}{nl}# ...{nl}# finished OK{nl}' --txt-path=/dpm/fqdn/home,/tmp/dump1.txt")
-+    epilog.append("EXAMPLE # each plain text output can also have individually configured (header, line, footer)")
-+    epilog.append("EXAMPLE python dpm-dump.py --txt-path='/dpm/fqdn/home,/tmp/dump2.txt,header=# header{nl},format={path};{metadata_fileid}{tab}{metadata_parent_fileid}{comma}{metadata_mtime}{nl},footer=# footer{nl}'")
-+    epilog.append("EXAMPLE # enable logging with given log level to the log file rotated at given size")
-+    epilog.append("EXAMPLE python dpm-dump.py --log-level=DEBUG --log-file=/var/log/dpm-dump.log --log-size=1048576 --log-backup=2 --txt-path=")
-+    epilog.append("EXAMPLE # dump directories from DPM")
-+    epilog.append("EXAMPLE python dpm-dump.py --filter-filemode=directory --txt-path=")
-+    epilog.append("EXAMPLE # dump data from DPM pools (mypool1, mypool2)")
-+    epilog.append("EXAMPLE python dpm-dump.py --filter-replica-pool=mypool1 --filter-replica-pool=mypool2 --txt-path=")
-+    epilog.append("EXAMPLE # dump data from DPM spacetokens (dteam, ops)")
-+    epilog.append("EXAMPLE python dpm-dump.py --filter-replica-spacetoken=dteam --filter-replica-spacetoken=ops --txt-path=")
-+    epilog.append("EXAMPLE # dump data from one diskserver plus one filesystem on the other diskserver")
-+    epilog.append("EXAMPLE python dpm-dump.py --filter-replica-hostfs=dpmpoolxx.fqdn --filter-replica-hostfs=dpmpoolyy.fqdn:/mnt/fs1 --txt-path=")
-+    epilog.append("EXAMPLE # dump pfn name from a diskserver plus one filesystem on different diskserver")
-+    epilog.append("EXAMPLE # with no replicas elsewhere (to declare temporarily unavailable Rucio files)")
-+    epilog.append("EXAMPLE python dpm-dump.py --filter-only-replica-hostfs=dpmpoolxx.fqdn --filter-only-replica-hostfs=dpmpoolyy.fqdn:/mnt/fs1 --txt-format='root://dpmheadnode.fqdn:1094/{path}{nl}' --txt-path=/dpm/fqdn/home/vo,file:///tmp/dump.vo-temporarily-unavailable.txt")
-+    epilog.append("EXAMPLE # dump all pfn and replicaid stored only on one diskserver including files")
-+    epilog.append("EXAMPLE # with special file/replica status (useful to cleanup DPM namespace after")
-+    epilog.append("EXAMPLE # you e.g. completely lose data on one diskserver)")
-+    epilog.append("EXAMPLE python dpm-dump.py --filter-replica-hostfs=dpmpoolxx.fqdn --txt-format=\"dmlite-shell -e 'replicadel {path} {replica_rowid}' # {replica_host}:{replica_fs}{nl}\" --txt-path=,file:///tmp/dump.dpmpoolxx.txt")
-+    epilog.append("EXAMPLE ")
-+    epilog.append("EXAMPLE # ATLAS storage dumps")
-+    epilog.append("EXAMPLE # ===================")
-+    epilog.append("EXAMPLE # storage dump for PRAGUELCG2 data and scratch disk")
-+    epilog.append("EXAMPLE python dpm-dump.py --log-level=INFO --log-file=/var/log/dpm-dump.log --txt-path=/dpm/farm.particle.cz/home/atlas/atlasdatadisk/rucio/,davs://golias100.farm.particle.cz:443/dpm/farm.particle.cz/home/atlas/atlasdatadisk/dumps/dump_20181228 --txt-path=/dpm/farm.particle.cz/home/atlas/atlasscratchdisk/rucio/,davs://golias100.farm.particle.cz:443/dpm/farm.particle.cz/home/atlas/atlasscratchdisk/dumps/dump_20181228")
-+    epilog.append("EXAMPLE # legacy (deprecated) dump for PRAGUELCG2 data disk")
-+    epilog.append("EXAMPLE python dpm-dump.py -t /tmp/ATLASDATADISK-dump_20181228 -p /dpm/farm.particle.cz/home/atlas/atlasdatadisk/rucio -a -1")
-+    epilog.append("EXAMPLE # temporarily unavailable PRAGUELCG2 pfn dump for rucio, one diskserver and one specific filesystem from second diskserver")
-+    epilog.append("EXAMPLE python dpm-dump.py --log-level=INFO --log-file=/var/log/dpm-dump.log --txt-path=/dpm/farm.particle.cz/home/atlas --txt-format='root://golias100.farm.particle.cz/{path}{nl}' --filter-only-replica-hostfs=dpmpool1.farm.particle.cz --filter-only-replica-hostfs=dpmpool2.farm.particle.cz:/mnt/fs7")
-+    description = "Dumps the content of a DPM storage element into a file that can be used for consistency checks (e.g. with Rucio database)."
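-+
-+    # Example (illustrative, hypothetical hosts): the filter options defined
-+    # below map onto the dump_data() filters dictionary, e.g.
-+    #   python dpm-dump.py --filter-replica-hostfs=disk1.fqdn --filter-replica-hostfs=disk2.fqdn:/mnt/fs1 --txt-path=
-+    # with the default file/status options results in
-+    #   filters = {'filemode': stat.S_IFREG, 'metadata_status': '-', 'replica_status': '-',
-+    #              'replica_hostfs': {'disk1.fqdn': [None], 'disk2.fqdn': ['/mnt/fs1']}}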
-+    parser = optparse.OptionParser(usage=usage, description=description, version="%prog", epilog=epilog, formatter=IndentedHelpFormatterWithEpilogExamples())
-+    parser.add_option("-v", "--verbose", dest="loglevel", action="callback", callback=opt_set_loglevel, default=logging.DEBUG, help="set log level to DEBUG")
-+    parser.add_option("-q", "--quiet", dest="loglevel", action="callback", callback=opt_set_loglevel, default=logging.ERROR, help="set log level to ERROR")
-+    parser.add_option("--log-level", dest="loglevel", action="callback", callback=opt_set_loglevel, type="string", help="set log level, default: %default")
-+    parser.add_option("--log-file", dest="logfile", metavar="FILE", help="set log file (default: %default)")
-+    parser.add_option("--log-size", dest="logsize", type="int", default=10*1024*1024, help="maximum size of log file, default: %default")
-+    parser.add_option("--log-backup", dest="logbackup", type="int", default=4, help="number of log backup files, default: %default")
-+    # db command line options
-+    parser.add_option("-c", "--nsconfig", dest="nsconfig", default=None, help="LEGACY OPTION, NO LONGER IN USE (NSCONFIG file with sql connection info)")
-+    parser.add_option('--dbhost', dest='dbhost', default=None, help="database host (overwrite host from NSCONFIG)")
-+    parser.add_option('--dbuser', dest='dbuser', default=None, help="database user (overwrite user from NSCONFIG)")
-+    parser.add_option('--dbpwd', dest='dbpwd', default=None, help="database password (overwrite password from NSCONFIG)")
-+    parser.add_option('--dbname', dest='dbname', default=None, help="database name (overwrite cns database from NSCONFIG)")
-+    parser.add_option('--dbdpm', dest='dbdpm', default=None, help="DPM database name (overwrite dpm database from NSCONFIG)")
-+    # legacy command line options
-+    parser.add_option("-x", "--xml", help="create output file in XML format (deprecated).", metavar="XMLFILE")
-+    parser.add_option("-j", "--json", help="create output file in JSON format (deprecated).", metavar="JSONFILE")
-+    parser.add_option("-t", "--txt", help="create output file in TXT format (deprecated).", metavar="TXTFILE")
-+    parser.add_option("-p", "--path", help="dump only files within this DPNS path (deprecated), default: %default", default="/", metavar="PATH")
-+    parser.add_option("-a", "--age", help="dump only files older than AGE days (warning), default: %default", metavar="AGE")
-+    parser.add_option("-D", "--date", help="dump only files up to the day before date, format YYYYMMDD (warning)", metavar="DATE")
-+    parser.add_option("-V", "--vo", help="VO information used by JSON and XML formatters, default: %default", default="none")
-+    parser.add_option("-F", "--filter", help="filter dpm diskserver or diskserver filesystem, default: %default", default=None)
-+    # new command line options
-+    parser.add_option("--xml-path", action="append", help="dump files within this DPNS path in XML format to stdout, local file or upload to DPM", default=[], metavar="DUMPPATH")
-+    parser.add_option("--json-path", action="append", help="dump files within this DPNS path in JSON format to stdout, local file or upload to DPM", default=[], metavar="DUMPPATH")
-+    parser.add_option("--txt-path", action="append", help="dump files within this DPNS path in TXT format to stdout, local file or upload to DPM", default=[], metavar="DUMPPATH")
-+    parser.add_option("--txt-header", help="TXT output file header format, default: %default", default=None, metavar="FORMAT")
-+    parser.add_option("--txt-footer", help="TXT output file footer format, default: %default", default=None, metavar="FORMAT")
-+    parser.add_option("--txt-format", help="TXT output file line format, default: %default", default="{rpath}{nl}", metavar="FORMAT")
-+    parser.add_option("--filter-filemode", help="filter file/symlink/directory type (None, File, Symlink, Directory), default: %default", default="File")
-+    parser.add_option("--filter-metadata-status", help="filter file metadata statuses (None, Online, Migrated), default: %default", default="Online")
-+    parser.add_option("--filter-replica-status", help="filter file replica statuses (None, Available, BeingPopulated, ToBeDeleted), default: %default", default="Available")
-+    parser.add_option("--filter-replica-pool", action="append", help="", default=[], metavar="POOL")
-+    parser.add_option("--filter-replica-spacetoken", action="append", help="", default=[], metavar="SPACETOKEN")
-+    parser.add_option("--filter-replica-hostfs", action="append", help="retrieve data for specific diskserver and optionally filesystem", default=[], metavar="HOSTFS")
-+    parser.add_option("--filter-only-replica-hostfs", action="append", help="retrieve data for replicas on given diskserver and optionally filesystem with no other replica on other diskservers and/or filesystems", default=[], metavar="HOSTFS")
-+    parser.add_option("--cert", help="path to host certificate used for data uploads, default: %default", default="/etc/grid-security/hostcert.pem")
-+    parser.add_option("--key", help="path to host key used for data uploads, default: %default", default="/etc/grid-security/hostkey.pem")
-+    parser.add_option("--tmp", help="custom directory for temporary dump files", default=None, metavar="PATH")
-+    parser.add_option("--keep-failed", action="store_true", help="keep temporary files in case output transfer fails, default: %default", default=False)
-+
-+    (options, args) = parser.parse_args(argv[1:])
-+
-+    if options.logfile == '-':
-+        _log.removeHandler(streamHandler)
-+        streamHandler = logging.StreamHandler(sys.stdout)
-+        streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S"))
-+        _log.addHandler(streamHandler)
-+    elif options.logfile != None and options.logfile != '':
-+        #fileHandler = logging.handlers.TimedRotatingFileHandler(options.logfile, 'midnight', 1, 4)
-+        fileHandler = logging.handlers.RotatingFileHandler(options.logfile, maxBytes=options.logsize, backupCount=options.logbackup)
-+        fileHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S"))
-+        _log.addHandler(fileHandler)
-+        _log.removeHandler(streamHandler)
-+
-+    script_name = os.path.abspath(inspect.getfile(inspect.currentframe()))
-+    _log.info("command: %s", " ".join(argv))
-+    _log.info("script: %s", script_name)
-+    _log.info("version: %s", __version__)
-+    _log.info("sha256: %s", hashlib.sha256(open(script_name, "rb").read()).hexdigest())
-+    _log.info("python: %s", str(sys.version_info))
-+    _log.info("user: %s@%s", getpass.getuser(), socket.gethostname())
-+    _log.info("system load: %s", str(os.getloadavg()))
-+
-+    # validate command line options
-+    timestamp = 0
-+    if options.date != None:
-+        timestamp = int(time.mktime(datetime.datetime.strptime(str(options.date), "%Y%m%d").timetuple()))
-+    elif options.age != None:
-+        now = int(time.time())
-+        age = int(options.age)
-+        timestamp = now - 86400 * age
-+
-+    if timestamp != 0 and timestamp < int(time.time()):
-+        _log.info("filtering older data according to ctime or mtime doesn't produce expected results for DPM in legacy (SRM) mode")
-+
-+    # database connection data (CLI parameters overwrite config)
-+    if not options.dbhost or not options.dbuser or not options.dbpwd or not options.dbname or not options.dbdpm:
-+        DBConn.configure('auto')
-+    if options.dbhost: DBConn.configure({'host': options.dbhost})
-+    if options.dbuser: DBConn.configure({'user': options.dbuser})
-+    if options.dbpwd: DBConn.configure({'pass': options.dbpwd})
-+    if options.dbname: DBConn.configure({'cns_db': options.dbname})
-+    if options.dbdpm: DBConn.configure({'dpm_db': options.dbdpm})
-+
-+    filters = {}
-+
-+    # filters for pool and spacetokens
-+    if len(options.filter_replica_pool) > 0:
-+        filters['replica_pool'] = options.filter_replica_pool
-+    if len(options.filter_replica_spacetoken) > 0:
-+        filters['replica_spacetoken'] = options.filter_replica_spacetoken
-+
-+    # filters for disknodes and filesystems
-+    if options.filter != None:
-+        host, fs = list(options.filter.split(':', 1)) if options.filter.find(':') != -1 else (options.filter, None)
-+        filters['replica_hostfs'].setdefault(host.lower(), []).append(fs)
-+    if len(options.filter_replica_hostfs) > 0:
-+        filters['replica_hostfs'] = {}
-+        for hostfs in options.filter_replica_hostfs:
-+            host, fs = list(hostfs.split(':', 1)) if hostfs.find(':') != -1 else (hostfs, None)
-+            filters['replica_hostfs'].setdefault(host.lower(), []).append(fs)
-+    if len(options.filter_only_replica_hostfs) > 0:
-+        filters['only_replica_hostfs'] = {}
-+        for hostfs in options.filter_only_replica_hostfs:
-+            host, fs = list(hostfs.split(':', 1)) if hostfs.find(':') != -1 else (hostfs, None)
-+            filters['only_replica_hostfs'].setdefault(host.lower(), []).append(fs)
-+
-+    # file metadata status filter (pydmlite.FileStatus)
-+    if options.filter_metadata_status.lower() == 'none':
-+        if 'metadata_status' in filters:  # no filtering on metadata file status
-+            del(filters['metadata_status'])
-+    elif options.filter_metadata_status.lower() in ('o', 'online', '-'):
-+        filters['metadata_status'] = '-'  # pydmlite.FileStatus.kOnline
-+    elif options.filter_metadata_status.lower() in ('m', 'migrated'):
-+        filters['metadata_status'] = 'm'  # pydmlite.FileStatus.kMigrated
-+    else:
-+        _log.error("invalid file metadata status filter \"%s\"", options.filter_metadata_status)
-+        return 1
-+
-+    # file replica status filter (pydmlite.ReplicaStatus)
-+    if options.filter_replica_status.lower() == 'none':
-+        if 'replica_status' in filters:  # no filtering on replica status
-+            del(filters['replica_status'])
-+    elif options.filter_replica_status.lower() in ('a', 'available', '-'):
-+        filters['replica_status'] = '-'  # pydmlite.ReplicaStatus.kAvailable
-+    elif options.filter_replica_status.lower() in ('p', 'beingpopulated'):
-+        filters['replica_status'] = 'p'  # pydmlite.ReplicaStatus.kBeingPopulated
-+    elif options.filter_replica_status.lower() in ('d', 'tobedeleted'):
-+        filters['replica_status'] = 'd'  # pydmlite.ReplicaStatus.kToBeDeleted
-+    else:
-+        _log.error("invalid file replica status filter \"%s\"", options.filter_replica_status)
-+        return 1
-+
-+    # file type filter (None, File, Symlink, Directory)
-+    if options.filter_filemode.lower() == 'none':
-+        if 'filemode' in filters:  # no filtering on file type
-+            del(filters['filemode'])
-+    elif options.filter_filemode.lower() == 'file':
-+        filters['filemode'] = stat.S_IFREG
-+    elif options.filter_filemode.lower() == 'symlink':
-+        filters['filemode'] = stat.S_IFLNK
-+    elif options.filter_filemode.lower() == 'directory':
-+        filters['filemode'] = stat.S_IFDIR
-+    
else: -+ _log.error("invalid file mode filter \"%s\"", options.filter_filemode) -+ return 1 -+ -+ # Configure requested dumps -+ oconfig = [] -+ curtime = datetime.datetime.isoformat(datetime.datetime.now()) -+ for format in ['xml', 'json', 'txt']: -+ -+ # legacy command line options -+ if getattr(options, format) != None: -+ opts = {'curtime': curtime, 'vo': options.vo} -+ output = getattr(options, format) -+ if output.startswith('/'): -+ opts['output'] = "file://{0}".format(output) -+ else: -+ opts['output'] = "file://{0}".format(os.path.realpath(output)) -+ oconfig.append((format, options.path, opts)) -+ -+ # new command line options -+ for params in getattr(options, "{0}_path".format(format), []): -+ path = None -+ opts = {'curtime': curtime} -+ for pos, param in enumerate(params.split(',')): -+ if pos == 0: path = param -+ elif pos == 1: opts['output'] = param -+ else: -+ # formatter specific key=value options -+ key, val = param.split('=', 1) -+ opts[key] = val -+ -+ if opts.get('output', '') == '': -+ opts['output'] = 'stdout://' -+ elif opts['output'].startswith('/'): # absolute path -+ opts['output'] = 'file://{0}'.format(opts['output']) -+ elif opts['output'].find('://') == -1: # no scheme - use as relative path -+ opts['output'] = "file://{0}".format(os.path.realpath(opts['output'])) -+ else: -+ ourl = urlparse.urlparse(opts['output']) -+ if ourl.scheme not in [ 'stdout', 'file', 'davs', 'https', 'root' ]: -+ _log.warn("skipping output with unsupported protocol scheme: %s", ourl.geturl()) -+ continue -+ -+ oconfig.append((format, path, opts)) -+ -+ for format, path, opts in oconfig: -+ # parse command line options for given output format -+ for k in list(options.__dict__.keys()): -+ prefix = "{0}_".format(format) -+ if not k.startswith(prefix): continue -+ key = k[len(prefix):] -+ if key in opts: continue -+ opts[key] = getattr(options, k) -+ -+ if len(oconfig) == 0: -+ _log.error("no output defined") -+ return 1 -+ -+ try: -+ dump_and_store(oconfig, filters, options.cert, options.key, timestamp=timestamp, tmp_dir=options.tmp, keep_failed=options.keep_failed) -+ except Exception as e: -+ _log.error("dump&store unexpected failure: %s", str(e)) -+ return 1 -+ -+ return os.EX_OK -diff --git a/shell/src/executor.py b/shell/src/executor.py -index de5d5af5..3a01f882 100644 ---- a/shell/src/executor.py -+++ b/shell/src/executor.py -@@ -1,5 +1,19 @@ -+import re - import json --import subprocess -+import ssl -+import subprocess # used by execute_old -+import logging -+ -+# compatibility with python 2 and 3 -+try: -+ from urllib.parse import urlparse -+ from http.client import HTTPConnection, HTTPSConnection -+except ImportError: -+ from urlparse import urlparse -+ from httplib import HTTPConnection, HTTPSConnection -+ -+ -+_log = logging.getLogger('dmlite-shell') - - - class DomeCredentials(object): -@@ -37,7 +51,7 @@ class DomeTalker(object): - self.verb = verb - self.cmd = cmd - -- def execute(self, data): -+ def execute_old(self, data): - cmd = ["davix-http"] - if self.creds.cert: - cmd += ["--cert", self.creds.cert] -@@ -57,55 +71,377 @@ class DomeTalker(object): - - cmd.append(DomeTalker.build_url(self.uri, self.cmd)) - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) -- out = proc.communicate()[0] -- err = False -+ out = proc.communicate()[0].decode('utf-8') - if "(Davix::HttpRequest) Error:" in out: -- err = True -- return (out, err) -+ return (out, 500) -+ return (out, 200) -+ -+ def execute_new(self, params): -+ url = 
urlparse(DomeTalker.build_url(self.uri, self.cmd)) -+ -+ hdrs = {} -+ #hdrs['Content-Type'] = 'application/json' -+ if self.creds.clientDN: -+ hdrs['remoteclientdn'] = self.creds.clientDN -+ if self.creds.clientAddress: -+ hdrs['remoteclienthost'] = self.creds.clientAddress -+ hdrs['remoteclient'] = 'root' -+ hdrs['remoteclientgroups'] = '0' -+ -+ body = json.dumps(filterDict(params)) -+ -+ data = 'Unexpected error calling DPM DOME' -+ status = 500 -+ -+ redirects = 0 -+ max_redirects = 3 -+ method = self.verb -+ conn = {} -+ -+ try: -+ while True: -+ if redirects >= max_redirects: -+ raise Exception("Too many redirections") -+ -+ scheme = url.scheme -+ host = url.hostname -+ port = url.port if url.port else 1094 -+ path = "%s?%s" % (url.path, url.query) if url.query else url.path -+ -+ if conn.get('scheme') != scheme or conn.get('host') != host or conn.get('port') != port: -+ if conn.get('conn'): -+ conn['conn'].close() -+ -+ if url.scheme == 'https': -+ # configure SSL context -+ context = ssl.create_default_context() -+ if self.creds.cert: -+ context.load_cert_chain(certfile=self.creds.cert, keyfile=self.creds.key) -+ context.verify_mode = ssl.CERT_REQUIRED -+ if self.creds.capath: -+ context.load_verify_locations(capath=self.creds.capath) -+ -+ conn['conn'] = HTTPSConnection(host, port=port, context=context) -+ -+ else: -+ conn['conn'] = HTTPConnection(host, port=port) -+ -+ conn['scheme'] = scheme -+ conn['host'] = host -+ conn['port'] = port -+ -+ # submit HTTPS request -+ _log.debug("New %s %s connection %s:%s%s", url.scheme.upper(), method, host, port, path) -+ conn['conn'].request(method=method, url=path, headers=hdrs, body=body) -+ response = conn['conn'].getresponse() -+ -+ if response.status in [301, 302, 303, 307]: -+ location = response.getheader('Location') -+ if not location: -+ raise Exception("HTTP redirection without Location header") -+ -+ url = urlparse(location) -+ method = 'GET' -+ -+ redirects += 1 -+ continue -+ -+ break -+ -+ data = response.read().decode('utf-8') -+ status = response.status -+ -+ except Exception as e: -+ _log.error("failed to talk with DOME: %s", str(e)) -+ data = str(e) -+ -+ finally: -+ if conn.get('conn'): -+ conn['conn'].close() -+ -+ return (data, status) -+ -+ execute = execute_old - - - class DomeExecutor(object): - """Wrapper around DomeTalker""" - -- def __init__(self, cert, key, capath, clientDN, clientAddress): -+ def __init__(self, url, cert, key, capath, clientDN, clientAddress): - self.creds = DomeCredentials(cert, key, capath, clientDN, clientAddress) -+ self.url = url -+ -+ def _simple_response(self, verb, cmd, params=None): -+ if params is None: params = {} -+ talker = DomeTalker(self.creds, self.url, verb, cmd) -+ data, status = talker.execute(params) -+ if status >= 400: -+ if not data: -+ return (None, "%s request failed" % cmd) -+ return (None, "[HTTP%i] %s" % (status, data)) -+ else: -+ return (data, None) -+ -+ def _json_response(self, verb, cmd, params=None): -+ data, error = self._simple_response(verb, cmd, params) -+ if error: -+ return (None, error) -+ -+ if not data: -+ return ({}, None) -+ -+ try: -+ jdata = json.loads(data) -+ return (jdata, None) -+ except ValueError: -+ #except json.JSONDecodeError as e: # python 3.5 -+ _log.error("Unable to parse %s %s(%s) response: %s", verb, cmd, str(params), str(data)) -+ return (None, "Failed to parse %s response" % cmd) -+ -+ def info(self): -+ data, err = self._simple_response('GET', 'dome_info') -+ if err: -+ return (None, err) -+ -+ ret = {'data': data} -+ for line in 
data.split('\n'): -+ res = re.match(r'^dome \[(.*?)\] running as (\S+)', line) -+ if res: -+ ret['version'] = res.group(1) -+ ret['flavor'] = res.group(2) -+ res = re.match(r'^Total: (\d+) .*Free: (\d+)', line) -+ if res: -+ ret['space_total'] = int(res.group(1)) -+ ret['space_free'] = int(res.group(2)) -+ res = re.match(r'^Server PID: (\d+)', line) -+ if res: -+ ret['pid'] = int(res.group(1)) -+ res = re.match(r'^Request rate: (.*?)Hz \(Peak: (.*?)Hz\) .*DB queries: (.*?)Hz .*DB transactions: (.*?)Hz .*Intercluster messages: (.*?)Hz', line) -+ if res: -+ ret['request_rate'] = float(res.group(1)) -+ ret['request_rate_peak'] = float(res.group(2)) -+ ret['db_query_rate'] = float(res.group(3)) -+ ret['db_transaction_rate'] = float(res.group(4)) -+ ret['intercluster_rate'] = float(res.group(5)) -+ res = re.match(r'^Queue checksum: (\d+) .*Queue file pull: (\d+)', line) -+ if res: -+ ret['queue_checksum'] = int(res.group(1)) -+ ret['queue_filepull'] = int(res.group(2)) -+ -+ return (ret, None) -+ -+ def addpool(self, pool, defsize=None, stype=None): -+ return self._simple_response('POST', 'dome_addpool', {"poolname": pool, "pool_defsize": defsize, "pool_stype": stype}) -+ -+ def modifypool(self, pool, defsize=None, stype=None): -+ return self._simple_response('POST', 'dome_modifypool', {"poolname": pool, "pool_defsize": defsize, "pool_stype": stype}) -+ -+ def rmpool(self, pool): -+ return self._simple_response('POST', 'dome_rmpool', {"poolname": pool}) -+ -+ def addfstopool(self, server, fs, pool, status): -+ return self._simple_response('POST', 'dome_addfstopool', {"server": server, "fs": fs, "poolname": pool, "status": status}) -+ -+ def modifyfs(self, server, fs, pool, status): -+ return self._simple_response('POST', 'dome_modifyfs', {"server": server, "fs": fs, "poolname": pool, "status": status}) -+ -+ def rmfs(self, server, fs): -+ return self._simple_response('POST', 'dome_rmfs', {"server": server, "fs": fs}) -+ -+ def getspaceinfo(self): -+ return self._json_response('GET', 'dome_getspaceinfo') -+ -+ def statpool(self, pool): -+ return self._json_response('GET', 'dome_statpool', {"poolname": pool}) -+ -+ def getquotatoken(self, lfn, getparentdirs, getsubdirs): -+ return self._json_response('GET', 'dome_getquotatoken', {"path": lfn, "getsubdirs": getsubdirs, "getparentdirs": getparentdirs}) -+ -+ def setquotatoken(self, lfn, pool, space, desc, groups): -+ return self._simple_response('POST', 'dome_setquotatoken', {"path": lfn, "poolname": pool, "quotaspace": space, "description": desc, "groups": groups}) -+ -+ def modquotatoken(self, s_token, lfn, pool, space, desc, groups): -+ return self._simple_response('POST', 'dome_modquotatoken', {"tokenid": s_token, "path": lfn, "poolname": pool, "quotaspace": space, "description": desc, "groups": groups}) -+ -+ def getdirspaces(self, lfn): -+ return self._json_response('GET', 'dome_getdirspaces', {"path": lfn}) -+ -+ def delquotatoken(self, lfn, pool): -+ return self._simple_response('POST', 'dome_delquotatoken', {"path": lfn, "poolname": pool}) -+ -+ def chooseserver(self, lfn): -+ return self._json_response('GET', 'dome_chooseserver', {"lfn": lfn}) -+ -+ def getreplicainfo(self, replicaid=None, rfn=None): -+ return self._json_response('GET', 'dome_getreplicainfo', {"replicaid": replicaid, "rfn": rfn}) -+ -+ def getreplicavec(self, lfn): -+ return self._json_response('GET', 'dome_getreplicavec', {"lfn": lfn}) -+ -+ def updatereplica(self, rfn, replicaid, status, type, setname, xattr): -+ return self._simple_response('POST', 
'dome_updatereplica', {"rfn": rfn, "replicaid": replicaid, "status": status, "type": type, "setname": setname, "xattr": xattr}) -+ -+ def accessreplica(self, rfn, mode): -+ return self._simple_response('GET', 'dome_accessreplica', {"rfn": rfn, "mode": mode}) -+ -+ def addreplica(self, rfn, status, type, setname, xattr): -+ return self._simple_response('POST', 'dome_addreplica', {"rfn": rfn, "status": status, "type": type, "setname": setname, "xattr": xattr}) -+ -+ def delreplica(self, server, pfn): -+ return self._simple_response('POST', 'dome_delreplica', {"server": server, "pfn": pfn}) -+ -+ def getstatinfo(self, server=None, pfn=None, rfn=None, lfn=None): -+ return self._json_response('GET', 'dome_getstatinfo', {"server": server, "pfn": pfn, "rfn": rfn, "lfn": lfn}) -+ -+ def statpfn(self, pfn): -+ return self._json_response('GET', 'dome_statpfn', {"pfn": pfn}) -+ -+ def pfnrm(self, pfn): -+ return self._simple_response('POST', 'dome_pfnrm', {"pfn": pfn}) -+ -+ def access(self, lfn, mode): -+ return self._json_response('GET', 'dome_access', {"path": lfn, "mode": mode}) -+ -+ def getdir(self, lfn): -+ return self._json_response('GET', 'dome_getdir', {"path": lfn}) -+ -+ def makedir(self, lfn, mode=0o0755): -+ return self._simple_response('POST', 'dome_makedir', {"path": lfn, "mode": mode}) -+ -+ def removedir(self, lfn): -+ return self._simple_response('POST', 'dome_removedir', {"path": lfn}) -+ -+ def create(self, lfn, mode=0o0644): -+ return self._simple_response('POST', 'dome_create', {"path": lfn, "mode": mode}) -+ -+ def unlink(self, lfn, ignorebrokenfs=False, ignorereadonlyfs=True, ignorefiledeletionerr=False): -+ return self._simple_response('POST', 'dome_unlink', {"lfn": lfn, "ignorebrokenfs": ignorebrokenfs, "ignorereadonlyfs": ignorereadonlyfs, "ignorefiledeletionerr": ignorefiledeletionerr}) -+ -+ def get(self, lfn): -+ return self._json_response('GET', 'dome_get', {"lfn": lfn}) -+ -+ def put(self, lfn, pool=None, server=None, fs=None, additionalreplica=False): -+ return self._json_response('POST', 'dome_put', {"lfn": lfn, "pool": pool, "host": server, "fs": fs, "additionalreplica": "true" if additionalreplica else "false"}) -+ -+ def putdone(self, server, pfn, size=0, checksumtype=None, checksumvalue=None): -+ return self._simple_response('POST', 'dome_putdone', {"server": server, "pfn": pfn, "size": size, "checksumtype": checksumtype, "checksumvalue": checksumvalue}) -+ -+ def chksum(self, checksumtype, lfn, pfn=None, force=False): -+ return self._simple_response('GET', 'dome_chksum', {"checksum-type": checksumtype, "lfn": lfn, "pfn": pfn, "force-recalc": force}) -+ -+ def chksumstatus(self, checksumtype, lfn, pfn, status, checksum, update_lfn=False, reason=None): -+ return self._simple_response('POST', 'dome_chksumstatus', {"checksum-type": checksumtype, "lfn": lfn, "pfn": pfn, "status": status, "checksum": checksum, "update-lfn-checksum": update_lfn, "reason": reason}) -+ -+ def dochksum(self, checksumtype, lfn, pfn): -+ return self._simple_response('POST', 'dome_dochksum', {"checksum-type": checksumtype, "lfn": lfn, "pfn": pfn}) -+ -+ def setchecksum(self, lfn, checksumtype, checksumvalue): -+ return self._simple_response('POST', 'dome_setchecksum', {"lfn": lfn, "checksum-type": checksumtype, "checksum-value": checksumvalue}) -+ -+ def rename(self, oldlfn, newlfn): -+ return self._simple_response('POST', 'dome_rename', {"oldpath": oldlfn, "newpath": newlfn}) -+ -+ def symlink(self, lfn, target): -+ return self._simple_response('POST', 'dome_symlink', {"link": lfn, 
"target": target}) -+ -+ def readlink(self, lfn): -+ return self._json_response('GET', 'dome_readlink', {"lfn": lfn}) -+ -+ def setowner(self, lfn, uid, gid): -+ return self._simple_response('POST', 'dome_setowner', {"path": lfn, "uid": uid, "gid": gid}) -+ -+ def setmode(self, lfn, mode): -+ return self._simple_response('POST', 'dome_setmode', {"path": lfn, "mode": mode}) -+ -+ def setutime(self, lfn, actime, modtime): -+ return self._simple_response('POST', 'dome_setutime', {"path": lfn, "actime": actime, "modtime": modtime}) -+ -+ def setsize(self, lfn, size): -+ return self._simple_response('POST', 'dome_setsize', {"path": lfn, "size": size}) -+ -+ def setacl(self, lfn, acl): -+ return self._simple_response('POST', 'dome_setacl', {"path": lfn, "acl": acl}) -+ -+ def updatexattr(self, lfn=None, fileid=None, xattr=None): -+ return self._simple_response('POST', 'dome_updatexattr', {"lfn": lfn, "fileid": fileid, "xattr": xattr}) -+ -+ def getcomment(self, lfn=None, fileid=None): -+ return self._json_response('GET', 'dome_getcomment', {"lfn": lfn, "fileid": fileid}) -+ -+ def setcomment(self, lfn=None, fileid=None, comment=None): -+ return self._json_response('POST', 'dome_setcomment', {"lfn": lfn, "fileid": fileid, "comment": comment}) -+ -+ def newuser(self, username): -+ return self._simple_response('POST', 'dome_newuser', {"username": username}) -+ -+ def getuser(self, userid=None, username=None): -+ return self._json_response('GET', 'dome_getuser', {"userid": userid, "username": username}) -+ -+ def getusersvec(self): -+ return self._json_response('GET', 'dome_getusersvec') -+ -+ def updateuser(self, userid=None, username=None, banned=None, xattr=None): -+ return self._simple_response('POST', 'dome_updateuser', {"userid": userid, "username": username, "banned": banned, "xattr": xattr}) -+ -+ def deleteuser(self, username): -+ return self._simple_response('POST', 'dome_deleteuser', {"username": username}) -+ -+ def newgroup(self, groupname): -+ return self._simple_response('POST', 'dome_newgroup', {"groupname": groupname}) -+ -+ def getgroup(self, groupid=None, groupname=None): -+ return self._json_response('GET', 'dome_groupuser', {"groupid": groupid, "groupname": groupname}) -+ -+ def getgroupsvec(self): -+ return self._json_response('GET', 'dome_getgroupsvec') -+ -+ def updategroup(self, groupid=None, groupname=None, banned=None, xattr=None): -+ return self._simple_response('POST', 'dome_updategroup', {"groupid": groupid, "groupname": groupname, "banned": banned, "xattr": xattr}) -+ -+ def deleteroup(self, groupname): -+ return self._simple_response('POST', 'dome_deletegroup', {"groupname": groupname}) -+ -+ def getidmap(self, username, groupnames=None): -+ return self._json_response('GET', 'dome_getidmap', {"username": username, "groupnames": groupnames}) - -- def addFsToPool(self, url, fs, pool, server, status): -- talker = DomeTalker(self.creds, url, "POST", "dome_addfstopool") -- return talker.execute({"fs": fs, "poolname": pool, "server": server, "status": status}) - -- def modifyFs(self, url, fs, pool, server, status): -- talker = DomeTalker(self.creds, url, "POST", "dome_modifyfs") -- return talker.execute({"fs": fs, "poolname": pool, "server": server, "status": status}) - -- def rmFs(self, url, fs, server): -- talker = DomeTalker(self.creds, url, "POST", "dome_rmfs") -- return talker.execute({"fs": fs, "server": server}) -+if __name__ == '__main__': -+ import sys -+ import socket -+ from M2Crypto import X509 - -- def getSpaceInfo(self, url): -- talker = DomeTalker(self.creds, 
url, "GET", "dome_getspaceinfo") -- return talker.execute({}) -+ # basic logging configuration -+ streamHandler = logging.StreamHandler(sys.stderr) -+ streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S")) -+ _log.addHandler(streamHandler) -+ _log.setLevel(logging.DEBUG) - -- def statPool(self, url, pool): -- talker = DomeTalker(self.creds, url, "GET", "dome_statpool") -- return talker.execute({"poolname": pool}) -+ dome_host = socket.getfqdn() -+ dome_port = 1094 -+ #dome_port = 1095 -+ dome_urlprefix = '/domehead' -+ #dome_urlprefix = '/domedisk' -+ dome_cert = '/etc/grid-security/dpmmgr/dpmcert.pem' -+ dome_key = '/etc/grid-security/dpmmgr/dpmkey.pem' -+ dome_capath = '/etc/grid-security/certificates/' - -- def getquotatoken(self, url, lfn, getparentdirs, getsubdirs): -- talker = DomeTalker(self.creds, url, "GET", "dome_getquotatoken") -- return talker.execute({"path": lfn, "getsubdirs": getsubdirs, "getparentdirs": getparentdirs}) -+ dome_x509 = X509.load_cert(dome_cert, X509.FORMAT_PEM) -+ dome_hostDN = '/' + '/'.join(dome_x509.get_subject().as_text().split(', ')) -+ dome_command_base = "https://{0}:{1}{2}".format(dome_host, dome_port, dome_urlprefix) - -- def setquotatoken(self, url, lfn, pool, space, desc, groups): -- talker = DomeTalker(self.creds, url, "POST", "dome_setquotatoken") -- return talker.execute({"path": lfn, "poolname": pool, "quotaspace": space, "description": desc, "groups": groups}) -+ # use new talker execute method that rely on python -+ # http.client instead of calling davix-http in subprocess -+ DomeTalker.execute = DomeTalker.execute_new - -- def modquotatoken(self, url, s_token, lfn, pool, space, desc, groups): -- talker = DomeTalker(self.creds, url, "POST", "dome_modquotatoken") -- return talker.execute({"tokenid": s_token, "path": lfn, "poolname": pool, "quotaspace": space, "description": desc, "groups": groups}) -+ executor = DomeExecutor(dome_command_base, dome_cert, dome_key, dome_capath, dome_hostDN, dome_host) - -- def getdirspaces(self, url, lfn): -- talker = DomeTalker(self.creds, url, "GET", "dome_getdirspaces") -- return talker.execute({"path": lfn}) -+ data, error = executor.info() -+ if error: -+ raise Exception("Unable to get DPM DOME info ({0})".format(error)) -+ print(data) - -- def delquotatoken(self, url, lfn, pool): -- talker = DomeTalker(self.creds, url, "POST", "dome_delquotatoken") -- return talker.execute({"path": lfn, "poolname": pool}) -+ data, error = executor.getspaceinfo() -+ if error: -+ raise Exception("Unable to get DPM DOME storage info ({0})".format(error)) -+ print(data) -diff --git a/shell/src/infoutils.py b/shell/src/infoutils.py -old mode 100755 -new mode 100644 -index 7e00b196..f1cad529 ---- a/shell/src/infoutils.py -+++ b/shell/src/infoutils.py -@@ -2,29 +2,39 @@ from __future__ import division - import json - import re - import subprocess -+import os - import sys - from datetime import datetime - --import ldif -+if sys.version_info[:2] < (2, 7): -+ # python-asn1 dependency for python-ldap3 package -+ # doesn't work on SLC6 with python 2.6 -+ import ldif -+else: -+ import ldap3 - import rpm - --from dmliteshell import executor -+from .executor import DomeExecutor - - - class SystemInfo(object): - """All necessary info on the DPM and the system""" - -- def __init__(self, config=None): -+ def __init__(self, host, cert, key): -+ self.host = host -+ self.cert = cert -+ self.key = key -+ self.capath = '/etc/grid-security/certificates/' - self.ts = 
rpm.TransactionSet() -- self.config = config - self.ports = set([]) - self.getlisteningports() - - def getlisteningports(self): - """Check which ports are being listened on""" - try: -- pipe_out_err = subprocess.Popen("ss -tln", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() -- for listen in pipe_out_err[0].split('\n'): -+ devnull = os.open(os.devnull, os.O_RDWR) # subprocess.DEVNULL only since python 3.3 -+ pipe_out, _ = subprocess.Popen("ss -tln", shell=True, stdout=subprocess.PIPE, stderr=devnull).communicate() -+ for listen in pipe_out.decode('utf-8').split('\n'): - m = re.search(r":([0-9]+)\s", listen) - if m != None: - self.ports.add(int(m.group(1))) -@@ -95,14 +105,11 @@ class SystemInfo(object): - def getspaces(self): - """Find all the StorageShares on this DPM - and return an array of them""" -- e = executor.DomeExecutor(self.config.cert, self.config.key, "/etc/grid-security/certificates/", "", "") -- gqt, err = e.getquotatoken("https://" + self.config.host + ":1094/domehead/command", "/", 0, 1) -+ dome_command_base = "https://" + self.host + ":1094/domehead" -+ executor = DomeExecutor(dome_command_base, self.cert, self.key, self.capath, "", "") -+ jgqt, err = executor.getquotatoken("/", 0, 1) - if err: -- raise Exception("{0}: Error contacting Dome on host {1} ({2})".format(sys.argv[0], self.config.host, str(gqt).strip())) -- try: -- jgqt = json.loads(gqt) -- except Exception as X: -- raise Exception("{0}: Error loading Dome output ({1})".format(sys.argv[0], str(X))) -+ raise Exception("{0}: Error on host {1} ({2})".format(sys.argv[0], self.host, str(err))) - totalcapacity = 0 - totalused = 0 - totalgroups = set([]) -@@ -131,18 +138,42 @@ class Entry(object): - self.name = "" - self.Attributes = {} - self.Attributes["GLUE2EntityCreationTime"] = [datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'] -- self.ldif_writer = ldif.LDIFWriter(sys.stdout, cols=1000000) -+ if sys.version_info[:2] < (2, 7): -+ self.ldif_writer = ldif.LDIFWriter(sys.stdout, cols=1000000) -+ else: -+ self.ldif_writer = ldap3.Connection(None, client_strategy='LDIF') - - def add_child(self, entry): - self.Children.append(entry) - -+ def convert(self, data): -+ ret = {} -+ for k, v in data.items(): -+ ret[k] = list(map(lambda x: x.encode('utf-8'), v)) -+ return ret -+ - def print_out(self, parent=None): - """ Recursively print out with all children""" - if parent == None: - dn = self.name + "=" + self.Attributes[self.name][0] - else: - dn = self.name + "=" + self.Attributes[self.name][0] + "," + parent -- self.ldif_writer.unparse(dn, self.Attributes) -+ bAttributes = {} -+ if sys.version_info[:2] < (2, 7): -+ # LDIFWriter works only with string dictionary keys -+ # and list of bytes as dictionary attribute values -+ for k, v in self.Attributes.items(): -+ bAttributes[k] = list(map(lambda x: x.encode('utf-8'), v)) -+ self.ldif_writer.unparse(dn, bAttributes) -+ else: -+ bClasses = None -+ for k, v in self.Attributes.items(): -+ if k.lower() == 'objectclass': -+ bClasses = list(map(lambda x: x.encode('utf-8'), v)) -+ else: -+ bAttributes[k] = list(map(lambda x: x.encode('utf-8'), v)) -+ sys.stdout.write(self.ldif_writer.add(dn, bClasses, bAttributes)) -+ sys.stdout.write('\n') -+ - for e in self.Children: - e.print_out(dn) - -@@ -157,10 +188,10 @@ class Entry(object): - class Service(Entry): - """A GLUE2Service""" - -- def __init__(self, config): -+ def __init__(self, hostname, sitename): - Entry.__init__(self) - self.name = "GLUE2ServiceID" -- self.Attributes["GLUE2ServiceID"] = [config.host + 
"/Service"] -+ self.Attributes["GLUE2ServiceID"] = [hostname + "/Service"] - self.Attributes["GLUE2ServiceType"] = ["dpm"] - self.Attributes["GLUE2ServiceQualityLevel"] = ["production"] - self.Attributes["GLUE2ServiceCapability"] = [ -@@ -172,7 +203,7 @@ class Service(Entry): - 'security.authentication', - 'security.authorization', - ] -- self.Attributes["GLUE2ServiceAdminDomainForeignKey"] = [config.sitename] -+ self.Attributes["GLUE2ServiceAdminDomainForeignKey"] = [sitename] - self.Attributes["ObjectClass"] = ["GLUE2Service", "GLUE2StorageService"] - - -diff --git a/shell/src/interpreter.py b/shell/src/interpreter.py -index 64641a02..574185b3 100644 ---- a/shell/src/interpreter.py -+++ b/shell/src/interpreter.py -@@ -11,36 +11,41 @@ import inspect - import sys - import re - import time -+import datetime - import dateutil.parser -+import json - import pycurl - import traceback -+import threading -+import signal -+import socket -+import stat -+import logging -+ -+from io import StringIO -+from M2Crypto import X509 - - try: -- from urllib.parse import unquote -+ from urllib.parse import urlparse, unquote -+ from queue import Queue - except ImportError: -+ from urlparse import urlparse - from urllib import unquote -+ from Queue import Queue -+ - from .dbutils import DPMDB --import threading --try: -- import queue as Queue --except ImportError: -- import Queue --import signal --import socket - from .executor import DomeExecutor --import json --import pprint --try: -- import io as StringIO --except ImportError: -- import StringIO --from M2Crypto import X509 - from . import utils - from . import argus -+from . import dbck -+from . import dump -+from . import lost -+from . import srr -+from . import star - - try: - import dpm2 --except: -+except ImportError: - pass - - IMPATIENCE = 0 -@@ -51,6 +56,7 @@ fsstatusbycode = dict((v, k) for k, v in fsstatus.items()) - activitystatus = {'UNKNOWN': 0, 'ONLINE': 1, 'DOWN': 2} - activitystatusbycode = dict((v, k) for k, v in activitystatus.items()) - -+_log = logging.getLogger('dmlite-shell') - - class DMLiteInterpreter(object): - """ -@@ -64,17 +70,18 @@ class DMLiteInterpreter(object): - self.lastCompleted = 0 - self.lastCompletedState = 0 - #read DN from certificate file -+ self.host = socket.getfqdn() - self.hostcert = "/etc/grid-security/dpmmgr/dpmcert.pem" - self.hostkey = "/etc/grid-security/dpmmgr/dpmkey.pem" - self.capath = "/etc/grid-security/certificates" - try: - x509 = X509.load_cert(self.hostcert, X509.FORMAT_PEM) - except: -- return self.error('Failed to load host certificate') -+ self.error('Failed to load host certificate') - self.hostDN = '/' + '/'.join(x509.get_subject().as_text().split(', ')) - -- self.executor = DomeExecutor(self.hostcert, self.hostkey, self.capath, self.hostDN, socket.getfqdn()) -- self.domeheadurl = "https://" + socket.getfqdn() + ":1094/domehead" -+ self.domeheadurl = "https://" + self.host + ":1094/domehead" -+ self.executor = DomeExecutor(self.domeheadurl, self.hostcert, self.hostkey, self.capath, self.hostDN, self.host) - - # just a placeholder variables that are used by replication - # code to pass data between different objects -@@ -130,7 +137,7 @@ class DMLiteInterpreter(object): - return not self.failed - - def doIndentation(self, msg, firstLine, indentation): -- exp = re.compile('\\n[^\\S\\r\\n]*') # selects all -+ exp = re.compile(r'\n[^\S\r\n]*') # selects all - return exp.sub("\n" + indentation, firstLine + msg.lstrip()) - - def exitShell(self, msg=''): -@@ -152,7 +159,7 @@ class 
DMLiteInterpreter(object): - coptions = c.completer(text) - self.completionOptions.extend(coptions) - except Exception as e: # look out for errors! -- print(e.__str__()) -+ print(str(e)) - - # return the correct option - try: -@@ -186,7 +193,7 @@ class DMLiteInterpreter(object): - def listDirectory(self, directory, readComments=False): - # list information about files in a directory - try: -- hDir = self.catalog.openDir(directory) -+ hDir = self.catalog.openDir(str(directory)) - except: - return -1 - -@@ -201,19 +208,19 @@ class DMLiteInterpreter(object): - finfo['size'] = 0 - finfo['isDir'] = True - else: -- finfo['prettySize'] = self.prettySize(f.stat.st_size) -+ finfo['prettySize'] = utils.prettySize(f.stat.st_size) - finfo['size'] = f.stat.st_size - finfo['isDir'] = False - finfo['name'] = f.name - finfo['isLnk'] = f.stat.isLnk() - if finfo['isLnk']: -- finfo['link'] = self.catalog.readLink(os.path.join(directory, f.name)) -+ finfo['link'] = self.catalog.readLink(str(os.path.join(directory, f.name))) - else: - finfo['link'] = '' - - if readComments: - try: -- finfo['comment'] = self.catalog.getComment(os.path.join(directory, f.name)) -+ finfo['comment'] = self.catalog.getComment(str(os.path.join(directory, f.name))) - except Exception as e: - finfo['comment'] = '' - -@@ -231,12 +238,12 @@ class DMLiteInterpreter(object): - if f['isDir']: - self.remove_recursive(name) - else: -- self.catalog.unlink(name) -- self.catalog.removeDir(folder) -+ self.catalog.unlink(str(name)) -+ self.catalog.removeDir(str(folder)) - - def list_folder(self, folder): - try: -- hDir = self.catalog.openDir(folder) -+ hDir = self.catalog.openDir(str(folder)) - except: - self.error("cannot open the folder: " + folder) - return [] -@@ -327,7 +334,7 @@ class ShellCommand(object): - - def moreHelp(self): - """Returns the syntax description of the command and a help text.""" -- return self.syntax() + '\n' + self.interpreter.doIndentation(self.description, ' ', ' ') -+ return self.syntax() + '\n ' + self.description.replace('\n', '\n ') - - def prettyParameter(self, parameter): - """Return the human readable format of a parameter""" -@@ -342,35 +349,39 @@ class ShellCommand(object): - return '/'.join(p for p in parameter) - return '<' + parameter[1:] + '>' - -- def prettySize(self, size=0): -- return self.interpreter.prettySize(size) -- - def checkSyntax(self, given): -- if len(given) > len(self.parameters): -+ params = self.parameters[:] -+ if len(params) > 0 and params[-1] == '*?...': -+ params.pop() -+ elif len(given) > len(params): - return self.syntaxError() -- for i in range(len(self.parameters)): -- if self.parameters[i].startswith('*'): -- ptype = self.parameters[i][1:2] -+ -+ for i, param in enumerate(params): -+ if param.startswith('*'): -+ ptype = param[1:2] - if i > len(given) - 1: - continue - elif i > len(given) - 1: - return self.syntaxError() - else: -- ptype = self.parameters[i][0:1] -+ ptype = param[0:1] - - # check for type and check if correct type - if ptype == 'C': - if given[i] not in list(c.name for c in self.interpreter.commands): - return self.syntaxError('Unknown command "' + given[i] + '".') -+ - elif ptype == 'F': - if not os.path.exists(given[i]): - return self.syntaxError('File "' + given[i] + '" does not exist.') -+ - elif ptype == 'D': - # check if file exists in DMLite - try: -- f = self.interpreter.catalog.extendedStat(given[i], False) -+ f = self.interpreter.catalog.extendedStat(str(given[i]), False) - except Exception as e: - return self.syntaxError('File "' + given[i] + '" does not 
exist.') -+ - elif ptype == 't' or ptype == 'T': - # check if a valid date/time is given - try: -@@ -381,6 +392,7 @@ class ShellCommand(object): - time.mktime(given[i]) - except Exception as e: - return self.syntaxError('Date/time expression expected.') -+ - elif ptype in ['G', 'g']: - # check if a valid group name or group ID is given - # lower case letter match also special character * -@@ -400,6 +412,7 @@ class ShellCommand(object): - return self.syntaxError('Group name or group ID expected.') - except Exception as e: - pass -+ - elif ptype in ['U', 'u']: - # check if a valid user name or user ID is given - # lower case letter match also special character * -@@ -419,9 +432,10 @@ class ShellCommand(object): - return self.syntaxError('User name or user ID expected.') - except Exception as e: - pass -+ - elif ptype == 'O': - # list of possible options -- pOptions = self.parameters[i].split(':')[1:] -+ pOptions = param.split(':')[1:] - if given[i] not in pOptions: - return self.syntaxError('Expected one of the following options: ' + ', '.join(pOptions)) - -@@ -439,6 +453,9 @@ class ShellCommand(object): - # check syntax first - if not self.checkSyntax(given): # syntax error occcurred! - return -+ -+ _log.debug("Execute %s%s", self.__class__.__name__, given) -+ - return self._execute(given) - - def completer(self, start): -@@ -604,7 +621,7 @@ class InitCommand(ShellCommand): - self.interpreter.pluginManager = pydmlite.PluginManager() - self.interpreter.pluginManager.loadConfiguration(self.interpreter.configurationFile) - except Exception as e: -- return self.error('Could not initialise PluginManager with file "' + self.interpreter.configurationFile + '".\n' + e.__str__()) -+ return self.error('Could not initialise PluginManager with file "' + self.interpreter.configurationFile + '".\n' + str(e)) - - try: - self.interpreter.securityContext = pydmlite.SecurityContext() -@@ -616,38 +633,34 @@ class InitCommand(ShellCommand): - self.interpreter.securityContext.groups.append(group) - self.interpreter.securityContext.credentials.remoteAddress = socket.getfqdn() - except Exception as e: -- return self.error('Could not initialise root SecurityContext.\n' + e.__str__()) -+ return self.error('Could not initialise root SecurityContext.\n' + str(e)) - - try: - self.interpreter.stackInstance = pydmlite.StackInstance(self.interpreter.pluginManager) - self.interpreter.stackInstance.manager = self.interpreter.pluginManager - self.interpreter.stackInstance.setSecurityContext(self.interpreter.securityContext) - except Exception as e: -- return self.error('Could not initialise a StackInstance.\n' + e.__str__()) -+ return self.error('Could not initialise a StackInstance.\n' + str(e)) - - try: - self.interpreter.catalog = self.interpreter.stackInstance.getCatalog() -- self.interpreter.catalog.changeDir('') -+ self.interpreter.catalog.changeDir(str('')) - except Exception as e: -- return self.error('Could not initialise the file catalog.\n' + e.__str__()) -+ return self.error('Could not initialise the file catalog.\n' + str(e)) - - try: - self.interpreter.authn = self.interpreter.stackInstance.getAuthn() - except Exception as e: - self.interpreter.authn = None - if not self.interpreter.quietMode: -- self.ok( -- '\nWARNING: Could not initialise the authentication interface. The functions like userinfo, groupinfo, entergrpmap, rmusrmap, ... will not work.\n' -- + e.__str__() + '\n') -+ self.ok('\nWARNING: Could not initialise the authentication interface. 
The functions like userinfo, groupinfo, entergrpmap, rmusrmap, ... will not work.\n' + str(e) + '\n') - - try: - self.interpreter.poolManager = self.interpreter.stackInstance.getPoolManager() - except Exception as e: - self.interpreter.poolManager = None - if not self.interpreter.quietMode: -- self.ok( -- '\nWARNING: Could not initialise the pool manager. The functions like pools, modifypool, addpool, qryconf, ... will not work.\n' + -- e.__str__() + '\n') -+ self.ok('\nWARNING: Could not initialise the pool manager. The functions like pools, modifypool, addpool, qryconf, ... will not work.\n' + str(e) + '\n') - - if not self.interpreter.quietMode: - self.ok('Using configuration "' + configFile + '" as root.') -@@ -701,7 +714,7 @@ class GetImplIdCommand(ShellCommand): - self.ok('Implementation ID of Authentification interface:\t' + self.interpreter.authn.getImplId()) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class ExitCommand(ShellCommand): -@@ -721,7 +734,7 @@ class PwdCommand(ShellCommand): - try: - return self.ok(self.interpreter.catalog.getWorkingDir()) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class CdCommand(ShellCommand): -@@ -738,22 +751,22 @@ class CdCommand(ShellCommand): - else: - path = os.path.normpath(given[0]) - -- f = self.interpreter.catalog.extendedStat(path, True) -+ f = self.interpreter.catalog.extendedStat(str(path), True) - if f.stat.isLnk(): - visited = [path] - while f.stat.isLnk(): -- link = self.interpreter.catalog.readLink(path) -+ link = self.interpreter.catalog.readLink(str(path)) - if link in visited: - return self.error('Circular link is not a Directory:\nParameter(s): ' + ', '.join(given)) - visited.append(link) -- f = self.interpreter.catalog.extendedStat(link, True) -+ f = self.interpreter.catalog.extendedStat(str(link), True) - if f.stat.isDir(): -- self.interpreter.catalog.changeDir(path) -+ self.interpreter.catalog.changeDir(str(path)) - else: - return self.error('The given path is not a Directory:\nParameter(s): ' + ', '.join(given)) - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - return self.ok() - - -@@ -791,9 +804,9 @@ class MkDirCommand(ShellCommand): - def _execute(self, given): - try: - directory = given[0] -- self.interpreter.catalog.makeDir(directory, 0o777) -+ self.interpreter.catalog.makeDir(str(directory), 0o777) - except Exception as e: -- msg = e.__str__() -+ msg = str(e) - code = msg[msg.find('#') + 1:msg.find(']')] - code = int(float(code) * 1000000) - # Parent missing -@@ -804,17 +817,17 @@ class MkDirCommand(ShellCommand): - while (directory != '' and directory != '/'): - listDir.append(directory) - directory = os.path.dirname(directory) -- for dir in reversed(listDir): -+ for directory in reversed(listDir): - try: -- self.interpreter.catalog.makeDir(dir, 0o777) -+ self.interpreter.catalog.makeDir(str(directory), 0o777) - except: - pass - return self.ok() - else: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - # Directory already existing - elif code == 17: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return 
self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - return self.ok() - - -@@ -830,15 +843,15 @@ class UnlinkCommand(ShellCommand): - filename = given[0] - if not filename.startswith('/'): - filename = os.path.normpath(os.path.join(self.interpreter.catalog.getWorkingDir(), filename)) -- replicas = self.interpreter.catalog.getReplicas(filename) -+ replicas = self.interpreter.catalog.getReplicas(str(filename)) - except: - replicas = [] - if replicas and not (len(given) > 1 and given[1].lower() in ['f', '-f', 'force', '--force']): - return self.error("You can't unlink a file which still has replicas. To do it, use the force flag (or just f).") - else: -- self.interpreter.catalog.unlink(given[0]) -+ self.interpreter.catalog.unlink(str(given[0])) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - return self.ok() - - -@@ -854,22 +867,22 @@ class RmDirCommand(ShellCommand): - dirname = os.path.normpath(os.path.join(self.interpreter.catalog.getWorkingDir(), dirname)) - if not (len(given) > 1 and given[1].lower() in ['-r', '--recursive']): - try: -- self.interpreter.catalog.removeDir(dirname) -+ self.interpreter.catalog.removeDir(str(dirname)) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - else: - #recursive mode - try: -- f = self.interpreter.catalog.extendedStat(dirname, True) -+ f = self.interpreter.catalog.extendedStat(str(dirname), True) - if f.stat.isDir(): - self.interpreter.remove_recursive(dirname) - return self.ok() - else: - self.error('The given parameter is not a folder: Parameter(s): ' + ', '.join(given)) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class MvCommand(ShellCommand): -@@ -880,9 +893,9 @@ class MvCommand(ShellCommand): - - def _execute(self, given): - try: -- self.interpreter.catalog.rename(given[0], given[1]) -+ self.interpreter.catalog.rename(str(given[0]), given[1]) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - return self.ok() - - -@@ -896,7 +909,7 @@ class DuCommand(ShellCommand): - # if no parameters given, list current directory - given.append(self.interpreter.catalog.getWorkingDir()) - -- f = self.interpreter.catalog.extendedStat(given[0], True) -+ f = self.interpreter.catalog.extendedStat(str(given[0]), True) - if f.stat.isDir(): - return self.ok(str(self.folderSize(given[0])) + 'B') - else: -@@ -923,9 +936,9 @@ class LnCommand(ShellCommand): - - def _execute(self, given): - try: -- self.interpreter.catalog.symlink(given[0], given[1]) -+ self.interpreter.catalog.symlink(str(given[0]), str(given[1])) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - return self.ok() - - -@@ -937,9 +950,9 @@ class ReadLinkCommand(ShellCommand): - - def _execute(self, given): - try: -- return self.ok(self.interpreter.catalog.readLink(given[0])) -+ return self.ok(self.interpreter.catalog.readLink(str(given[0]))) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + 
'\nParameter(s): ' + ', '.join(given)) - - - class CommentCommand(ShellCommand): -@@ -952,13 +965,13 @@ class CommentCommand(ShellCommand): - def _execute(self, given): - if len(given) == 2: - try: -- self.interpreter.catalog.setComment(given[0], given[1]) -+ self.interpreter.catalog.setComment(str(given[0]), str(given[1])) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - return self.ok() - else: - try: -- return self.ok(self.interpreter.catalog.getComment(given[0])) -+ return self.ok(self.interpreter.catalog.getComment(str(given[0]))) - except Exception as e: - return self.ok(' ') # no comment - -@@ -977,14 +990,14 @@ class InfoCommand(ShellCommand): - self.ok(filename) - self.ok('-' * len(filename)) - -- f = self.interpreter.catalog.extendedStat(filename, False) -+ f = self.interpreter.catalog.extendedStat(str(filename), False) - if f.stat.isDir(): - self.ok('File type: Folder') - elif f.stat.isReg(): - self.ok('File type: Regular file') - elif f.stat.isLnk(): - self.ok('File type: Symlink') -- self.ok(' -> ' + self.interpreter.catalog.readLink(filename)) -+ self.ok(' -> ' + self.interpreter.catalog.readLink(str(filename))) - else: - self.ok('File type: Unknown') - -@@ -999,7 +1012,7 @@ class InfoCommand(ShellCommand): - self.ok('Status: Unknown (' + str(f.status) + ')') - - try: -- comment = self.interpreter.catalog.getComment(filename) -+ comment = self.interpreter.catalog.getComment(str(filename)) - self.ok('Comment: ' + comment) - except: - self.ok('Comment: None') -@@ -1039,7 +1052,7 @@ class InfoCommand(ShellCommand): - self.ok('CTime: ' + time.ctime(f.stat.getCTime())) - - try: -- replicas = self.interpreter.catalog.getReplicas(filename) -+ replicas = self.interpreter.catalog.getReplicas(str(filename)) - except: - replicas = [] - -@@ -1065,7 +1078,7 @@ class InfoCommand(ShellCommand): - return self.ok(' ') - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class CreateCommand(ShellCommand): -@@ -1082,9 +1095,9 @@ class CreateCommand(ShellCommand): - return self.syntaxError('Expected: Octal mode number') - - try: -- self.interpreter.catalog.create(given[0], mode) -+ self.interpreter.catalog.create(str(given[0]), mode) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - return self.ok() - - -@@ -1101,9 +1114,9 @@ class ChModCommand(ShellCommand): - return self.syntaxError('Expected: Octal mode number') - - try: -- self.interpreter.catalog.setMode(given[0], mode) -+ self.interpreter.catalog.setMode(str(given[0]), mode) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - return self.ok() - - -@@ -1119,7 +1132,7 @@ class ChOwnCommand(ShellCommand): - - #check if the user is a gid - try: -- f = self.interpreter.catalog.extendedStat(given[0], False) -+ f = self.interpreter.catalog.extendedStat(str(given[0]), False) - gid = f.stat.st_gid - isusername = False - #try first username -@@ -1134,14 +1147,14 @@ class ChOwnCommand(ShellCommand): - _uid = pydmlite.boost_any() - _uid.setUnsigned(uid) - self.interpreter.authn.getUser('uid', _uid) -+ except ValueError as e: -+ return self.error(str(e) + '\nParameter(s): ' + ', 
'.join(given)) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -- except ValueError as e1: -- return self.error(e1.__str__() + '\nParameter(s): ' + ', '.join(given)) -- self.interpreter.catalog.setOwner(given[0], uid, gid, False) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) -+ self.interpreter.catalog.setOwner(str(given[0]), uid, gid, False) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class ChGrpCommand(ShellCommand): -@@ -1155,7 +1168,7 @@ class ChGrpCommand(ShellCommand): - return self.error('There is no Authentification interface.') - - try: -- f = self.interpreter.catalog.extendedStat(given[0], False) -+ f = self.interpreter.catalog.extendedStat(str(given[0]), False) - uid = f.stat.st_uid - isgroupname = False - try: -@@ -1169,14 +1182,14 @@ class ChGrpCommand(ShellCommand): - _gid = pydmlite.boost_any() - _gid.setUnsigned(gid) - self.interpreter.authn.getGroup('gid', _gid) -+ except ValueError as e: -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -- except ValueError as e1: -- return self.error(e1.__str__() + '\nParameter(s): ' + ', '.join(given)) -- self.interpreter.catalog.setOwner(given[0], uid, gid, False) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) -+ self.interpreter.catalog.setOwner(str(given[0]), uid, gid, False) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class GetChecksumCommand(ShellCommand): -@@ -1199,10 +1212,10 @@ class GetChecksumCommand(ShellCommand): - - try: - csumvalue = pydmlite.StringWrapper() -- self.interpreter.catalog.getChecksum(given[0], given[1], csumvalue, pfn, forcerecalc, 0) -+ self.interpreter.catalog.getChecksum(str(given[0]), str(given[1]), csumvalue, str(pfn), forcerecalc, 0) - return self.ok(str(given[1]) + ': ' + str(csumvalue.s)) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class ChecksumCommand(ShellCommand): -@@ -1214,17 +1227,17 @@ class ChecksumCommand(ShellCommand): - def _execute(self, given): - if len(given) == 1: - try: -- f = self.interpreter.catalog.extendedStat(given[0], False) -+ f = self.interpreter.catalog.extendedStat(str(given[0]), False) - return self.ok(str(f.csumtype) + ': ' + str(f.csumvalue)) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - else: - given.append('') - try: -- self.interpreter.catalog.setChecksum(given[0], given[2], given[1]) -+ self.interpreter.catalog.setChecksum(str(given[0]), str(given[2]), str(given[1])) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class UtimeCommand(ShellCommand): -@@ -1240,10 +1253,10 @@ class UtimeCommand(ShellCommand): - tb.actime = int(time.mktime(given[1])) - tb.modtime = int(time.mktime(given[2])) - try: -- self.interpreter.catalog.utime(given[0], tb) -+ 
self.interpreter.catalog.utime(str(given[0]), tb) - return self.ok() - except Exception as e: -- return self.error(e.__str__()) -+ return self.error(str(e)) - - - class ACLCommand(ShellCommand): -@@ -1279,7 +1292,7 @@ Default ACLs can be also set by specifying default: or d: in front of the ACL ex - self.parameters = ['Dfile', '*?ACL', '*Ocommand=modify:set:modify:delete', '*?-r'] - - def getACL(self, interpreter, file): -- f = interpreter.catalog.extendedStat(file, False) -+ f = interpreter.catalog.extendedStat(str(file), False) - if not f.acl: - return ['No ACL'] - output = [] -@@ -1350,7 +1363,7 @@ Default ACLs can be also set by specifying default: or d: in front of the ACL ex - return output - - def setACL(self, given, path): -- f = self.interpreter.catalog.extendedStat(path, True) -+ f = self.interpreter.catalog.extendedStat(str(path), True) - list_acl = f.acl.serialize().split(',') - try: - command = given[2] -@@ -1448,7 +1461,7 @@ Default ACLs can be also set by specifying default: or d: in front of the ACL ex - list_acl.sort() - myacl = pydmlite.Acl(','.join(list_acl)) - try: -- self.interpreter.catalog.setAcl(path, myacl) -+ self.interpreter.catalog.setAcl(str(path), myacl) - except: - self.error("Error while setting/modifying ACLs for path: " + path) - raise -@@ -1495,7 +1508,7 @@ Default ACLs can be also set by specifying default: or d: in front of the ACL ex - def _execute(self, given): - try: - filename = given[0] -- f = self.interpreter.catalog.extendedStat(filename, True) -+ f = self.interpreter.catalog.extendedStat(str(filename), True) - if '-r' in given and len(given) > 3: - #recursive mode - if f.stat.isDir(): -@@ -1512,21 +1525,7 @@ Default ACLs can be also set by specifying default: or d: in front of the ACL ex - return self.ok("\n".join(self.getACL(self.interpreter, filename))) - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -- -- --class SetGuidCommand(ShellCommand): -- """Set the GUID of a file.""" -- -- def _init(self): -- self.parameters = ['Dfile', '?GUID'] -- -- def _execute(self, given): -- try: -- self.interpreter.catalog.setGuid(given[0], given[1]) -- return self.ok() -- except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class ReplicaAddCommand(ShellCommand): -@@ -1553,9 +1552,9 @@ class ReplicaAddCommand(ShellCommand): - return self.syntaxError('This is not a valid replica type.') - - try: -- f = self.interpreter.catalog.extendedStat(given[0], False) -+ f = self.interpreter.catalog.extendedStat(str(given[0]), False) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - myreplica = pydmlite.Replica() - myreplica.fileid = f.stat.st_ino -@@ -1577,7 +1576,7 @@ class ReplicaAddCommand(ShellCommand): - self.interpreter.catalog.addReplica(myreplica) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class ReplicaModifyCommand(ShellCommand): -@@ -1591,7 +1590,7 @@ class ReplicaModifyCommand(ShellCommand): - def _execute(self, given): - try: - self.interpreter.catalog.getReplicas -- replicas = self.interpreter.catalog.getReplicas(given[0]) -+ replicas = self.interpreter.catalog.getReplicas(str(given[0])) - - for r in replicas: - if given[1] 
in (str(r.replicaid), r.rfn): -@@ -1628,7 +1627,7 @@ class ReplicaModifyCommand(ShellCommand): - return self.ok() - - except Exception as e: -- return self.error(e.__str__() + ' ' + given[0]) -+ return self.error(str(e) + ' ' + given[0]) - - - class ReplicaDelCommand(ShellCommand): -@@ -1639,8 +1638,7 @@ class ReplicaDelCommand(ShellCommand): - - def _execute(self, given): - try: -- self.interpreter.catalog.getReplicas -- replicas = self.interpreter.catalog.getReplicas(given[0]) -+ replicas = self.interpreter.catalog.getReplicas(str(given[0])) - sz = len(replicas) - replicaFound = False - for r in replicas: -@@ -1662,14 +1660,14 @@ class ReplicaDelCommand(ShellCommand): - if sz == 1: - try: - #remove also from catalog to clean memcache -- self.interpreter.catalog.unlink(given[0]) -+ self.interpreter.catalog.unlink(str(given[0])) - except Exception: - #do nothing cause if it fails does not hurt - pass - return self.ok() - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - ### Pools commands ### -@@ -1702,7 +1700,7 @@ The latter is the default. - self.interpreter.poolManager.newPool(pool) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - def prettySize(size): - isize = int(size) # argument can be string -@@ -1783,7 +1781,7 @@ class PoolInfoCommand(ShellCommand): - self.ok("%s (%s)\n%s" % (pool.name, pool.type, pprint_dictionary(dpool))) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\n' + traceback.format_exc() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\n' + traceback.format_exc() + '\nParameter(s): ' + ', '.join(given)) - - - class PoolModifyCommand(ShellCommand): -@@ -1850,7 +1848,7 @@ Additionnal attributes for: - self.interpreter.poolManager.updatePool(pool) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class PoolDelCommand(ShellCommand): -@@ -1867,7 +1865,7 @@ class PoolDelCommand(ShellCommand): - self.interpreter.poolManager.deletePool(pool) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class QryConfCommand(ShellCommand): -@@ -1880,19 +1878,20 @@ class QryConfCommand(ShellCommand): - try: - availability = pydmlite.PoolAvailability.kAny - pools = self.interpreter.poolManager.getPools(availability) -- info, err = self.interpreter.executor.getSpaceInfo(self.interpreter.domeheadurl) -+ data, err = self.interpreter.executor.getspaceinfo() - if err: - return self.error("Error while querying for Pools/FSs information") -- if info is None: -+ if len(data) == 0: - return self.ok("No Pool configured") -- data = json.loads(info) - for pool in pools: - output = "POOL %s " % (pool.name) - for key in ['defsize', 'gc_start_thresh', 'gc_stop_thresh', 'def_lifetime', 'defpintime', 'max_lifetime', 'maxpintime']: -- value = str(pool.getLong(key, -1)) + " " -+ output += key.upper() + " " -+ value = pool.getLong(key, -1) - if value < 0: -- value = "" -- output += key.upper() + " " + value -+ output += " " -+ else: -+ output += str(value) + " " - for key in ['groups', 'fss_policy', 'gc_policy', 
'mig_policy', 'rs_policy', 'ret_policy', 's_type']: - value = pool.getString(key, "") + " " - output += key.upper() + " " + value -@@ -1902,7 +1901,7 @@ class QryConfCommand(ShellCommand): - self.interpreter.poolDriver = self.interpreter.stackInstance.getPoolDriver(pool.type) - except Exception as e: - self.interpreter.poolDriver = None -- return self.error('Could not initialise the pool driver.\n' + e.__str__()) -+ return self.error('Could not initialise the pool driver.\n' + str(e)) - poolHandler = self.interpreter.poolDriver.createPoolHandler(pool.name) - capacity = poolHandler.getTotalSpace() - free = poolHandler.getFreeSpace() -@@ -1910,7 +1909,7 @@ class QryConfCommand(ShellCommand): - rate = round(float(100 * free) / capacity, 1) - else: - rate = 0 -- self.ok('\t\tCAPACITY %s FREE %s (%.1f%%)' % (self.prettySize(capacity), self.prettySize(free), rate)) -+ self.ok('\t\tCAPACITY %s FREE %s (%.1f%%)' % (utils.prettySize(capacity), utils.prettySize(free), rate)) - - for _pool in sorted(data['poolinfo'].keys()): - if _pool == pool.name: -@@ -1932,12 +1931,12 @@ class QryConfCommand(ShellCommand): - actstatus = '' - self.ok( - "\t%s %s CAPACITY %s FREE %s ( %.1f%%) %s %s" % -- (server, _fs, self.prettySize(fs['physicalsize']), self.prettySize(fs['freespace']), rate, status, actstatus)) -+ (server, _fs, utils.prettySize(fs['physicalsize']), utils.prettySize(fs['freespace']), rate, status, actstatus)) - except Exception as e: - pass - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - ### User and Group commands ### -@@ -1957,7 +1956,7 @@ class GroupAddCommand(ShellCommand): - self.interpreter.authn.newGroup(given[0]) - return self.ok('Group added') - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class UserAddCommand(ShellCommand): -@@ -1974,7 +1973,7 @@ class UserAddCommand(ShellCommand): - self.interpreter.authn.newUser(given[0]) - return self.ok('User added') - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class GroupInfoCommand(ShellCommand): -@@ -2021,7 +2020,7 @@ class GroupInfoCommand(ShellCommand): - self.ok(' - %s\t(ID: %d)\t%s' % (g.name, gid, status)) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class UserInfoCommand(ShellCommand): -@@ -2068,7 +2067,7 @@ class UserInfoCommand(ShellCommand): - self.ok(' - %s\t(ID: %d)\t%s' % (u.name, uid, status)) - return self.ok() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class UserGroupBanBaseCmd(ShellCommand): -@@ -2251,7 +2250,7 @@ Configuration data obtained from Argus server are cached 60s. - return self.ok('Ban status same') - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class UserBanCommand(UserGroupBanBaseCmd): -@@ -2359,7 +2358,7 @@ Configuration data obtained from Argus server are cached 60s. 
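For reference, every call on the refactored DomeExecutor returns a (data, error) pair: _simple_response() hands back the raw response body, _json_response() a parsed dictionary, and error stays None on success. A minimal usage sketch of the convention QryConfCommand consumes above (the head node URL is a placeholder; the certificate paths are the stock DPM locations):

    from dmliteshell.executor import DomeExecutor

    # placeholder head node URL; stock DPM credential locations
    executor = DomeExecutor("https://dpmhead.example.org:1094/domehead",
                            "/etc/grid-security/dpmmgr/dpmcert.pem",
                            "/etc/grid-security/dpmmgr/dpmkey.pem",
                            "/etc/grid-security/certificates/", "", "")
    data, error = executor.getspaceinfo()  # parsed JSON, as used by QryConfCommand
    if error:
        raise SystemExit(error)
    for pool in sorted(data.get('poolinfo', {})):
        print(pool)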
- return self.ok('Ban status same') - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class GroupDelCommand(ShellCommand): -@@ -2383,7 +2382,7 @@ class GroupDelCommand(ShellCommand): - self.interpreter.authn.deleteGroup(groupname) - return self.ok('Group deleted') - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class UserDelCommand(ShellCommand): -@@ -2407,14 +2406,16 @@ class UserDelCommand(ShellCommand): - self.interpreter.authn.deleteUser(username) - return self.ok('User deleted') - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - --restart_dpm_reminder = ("\n\n" + "*" * 80 + "\n" -- "If your storage system is using the legacy DPM stack, please don't forget\n" -- "to restart the DPM daemon after any filesystem changes.\n" -- "Running 'service dpm restart' should be enough.\n" -- "*" * 80 + "\n") -+restart_dpm_reminder = (""" -+****************************************************************************** -+If your storage system is using the legacy DPM stack, please don't forget -+to restart the DPM daemon after any filesystem changes. -+Running 'service dpm restart' should be enough. -+****************************************************************************** -+""") - - - class FsAddCommand(ShellCommand): -@@ -2452,14 +2453,14 @@ class FsAddCommand(ShellCommand): - if not force and fs.startswith('/dpm'): - return self.error('Path on filesystem that starts with /dpm is not compatible with default WebDAV configuration. Use force parameter to disable this check.') - -- out, err = self.interpreter.executor.addFsToPool(self.interpreter.domeheadurl, fs, pool, server, status) -+ out, err = self.interpreter.executor.addfstopool(server, fs, pool, status) - if err: -- return self.error(out) -+ return self.error(err) - else: - print(out) - return self.ok(restart_dpm_reminder) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class FsModifyCommand(ShellCommand): -@@ -2486,14 +2487,14 @@ Status must have one of the following values: ENABLED, DISABLED, RDONLY. 
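Similarly, the filesystem commands above now go through the executor's helpers instead of threading the head node URL through every call. A hedged sketch of the same calls made directly (server, filesystem, and pool names are invented; 2 is the read-only status code the drain code passes to modifyfs below, while 0 is assumed here to mean enabled):

    from dmliteshell.executor import DomeExecutor

    executor = DomeExecutor("https://dpmhead.example.org:1094/domehead",
                            "/etc/grid-security/dpmmgr/dpmcert.pem",
                            "/etc/grid-security/dpmmgr/dpmkey.pem",
                            "/etc/grid-security/certificates/", "", "")
    # invented server/fs/pool names; 0 assumed to be the enabled status code
    out, err = executor.addfstopool("disk01.example.org", "/srv/dpm/01", "mypool", 0)
    if err:
        print(err)
    # 2 = read-only, matching the drain code's modifyfs(..., 2) call
    out, err = executor.modifyfs("disk01.example.org", "/srv/dpm/01", "mypool", 2)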
- except KeyError as e: - return self.error('Unknown status value: ' + given[3]) - -- out, err = self.interpreter.executor.modifyFs(self.interpreter.domeheadurl, fs, pool, server, status) -+ out, err = self.interpreter.executor.modifyfs(server, fs, pool, status) - if err: -- return self.error(out) -+ return self.error(err) - else: - print(out) - return self.ok(restart_dpm_reminder) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class FsDelCommand(ShellCommand): -@@ -2510,14 +2511,14 @@ class FsDelCommand(ShellCommand): - fs = given[0] - server = given[1] - -- out, err = self.interpreter.executor.rmFs(self.interpreter.domeheadurl, fs, server) -+ out, err = self.interpreter.executor.rmfs(server, fs) - if err: -- return self.error(out) -+ return self.error(err) - else: - print(out) - return self.ok(restart_dpm_reminder) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - ### Replicate and Drain commands ### -@@ -2536,13 +2537,13 @@ class Util(object): - try: - conf = open("/etc/dmlite.conf.d/mysql.conf", 'r') - except Exception as e: -- print(e.__str__()) -+ print(str(e)) - return False - else: - try: - conf = open("/etc/dmlite.conf.d/domeadapter.conf", 'r') - except Exception as e: -- print(e.__str__()) -+ print(str(e)) - return False - adminUserName = None - dnisroot = None -@@ -2604,7 +2605,7 @@ class Util(object): - interpreter.error('Not possible to set Filesystem ' + sourceFS.server + "/" + sourceFS.name + " To ReadOnly. Exiting.") - return 1 - else: -- out, err = interpreter.executor.modifyFs(interpreter.domeheadurl, sourceFS.name, sourceFS.poolname, sourceFS.server, 2) -+ _, err = interpreter.executor.modifyfs(sourceFS.server, sourceFS.name, sourceFS.poolname, 2) - if err: - interpreter.error('Not possible to set Filesystem ' + sourceFS.server + "/" + sourceFS.name + " To ReadOnly. Exiting.") - return 1 -@@ -2616,19 +2617,25 @@ class Util(object): - - @staticmethod - def printComments(interpreter): -- interpreter.ok('') -- interpreter.ok('===================================================================================================================================================================================') -- interpreter.ok("The process is running in dryrun mode, please add the option 'dryrun false' to effectively perform the drain process") -- interpreter.ok('\n') -- interpreter.ok("Make sure to have LCGDM-Dav properly setup on your infrastructure. 
The process contacts the Headnode by default via https on the 443 port and the disknodes via http on the 80 port") -- interpreter.ok('\n') -- interpreter.ok("If your infrastructure has different ports configured please use the DPM_HTTPS_PORT and DPM_HTTP_PORT env variabile to configure the drain process accordingly") -- interpreter.ok('\n') -- interpreter.ok("The disknodes should ALL have the same port configured") -- interpreter.ok('\n') -- interpreter.ok("Please also monitor the draining logs, and in case of errors due to timeouts/daemons overloaded please adjust accordingly the number of draining threads( Default = 5)") -- interpreter.ok('===================================================================================================================================================================================') -- interpreter.ok('\n') -+ interpreter.ok(""" -+=============================================================================== -+The process is running in dryrun mode, please add the option 'dryrun false' -+to effectively perform the drain process. -+ -+Make sure to have LCGDM-Dav properly setup on your infrastructure. The process -+contacts the Headnode by default via https on the 443 port and the disknodes -+via http on the 80 port. -+ -+If your infrastructure has different ports configured use the DPM_HTTPS_PORT -+and DPM_HTTP_PORT env variable to configure the drain process accordingly. -+ -+The disknodes should ALL have the same port configured. -+ -+Please also monitor the draining logs, and in case of errors due to -+timeouts or overloaded daemons please adjust accordingly the number of draining -+threads (default = 5). -+=============================================================================== -+""") - - - class Response(object): -@@ -2647,7 +2654,7 @@ class Response(object): - - ## Callback function invoked when body data is ready - def body(self, buf): -- self.markers.append(buf) -+ self.markers.append(buf.decode('utf-8')) - - def headers(self): - s = ''.join(self.chunks) -@@ -2760,8 +2767,8 @@ class Replicate(object): - try: - loc = self.interpreter.poolManager.whereToWrite(self.filename) - except Exception as e: -- self.interpreter.error(e.__str__()) -- return (False, None, e.__str__()) -+ self.interpreter.error(str(e)) -+ return (False, None, str(e)) - - #checking ports to use - http_port = 80 -@@ -2800,8 +2807,8 @@ class Replicate(object): - try: - c.perform() - except Exception as e: -- self.interpreter.error(e.__str__()) -- return (False, sfn, e.__str__()) -+ self.interpreter.error(str(e)) -+ return (False, sfn, str(e)) - - status = c.getinfo(c.RESPONSE_CODE) - if status != 202: -@@ -2814,8 +2821,8 @@ class Replicate(object): - else: - return (False, sfn, outcome) - except Exception as e: -- self.interpreter.error(e.__str__()) -- return (False, sfn, e.__str__()) -+ self.interpreter.error(str(e)) -+ return (False, sfn, str(e)) - - - class DrainThread(threading.Thread): -@@ -2915,13 +2922,13 @@ ex: - parameters['dryrun'] = False - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - #instantiating DPMDB - try: -- db = DPMDB(self.interpreter) -+ db = DPMDB('DomeAdapterHeadCatalog' not in self.interpreter.catalog.getImplId()) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - try: - #check if destination FS exists and
it's not disabled/readonly -@@ -2958,7 +2965,7 @@ ex: - listTotalFiles = db.getReplicaInFSFolder(sourceFilesystem, sourceServer, sourceFolder) - - #step 3 : for each file call the drain method of DrainFileReplica -- self.interpreter.replicaQueue = Queue.Queue(len(listTotalFiles)) -+ self.interpreter.replicaQueue = Queue(len(listTotalFiles)) - self.interpreter.replicaQueue.queue.clear() - self.interpreter.replicaQueueLock = threading.Lock() - -@@ -2966,7 +2973,7 @@ ex: - self.drainProcess.drain() - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class ReplicateCommand(ShellCommand): -@@ -2995,19 +3002,25 @@ The replicate command accepts the following parameters: - ] - - def printComments(self): -- self.interpreter.ok('') -- self.interpreter.ok('===================================================================================================================================================================================') -- self.interpreter.ok("Your are running in dryrun mode, please add the option 'dryrun false' to effectively perform the file replication") -- self.interpreter.ok('\n') -- self.interpreter.ok("Make sure to have LCGDM-Dav properly setup on your infrastructure. The process contacts the Headnode by default via https on the 443 port and the disknodes via http on the 80 port") -- self.interpreter.ok('\n') -- self.interpreter.ok("If your infrastructure has different ports configured please use the DPM_HTTPS_PORT and DPM_HTTP_PORT env variabile to configure the drain process accordingly") -- self.interpreter.ok('\n') -- self.interpreter.ok("The disknodes should have ALL the same port configured") -- self.interpreter.ok('\n') -- self.interpreter.ok('Please also note that if the file is associated to a spacetoken the new replica is not going to be added to that spacetoken unless you specify it via the \'spacetoken\' parameter') -- self.interpreter.ok('===================================================================================================================================================================================') -- self.interpreter.ok('\n') -+ self.interpreter.ok(""" -+=============================================================================== -+You are running in dryrun mode, please add the option 'dryrun false' -+to effectively perform the file replication. -+ -+Make sure to have LCGDM-Dav properly setup on your infrastructure. The process -+contacts the Headnode by default via https on the 443 port and the disknodes -+via http on the 80 port. -+ -+If your infrastructure has different ports configured use the DPM_HTTPS_PORT -+and DPM_HTTP_PORT env variable to configure the drain process accordingly. -+ -+The disknodes should ALL have the same port configured.
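The DPM_HTTP_PORT and DPM_HTTPS_PORT overrides mentioned in these messages are ordinary environment variables; a minimal sketch of how a replicate or drain run could pick them up, where the variable names and the 80/443 defaults come from the messages above and everything else is illustrative:

import os

# minimal sketch: fall back to the documented defaults when the
# environment does not override the disknode/headnode ports
http_port = int(os.environ.get('DPM_HTTP_PORT', '80'))
https_port = int(os.environ.get('DPM_HTTPS_PORT', '443'))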
-+ -+Please also note that if the file is associated to a spacetoken the new replica -+is not going to be added to that spacetoken unless you specify it via -+the 'spacetoken' parameter. -+=============================================================================== -+""") - - def _execute(self, given): - if self.interpreter.stackInstance is None: -@@ -3061,7 +3074,7 @@ The replicate command accepts the following parameters: - replicate = Replicate(self.interpreter, filename, spacetoken, parameters) - (replicated, destination, error) = replicate.run() - except Exception as e: -- self.error(e.__str__()) -+ self.error(str(e)) - self.error("Error Replicating file: " + filename + "\n") - if error: - self.error(error) -@@ -3084,7 +3097,7 @@ The replicate command accepts the following parameters: - if replicated: - #check replica status - try: -- replica = self.interpreter.catalog.getReplicaByRFN(destination) -+ replica = self.interpreter.catalog.getReplicaByRFN(str(destination)) - if replica.status != pydmlite.ReplicaStatus.kAvailable: - cleanReplica = True - self.error("Error while updating the replica status\n") -@@ -3097,24 +3110,24 @@ The replicate command accepts the following parameters: - if cleanReplica: - if not replica: - try: -- replica = self.interpreter.catalog.getReplicaByRFN(destination) -+ replica = self.interpreter.catalog.getReplicaByRFN(str(destination)) - except Exception as e: - self.error("Error while checking the replica status\n") - return self.error('Please remove manually the replica with rfn: ' + destination) - try: - self.interpreter.poolDriver = self.interpreter.stackInstance.getPoolDriver('filesystem') - except Exception as e: -- self.error('Could not initialise the pool driver to clean the replica\n' + e.__str__()) -+ self.error('Could not initialise the pool driver to clean the replica\n' + str(e)) - return self.error('Please remove manually the replica with rfn: ' + destination) - try: - if replica.getString('pool', ''): - poolHandler = self.interpreter.poolDriver.createPoolHandler(replica.getString('pool', '')) - poolHandler.removeReplica(replica) - else: -- self.error('Could not clean the replica.\n' + e.__str__()) -+ self.error('Could not clean the replica.\n' + str(e)) - return self.error('Please remove manually the replica with rfn: ' + destination) - except Exception as e: -- self.error('Could not clean the replica.\n' + e.__str__()) -+ self.error('Could not clean the replica.\n' + str(e)) - return self.error('Please remove manually the replica with rfn: ' + destination) - return self.ok() - -@@ -3139,6 +3152,10 @@ class DrainFileReplica(object): - - def drain(self): - filename = self.fileReplica.lfn -+ if not filename: -+ self.logError("The file with replica sfn: " + self.fileReplica.sfn + " doesn't have an LFN, ignored\n") -+ self.interpreter.drainErrors.append((filename, self.fileReplica.sfn, "The file doesn't have an LFN")) -+ return 1 - #step 4 : check the status and see if they the replica can be drained - if self.fileReplica.status != "-": - if self.fileReplica.status == "P": -@@ -3148,7 +3165,7 @@ class DrainFileReplica(object): - else: - #new behaviour, in case the file is in status D we should remove the file and the replicas - self.logOK("The file with replica sfn: " + self.fileReplica.sfn + " is under deletion, it can be safely removed\n") -- self.interpreter.catalog.unlink(filename) -+ self.interpreter.catalog.unlink(str(filename)) - return 0 - - #step 4-1 : check the replica status and see if they the replica can be drained -@@ -3174,9 +3191,9
@@ class DrainFileReplica(object): - try: - (replicated, destination, error) = replicate.run() - except Exception as e: -- self.logError(e.__str__()) -+ self.logError(str(e)) - self.logError("Error moving Replica for file: " + filename + "\n") -- self.interpreter.drainErrors.append((filename, self.fileReplica.sfn, e.__str__())) -+ self.interpreter.drainErrors.append((filename, self.fileReplica.sfn, str(e))) - if destination: - #logging only need to clean pending replica - self.logError("Error while copying to SFN: " + destination + "\n") -@@ -3198,7 +3215,7 @@ class DrainFileReplica(object): - if replicated: - #check replica status - try: -- replica = self.interpreter.catalog.getReplicaByRFN(destination) -+ replica = self.interpreter.catalog.getReplicaByRFN(str(destination)) - if replica.status != pydmlite.ReplicaStatus.kAvailable: - cleanReplica = True - self.logError("Error while updating the replica status for file: " + filename + "\n") -@@ -3213,24 +3230,23 @@ class DrainFileReplica(object): - #step 6 : remove drained replica file if correctly replicated or erroneus drained file - if not cleanReplica: - try: -- replica = self.interpreter.catalog.getReplicaByRFN(self.fileReplica.sfn) -+ replica = self.interpreter.catalog.getReplicaByRFN(str(self.fileReplica.sfn)) - pool = self.interpreter.poolManager.getPool(self.fileReplica.poolname) - self.interpreter.poolDriver = self.interpreter.stackInstance.getPoolDriver(pool.type) - except Exception as e: - self.interpreter.drainErrors.append( - (filename, self.fileReplica.sfn, "Error while getting the original replica from the catalog, cannot drain")) -- return self.logError('Error while getting the original replica from the catalog for file: ' + filename + ', cannot drain.\n' + -- e.__str__()) -+ return self.logError('Error while getting the original replica from the catalog for file: ' + filename + ', cannot drain.\n' + str(e)) - - else: - try: -- replica = self.interpreter.catalog.getReplicaByRFN(destination) -+ replica = self.interpreter.catalog.getReplicaByRFN(str(destination)) - pool = self.interpreter.poolManager.getPool(replica.getString('pool', '')) - self.interpreter.poolDriver = self.interpreter.stackInstance.getPoolDriver(pool.type) - except Exception as e: - self.interpreter.drainErrors.append( - (filename, self.fileReplica.sfn, "Error while getting the new replica from the catalog, cannot clean")) -- return self.logError('Error while getting the new replica from the catalog for file: ' + filename + ', cannot clean.\n' + e.__str__()) -+ return self.logError('Error while getting the new replica from the catalog for file: ' + filename + ', cannot clean.\n' + str(e)) - #retry 3 times: - for i in range(0, 3): - try: -@@ -3240,10 +3256,10 @@ class DrainFileReplica(object): - if i == 2: - if not cleanReplica: - self.interpreter.drainErrors.append((filename, self.fileReplica.sfn, "Could not remove the original replica")) -- return self.logError('Could not remove the original replica for file: ' + filename + '\n' + e.__str__()) -+ return self.logError('Could not remove the original replica for file: ' + filename + '\n' + str(e)) - else: - self.interpreter.drainErrors.append((filename, self.fileReplica.sfn, "Could not clean the new replica")) -- return self.logError('Could not remove the new replica for file: ' + filename + '\n' + e.__str__()) -+ return self.logError('Could not remove the new replica for file: ' + filename + '\n' + str(e)) - else: - continue - #cleaning catalog ( not throwing exception if fails though could it could be 
already cleaned by the poolhandler.removereplica -@@ -3266,18 +3282,20 @@ class DrainReplicas(object): - self.threadpool = [] - - def stopThreads(self): -- self.interpreter.ok('Drain process Stopped, Waiting max 10 seconds for each running thread to end...') -+ self.interpreter.ok('Drain process stopped, waiting max 60 seconds + 5 seconds for each running thread') - for t in self.threadpool: - t.stop() -+ join_start = time.monotonic() - for t in self.threadpool: -- t.join(10) -+ wait = 60 - (time.monotonic() - join_start) -+ t.join(wait if wait > 5 else 5) - self.printDrainErrors() - - def printDrainErrors(self): - if len(self.interpreter.drainErrors) > 0: - self.interpreter.ok("List of Errors:\n") - for (file, sfn, error) in self.interpreter.drainErrors: -- self.interpreter.ok("File: " + file + "\tsfn: " + sfn + "\tError: " + error) -+ self.interpreter.ok("File: " + str(file) + "\tsfn: " + sfn + "\tError: " + error) - - def drain(self): - gid = None -@@ -3339,8 +3357,11 @@ class DrainReplicas(object): - - for t in self.threadpool: - t.stop() -+ self.interpreter.ok("Joining %i threads (wait max 3600s + 5s for each thread)\n" % len(self.threadpool)) -+ join_start = time.monotonic() - for t in self.threadpool: -- t.join(10) -+ wait = 3600 - (time.monotonic() - join_start) -+ t.join(wait if wait > 5 else 5) - if self.parameters['move']: - self.interpreter.ok("Move Process completed\n") - else: -@@ -3349,7 +3370,7 @@ class DrainReplicas(object): - self.printDrainErrors() - - except Exception as e: -- return self.interpreter.error(e.__str__()) -+ return self.interpreter.error(str(e)) - - - class DrainPoolCommand(ShellCommand): -@@ -3424,7 +3445,7 @@ The drainpool command accepts the following parameters: - force = True - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - #check nthreads and force params - if parameters['nthreads'] < 1: -@@ -3434,9 +3455,9 @@ The drainpool command accepts the following parameters: - - #instantiating DPMDB - try: -- db = DPMDB(self.interpreter) -+ db = DPMDB('DomeAdapterHeadCatalog' not in self.interpreter.catalog.getImplId()) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - #get information on pools - try: -@@ -3460,19 +3481,20 @@ The drainpool command accepts the following parameters: - #step 1 : set as READONLY all FS in the pool to drain - if not parameters['dryrun']: - listFS = db.getFilesystems(poolToDrain.name) -+ _log.info("Set as READONLY all FS in the pool to drain: %s", ','.join(["%s:%s" % (x.server, x.name) for x in listFS])) - for fsToDrain in listFS: - if Util.setFSReadonly(self.interpreter, fsToDrain): - return - else: - Util.printComments(self.interpreter) -- self.ok("Calculating Replicas to Drain..") -- self.ok() - - #step 2 : get all FS associated to the pool to drain and get the list of replicas -+ self.ok("Calculating Replicas to Drain...") -+ self.ok() - listTotalFiles = db.getReplicasInPool(poolToDrain.name) - - #step 3 : for each file call the drain method of DrainFileReplica -- self.interpreter.replicaQueue = Queue.Queue(len(listTotalFiles)) -+ self.interpreter.replicaQueue = Queue(len(listTotalFiles)) - self.interpreter.replicaQueue.queue.clear() - self.interpreter.replicaQueueLock = threading.Lock() - -@@ -3480,7 +3502,7 @@ The drainpool command accepts the following parameters: -
self.drainProcess.drain() - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class DrainFSCommand(ShellCommand): -@@ -3549,7 +3571,7 @@ The drainfs command accepts the following parameters: - if given[i + 1] == "True" or given[i + 1] == "true" or given[i + 1] == "1": - force = True - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - #check nthreads and force params - if parameters['nthreads'] < 1: -@@ -3559,9 +3581,9 @@ The drainfs command accepts the following parameters: - - #instantiating DPMDB - try: -- db = DPMDB(self.interpreter) -+ db = DPMDB('DomeAdapterHeadCatalog' not in self.interpreter.catalog.getImplId()) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - #check if the filesystem is ok and also check if other filesystems are available - try: -@@ -3587,19 +3609,21 @@ The drainfs command accepts the following parameters: - if not fsToDrain: - return self.error("The specified filesystem has not been found in the DPM configuration") - -- #set as READONLY the FS to drain -+ #step 1 : set as READONLY the FS to drain - if not parameters['dryrun']: -+ _log.info("Set as READONLY FS to drain: %s:%s", fsToDrain.server, fsToDrain.name) - if Util.setFSReadonly(self.interpreter, fsToDrain): - return - else: - Util.printComments(self.interpreter) -- self.ok("Calculating Replicas to Drain..") -+ -+ #step 2 : get list of all replicas to drain in selected FS -+ self.ok("Calculating Replicas to Drain...") - self.ok() -- #get all files to drain - listFiles = db.getReplicasInFS(fsToDrain.name, fsToDrain.server) - - #step 3 : for each file call the drain method of DrainFileReplica -- self.interpreter.replicaQueue = Queue.Queue(len(listFiles)) -+ self.interpreter.replicaQueue = Queue(len(listFiles)) - self.interpreter.replicaQueue.queue.clear() - self.interpreter.replicaQueueLock = threading.Lock() - -@@ -3607,7 +3631,7 @@ The drainfs command accepts the following parameters: - self.drainProcess.drain() - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - class DrainServerCommand(ShellCommand): -@@ -3674,7 +3698,7 @@ The drainserver command accepts the following parameters: - if given[i + 1] == "True" or given[i + 1] == "true" or given[i + 1] == "1": - force = True - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - #check nthreads and force params - if parameters['nthreads'] < 1: -@@ -3684,9 +3708,9 @@ The drainserver command accepts the following parameters: - - #instantiating DPMDB - try: -- db = DPMDB(self.interpreter) -+ db = DPMDB('DomeAdapterHeadCatalog' not in self.interpreter.catalog.getImplId()) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - #check if the server is ok and also check if other filesystems in other diskservers are available - try: -@@ -3716,25 +3740,28 @@ The drainserver command accepts the following parameters: - return self.error("The specified server 
has not been found in the DPM configuration") - #set as READONLY the FS to drain - if not parameters['dryrun']: -- for fs in db.getFilesystemsInServer(servername): -+ listFS = db.getFilesystemsInServer(servername) -+ _log.info("Set as READONLY FS to drain: %s", ','.join(["%s:%s" % (x.server, x.name) for x in listFS])) -+ for fs in listFS: - if Util.setFSReadonly(self.interpreter, fs): - return - else: - Util.printComments(self.interpreter) -- self.ok("Calculating Replicas to Drain..") -- self.ok() -+ - #get all files to drain -+ self.ok("Calculating Replicas to Drain...") -+ self.ok() - listFiles = db.getReplicasInServer(servername) - - #step 3 : for each file call the drain method of DrainFileReplica -- self.interpreter.replicaQueue = Queue.Queue(len(listFiles)) -+ self.interpreter.replicaQueue = Queue(len(listFiles)) - self.interpreter.replicaQueue.queue.clear() - self.interpreter.replicaQueueLock = threading.Lock() - - self.drainProcess = DrainReplicas(self.interpreter, db, listFiles, parameters) - self.drainProcess.drain() - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - - ### Quota Token Commands### -@@ -3762,11 +3789,9 @@ The command accepts the following paramameter: - getparentdirs = False - if path == ".": - path = self.interpreter.catalog.getWorkingDir() -- out, err = self.interpreter.executor.getquotatoken(self.interpreter.domeheadurl, path, getparentdirs, getsubdirs) -- try: -- data = json.loads(out) -- except ValueError: -- self.error("No quota token defined for the path: " + path) -+ data, err = self.interpreter.executor.getquotatoken(path, getparentdirs, getsubdirs) -+ if err: -+ self.error("No quota token defined for '%s': %s" % (path, str(err))) - return - for token in data.keys(): - self.ok("\n") -@@ -3779,15 +3804,15 @@ The command accepts the following paramameter: - self.ok("Token Name:\t" + data[token]['quotatkname']) - self.ok("Token Path:\t" + data[token]['path']) - self.ok("Token Pool:\t" + data[token]['quotatkpoolname']) -- self.ok("Token Total Space:\t" + self.interpreter.prettySize(data[token]['quotatktotspace'])) -- self.ok("Pool Total Space:\t" + self.interpreter.prettySize(data[token]['pooltotspace'])) -+ self.ok("Token Total Space:\t" + utils.prettySize(data[token]['quotatktotspace'])) -+ self.ok("Pool Total Space:\t" + utils.prettySize(data[token]['pooltotspace'])) - if int(data[token]['pathusedspace']) < 0: -- warning = "WARNING: the accounted space is negative, please run the 'dmlite-mysql-dirspaces.py' script to fix possible inconsistencies" -+ warning = "WARNING: the accounted space is negative, please run the 'dbck' to fix possible inconsistencies" - self.ok("Token Accounted Space:\t" + str(0)) - self.ok(warning) - else: -- self.ok("Token Accounted Space:\t" + self.interpreter.prettySize(data[token]['pathusedspace'])) -- self.ok("Token Available Space:\t" + self.interpreter.prettySize(data[token]['pathfreespace'])) -+ self.ok("Token Accounted Space:\t" + utils.prettySize(data[token]['pathusedspace'])) -+ self.ok("Token Available Space:\t" + utils.prettySize(data[token]['pathfreespace'])) - self.ok("Groups:") #sometimes the groups are not avaialble immediately after a quotatoken is created - try: - for group in data[token]['groups'].keys(): -@@ -3833,7 +3858,7 @@ The command accepts the following parameters: - if given[i] == "pool": - pool = given[i + 1] - elif given[i] == "size": -- size = self.interpreter.prettyInputSize(given[i + 
1]) -+ size = utils.prettyInputSize(given[i + 1]) - if size < 0: - return self.error("Incorrect size: it must be a positive integer") - elif given[i] == "desc": -@@ -3852,16 +3877,14 @@ The command accepts the following parameters: - lfn = os.path.normpath(os.path.join(self.interpreter.catalog.getWorkingDir(), lfn)) - - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - qt_pool = pool - if qt_pool == None: - # map quotatoken token_id to the pool name -- out, err = self.interpreter.executor.getquotatoken(self.interpreter.domeheadurl, '/', 0, 1) -- try: -- data = json.loads(out) -- except ValueError: -- return self.error("Failed to read quota token definitions") -+ data, err = self.interpreter.executor.getquotatoken('/', 0, 1) -+ if err: -+ return self.error("Failed to read quota token definitions: %s" % str(err)) - for token in data.keys(): - if token != s_token: continue - qt_pool = data[token]['quotatkpoolname'] -@@ -3871,23 +3894,22 @@ The command accepts the following parameters: - - # compare quotatoken size with minimum pool free space - try: -- info, err = self.interpreter.executor.statPool(self.interpreter.domeheadurl, qt_pool) -+ data, err = self.interpreter.executor.statpool(qt_pool) - if err: - return self.error("Error while querying for \"" + qt_pool + "\" information") -- if info is None: -+ if len(data) == 0: - return self.error("No pool with name \"" + qt_pool + "\" configured") -- data = json.loads(info) - if 'poolinfo' not in data or qt_pool not in data['poolinfo']: - return self.error("No data returned for pool \"" + qt_pool + "\"") - pool_defsize = int(data['poolinfo'][qt_pool]['defsize']) - if size <= pool_defsize: -- return self.error("Quotatoken size must be bigger than minimum pool free space (" + qt_pool + ".defsize = " + self.interpreter.prettySize(pool_defsize) + ")") -+ return self.error("Quotatoken size must be bigger than minimum pool free space (" + qt_pool + ".defsize = " + utils.prettySize(pool_defsize) + ")") - except Exception as e: -- return self.error(type(e).__name__ + ": " + e.__str__() + '\nUnable to get defsize configuration for pool "' + qt_pool + '"') -+ return self.error(type(e).__name__ + ": " + str(e) + '\nUnable to get defsize configuration for pool "' + qt_pool + '"') - -- out, err = self.interpreter.executor.modquotatoken(self.interpreter.domeheadurl, s_token, lfn, pool, size, desc, groups) -+ out, err = self.interpreter.executor.modquotatoken(s_token, lfn, pool, size, desc, groups) - if err: -- self.error(out) -+ self.error(err) - else: - self.ok(out) - -@@ -3926,7 +3948,7 @@ The command accepts the following parameter: - if given[i] == "pool": - pool = given[i + 1] - elif given[i] == "size": -- size = self.interpreter.prettyInputSize(given[i + 1]) -+ size = utils.prettyInputSize(given[i + 1]) - if size < 0: - return self.error("Incorrect size: it must be a positive integer") - elif given[i] == "desc": -@@ -3934,27 +3956,26 @@ The command accepts the following parameter: - elif given[i] == "groups": - groups = given[i + 1] - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - - # compare quotatoken size with minimum pool free space - try: -- info, err = self.interpreter.executor.statPool(self.interpreter.domeheadurl, pool) -+ data, err = self.interpreter.executor.statpool(pool) - if err: - return 
self.error("Error while querying for \"" + pool + "\" information") -- if info is None: -+ if len(data) == 0: - return self.error("No pool with name \"" + pool + "\" configured") -- data = json.loads(info) - if 'poolinfo' not in data or pool not in data['poolinfo']: - return self.error("No data returned for pool \"" + pool + "\"") - pool_defsize = int(data['poolinfo'][pool]['defsize']) - if size <= pool_defsize: -- return self.error("Quotatoken size must be bigger than minimum pool free space (" + pool + ".defsize = " + self.interpreter.prettySize(pool_defsize) + ")") -+ return self.error("Quotatoken size must be bigger than minimum pool free space (" + pool + ".defsize = " + utils.prettySize(pool_defsize) + ")") - except Exception as e: -- return self.error(type(e).__name__ + ": " + e.__str__() + '\nUnable to get defsize configuration for pool "' + pool + '"') -+ return self.error(type(e).__name__ + ": " + str(e) + '\nUnable to get defsize configuration for pool "' + pool + '"') - -- out, err = self.interpreter.executor.setquotatoken(self.interpreter.domeheadurl, lfn, pool, size, desc, groups) -+ out, err = self.interpreter.executor.setquotatoken(lfn, pool, size, desc, groups) - if err: -- self.error(out) -+ self.error(err) - else: - self.ok(out) - -@@ -3977,9 +3998,9 @@ The command accepts the following parameters: - pool = given[1] - if lfn == ".": - lfn = self.interpreter.catalog.getWorkingDir()[:-1] -- out, err = self.interpreter.executor.delquotatoken(self.interpreter.domeheadurl, lfn, pool) -+ out, err = self.interpreter.executor.delquotatoken(lfn, pool) - if err: -- self.error(out) -+ self.error(err) - else: - self.ok(out) - -@@ -4013,10 +4034,10 @@ find "" -d - folder = True - ret = list() - try: -- db = DPMDB(self.interpreter) -+ db = DPMDB('DomeAdapterHeadCatalog' not in self.interpreter.catalog.getImplId()) - ret = db.find(pattern, folder) - except Exception as e: -- return self.error(e.__str__() + '\nParameter(s): ' + ', '.join(given)) -+ return self.error(str(e) + '\nParameter(s): ' + ', '.join(given)) - if ret != None: - if len(ret) > 0: - return self.ok(('\n'.join(ret))) -@@ -4041,7 +4062,7 @@ class GetLfnCommand(ShellCommand): - except ValueError: - pass - try: -- db = DPMDB(self.interpreter) -+ db = DPMDB('DomeAdapterHeadCatalog' not in self.interpreter.catalog.getImplId()) - if fileid == None: - lfn = db.getLFNFromSFN(given[0]) - if lfn is None: -@@ -4056,3 +4077,369 @@ class GetLfnCommand(ShellCommand): - else: - return self.error("Cannot find the given Ino") - return self.ok(lfn) -+ -+ -+class AccountingCommand(ShellCommand): -+ """Storage space accounting for EGI and WLCG. 
-+ -+* EMI StAR format version 1.2 -+ http://cds.cern.ch/record/1452920/files/GFD.201.pdf -+ https://wiki.egi.eu/wiki/APEL/Storage -+ Calculating aggregated size used by users/groups can take some time -+ Parameters: -+ filename : output file (default: stdout) -+ site : GOCDB site name (default: UNKNOWN) -+ hostname : storage headnode fqdn (default: local fqdn) -+ groups : include aggregated groups data (default: True) -+ users : include aggregated users data (default: True) -+* WLCG Storage Resource Reporting -+ https://twiki.cern.ch/twiki/bin/view/LCG/AccountingTaskForce -+ Parameters: -+ output : SRR Json output destination (default: stdout) -+ supported schema: file:///, https://, root:// and apel:// -+ -+Examples: -+ accounting StAR site=GOCDB_SITE_NAME -+ accounting StAR site=GOCDB_SITE_NAME output=/tmp/EMI_StAR.xml -+ accounting StAR site=GOCDB_SITE_NAME output=apel:// -+ accounting StAR site=GOCDB_SITE_NAME output=apel://msg.argo.grnet.gr:443/?ams_auth_port=8443 -+ accounting SRR output=https://dpmhead.domain/dpm/domain/home/SRR/storagesummary.json -+""" -+ -+ def _init(self): -+ self.parameters = [ -+ 'Oformat:StAR:SRR', '*?...', -+ ] -+ -+ def _execute(self, given): -+ fmt = given[0] -+ parameters = {} -+ parameters['output'] = None # stdout -+ if fmt == 'StAR': -+ parameters['site'] = 'UNDEFINED' -+ parameters['host'] = self.interpreter.host -+ parameters['record_id'] = None -+ parameters['groups'] = True -+ parameters['users'] = True -+ parameters['valid'] = 86400 -+ elif fmt == 'SRR': -+ pass -+ -+ for param in given[1:]: -+ if param.find('=') == -1: -+ return self.error("Invalid parameter '%s'" % param) -+ k, v = param.split('=', 1) -+ -+ if fmt == 'StAR': -+ if k in ['output', 'site', 'host']: -+ parameters[k] = v -+ elif k in ['groups', 'users']: -+ parameters[k] = v.lower() in ['1', 'y', 'yes', 't', 'true'] -+ else: -+ return self.error("Unknown %s parameter '%s'" % (fmt, param)) -+ elif fmt == 'SRR': -+ if k in ['output']: -+ parameters[k] = v -+ else: -+ return self.error("Unknown %s parameter '%s'" % (fmt, param)) -+ -+ try: -+ if fmt == 'StAR': -+ self._execute_star(parameters) -+ elif fmt == 'SRR': -+ self._execute_srr(parameters) -+ -+ except Exception as e: -+ return self.error("Failed to create %s accounting: %s" % (fmt, str(e))) -+ -+ self.ok("\n") # newline after XML/JSON output -+ -+ return self.ok("OK") -+ -+ def _execute_star(self, parameters): -+ data = star.star(parameters['groups'], parameters['users'], -+ parameters['record_id'], parameters['site'], -+ parameters['host'], parameters['valid']) -+ -+ if not parameters['output'] or parameters['output'] == 'stdout://': -+ sys.stdout.write(data) -+ elif parameters['output'] == 'stderr://': -+ sys.stderr.write(data) -+ elif parameters['output'].startswith('/') or parameters['output'].startswith('file://'): -+ filename = parameters['output'] if not parameters['output'].startswith('file://') else parameters['output'][len('file://'):] -+ with open(filename, "w") as f: -+ f.write(data) -+ elif parameters['output'].startswith('apel://'): -+ apelurl = urlparse(parameters['output']) -+ ams_host = apelurl.hostname if apelurl.hostname else 'msg.argo.grnet.gr' -+ ams_port = apelurl.port if apelurl.port else 443 -+ ams_auth_port = int(apelurl.query[len('ams_auth_port='):]) if apelurl.query.startswith('ams_auth_port=') else 8443 -+ publisher = StARPublisher(ams_host=ams_host, ams_port=ams_port, ams_auth_port=ams_auth_port, cert=self.interpreter.hostcert, key=self.interpreter.hostkey, capath=self.interpreter.capath) -+ pub = 
publisher.publish(data) -+ _log.debug("data published: %s", str(pub)) -+ else: -+ raise Exception("Unsupported output URL '%s'" % parameters['output']) -+ -+ def _execute_srr(self, parameters): -+ # Create object -+ dpm = srr.StorageService(self.interpreter.host, self.interpreter.hostcert, self.interpreter.hostkey) -+ dpm.addshares() -+ -+ # Which endpoints are supposed to be there? -+ dpm.addendpoints() -+ # Don't bother with this for now -+ #dpm.adddataStores() -+ -+ # Print or publish -+ dpm.publish(parameters['output'], self.interpreter.hostcert, self.interpreter.hostkey, self.interpreter.capath) -+ -+ -+class DbckCommand(ShellCommand): -+ """Consistency checks and fixes. -+ -+Dbck command must be called with one or more consistency checks: -+* namespace-continuity - connect dangling objects in lost+found (NOT TESTED) -+* no-replica - cleanup files with no replica from namespace (NOT TESTED, followed by nlink) -+* replica-type - change missing replica type to primary (NOT TESTED) -+* symlink - remove symlinks with missing target (NOT TESTED, followed by nlink) -+* nlink - recalculate number of objects in each directory (NOT TESTED, offline?) -+* zero-dir - set directory size to 0 deeper than update level -+* spacetoken - assign spacetoken according to quotatoken path -+* dir-size-offline - recalculate directory size with DPM off -+* dir-size - recalculate directory size with DPM on -+* spacetoken-size - sync spacetoken usage to associated directory size -+* lost-and-dark - find DPM DB vs. filesystem inconsistencies -+ -+Checks with "offline" suffix must be executed without running DPM/dmlite -+services, otherwise it is not guaranteed to get consistent results. -+Be aware that even content of read-only disknodes can be modified -+because head.unlink.ignorereadonlyfs is enabled by default.
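A minimal invocation sketch (the check names, the 'update' flag and the 'levels' parameter are the ones documented in this help text; the combination mirrors the migration use case listed below):

  dbck zero-dir,spacetoken,dir-size-offline,spacetoken-size update levels=6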
-+ -+Use cases: -+* Before legacy DPM to DPM DOME migration it is necessary to fix -+ zero-dir,spacetoken,dir-size-offline,spacetoken-size -+* Quotatoken changes (add/delete/update path) on existing directory -+ must be followed by spacetoken,spacetoken-size -+ -+Optional parameters: -+* update : enable updates not just dry-run checks -+* levels : depth of directory size updates (default: from /etc/domehead.conf config or 6) -+""" -+ -+ def _init(self): -+ self.parameters = ['?activities', '*?update', '*?...'] -+ -+ def _execute(self, given): -+ # expand compound activities: -+ activities = [] -+ for activity in given.pop(0).split(','): -+ if activity == 'legacy-to-dome-migration': -+ activities.append('zero-dir') -+ activities.append('spacetoken') -+ activities.append('dir-size') -+ activities.append('spacetoken-size') -+ elif activity == 'quotatoken-update': -+ activities.append('spacetoken') -+ activities.append('spacetoken-size') -+ else: -+ activities.append(activity) -+ -+ # parse optional parameters -+ updatedb = given[0] == 'update' if len(given) > 0 else False -+ if updatedb: given.pop(0) # remove 'update' from parameters -+ updatelevels = dbck.get_updatelevels(dbck.DEFAULT_DOME_CONFIG) -+ skip_spacetokens = [] -+ -+ for param in given: -+ if param.find('=') == -1: -+ return self.error("Invalid parameter '%s'" % param) -+ k, v = param.split('=', 1) -+ -+ if k == 'levels': -+ updatelevels = int(v) -+ else: -+ return self.error("Unknown parameter '%s'" % param) -+ -+ _log.debug("%sfix %s, levels=%i", "" if updatedb else "dry-run ", ','.join(activities), updatelevels) -+ -+ try: -+ for activity in activities: -+ if activity == 'namespace-continuity': -+ dbck.fix_namespace_continuity(updatedb=updatedb) -+ elif activity == 'no-replica': -+ dbck.fix_no_replica(updatedb=updatedb) -+ #elif activity == 'only-one-replica': # keep only one replica -+ # dbck.fix_only_one_replica(updatedb=updatedb) -+ elif activity == 'replica-type': -+ dbck.fix_replica_type(updatedb=updatedb) -+ elif activity == 'symlink': -+ dbck.fix_symlink(updatedb=updatedb) -+ elif activity == 'nlink': -+ dbck.fix_nlink(updatedb=updatedb) -+ elif activity == 'zero-dir': -+ dbck.fix_zero_dirs(updatelevels, updatedb=updatedb) -+ elif activity == 'spacetoken': -+ dbck.fix_spacetokens_by_path(skip=skip_spacetokens, updatedb=updatedb) -+ elif activity == 'dir-size-offline': -+ dbck.fix_dir_size_offline(updatelevels, updatedb=updatedb) -+ elif activity == 'dir-size': -+ dbck.fix_dir_size(updatelevels, updatedb=updatedb) -+ elif activity == 'spacetoken-size': -+ dbck.fix_spacetokens_size(updatedb=updatedb) -+ elif activity == 'lost-and-dark': -+ tasks = [] -+ spaceinfo, err = self.interpreter.executor.getspaceinfo() -+ if err: raise Exception(str(err)) -+ for poolname, pooldata in sorted(spaceinfo['poolinfo'].items()): -+ if pooldata['poolstatus'] != '0': continue -+ for server, serverdata in sorted(pooldata['fsinfo'].items()): -+ for fs, fsdata in sorted(serverdata.items()): -+ if fsdata['fsstatus'] == '1': continue # FS_DISABLED -+ #if fsdata['fsstatus'] == '2': continue # FS_RDONLY -+ tasks.append((poolname, server, fs, fsdata['fsstatus'])) -+ modules = [(lost.FileModule, ['lost', 'dark'], {'filename': 'stdout://'})] -+ lost.run(tasks, modules, self.interpreter.executor, 10, 12*60*60) -+ else: -+ raise Exception("Unknown action '%s'" % activity) -+ # FIXME: add/implement dpm-dbck functionality -+ except Exception as e: -+ return self.error("%s dbck failed: %s" % (given[0], str(e))) -+ return self.ok("OK") -+ -+ -+class
DumpCommand(ShellCommand): -+ """Dump file / replica info from DPM database. -+ -+Flexible and efficient way to dump data about stored files in various -+formats. Provides an easy way to create storage dumps consumed e.g. by Rucio -+consistency check. -+ -+The command accepts the following parameters: -+ -+* filter-filemode - None|File|Symlink|Directory, default: File -+* filter-metadata-status - None|Online|Migrated, default: Online -+* filter-replica-status - None|Available|BeingPopulated|ToBeDeleted, default: Available -+* filter-replica-pool - no filter by default -+* filter-replica-spacetoken - no filter by default -+* filter-replica-hostfs - no filter by default -+* filter-only-replica-hostfs - no filter by default -+ -+Output formats: -+ -+* xml-path -+* json-path -+* txt-path - customizable output format -+ usage: txt-path=basepath[,output[,header=...,format=...,footer=...]] -+ -+Examples: -+ dump txt-path= -+ dump txt-path=,/tmp/dump.txt.gz -+ dump txt-path=,,header={hash}{space}header{nl},format={path};{metadata_fileid}{tab}{metadata_parent_fileid}{comma}{metadata_mtime}{nl},footer={hash}{space}footer{nl} -+ dump txt-path=/dpm/farm.particle.cz/home/atlas/atlasdatadisk/rucio,davs://golias100.farm.particle.cz/dpm/farm.particle.cz/home/atlas/atlasdatadisk/dumps/dump_20200707 txt-path=/dpm/farm.particle.cz/home/atlas/atlasscratchdisk/rucio,davs://golias100.farm.particle.cz/dpm/farm.particle.cz/home/atlas/atlasscratchdisk/dumps/dump_20200707 -+ dump txt-path=/dpm/farm.particle.cz/home/atlas,,format=root://golias100.farm.particle.cz{path}{nl} filter-only-replica-hostfs=dpmdisk1.farm.particle.cz filter-only-replica-hostfs=dpmdisk2.farm.particle.cz:/mnt/fs1 -+""" -+ -+ def _init(self): -+ self.parameters = [ '*?...' ] -+ -+ def _execute(self, given): -+ filters = { -+ 'filemode': stat.S_IFREG, -+ 'metadata_status': '-', -+ 'replica_status': '-', -+ } -+ oconfig = [] -+ curtime = datetime.datetime.isoformat(datetime.datetime.now()) -+ -+ for param in given: -+ if param.find('=') == -1: -+ return self.error("Invalid parameter '%s'" % param) -+ k, v = param.split('=', 1) -+ -+ if k == 'filter-filemode': -+ # file type filter (None, File, Symlink, Directory) -+ if v.lower() == 'none': -+ if 'filemode' in filters: # no filtering on file type -+ del(filters['filemode']) -+ elif v.lower() == 'file': -+ filters['filemode'] = stat.S_IFREG -+ elif v.lower() == 'symlink': -+ filters['filemode'] = stat.S_IFLNK -+ elif v.lower() == 'directory': -+ filters['filemode'] = stat.S_IFDIR -+ else: -+ return self.error("Invalid file mode filter \"%s\"" % v) -+ elif k == 'filter-metadata-status': -+ # file metadata status filter (pydmlite.FileStatus) -+ if v.lower() == 'none': -+ if 'metadata_status' in filters: # no filtering on metadata file status -+ del(filters['metadata_status']) -+ elif v.lower() in ('o', 'online', '-'): -+ filters['metadata_status'] = '-' # pydmlite.FileStatus.kOnline -+ elif v.lower() in ('m', 'migrated'): -+ filters['metadata_status'] = 'm' # pydmlite.FileStatus.kMigrated -+ else: -+ return self.error("Invalid file metadata status filter \"%s\"" % v) -+ elif k == 'filter-replica-status': -+ # file replica status filter (pydmlite.ReplicaStatus) -+ if v.lower() == 'none': -+ if 'replica_status' in filters: # no filtering on replica status -+ del(filters['replica_status']) -+ elif v.lower() in ('a', 'available', '-'): -+ filters['replica_status'] = '-' # pydmlite.ReplicaStatus.kAvailable -+ elif v.lower() in ('p', 'beingpopulated'): -+ filters['replica_status'] = 'p' # 
pydmlite.ReplicaStatus.kBeingPopulated -+ elif v.lower() in ('d', 'tobedeleted'): -+ filters['replica_status'] = 'd' # pydmlite.ReplicaStatus.kToBeDeleted -+ else: -+ return self.error("Invalid file replica status filter \"%s\"" % v) -+ elif k == 'filter-replica-pool': -+ filters.setdefault('replica_pool', []).append(v) -+ elif k == 'filter-replica-spacetoken': -+ filters.setdefault('replica_spacetoken', []).append(v) -+ elif k == 'filter-replica-hostfs': -+ host, fs = list(v.split(':', 1)) if v.find(':') != -1 else (v, None) -+ filters.setdefault('replica_hostfs', {}).setdefault(host.lower(), []).append(fs) -+ elif k == 'filter-only-replica-hostfs': -+ host, fs = list(v.split(':', 1)) if v.find(':') != -1 else (v, None) -+ filters.setdefault('only_replica_hostfs', {}).setdefault(host.lower(), []).append(fs) -+ elif k in ['xml-path', 'json-path', 'txt-path']: -+ path, opts = self._parse_path(v, curtime) -+ if path is not None: -+ oconfig.append((k[:-len('-path')], path, opts)) -+ else: -+ return self.error("Unknown dump parameter '%s'" % param) -+ -+ if len(oconfig) == 0: -+ return self.error("No output format specified") -+ -+ try: -+ dump.dump_and_store(oconfig, filters, self.interpreter.hostcert, self.interpreter.hostkey) -+ except Exception as e: -+ return self.error("Failed to dump DPM files / replicas: %s" % str(e)) -+ return self.ok("OK") -+ -+ def _parse_path(self, params, curtime): -+ path = None -+ opts = {'curtime': curtime} -+ for pos, param in enumerate(params.split(',')): -+ if pos == 0: path = param -+ elif pos == 1: opts['output'] = param -+ else: -+ # formatter specific key=value options -+ key, val = param.split('=', 1) -+ opts[key] = val -+ -+ if opts.get('output', '') == '': -+ opts['output'] = 'stdout://' -+ elif opts['output'].startswith('/'): # absolute path -+ opts['output'] = 'file://{0}'.format(opts['output']) -+ elif opts['output'].find('://') == -1: # no scheme - use as relative path -+ opts['output'] = "file://{0}".format(os.path.realpath(opts['output'])) -+ else: -+ ourl = urlparse(opts['output']) -+ if ourl.scheme not in [ 'stdout', 'file', 'davs', 'https', 'root' ]: -+ #_log.warn("skipping output with unsupported protocol scheme: %s", ourl.geturl()) -+ return (None, None) -+ -+ return (path, opts) -diff --git a/shell/src/lost.py b/shell/src/lost.py -new file mode 100644 -index 00000000..81826d74 ---- /dev/null -+++ b/shell/src/lost.py -@@ -0,0 +1,856 @@ -+# Find and deal with DPM lost and dark files (DB vs. filesystem inconsistencies) -+# -+# usage: -+# python lost.py --help -+# examples: -+# python lost.py --verbose --processes 10 --stat-types=dark --fix-dark --all --include-fs-rdonly &> dpm-cleaner.out -+# -+from __future__ import absolute_import -+from __future__ import print_function -+from __future__ import division -+ -+import os -+import re -+import io -+import sys -+import time -+import random -+import signal -+import select -+import socket -+import multiprocessing -+import logging, logging.handlers -+ -+from M2Crypto import X509 -+ -+from .dbutils import DBConn -+from .executor import DomeExecutor -+ -+# compatibility for existing SLC6, CentOS7, CentOS8 packages -+try: -+ import pymysql.cursors as pymysql_cursors -+except ImportError: -+ import MySQLdb.cursors as pymysql_cursors -+ -+try: -+ import paramiko -+except ImportError: -+ sys.exit("Could not import ssh library module. 
Please install the paramiko rpm.") -+ -+ -+__author__ = 'Petr Vokac' -+__date__ = 'March 2018' -+__version = '0.0.1' -+ -+_log = logging.getLogger('dmlite-shell') -+ -+DEFAULT_HOST = socket.getfqdn() -+DEFAULT_HEAD_PORT = 1094 -+DEFAULT_DOME_CONFIG = '/etc/domehead.conf' -+DEFAULT_URLPREFIX = '/domehead/' -+DEFAULT_CERT = '/etc/grid-security/dpmmgr/dpmcert.pem' -+DEFAULT_KEY = '/etc/grid-security/dpmmgr/dpmkey.pem' -+DEFAULT_CAPATH = '/etc/grid-security/certificates/' -+ -+ -+######################################################################### -+# modules applied on normal/dark/lost files -+######################################################################### -+ -+class BaseModule(object): -+ -+ def __init__(self, poolname, diskserver, diskserverfs, run = []): -+ self._status = 'init' -+ self._poolname = poolname -+ self._diskserver = diskserver -+ self._diskserverfs = diskserverfs -+ self.run_normal = 'normal' in run -+ self.run_dark = 'dark' in run -+ self.run_lost = 'lost' in run -+ self._ssh = None -+ -+ def start(self, ssh): -+ if self._status not in [ 'init' ]: -+ raise Exception("invalid state transition (%s -> started)" % self._status) -+ self._status = 'started' -+ self._ssh = ssh -+ -+ def normal(self, filename, data): -+ pass -+ -+ def dark(self, filename, data): -+ pass -+ -+ def lost(self, filename, data): -+ pass -+ -+ def finish(self): -+ if self._status not in [ 'started' ]: -+ raise Exception("invalid state transition (%s -> finished)" % self._status) -+ self._status = 'finished' -+ self._ssh = None -+ -+ return {} -+ -+ -+ -+class TestModule(BaseModule): -+ """Example of module used to deal with consistency check output.""" -+ -+ def __init__(self, poolname, diskserver, diskserverfs, run=['normal','dark','lost']): -+ super(TestModule, self).__init__(poolname, diskserver, diskserverfs, run) -+ print("%s.__init__(%s, %s, %s)" % (self.__class__.__name__, diskserver, diskserverfs, run)) -+ -+ def start(self, ssh): -+ super(TestModule, self).start(ssh) -+ print("%s.start(%s)" % (self.__class__.__name__, ssh)) -+ -+ def normal(self, filename, data): -+ #if not self.run_normal: return -+ super(TestModule, self).normal(filename, data) -+ print("%s.normal(%s, %s)" % (self.__class__.__name__, filename, data)) -+ -+ def dark(self, filename, data): -+ #if not self.run_dark: return -+ super(TestModule, self).dark(filename, data) -+ print("%s.dark(%s, %s)" % (self.__class__.__name__, filename, data)) -+ -+ def lost(self, filename, data): -+ #if not self.run_lost: return -+ super(TestModule, self).lost(filename, data) -+ print("%s.lost(%s, %s)" % (self.__class__.__name__, filename, data)) -+ -+ def finish(self): -+ ret = super(TestModule, self).finish() -+ print("%s.finish()" % (self.__class__.__name__, )) -+ return ret -+ -+ -+ -+class StatModule(BaseModule): -+ """Get stat details about diskpool files.""" -+ -+ def __init__(self, poolname, diskserver, diskserverfs, run=['dark']): -+ super(StatModule, self).__init__(poolname, diskserver, diskserverfs, run) -+ -+ def start(self, ssh): -+ super(StatModule, self).start(ssh) -+ self._cnt = 0 -+ self._done = 0 -+ self._time = 0 -+ -+ def finish(self): -+ ret = super(StatModule, self).finish() -+ -+ ret['cnt'] = getattr(self, '_cnt', 0) -+ ret['done'] = getattr(self, '_done', 0) -+ ret['time'] = getattr(self, '_time', 0) -+ -+ _log.info("%s:%s stat time %0.1fs (cnt: %i, done: %i)", self._diskserver, self._diskserverfs, ret['time'], ret['cnt'], ret['done']) -+ -+ return ret -+ -+ def normal(self, filename, data): -+ self._cnt += 1 -+ 
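# note: the stat call below is timed so that finish() can report the total
# per-filesystem stat time together with the cnt/done counters; dark()
# follows the same pattern further down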
-+ start = time.time() -+ stat = self._stat(filename) -+ self._time += time.time() - start -+ -+ if stat != None: -+ self._done += 1 -+ data['stat'] = stat -+ -+ def dark(self, filename, data): -+ self._cnt += 1 -+ -+ start = time.time() -+ stat = self._stat(filename) -+ self._time += time.time() - start -+ -+ if stat != None: -+ self._done += 1 -+ data['stat'] = stat -+ -+ def _stat(self, filename): -+ ret = None -+ stat_filename = filename.replace(r"'", r"\'") -+ stat_cmd = "stat --format='%%i;%%h;%%f;%%s;%%U;%%G;%%X;%%Y;%%Z' '%s'" % stat_filename -+ -+ try: -+ exit_code, stdout_lines, stderr_lines = run_ssh_command(self._ssh, stat_cmd) -+ -+ if exit_code != 0: -+ raise Exception("non-zero exit code %i" % exit_code) -+ if len(stdout_lines) == 0: -+ raise Exception("no output") -+ -+ statout = stdout_lines[0].strip().split(';') -+ if len(statout) != 9: -+ raise Exception("invalid output %s" % str(statout)) -+ -+ inode, links, mode, size, user, group, atime, mtime, ctime = statout -+ ret = (int(inode), int(links), mode, int(size), user, group, int(atime), int(mtime), int(ctime)) -+ -+ except Exception as e: -+ _log.error("%s:%s stat '%s' failed: %s", self._diskserver, self._diskserverfs, stat_filename, str(e)) -+ -+ return ret -+ -+ -+ -+class ChksumModule(BaseModule): -+ """Calculate checksum of the diskpool files.""" -+ -+ def __init__(self, poolname, diskserver, diskserverfs, run=['normal']): -+ super(ChksumModule, self).__init__(poolname, diskserver, diskserverfs, run) -+ -+ def start(self, ssh): -+ super(ChksumModule, self).start(ssh) -+ self._cnt = 0 -+ self._done = 0 -+ self._time = 0 -+ -+ def finish(self): -+ ret = super(ChksumModule, self).finish() -+ -+ ret['cnt'] = getattr(self, '_cnt', 0) -+ ret['done'] = getattr(self, '_done', 0) -+ ret['time'] = getattr(self, '_time', 0) -+ -+ _log.info("%s:%s chksum time %0.1fs (cnt: %i, done: %i)", self._diskserver, self._diskserverfs, ret['time'], ret['cnt'], ret['done']) -+ -+ return ret -+ -+ def normal(self, filename, data): -+ self._cnt += 1 -+ -+ start = time.time() -+ chksum = self._chksum(filename) -+ self._time += time.time() - start -+ -+ if chksum != None: -+ self._done += 1 -+ data['chksum'] = chksum -+ -+ def dark(self, filename, data): -+ self._cnt += 1 -+ -+ start = time.time() -+ chksum = self._chksum(filename) -+ self._time += time.time() - start -+ -+ if chksum != None: -+ self._done += 1 -+ data['chksum'] = chksum -+ -+ def _chksum(self, filename): -+ ret = None -+ chksum_filename = filename.replace(r"'", r"\'") -+ chksum_cmd = "xrdadler32 '%s'" % chksum_filename -+ -+ try: -+ exit_code, stdout_lines, stderr_lines = run_ssh_command(self._ssh, chksum_cmd) -+ -+ if exit_code != 0: -+ raise Exception("non-zero exit code %i" % exit_code) -+ if len(stdout_lines) == 0: -+ raise Exception("no output") -+ -+ ret = stdout_lines[0].split(' ')[0].strip() -+ -+ except Exception as e: -+ _log.error("%s:%s chksum '%s' failed: %s", self._diskserver, self._diskserverfs, chksum_filename, str(e)) -+ -+ return ret -+ -+ -+ -+class FileModule(BaseModule): -+ """Write consistency check details in the file.""" -+ -+ def __init__(self, poolname, diskserver, diskserverfs, run=['normal','dark','lost'], filename=None, header=False): -+ super(FileModule, self).__init__(poolname, diskserver, diskserverfs, run) -+ -+ if filename: -+ self._filename = filename % { -+ 'POOL': poolname, -+ 'DISKSERVER': diskserver, -+ 'DISKSERVERFS': diskserverfs.lstrip('/').replace('/', '_'), -+ } -+ else: -+ self._filename = 'stdout://' -+ self._header = header -+ 
self._fh = None -+ self._summary = {} -+ -+ def start(self, ssh): -+ super(FileModule, self).start(ssh) -+ -+ if self._filename == 'stdout://': -+ self._fh = sys.stdout -+ elif self._filename == 'stderr://': -+ self._fh = sys.stderr -+ elif self._filename.startswith('file://'): -+ self._fh = open(self._filename[len('file://'):], "w") -+ else: -+ self._fh = open(self._filename, "w") -+ -+ if self._header: -+ self._fh.write("#type\tserver\tfile\tdbfileid\tdbmetadataid\tdbreplicaid\tdbstatus\tdbsize\tsize\tchksum\n") -+ -+ def finish(self): -+ data = super(FileModule, self).finish() -+ -+ for k, v in sorted(self._summary.items()): -+ _log.info("FileModule summary %s: %s", k, v) -+ -+ if self._fh == None: -+ return -+ if self._fh not in [ sys.stdout, sys.stderr ]: -+ self._fh.close() -+ self._fh = None -+ -+ return data -+ -+ def normal(self, filename, data): -+ self._write('FILE', self._diskserver, filename, data) -+ -+ def dark(self, filename, data): -+ self._write('DARK', self._diskserver, filename, data) -+ -+ def lost(self, filename, data): -+ if data['parent_dir_exists']: -+ self._write('LOST', self._diskserver, filename, data) -+ else: -+ self._write('LOSTNODIR', self._diskserver, filename, data) -+ -+ def _write(self, name, diskserver, filename, data): -+ fileid = data['db'].get('metadata_fileid') if data['db'].get('metadata_fileid') else data['db'].get('replica_fileid') -+ mrowid = data['db'].get('metadata_rowid', '') -+ rrowid = data['db'].get('replica_rowid', '') -+ status = data['db'].get('replica_status', '') -+ filesize = data['db'].get('metadata_filesize', '') -+ size = data.get('stat', ('', '', '', '', '', '', '', '', ''))[3] -+ chksum = data.get('chksum', '') -+ -+ self._fh.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (name, diskserver, filename, fileid, mrowid, rrowid, status, filesize, size, chksum)) -+ -+ self._summary["nfiles %s" % name] = self._summary.get("nfiles %s" % name, 0) + 1 -+ try: -+ self._summary["size %s" % name] = self._summary.get("size %s" % name, 0) + int(filesize) -+ except ValueError as e: -+ _log.debug("invalid file size: %s ... 
%s", filename, filesize) -+ -+ -+ -+class FixModule(BaseModule): -+ """Fix problems found by consistency checks.""" -+ -+ def __init__(self, poolname, diskserver, diskserverfs, run=['dark','lost'], executor=None): -+ super(FixModule, self).__init__(poolname, diskserver, diskserverfs, run) -+ self._executor = executor -+ -+ def dark(self, filename, data): -+ rm_cmd = "rm '%s'" % filename.replace(r"'", r"\'") -+ try: -+ exit_code, stdout_lines, stderr_lines = run_ssh_command(self._ssh, rm_cmd) -+ -+ if exit_code != 0: -+ raise Exception("non-zero exit code %i" % exit_code) -+ -+ except Exception as e: -+ _log.error("%s:%s remove dark '%s' failed: %s", self._diskserver, self._diskserverfs, filename, str(e)) -+ -+ def lost(self, filename, data): -+ status = data.get('status', '') -+ if status != '-': -+ _log.info("skipping replica %s:%s with status %s", self._diskserver, filename, status) -+ return -+ -+ if data['parent_dir_exists']: -+ #dpm.dpm_delreplica("%s:%s" % (self._diskserver, filename)) -+ if self._executor: -+ _, err = self._executor.delreplica(self._diskserver, filename) -+ if err: -+ _log.warn("unable to delete replica %s:%s: %s", self._diskserver, filename, err) -+ else: -+ _log.debug("executor not defined, not deleting replica %s:%s", self._diskserver, filename) -+ else: -+ _log.warn("replica directory missing, skipping delreplica %s:%s", self._diskserver, filename) -+ -+ -+ -+######################################################################### -+######################################################################### -+ -+# execute command on remote machine and deal with buffering -+# for commands that produce huge stdout/stderr -+def run_ssh_command(client, cmd, timeout=0): -+ _log.debug("%s running command `%s` (timeout=%s)", str(client.get_transport().getpeername()[0]), cmd, timeout) -+ -+ channel = client.get_transport().open_session() -+ channel.exec_command(cmd) -+ channel.shutdown_write() -+ -+ stdout_bytes = io.BytesIO() -+ stderr_bytes = io.BytesIO() -+ while not channel.eof_received or channel.recv_ready() or channel.recv_stderr_ready(): -+ readq, _, _ = select.select([channel], [], [], timeout) -+ for c in readq: -+ if c.recv_ready(): -+ stdout_bytes.write(channel.recv(len(c.in_buffer))) -+ if c.recv_stderr_ready(): -+ stderr_bytes.write(channel.recv(len(c.in_buffer))) -+ -+ exit_status = channel.recv_exit_status() -+ channel.shutdown_read() -+ channel.close() -+ -+ stdout_bytes.seek(0) -+ stderr_bytes.seek(0) -+ stdout_wrapper = io.TextIOWrapper(stdout_bytes, encoding='utf-8') -+ stderr_wrapper = io.TextIOWrapper(stderr_bytes, encoding='utf-8') -+ stdout_lines = stdout_wrapper.readlines() -+ stderr_lines = stderr_wrapper.readlines() -+ -+ _log.debug("%s finished command `%s`: exit_status %i, stdout lines %i, stderr lines %i", str(client.get_transport().getpeername()[0]), cmd, exit_status, len(stdout_lines), len(stderr_lines)) -+ -+ return (exit_status, stdout_lines, stderr_lines) -+ -+ -+ -+def diskserverfs_check(diskserver, diskserverfs, modules = []): -+ _log.debug("diskserverfs_check(%s, %s, %s)", diskserver, diskserverfs, [x.__class__.__name__ for x in modules]) -+ -+ # login to diskserver -+ try: -+ ssh = paramiko.SSHClient() -+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) -+ ssh.connect(diskserver, username='root', allow_agent=True, look_for_keys=True) -+ except Exception as e: -+ _log.error("could not ssh to %s for file listing. 
Passwordless ssh needed to target disk server: %s", diskserver, str(e)) -+ return 'SSH connection failed' -+ -+ for module in modules: -+ module.start(ssh) -+ -+ # obtain list of all directories on given filesystem -+ _log.debug("%s:%s find directories", diskserver, diskserverfs) -+ try: -+ cmd = "find '%s' -type d" % diskserverfs.replace(r"'", r"\'") -+ exit_code, stdout_lines, stderr_lines = run_ssh_command(ssh, cmd) -+ if exit_code != 0: -+ raise Exception("find failed with exit code %i" % exit_code) -+ if len(stderr_lines) > 0: -+ raise Exception("find returned non-empty stderr with %i lines (first error): %s" % (len(stderr_lines), stderr_lines[0])) -+ diskdirs = set([ x.rstrip('\n') for x in stdout_lines ]) -+ except Exception as e: -+ _log.error("remote directory find over SSH failed on %s:%s: %s", diskserver, diskserverfs, str(e)) -+ return 'SSH read dirlist failed' -+ -+ # obtain list of all files on given filesystem -+ _log.debug("%s:%s find files", diskserver, diskserverfs) -+ try: -+ cmd = "find '%s' -type f" % diskserverfs.replace(r"'", r"\'") -+ exit_code, stdout_lines, stderr_lines = run_ssh_command(ssh, cmd) -+ if exit_code != 0: -+ raise Exception("find failed with exit code %i" % exit_code) -+ if len(stderr_lines) > 0: -+ raise Exception("find returned non-empty stderr with %i lines (first error): %s" % (len(stderr_lines), stderr_lines[0])) -+ diskfiles = set([ x.rstrip('\n') for x in stdout_lines ]) -+ except Exception as e: -+ _log.error("remote file find over SSH failed on %s:%s: %s", diskserver, diskserverfs, str(e)) -+ return 'SSH read filelist failed' -+ -+ _log.info("%s:%s found %i directories and %i files", diskserver, diskserverfs, len(diskdirs), len(diskfiles)) -+ -+ # read also all files for diskserver:diskserverfs from DPM database -+ _log.debug("%s:%s read database replicas", diskserver, diskserverfs) -+ conn = cursor = None -+ try: -+ dbfiles = {} -+ conn = DBConn.new('cns_db') -+ cursor = conn.cursor(pymysql_cursors.SSDictCursor) -+ #cursor.execute("select sfn, rowid, fileid, status from Cns_file_replica where host = %s and fs = %s", (diskserver, diskserverfs)) -+ # MySQL doesn't support FULL OUTER JOIN, use UNION and two separate LEFT+RIGHT exclusive joins -+ cols = [ -+ 'replica.fileid', 'replica.rowid', 'replica.status', 'replica.sfn', -+ 'metadata.rowid', 'metadata.filesize', 'metadata.csumtype', 'metadata.csumvalue', -+ ] -+ colstr = ', '.join(["{0} AS `{1}`".format(x, x.replace('.', '_', 1)) for x in cols]) -+ sql = """SELECT SQL_BIG_RESULT {0} -+FROM Cns_file_metadata AS metadata LEFT JOIN Cns_file_replica AS replica ON metadata.fileid = replica.fileid -+WHERE host = %s and fs = %s -+UNION ALL -+SELECT SQL_BIG_RESULT {0} -+FROM Cns_file_metadata AS metadata RIGHT JOIN Cns_file_replica AS replica ON metadata.fileid = replica.fileid -+WHERE replica.rowid IS NULL AND host = %s and fs = %s -+""".format(colstr) -+ cursor.execute(sql, (diskserver, diskserverfs, diskserver, diskserverfs)) -+ for row in cursor: -+ if row['replica_rowid'] == None: -+ _log.warn("no replica for metadata: %s", str(row)) -+ continue -+ if row['metadata_rowid'] == None: -+ _log.warn("no metadata for replica: %s", str(row)) -+ filename = row['replica_sfn'].decode('utf-8').split(':', 1)[1] -+ dbfiles[filename] = row -+ except Exception as e: -+ _log.error("unable to get data from database for %s:%s: %s", diskserver, diskserverfs, str(e)) -+ return 'DB read failed' -+ finally: -+ if cursor: cursor.close() -+ if conn: conn.close() -+ -+ _log.info("%s:%s database has %i entries", 
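The UNION ALL query above emulates a FULL OUTER JOIN, which MySQL does not support: the first branch yields metadata rows with their replicas on the given filesystem, the second branch adds replicas that have no matching metadata row. Stripped of the DPM column lists, the pattern is just this (tables a and b are placeholders):

    # generic FULL OUTER JOIN emulation for MySQL, a minimal sketch
    full_outer_join = """
    SELECT a.id, b.id FROM a LEFT JOIN b ON a.id = b.id
    UNION ALL
    SELECT a.id, b.id FROM a RIGHT JOIN b ON a.id = b.id WHERE a.id IS NULL
    """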
-+
-+ # find lost files (files in DPM database with missing file on diskserver)
-+ for sfn_local in dbfiles:
-+ try:
-+ data = {'db': dbfiles[sfn_local]}
-+ if sfn_local in diskfiles:
-+ for module in modules:
-+ if not module.run_normal: continue
-+ module.normal(sfn_local, data)
-+ else:
-+ data['parent_dir_exists'] = os.path.dirname(sfn_local) in diskdirs
-+ for module in modules:
-+ if not module.run_lost: continue
-+ module.lost(sfn_local, data)
-+ except Exception as e:
-+ _log.error("%s:%s file -> db comparison failed for %s: %s", diskserver, diskserverfs, sfn_local, str(e))
-+
-+ _log.info("%s:%s finished file -> db check", diskserver, diskserverfs)
-+
-+ # find dark files (files on diskserver that have no record in DPM database)
-+ for sfn_local in diskfiles:
-+ try:
-+ if sfn_local in dbfiles: continue
-+ data = {}
-+ for module in modules:
-+ if not module.run_dark: continue
-+ module.dark(sfn_local, data)
-+ except Exception as e:
-+ _log.error("%s:%s db -> file comparison failed for %s: %s", diskserver, diskserverfs, sfn_local, str(e))
-+
-+ _log.info("%s:%s finished db -> file check", diskserver, diskserverfs)
-+
-+ ssh.close()
-+
-+ for module in modules:
-+ module.finish()
-+
-+ return 'DONE'
-+
-+
-+
-+def process_diskserverfs(executor, poolname, diskserver, diskserverfs, fsstatus, modules):
-+ _log.debug("process_diskserverfs(%s, %s, %s, %s, %s)", poolname, diskserver, diskserverfs, fsstatus, [x.__class__.__name__ for x in modules])
-+
-+ online = fsstatus == '0'
-+ if online:
-+ # set DPM filesystem read-only before checking for DARK/LOST data
-+ # not to let DPM create/delete data files in diskpool and in database
-+ # during our attempt to find inconsistencies
-+ try:
-+ _log.debug("setting FS read-only on %s:%s", diskserver, diskserverfs)
-+ _, err = executor.modifyfs(diskserver, diskserverfs, poolname, 2) # read-only
-+ if err:
-+ raise Exception("dome_modifyfs failed (%s)" % str(err))
-+ except Exception as e:
-+ _log.error("unable to change FS read-only on %s:%s: %s", diskserver, diskserverfs, str(e))
-+ return (diskserver, diskserverfs, 'FS RDONLY failed')
-+
-+ retval = 'EMPTY'
-+ try:
-+ retval = diskserverfs_check(diskserver, diskserverfs, modules)
-+ except Exception as e:
-+ _log.error("unexpected exception %s:%s: %s", diskserver, diskserverfs, str(e))
-+ retval = "Unexpected error %s" % str(e)
-+ finally:
-+ if online:
-+ # set DPM filesystem back online in case it was set read-only by this script
-+ try:
-+ _log.debug("setting FS online on %s:%s", diskserver, diskserverfs)
-+ _, err = executor.modifyfs(diskserver, diskserverfs, poolname, 0) # online
-+ if err:
-+ raise Exception("dome_modifyfs failed (%s)" % str(err))
-+ except Exception as e:
-+ _log.error("unable to change FS read-only on %s:%s: %s", diskserver, diskserverfs, str(e))
-+ return (diskserver, diskserverfs, 'FS ONLINE failed')
-+
-+ return (diskserver, diskserverfs, retval)
-+
-+
-+
-+def run(tasks, modules, executor, processes=10, timeout=12*60*60):
-+ _log.info("%d filesystems checked with %d processes", len(tasks), processes)
-+
-+ starttime = time.time()
-+
-+ # random order of disknode filesystems not to stress one disknode
-+ # with up to options.processes concurrent checks
-+ random.shuffle(tasks)
-+
-+ # disable SIGINT and SIGTERM in child processes
-+ original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
-+ #original_sigterm_handler = signal.signal(signal.SIGTERM, signal.SIG_IGN)
-+ pool = multiprocessing.Pool(processes=processes)
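Child processes forked by multiprocessing.Pool inherit the parent's signal dispositions, which is why SIGINT is switched to SIG_IGN just around the Pool creation and restored immediately afterwards: workers ignore Ctrl-C and only the parent decides whether to terminate them. A standalone sketch of the same pattern, assuming the default fork start method on Linux:

    import multiprocessing
    import signal
    import time

    def work(n):
        time.sleep(n)  # long-running task; SIGINT stays ignored in the worker
        return n

    if __name__ == '__main__':
        orig = signal.signal(signal.SIGINT, signal.SIG_IGN)  # workers inherit SIG_IGN
        pool = multiprocessing.Pool(processes=2)
        signal.signal(signal.SIGINT, orig)  # parent reacts to Ctrl-C again
        try:
            print(pool.map(work, [1, 2]))
            pool.close()
        except KeyboardInterrupt:
            pool.terminate()  # parent stops the workers on Ctrl-C
        pool.join()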
-+ #signal.signal(signal.SIGTERM, original_sigterm_handler)
-+ signal.signal(signal.SIGINT, original_sigint_handler)
-+
-+ futures = []
-+ for poolname, diskserver, diskserverfs, fsstatus in tasks:
-+ # initialize modules
-+ modinsts = []
-+ for mclass, mrun, mparams in modules:
-+ modinst = mclass(poolname, diskserver, diskserverfs, run=mrun, **mparams)
-+ modinsts.append(modinst)
-+
-+ # run consistency check asynchronously in separate processes
-+ future = pool.apply_async(process_diskserverfs, (executor, poolname, diskserver, diskserverfs, fsstatus, modinsts), callback=lambda x: _log.info("finished check: %s", x))
-+ futures.append(future)
-+
-+ try:
-+ timed_out = False
-+ for future in futures:
-+ time_remaining = timeout+10 - (time.time() - starttime)
-+ if time_remaining > 0:
-+ future.wait(time_remaining)
-+ else:
-+ timed_out = True
-+ break
-+
-+ if timed_out:
-+ _log.error("children terminated, reached global script run timeout %i", timeout)
-+ pool.terminate()
-+ else:
-+ pool.close()
-+
-+ except KeyboardInterrupt as e:
-+ _log.info("process terminated: %s", str(e))
-+
-+ pool.terminate()
-+
-+ pool.join()
-+
-+
-+
-+def parseConfig(filename):
-+ _log.debug("parsing config file %s", filename)
-+ ret = {}
-+ with open(filename) as f:
-+ reComment = re.compile(r'^ *#')
-+ reKeyValue = re.compile(r'^(.*?):\s*(.*?)\s*$')
-+ for line in f.readlines():
-+ if reComment.match(line): continue
-+ res = reKeyValue.match(line)
-+ if res == None: continue
-+ k = res.group(1)
-+ v = res.group(2)
-+ ret[k] = v
-+ return ret
-+
-+
-+
-+def main(argv):
-+ import optparse
-+ import getpass
-+ import inspect
-+
-+ # basic logging configuration
-+ streamHandler = logging.StreamHandler(sys.stderr)
-+ streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S"))
-+ _log.addHandler(streamHandler)
-+ _log.setLevel(logging.WARN)
-+
-+ # parse options from command line
-+ def opt_set_loglevel(option, opt, value, parser):
-+ loglevel = option.default
-+ if value != None:
-+ loglevel = int({
-+ 'CRITICAL': logging.CRITICAL,
-+ 'DEBUG': logging.DEBUG,
-+ 'ERROR': logging.ERROR,
-+ 'FATAL': logging.FATAL,
-+ 'INFO': logging.INFO,
-+ 'NOTSET': logging.NOTSET,
-+ 'WARN': logging.WARN,
-+ 'WARNING': logging.WARNING,
-+ }.get(value, value))
-+
-+ _log.setLevel(loglevel)
-+ setattr(parser.values, option.dest, loglevel)
-+
-+ parser = optparse.OptionParser(usage = "usage: %prog [options] [--all | --pool poolname ] [ server1:fs1 [ server2:fs2 [ ... ] ] ]", version="%prog")
-+ parser.add_option("-v", "--verbose", dest="loglevel", action="callback", callback=opt_set_loglevel, default=logging.DEBUG, help="set log level to DEBUG")
-+ parser.add_option("-q", "--quiet", dest="loglevel", action="callback", callback=opt_set_loglevel, default=logging.ERROR, help="set log level to ERROR")
-+ parser.add_option("--log-level", dest="loglevel", action="callback", callback=opt_set_loglevel, type="string", help="set log level (default: %default)")
-+ parser.add_option("--log-file", dest="logfile", metavar="FILE", help="set log file (default: %default)")
-+ parser.add_option("--log-size", dest="logsize", type="int", default=10*1024*1024, help="maximum size of log file (default: %default)")
-+ parser.add_option("--log-backup", dest="logbackup", type="int", default=2, help="number of log backup files (default: %default)")
-+ # DOME configuration
-+ parser.add_option("-c", "--config", dest="config", help="DOME config file")
-+ parser.add_option('--host', dest='host', help="DOME host, if no DOME config given")
-+ parser.add_option('--port', dest='port', help="DOME port, if no DOME config given")
-+ parser.add_option('--urlprefix', dest='urlprefix', help="DOME base url prefix, if no config given")
-+ parser.add_option("--cert", dest='cert', help="DOME host certificate, if no config given")
-+ parser.add_option("--key", dest='key', help="DOME host key, if no config given")
-+ # Lost and dark processing
-+ parser.add_option("-a", "--all", dest='all', action="store_true", default=False, help='check all pools (default: %default)')
-+ parser.add_option("-p", "--pool", dest='pools', action='append', default=[], help='list of pools to check (default: %default).')
-+ parser.add_option("-n", "--processes", dest="processes", type="int", default=1, help="number of parallel check processes (default: %default)")
-+ parser.add_option("-t", "--timeout", dest="timeout", type="int", default=12*60*60, help="consistency check timeout (default: %default)")
-+ parser.add_option("-r", "--include-fs-rdonly", dest="fsrdonly", action="store_true", default=False, help="include filesystems with RDONLY status (default: %default)")
-+ parser.add_option("-b", "--include-fs-disabled", dest="fsdisabled", action="store_true", default=False, help="include filesystems with DISABLED status (default: %default)")
-+ parser.add_option("--module-stat", dest="module_stat", default=None, help="stat diskpool normal or dark files (default: %default)")
-+ parser.add_option("--module-chksum", dest="module_chksum", default=None, help="checksum diskpool normal or dark files (default: %default)")
-+ parser.add_option("--module-output", dest="module_output", default=None, help="include information about normal/dark/lost in the output file (default: %default)")
-+ parser.add_option("--module-output-filename", dest="module_output_filename", default="stdout://", help="output filename template can include %(DISKSERVER)s, %(DISKSERVERFS)s, %(POOL)s (default: %default)")
-+ parser.add_option("--module-fix", dest="module_fix", default=None, help="fix dark/lost inconsistencies - dangerous (default: %default)")
-+
-+ (options, args) = parser.parse_args(argv[1:])
-+
-+ if options.logfile == '-':
-+ _log.removeHandler(streamHandler)
-+ streamHandler = logging.StreamHandler(sys.stdout)
-+ streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S"))
-+ _log.addHandler(streamHandler)
-+ elif options.logfile != None and options.logfile != '':
-+ #fileHandler = logging.handlers.TimedRotatingFileHandler(options.logfile, 'midnight', 1, 4)
-+ fileHandler = logging.handlers.RotatingFileHandler(options.logfile, maxBytes=options.logsize, backupCount=options.logbackup)
-+ fileHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S"))
-+ _log.addHandler(fileHandler)
-+ _log.removeHandler(streamHandler)
-+
-+ _log.info("command: %s", " ".join(argv))
-+ _log.info("script: %s", os.path.abspath(inspect.getfile(inspect.currentframe())))
-+ _log.info("user: %s", getpass.getuser())
-+
-+ # validate command line options
-+ if len(args) == 0 and not options.all and len(options.pools) == 0:
-+ _log.error("wrong number of args, use `%s -h` for basic help", argv[0])
-+ return 1
-+
-+ if not options.config and not options.host:
-+ _log.error("Missing required configuration options \"config\" or \"host\"")
-+ return 1
-+
-+ config = {}
-+ if options.config:
-+ if not os.path.exists(options.config):
-+ _log.error("DOME configuration file %s doesn't exist", options.config)
-+ return 1
-+ config = parseConfig(options.config)
-+
-+ dome_host = options.host if options.host else DEFAULT_HOST
-+ dome_port = options.port if options.port else DEFAULT_HEAD_PORT
-+ dome_urlprefix = options.urlprefix if options.urlprefix else config.get('glb.auth.urlprefix', DEFAULT_URLPREFIX)
-+ if not dome_urlprefix.startswith('/'): dome_urlprefix = "/{0}".format(dome_urlprefix)
-+ if not dome_urlprefix.endswith('/'): dome_urlprefix = "{0}/".format(dome_urlprefix)
-+
-+ dome_cert = options.cert if options.cert else config.get('glb.restclient.cli_certificate', DEFAULT_CERT)
-+ dome_key = options.key if options.key else config.get('glb.restclient.cli_private_key', DEFAULT_KEY)
-+ dome_capath = DEFAULT_CAPATH
-+
-+ dome_x509 = X509.load_cert(dome_cert, X509.FORMAT_PEM)
-+ dome_hostDN = '/' + '/'.join(dome_x509.get_subject().as_text().split(', '))
-+ dome_command_base = "https://{0}:{1}{2}".format(dome_host, dome_port, dome_urlprefix)
-+
-+ executor = DomeExecutor(dome_command_base, dome_cert, dome_key, dome_capath, dome_hostDN, dome_host)
-+
-+ spaceinfo, err = executor.getspaceinfo()
-+ if err:
-+ _log.error("unable to get storage space configuration: %s", str(err))
-+ return 1
-+
-+ tasks = []
-+ if options.all or len(options.pools) > 0:
-+ for poolname, pooldata in sorted(spaceinfo['poolinfo'].items()):
-+ fs_online = 0
-+ if pooldata['poolstatus'] != '0': continue
-+ if not options.all and poolname not in options.pools: continue
-+ for server, serverdata in sorted(pooldata['fsinfo'].items()):
-+ for fs, fsdata in sorted(serverdata.items()):
-+ if not options.fsdisabled and fsdata['fsstatus'] == '1': continue # FS_DISABLED
-+ if not options.fsrdonly and fsdata['fsstatus'] == '2': continue # FS_RDONLY
-+ fs_online += 1
-+ tasks.append((poolname, server, fs, fsdata['fsstatus']))
-+ if fs_online <= options.processes:
-+ # TODO: this is not perfect because it doesn't prevent
-+ # all read-only filesystems in one storage pool
-+ _log.warn("%d processes could set all %d online filesystems read-only for pool %s during check", options.processes, fs_online, poolname)
-+
-+ for diskfs in args:
-+ diskserver, diskserverfs = diskfs.split(':', 1)
-+ poolname = spaceinfo['fsinfo'].get(diskserver, {}).get(diskserverfs, {}).get('poolname')
-+ fsstatus = spaceinfo['fsinfo'].get(diskserver, {}).get(diskserverfs, {}).get('fsstatus')
-+ if not poolname:
-+ _log.warn("unable to find pool for %s:%s", diskserver, 
diskserverfs) -+ continue -+ if (poolname, diskserver, diskserverfs, fsstatus) in tasks: continue -+ tasks.append((poolname, diskserver, diskserverfs, fsstatus)) -+ -+ if len(tasks) == 0: -+ _log.error("no filesystem found to be checked") -+ return 1 -+ -+ # order of modules is important, because data can be -+ # passed to the module called later in the chain -+ modules = [] -+ if options.module_stat: -+ modules.append((StatModule, options.module_stat.split(','), {})) -+ if options.module_chksum: -+ modules.append((ChksumModule, options.module_chksum.split(','), {})) -+ if options.module_output: -+ modules.append((FileModule, options.module_output.split(','), {'filename': options.module_output_filename})) -+ if options.module_fix: -+ modules.append((FixModule, options.module_fix.split(','), {'executor': executor})) -+ -+ if len(modules) == 0: -+ _log.error("no module defined") -+ return 1 -+ -+ starttime = time.time() -+ -+ run(tasks, modules, executor, options.processes, options.timeout) -+ -+ spaceinfo_new, err = executor.getspaceinfo() -+ if err: -+ _log.error("unable to get storage space configuration: %s", str(err)) -+ _log.warn("check online status for following servers/filesystems:") -+ for server, serverdata in sorted(spaceinfo['fsinfo'].items()): -+ for fs, fsdata in sorted(serverdata.items()): -+ if fsdata['fsstatus'] == '0': -+ _log.warn(" %s:%s", server, fs) -+ return 1 -+ -+ # doublecheck that filesystems that were online at the beginning -+ # are set back to online before the end of this script -+ for poolname, pooldata in sorted(spaceinfo_new['poolinfo'].items()): -+ if pooldata['poolstatus'] != '0': continue -+ for server, serverdata in sorted(pooldata['fsinfo'].items()): -+ for fs, fsdata in sorted(serverdata.items()): -+ if fsdata['fsstatus'] == '0': # already back online -+ continue -+ -+ if 'fsstatus' not in spaceinfo['fsinfo'].get(server, {}).get(fs, {}): -+ _log.error("unknown %s:%s online status?!?!", server, fs) -+ continue -+ -+ if spaceinfo['fsinfo'][server][fs]['fsstatus'] != '0': -+ continue -+ -+ # set DPM filesystem back online in case it was set read-only by this script -+ try: -+ _log.debug("setting FS online on %s:%s", server, fs) -+ _, err = executor.modifyfs(server, fs, poolname, 0) # online -+ if err: -+ raise Exception("dome_modifyfs failed (%s)" % str(err)) -+ except Exception as e: -+ _log.error("unable to change FS read-only on %s:%s: %s", server, fs, str(e)) -+ -+ _log.info("total runtime: %.2fs", time.time()-starttime) -+ -+ return os.EX_OK -diff --git a/shell/src/dpm-storage-summary.py b/shell/src/srr.py -old mode 100755 -new mode 100644 -similarity index 67% -rename from shell/src/dpm-storage-summary.py -rename to shell/src/srr.py -index ff932643..5575c80a ---- a/shell/src/dpm-storage-summary.py -+++ b/shell/src/srr.py -@@ -1,4 +1,3 @@ --#!/usr/bin/python2 - # WLCG Storage Resource Reporting implementation for DPM - # * https://docs.google.com/document/d/1yzCvKpxsbcQC5K9MyvXc-vBF1HGPBk4vhjw3MEXoXf8/edit - # * https://twiki.cern.ch/twiki/bin/view/LCG/AccountingTaskForce -@@ -15,23 +14,27 @@ import argparse - import tempfile - import os, sys - import logging, logging.handlers -+ -+try: import urllib.parse as urlparse -+except ImportError: import urlparse -+ - from dmliteshell.infoutils import SystemInfo - - __version__ = '0.1.1' - --_log = logging.getLogger('DPMSRR') -+_log = logging.getLogger('dmlite-shell') - - - # The top level object --class storageService(object): -+class StorageService(object): - -- def __init__(self, config): -- self.config 
= config -+ def __init__(self, host, cert, key): -+ self._host = host -+ self.info = SystemInfo(host, cert, key) - self.entry = {} -- info = SystemInfo(self.config) -- self.entry["name"] = self.config.host -+ self.entry["name"] = host - self.entry["implementation"] = "DPM" -- self.entry["implementationversion"] = info.getsysinfo("dome") -+ self.entry["implementationversion"] = self.info.getsysinfo("dome") - self.entry["qualitylevel"] = "production" - self.entry["storageshares"] = [] - self.entry["storageendpoints"] = [] -@@ -39,40 +42,56 @@ class storageService(object): - self.entry["latestupdate"] = int(time.time()) - - def addshares(self): -- info = SystemInfo(self.config) -- jgqt, totalcapacity, totalused, totalgroups = info.getspaces() -+ jgqt, totalcapacity, totalused, totalgroups = self.info.getspaces() - for space, qt in jgqt.items(): - quotatktotspace = max(0, int(qt["quotatktotspace"])) - pathusedspace = max(0, int(qt["pathusedspace"])) - self.entry["storageshares"].append(storageShare(qt["quotatkname"], quotatktotspace, pathusedspace, qt["path"], qt["groups"])) - - def addendpoints(self): -- self.entry["storageendpoints"].append(storageEndpoint("gsiftp", "gsiftp://" + self.config.host + "/", "gsiftp")) -- self.entry["storageendpoints"].append(storageEndpoint("https", "https://" + self.config.host + "/", "https")) -- self.entry["storageendpoints"].append(storageEndpoint("xrootd", "root://" + self.config.host + "/", "xrootd")) -- self.entry["storageendpoints"].append(storageEndpoint("srm", "srm://" + self.config.host + ":8446/srm/managerv2?SFN=/", "srm")) -+ self.entry["storageendpoints"].append(storageEndpoint("gsiftp", "gsiftp://" + self._host + "/", "gsiftp")) -+ self.entry["storageendpoints"].append(storageEndpoint("https", "https://" + self._host + "/", "https")) -+ self.entry["storageendpoints"].append(storageEndpoint("xrootd", "root://" + self._host + "/", "xrootd")) -+ self.entry["storageendpoints"].append(storageEndpoint("srm", "srm://" + self._host + ":8446/srm/managerv2?SFN=/", "srm")) - - def adddataStores(self): - self.entry["datastores"].append(dataStore("DPM data store")) - - def printjson(self, out=sys.stdout): - out.write(json.dumps({"storageservice": self.entry}, indent=4)) -+ -+ def publish(self, url, cert=None, key=None, capath=None): -+ if url == None or url == 'stdout://': # stdout -+ sys.stdout.write(json.dumps({"storageservice": self.entry}, indent=4)) -+ elif url == 'stderr://': -+ sys.stderr.write(json.dumps({"storageservice": self.entry}, indent=4)) -+ elif url.startswith('/') or url.startswith('file://'): -+ pass -+ elif url.startswith('davs://') or url.startswith('https://'): -+ if url.startswith('davs://'): -+ url = "https://{0}".format(url[len('davs://'):]) -+ self._publish_https(url, cert, key, capath) -+ elif url.startswith('root://') or url.startswith('xroot://'): -+ if url.startswith('xroot://'): -+ url = "root://{0}".format(url[len('xroot://'):]) -+ self._publish_xrootd(url, cert, key, capath) -+ else: -+ raise Exception("Output URL '{0}' not supported".format(url)) - -- def publish_https(self): -+ def _publish_https(self, url, cert, key, capath): - try: - import pycurl - except ImportError as e: - raise Exception("unable to import pycurl module (install python2-pycurl or python3-pycurl package): {0}".format(str(e))) - -- urlbase = "https://%s%s" % (self.config.host, self.config.path) -- if self.config.davport != 443: -- urlbase = "https://%s:%s%s" % (self.config.host, self.config.davport, self.config.path) -- urlfile = "%s/%s" % (urlbase, 
self.config.file) -- urlfilenew = "%s/%s.%i" % (urlbase, self.config.file, int(time.time())) -+ urlbase = os.path.dirname(url) -+ urlnew = "{0}.{1}".format(url, int(time.time())) - - c = pycurl.Curl() -- c.setopt(c.SSLCERT, self.config.cert) -- c.setopt(c.SSLKEY, self.config.key) -+ c.setopt(c.SSLCERT, cert) -+ c.setopt(c.SSLKEY, key) -+ if capath: -+ c.setopt(c.CAPATH, capath) - c.setopt(c.SSL_VERIFYPEER, 0) - c.setopt(c.SSL_VERIFYHOST, 2) - c.setopt(c.FOLLOWLOCATION, 1) -@@ -80,7 +99,6 @@ class storageService(object): - c.setopt(c.VERBOSE, True) - - try: -- _log.debug("check if base path %s exists", urlbase) - c.setopt(c.URL, urlbase) - c.setopt(c.NOBODY, True) - c.setopt(c.CUSTOMREQUEST, "HEAD") -@@ -88,8 +106,19 @@ class storageService(object): - if c.getinfo(c.HTTP_CODE) != 200: - raise Exception("base path %s not found" % urlbase) - -- _log.debug("put the new file to %s", urlfilenew) -- c.setopt(c.URL, urlfilenew) -+ # avoid overwriting directory because after failed -+ # PUT we would try to DELETE whole directory content -+ hdrs = [] -+ c.setopt(c.URL, url) -+ c.setopt(c.NOBODY, True) -+ c.setopt(c.CUSTOMREQUEST, "HEAD") -+ c.setopt(c.HEADERFUNCTION, hdrs.append) -+ c.perform() -+ if c.getinfo(c.HTTP_CODE) == 200 and sum([x.startswith(b'Content-Length:') for x in hdrs]) == 0: -+ raise Exception("destination %s seems to be directory" % url) -+ -+ _log.debug("put the new file to %s", urlnew) -+ c.setopt(c.URL, urlnew) - c.setopt(c.NOBODY, False) - c.setopt(c.CUSTOMREQUEST, "PUT") - # suppress the response body -@@ -97,48 +126,48 @@ class storageService(object): - c.setopt(c.POSTFIELDS, json.dumps({"storageservice": self.entry}, indent=4)) - c.perform() - if c.getinfo(c.HTTP_CODE) != 201: -- raise Exception("unable to put new file %s (HTTP code %s)" % (urlfilenew, c.getinfo(c.HTTP_CODE))) -+ raise Exception("unable to put new file %s (HTTP code %s)" % (urlnew, c.getinfo(c.HTTP_CODE))) - -- _log.debug("delete existing file %s", urlfile) -- c.setopt(c.URL, urlfile) -+ _log.debug("delete existing file %s", url) -+ c.setopt(c.URL, url) - c.setopt(c.NOBODY, True) - c.setopt(c.CUSTOMREQUEST, "DELETE") - c.perform() - if c.getinfo(c.HTTP_CODE) != 204 and c.getinfo(c.HTTP_CODE) != 404: -- raise Exception("unable to delete file %s (HTTP code %i)" % (urlfile, c.getinfo(c.HTTP_CODE))) -+ raise Exception("unable to delete file %s (HTTP code %i)" % (url, c.getinfo(c.HTTP_CODE))) - -- _log.debug("rename the new file %s to %s", urlfilenew, urlfile) -- c.setopt(c.URL, urlfilenew) -+ _log.debug("rename the new file %s to %s", urlnew, url) -+ c.setopt(c.URL, urlnew) - c.setopt(c.NOBODY, True) - c.setopt(c.CUSTOMREQUEST, "MOVE") -- c.setopt(c.HTTPHEADER, ["Destination:%s" % urlfile]) -+ c.setopt(c.HTTPHEADER, ["Destination:%s" % url]) - c.perform() - if c.getinfo(c.HTTP_CODE) != 201: -- raise Exception("unable to rename %s to %s (HTTP code %i)" % (urlfilenew, urlfile, c.getinfo(c.HTTP_CODE))) -+ raise Exception("unable to rename %s to %s (HTTP code %i)" % (urlnew, url, c.getinfo(c.HTTP_CODE))) - -- _log.info("%s written successfully" % urlfile) -+ _log.info("%s written successfully" % url) - - except Exception as e: - _log.error("file writing error: %s", str(e)) - -- _log.debug("delete temporary file %s", urlfilenew) -- c.setopt(c.URL, urlfilenew) -+ _log.debug("delete temporary file %s", urlnew) -+ c.setopt(c.URL, urlnew) - c.setopt(c.NOBODY, True) - c.setopt(c.CUSTOMREQUEST, "DELETE") - c.perform() - if c.getinfo(c.HTTP_CODE) != 204: - if c.getinfo(c.HTTP_CODE) != 404: -- _log.error("unable to cleanup 
temporary file %s (http code %i)", urlfilenew, c.getinfo(c.HTTP_CODE)) -+ _log.error("unable to cleanup temporary file %s (http code %i)", urlnew, c.getinfo(c.HTTP_CODE)) - else: -- _log.info("temporary file %s doesn't exist", urlfilenew) -+ _log.info("temporary file %s doesn't exist", urlnew) - -- raise Exception("failed to upload %s with HTTP protocol".format(urlfile)) -+ raise Exception("failed to upload %s with HTTP protocol" % url) - - finally: - _log.debug("cleanup") - c.close() - -- def publish_xrootd(self): -+ def _publish_xrootd(self, url, cert, key, capath): - # set environment for XRootD transfers - # XRD_* env variables must be set before importing XRootD module - if _log.getEffectiveLevel() < logging.DEBUG: -@@ -149,23 +178,22 @@ class storageService(object): - #os.putenv('XRD_REQUESTTIMEOUT', '10') # can be set individually for each operation - - # set X509_* env variable used by XRootD authentication -- os.putenv('X509_USER_CERT', self.config.cert) -- os.putenv('X509_USER_KEY', self.config.key) -+ if cert: os.putenv('X509_USER_CERT', cert) -+ if key: os.putenv('X509_USER_KEY', key) -+ if capath: os.putenv('X509_CERT_DIR', capath) - - try: - import XRootD.client - except ImportError as e: -- raise Exception("unable to import XRootD module (install python2-xrootd or python34-xrootd package): {0}".format(str(e))) -- -- urlhost = "root://{0}".format(self.config.host) -- if self.config.port != 1094: -- urlhost = "{0}:{1}".format(self.config.host, self.config.port) -- filename = "{0}/{1}".format(self.config.path, self.config.file) -- filenamenew = "{0}.{1}".format(filename, int(time.time())) -- filenameold = "{0}.bak".format(filenamenew) -- urlfile = "{0}/{1}".format(urlhost, filename) -- urlfilenew = "{0}/{1}".format(urlhost, filenamenew) -- urlfileold = "{0}/{1}".format(urlhost, filenameold) -+ raise Exception("unable to import XRootD module (install python2-xrootd or python3-xrootd package): {0}".format(str(e))) -+ -+ u = urlparse.urlparse(url) -+ urlhost = "{0}://{1}".format(u.scheme, u.netloc) -+ filepath = u.path -+ filepathnew = "{0}.{1}".format(filepath, int(time.time())) -+ filepathold = "{0}.bak".format(filepathnew) -+ urlnew = "{0}{1}".format(urlhost, filepathnew) -+ urlold = "{0}{1}".format(urlhost, filepathold) - - cleanup = [] - try: -@@ -177,43 +205,45 @@ class storageService(object): - - xrdc = XRootD.client.FileSystem(urlhost) - -- _log.debug("refresh storage json uploading the temporary file") -+ _log.debug("upload the temporary file %s to %s", tmpfile, urlnew) - -- status, details = xrdc.copy(tmpfile, urlfilenew, force=True) -+ status, details = xrdc.copy(tmpfile, urlnew, force=True) -+ cleanup.append(urlnew) - if not status.ok: -- raise Exception("unable to copy data to {0}: {1}".format(urlfilenew, status.message)) -- cleanup.append(urlfilenew) -+ raise Exception("unable to copy data to {0}: {1}".format(urlnew, status.message)) - -- status, detail = xrdc.stat(filename) -+ status, detail = xrdc.stat(filepath) - if status.ok: -- status, details = xrdc.mv(filename, filenameold) -+ status, details = xrdc.mv(filepath, filepathold) - if not status.ok: -- raise Exception("unable to move {0} to {1}: {2}".format(filename, filenameold, status.message)) -- cleanup.append(urlfileold) -+ raise Exception("unable to move {0} to {1}: {2}".format(filepath, filepathold, status.message)) -+ cleanup.append(urlold) - else: -- _log.info("no previous version of %s: %s", filename, status.message) -+ _log.info("no previous version of %s: %s", filepath, status.message) - -- status, 
details = xrdc.mv(filenamenew, filename) -+ status, details = xrdc.mv(filepathnew, filepath) - if not status.ok: - # move old file back in case of problems with new file -- status1, details1 = xrdc.mv(filenameold, filename) -+ status1, details1 = xrdc.mv(filepathold, filepath) - if status1.ok: -- cleanup.remove(urlfileold) -- raise Exception("unable to move {0} to {1}: {2}".format(filenamenew, filename, status.message)) -- cleanup.remove(urlfilenew) -+ cleanup.remove(urlold) -+ raise Exception("unable to move {0} to {1}: {2}".format(filepathnew, filepath, status.message)) -+ cleanup.remove(urlnew) - -- _log.info("%s written successfully", urlfile) -+ _log.info("%s written successfully", url) - - except Exception as e: - _log.error(str(e)) -- raise Exception("failed to upload {0} with XRootD protocol".format(filename)) -+ raise Exception("failed to upload {0} with XRootD protocol".format(filepath)) - - finally: - # cleanup temporary files - for c in cleanup: - try: -- if c.startswith('root://'): xrdc.rm(c) -- else: os.unlink(c) -+ if c.startswith('root://'): -+ xrdc.rm(urlparse.urlparse(c).path) -+ else: -+ os.unlink(c) - except Exception as e: - _log.debug("unable to remove temporary file %s: %s", c, str(e)) - -@@ -254,7 +284,7 @@ class dataStore(dict): - #===================================================================== - # main - #===================================================================== --if __name__ == '__main__': -+def main(argv): - import inspect - - # basic logging configuration -@@ -298,13 +328,14 @@ if __name__ == '__main__': - parser.add_argument("--file", help="Name of the summary file, default: %(default)s", default="storagesummary.json") - parser.add_argument("--cert", help="Path to host certificate, default: %(default)s", default="/etc/grid-security/hostcert.pem") - parser.add_argument("--key", help="Path to host key, default: %(default)s", default="/etc/grid-security/hostkey.pem") -+ parser.add_argument("--capath", help="Path to CAs, default: %(default)s", default="/etc/grid-security/certificates") - parser.add_argument("--host", help="FQDN, default: %(default)s", default=fqdn) - parser.add_argument("--print", help="Just print, don't publish", action="store_true", dest="prnt") - parser.add_argument("--proto", help="Publish protocol, default %(default)s", default="https") - parser.add_argument("--port", help="Dome's port number, default: %(default)s", default=1094) - parser.add_argument("--davport", help="Webdav's port, default: %(default)s", default=443) - -- options = parser.parse_args() -+ options = parser.parse_args(argv[1:]) - - if options.logfile == '-': - _log.removeHandler(streamHandler) -@@ -318,14 +349,14 @@ if __name__ == '__main__': - _log.addHandler(fileHandler) - _log.removeHandler(streamHandler) - -- _log.info("command: %s", " ".join(sys.argv)) -+ _log.info("command: %s", " ".join(argv)) - _log.info("script: %s", os.path.abspath(inspect.getfile(inspect.currentframe()))) - _log.info("version: %s", __version__) - _log.info("python: %s", str(sys.version_info)) - - try: - # Create object -- dpm = storageService(options) -+ dpm = StorageService(options.host, options.cert, options.key) - dpm.addshares() - - # Which endpoints are supposed to be there? 
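Both publishers in this patch follow the same replace-by-rename protocol: upload the summary under a unique temporary name, move any previous file aside as a backup, rename the new file into place, and restore the backup if the final rename fails, so consumers never read a partially written file. A condensed sketch of the XRootD variant, with illustrative host and path names:

    import time
    import XRootD.client

    def replace_by_rename(urlhost, localfile, filepath):
        # e.g. urlhost = 'root://head.example.com', filepath = '/static/storagesummary.json'
        xrdc = XRootD.client.FileSystem(urlhost)
        newpath = "{0}.{1}".format(filepath, int(time.time()))
        oldpath = "{0}.bak".format(newpath)

        # 1. upload the new content under a unique temporary name
        status, _ = xrdc.copy(localfile, urlhost + newpath, force=True)
        if not status.ok:
            raise Exception(status.message)

        # 2. keep any previous file around as a backup
        if xrdc.stat(filepath)[0].ok:
            xrdc.mv(filepath, oldpath)

        # 3. rename the new file into place; restore the backup on failure
        status, _ = xrdc.mv(newpath, filepath)
        if not status.ok:
            xrdc.mv(oldpath, filepath)
            raise Exception(status.message)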
-@@ -334,17 +365,19 @@
- #dpm.adddataStores()
-
- # Print or publish
-- if options.prnt:
-+ if options.prnt or options.proto == 'stdout':
- dpm.printjson()
- elif options.proto in ['https', 'davs']:
-- dpm.publish_https()
-+ url = "https://%s:%s%s%s%s" % (options.host, options.davport, options.path, '' if options.path.endswith('/') else '/', options.file)
-+ dpm.publish(url, options.cert, options.key, options.capath)
- elif options.proto in ['root', 'xroot']:
-- dpm.publish_xrootd()
-+ url = "root://%s:%s%s%s%s" % (options.host, options.port, options.path, '' if options.path.endswith('/') else '/', options.file)
-+ dpm.publish(url, options.cert, options.key, options.capath)
- else:
- _log.error("unknown publish protocol %s", options.proto)
-
- except Exception as e:
- _log.error(str(e))
-- sys.exit(1)
-+ return 1
-
-- sys.exit(os.EX_OK)
-+ return os.EX_OK
-diff --git a/shell/src/star.py b/shell/src/star.py
-new file mode 100644
-index 00000000..96377b66
---- /dev/null
-+++ b/shell/src/star.py
-@@ -0,0 +1,430 @@
-+# Module to generate an accounting record following the EMI StAR specs
-+# in the version 1.2, for details see
-+# * http://cds.cern.ch/record/1452920/files/GFD.201.pdf
-+# * https://wiki.egi.eu/wiki/APEL/Storage
-+#
-+# Syntax:
-+#
-+# star-accounting [-h] [--help]
-+# .. to get the help screen
-+#
-+# Dependencies:
-+# yum install MySQL-python python-lxml python-uuid
-+#
-+# v1.0.0 initial release
-+# v1.0.2 removed site debug printouts that were screwing up the output
-+# v1.0.3 avoid summing the size fields for directories. Fixes EGI doublecounting
-+# v1.0.4 marks the report as regarding the past period, not the next one; parse
-+# the DPM group names into VO and role
-+# v1.3.0 Petr Vokac (petr.vokac@cern.ch), February 7, 2019
-+# * replace SQL join with simple queries to improve performance
-+# * compatibility with python 3
-+# v1.4.0 Petr Vokac (petr.vokac@cern.ch), June 28, 2020
-+# * integrated with dmlite-shell
-+#
-+from __future__ import absolute_import
-+from __future__ import print_function
-+from __future__ import division
-+
-+import os
-+import sys
-+import socket
-+import io
-+import lxml.builder as lb
-+from lxml import etree
-+import uuid
-+import datetime
-+import logging
-+
-+import ssl
-+import json
-+from io import BytesIO
-+
-+try:
-+ from urllib.parse import urlparse, urljoin
-+ import http.client as http_client
-+except ImportError:
-+ from urlparse import urlparse, urljoin
-+ import httplib as http_client
-+
-+from M2Crypto import BIO, Rand, SMIME
-+
-+from .dbutils import DBConn
-+
-+
-+__version__ = '1.4.0'
-+__author__ = 'Fabrizio Furano'
-+
-+_log = logging.getLogger('dmlite-shell')
-+
-+SR_NAMESPACE = "http://eu-emi.eu/namespaces/2011/02/storagerecord"
-+SR = "{%s}" % SR_NAMESPACE
-+NSMAP = {"sr": SR_NAMESPACE}
-+
-+
-+
-+def addrecord(xmlroot, hostname, group, user, site, filecount, resourcecapacityused, logicalcapacityused, validduration, recordid=None):
-+ # update XML
-+ rec = etree.SubElement(xmlroot, SR+'StorageUsageRecord')
-+ rid = etree.SubElement(rec, SR+'RecordIdentity')
-+ rid.set(SR+"createTime",
-+ datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"))
-+
-+ if hostname:
-+ ssys = etree.SubElement(rec, SR+"StorageSystem")
-+ ssys.text = hostname
-+
-+ recid = recordid
-+ if not recid:
-+ recid = hostname+"-"+str(uuid.uuid1())
-+ rid.set(SR+"recordId", recid)
-+
-+ subjid = etree.SubElement(rec, SR+'SubjectIdentity')
-+
-+ if group:
-+ grouproles = group.split('/')
-+
-+ # If the last token is Role=... 
then we fetch the role and add it to the record -+ tmprl = grouproles[-1] -+ if tmprl.find('Role=') != -1: -+ splitroles = tmprl.split('=') -+ if (len(splitroles) > 1): -+ role = splitroles[1] -+ grp = etree.SubElement(subjid, SR+"GroupAttribute") -+ grp.set(SR+"attributeType", "role") -+ grp.text = role -+ # Now drop this last token, what remains is the vo identifier -+ grouproles.pop() -+ -+ # The voname is the first token -+ voname = grouproles.pop(0) -+ grp = etree.SubElement(subjid, SR+"Group") -+ grp.text = voname -+ -+ # If there are other tokens, they are a subgroup -+ if len(grouproles) > 0: -+ subgrp = '/'.join(grouproles) -+ grp = etree.SubElement(subjid, SR+"GroupAttribute") -+ grp.set(SR+"attributeType", "subgroup") -+ grp.text = subgrp -+ -+ if user: -+ usr = etree.SubElement(subjid, SR+"User") -+ usr.text = user -+ -+ if site: -+ st = etree.SubElement(subjid, SR+"Site") -+ st.text = site -+ -+ e = etree.SubElement(rec, SR+"StorageMedia") -+ e.text = "disk" -+ -+ if validduration: -+ e = etree.SubElement(rec, SR+"StartTime") -+ d = datetime.datetime.utcnow() - datetime.timedelta(seconds=validduration) -+ e.text = d.strftime("%Y-%m-%dT%H:%M:%SZ") -+ -+ e = etree.SubElement(rec, SR+"EndTime") -+ e.text = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") -+ -+ if filecount: -+ e = etree.SubElement(rec, SR+"FileCount") -+ e.text = str(filecount) -+ -+ if not resourcecapacityused: -+ resourcecapacityused = 0 -+ -+ e1 = etree.SubElement(rec, SR+"ResourceCapacityUsed") -+ e1.text = str(resourcecapacityused) -+ -+ e3 = etree.SubElement(rec, SR+"ResourceCapacityAllocated") -+ e3.text = str(resourcecapacityused) -+ -+ if not logicalcapacityused: -+ logicalcapacityused = 0 -+ -+ e2 = etree.SubElement(rec, SR+"LogicalCapacityUsed") -+ e2.text = str(logicalcapacityused) -+ -+ -+# -+# Return dictionary with reserved space by given column (s_uid, s_gid) -+# -+def getreservedspace(column): -+ with DBConn.get('dpm_db').cursor() as cursor: -+ cursor.execute('SELECT {0}, SUM(t_space) FROM dpm_space_reserv GROUP BY {0}'.format(column)) -+ -+ ret = {} -+ for row in cursor: -+ ret[row[0]] = row[1] -+ -+ return ret -+ -+ -+# -+# Return dictionary with key / value for given table -+# -+def getdbkv(table, ckey, cval): -+ with DBConn.get('cns_db').cursor() as cursor: -+ cursor.execute('SELECT {0}, {1} FROM {2}'.format(ckey, cval, table)) -+ -+ ret = {} -+ for row in cursor: -+ ret[row[0]] = row[1] -+ -+ return ret -+ -+ -+def star(reportgroups, reportusers, record_id, site, hostname, validduration): -+ # Init the xml generator -+ xmlroot = etree.Element(SR+"StorageUsageRecords", nsmap=NSMAP) -+ -+ if reportgroups: -+ # Report about groups -+ _log.debug("Groups reporting: starting") -+ -+ groups2space = getreservedspace('groups') -+ gid2name = getdbkv('Cns_groupinfo', 'gid', 'groupname') -+ -+ # find all unique groups associated with some quotatokens -+ allgroups = [] -+ for groups in groups2space.keys(): -+ for group in [ int(x) for x in groups.split(',') ]: -+ if group == 0: continue # ignore 'root' group -+ if group not in gid2name: continue -+ name = gid2name[group] -+ if name in allgroups: continue -+ allgroups.append(name) -+ # filter only base VO groups assigned to quotatokens -+ maingroups = [] -+ for group in allgroups: -+ if any([ group.startswith("%s/" % x) for x in allgroups ]): -+ continue -+ maingroups.append(group) -+ # sum space for base VO groups -+ gname2space = {} -+ for groups, space in groups2space.items(): -+ used = [] -+ for group in [ int(x) for x in groups.split(',') ]: 
-+ if group == 0: continue # ignore 'root' group -+ if group not in gid2name: continue -+ name = gid2name[group] -+ # attempt to map group name to one of base VO group -+ matchedgroups = [x for x in maingroups if x == name or name.startswith("%s/" % x)] -+ if len(matchedgroups) != 1: continue -+ maingroup = matchedgroups[0] -+ if maingroup in used: continue -+ gname2space[maingroup] = gname2space.get(maingroup, 0) + space -+ used.append(maingroup) -+ -+ gname2used_files = {} -+ gname2used_space = {} -+ with DBConn.get('cns_db').cursor() as cursor: -+ cursor.execute('SELECT gid, COUNT(*), SUM(filesize) FROM Cns_file_metadata WHERE filemode & 16384 = 0 GROUP BY gid') -+ -+ for row in cursor: -+ if _log.getEffectiveLevel() < logging.DEBUG: -+ _log.debug(row) -+ if row[0] not in gid2name: -+ continue -+ if row[0] == 0: # ignore data owned by 'root' -+ continue -+ -+ name = gid2name[row[0]] -+ matchedgroups = [x for x in maingroups if x == name or name.startswith("%s/" % x)] -+ if len(matchedgroups) != 1: continue -+ name = matchedgroups[0] -+ gname2used_files[name] = gname2used_files.get(name, 0) + row[1] -+ gname2used_space[name] = gname2used_space.get(name, 0) + row[2] -+ -+ for name in gname2used_files.keys(): -+ # update XML -+ addrecord(xmlroot, hostname, name, None, site, gname2used_files[name], gname2space[name], gname2used_space[name], validduration, record_id) -+ -+ _log.debug("Groups reporting: number of rows returned: %d (only %d maingroups)", cursor.rowcount, len(maingroups)) -+ -+ -+ if reportusers: -+ # -+ # Report about users -+ # -+ _log.debug("Users reporting: starting") -+ -+ uid2space = getreservedspace('s_uid') -+ uid2name = getdbkv('Cns_userinfo', 'userid', 'username') -+ -+ with DBConn.get('cns_db').cursor() as cursor: -+ cursor.execute('SELECT owner_uid, COUNT(*), SUM(filesize) FROM Cns_file_metadata WHERE filemode & 16384 = 0 GROUP BY owner_uid') -+ -+ for row in cursor: -+ if _log.getEffectiveLevel() < logging.DEBUG: -+ _log.debug(row) -+ if row[0] not in uid2name: -+ continue -+ -+ # update XML -+ addrecord(xmlroot, hostname, None, uid2name[row[0]], site, row[1], uid2space.get(row[0], 0), row[2], validduration, record_id) -+ -+ _log.debug("Users reporting: number of rows returned: %d", cursor.rowcount) -+ -+ DBConn.close() -+ -+ out = io.BytesIO() -+ et = etree.ElementTree(xmlroot) -+ et.write(out, pretty_print=True, encoding="utf-8") -+ -+ return out.getvalue().decode('utf-8') -+ -+ -+ -+class StARPublisher(object): -+ -+ def __init__(self, ams_host='msg.argo.grnet.gr', ams_port=443, ams_auth_port=8443, cert='/etc/grid-security/hostcert.pem', key='/etc/grid-security/hostkey.pem', capath='/etc/grid-security/certificates'): -+ self._ams_host = ams_host -+ self._ams_port = ams_port -+ self._ams_auth_port = ams_auth_port -+ self._ams_project = 'accounting' -+ self._ams_topic = 'eu-egi-storage-accounting' -+ self._cert = cert -+ self._key = key -+ self._capath = capath -+ self._token = None -+ -+ -+ def _request(self, url, method='GET', body=None, cert=None, key=None, capath=None): -+ headers = { 'Content-Type': 'application/json' } -+ urlquery = "{0}?{1}".format(url.path, url.query) if url.query else url.path -+ -+ _log.debug('request: %s', url.geturl()) -+ -+ ctx = ssl.create_default_context() -+ if cert and key: -+ ctx.load_cert_chain(certfile=cert, keyfile=key) -+ ctx.load_verify_locations(capath=capath) -+ -+ conn = http_client.HTTPSConnection(url.hostname, port=url.port, context=ctx) -+ conn.request(method=method, url=urlquery, body=body, headers=headers) -+ -+ res 
= conn.getresponse()
-+ if res.status != 200:
-+ raise Exception("http request status %i: %s" % (res.status, res.reason))
-+
-+ return json.loads(res.read())
-+
-+
-+ def _get_token(self):
-+ if not self._token:
-+ _log.debug("get_token from %s:%s", self._ams_host, self._ams_auth_port)
-+
-+ route = 'https://{0}:{1}/v1/service-types/ams/hosts/{0}:authx509'
-+ url = urlparse(route.format(self._ams_host, self._ams_auth_port))
-+ data = self._request(url, cert=self._cert, key=self._key, capath=self._capath)
-+ self._token = str(data['token'])
-+
-+ _log.debug("retrieved token %s", self._token)
-+
-+ return self._token
-+
-+
-+ def _sign_bytes(self, data):
-+ buf = BIO.MemoryBuffer(data)
-+
-+ smime = SMIME.SMIME()
-+ smime.load_key(self._key, self._cert)
-+ p7 = smime.sign(buf, SMIME.PKCS7_DETACHED)
-+
-+ out = BIO.MemoryBuffer()
-+ buf = BIO.MemoryBuffer(data)
-+ smime.write(out, p7, buf)
-+
-+ return out.read()
-+
-+
-+ def topics(self):
-+ _log.debug("list %s topics %s:%s", self._ams_project, self._ams_host, self._ams_port)
-+
-+ route = 'https://{0}:{1}/v1/projects/{2}/topics?key={3}'
-+ token = self._get_token()
-+ url = urlparse(route.format(self._ams_host, self._ams_port, self._ams_project, token))
-+
-+ return self._request(url, capath=self._capath)
-+
-+
-+ def publish(self, data):
-+ _log.debug("publish %s topic %s to %s:%s", self._ams_project, self._ams_topic, self._ams_host, self._ams_port)
-+
-+ route = 'https://{0}:{1}/v1/projects/{2}/topics/{3}:publish?key={4}'
-+ token = self._get_token()
-+ url = urlparse(route.format(self._ams_host, self._ams_port, self._ams_project, self._ams_topic, token))
-+ currtime = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
-+ msg = {
-+ 'attributes': {'empaid': "{0}/{1}".format(currtime[:8], currtime)},
-+ 'data': self._sign_bytes(data).decode('utf-8')
-+ }
-+
-+ return self._request(url, method='POST', body=json.dumps({"messages": [msg]}), capath=self._capath)
-+
-+
-+
-+################################
-+# Main code - legacy interface #
-+################################
-+def main(argv):
-+ import optparse
-+
-+ # basic logging configuration
-+ streamHandler = logging.StreamHandler(sys.stderr)
-+ streamHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s](%(module)s:%(lineno)d) %(message)s", "%d %b %H:%M:%S"))
-+ _log.addHandler(streamHandler)
-+ _log.setLevel(logging.WARN)
-+
-+ parser = optparse.OptionParser()
-+ parser.add_option('--reportgroups', dest='reportgroups', action='store_true', default=False, help="Report about all groups")
-+ parser.add_option('--reportusers', dest='reportusers', action='store_true', default=False, help="Report about all users")
-+ parser.add_option('-v', '--debug', dest='verbose', action='count', default=0, help='Increase verbosity level for debugging (on stderr)')
-+ parser.add_option('--hostname', dest='hostname', default=socket.getfqdn(), help="The hostname string to use in the record. Default: this host.")
-+ parser.add_option('--site', dest='site', default="", help="The site string to use in the record. Default: none.")
-+ parser.add_option('--recordid', dest='recordid', default=None, help="The recordid string to use in the record. Default: a newly computed unique string.")
-+ parser.add_option("--nsconfig", dest="nsconfig", default=None, help="LEGACY OPTION, NO LONGER IN USE (NSCONFIG file with sql connection info)")
-+ parser.add_option("--dpmconfig", dest="dpmconfig", default=None, help="LEGACY OPTION, NO LONGER IN USE (DPMCONFIG file with sql connection info)")
-+ parser.add_option('--dbhost', dest='dbhost', default=None, help="Database host, if no NSCONFIG given")
-+ parser.add_option('--dbuser', dest='dbuser', default=None, help="Database user, if no NSCONFIG given")
-+ parser.add_option('--dbpwd', dest='dbpwd', default=None, help="Database password, if no NSCONFIG given")
-+ parser.add_option('--nsdbname', dest='nsdbname', default='cns_db', help="NS Database name, if no NSCONFIG given")
-+ parser.add_option('--dpmdbname', dest='dpmdbname', default='dpm_db', help="DPM Database name, if no DPMCONFIG given")
-+ parser.add_option('--validduration', dest='validduration', type='int', default=86400, help="Valid duration of this record, in seconds (default: 1 day)")
-+ parser.add_option('--ams-host', default="msg.argo.grnet.gr", help="APEL accounting server hostname, default: %default")
-+ parser.add_option('--ams-port', default=443, help="APEL accounting server port, default: %default")
-+ parser.add_option('--ams-auth-port', default=8443, help="APEL accounting server authentication port, default: %default")
-+ parser.add_option('--cert', default='/etc/grid-security/hostcert.pem', help="Host certificate for access to APEL accounting server, default: %default")
-+ parser.add_option('--key', default='/etc/grid-security/hostkey.pem', help="Host key for access to APEL accounting server, default: %default")
-+ parser.add_option('--capath', default='/etc/grid-security/certificates', help="Trusted CA directory for APEL accounting server verification, default: %default")
-+
-+ options, args = parser.parse_args(argv[1:])
-+
-+ if options.verbose == 0: _log.setLevel(logging.ERROR)
-+ elif options.verbose == 1: _log.setLevel(logging.WARN)
-+ elif options.verbose == 2: _log.setLevel(logging.INFO)
-+ else: _log.setLevel(logging.DEBUG)
-+
-+ if options.dbhost != None:
-+ # try to use database connection data from CLI
-+ if options.dbuser == None or options.dbpwd == None:
-+ _log.error("no database user or password defined as command line options")
-+ return 1
-+
-+ DBConn.configure('user', {
-+ 'host': options.dbhost,
-+ #'port': 0,
-+ 'user': options.dbuser,
-+ 'pass': options.dbpwd,
-+ 'cns_db': options.nsdbname,
-+ 'dpm_db': options.dpmdbname,
-+ })
-+
-+ data = star(options.reportgroups, options.reportusers, options.recordid, options.site, options.hostname, options.validduration)
-+ sys.stdout.write(data)
-+
-+ _log.debug('done')
-+
-+ return os.EX_OK
-diff --git a/shell/src/test.sh b/shell/src/test.sh
-new file mode 100644
-index 00000000..ebb9665a
---- /dev/null
-+++ b/shell/src/test.sh
-@@ -0,0 +1,232 @@
-+#!/bin/sh
-+# Test dmlite-shell functionality
-+PYTHON=python
-+DSHELL=/usr/bin/dmlite-shell
-+
-+TEST_DISKSERVER='dpmdisk1.farm.particle.cz'
-+TEST_BASE='/dpm/farm.particle.cz/home/test_dmlite_shell'
-+TEST_USER='test_dmlite_shell'
-+TEST_GROUP='test_dmlite_shell'
-+TEST_POOL='test_ds_pool' # max 15 characters
-+
-+function testCmd() {
-+ RUN="${PYTHON} ${DSHELL}"
-+ for ARG in "$@"; do
-+ RUN="${RUN} -e '${ARG}'"
-+ done
-+ echo "########################################################################"
-+ echo "Running: ${RUN}"
-+ eval "${RUN}"
-+ if [ $? 
-ne 0 ]; then -+ echo "FAILED" -+ exit 1 -+ fi -+} -+ -+function cleanupCmd() { -+ RUN="${PYTHON} ${DSHELL} -e '$1' > /dev/null" -+ echo "Cleanup (these commands can fail): ${RUN}" -+ eval "${RUN}" -+} -+ -+ -+ -+if [ $# -lt 2 ]; then -+ echo "usage: $0 /dpm/fqdn/home/testdir diskserver.fqdn [ /usr/bin/python [ /usr/bin/dmlite-shell ]]" -+ echo "example: $0 /dpm/farm.particle.cz/home/test_dmlite_shell dpmdisk1.farm.particle.cz /usr/bin/python3" -+ echo "!!! WARNING - use non-existing base path for tests not to overwrite/destroy existing data !!!" -+ exit 1 -+fi -+ -+if [ $# -ge 1 ]; then -+ TEST_BASE=$1 -+fi -+ -+if [ $# -ge 2 ]; then -+ TEST_DISKSERVER=$2 -+fi -+ -+if [ $# -ge 3 ]; then -+ PYTHON=$3 -+fi -+ -+if [ $# -ge 4 ]; then -+ DSHELL=$4 -+fi -+ -+ -+ -+echo "### dmlite-shell test ###" -+echo "python interpreter: ${PYTHON}" -+echo "dmlite-shell script: ${DSHELL}" -+echo "test base path: ${TEST_BASE}" -+echo "test disknode: ${TEST_DISKSERVER}" -+echo "test user: ${TEST_USER}" -+echo "test group: ${TEST_GROUP}" -+echo "test pool: ${TEST_POOL}" -+ -+echo "########################################################################" -+cleanupCmd "unlink ${TEST_BASE}/file1" -+cleanupCmd "unlink ${TEST_BASE}/file1-moved" -+cleanupCmd "unlink ${TEST_BASE}/file2" -+cleanupCmd "unlink ${TEST_BASE}/file2-moved" -+cleanupCmd "unlink ${TEST_BASE}/file3" -+cleanupCmd "unlink ${TEST_BASE}/file3-moved" -+cleanupCmd "rmdir ${TEST_BASE} -r" -+cleanupCmd "userdel ${TEST_USER}" -+cleanupCmd "groupdel ${TEST_GROUP}" -+cleanupCmd "quotatokendel ${TEST_BASE} ${TEST_POOL}" -+cleanupCmd "fsdel /tmp ${TEST_DISKSERVER}" -+cleanupCmd "pooldel ${TEST_POOL}" -+echo "########################################################################" -+echo "########################################################################" -+ -+ -+ -+testCmd 'help' -+testCmd 'version' -+testCmd 'getimplid' -+testCmd 'qryconf' -+testCmd 'exit' -+#testCmd 'pwd' -+testCmd 'cd /' 'pwd' 'cd dpm' 'pwd' 'ls' 'cd ..' 
'ls' -+#testCmd "ls ${TEST_BASE}" -+ -+# pool, filesystems and quotatoken -+testCmd "pooladd ${TEST_POOL} filesystem" -+testCmd "poolmodify ${TEST_POOL} defsize 12345678" -+testCmd "fsadd /tmp ${TEST_POOL} ${TEST_DISKSERVER}" -+testCmd "fsmodify /tmp ${TEST_DISKSERVER} ${TEST_POOL} DISABLED" -+testCmd "poolinfo" -+testCmd "mkdir ${TEST_BASE} -p" -+testCmd "quotatokenset ${TEST_BASE} pool ${TEST_POOL} size 1073741824 desc TEST_DS_POOL groups root" -+#testCmd "quotatokenmod path ${TEST_BASE}" -+#testCmd "quotatokenmod pool ${TEST_POOL}" -+#testCmd "quotatokenmod size 10737418240" -+#testCmd "quotatokenmod desc TEST_DS_POOL" -+#testCmd "quotatokenmod groups root" -+testCmd "quotatokenget / -s" -+testCmd "quotatokendel ${TEST_BASE} ${TEST_POOL}" -+testCmd "rmdir ${TEST_BASE} -r" -+testCmd "fsdel /tmp ${TEST_DISKSERVER}" -+testCmd "pooldel ${TEST_POOL}" -+ -+# create/remove directories and files -+#testCmd "getlfn 5" -+testCmd "mkdir ${TEST_BASE} -p" -+testCmd "info ${TEST_BASE}" -+testCmd "mkdir ${TEST_BASE}/dir1" -+testCmd "ln ${TEST_BASE}/dir1 ${TEST_BASE}/dir1-link" -+testCmd "readlink ${TEST_BASE}/dir1-link" -+testCmd "rmdir ${TEST_BASE}/dir1" -+testCmd "mkdir ${TEST_BASE}/dir1/1/2/3/4 -p" -+testCmd "rmdir ${TEST_BASE}/dir1 -r" -+testCmd "cd ${TEST_BASE}" "mkdir dir2" -+testCmd "cd ${TEST_BASE}" "ln dir2 dir2-link" -+testCmd "cd ${TEST_BASE}" "rmdir dir2" -+testCmd "create ${TEST_BASE}/file1" -+testCmd "ln ${TEST_BASE}/file1 ${TEST_BASE}/file1-link" -+testCmd "readlink ${TEST_BASE}/file1-link" -+testCmd "info ${TEST_BASE}/file1" -+testCmd "mv ${TEST_BASE}/file1 ${TEST_BASE}/file1-moved" -+testCmd "unlink ${TEST_BASE}/file1-moved" -+testCmd "create ${TEST_BASE}/file2 775" -+testCmd "info ${TEST_BASE}/file2" -+testCmd "ln ${TEST_BASE}/file2 ${TEST_BASE}/file2-link" -+testCmd "readlink ${TEST_BASE}/file2-link" -+testCmd "mv ${TEST_BASE}/file2 ${TEST_BASE}/file2-moved" -+testCmd "unlink ${TEST_BASE}/file2-moved" -+testCmd "cd ${TEST_BASE}" "create file3" -+testCmd "cd ${TEST_BASE}" "ln file3 file3-link" -+testCmd "cd ${TEST_BASE}" "readlink file3-link" -+testCmd "cd ${TEST_BASE}" "info file3" -+testCmd "cd ${TEST_BASE}" "mv file3 file3-moved" -+testCmd "cd ${TEST_BASE}" "unlink file3-moved" -+testCmd "ls ${TEST_BASE}" -+testCmd "du ${TEST_BASE}" -+testCmd "rmdir ${TEST_BASE} -r" -+ -+# manage users -+testCmd "userinfo" -+testCmd "useradd ${TEST_USER}" -+testCmd "userinfo ${TEST_USER}" -+testCmd "userban ${TEST_USER} LOCAL_BAN" -+testCmd "userinfo ${TEST_USER}" -+testCmd "userban ${TEST_USER} ARGUS_BAN" -+testCmd "userinfo ${TEST_USER}" -+testCmd "userban ${TEST_USER} NO_BAN" -+testCmd "userinfo ${TEST_USER}" -+testCmd "userdel ${TEST_USER}" -+ -+# manage groups -+testCmd "groupinfo" -+testCmd "groupadd ${TEST_GROUP}" -+testCmd "groupinfo ${TEST_GROUP}" -+testCmd "groupban ${TEST_GROUP} LOCAL_BAN" -+testCmd "groupinfo ${TEST_GROUP}" -+testCmd "groupban ${TEST_GROUP} ARGUS_BAN" -+testCmd "groupinfo ${TEST_GROUP}" -+testCmd "groupban ${TEST_GROUP} NO_BAN" -+testCmd "groupinfo ${TEST_GROUP}" -+testCmd "groupdel ${TEST_GROUP}" -+ -+# change owner/group/mode -+testCmd "useradd ${TEST_USER}" -+testCmd "groupadd ${TEST_GROUP}" -+testCmd "mkdir ${TEST_BASE}/dir1 -p" -+testCmd "chmod ${TEST_BASE}/dir1 755" -+testCmd "chgrp ${TEST_BASE}/dir1 ${TEST_GROUP}" -+testCmd "chown ${TEST_BASE}/dir1 ${TEST_USER}" -+#testCmd "utime ${TEST_BASE}/dir1 2020-02-02 2030-03-03" -+testCmd "comment ${TEST_BASE}/dir1 \"this is a test\"" -+testCmd "comment ${TEST_BASE}/dir1" -+testCmd "info ${TEST_BASE}/dir1" -+testCmd "cd 
${TEST_BASE}" "mkdir dir2" -+testCmd "cd ${TEST_BASE}" "chmod dir2 755" -+testCmd "cd ${TEST_BASE}" "chgrp dir2 ${TEST_GROUP}" -+testCmd "cd ${TEST_BASE}" "chown dir2 ${TEST_USER}" -+#testCmd "cd ${TEST_BASE}" "utime dir2 2020-02-02 2030-03-03" -+testCmd "cd ${TEST_BASE}" "comment dir2 \"this is a test\"" -+testCmd "cd ${TEST_BASE}" "comment dir2" -+testCmd "cd ${TEST_BASE}" "info dir2" -+testCmd "rmdir ${TEST_BASE} -r" -+testCmd "groupdel ${TEST_GROUP}" -+testCmd "userdel ${TEST_USER}" -+ -+# change ACL -+testCmd "useradd ${TEST_USER}" -+testCmd "groupadd ${TEST_GROUP}" -+testCmd "mkdir ${TEST_BASE} -p" -+testCmd "create ${TEST_BASE}/file1 644" -+testCmd "acl ${TEST_BASE}/file1" -+testCmd "acl ${TEST_BASE}/file1 user::rw-,group::rw-,other::r-- set" -+testCmd "acl ${TEST_BASE}/file1" -+testCmd "acl ${TEST_BASE}/file1 group::--- modify" -+testCmd "acl ${TEST_BASE}/file1" -+testCmd "acl ${TEST_BASE}/file1 user:${TEST_USER}:r--,group:${TEST_GROUP}:r--,mask::rw- modify" -+testCmd "acl ${TEST_BASE}/file1" -+testCmd "acl ${TEST_BASE}/file1 user:${TEST_USER}:rw-,group:${TEST_GROUP}:rw-,mask::r-- modify" -+testCmd "acl ${TEST_BASE}/file1" -+testCmd "acl ${TEST_BASE}/file1 user:${TEST_USER}:rw-,group:${TEST_GROUP}:rw-,mask::rw- modify" -+testCmd "acl ${TEST_BASE}/file1" -+testCmd "acl ${TEST_BASE}/file1 user:${TEST_USER}:rw-,group:${TEST_GROUP}:rw-,mask::rw- delete" -+testCmd "acl ${TEST_BASE}/file1" -+testCmd "unlink ${TEST_BASE}/file1" -+testCmd "mkdir ${TEST_BASE}/dir1" -+testCmd "acl ${TEST_BASE}/dir1" -+testCmd "acl ${TEST_BASE}/dir1 user::rwx,group::rwx,other::r-x set" -+testCmd "acl ${TEST_BASE}/dir1" -+testCmd "acl ${TEST_BASE}/dir1 group::--- modify" -+testCmd "acl ${TEST_BASE}/dir1" -+testCmd "acl ${TEST_BASE}/dir1 user:${TEST_USER}:r-x,group:${TEST_GROUP}:r-x,mask::rwx modify" -+testCmd "acl ${TEST_BASE}/dir1" -+testCmd "acl ${TEST_BASE}/dir1 user:${TEST_USER}:rwx,group:${TEST_GROUP}:rwx,mask::r-x modify" -+testCmd "acl ${TEST_BASE}/dir1" -+testCmd "acl ${TEST_BASE}/dir1 user:${TEST_USER}:rwx,group:${TEST_GROUP}:rwx,mask::rwx modify" -+testCmd "acl ${TEST_BASE}/dir1" -+testCmd "acl ${TEST_BASE}/dir1 user:${TEST_USER}:rwx,group:${TEST_GROUP}:rwx,mask::rwx delete" -+testCmd "acl ${TEST_BASE}/dir1" -+testCmd "rmdir ${TEST_BASE}/dir1" -+testCmd "rmdir ${TEST_BASE} -r" -+testCmd "groupdel ${TEST_GROUP}" -+testCmd "userdel ${TEST_USER}" -diff --git a/shell/src/utils.py b/shell/src/utils.py -index 8a09c059..b67db134 100644 ---- a/shell/src/utils.py -+++ b/shell/src/utils.py -@@ -1,4 +1,3 @@ --#!/usr/bin/python2 - from __future__ import absolute_import - from __future__ import print_function - from __future__ import division -@@ -56,6 +55,49 @@ if not hasattr(time, 'monotonic'): - - - -+######################################################################### -+######## other useful functions ######## -+######################################################################### -+ -+def prettySize(size): -+ isize = int(size) # argument can be string -+ if isize < 1024**1: -+ prettySize = "%iB" % isize -+ elif isize < 1024**2: -+ prettySize = '%.2fkB' % (float(isize) / 1024**1) -+ elif isize < 1024**3: -+ prettySize = '%.2fMB' % (float(isize) / 1024**2) -+ elif isize < 1024**4: -+ prettySize = '%.2fGB' % (float(isize) / 1024**3) -+ elif isize < 1024**5: -+ prettySize = '%.2fTB' % (float(isize) / 1024**4) -+ else: -+ prettySize = '%.2fPB' % (float(isize) / 1024**5) -+ return prettySize -+ -+ -+def prettyInputSize(prettysize): -+ if 'PB' in prettysize: -+ prettysize = prettysize.replace('PB', '') -+ 
size = int(prettysize) * 1024**5 -+ elif 'TB' in prettysize: -+ prettysize = prettysize.replace('TB', '') -+ size = int(prettysize) * 1024**4 -+ elif 'GB' in prettysize: -+ prettysize = prettysize.replace('GB', '') -+ size = int(prettysize) * 1024**3 -+ elif 'MB' in prettysize: -+ prettysize = prettysize.replace('MB', '') -+ size = int(prettysize) * 1024**2 -+ elif 'KB' in prettysize: -+ prettysize = prettysize.replace('kB', '') -+ size = int(prettysize) * 1024**1 -+ else: -+ size = int(prettysize) -+ return size -+ -+ -+ - if __name__ == '__main__': - for i in range(5): - print("monotonic: %.06f" % time.monotonic()) -diff --git a/src/dome/DomeCore.cpp b/src/dome/DomeCore.cpp -index 23ba3b4e..94170ab9 100644 ---- a/src/dome/DomeCore.cpp -+++ b/src/dome/DomeCore.cpp -@@ -319,6 +319,9 @@ int DomeCore::processreq(DomeReq &dreq) { - else if (dreq.domecmd == "dome_setowner") { - dome_setowner(dreq); - } -+ else if (dreq.domecmd == "dome_setutime") { -+ dome_setutime(dreq); -+ } - else if (dreq.domecmd == "dome_setsize") { - dome_setsize(dreq); - } -diff --git a/src/dome/DomeCore.h b/src/dome/DomeCore.h -index ab52ba64..5458fea8 100644 ---- a/src/dome/DomeCore.h -+++ b/src/dome/DomeCore.h -@@ -241,6 +241,8 @@ public: - int dome_setmode(DomeReq &req); - /// Set the uid/gid - int dome_setowner(DomeReq &req); -+ /// Set the actime/modtime -+ int dome_setutime(DomeReq &req); - /// Set the size of a file - int dome_setsize(DomeReq &req); - /// Set the checksum of a file -diff --git a/src/dome/DomeCoreXeq.cpp b/src/dome/DomeCoreXeq.cpp -index 29a8b7bd..73bd2f50 100644 ---- a/src/dome/DomeCoreXeq.cpp -+++ b/src/dome/DomeCoreXeq.cpp -@@ -5733,6 +5733,48 @@ int DomeCore::dome_setowner(DomeReq &req) { - - - -+int DomeCore::dome_setutime(DomeReq &req) { -+ if (status.role != status.roleHead) { -+ return req.DomeReq::SendSimpleResp(DOME_HTTP_BAD_REQUEST, "dome_setutime only available on head nodes."); -+ } -+ -+ struct utimbuf buf; -+ std::string path = req.bodyfields.get("path", ""); -+ try { -+ buf.actime = req.bodyfields.get("actime"); -+ buf.modtime = req.bodyfields.get("modtime"); -+ } -+ catch ( ... 
) { -+ return req.DomeReq::SendSimpleResp(422, "Can't find actime or modtime."); -+ } -+ -+ if(path == "") { -+ return req.DomeReq::SendSimpleResp(422, "Path cannot be empty."); -+ } -+ -+ DomeMySql sql; -+ dmlite::SecurityContext ctx; -+ fillSecurityContext(ctx, req); -+ -+ ExtendedStat meta; -+ DmStatus ret = sql.getStatbyLFN(meta, path); -+ if (!ret.ok()) -+ return req.DomeReq::SendSimpleResp(404, SSTR("Can't find lfn: '" << path << "'")); -+ -+ if (ctx.user.getUnsigned("uid") != 0) -+ return req.DomeReq::SendSimpleResp(403, "Only root can set the utime"); -+ -+ DmStatus dmst = sql.utime(meta.stat.st_ino, &buf); -+ if (dmst.ok()) -+ return req.DomeReq::SendSimpleResp(200, ""); -+ -+ return req.DomeReq::SendSimpleResp(422, SSTR("Can not set the utime of '" << path << "' err:" << -+ dmst.code() << ":" << dmst.what())); -+} -+ -+ -+ -+ - int DomeCore::dome_setsize(DomeReq &req) { - if (status.role != status.roleHead) { - return req.DomeReq::SendSimpleResp(DOME_HTTP_BAD_REQUEST, "dome_rename only available on head nodes."); -diff --git a/src/dome/DomeMysql_cns.cpp b/src/dome/DomeMysql_cns.cpp -index c2fb1a06..419dfa11 100644 ---- a/src/dome/DomeMysql_cns.cpp -+++ b/src/dome/DomeMysql_cns.cpp -@@ -1848,6 +1848,8 @@ dmlite::DmStatus DomeMySql::utime(ino_t inode, const utimbuf *buf) { - return DmStatus(EINVAL, SSTR("Cannot set time to fileid: " << inode)); - } - -+ DOMECACHE->wipeEntry(inode); -+ - Log(Logger::Lvl3, domelogmask, domelogname, "Exiting. inode:" << inode); - - return DmStatus(); -diff --git a/src/dome/cli/dome.py b/src/dome/cli/dome.py -index 95be9127..56444290 100755 ---- a/src/dome/cli/dome.py -+++ b/src/dome/cli/dome.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/python2 - # dome.py - """ - This file implements the Dome CLI. -@@ -65,14 +65,14 @@ def main(): - if not options.size: - print("Please specify the Server via --size option") - sys.exit(1) -- executor.putDone(options.url, options.pfn,options.size) -+ executor.putdone(options.url, options.pfn,options.size) - elif options.execute == 'getspaceinfo': -- executor.getSpaceInfo(options.url) -+ executor.getspaceinfo(options.url) - elif options.execute == 'statpool': - if not options.pool: - print("Please specify the Pool to stat via --pool option") - sys.exit(1) -- executor.statPool(options.url,options.pool) -+ executor.statpool(options.url,options.pool) - elif options.execute == 'getquotatoken': - if not options.lfn: - print("Please specify the LFN via --lfn option") -diff --git a/src/dome/cli/executor.py b/src/dome/cli/executor.py -index 46cdde32..792268ea 100644 ---- a/src/dome/cli/executor.py -+++ b/src/dome/cli/executor.py -@@ -57,7 +57,7 @@ class DomeExecutor(object): - def __init__(self, cert, key, capath, clientDN, clientAddress): - self.creds = DomeCredentials(cert, key, capath, clientDN, clientAddress) - -- def putDone(self, url,pfn,size): -+ def putdone(self, url,pfn,size): - talker = DomeTalker(self.creds, url, "POST", "dome_putdone") - talker.execute({"pfn" : pfn, "size" : size}) - -@@ -74,11 +74,11 @@ class DomeExecutor(object): - talker = DomeTalker(self.creds, url, "POST", "dome_pfnrm") - talker.execute({"pfn" : pfn}) - -- def getSpaceInfo(self,url): -+ def getspaceinfo(self,url): - talker = DomeTalker(self.creds, url, "GET", "dome_getspaceinfo") - talker.execute({}) - -- def statPool(self,url, pool): -+ def statpool(self,url, pool): - talker = DomeTalker(self.creds, url, "GET", "dome_statpool") - talker.execute({"poolname" : pool}) - -diff --git a/src/plugins/domeadapter/DomeAdapterHeadCatalog.cpp 
b/src/plugins/domeadapter/DomeAdapterHeadCatalog.cpp -index 846c7321..80ff9a67 100644 ---- a/src/plugins/domeadapter/DomeAdapterHeadCatalog.cpp -+++ b/src/plugins/domeadapter/DomeAdapterHeadCatalog.cpp -@@ -679,6 +679,22 @@ void DomeAdapterHeadCatalog::setAcl(const std::string& path, const Acl& acl) { - } - } - -+void DomeAdapterHeadCatalog::utime(const std::string& path, const struct utimbuf* buf) -+{ -+ Log(Logger::Lvl4, domeadapterlogmask, domeadapterlogname, "Entering."); -+ -+ DomeCredentials dc(secCtx_); -+ talker__->setcommand(dc, "POST", "dome_setutime"); -+ -+ boost::property_tree::ptree params; -+ params.put("path", absPath(path)); -+ params.put("actime", buf->actime); -+ params.put("modtime", buf->modtime); -+ -+ if(!talker__->execute(params)) { -+ throw DmException(talker__->dmlite_code(), talker__->err()); -+ } -+} - - std::string DomeAdapterHeadCatalog::absPath(const std::string &relpath) { - if(relpath.size() > 0 && relpath[0] == '/') return relpath; -diff --git a/src/plugins/domeadapter/DomeAdapterHeadCatalog.h b/src/plugins/domeadapter/DomeAdapterHeadCatalog.h -index a601eb72..820e204e 100644 ---- a/src/plugins/domeadapter/DomeAdapterHeadCatalog.h -+++ b/src/plugins/domeadapter/DomeAdapterHeadCatalog.h -@@ -76,6 +76,8 @@ namespace dmlite { - - void setAcl(const std::string&, const Acl&) ; - -+ void utime(const std::string&, const struct utimbuf*) ; -+ - Replica getReplica (int64_t rid) ; - // Replica getReplica (const std::string& sfn) ; - -diff --git a/src/puppet/dmlite/manifests/dome/config.pp b/src/puppet/dmlite/manifests/dome/config.pp -index f26a459b..78530d30 100644 ---- a/src/puppet/dmlite/manifests/dome/config.pp -+++ b/src/puppet/dmlite/manifests/dome/config.pp -@@ -36,6 +36,9 @@ class dmlite::dome::config ( - $ns_oidc_clientid = $dmlite::dome::params::ns_oidc_clientid, - $ns_oidc_allowissuer = $dmlite::dome::params::ns_oidc_allowissuer, - $ns_oidc_allowaudience = $dmlite::dome::params::ns_oidc_allowaudience, -+ -+ $informer_urls = $dmlite::dome::params::informer_urls, -+ $informer_more = $dmlite::dome::params::informer_more, - ) inherits dmlite::dome::params { - - $domehead_template = 'dmlite/dome/domehead.conf.erb' -diff --git a/src/puppet/dmlite/manifests/dome/params.pp b/src/puppet/dmlite/manifests/dome/params.pp -index b7b7d2b0..9f0ef416 100644 ---- a/src/puppet/dmlite/manifests/dome/params.pp -+++ b/src/puppet/dmlite/manifests/dome/params.pp -@@ -43,4 +43,7 @@ class dmlite::dome::params ( - $ns_oidc_allowissuer = hiera('dmlite::dav::params::ns_oidc_allowissuer', ['"/dpm" "https://wlcg.cloud.cnaf.infn.it/" wlcg']) - $ns_oidc_allowaudience = hiera('dmlite::dav::params::ns_oidc_allowaudience', ['https://wlcg.cern.ch/jwt/v1/any', "${::fqdn}"]) - $ns_oidc_auth_verify_jwks_uri = hiera('dmlite::dav::params::ns_oidc_auth_verify_jwks_uri', 'https://wlcg.cloud.cnaf.infn.it/jwk') -+ -+ $informer_urls = hiera('dmlite::dome::params::informer_urls', ['https://dpmhead-rc.cern.ch/dpminfo']) -+ $informer_more = hiera('dmlite::dome::params::informer_more', undef) - } -diff --git a/src/puppet/dmlite/templates/dome/domehead.conf.erb b/src/puppet/dmlite/templates/dome/domehead.conf.erb -index 0f4ec667..ea18a923 100644 ---- a/src/puppet/dmlite/templates/dome/domehead.conf.erb -+++ b/src/puppet/dmlite/templates/dome/domehead.conf.erb -@@ -22,6 +22,8 @@ head.filepuller.stathook: <%= @head_filepuller_stathook %> - <% if @head_filepuller_stathooktimeout != 0 -%> - head.filepuller.stathooktimeout:<%= @head_filepuller_stathooktimeout %> - <% end -%> -+ -+# Database - head.db.host: <%= 
@db_host %> - head.db.user: <%= @db_user %> - head.db.password: <%= @db_password %> -@@ -29,7 +31,9 @@ head.db.port: <%= @db_port %> - head.db.poolsz: <%= @db_pool_size %> - head.db.cnsdbname: <%= @cnsdb_name %> - head.db.dpmdbname: <%= @dpmdb_name %> -+ - <% if @enable_ns_oidc -%> -+# OIDC - head.oidc.allowaudience[]: <%= @ns_oidc_clientid %> - <% @ns_oidc_allowaudience.each do |audience| -%> - head.oidc.allowaudience[]: <%= audience %> -@@ -38,3 +42,18 @@ head.oidc.allowaudience[]: <%= audience %> - head.oidc.allowissuer[]: <%= issuer %> - <% end -%> - <% end -%> -+ -+<% if @informer_urls and @informer_urls.length > 0 -%> -+# Telemetry (version, host, totalspace, freespace) -+<% @informer_urls.each_with_index do |informer_url, index| -%> -+<% if index == 0 -%> -+head.informer.mainurl: <%= informer_url %> -+<% else -%> -+head.informer.additionalurls: <%= informer_url %> -+<% end -%> -+<% end -%> -+<% if @informer_more -%> -+head.informer.additionalinfo: <%= @informer_more %> -+<% end -%> -+<% end -%> -+ -diff --git a/src/puppet/dmlite/templates/ssm/sender.cfg.erb b/src/puppet/dmlite/templates/ssm/sender.cfg.erb -deleted file mode 100644 -index 175c04fb..00000000 ---- a/src/puppet/dmlite/templates/ssm/sender.cfg.erb -+++ /dev/null -@@ -1,68 +0,0 @@ --################################################################################ --# Required: broker configuration options --# -- --[broker] -- --# The SSM will query a BDII to find brokers available. These details are for the --# EGI production broker network --bdii: <%= @bdii_url %> --network: <%= @broker_network %> --# OR (these details will only be used if the broker network settings aren't used) --<% if @broker_host -%> --host: <%= @broker_host %> --<% else -%> --#host: test-msg01.afroditi.hellasgrid.gr --<% end -%> --<% if @broker_port -%> --port: <%= @broker_port %> --<% else -%> --#port: 6163 --<% end -%> -- --# broker authentication. If use_ssl is set, the certificates configured --# in the mandatory [certificates] section will be used. --use_ssl: <%= @use_ssl %> -- -- --################################################################################ --# Required: Certificate configuration -- --[certificates] --<% if @certificate -%> --certificate: <%= @certificate %> --<% else -%> --#certificate: /etc/grid-security/hostcert.pem --<% end -%> --<% if @key -%> --key: <%= @key %> --<% else -%> --#key: /etc/grid-security/hostkey.pem --<% end -%> --capath: <%= @capath %> --# If supplied, outgoing messages will be encrypted using this certificate. --# May be used in addition to 'use_ssl'. If used, it must be the certificate of --# the final server that's receiving your messages; not your own, nor the broker. --<%if @server_cert -%> --server_cert: <%= @server_cert %> --<% else -%> --#server_cert: /etc/grid-security/servercert.pem --<% end -%> -- --################################################################################ --# Messaging configuration. --# --[messaging] -- --# Queue to which SSM will send messages --destination: <%= @messaging_destination %> -- --# Outgoing messages will be read and removed from this directory. 
--path: <%= @messaging_path %> -- --[logging] --logfile: <%= @log_file %> --# Available logging levels: --# DEBUG, INFO, WARN, ERROR, CRITICAL --level: <%= @log_level %> --console: <%= @console %> -diff --git a/tests/dpm/dpm-tester.py b/tests/dpm/dpm-tester.py -index 0d637b61..277b2d2c 100755 ---- a/tests/dpm/dpm-tester.py -+++ b/tests/dpm/dpm-tester.py -@@ -16,10 +16,6 @@ import inspect - import os - import filecmp - import hashlib --try: -- from urllib.parse import quote, unquote --except ImportError: -- from urllib import quote, unquote - import signal - import stat - import sys -@@ -29,9 +25,10 @@ import json - import subprocess - import random - try: -- from urllib.parse import urlparse -+ from urllib.parse import urlparse, quote, unquote - except ImportError: - from urlparse import urlparse -+ from urllib import quote, unquote - - EX_OK = 0 - EX_WARNING = 1 -diff --git a/tests/python/catalog.py b/tests/python/catalog.py -index c329f4c5..4da9e9fb 100644 ---- a/tests/python/catalog.py -+++ b/tests/python/catalog.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/python2 - - import unittest - import pydmlite -@@ -42,7 +42,7 @@ class TestDmliteCatalog(unittest.TestCase): - - xstat = catalog.extendedStat("/dpm/cern.ch", True) - -- self.assertEquals(xstat.name, "cern.ch") -+ self.assertEqual(xstat.name, "cern.ch") - self.assertTrue(xstat.stat.isDir()) - self.assertTrue(stat.S_ISDIR(xstat.stat.st_mode)) - self.assertFalse(xstat.stat.isLnk()) -@@ -50,8 +50,8 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertFalse(xstat.stat.isReg()) - self.assertFalse(stat.S_ISREG(xstat.stat.st_mode)) - self.assertTrue(xstat.stat.st_ino > 0) -- self.assertEquals(xstat.stat.st_nlink, 1) -- self.assertEquals(xstat.stat.st_size, 0) -+ self.assertEqual(xstat.stat.st_nlink, 1) -+ self.assertEqual(xstat.stat.st_size, 0) - - def test_stat_file(self): - # TODO: actually create the file we test against, test times and uid/gid, test parent -@@ -59,7 +59,7 @@ class TestDmliteCatalog(unittest.TestCase): - - xstat = catalog.extendedStat("/dpm/cern.ch/home/dteam/index2.html", True) - -- self.assertEquals(xstat.name, "index2.html") -+ self.assertEqual(xstat.name, "index2.html") - self.assertFalse(xstat.stat.isDir()) - self.assertFalse(stat.S_ISDIR(xstat.stat.st_mode)) - self.assertFalse(xstat.stat.isLnk()) -@@ -67,7 +67,7 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertTrue(xstat.stat.isReg()) - self.assertTrue(stat.S_ISREG(xstat.stat.st_mode)) - self.assertTrue(xstat.stat.st_ino > 0) -- self.assertEquals(xstat.stat.st_nlink, 1) -+ self.assertEqual(xstat.stat.st_nlink, 1) - def test_mkdir(self): - catalog = self.stack.getCatalog() - try: -@@ -94,19 +94,19 @@ class TestDmliteCatalog(unittest.TestCase): - except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - return 1 -- self.assertEquals(xstat.name,self.newdir) # checking the directory name -+ self.assertEqual(xstat.name,self.newdir) # checking the directory name - self.assertTrue(xstat.stat.isDir()) - self.assertTrue(stat.S_ISDIR(xstat.stat.st_mode)) - self.assertFalse(xstat.stat.isLnk()) - self.assertFalse(xstat.stat.isReg()) - self.assertTrue(xstat.stat.st_ino > 0) - self.assertNotEqual(xstat.stat.st_nlink, 1) -- self.assertEquals(xstat.stat.st_size, 0) -+ self.assertEqual(xstat.stat.st_size, 0) - deltatimed = time.time() - xstat.stat.getMTime() - self.assertTrue(deltatimed < 4) # checking the time to create the directory -- self.assertEquals(xstat.stat.st_blksize,0) -- self.assertEquals(xstat.stat.st_size, 0) -- 
self.assertEquals(xstat.stat.st_mode, 0o40777) # checking the mode -+ self.assertEqual(xstat.stat.st_blksize,0) -+ self.assertEqual(xstat.stat.st_size, 0) -+ self.assertEqual(xstat.stat.st_mode, 0o40777) # checking the mode - def test_rmdir(self): - catalog = self.stack.getCatalog() - try: -@@ -158,14 +158,13 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertFalse(stat.S_ISLNK(xstat.stat.st_mode)) - self.assertTrue(xstat.stat.isReg()) - self.assertTrue(stat.S_ISREG(xstat.stat.st_mode)) -- self.assertEquals(xstat.name, self.newfile) # checking the file name -- self.assertEquals(xstat.stat.st_mode, 0o100777) # checking the file mode -- self.assertTrue(xstat.stat.st_ino > 0) -- self.assertEquals(xstat.stat.st_nlink, 1) -+ self.assertEqual(xstat.name, self.newfile) # checking the file name -+ self.assertEqual(xstat.stat.st_mode, 0o100777) # checking the file mode -+ self.assertEqual(xstat.stat.st_nlink, 1) - deltatime = time.time() - xstat.stat.getMTime() - self.assertTrue(deltatime < 4) # checking the time to create the file -- self.assertEquals(xstat.stat.st_blksize,0) -- self.assertEquals(xstat.stat.st_size, 0) -+ self.assertEqual(xstat.stat.st_blksize, 0) -+ self.assertEqual(xstat.stat.st_size, 0) - self.assertTrue(xstat.stat.st_uid,True) - self.assertTrue(xstat.stat.st_gid,True) - return 0 -@@ -198,15 +197,15 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertFalse(xstat.stat.isDir()) - self.assertFalse(xstat.stat.isReg()) - self.assertTrue(xstat.stat.isLnk()) -- self.assertEquals(xstat.name,self.newlink) # checking the link name -- self.assertEquals(catalog.readLink(self.newlink),self.newfile) # checking of the link (newlink->newfile) -- self.assertEquals(xstat.stat.st_mode, 0o120777) # checking the link mode -+ self.assertEqual(xstat.name,self.newlink) # checking the link name -+ self.assertEqual(catalog.readLink(self.newlink),self.newfile) # checking of the link (newlink->newfile) -+ self.assertEqual(xstat.stat.st_mode, 0o120777) # checking the link mode - self.assertTrue(xstat.stat.st_ino > 0) -- self.assertEquals(xstat.stat.st_nlink, 1) -+ self.assertEqual(xstat.stat.st_nlink, 1) - deltatimel = time.time() - xstat.stat.getMTime() - self.assertTrue(deltatimel < 4) # checking the time to create the link -- self.assertEquals(xstat.stat.st_blksize,0) -- self.assertEquals(xstat.stat.st_size, 0) -+ self.assertEqual(xstat.stat.st_blksize,0) -+ self.assertEqual(xstat.stat.st_size, 0) - - def test_rmlink(self): - catalog = self.stack.getCatalog() -diff --git a/tests/python/catalog_first.py b/tests/python/catalog_first.py -index 4a544e5b..c8848b88 100755 ---- a/tests/python/catalog_first.py -+++ b/tests/python/catalog_first.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/python2 - - import unittest - import pydmlite -@@ -35,7 +35,7 @@ class TestDmliteCatalog(unittest.TestCase): - - xstat = catalog.extendedStat("/dpm/cern.ch", True) - -- self.assertEquals(xstat.name, "cern.ch") -+ self.assertEqual(xstat.name, "cern.ch") - self.assertTrue(xstat.stat.isDir()) - self.assertTrue(stat.S_ISDIR(xstat.stat.st_mode)) - self.assertFalse(xstat.stat.isLnk()) -@@ -43,8 +43,8 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertFalse(xstat.stat.isReg()) - self.assertFalse(stat.S_ISREG(xstat.stat.st_mode)) - self.assertTrue(xstat.stat.st_ino > 0) -- self.assertEquals(xstat.stat.st_nlink, 1) -- self.assertEquals(xstat.stat.st_size, 0) -+ self.assertEqual(xstat.stat.st_nlink, 1) -+ self.assertEqual(xstat.stat.st_size, 0) - - def test_stat_file(self): - # TODO: actually create 
the file we test against, test times and uid/gid, test parent -@@ -52,7 +52,7 @@ class TestDmliteCatalog(unittest.TestCase): - - xstat = catalog.extendedStat("/dpm/cern.ch/home/dteam/index2.html", True) - -- self.assertEquals(xstat.name, "index2.html") -+ self.assertEqual(xstat.name, "index2.html") - self.assertFalse(xstat.stat.isDir()) - self.assertFalse(stat.S_ISDIR(xstat.stat.st_mode)) - self.assertFalse(xstat.stat.isLnk()) -@@ -60,7 +60,7 @@ class TestDmliteCatalog(unittest.TestCase): - self.assertTrue(xstat.stat.isReg()) - self.assertTrue(stat.S_ISREG(xstat.stat.st_mode)) - self.assertTrue(xstat.stat.st_ino > 0) -- self.assertEquals(xstat.stat.st_nlink, 1) -+ self.assertEqual(xstat.stat.st_nlink, 1) - - if __name__ == '__main__': - unittest.main() -diff --git a/tests/python/dm-ls.py b/tests/python/dm-ls.py -index c6a152fe..9259ff5a 100755 ---- a/tests/python/dm-ls.py -+++ b/tests/python/dm-ls.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - from __future__ import print_function - import pydmlite - import sys -@@ -9,11 +9,11 @@ if __name__ == "__main__": - manager = pydmlite.PluginManager() - len = len(sys.argv) - l=0 --# print len -+# print(len) - if len < 3 or sys.argv[1] == "-h": - print("Usage:", sys.argv[0], "[-l] ") - sys.exit(1) --# print sys.argv[1] -+# print(sys.argv[1]) - if sys.argv[1] == "-l" : - l=1 - try : -@@ -42,9 +42,9 @@ if __name__ == "__main__": - xstat = catalog.extendedStat(sys.argv[l+2],False) - flag = xstat.stat.isLnk() - if flag == True : --# print "\t%s" % xstat.name, " is link " -+# print("\t%s" % xstat.name, " is link ") - filename = catalog.readLink(sys.argv[l+2]) --# print "\t%s" % xstat.name, " is link to ", filename -+# print("\t%s" % xstat.name, " is link to ", filename) - ltime = time.localtime(xstat.stat.getMTime()) - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) - print("%o" % xstat.stat.st_mode, "\t", "\t", xstat.stat.st_uid, "\t", xstat.stat.st_gid, '\t', xstat.stat.st_size, "\t", date, "\t%s" %xstat.name, "->",filename) -@@ -54,9 +54,9 @@ if __name__ == "__main__": - sys.stderr.write('ERROR: %s\n' % str(err)) - sys.exit(1) - flag = xstat.stat.isDir() --# print flag -+# print(flag) - if flag == True : --# print "it is directory" -+# print("it is directory") - catalog = stack.getCatalog() - try : - mydir = catalog.openDir(inputname) -@@ -73,14 +73,14 @@ if __name__ == "__main__": - if l>0 : - # name=sys.argv[3] + f.name - name = inputname + f.name --# print name -+# print(name) - try : - xstat = catalog.extendedStat(name, True) - except : - xstat = catalog.extendedStat(name, False) - flag = xstat.stat.isLnk() - if flag == True : --# print "\t%s" % f.name, " is link " -+# print("\t%s" % f.name, " is link ") - filename = catalog.readLink(name) - ltime = time.localtime(xstat.stat.getMTime()) - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) -@@ -102,4 +102,3 @@ if __name__ == "__main__": - else : - print("\t%s" % xstat.name) - sys.exit(0) -- -diff --git a/tests/python/dm-mkdir.py b/tests/python/dm-mkdir.py -index c6d165b6..634ac2e0 100755 ---- a/tests/python/dm-mkdir.py -+++ b/tests/python/dm-mkdir.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - from __future__ import print_function - import pydmlite - import sys -@@ -7,11 +7,11 @@ if __name__ == "__main__": - manager = pydmlite.PluginManager() - len = len(sys.argv) - l=0 --# print len -+# print(len) - if len < 3 or sys.argv[1] == "-h": - print("Usage:", sys.argv[0], " []") - sys.exit(1) --# print sys.argv[1] -+# 
print(sys.argv[1]) - try : - manager.loadConfiguration(sys.argv[1]) - except Exception as err: -@@ -49,5 +49,5 @@ if __name__ == "__main__": - catalog.setMode(sys.argv[3],mode) - # arg = sys.argv[2] + "/" + sys.argv[3] - # catalog.removeDir(arg) --# print arg," was removed" -+# print(arg," was removed") - sys.exit(0) -diff --git a/tests/python/dm-mkfile.py b/tests/python/dm-mkfile.py -index 607318f4..07025669 100755 ---- a/tests/python/dm-mkfile.py -+++ b/tests/python/dm-mkfile.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - from __future__ import print_function - import pydmlite - import sys -@@ -54,15 +54,15 @@ if __name__ == "__main__": - if len == 5 : - try : - mode = int(sys.argv[4],8) --# print "%o" % mode -+# print("%o" % mode) - except Exception as err: - sys.stderr.write('ERROR: %s\n' % str(err)) - - sys.exit(1) - catalog.setMode(sys.argv[3],mode) --# print "%o" % xstat.stat.st_mode -+# print("%o" % xstat.stat.st_mode) - xstat = catalog.extendedStat(sys.argv[3], True) --# print "%o" % xstat.stat.st_mode -+# print("%o" % xstat.stat.st_mode) - ltime = time.localtime(xstat.stat.getMTime()) - date=datetime(ltime[0],ltime[1],ltime[2],ltime[3],ltime[4],ltime[5]) - print("change mode from 775 to ", "%o" % mode) -diff --git a/tests/python/dm-mklink.py b/tests/python/dm-mklink.py -index 2f776fe4..b32b448c 100755 ---- a/tests/python/dm-mklink.py -+++ b/tests/python/dm-mklink.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - from __future__ import print_function - import pydmlite - import sys -@@ -7,11 +7,11 @@ if __name__ == "__main__": - manager = pydmlite.PluginManager() - len = len(sys.argv) - l=0 --# print len -+# print(len) - if len < 5 or sys.argv[1] == "-h": - print("Usage:", sys.argv[0], " ") - sys.exit(1) --# print sys.argv[1] -+# print(sys.argv[1]) - try : - manager.loadConfiguration(sys.argv[1]) - except Exception as err: -@@ -44,5 +44,5 @@ if __name__ == "__main__": - # catalog.setMode(sys.argv[4],120777) - # arg = sys.argv[2] + "/" + sys.argv[3] - # catalog.removeDir(arg) --# print arg," was removed" -+# print(arg," was removed") - sys.exit(0) -diff --git a/tests/python/dm-physicallocation.py b/tests/python/dm-physicallocation.py -index a0133bee..1f2c0de1 100755 ---- a/tests/python/dm-physicallocation.py -+++ b/tests/python/dm-physicallocation.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - from __future__ import print_function - import pydmlite - import sys -diff --git a/tests/python/dm-rmdir.py b/tests/python/dm-rmdir.py -index 6eb950a0..2126e9c6 100755 ---- a/tests/python/dm-rmdir.py -+++ b/tests/python/dm-rmdir.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - from __future__ import print_function - import pydmlite - import sys -@@ -7,11 +7,11 @@ if __name__ == "__main__": - manager = pydmlite.PluginManager() - len = len(sys.argv) - l=0 --# print len -+# print(len) - if len < 3 or sys.argv[1] == "-h": - print("Usage:", sys.argv[0], " ") - sys.exit(1) --# print sys.argv[1] -+# print(sys.argv[1]) - try : - manager.loadConfiguration(sys.argv[1]) - except Exception as err: -diff --git a/tests/python/dm-rmfile.py b/tests/python/dm-rmfile.py -index de68ef2b..eb4a5978 100755 ---- a/tests/python/dm-rmfile.py -+++ b/tests/python/dm-rmfile.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - from __future__ import print_function - import pydmlite - import sys -diff --git a/tests/python/dm-rmlink.py b/tests/python/dm-rmlink.py -index 3104444a..03fc50e6 100755 ---- a/tests/python/dm-rmlink.py -+++ 
b/tests/python/dm-rmlink.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - from __future__ import print_function - import pydmlite - import sys -@@ -7,11 +7,11 @@ if __name__ == "__main__": - manager = pydmlite.PluginManager() - len = len(sys.argv) - l=0 --# print len -+# print(len) - if len < 3 or sys.argv[1] == "-h": - print("Usage:", sys.argv[0], " ") - sys.exit(1) --# print sys.argv[1] -+# print(sys.argv[1]) - try : - manager.loadConfiguration(sys.argv[1]) - except Exception as err: -@@ -40,5 +40,5 @@ if __name__ == "__main__": - sys.exit(1) - # arg = sys.argv[2] + "/" + sys.argv[3] - # catalog.removeDir(arg) --# print arg," was removed" -+# print(arg," was removed") - sys.exit(0) -diff --git a/tests/python/run-all.py b/tests/python/run-all.py -index 09fc08f3..d250591f 100755 ---- a/tests/python/run-all.py -+++ b/tests/python/run-all.py -@@ -1,4 +1,4 @@ --#!/usr/bin/python -+#!/usr/bin/python2 - - import unittest - diff --git a/dmlite.spec b/dmlite.spec index 40ba5d1..42e3543 100644 --- a/dmlite.spec +++ b/dmlite.spec @@ -47,7 +47,7 @@ %global py_app_version 2 %global boost_py2_version 2 %global boost_py3_version 3 -%global xrootd_version 1:5.0.2 +%global xrootd_version 1:5.2.0 %global xrootd_version_major 5 %if %{?fedora}%{!?fedora:0} >= 31 @@ -103,51 +103,21 @@ %{!?_httpd_mmn: %{expand: %%global _httpd_mmn %%(cat %{_includedir}/httpd/.mmn || echo 0-0)}} Name: dmlite -Version: 1.14.2 -Release: 7%{?dist}%{?_with_asan:.asan} +Version: 1.15.0 +Release: 1%{?dist}%{?_with_asan:.asan} Summary: Lcgdm grid data management and storage framework Group: Applications/Internet License: ASL 2.0 URL: https://gitlab.cern.ch/lcgdm/dmlite # The source of this package was pulled from upstream's vcs. Use the # following commands to generate the tarball: -# git clone http://gitlab.cern.ch/lcgdm/dmlite.git -# cd dmlite && git archive --prefix dmlite-1.14.2/ tags/v1.14.2 | gzip > dmlite-1.14.2.tar.gz +# git clone http://gitlab.cern.ch/lcgdm/dmlite.git && cd dmlite +# git archive --prefix dmlite-1.15.0/ tags/v1.15.0c | gzip > dmlite-1.15.0.tar.gz +# git diff 9fadf8525087f5f9..9fadf8525087f5f9^ src > dmlite-xrootd4-revert.patch Source0: %{name}-%{version}.tar.gz -Patch5: dmlite-xrootd4-revert.patch -Patch10: dmlite-LCGDM-2940-fix_ignorereadonlyfs.patch -Patch11: dmlite-LCGDM-2946-davs_zero_size_files.patch -Patch13: dmlite-LCGDM-2949-xrootd-signing-and-tls.patch -Patch14: dmlite-remove_explicit_xrootd_plugin_version.patch -Patch15: dmlite-remove_explicit_xrootd_plugin_version2.patch -Patch16: dmlite-initialize_class_variable.patch -Patch17: dmlite-fix_puppet_headdisk_node.patch -Patch18: dmlite-LCGDM-2941-fix_negative_cache.patch -Patch19: dmlite-directory_listing_permissions.patch -Patch22: dmlite-LCGDM-2951-invalid_fd_formatting.patch -Patch24: dmlite-LCGDM-2953-http-double-close.patch -Patch25: dmlite-LCGDM-2954-pass-tpc-error.patch -Patch26: dmlite-LCGDM-2955-skip-dpm2.patch -Patch27: dmlite-LCGDM-2957-same-ciphers-for-all-protocols.patch -Patch28: dmlite-LCGDM-2943-avoid-db-congestion.patch -Patch29: dmlite-LCGDM-2958-filename-with-special-sequence.patch -Patch30: dmlite-LCGDM-2959-use-CRL-for-TPC.patch -Patch31: dmlite-LCGDM-2961-remove-client-ip-from-token.patch -Patch32: dmlite-LCGDM-2962-improve-dmlite-tester.patch -Patch33: dmlite-LCGDM-2963-macaroon-READ_METADATA.patch -Patch34: dmlite-LCGDM-2964-dont-ignore-mapfile-for-https.patch -Patch35: dmlite-LCGDM-2967-disable-disk-on-headnode.patch -Patch36: dmlite-perfmarker-sleep-cond.patch -Patch37: dmlite-fix-oob-memory-read.patch -Patch38: 
dmlite-complilation-dependencies.patch -Patch40: dmlite-no_lfn_draining_exception.patch -Patch41: dmlite-LCGDM-2948-draining_last_transfer_wait.patch -Patch50: dmlite-ellert.patch -Patch51: dmlite-prettyprint.patch -Patch52: dmlite-shell-678.patch -Patch53: dmlite-shell-678-python3.patch Patch0: dmlite-python3.patch +Patch5: dmlite-xrootd4-revert.patch %if %{?fedora}%{!?fedora:0} >= 17 || %{?rhel}%{!?rhel:0} >= 7 BuildRequires: boost-devel >= 1.48.0 @@ -176,7 +146,11 @@ BuildRequires: python3-rpm-macros %endif BuildRequires: gcc-c++ +%if %{?rhel}%{!?rhel:0} >= 8 +BuildRequires: cmake +%else BuildRequires: cmake3 +%endif BuildRequires: cppunit-devel BuildRequires: doxygen BuildRequires: graphviz @@ -742,6 +716,7 @@ commands for system administration, testers and power users. %{_bindir}/dpm-storage-summary.py %{_bindir}/dpm-storage-summary.cgi %{_bindir}/dome-info-provider.py +%{_datarootdir}/dmlite/utils %{_sharedstatedir}/bdii/gip/provider/dome-info-exec %config(noreplace) %{_sysconfdir}/sysconfig/dpminfo %if %{py_app_version} == 2 @@ -863,48 +838,11 @@ This package provides the modules for the DPM configuration via puppet %prep %setup -q -%if %{xrootd_version_major} == 4 -%patch5 -p1 -%endif -%patch10 -p1 -%patch11 -p1 -%patch13 -p1 -%patch14 -p1 -%patch15 -p1 -%patch16 -p1 -%patch17 -p1 -%patch18 -p1 -%patch19 -p1 -%patch22 -p1 -%patch24 -p1 -%patch25 -p1 -%patch26 -p1 -%patch27 -p1 -%patch28 -p1 -%patch29 -p1 -%patch30 -p1 -%patch31 -p1 -%patch32 -p1 -%patch33 -p1 -%patch34 -p1 -%patch35 -p1 -%patch36 -p1 -%patch37 -p1 -%patch38 -p1 -%if %{with_python2_libs} != 0 -# included in dmlite-shell-678.patch -%patch40 -p1 -%patch41 -p1 %if %{py_app_version} == 3 %patch0 -p1 %endif -%else -%patch50 -p1 -%patch51 -p1 -%patch52 -p1 -%if %{py_app_version} == 3 -%patch53 -p1 -%endif +%if %{xrootd_version_major} == 4 +%patch5 -p1 %endif %build @@ -947,7 +885,11 @@ pushd tests %else pushd %{_vpath_builddir}/tests %endif +%if %{?rhel}%{!?rhel:0} >= 8 +LD_LIBRARY_PATH=~+/../src/ ctest -V +%else LD_LIBRARY_PATH=~+/../src/ ctest3 -V +%endif if [ $? 
-ne 0 ]; then exit 1 fi @@ -985,6 +927,8 @@ mkdir -p %{basefolder}/bdii tar zxvf src/puppet/CERNOps-bdii-*.tar.gz -C %{basefolder}/bdii/ --strip-components 1 mkdir -p %{basefolder}/fetchcrl tar zxvf src/puppet/puppet-fetchcrl-*.tar.gz -C %{basefolder}/fetchcrl/ --strip-components 1 +mkdir -p %{basefolder}/firewalld +tar zxvf src/puppet/puppet-firewalld-*.tar.gz -C %{basefolder}/firewalld/ --strip-components 1 mkdir -p %{basefolder}/firewall tar zxvf src/puppet/puppetlabs-firewall-*.tar.gz -C %{basefolder}/firewall/ --strip-components 1 mkdir -p %{basefolder}/memcached @@ -1000,6 +944,9 @@ tar zxvf src/puppet/puppetlabs-translate-*.tar.gz -C %{basefolder}/translate/ - mkdir -p %{basefolder}/voms tar zxvf src/puppet/lcgdm-voms-*.tar.gz -C %{basefolder}/voms/ --strip-components 1 +## legacy scripts integrated in dmlite-shell +%{__ln_s} %{_datarootdir}/dmlite/utils/dmlite-mysql-dirspaces.py %{buildroot}%{_bindir}/ + ## for dpm-xrootd %{__ln_s} libXrdDPMFinder-%{xrootd_version_major}.so %{buildroot}%{_libdir}/libXrdDPMFinder.so-%{xrootd_version_major}.3 %{__ln_s} libXrdDPMDiskAcc-%{xrootd_version_major}.so %{buildroot}%{_libdir}/libXrdDPMDiskAcc.so-%{xrootd_version_major}.3 @@ -1013,10 +960,6 @@ tar zxvf src/puppet/lcgdm-voms-*.tar.gz -C %{basefolder}/voms/ --strip-componen install -p -d -m 755 %{buildroot}%{_localstatedir}/log/dpm-gsiftp %endif -%if %{?rhel}%{!?rhel:0} > 7 -%{__ln_s} %{_datarootdir}/dmlite/utils/dmlite-mysql-dirspaces.py %{buildroot}%{_bindir}/dmlite-mysql-dirspaces.py -%endif - %post libs %{?ldconfig} /sbin/service rsyslog condrestart || true @@ -1112,6 +1055,12 @@ install -p -d -m 755 %{buildroot}%{_localstatedir}/log/dpm-gsiftp %changelog +* Mon Jun 14 2021 Petr Vokac - 1.15.0-1 +- New release with few additional bugfixes LCGDM-2975, LCGDM-2974 + +* Fri Jun 04 2021 Python Maint - 1.14.2-8 +- Rebuilt for Python 3.10 + * Sun Mar 28 2021 Petr Vokac - 1.14.2-7 - Cleanup and CentOS8 support diff --git a/sources b/sources index f215011..9112d11 100644 --- a/sources +++ b/sources @@ -1 +1 @@ -SHA512 (dmlite-1.14.2.tar.gz) = 8765bd30c9c1c1b7b5fe0433642fc89eba848b8ee1e5122080d955c5efc854302784ebc107984112da908c591522a1c794cd3db9fe13debd7c45faab72b2b3e3 +SHA512 (dmlite-1.15.0.tar.gz) = 9fb9d29d999ebde76af5180e5672a44b74fb185fefe510efeec1ac3a14c5a1bcf5fd2315a6522fff5310a862d4c2d786d4334e1e88fee7dde76873dc961f58f5
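
Notes on the hunks above:

The star accounting option parser in the dropped dmlite-shell patch mixes
optparse with argparse-style help placeholders: optparse only expands the
bare token %default, so help strings such as "default: %(default)" are
printed literally. A minimal sketch of the optparse spelling (illustration
only, not part of the patch):

    from optparse import OptionParser

    parser = OptionParser()
    # optparse substitutes the literal token "%default" in help strings;
    # "%(default)s" is the argparse convention and is not expanded here.
    parser.add_option('--ams-port', default=443, type='int',
                      help="APEL accounting server port, default: %default")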
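
The utils.py hunk adds prettySize()/prettyInputSize() as inverse helpers,
but as written they do not round-trip: prettySize() emits two-decimal
values with a lowercase-k 'kB' suffix, while prettyInputSize() tests for
'KB', strips 'kB', and feeds the remainder to int(), so both '10kB' and
'1.50MB' raise ValueError. A case-consistent sketch, assuming the same
1024-based suffix table (illustration only, not part of the patch):

    def pretty_input_size(text):
        # Suffix table mirrors prettySize() above; 'kB' matches its output.
        suffixes = (('PB', 1024**5), ('TB', 1024**4), ('GB', 1024**3),
                    ('MB', 1024**2), ('kB', 1024), ('B', 1))
        text = text.strip()
        for suffix, factor in suffixes:
            if text.endswith(suffix):
                return int(float(text[:-len(suffix)]) * factor)
        return int(text)  # bare number of bytes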
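
The dome_setutime additions form one feature: DomeCore::processreq() gains
the dispatch branch, DomeCore::dome_setutime() handles it on the head node
(root-only; resolves the LFN, then calls DomeMySql::utime(), which now also
wipes the cached metadata entry), and DomeAdapterHeadCatalog::utime() is
the client side, POSTing path/actime/modtime. The patch adds no Python
wrapper; if one were added in the style of the existing executor.py
methods, it would look roughly like this hypothetical sketch:

    # Hypothetical DomeExecutor method, mirroring putdone()/statpool()
    # above; DomeTalker and self.creds are the same objects those use.
    def setutime(self, url, path, actime, modtime):
        talker = DomeTalker(self.creds, url, "POST", "dome_setutime")
        # Body fields match what DomeCore::dome_setutime() reads.
        talker.execute({"path": path, "actime": actime, "modtime": modtime})

The handler replies 403 unless the caller maps to uid 0, 404 when the LFN
cannot be stat'ed, and 422 on missing fields or database errors, matching
the SendSimpleResp() codes in the hunk.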