From 9324fa8086ddaae769d4cef16f9b41cdd7ba4c36 Mon Sep 17 00:00:00 2001 From: Zbigniew Jędrzejewski-Szmek Date: Nov 26 2023 12:14:55 +0000 Subject: Version 3.9.1 ... (rhbz#2242285) --- diff --git a/0001-Skip-tests-that-fail-on-s390x.patch b/0001-Skip-tests-that-fail-on-s390x.patch index 7675255..790396a 100644 --- a/0001-Skip-tests-that-fail-on-s390x.patch +++ b/0001-Skip-tests-that-fail-on-s390x.patch @@ -1,17 +1,17 @@ -From 35a394b3806687e4be0e369fe293b98e939deb07 Mon Sep 17 00:00:00 2001 +From 66ffc01d175e297b9832dfc309fe2b63be91acd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= -Date: Sat, 26 Mar 2022 11:58:59 +0100 -Subject: [PATCH 1/5] Skip tests that fail on s390x +Date: Sun, 26 Nov 2023 13:10:19 +0100 +Subject: [PATCH] Skip tests that fail on s390x --- - tables/tests/test_index_backcompat.py | 154 -------------------------- - 1 file changed, 154 deletions(-) + tables/tests/test_index_backcompat.py | 153 -------------------------- + 1 file changed, 153 deletions(-) diff --git a/tables/tests/test_index_backcompat.py b/tables/tests/test_index_backcompat.py -index f845a33e16..050820c2a5 100644 +index a3f404f768..e83c4d722a 100644 --- a/tables/tests/test_index_backcompat.py +++ b/tables/tests/test_index_backcompat.py -@@ -1,161 +1,7 @@ +@@ -1,160 +1,7 @@ from tables.tests import common - @@ -29,9 +29,9 @@ index f845a33e16..050820c2a5 100644 - """Checking index version.""" - - t1var1 = self.table1.cols.var1 -- if "2_0" in self.h5fname: +- if "2_0" in str(self.h5fname): - self.assertEqual(t1var1.index._v_version, "2.0") -- elif "2_1" in self.h5fname: +- elif "2_1" in str(self.h5fname): - self.assertEqual(t1var1.index._v_version, "2.1") - - def test01_string(self): @@ -169,10 +169,6 @@ index f845a33e16..050820c2a5 100644 - for n in range(niter): - theSuite.addTest(common.unittest.makeSuite(Indexes2_0TestCase)) - theSuite.addTest(common.unittest.makeSuite(Indexes2_1TestCase)) -- - return theSuite + return theSuite --- -2.41.0 - diff --git a/0002-Relax-dependency-on-blosc2.patch b/0002-Relax-dependency-on-blosc2.patch deleted file mode 100644 index b8bb7c8..0000000 --- a/0002-Relax-dependency-on-blosc2.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 6dad0d61386d69833ca73098b533faa6b01a31b5 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= -Date: Sun, 14 May 2023 18:54:14 +0200 -Subject: [PATCH 2/5] Relax dependency on blosc2 - -As specified before, it translates to "blosc2 >= 2" + "blosc2 < 2.1", -which is obviously not satisfied by current blosc2 = 2.2.2. ---- - pyproject.toml | 2 +- - requirements.txt | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/pyproject.toml b/pyproject.toml -index 022614ad25..eb16091a6c 100644 ---- a/pyproject.toml -+++ b/pyproject.toml -@@ -6,6 +6,6 @@ requires = [ - "packaging", - "py-cpuinfo", - "Cython >=0.29.21", -- "blosc2 ~=2.0.0" -+ "blosc2 >=2.0.0" - ] - build-backend = "setuptools.build_meta" -diff --git a/requirements.txt b/requirements.txt -index 8d2f5d2456..99e7db38dc 100644 ---- a/requirements.txt -+++ b/requirements.txt -@@ -6,6 +6,6 @@ numpy>=1.19.0 - numexpr>=2.6.2 - # blosc2 wheel is actually only needed when compiling on conda envs. - # Otherwise, lib comes bundled in PyTables wheels (but it doesn't hurt either). 
--blosc2~=2.0.0 -+blosc2>=2.0.0 - packaging - py-cpuinfo --- -2.41.0 - diff --git a/0003-Fix-build-errors-when-compiled-using-cython-3.0.0b1.patch b/0003-Fix-build-errors-when-compiled-using-cython-3.0.0b1.patch deleted file mode 100644 index 1e9b679..0000000 --- a/0003-Fix-build-errors-when-compiled-using-cython-3.0.0b1.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 60acbd6fde7042ca4aeabe0bacc532a62a125352 Mon Sep 17 00:00:00 2001 -From: Matus Valo -Date: Wed, 15 Mar 2023 22:49:07 +0100 -Subject: [PATCH 3/5] Fix build errors when compiled using cython 3.0.0b1. - ---- - tables/tableextension.pyx | 2 +- - tables/utilsextension.pyx | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/tables/tableextension.pyx b/tables/tableextension.pyx -index a85fef252d..f258dcbaee 100644 ---- a/tables/tableextension.pyx -+++ b/tables/tableextension.pyx -@@ -38,7 +38,7 @@ from .utils import SizeType - from .utilsextension cimport get_native_type, cstr_to_pystr - - # numpy functions & objects --from hdf5extension cimport Leaf -+from .hdf5extension cimport Leaf - from cpython cimport PyErr_Clear - from libc.stdio cimport snprintf - from libc.stdlib cimport malloc, free -diff --git a/tables/utilsextension.pyx b/tables/utilsextension.pyx -index 5b16dcd38a..664e1ea58e 100644 ---- a/tables/utilsextension.pyx -+++ b/tables/utilsextension.pyx -@@ -344,7 +344,7 @@ except ImportError: - #--------------------------------------------------------------------- - - # Error handling helpers --cdef herr_t e_walk_cb(unsigned n, const H5E_error_t *err, void *data) with gil: -+cdef herr_t e_walk_cb(unsigned n, const H5E_error_t *err, void *data) noexcept with gil: - cdef object bt = data # list - #cdef char major_msg[256] - #cdef char minor_msg[256] --- -2.41.0 - diff --git a/0004-Fix-compatibility-with-numpu-v1.25.patch b/0004-Fix-compatibility-with-numpu-v1.25.patch deleted file mode 100644 index 7216db1..0000000 --- a/0004-Fix-compatibility-with-numpu-v1.25.patch +++ /dev/null @@ -1,1086 +0,0 @@ -From 7a98a0a57164182f2919c5f8dc868afb98bc0655 Mon Sep 17 00:00:00 2001 -From: Antonio Valentino -Date: Sat, 17 Jun 2023 19:51:00 +0200 -Subject: [PATCH 4/5] Fix compatibility with numpu v1.25 - ---- - bench/chunkshape-testing.py | 4 +- - bench/deep-tree.py | 2 +- - bench/indexed_search.py | 4 +- - bench/keysort.py | 6 +- - bench/optimal-chunksize.py | 2 +- - doc/source/cookbook/hints_for_sql_users.rst | 4 +- - doc/source/cookbook/inmemory_hdf5_files.rst | 4 +- - doc/source/usersguide/tutorials.rst | 10 ++-- - tables/filters.py | 2 +- - tables/hdf5extension.pyx | 66 ++++++++++----------- - tables/indexesextension.pyx | 6 +- - tables/lrucacheextension.pyx | 12 ++-- - tables/table.py | 15 ++--- - tables/tableextension.pyx | 24 ++++---- - tables/tests/test_array.py | 24 ++++---- - tables/tests/test_attributes.py | 4 +- - tables/tests/test_carray.py | 4 +- - tables/tests/test_earray.py | 4 +- - tables/tests/test_indexvalues.py | 6 +- - tables/tests/test_queries.py | 8 +-- - tables/tests/test_timetype.py | 36 +++++++---- - tables/tests/test_types.py | 3 +- - tables/tests/test_vlarray.py | 6 +- - tables/utils.py | 7 ++- - tables/utilsextension.pyx | 20 +++---- - tables/vlarray.py | 4 +- - 26 files changed, 153 insertions(+), 134 deletions(-) - -diff --git a/bench/chunkshape-testing.py b/bench/chunkshape-testing.py -index 84e75875b6..41e0565a3d 100644 ---- a/bench/chunkshape-testing.py -+++ b/bench/chunkshape-testing.py -@@ -83,7 +83,7 @@ print("earray2 populate time:", clock() - t1) - f2.close() - - # t1=time() --# c2[:] 
= numpy.empty(shape=(M, N), dtype="int32") -+# c2[:] = np.empty(shape=(M, N), dtype="int32") - # print "carray populate time:", time()-t1 - - # f3 = f.create_carray(f.root, 'cfield3', -@@ -94,7 +94,7 @@ f2.close() - # tables.Int32Atom(), (0, M), - # "scalar int32 carray", expectedrows=N) - # t1=time() --# e2.append(numpy.empty(shape=(N, M), dtype="int32")) -+# e2.append(np.empty(shape=(N, M), dtype="int32")) - # print "earray populate time:", time()-t1 - - # t1=time() -diff --git a/bench/deep-tree.py b/bench/deep-tree.py -index b2a43ec7b7..ee43577a63 100644 ---- a/bench/deep-tree.py -+++ b/bench/deep-tree.py -@@ -35,7 +35,7 @@ def show_stats(explain, tref): - - def populate(f, nlevels): - g = f.root -- #arr = numpy.zeros((10,), "f4") -+ #arr = np.zeros((10,), "f4") - #descr = {'f0': tables.Int32Col(), 'f1': tables.Float32Col()} - for i in range(nlevels): - #dset = f.create_array(g, "DS1", arr) -diff --git a/bench/indexed_search.py b/bench/indexed_search.py -index 0109cc078c..d9e33c5967 100644 ---- a/bench/indexed_search.py -+++ b/bench/indexed_search.py -@@ -207,11 +207,11 @@ class DB: - # print "Results len:", results - # self.print_qtime_idx(colname, ltimes, True, verbose) - # Print internal PyTables index tprof statistics -- #tprof = numpy.array(tprof) -+ #tprof = np.array(tprof) - #tmean, tstd = self.norm_times(tprof) - # print "tprof-->", round(tmean, prec), "+-", round(tstd, prec) - # print "tprof hist-->", \ -- # numpy.histogram(tprof) -+ # np.histogram(tprof) - # print "tprof raw-->", tprof - # Always reopen the file after *every* query loop. - # Necessary to make the benchmark to run correctly. -diff --git a/bench/keysort.py b/bench/keysort.py -index 30db7ef02d..6c006d1295 100644 ---- a/bench/keysort.py -+++ b/bench/keysort.py -@@ -28,6 +28,6 @@ for dtype1 in ('S6', 'b1', - tb.indexesextension.keysort(a, b) - tks = clock() - t1 - print("keysort time-->", tks, " {:.2f}x".format(tref / tks)) -- assert np.alltrue(a == e) -- #assert numpy.alltrue(b == d) -- assert np.alltrue(f == d) -+ assert np.all(a == e) -+ #assert np.all(b == d) -+ assert np.all(f == d) -diff --git a/bench/optimal-chunksize.py b/bench/optimal-chunksize.py -index fc071d4746..1c86317aca 100644 ---- a/bench/optimal-chunksize.py -+++ b/bench/optimal-chunksize.py -@@ -61,7 +61,7 @@ def bench(chunkshape, filters): - # Fill the array - t1 = clock() - for i in range(N): -- # e.append([numpy.random.rand(M)]) # use this for less compressibility -+ # e.append([np.random.rand(M)]) # use this for less compressibility - e.append([quantize(np.random.rand(M), 6)]) - # os.system("sync") - print(f"Creation time: {clock() - t1:.3f}", end=' ') -diff --git a/doc/source/cookbook/hints_for_sql_users.rst b/doc/source/cookbook/hints_for_sql_users.rst -index 866357eed3..81ed3fd563 100644 ---- a/doc/source/cookbook/hints_for_sql_users.rst -+++ b/doc/source/cookbook/hints_for_sql_users.rst -@@ -311,8 +311,8 @@ structure described above:: - tbl.append(rows) - - # Using a NumPy container. 
-- import numpy -- rows = numpy.rec.array(rows) -+ import numpy as np -+ rows = np.rec.array(rows) - tbl.append(rows) - - -diff --git a/doc/source/cookbook/inmemory_hdf5_files.rst b/doc/source/cookbook/inmemory_hdf5_files.rst -index 9797338e99..c35afcc807 100644 ---- a/doc/source/cookbook/inmemory_hdf5_files.rst -+++ b/doc/source/cookbook/inmemory_hdf5_files.rst -@@ -46,8 +46,8 @@ one needs to specify to use the CORE driver:: - - >>> import tables - >>> h5file = tables.open_file("new_sample.h5", "w", driver="H5FD_CORE") -- >>> import numpy -- >>> a = h5file.create_array(h5file.root, "array", numpy.zeros((300, 300))) -+ >>> import numpy as np -+ >>> a = h5file.create_array(h5file.root, "array", np.zeros((300, 300))) - >>> h5file.close() - - -diff --git a/doc/source/usersguide/tutorials.rst b/doc/source/usersguide/tutorials.rst -index 84c30526bc..9435572ad5 100644 ---- a/doc/source/usersguide/tutorials.rst -+++ b/doc/source/usersguide/tutorials.rst -@@ -2222,20 +2222,20 @@ object is returned. - - It is possible to create arrays that immitate nested table-like structure with _v_nested_descr attribute:: - -- >>> import numpy -+ >>> import numpy as np - >>> table.description._v_nested_descr - [('info2', [('info3', [('x', '()f8'), ('y', '()u1')]), ('name', '()S10'), - ('value', '()f8')]), ('info1', [('name', '()S10'), ('value', '()f8')]), - ('color', '()u4')] -- >>> numpy.rec.array(None, shape=0, -- dtype=table.description._v_nested_descr) -+ >>> np.rec.array(None, shape=0, dtype=table.description._v_nested_descr) - recarray([], - dtype=[('info2', [('info3', [('x', '>f8'), ('y', '|u1')]), - ('name', '|S10'), ('value', '>f8')]), - ('info1', [('name', '|S10'), ('value', '>f8')]), - ('color', '>u4')]) -- >>> numpy.rec.array(None, shape=0, -- dtype=table.description.info2._v_nested_descr) -+ >>> np.rec.array( -+ None, shape=0, dtype=table.description.info2._v_nested_descr -+ ) - recarray([], - dtype=[('info3', [('x', '>f8'), ('y', '|u1')]), ('name', '|S10'), - ('value', '>f8')]) -diff --git a/tables/filters.py b/tables/filters.py -index f27bedc278..15dcc926d5 100644 ---- a/tables/filters.py -+++ b/tables/filters.py -@@ -278,7 +278,7 @@ class Filters: - - # Byte 3: least significant digit. 
- if self.least_significant_digit is not None: -- # assert isinstance(self.least_significant_digit, numpy.int8) -+ # assert isinstance(self.least_significant_digit, np.int8) - packed |= self.least_significant_digit - packed <<= 8 - -diff --git a/tables/hdf5extension.pyx b/tables/hdf5extension.pyx -index 7c0b79e6c1..d90bc07cf1 100644 ---- a/tables/hdf5extension.pyx -+++ b/tables/hdf5extension.pyx -@@ -37,7 +37,7 @@ ObjTimestamps = namedtuple('ObjTimestamps', ['atime', 'mtime', - - import pickle - --import numpy -+import numpy as np - - from .exceptions import HDF5ExtError, DataTypeWarning - -@@ -225,12 +225,12 @@ cdef object get_attribute_string_or_none(hid_t node_id, char* attr_name): - size = H5ATTRget_attribute_string(node_id, attr_name, &attr_value, &cset) - if size == 0: - if cset == H5T_CSET_UTF8: -- retvalue = numpy.unicode_('') -+ retvalue = np.unicode_('') - else: -- retvalue = numpy.bytes_(b'') -+ retvalue = np.bytes_(b'') - elif cset == H5T_CSET_UTF8: - retvalue = PyUnicode_DecodeUTF8(attr_value, size, NULL) -- retvalue = numpy.unicode_(retvalue) -+ retvalue = np.unicode_(retvalue) - else: - retvalue = PyBytes_FromStringAndSize(attr_value, size) - # AV: oct 2012 -@@ -239,9 +239,9 @@ cdef object get_attribute_string_or_none(hid_t node_id, char* attr_name): - # The entire process is quite odd but due to a bug (??) in the way - # numpy arrays are pickled in python 3 we can't assume that - # strlen(attr_value) is the actual length of the attribute -- # and numpy.bytes_(attr_value) can give a truncated pickle string -+ # and np.bytes_(attr_value) can give a truncated pickle string - retvalue = retvalue.rstrip(b'\x00') -- retvalue = numpy.bytes_(retvalue) -+ retvalue = np.bytes_(retvalue) - - # Important to release attr_value, because it has been malloc'ed! - if attr_value: -@@ -274,7 +274,7 @@ cdef object get_dtype_scalar(hid_t type_id, H5T_class_t class_id, - - # Try to get a NumPy type. If this can't be done, return None. - try: -- ntype = numpy.dtype(stype) -+ ntype = np.dtype(stype) - except TypeError: - ntype = None - return ntype -@@ -661,14 +661,14 @@ cdef class AttributeSet: - dset_id = node._v_objectid - - # Convert a NumPy scalar into a NumPy 0-dim ndarray -- if isinstance(value, numpy.generic): -- value = numpy.array(value) -+ if isinstance(value, np.generic): -+ value = np.array(value) - - # Check if value is a NumPy ndarray and of a supported type -- if (isinstance(value, numpy.ndarray) and -+ if (isinstance(value, np.ndarray) and - value.dtype.kind in ('V', 'S', 'b', 'i', 'u', 'f', 'c')): - # get a contiguous array: fixes #270 and gh-176 -- #value = numpy.ascontiguousarray(value) -+ #value = np.ascontiguousarray(value) - value = value.copy() - if value.dtype.kind == 'V': - description, rabyteorder = descr_from_dtype(value.dtype, ptparams=node._v_file.params) -@@ -695,7 +695,7 @@ cdef class AttributeSet: - H5Tclose(type_id) - else: - # Object cannot be natively represented in HDF5. 
-- if (isinstance(value, numpy.ndarray) and -+ if (isinstance(value, np.ndarray) and - value.dtype.kind == 'U' and - value.shape == ()): - value = value[()].encode('utf-8') -@@ -755,13 +755,13 @@ cdef class AttributeSet: - &cset) - if type_size == 0: - if cset == H5T_CSET_UTF8: -- retvalue = numpy.unicode_('') -+ retvalue = np.unicode_('') - else: -- retvalue = numpy.bytes_(b'') -+ retvalue = np.bytes_(b'') - - elif cset == H5T_CSET_UTF8: - retvalue = PyUnicode_DecodeUTF8(str_value, type_size, NULL) -- retvalue = numpy.unicode_(retvalue) -+ retvalue = np.unicode_(retvalue) - else: - retvalue = PyBytes_FromStringAndSize(str_value, type_size) - # AV: oct 2012 -@@ -770,9 +770,9 @@ cdef class AttributeSet: - # The entire process is quite odd but due to a bug (??) in the way - # numpy arrays are pickled in python 3 we can't assume that - # strlen(attr_value) is the actual length of the attibute -- # and numpy.bytes_(attr_value) can give a truncated pickle sting -+ # and np.bytes_(attr_value) can give a truncated pickle sting - retvalue = retvalue.rstrip(b'\x00') -- retvalue = numpy.bytes_(retvalue) # bytes -+ retvalue = np.bytes_(retvalue) # bytes - # Important to release attr_value, because it has been malloc'ed! - if str_value: - free(str_value) -@@ -803,7 +803,7 @@ cdef class AttributeSet: - # Get the NumPy dtype from the type_id - try: - stype_, shape_ = hdf5_to_np_ext_type(type_id, pure_numpy_types=True, ptparams=node._v_file.params) -- dtype_ = numpy.dtype(stype_, shape_) -+ dtype_ = np.dtype(stype_, shape_) - except TypeError: - if class_id == H5T_STRING and H5Tis_variable_str(type_id): - nelements = H5ATTRget_attribute_vlen_string_array(dset_id, cattrname, -@@ -814,21 +814,21 @@ cdef class AttributeSet: - - # The following generator expressions do not work with Cython 0.15.1 - if cset == H5T_CSET_UTF8: -- #retvalue = numpy.fromiter( -+ #retvalue = np.fromiter( - # PyUnicode_DecodeUTF8(str_values[i], - # strlen(str_values[i]), - # NULL) - # for i in range(nelements), "O8") -- retvalue = numpy.array([ -+ retvalue = np.array([ - PyUnicode_DecodeUTF8(str_values[i], - strlen(str_values[i]), - NULL) - for i in range(nelements)], "O8") - - else: -- #retvalue = numpy.fromiter( -+ #retvalue = np.fromiter( - # str_values[i] for i in range(nelements), "O8") -- retvalue = numpy.array( -+ retvalue = np.array( - [str_values[i] for i in range(nelements)], "O8") - retvalue.shape = shape - -@@ -849,7 +849,7 @@ cdef class AttributeSet: - return None - - # Get the container for data -- ndvalue = numpy.empty(dtype=dtype_, shape=shape) -+ ndvalue = np.empty(dtype=dtype_, shape=shape) - # Get the pointer to the buffer data area - rbuf = PyArray_DATA(ndvalue) - # Actually read the attribute from disk -@@ -1273,7 +1273,7 @@ cdef class Array(Leaf): - self.__class__.__name__, atom_)) - - # Allocate space for the dimension axis info and fill it -- dims = numpy.array(shape, dtype=numpy.intp) -+ dims = np.array(shape, dtype=np.intp) - self.rank = len(shape) - self.dims = npy_malloc_dims(self.rank, PyArray_DATA(dims)) - rbuf = _array_data(nparr) -@@ -1339,11 +1339,11 @@ cdef class Array(Leaf): - class_ = self._c_classid.encode('utf-8') - - # Get the fill values -- if isinstance(atom.dflt, numpy.ndarray) or atom.dflt: -- dflts = numpy.array(atom.dflt, dtype=atom.dtype) -+ if isinstance(atom.dflt, np.ndarray) or atom.dflt: -+ dflts = np.array(atom.dflt, dtype=atom.dtype) - fill_data = PyArray_DATA(dflts) - else: -- dflts = numpy.zeros((), dtype=atom.dtype) -+ dflts = np.zeros((), dtype=atom.dtype) - fill_data = NULL - 
if atom.shape == (): - # The default is preferred as a scalar value instead of 0-dim array -@@ -1372,7 +1372,7 @@ cdef class Array(Leaf): - H5ATTRset_attribute_string(self.dataset_id, "TITLE", encoded_title, - len(encoded_title), H5T_CSET_ASCII) - if self.extdim >= 0: -- extdim = numpy.array([self.extdim], dtype="int32") -+ extdim = np.array([self.extdim], dtype="int32") - # Attach the EXTDIM attribute in case of enlargeable arrays - H5ATTRset_attribute(self.dataset_id, "EXTDIM", H5T_NATIVE_INT, - 0, NULL, PyArray_BYTES(extdim)) -@@ -1447,7 +1447,7 @@ cdef class Array(Leaf): - # object arrays should not be read directly into memory - if atom.dtype != object: - # Get the fill value -- dflts = numpy.zeros((), dtype=atom.dtype) -+ dflts = np.zeros((), dtype=atom.dtype) - fill_data = PyArray_DATA(dflts) - H5ARRAYget_fill_value(self.dataset_id, self.type_id, - &fill_status, fill_data); -@@ -1692,9 +1692,9 @@ cdef class Array(Leaf): - startl.append(start) - countl.append(count) - stepl.append(step) -- start_ = numpy.array(startl, dtype="i8") -- count_ = numpy.array(countl, dtype="i8") -- step_ = numpy.array(stepl, dtype="i8") -+ start_ = np.array(startl, dtype="i8") -+ count_ = np.array(countl, dtype="i8") -+ step_ = np.array(stepl, dtype="i8") - - # Get the pointers to array data - startp = PyArray_DATA(start_) -@@ -2146,7 +2146,7 @@ cdef class VLArray(Leaf): - # Compute the shape for the read array - shape = list(self._atomicshape) - shape.insert(0, vllen) # put the length at the beginning of the shape -- nparr = numpy.ndarray( -+ nparr = np.ndarray( - buffer=buf, dtype=self._atomicdtype.base, shape=shape) - # Set the writeable flag for this ndarray object - nparr.flags.writeable = True -diff --git a/tables/indexesextension.pyx b/tables/indexesextension.pyx -index 8c7e1ba722..d52ea91c2b 100644 ---- a/tables/indexesextension.pyx -+++ b/tables/indexesextension.pyx -@@ -25,7 +25,7 @@ Misc variables: - """ - - import cython --import numpy -+import numpy as np - cimport numpy as cnp - - from .exceptions import HDF5ExtError -@@ -628,7 +628,7 @@ cdef class IndexArray(Array): - # Create the buffer for reading sorted data chunks if not created yet - if self.bufferlb is None: - # Internal buffers -- self.bufferlb = numpy.empty(dtype=dtype, shape=self.chunksize) -+ self.bufferlb = np.empty(dtype=dtype, shape=self.chunksize) - # Get the pointers to the different buffer data areas - self.rbuflb = PyArray_DATA(self.bufferlb) - # Init structures for accelerating sorted array reads -@@ -663,7 +663,7 @@ cdef class IndexArray(Array): - maxslots = params['BOUNDS_MAX_SIZE'] // rowsize - self.boundscache = NumCache( - (maxslots, self.nbounds), dtype, 'non-opt types bounds') -- self.bufferbc = numpy.empty(dtype=dtype, shape=self.nbounds) -+ self.bufferbc = np.empty(dtype=dtype, shape=self.nbounds) - # Get the pointer for the internal buffer for 2nd level cache - self.rbufbc = PyArray_DATA(self.bufferbc) - # Another NumCache for the sorted values -diff --git a/tables/lrucacheextension.pyx b/tables/lrucacheextension.pyx -index 6f58c52685..16463c4ea1 100644 ---- a/tables/lrucacheextension.pyx -+++ b/tables/lrucacheextension.pyx -@@ -27,7 +27,7 @@ cdef extern from "Python.h": - - import sys - --import numpy -+import numpy as np - from libc.string cimport memcpy, strcmp - from cpython.unicode cimport PyUnicode_Check - from numpy cimport import_array, ndarray, PyArray_DATA -@@ -202,7 +202,7 @@ cdef class BaseCache: - self.name = name - self.incsetcount = False - # The array for keeping the access times (using long ints 
here) -- self.atimes = numpy.zeros(shape=nslots, dtype=numpy.int_) -+ self.atimes = np.zeros(shape=nslots, dtype=np.int_) - self.ratimes = PyArray_DATA(self.atimes) - - def __len__(self): -@@ -331,7 +331,7 @@ cdef class ObjectCache(BaseCache): - self.__dict = {} - self.mrunode = None # Most Recent Used node - # The array for keeping the object size (using long ints here) -- self.sizes = numpy.zeros(shape=nslots, dtype=numpy.int_) -+ self.sizes = np.zeros(shape=nslots, dtype=np.int_) - self.rsizes = PyArray_DATA(self.sizes) - - # Clear cache -@@ -505,11 +505,11 @@ cdef class NumCache(BaseCache): - # The cache object where all data will go - # The last slot is to allow the setitem1_ method to still return - # a valid scratch area for writing purposes -- self.cacheobj = numpy.empty(shape=(nslots+1, self.slotsize), -+ self.cacheobj = np.empty(shape=(nslots+1, self.slotsize), - dtype=dtype) - self.rcache = PyArray_DATA(self.cacheobj) - # The array for keeping the keys of slots -- self.keys = (-numpy.ones(shape=nslots, dtype=numpy.int64)) -+ self.keys = (-np.ones(shape=nslots, dtype=np.int64)) - self.rkeys = PyArray_DATA(self.keys) - - # Returns the address of nslot -@@ -623,7 +623,7 @@ cdef class NumCache(BaseCache): - elif self.containscount > 0: - hitratio = self.getcount / self.containscount - else: -- hitratio = numpy.nan -+ hitratio = np.nan - return """<%s(%s) - (%d maxslots, %d slots used, %.3f KB cachesize, - hit ratio: %.3f, disabled? %s)> -diff --git a/tables/table.py b/tables/table.py -index 5c854df79e..6c1a8b3a1d 100644 ---- a/tables/table.py -+++ b/tables/table.py -@@ -1517,8 +1517,7 @@ very small/large chunksize, you may want to increase/decrease it.""" - cstart, cstop = coords[0], coords[-1] + 1 - if cstop - cstart == len(coords): - # Chances for monotonically increasing row values. Refine. 
-- inc_seq = np.alltrue( -- np.arange(cstart, cstop) == np.array(coords)) -+ inc_seq = np.all(np.arange(cstart, cstop) == np.array(coords)) - if inc_seq: - return self.read(cstart, cstop, field=field) - return self.read_coordinates(coords, field) -@@ -2079,8 +2078,9 @@ very small/large chunksize, you may want to increase/decrease it.""" - table[2] = [456,'db2',1.2] - - # Modify two existing rows -- rows = numpy.rec.array([[457,'db1',1.2],[6,'de2',1.3]], -- formats='i4,a3,f8') -+ rows = np.rec.array( -+ [[457,'db1',1.2],[6,'de2',1.3]], formats='i4,a3,f8' -+ ) - table[1:30:2] = rows # modify a table slice - table[[1,3]] = rows # only modifies rows 1 and 3 - table[[True,False,True]] = rows # only modifies rows 0 and 2 -@@ -2088,8 +2088,9 @@ very small/large chunksize, you may want to increase/decrease it.""" - Which is equivalent to:: - - table.modify_rows(start=2, rows=[456,'db2',1.2]) -- rows = numpy.rec.array([[457,'db1',1.2],[6,'de2',1.3]], -- formats='i4,a3,f8') -+ rows = np.rec.array( -+ [[457,'db1',1.2],[6,'de2',1.3]], formats='i4,a3,f8' -+ ) - table.modify_rows(start=1, stop=3, step=2, rows=rows) - table.modify_coordinates([1,3,2], rows) - table.modify_coordinates([True, False, True], rows) -@@ -3466,7 +3467,7 @@ class Column: - table.modify_columns(start=1, columns=[[-1]], names=['col1']) - - # Modify rows 1 and 3 -- columns = numpy.rec.fromarrays([[2,3]], formats='i4') -+ columns = np.rec.fromarrays([[2,3]], formats='i4') - table.modify_columns(start=1, step=2, columns=columns, - names=['col1']) - -diff --git a/tables/tableextension.pyx b/tables/tableextension.pyx -index f258dcbaee..f3539b3224 100644 ---- a/tables/tableextension.pyx -+++ b/tables/tableextension.pyx -@@ -22,7 +22,7 @@ Misc variables: - """ - - import sys --import numpy -+import numpy as np - from time import time - import platform - -@@ -362,13 +362,13 @@ cdef class Table(Leaf): - if colpath == "": - # Compute a byteorder for the entire table - if len(field_byteorders) > 0: -- field_byteorders = numpy.array(field_byteorders) -+ field_byteorders = np.array(field_byteorders) - # Cython doesn't interpret well the extended comparison - # operators so this: field_byteorders == "little" doesn't work - # as expected -- if numpy.alltrue(field_byteorders.__eq__("little")): -+ if np.all(field_byteorders.__eq__("little")): - byteorder = "little" -- elif numpy.alltrue(field_byteorders.__eq__("big")): -+ elif np.all(field_byteorders.__eq__("big")): - byteorder = "big" - else: # Yes! someone has done it! 
- byteorder = "mixed" -@@ -844,7 +844,7 @@ cdef class Row: - - wdflts = table._v_wdflts - if wdflts is None: -- self.wrec = numpy.zeros(1, dtype=self.dtype) # Defaults are zero -+ self.wrec = np.zeros(1, dtype=self.dtype) # Defaults are zero - else: - self.wrec = table._v_wdflts.copy() - self.wreccpy = self.wrec.copy() # A copy of the defaults -@@ -981,8 +981,8 @@ cdef class Row: - iobuf = self.iobuf - j = 0; recout = 0; cs = self.chunksize - nchunksread = self.nrowsread // cs -- tmp_range = numpy.arange(0, cs, dtype='int64') -- self.bufcoords = numpy.empty(self.nrowsinbuf, dtype='int64') -+ tmp_range = np.arange(0, cs, dtype='int64') -+ self.bufcoords = np.empty(self.nrowsinbuf, dtype='int64') - # Fetch valid chunks until the I/O buffer is full - while nchunksread < self.totalchunks: - if self.chunkmap_data[nchunksread]: -@@ -1072,7 +1072,7 @@ cdef class Row: - lenbuf = self.nrowsinbuf - tmp = self.coords[self.nrowsread:self.nrowsread+lenbuf:self.step] - # We have to get a contiguous buffer, so numpy.array is the way to go -- self.bufcoords = numpy.array(tmp, dtype="uint64") -+ self.bufcoords = np.array(tmp, dtype="uint64") - self._row = -1 - if self.bufcoords.size > 0: - recout = self.table._read_elements(self.bufcoords, self.iobuf) -@@ -1098,7 +1098,7 @@ cdef class Row: - tmp = self.coords[0:self.nextelement + 1] - else: - tmp = self.coords[self.nextelement - ( self.nrowsinbuf) + 1:self.nextelement + 1] -- self.bufcoords = numpy.array(tmp, dtype="uint64") -+ self.bufcoords = np.array(tmp, dtype="uint64") - recout = self.table._read_elements(self.bufcoords, self.iobuf) - self.bufcoords_data = PyArray_DATA(self.bufcoords) - self.nrowsread = self.nrowsread + self.nrowsinbuf -@@ -1144,7 +1144,7 @@ cdef class Row: - self.index_valid_data = PyArray_BYTES(self.indexvalid) - - # Is there any interesting information in this buffer? -- if not numpy.sometrue(self.indexvalid): -+ if not np.any(self.indexvalid): - # No, so take the next one - if self.step >= self.nrowsinbuf: - self.nextelement = self.nextelement + self.step -@@ -1448,7 +1448,7 @@ cdef class Row: - if self.mod_elements is None: - # Initialize an array for keeping the modified elements - # (just in case Row.update() would be used) -- self.mod_elements = numpy.empty(shape=self.nrowsinbuf, dtype=SizeType) -+ self.mod_elements = np.empty(shape=self.nrowsinbuf, dtype=SizeType) - # We need a different copy for self.iobuf here - self.iobufcpy = self.iobuf.copy() - -@@ -1620,7 +1620,7 @@ cdef class Row: - if self.exist_enum_cols: - if key in self.colenums: - enum = self.colenums[key] -- for cenval in numpy.asarray(value).flat: -+ for cenval in np.asarray(value).flat: - enum(cenval) # raises ``ValueError`` on invalid values - - # Get the field to be modified -diff --git a/tables/tests/test_array.py b/tables/tests/test_array.py -index 00e827b16c..e0e6b4ee41 100644 ---- a/tables/tests/test_array.py -+++ b/tables/tests/test_array.py -@@ -2031,8 +2031,9 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase): - a = nparr[key] - b = tbarr[key] - self.assertTrue( -- np.alltrue(a == b), -- "NumPy array and PyTables selections does not match.") -+ np.all(a == b), -+ "NumPy array and PyTables selections does not match." 
-+ ) - - def test01b_read(self): - """Test for point-selections (read, integer keys).""" -@@ -2046,7 +2047,7 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase): - a = nparr[key] - b = tbarr[key] - self.assertTrue( -- np.alltrue(a == b), -+ np.all(a == b), - "NumPy array and PyTables selections does not match.") - - def test01c_read(self): -@@ -2100,7 +2101,7 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase): - a = nparr[:] - b = tbarr[:] - self.assertTrue( -- np.alltrue(a == b), -+ np.all(a == b), - "NumPy array and PyTables modifications does not match.") - - def test02b_write(self): -@@ -2118,7 +2119,7 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase): - a = nparr[:] - b = tbarr[:] - self.assertTrue( -- np.alltrue(a == b), -+ np.all(a == b), - "NumPy array and PyTables modifications does not match.") - - def test02c_write(self): -@@ -2136,7 +2137,7 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase): - a = nparr[:] - b = tbarr[:] - self.assertTrue( -- np.alltrue(a == b), -+ np.all(a == b), - "NumPy array and PyTables modifications does not match.") - - -@@ -2281,7 +2282,7 @@ class FancySelectionTestCase(common.TempFileMixin, common.PyTablesTestCase): - a = nparr[key] - b = tbarr[key] - self.assertTrue( -- np.alltrue(a == b), -+ np.all(a == b), - "NumPy array and PyTables selections does not match.") - - def test01b_read(self): -@@ -2333,8 +2334,9 @@ class FancySelectionTestCase(common.TempFileMixin, common.PyTablesTestCase): - a = nparr[:] - b = tbarr[:] - self.assertTrue( -- np.alltrue(a == b), -- "NumPy array and PyTables modifications does not match.") -+ np.all(a == b), -+ "NumPy array and PyTables modifications does not match." -+ ) - - def test02b_write(self): - """Test for fancy-selections (working selections, write, broadcast).""" -@@ -2353,7 +2355,7 @@ class FancySelectionTestCase(common.TempFileMixin, common.PyTablesTestCase): - # print("NumPy modified array:", a) - # print("PyTables modifyied array:", b) - self.assertTrue( -- np.alltrue(a == b), -+ np.all(a == b), - "NumPy array and PyTables modifications does not match.") - - -@@ -2603,7 +2605,7 @@ class TestCreateArrayArgs(common.TempFileMixin, common.PyTablesTestCase): - atom=atom) - - def test_kwargs_obj_shape_error(self): -- # atom = Atom.from_dtype(numpy.dtype('complex')) -+ # atom = Atom.from_dtype(np.dtype('complex')) - shape = self.shape + self.shape - self.assertRaises(TypeError, - self.h5file.create_array, -diff --git a/tables/tests/test_attributes.py b/tables/tests/test_attributes.py -index 64d725e531..51d218d5a1 100644 ---- a/tables/tests/test_attributes.py -+++ b/tables/tests/test_attributes.py -@@ -578,7 +578,7 @@ class CreateTestCase(common.TempFileMixin, common.PyTablesTestCase): - # In the views old implementation PyTAbles performa a copy of the - # array: - # -- # value = numpy.array(value) -+ # value = np.array(value) - # - # in order to get a contiguous array. - # Unfortunately array with swapped axis are copyed as they are so -@@ -1374,7 +1374,7 @@ class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase): - - # The next raises a `TypeError` when unpickled. 
See: - # http://projects.scipy.org/numpy/ticket/1037 -- # self.array.attrs.pq = numpy.array(['']) -+ # self.array.attrs.pq = np.array(['']) - self.array.attrs.pq = np.array([''], dtype="U1") - - # Check the results -diff --git a/tables/tests/test_carray.py b/tables/tests/test_carray.py -index b35697f0f4..182f92b54a 100644 ---- a/tables/tests/test_carray.py -+++ b/tables/tests/test_carray.py -@@ -2674,7 +2674,7 @@ class TestCreateCArrayArgs(common.TempFileMixin, common.PyTablesTestCase): - atom=atom) - - def test_kwargs_obj_shape_error(self): -- # atom = Atom.from_dtype(numpy.dtype('complex')) -+ # atom = Atom.from_dtype(np.dtype('complex')) - shape = self.shape + self.shape - self.assertRaises(TypeError, - self.h5file.create_carray, -@@ -2697,7 +2697,7 @@ class TestCreateCArrayArgs(common.TempFileMixin, common.PyTablesTestCase): - shape=self.shape) - - def test_kwargs_obj_atom_shape_error_02(self): -- # atom = Atom.from_dtype(numpy.dtype('complex')) -+ # atom = Atom.from_dtype(np.dtype('complex')) - shape = self.shape + self.shape - self.assertRaises(TypeError, - self.h5file.create_carray, -diff --git a/tables/tests/test_earray.py b/tables/tests/test_earray.py -index 8e400e2c24..aa3a66de4c 100644 ---- a/tables/tests/test_earray.py -+++ b/tables/tests/test_earray.py -@@ -2756,7 +2756,7 @@ class TestCreateEArrayArgs(common.TempFileMixin, common.PyTablesTestCase): - atom=atom) - - def test_kwargs_obj_shape_error(self): -- # atom = tables.Atom.from_dtype(numpy.dtype('complex')) -+ # atom = tables.Atom.from_dtype(np.dtype('complex')) - shape = self.shape + self.shape - self.assertRaises(TypeError, - self.h5file.create_earray, -@@ -2779,7 +2779,7 @@ class TestCreateEArrayArgs(common.TempFileMixin, common.PyTablesTestCase): - shape=self.shape) - - def test_kwargs_obj_atom_shape_error_02(self): -- # atom = tables.Atom.from_dtype(numpy.dtype('complex')) -+ # atom = tables.Atom.from_dtype(np.dtype('complex')) - shape = self.shape + self.shape - self.assertRaises(TypeError, - self.h5file.create_earray, -diff --git a/tables/tests/test_indexvalues.py b/tables/tests/test_indexvalues.py -index 47503d80c4..66312e2d66 100644 ---- a/tables/tests/test_indexvalues.py -+++ b/tables/tests/test_indexvalues.py -@@ -868,7 +868,7 @@ class SelectValuesTestCase(common.TempFileMixin, common.PyTablesTestCase): - table2 = self.h5file.root.table2 - - # Convert the limits to the appropriate type -- # il = numpy.string_(self.il) -+ # il = np.string_(self.il) - sl = np.string_(self.sl) - - # Do some selections and check the results -@@ -1119,7 +1119,7 @@ class SelectValuesTestCase(common.TempFileMixin, common.PyTablesTestCase): - table2 = self.h5file.root.table2 - - # Convert the limits to the appropriate type -- # il = numpy.int32(self.il) -+ # il = np.int32(self.il) - sl = np.uint16(self.sl) - - # Do some selections and check the results -@@ -1315,7 +1315,7 @@ class SelectValuesTestCase(common.TempFileMixin, common.PyTablesTestCase): - table2 = self.h5file.root.table2 - - # Convert the limits to the appropriate type -- # il = numpy.float32(self.il) -+ # il = np.float32(self.il) - sl = np.float64(self.sl) - - # Do some selections and check the results -diff --git a/tables/tests/test_queries.py b/tables/tests/test_queries.py -index b9406a3e9b..f27c263ac8 100644 ---- a/tables/tests/test_queries.py -+++ b/tables/tests/test_queries.py -@@ -63,13 +63,13 @@ func_info = {'log10': np.log10, 'log': np.log, 'exp': np.exp, - if hasattr(np, 'float16'): - type_info['float16'] = (np.float16, float) - # if hasattr(numpy, 'float96'): --# 
type_info['float96'] = (numpy.float96, float) -+# type_info['float96'] = (np.float96, float) - # if hasattr(numpy, 'float128'): --# type_info['float128'] = (numpy.float128, float) -+# type_info['float128'] = (np.float128, float) - # if hasattr(numpy, 'complex192'): --# type_info['complex192'] = (numpy.complex192, complex) -+# type_info['complex192'] = (np.complex192, complex) - # if hasattr(numpy, 'complex256'): --# type_info['complex256'] = (numpy.complex256, complex) -+# type_info['complex256'] = (np.complex256, complex) - - sctype_from_type = {type_: info[0] for (type_, info) in type_info.items()} - """Maps PyTables types to NumPy scalar types.""" -diff --git a/tables/tests/test_timetype.py b/tables/tests/test_timetype.py -index 3d4d7a996d..d8ac3b1895 100644 ---- a/tables/tests/test_timetype.py -+++ b/tables/tests/test_timetype.py -@@ -229,8 +229,10 @@ class CompareTestCase(common.TempFileMixin, common.PyTablesTestCase): - "Stored and retrieved values do not match.") - - comp = (recarr['t64col'][0] == np.array((wtime, wtime))) -- self.assertTrue(np.alltrue(comp), -- "Stored and retrieved values do not match.") -+ self.assertTrue( -+ np.all(comp), -+ "Stored and retrieved values do not match." -+ ) - - def test02b_CompareTable(self): - """Comparing several written and read time values in a Table.""" -@@ -262,8 +264,10 @@ class CompareTestCase(common.TempFileMixin, common.PyTablesTestCase): - if common.verbose: - print("Original values:", orig_val) - print("Retrieved values:", recarr['t32col'][:]) -- self.assertTrue(np.alltrue(recarr['t32col'][:] == orig_val), -- "Stored and retrieved values do not match.") -+ self.assertTrue( -+ np.all(recarr['t32col'][:] == orig_val), -+ "Stored and retrieved values do not match." -+ ) - - # Time64 column. - orig_val = np.arange(0, nrows * 2, dtype=np.int32) + 0.012 -@@ -365,16 +369,20 @@ class UnalignedTestCase(common.TempFileMixin, common.PyTablesTestCase): - if common.verbose: - print("Original values:", orig_val) - print("Retrieved values:", recarr['i8col'][:]) -- self.assertTrue(np.alltrue(recarr['i8col'][:] == orig_val), -- "Stored and retrieved values do not match.") -+ self.assertTrue( -+ np.all(recarr['i8col'][:] == orig_val), -+ "Stored and retrieved values do not match." -+ ) - - # Time32 column. - orig_val = np.arange(nrows, dtype=np.int32) - if common.verbose: - print("Original values:", orig_val) - print("Retrieved values:", recarr['t32col'][:]) -- self.assertTrue(np.alltrue(recarr['t32col'][:] == orig_val), -- "Stored and retrieved values do not match.") -+ self.assertTrue( -+ np.all(recarr['t32col'][:] == orig_val), -+ "Stored and retrieved values do not match." -+ ) - - # Time64 column. - orig_val = np.arange(0, nrows * 2, dtype=np.int32) + 0.012 -@@ -413,8 +421,10 @@ class BigEndianTestCase(common.PyTablesTestCase): - if common.verbose: - print("Retrieved values:", earr) - print("Should look like:", orig_val) -- self.assertTrue(np.alltrue(earr == orig_val), -- "Retrieved values do not match the expected values.") -+ self.assertTrue( -+ np.all(earr == orig_val), -+ "Retrieved values do not match the expected values." 
-+ ) - - def test00b_Read64Array(self): - """Checking Time64 type in arrays.""" -@@ -448,8 +458,10 @@ class BigEndianTestCase(common.PyTablesTestCase): - if common.verbose: - print("Retrieved values:", t32) - print("Should look like:", orig_val) -- self.assertTrue(np.alltrue(t32 == orig_val), -- "Retrieved values do not match the expected values.") -+ self.assertTrue( -+ np.all(t32 == orig_val), -+ "Retrieved values do not match the expected values." -+ ) - - def test01b_ReadNestedColumn(self): - """Checking Time64 type in nested columns.""" -diff --git a/tables/tests/test_types.py b/tables/tests/test_types.py -index 11c9af4ba1..c41449dbd0 100644 ---- a/tables/tests/test_types.py -+++ b/tables/tests/test_types.py -@@ -86,7 +86,8 @@ class RangeTestCase(common.TempFileMixin, common.PyTablesTestCase): - rec['var3'] = np.array(i % self.maxshort).astype('i2') - rec['var5'] = float(i) - -- with self.assertRaises(TypeError): -+ # Numpy 1.25 -> ValueError -+ with self.assertRaises((TypeError, ValueError)): - rec['var4'] = "124c" - - rec['var6'] = float(i) -diff --git a/tables/tests/test_vlarray.py b/tables/tests/test_vlarray.py -index e7dab1884a..4418533804 100644 ---- a/tables/tests/test_vlarray.py -+++ b/tables/tests/test_vlarray.py -@@ -697,7 +697,7 @@ class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase): - "int32": np.int32, - "uint32": np.uint32, - "int64": np.int64, -- # "UInt64": numpy.int64, # Unavailable in some platforms -+ # "uint64": np.int64, # Unavailable in some platforms - } - if common.verbose: - print('\n', '-=' * 30) -@@ -795,7 +795,7 @@ class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase): - "int32": np.int32, - "uint32": np.uint32, - "int64": np.int64, -- # "UInt64": numpy.int64, # Unavailable in some platforms -+ # "uint64": np.int64, # Unavailable in some platforms - } - if common.verbose: - print('\n', '-=' * 30) -@@ -851,7 +851,7 @@ class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase): - "int32": np.int32, - "uint32": np.uint32, - "int64": np.int64, -- # "UInt64": numpy.int64, # Unavailable in some platforms -+ # "uint64": np.int64, # Unavailable in some platforms - } - if common.verbose: - print('\n', '-=' * 30) -diff --git a/tables/utils.py b/tables/utils.py -index 8b5d0267d6..e11e5ba73b 100644 ---- a/tables/utils.py -+++ b/tables/utils.py -@@ -66,7 +66,10 @@ def idx2long(index): - """Convert a possible index into a long int.""" - - try: -- return int(index) -+ if hasattr(index, "item"): -+ return index.item() -+ else: -+ return int(index) - except Exception: - raise TypeError("not an integer type.") - -@@ -207,7 +210,7 @@ def lazyattr(fget): - >>> del obj.attribute - Traceback (most recent call last): - ... -- AttributeError: can't delete attribute 'attribute' -+ AttributeError: ... - - .. 
warning:: - -diff --git a/tables/utilsextension.pyx b/tables/utilsextension.pyx -index 664e1ea58e..6515a9a764 100644 ---- a/tables/utilsextension.pyx -+++ b/tables/utilsextension.pyx -@@ -20,7 +20,7 @@ try: - except ImportError: - zlib_imported = False - --import numpy -+import numpy as np - - from .description import Description, Col - from .misc.enum import Enum -@@ -528,7 +528,7 @@ def encode_filename(object filename): - if hasattr(os, 'fspath'): - filename = os.fspath(filename) - -- if isinstance(filename, (unicode, numpy.str_)): -+ if isinstance(filename, (unicode, np.str_)): - # if type(filename) is unicode: - encoding = sys.getfilesystemencoding() - encname = filename.encode(encoding, 'replace') -@@ -949,16 +949,16 @@ def read_f_attr(hid_t file_id, str attr_name): - size = H5ATTRget_attribute_string(file_id, c_attr_name, &attr_value, &cset) - if size == 0: - if cset == H5T_CSET_UTF8: -- retvalue = numpy.unicode_('') -+ retvalue = np.unicode_('') - else: -- retvalue = numpy.bytes_(b'') -+ retvalue = np.bytes_(b'') - else: - retvalue = (attr_value).rstrip(b'\x00') - if cset == H5T_CSET_UTF8: - retvalue = retvalue.decode('utf-8') -- retvalue = numpy.str_(retvalue) -+ retvalue = np.str_(retvalue) - else: -- retvalue = numpy.bytes_(retvalue) # bytes -+ retvalue = np.bytes_(retvalue) # bytes - - # Important to release attr_value, because it has been malloc'ed! - if attr_value: -@@ -1041,7 +1041,7 @@ def enum_from_hdf5(hid_t enumId, str byteorder): - "supported at this moment") - - dtype = atom.dtype -- npvalue = numpy.array((0,), dtype=dtype) -+ npvalue = np.array((0,), dtype=dtype) - rbuf = PyArray_DATA(npvalue) - - # Get the name and value of each of the members -@@ -1485,15 +1485,15 @@ cdef int load_reference(hid_t dataset_id, hobj_ref_t *refbuf, size_t item_size, - # read entire dataset as numpy array - stype_, shape_ = hdf5_to_np_ext_type(reftype_id, pure_numpy_types=True, atom=True) - if stype_ == "_ref_": -- dtype_ = numpy.dtype("O", shape_) -+ dtype_ = np.dtype("O", shape_) - else: -- dtype_ = numpy.dtype(stype_, shape_) -+ dtype_ = np.dtype(stype_, shape_) - shape = [] - for j in range(rank): - shape.append(dims[j]) - shape = tuple(shape) - -- nprefarr = numpy.empty(dtype=dtype_, shape=shape) -+ nprefarr = np.empty(dtype=dtype_, shape=shape) - nparr[i] = [nprefarr] # box the array in a list to store it as one object - if stype_ == "_ref_": - newrefbuf = malloc(nprefarr.size * item_size) -diff --git a/tables/vlarray.py b/tables/vlarray.py -index e1b4b2c0da..caa6672813 100644 ---- a/tables/vlarray.py -+++ b/tables/vlarray.py -@@ -644,7 +644,7 @@ class VLArray(hdf5extension.VLArray, Leaf): - a_list = vlarray[4:1000:2] - a_list2 = vlarray[[0,2]] # get list of coords - a_list3 = vlarray[[0,-2]] # negative values accepted -- a_list4 = vlarray[numpy.array([True,...,False])] # array of bools -+ a_list4 = vlarray[np.array([True,...,False])] # array of bools - - """ - -@@ -814,7 +814,7 @@ class VLArray(hdf5extension.VLArray, Leaf): - """Read rows specified in `coords`.""" - rows = [] - for coord in coords: -- rows.append(self.read(int(coord), int(coord) + 1, 1)[0]) -+ rows.append(self.read(idx2long(coord), idx2long(coord) + 1, 1)[0]) - return rows - - def _g_copy_with_stats(self, group, name, start, stop, step, --- -2.41.0 - diff --git a/0005-python3.12-cython-fix-slice-indexing.patch b/0005-python3.12-cython-fix-slice-indexing.patch deleted file mode 100644 index bca1ed1..0000000 --- a/0005-python3.12-cython-fix-slice-indexing.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 
a92166c725bdd7ba758eaef34cf870d9bb1b469d Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= -Date: Tue, 18 Jul 2023 14:44:37 +0200 -Subject: [PATCH 5/5] python3.12: cython: fix slice indexing - -Several tests fail with: - -Traceback (most recent call last): - File "tables/tableextension.pyx", line 1507, in tables.tableextension.Row.__getitem__ -KeyError: slice(None, None, 2) -During handling of the above exception, another exception occurred: -Traceback (most recent call last): - File "tables/tableextension.pyx", line 124, in tables.tableextension.get_nested_field_cache -KeyError: slice(None, None, 2) -During handling of the above exception, another exception occurred: -Traceback (most recent call last): - File "/builddir/build/BUILDROOT/python-tables-3.7.0-8.fc39.aarch64/usr/lib64/python3.12/site-packages/tables/tests/test_tables.py", line 489, in test01a_extslice - result = [rec[::2] for rec in table.iterrows() - ~~~^^^^^ - File "tables/tableextension.pyx", line 1511, in tables.tableextension.Row.__getitem__ - File "tables/tableextension.pyx", line 131, in tables.tableextension.get_nested_field_cache - File "tables/utilsextension.pyx", line 838, in tables.utilsextension.get_nested_field -AttributeError: 'slice' object has no attribute 'encode' - -It seems that __getitem__() expected TypeError from get_nested_field_cache() when -called with a slice. get_nested_field_cache() calls .encode() on the argument, so -this obviously fails with AttributeError. No idea how it worked before. ---- - tables/utilsextension.pyx | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/tables/utilsextension.pyx b/tables/utilsextension.pyx -index 6515a9a764..bd27b9585f 100644 ---- a/tables/utilsextension.pyx -+++ b/tables/utilsextension.pyx -@@ -899,10 +899,13 @@ def get_nested_field(recarray, fieldname): - """Get the maybe nested field named `fieldname` from the `recarray`. - - The `fieldname` may be a simple field name or a nested field name -- with slah-separated components. -+ with slash-separated components. 
- - """ - -+ if not isinstance(fieldname, str): -+ raise TypeError -+ - cdef bytes name = fieldname.encode('utf-8') - try: - if strchr(name, 47) != NULL: # ord('/') == 47 --- -2.41.0 - diff --git a/0006-Add-workaround-for-staticmethod-invocation-error.patch b/0006-Add-workaround-for-staticmethod-invocation-error.patch deleted file mode 100644 index 23ab01b..0000000 --- a/0006-Add-workaround-for-staticmethod-invocation-error.patch +++ /dev/null @@ -1,40 +0,0 @@ -From aed0f4e320c2868f33fac48c807a91f832080ce0 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= -Date: Tue, 25 Jul 2023 16:56:34 +0200 -Subject: [PATCH] Add workaround for staticmethod invocation error - -ERROR: None (tables.tests.test_hdf5compat.ContiguousCompoundAppendTestCase.None) ----------------------------------------------------------------------- -Traceback (most recent call last): - File "/builddir/build/BUILDROOT/python-tables-3.8.0-2.fc39.x86_64/usr/lib64/python3.12/site-packages/tables/tests/test_hdf5compat.py", line 258, in test - self.assertRaises(tb.HDF5ExtError, tbl.row.append) - File "/usr/lib64/python3.12/unittest/case.py", line 778, in assertRaises - return context.handle('assertRaises', args, kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/usr/lib64/python3.12/unittest/case.py", line 238, in handle - callable_obj(*args, **kwargs) - File "tables/tableextension.pyx", line 1362, in tables.tableextension.Row.append - File "/builddir/build/BUILDROOT/python-tables-3.8.0-2.fc39.x86_64/usr/lib64/python3.12/site-packages/tables/exceptions.py", line 106, in __init__ - self.h5backtrace = self._dump_h5_backtrace() - ^^^^^^^^^^^^^^^^^^^^^^^^^ -TypeError: _dump_h5_backtrace() takes no arguments (1 given) ---- - tables/utilsextension.pyx | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tables/utilsextension.pyx b/tables/utilsextension.pyx -index bd27b9585f..c19b70a896 100644 ---- a/tables/utilsextension.pyx -+++ b/tables/utilsextension.pyx -@@ -378,7 +378,7 @@ cdef herr_t e_walk_cb(unsigned n, const H5E_error_t *err, void *data) noexcept w - return 0 - - --def _dump_h5_backtrace(): -+def _dump_h5_backtrace(self=None): - cdef object bt = [] - - if H5Ewalk(H5E_DEFAULT, H5E_WALK_DOWNWARD, e_walk_cb, bt) < 0: --- -2.41.0 - diff --git a/0007-Drop-misguided-check.patch b/0007-Drop-misguided-check.patch deleted file mode 100644 index 8b7ce72..0000000 --- a/0007-Drop-misguided-check.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 374778166947a8bfe02731c1d2cb81a87bf9fe07 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= -Date: Wed, 16 Aug 2023 15:47:09 +0200 -Subject: [PATCH] Drop misguided check - -Among other things, this check creates a security attack by loading -code from an untrusted path ($PWD). It's also completely unnecessary. -Let's drop it while upstream figures out what to do. - -https://github.com/PyTables/PyTables/pull/1000/commits/bbb02dd0b2f137ab52b447fe5be304805c2dc7ea#r1295938857 ---- - tables/__init__.py | 21 --------------------- - 1 file changed, 21 deletions(-) - -diff --git a/tables/__init__.py b/tables/__init__.py -index 168ee4e108..276ce2563c 100644 ---- a/tables/__init__.py -+++ b/tables/__init__.py -@@ -6,27 +6,6 @@ PyTables is a package for managing hierarchical datasets and designed - to efficiently cope with extremely large amounts of data. 
- - """ --import os --from ctypes import cdll --import platform -- -- --# Load the blosc2 library, and if not found in standard locations, --# try this directory (it should be automatically copied in setup.py). --current_dir = os.path.dirname(__file__) --platform_system = platform.system() --blosc2_lib = "libblosc2" --if platform_system == "Linux": -- blosc2_lib += ".so" --elif platform_system == "Darwin": -- blosc2_lib += ".dylib" --else: -- blosc2_lib += ".dll" --try: -- cdll.LoadLibrary(blosc2_lib) --except OSError: -- cdll.LoadLibrary(os.path.join(current_dir, blosc2_lib)) -- - - # Necessary imports to get versions stored on the cython extension - from .utilsextension import get_hdf5_version as _get_hdf5_version diff --git a/python-tables.spec b/python-tables.spec index 4e3dc8c..e2ce3e9 100644 --- a/python-tables.spec +++ b/python-tables.spec @@ -6,7 +6,7 @@ Summary: HDF5 support in Python Name: python-tables -Version: 3.8.0 +Version: 3.9.1 Release: %autorelease #Source0: https://github.com/PyTables/PyTables/archive/%{commit}/PyTables-%{commit}.tar.gz Source0: https://github.com/PyTables/PyTables/archive/v%{version}/python-tables-%{version}.tar.gz @@ -18,12 +18,6 @@ Source0: https://github.com/PyTables/PyTables/archive/v%{version}/python- Source1: https://github.com/PyTables/PyTables/releases/download/v%{manual_version}/pytablesmanual-%{manual_version}.pdf Patch1: 0001-Skip-tests-that-fail-on-s390x.patch -Patch2: 0002-Relax-dependency-on-blosc2.patch -Patch3: 0003-Fix-build-errors-when-compiled-using-cython-3.0.0b1.patch -Patch4: 0004-Fix-compatibility-with-numpu-v1.25.patch -Patch5: 0005-python3.12-cython-fix-slice-indexing.patch -Patch6: 0006-Add-workaround-for-staticmethod-invocation-error.patch -Patch7: 0007-Drop-misguided-check.patch License: BSD URL: https://www.pytables.org @@ -74,13 +68,6 @@ The %{name}-doc package contains the documentation for %{name}. %patch 1 -p1 %endif -%patch 2 -p1 -%patch 3 -p1 -%patch 4 -p1 -%patch 5 -p1 -%patch 6 -p1 -%patch 7 -p1 - cp -a %{SOURCE1} pytablesmanual.pdf # Make sure we are not using anything from the bundled blosc by mistake @@ -88,7 +75,7 @@ find c-blosc -mindepth 1 -maxdepth 1 -name hdf5 -prune -o -exec rm -r {} + # circumvent the broken attempt to detect library location sed -r -i \ - '/def get_blosc2_directories\(\):/a \ \ \ \ return "%{_includedir}","%{_libdir}"' \ + '/def get_blosc2_directories\(\):/a \ \ \ \ return Path("%{_includedir}"),Path("%{_libdir}"),None' \ setup.py %build diff --git a/sources b/sources index 02050b5..6d4ffe0 100644 --- a/sources +++ b/sources @@ -1,2 +1,2 @@ -SHA512 (python-tables-3.8.0.tar.gz) = 552d0217cebbf6342f97215caae4519daa88dd333c190da298fb2d583e2350456c63d7e4c37b0019e9bab714fd7ea41361207a06ab4bf13f8255dcce3f0712f5 +SHA512 (python-tables-3.9.1.tar.gz) = 3a20e84cb180a92b5abf8d759d8c9a9ba7c39696b6528bd8f0e4415322c1b488d86a8ba4399a97f6e173775795d640878db375bfe60cbb1436238708c7669038 SHA512 (pytablesmanual-3.3.0.pdf) = beb068a301e20370fdab9f942bed271b94872f409087cba8f7127967a51e45a233d8e6df1aaa12aad532282aec324b0c823b1646be31a5b71e5e7c035ba06270
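For context on the dropped 0002-Relax-dependency-on-blosc2.patch: the `~=` ("compatible release") specifier really does expand to the pair of constraints described in that patch's commit message. A minimal sketch, assuming the `packaging` library (already listed as a PyTables build requirement) is installed; it is not part of this commit:

```python
# Sketch only: why "blosc2 ~=2.0.0" rejected blosc2 2.2.2, and why the
# (now dropped) patch loosened the specifier to ">=2.0.0".
from packaging.specifiers import SpecifierSet

compatible = SpecifierSet("~=2.0.0")  # PEP 440: equivalent to ">=2.0.0, ==2.0.*"
relaxed = SpecifierSet(">=2.0.0")     # the replacement used in the patch

print("2.2.2" in compatible)  # False -- pinned to the 2.0.x series
print("2.2.2" in relaxed)     # True  -- any release from 2.0.0 on is accepted
```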
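The bulk of the dropped 0004 patch is a mechanical migration for NumPy 1.25, where the `np.alltrue` and `np.sometrue` aliases started emitting deprecation warnings (they were later removed in NumPy 2.0). A minimal sketch of the rename, using throwaway arrays rather than PyTables data:

```python
# Sketch of the substitution applied throughout 0004: the deprecated
# aliases np.alltrue / np.sometrue become the canonical np.all / np.any.
import numpy as np

a = np.arange(5)
b = a.copy()

assert np.all(a == b)      # was: np.alltrue(a == b)
assert not np.any(a != b)  # was: np.sometrue(a != b)
```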
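The `self=None` workaround in the dropped 0006 patch relies on a plain-Python detail: functions are descriptors, so a module-level function stored as a class attribute receives the instance as its first argument when looked up on an instance — which is what started happening once Cython 3 compiled `_dump_h5_backtrace` into a binding function. A pure-Python sketch with hypothetical names, not PyTables' actual exceptions.py code:

```python
# Hypothetical names throughout -- this only illustrates the binding
# behaviour that 0006 works around.
def dump_backtrace(self=None):  # tolerate the implicit instance argument
    return ["H5Ewalk output would go here"]

class DemoError(Exception):
    _dump_h5_backtrace = dump_backtrace  # plain function as class attribute

err = DemoError()
# Instance lookup binds the function, so `err` is passed as `self`.
# Without the `self=None` default this call would raise:
#   TypeError: dump_backtrace() takes 0 positional arguments but 1 was given
print(err._dump_h5_backtrace())
```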