python3-tables: rebuild against python3-numpy

Andrew J. Hesford 2024-07-01 13:57:27 -04:00
parent d59529b03b
commit 9eb1a95810
2 changed files with 150 additions and 1 deletion

@@ -0,0 +1,149 @@
diff -ur a/setup.py b/setup.py
--- a/setup.py 2024-07-31 09:34:03.585371800 -0400
+++ b/setup.py 2024-07-31 09:35:01.472775930 -0400
@@ -161,7 +161,7 @@
# https://pip.pypa.io/en/stable/reference/pip_install.html#installation-order
# at this point we can be sure pip has already installed numpy
numpy_incl = pkg_resources.resource_filename(
- "numpy", "core/include"
+ "numpy", "_core/include"
)
for ext in self.extensions:
@@ -505,7 +505,7 @@
# -----------------------------------------------------------------
- def_macros = [("NDEBUG", 1)]
+ def_macros = [("NDEBUG", 1), ("NPY_TARGET_VERSION", "NPY_1_20_API_VERSION"),]
# Define macros for Windows platform
if os.name == "nt":
def_macros.append(("WIN32", 1))
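
The setup.py hunk tracks NumPy 2.0's rename of numpy/core to numpy/_core, and the new NPY_TARGET_VERSION macro pins the C API to the NumPy 1.20 feature level so the extension compiles against both major series. As a rough illustration (not part of the patch), numpy.get_include() resolves the header directory on either layout without hard-coding a path:

    # Version-agnostic lookup of NumPy's C headers: resolves to
    # .../numpy/core/include on 1.x and .../numpy/_core/include on 2.x.
    import numpy as np
    print(np.get_include())
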
diff -ur a/src/utils.c b/src/utils.c
--- a/src/utils.c 2024-07-31 09:34:03.588371821 -0400
+++ b/src/utils.c 2024-07-31 09:36:08.648172551 -0400
@@ -767,8 +767,8 @@
return float_id;
}
- H5Tinsert(complex_id, "r", HOFFSET(npy_complex64, real), float_id);
- H5Tinsert(complex_id, "i", HOFFSET(npy_complex64, imag), float_id);
+ H5Tinsert(complex_id, "r", 0, float_id);
+ H5Tinsert(complex_id, "i", 4, float_id);
H5Tclose(float_id);
return complex_id;
}
@@ -792,8 +792,8 @@
return float_id;
}
- H5Tinsert(complex_id, "r", HOFFSET(npy_complex128, real), float_id);
- H5Tinsert(complex_id, "i", HOFFSET(npy_complex128, imag), float_id);
+ H5Tinsert(complex_id, "r", 0, float_id);
+ H5Tinsert(complex_id, "i", 8, float_id);
H5Tclose(float_id);
return complex_id;
}
@@ -824,8 +824,8 @@
return err;
}
- H5Tinsert(complex_id, "r", HOFFSET(npy_complex192, real), float_id);
- H5Tinsert(complex_id, "i", HOFFSET(npy_complex192, imag), float_id);
+ H5Tinsert(complex_id, "r", 0, float_id);
+ H5Tinsert(complex_id, "i", 12, float_id);
H5Tclose(float_id);
return complex_id;
}
@@ -856,8 +856,8 @@
return err;
}
- H5Tinsert(complex_id, "r", HOFFSET(npy_complex256, real), float_id);
- H5Tinsert(complex_id, "i", HOFFSET(npy_complex256, imag), float_id);
+ H5Tinsert(complex_id, "r", 0, float_id);
+ H5Tinsert(complex_id, "i", 16, float_id);
H5Tclose(float_id);
return complex_id;
}
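
The utils.c hunks drop HOFFSET() on the npy_complex* fields because NumPy 2.0 redefines these types as C99 complex types, which no longer expose .real/.imag struct members. The literal offsets rely on the guaranteed complex layout: real part first at offset 0, imaginary part immediately after, each the width of the base float type (hence 0/4 for complex64, 0/8 for complex128, and so on). A minimal sketch of that layout, illustrative only:

    # A complex64 is two float32 values back to back: real at byte
    # offset 0, imaginary at byte offset 4.
    import numpy as np

    z = np.array([1.5 + 2.5j], dtype=np.complex64)
    parts = z.view(np.float32)   # reinterpret the 8 bytes as two float32
    assert parts[0] == 1.5       # real part, offset 0
    assert parts[1] == 2.5       # imaginary part, offset 4
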
diff -ur a/tables/atom.py b/tables/atom.py
--- a/tables/atom.py 2024-07-31 09:34:03.589371829 -0400
+++ b/tables/atom.py 2024-07-31 09:36:43.663352202 -0400
@@ -276,15 +276,15 @@
>>> atom1 = StringAtom(itemsize=10) # same as ``atom2``
>>> atom2 = Atom.from_kind('string', 10) # same as ``atom1``
>>> atom3 = IntAtom()
- >>> atom1 == 'foo'
+ >>> bool(atom1 == 'foo')
False
- >>> atom1 == atom2
+ >>> bool(atom1 == atom2)
True
- >>> atom2 != atom1
+ >>> bool(atom2 != atom1)
False
- >>> atom1 == atom3
+ >>> bool(atom1 == atom3)
False
- >>> atom3 != atom2
+ >>> bool(atom3 != atom2)
True
"""
diff -ur a/tables/index.py b/tables/index.py
--- a/tables/index.py 2024-07-31 09:34:03.591371843 -0400
+++ b/tables/index.py 2024-07-31 09:38:32.692911595 -0400
@@ -581,7 +581,8 @@
# Add a second offset in this case
# First normalize the number of rows
offset2 = (nrow % self.nslicesblock) * slicesize // lbucket
- idx += offset2
+ assert offset2 < 2**(indsize*8)
+ idx += np.asarray(offset2).astype(idx.dtype)
# Add the last row at the beginning of arr & idx (if needed)
if (indsize == 8 and nelementsILR > 0):
# It is possible that the values in LR are already sorted.
@@ -622,11 +623,11 @@
show_stats("Entering final_idx32", tref)
# Do an upcast first in order to add the offset.
idx = idx.astype('uint64')
- idx += offset
+ idx += np.asarray(offset).astype(idx.dtype)
# The next partition is valid up to table sizes of
# 2**30 * 2**18 = 2**48 bytes, that is, 256 Tera-elements,
# which should be a safe figure, at least for a while.
- idx //= self.lbucket
+ idx //= np.asarray(self.lbucket).astype(idx.dtype)
# After the division, we can downsize the indexes to 'uint32'
idx = idx.astype('uint32')
if profile:
@@ -2002,7 +2003,7 @@
else:
self.indicesLR._read_index_slice(start, stop, idx)
if indsize == 8:
- idx //= lbucket
+ idx //= np.asarray(lbucket).astype(idx.dtype)
elif indsize == 2:
# The chunkmap size cannot be never larger than 'int_'
idx = idx.astype("int_")
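
The index.py hunks pre-cast the scalar operand of the in-place += and //= to the index array's dtype. Under NumPy 2.0 promotion, a uint64 array combined with a signed NumPy scalar promotes to float64, and the result can no longer be cast back in place, raising a casting error; converting the scalar first keeps both operands the same dtype. A rough sketch of the safe pattern, not from the patch itself:

    import numpy as np

    idx = np.array([1, 2, 3], dtype=np.uint64)
    offset = np.int64(10)   # uint64 += int64 would promote to float64
                            # and fail the in-place cast back to uint64
    idx += np.asarray(offset).astype(idx.dtype)  # same-dtype operand: safe
    idx //= np.asarray(4).astype(idx.dtype)
    print(idx)              # -> [2 3 3]
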
diff -ur a/tables/utils.py b/tables/utils.py
--- a/tables/utils.py 2024-07-31 09:34:03.606371947 -0400
+++ b/tables/utils.py 2024-07-31 09:39:41.246261430 -0400
@@ -75,7 +75,7 @@
# with atom from a generic python type. If copy is stated as True, it
# is assured that it will return a copy of the object and never the same
# object or a new one sharing the same memory.
-def convert_to_np_atom(arr, atom, copy=False):
+def convert_to_np_atom(arr, atom, copy=None):
"""Convert a generic object into a NumPy object compliant with atom."""
# First, convert the object into a NumPy array
@@ -109,7 +109,7 @@
# Check whether the object needs to be copied to make the operation
# safe to in-place conversion.
- copy = atom.type in ['time64']
+ copy = True if atom.type in ['time64'] else None
nparr = convert_to_np_atom(object, atom, copy)
# Finally, check the byteorder and change it if needed
byteorder = byteorders[nparr.dtype.byteorder]
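
The tables/utils.py hunks adapt to NumPy 2.0's revised copy semantics: np.array(..., copy=False) now raises ValueError whenever a copy cannot be avoided, while copy=None means copy only if needed (the old copy=False behavior). A minimal demonstration, assuming NumPy 2.x:

    import numpy as np

    data = [1, 2, 3]                 # building from a list always copies
    arr = np.array(data, copy=None)  # fine: copies because it must
    try:
        np.array(data, copy=False)   # 2.x refuses: a copy is unavoidable
    except ValueError as exc:
        print("copy=False rejected:", exc)
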

@@ -1,7 +1,7 @@
# Template file for 'python3-tables'
pkgname=python3-tables
version=3.7.0
-revision=3
+revision=4
build_style=python3-module
build_helper=numpy
# XXX: c-blosc (using internal for now)