@@ -265,14 +265,14 @@ class unchecked_reference {
    const unsigned char *data_;
    // Storing the shape & strides in local variables (i.e. these arrays) allows the compiler to
    // make large performance gains on big, nested loops, but requires compile-time dimensions
-    conditional_t<Dynamic, const size_t *, std::array<size_t, (size_t) Dims>>
-            shape_, strides_;
+    conditional_t<Dynamic, const size_t *, std::array<size_t, (size_t) Dims>> shape_;
+    conditional_t<Dynamic, const ssize_t *, std::array<ssize_t, (size_t) Dims>> strides_;
    const size_t dims_;

    friend class pybind11::array;
    // Constructor for compile-time dimensions:
    template <bool Dyn = Dynamic>
-    unchecked_reference(const void *data, const size_t *shape, const size_t *strides, enable_if_t<!Dyn, size_t>)
+    unchecked_reference(const void *data, const size_t *shape, const ssize_t *strides, enable_if_t<!Dyn, size_t>)
    : data_{reinterpret_cast<const unsigned char *>(data)}, dims_{Dims} {
        for (size_t i = 0; i < dims_; i++) {
            shape_[i] = shape[i];
@@ -281,7 +281,7 @@ class unchecked_reference {
    }
    // Constructor for runtime dimensions:
    template <bool Dyn = Dynamic>
-    unchecked_reference(const void *data, const size_t *shape, const size_t *strides, enable_if_t<Dyn, size_t> dims)
+    unchecked_reference(const void *data, const size_t *shape, const ssize_t *strides, enable_if_t<Dyn, size_t> dims)
    : data_{reinterpret_cast<const unsigned char *>(data)}, shape_{shape}, strides_{strides}, dims_{dims} {}

public:
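Aside (illustrative sketch, not code from this diff): strides_ becomes signed here because strides, unlike extents, can legitimately be negative (e.g. a reversed or sliced view). A minimal standalone example of why that matters for offset arithmetic; the name element_at is hypothetical and std::ptrdiff_t stands in for pybind11's ssize_t:

    #include <cstddef>
    #include <vector>

    // Compute a byte offset with signed strides: a negative stride moves the
    // pointer backwards instead of wrapping the way unsigned arithmetic would.
    const unsigned char *element_at(const unsigned char *data,
                                    const std::vector<std::size_t> &index,
                                    const std::vector<std::ptrdiff_t> &strides) {
        const unsigned char *p = data;
        for (std::size_t i = 0; i < index.size(); i++)
            p += static_cast<std::ptrdiff_t>(index[i]) * strides[i];
        return p;
    }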
@@ -573,12 +573,12 @@ class array : public buffer {
    }

    /// Strides of the array
-    const size_t* strides() const {
-        return reinterpret_cast<const size_t *>(detail::array_proxy(m_ptr)->strides);
+    const ssize_t* strides() const {
+        return reinterpret_cast<const ssize_t *>(detail::array_proxy(m_ptr)->strides);
    }

    /// Stride along a given axis
-    size_t strides(size_t dim) const {
+    ssize_t strides(size_t dim) const {
        if (dim >= ndim())
            fail_dim_check(dim, "invalid axis");
        return strides()[dim];
@@ -702,9 +702,9 @@ class array : public buffer {
            throw std::domain_error("array is not writeable");
    }

-    static std::vector<Py_intptr_t> default_strides(const std::vector<Py_intptr_t>& shape, size_t itemsize) {
+    static std::vector<ssize_t> default_strides(const std::vector<size_t>& shape, size_t itemsize) {
        auto ndim = shape.size();
-        std::vector<Py_intptr_t> strides(ndim);
+        std::vector<ssize_t> strides(ndim);
        if (ndim) {
            std::fill(strides.begin(), strides.end(), itemsize);
            for (size_t i = 0; i < ndim - 1; i++)
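Aside (illustrative sketch, not code from this diff): default_strides fills every stride with itemsize and then scales the leading axes so the result is C-contiguous (last axis varies fastest). A standalone version of the same computation, assuming the extents fit in a signed type; c_strides is a hypothetical name and std::ptrdiff_t stands in for ssize_t:

    #include <cstddef>
    #include <vector>

    // C-order strides: stride[i] = itemsize * product of shape[i+1 .. ndim-1].
    std::vector<std::ptrdiff_t> c_strides(const std::vector<std::size_t> &shape, std::size_t itemsize) {
        std::vector<std::ptrdiff_t> strides(shape.size(), static_cast<std::ptrdiff_t>(itemsize));
        for (std::size_t i = shape.size(); i > 1; --i)
            strides[i - 2] = strides[i - 1] * static_cast<std::ptrdiff_t>(shape[i - 1]);
        return strides;
    }
    // e.g. c_strides({2, 3, 4}, 8) yields {96, 32, 8}.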
@@ -1133,7 +1133,7 @@ array_iterator<T> array_end(const buffer_info& buffer) {

class common_iterator {
public:
-    using container_type = std::vector<size_t>;
+    using container_type = std::vector<ssize_t>;
    using value_type = container_type::value_type;
    using size_type = container_type::size_type;

@@ -1175,7 +1175,7 @@ template <size_t N> class multi_array_iterator {
        for (size_t i = 0; i < shape.size(); ++i)
            m_shape[i] = static_cast<container_type::value_type>(shape[i]);

-        container_type strides(shape.size());
+        std::vector<ssize_t> strides(shape.size());
        for (size_t i = 0; i < N; ++i)
            init_common_iterator(buffers[i], shape, m_common_iterator[i], strides);
    }
@@ -1203,15 +1203,15 @@ template <size_t N> class multi_array_iterator {

    void init_common_iterator(const buffer_info &buffer,
                              const std::vector<size_t> &shape,
-                             common_iter &iterator, container_type &strides) {
+                             common_iter &iterator, std::vector<ssize_t> &strides) {
        auto buffer_shape_iter = buffer.shape.rbegin();
        auto buffer_strides_iter = buffer.strides.rbegin();
        auto shape_iter = shape.rbegin();
        auto strides_iter = strides.rbegin();

        while (buffer_shape_iter != buffer.shape.rend()) {
            if (*shape_iter == *buffer_shape_iter)
-                *strides_iter = static_cast<size_t>(*buffer_strides_iter);
+                *strides_iter = *buffer_strides_iter;
            else
                *strides_iter = 0;

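Aside (illustrative sketch, not code from this diff): init_common_iterator copies a buffer's (now signed) strides in reverse, substituting 0 wherever the buffer's extent does not match the broadcast extent, so that axis is repeated instead of advanced. A standalone version of the same idea; broadcast_strides is a hypothetical name, std::ptrdiff_t stands in for ssize_t, and the buffer is assumed to have no more axes than the broadcast shape:

    #include <cstddef>
    #include <vector>

    // Right-align a buffer's strides against the broadcast shape; axes the buffer
    // lacks, or whose extent differs, get stride 0 so the same element is reused.
    std::vector<std::ptrdiff_t> broadcast_strides(const std::vector<std::size_t> &shape,
                                                  const std::vector<std::size_t> &buf_shape,
                                                  const std::vector<std::ptrdiff_t> &buf_strides) {
        std::vector<std::ptrdiff_t> out(shape.size(), 0);
        std::size_t o = shape.size(), b = buf_shape.size();
        while (b > 0) {
            --o; --b;
            out[o] = (shape[o] == buf_shape[b]) ? buf_strides[b] : 0;
        }
        return out;
    }
    // e.g. broadcast_strides({4, 3}, {3}, {8}) yields {0, 8}.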
@@ -1283,10 +1283,11 @@ broadcast_trivial broadcast(const std::array<buffer_info, N> &buffers, size_t &n

        // Check for C contiguity (but only if previous inputs were also C contiguous)
        if (trivial_broadcast_c) {
-            size_t expect_stride = buffers[i].itemsize;
+            ssize_t expect_stride = static_cast<ssize_t>(buffers[i].itemsize);
            auto end = buffers[i].shape.crend();
-            for (auto shape_iter = buffers[i].shape.crbegin(), stride_iter = buffers[i].strides.crbegin();
-                 trivial_broadcast_c && shape_iter != end; ++shape_iter, ++stride_iter) {
+            auto shape_iter = buffers[i].shape.crbegin();
+            auto stride_iter = buffers[i].strides.crbegin();
+            for (; trivial_broadcast_c && shape_iter != end; ++shape_iter, ++stride_iter) {
                if (expect_stride == *stride_iter)
                    expect_stride *= *shape_iter;
                else
@@ -1296,10 +1297,11 @@ broadcast_trivial broadcast(const std::array<buffer_info, N> &buffers, size_t &n

        // Check for Fortran contiguity (if previous inputs were also F contiguous)
        if (trivial_broadcast_f) {
-            size_t expect_stride = buffers[i].itemsize;
+            ssize_t expect_stride = static_cast<ssize_t>(buffers[i].itemsize);
            auto end = buffers[i].shape.cend();
-            for (auto shape_iter = buffers[i].shape.cbegin(), stride_iter = buffers[i].strides.cbegin();
-                 trivial_broadcast_f && shape_iter != end; ++shape_iter, ++stride_iter) {
+            auto shape_iter = buffers[i].shape.cbegin();
+            auto stride_iter = buffers[i].strides.cbegin();
+            for (; trivial_broadcast_f && shape_iter != end; ++shape_iter, ++stride_iter) {
                if (expect_stride == *stride_iter)
                    expect_stride *= *shape_iter;
                else
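Aside (illustrative sketch, not code from this diff): both loops above apply the same test, now in signed arithmetic: a buffer is contiguous if, walking the axes in the appropriate order, each stride equals itemsize times the product of the extents already visited. A standalone version of the C-order variant for a single buffer; is_c_contiguous is a hypothetical name and std::ptrdiff_t stands in for ssize_t:

    #include <cstddef>
    #include <vector>

    // C-contiguity check: from the last axis backwards, each stride must equal
    // itemsize times the product of all later extents.
    bool is_c_contiguous(const std::vector<std::size_t> &shape,
                         const std::vector<std::ptrdiff_t> &strides,
                         std::size_t itemsize) {
        auto expect = static_cast<std::ptrdiff_t>(itemsize);
        for (std::size_t i = shape.size(); i-- > 0; ) {
            if (strides[i] != expect)
                return false;
            expect *= static_cast<std::ptrdiff_t>(shape[i]);
        }
        return true;
    }
    // A double array of shape {2, 3} with strides {24, 8} passes; a Fortran-ordered
    // one with strides {8, 16} fails at the last axis.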
@@ -1336,20 +1338,20 @@ struct vectorize_helper {
        auto trivial = broadcast(buffers, ndim, shape);

        size_t size = 1;
-        std::vector<size_t> strides(ndim);
+        std::vector<ssize_t> strides(ndim);
        if (ndim > 0) {
            if (trivial == broadcast_trivial::f_trivial) {
-                strides[0] = sizeof(Return);
+                strides[0] = static_cast<ssize_t>(sizeof(Return));
                for (size_t i = 1; i < ndim; ++i) {
-                    strides[i] = strides[i - 1] * shape[i - 1];
+                    strides[i] = strides[i - 1] * static_cast<ssize_t>(shape[i - 1]);
                    size *= shape[i - 1];
                }
                size *= shape[ndim - 1];
            }
            else {
-                strides[ndim-1] = sizeof(Return);
+                strides[ndim-1] = static_cast<ssize_t>(sizeof(Return));
                for (size_t i = ndim - 1; i > 0; --i) {
-                    strides[i - 1] = strides[i] * shape[i];
+                    strides[i - 1] = strides[i] * static_cast<ssize_t>(shape[i]);
                    size *= shape[i];
                }
                size *= shape[0];
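Worked example (illustrative, not from this diff): with ndim == 3, shape {2, 3, 4} and sizeof(Return) == 8, the f_trivial branch above produces column-major strides {8, 16, 48}, the else branch produces row-major strides {96, 32, 8}, and size comes out to 24 in both cases.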