 #pragma once

 #include "numpy.h"
+#include "numpy/ndarraytypes.h"

 #if defined(__INTEL_COMPILER)
 #  pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)
@@ -139,14 +140,19 @@ template <typename Type_> struct EigenProps {
         const auto dims = a.ndim();
         if (dims < 1 || dims > 2)
             return false;
-
+        bool is_pyobject = false;
+        if (npy_format_descriptor<Scalar>::value == npy_api::NPY_OBJECT_)
+            is_pyobject = true;
+        ssize_t scalar_size = (is_pyobject ? static_cast<ssize_t>(sizeof(PyObject*)) :
+                                             static_cast<ssize_t>(sizeof(Scalar)));
         if (dims == 2) { // Matrix type: require exact match (or dynamic)

             EigenIndex
                 np_rows = a.shape(0),
                 np_cols = a.shape(1),
-                np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
-                np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
+                np_rstride = a.strides(0) / scalar_size,
+                np_cstride = a.strides(1) / scalar_size;
+
             if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols))
                 return false;
@@ -156,7 +162,7 @@ template <typename Type_> struct EigenProps {
         // Otherwise we're storing an n-vector. Only one of the strides will be used, but whichever
         // is used, we want the (single) numpy stride value.
         const EigenIndex n = a.shape(0),
-              stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));
+              stride = a.strides(0) / scalar_size;

         if (vector) { // Eigen type is a compile-time vector
             if (fixed && size != n)
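A side note, not part of the patch: the two hunks above only change the divisor used to turn NumPy byte strides into element strides. An object-dtype array stores one PyObject* per element, so the divisor must be the pointer size rather than sizeof(Scalar). A minimal, self-contained sketch of that arithmetic, with sizeof(void *) standing in for sizeof(PyObject*):

    #include <cassert>
    #include <cstddef>

    int main() {
        // A 3x2 C-contiguous array of dtype=object stores one pointer per element,
        // so its byte strides are {2 * sizeof(PyObject*), sizeof(PyObject*)}.
        const std::ptrdiff_t scalar_size = static_cast<std::ptrdiff_t>(sizeof(void *));
        const std::ptrdiff_t byte_strides[2] = {2 * scalar_size, scalar_size};

        // Dividing by the pointer size (not sizeof(Scalar)) recovers the element
        // strides {2, 1} that conformable() passes on to Eigen's Stride.
        assert(byte_strides[0] / scalar_size == 2);
        assert(byte_strides[1] / scalar_size == 1);
        return 0;
    }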
@@ -207,11 +213,52 @@ template <typename Type_> struct EigenProps {
 template <typename props> handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
     constexpr ssize_t elem_size = sizeof(typename props::Scalar);
     array a;
-    if (props::vector)
-        a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);
-    else
-        a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },
-                  src.data(), base);
+    using Scalar = typename props::Type::Scalar;
+    bool is_pyobject = npy_format_descriptor<Scalar>::value == npy_api::NPY_OBJECT_;
+
+    if (!is_pyobject) {
+        if (props::vector)
+            a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);
+        else
+            a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },
+                      src.data(), base);
+    }
+    else {
+        if (props::vector) {
+            a = array(
+                npy_format_descriptor<Scalar>::dtype(),
+                { (size_t) src.size() },
+                nullptr,
+                base
+            );
+            auto policy = base ? return_value_policy::automatic_reference : return_value_policy::copy;
+            for (ssize_t i = 0; i < src.size(); ++i) {
+                auto value_ = reinterpret_steal<object>(make_caster<Scalar>::cast(src(i, 0), policy, base));
+                if (!value_)
+                    return handle();
+                auto p = a.mutable_data(i);
+                PyArray_SETITEM(a.ptr(), p, value_.release().ptr());
+            }
+        }
+        else {
+            a = array(
+                npy_format_descriptor<Scalar>::dtype(),
+                { (size_t) src.rows(), (size_t) src.cols() },
+                nullptr,
+                base
+            );
+            auto policy = base ? return_value_policy::automatic_reference : return_value_policy::copy;
+            for (ssize_t i = 0; i < src.rows(); ++i) {
+                for (ssize_t j = 0; j < src.cols(); ++j) {
+                    auto value_ = reinterpret_steal<object>(make_caster<Scalar>::cast(src(i, j), policy, base));
+                    if (!value_)
+                        return handle();
+                    auto p = a.mutable_data(i, j);
+                    PyArray_SETITEM(a.ptr(), p, value_.release().ptr());
+                }
+            }
+        }
+    }

     if (!writeable)
         array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
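A side note, not part of the patch: the dtype=object branch of eigen_array_cast() allocates an empty object array and then fills it one element at a time. Roughly the same pattern can be written against pybind11's public API alone. The helper below is illustration only (its name and signature are made up); it turns a flat vector of py::object into a 2-D object array the way the loops above do with make_caster<Scalar>::cast() and PyArray_SETITEM:

    #include <pybind11/pybind11.h>
    #include <pybind11/numpy.h>
    #include <vector>

    namespace py = pybind11;

    // Build a rows x cols array with dtype=object from row-major Python objects.
    inline py::array make_object_array(py::ssize_t rows, py::ssize_t cols,
                                       const std::vector<py::object> &items) {
        py::array a(py::dtype("O"), {rows, cols});   // empty object array, elements start out NULL/None
        for (py::ssize_t i = 0; i < rows; ++i)
            for (py::ssize_t j = 0; j < cols; ++j)
                // Python-level item assignment plays the role of PyArray_SETITEM here.
                a[py::make_tuple(i, j)] = items[static_cast<size_t>(i * cols + j)];
        return a;
    }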
@@ -265,14 +312,47 @@ struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
         auto fits = props::conformable(buf);
         if (!fits)
             return false;
-
+        int result = 0;
         // Allocate the new type, then build a numpy reference into it
         value = Type(fits.rows, fits.cols);
-        auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
-        if (dims == 1) ref = ref.squeeze();
-        else if (ref.ndim() == 1) buf = buf.squeeze();
-
-        int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
+        bool is_pyobject = npy_format_descriptor<Scalar>::value == npy_api::NPY_OBJECT_;
+
+        if (!is_pyobject) {
+            auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
+            if (dims == 1) ref = ref.squeeze();
+            else if (ref.ndim() == 1) buf = buf.squeeze();
+            result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
+        }
+        else {
+            if (dims == 1) {
+                if (Type::RowsAtCompileTime == Eigen::Dynamic || Type::ColsAtCompileTime == Eigen::Dynamic) {
+                    value.resize(buf.shape(0), 1);
+                }
+                for (ssize_t i = 0; i < buf.shape(0); ++i) {
+                    auto p = buf.mutable_data(i);
+                    make_caster<Scalar> conv_val;
+                    if (!conv_val.load(PyArray_GETITEM(buf.ptr(), p), convert))
+                        return false;
+                    value(i) = cast_op<Scalar>(conv_val);
+                }
+            } else {
+                if (Type::RowsAtCompileTime == Eigen::Dynamic || Type::ColsAtCompileTime == Eigen::Dynamic) {
+                    value.resize(buf.shape(0), buf.shape(1));
+                }
+                for (ssize_t i = 0; i < buf.shape(0); ++i) {
+                    for (ssize_t j = 0; j < buf.shape(1); ++j) {
+                        // p is the const void pointer to the item
+                        auto p = buf.mutable_data(i, j);
+                        make_caster<Scalar> conv_val;
+                        if (!conv_val.load(PyArray_GETITEM(buf.ptr(), p), convert))
+                            return false;
+                        value(i, j) = cast_op<Scalar>(conv_val);
+                    }
+                }
+            }
+        }

         if (result < 0) { // Copy failed!
             PyErr_Clear();
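A side note, not part of the patch: the load path above does the reverse of the cast path. It walks the object array, pulls each element out with PyArray_GETITEM, and runs it through make_caster<Scalar>::load() and cast_op<Scalar>(). The helper below is a rough public-API analogue for illustration only (the name and the choice of double are made up); it shows the same per-element convert-and-assign pattern using py::cast in place of the internal caster machinery:

    #include <pybind11/pybind11.h>
    #include <pybind11/numpy.h>
    #include <Eigen/Dense>
    #include <stdexcept>

    namespace py = pybind11;

    // Convert a 2-D array (e.g. dtype=object holding Python floats) into an Eigen matrix.
    inline Eigen::MatrixXd object_array_to_matrix(const py::array &a) {
        if (a.ndim() != 2)
            throw std::runtime_error("expected a 2-D array");
        Eigen::MatrixXd out(a.shape(0), a.shape(1));      // like value.resize(...) above
        for (py::ssize_t i = 0; i < a.shape(0); ++i)
            for (py::ssize_t j = 0; j < a.shape(1); ++j)
                // Item lookup + cast stands in for PyArray_GETITEM + conv_val.load + cast_op.
                out(i, j) = a[py::make_tuple(i, j)].cast<double>();
        return out;
    }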
@@ -424,13 +504,19 @@ struct type_caster<
     // storage order conversion. (Note that we refuse to use this temporary copy when loading an
     // argument for a Ref<M> with M non-const, i.e. a read-write reference).
     Array copy_or_ref;
+    typename std::remove_cv<PlainObjectType>::type val;
 public:
     bool load(handle src, bool convert) {
         // First check whether what we have is already an array of the right type. If not, we can't
         // avoid a copy (because the copy is also going to do type conversion).
         bool need_copy = !isinstance<Array>(src);

         EigenConformable<props::row_major> fits;
+        bool is_pyobject = false;
+        if (npy_format_descriptor<Scalar>::value == npy_api::NPY_OBJECT_) {
+            is_pyobject = true;
+            need_copy = true;
+        }
         if (!need_copy) {
             // We don't need a converting copy, but we also need to check whether the strides are
             // compatible with the Ref's stride requirements
@@ -453,15 +539,55 @@ struct type_caster<
             // We need to copy: If we need a mutable reference, or we're not supposed to convert
             // (either because we're in the no-convert overload pass, or because we're explicitly
             // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading.
-            if (!convert || need_writeable) return false;
+            if (!is_pyobject && (!convert || need_writeable)) {
+                return false;
+            }

             Array copy = Array::ensure(src);
             if (!copy) return false;
             fits = props::conformable(copy);
-            if (!fits || !fits.template stride_compatible<props>())
+            if (!fits || !fits.template stride_compatible<props>()) {
                 return false;
-            copy_or_ref = std::move(copy);
-            loader_life_support::add_patient(copy_or_ref);
+            }
+
+            if (!is_pyobject) {
+                copy_or_ref = std::move(copy);
+                loader_life_support::add_patient(copy_or_ref);
+            }
+            else {
+                auto dims = copy.ndim();
+                if (dims == 1) {
+                    if (Type::RowsAtCompileTime == Eigen::Dynamic || Type::ColsAtCompileTime == Eigen::Dynamic) {
+                        val.resize(copy.shape(0), 1);
+                    }
+                    for (ssize_t i = 0; i < copy.shape(0); ++i) {
+                        auto p = copy.mutable_data(i);
+                        make_caster<Scalar> conv_val;
+                        if (!conv_val.load(PyArray_GETITEM(copy.ptr(), p), convert))
+                            return false;
+                        val(i) = cast_op<Scalar>(conv_val);
+                    }
+                } else {
+                    if (Type::RowsAtCompileTime == Eigen::Dynamic || Type::ColsAtCompileTime == Eigen::Dynamic) {
+                        val.resize(copy.shape(0), copy.shape(1));
+                    }
+                    for (ssize_t i = 0; i < copy.shape(0); ++i) {
+                        for (ssize_t j = 0; j < copy.shape(1); ++j) {
+                            // p is the const void pointer to the item
+                            auto p = copy.mutable_data(i, j);
+                            make_caster<Scalar> conv_val;
+                            if (!conv_val.load(PyArray_GETITEM(copy.ptr(), p), convert))
+                                return false;
+                            val(i, j) = cast_op<Scalar>(conv_val);
+                        }
+                    }
+                }
+                ref.reset(new Type(val));
+                return true;
+            }
         }

         ref.reset();
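A side note, not part of the patch: for dtype=object the Ref<> caster above never maps the NumPy buffer directly. It converts into the new by-value member val and then makes ref own a copy of it via ref.reset(new Type(val)), so, as written, writes made through the Ref<> inside the bound function land in that temporary rather than in the caller's array. A minimal, self-contained sketch of that ownership pattern (names are placeholders, with Eigen::MatrixXd standing in for the object-scalar matrix type):

    #include <memory>
    #include <Eigen/Dense>

    struct ref_loader_sketch {
        Eigen::MatrixXd val;                      // plays the role of the caster's new `val` member
        std::unique_ptr<Eigen::MatrixXd> ref;     // plays the role of the caster's `ref`

        void load_converted(const Eigen::MatrixXd &converted) {
            val = converted;                      // result of the element-wise conversion loop
            ref.reset(new Eigen::MatrixXd(val));  // mirrors `ref.reset(new Type(val))`
            // The bound function sees *ref, a copy; the original NumPy array is untouched.
        }
    };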