 #pragma once

 #include "numpy.h"
+#include "numpy/ndarraytypes.h"

 #if defined(__INTEL_COMPILER)
 #  pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)
@@ -139,14 +140,19 @@ template <typename Type_> struct EigenProps {
         const auto dims = a.ndim();
         if (dims < 1 || dims > 2)
             return false;
-
+        bool is_pyobject = false;
+        if (npy_format_descriptor<Scalar>::value == npy_api::NPY_OBJECT_)
+            is_pyobject = true;
+        ssize_t scalar_size = (is_pyobject ? static_cast<ssize_t>(sizeof(PyObject*)) :
+                                             static_cast<ssize_t>(sizeof(Scalar)));
         if (dims == 2) { // Matrix type: require exact match (or dynamic)

             EigenIndex
                 np_rows = a.shape(0),
                 np_cols = a.shape(1),
-                np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
-                np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
+                np_rstride = a.strides(0) / scalar_size,
+                np_cstride = a.strides(1) / scalar_size;
+
             if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols))
                 return false;

@@ -156,7 +162,7 @@ template <typename Type_> struct EigenProps {
         // Otherwise we're storing an n-vector.  Only one of the strides will be used, but whichever
         // is used, we want the (single) numpy stride value.
         const EigenIndex n = a.shape(0),
-              stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));
+              stride = a.strides(0) / scalar_size;

         if (vector) { // Eigen type is a compile-time vector
             if (fixed && size != n)
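
Note on the two hunks above: numpy reports strides in bytes, while Eigen strides count elements, so the divisor has to match what numpy actually stores per element. The following standalone sketch illustrates that rule; `element_stride` and its parameters are illustrative names, not part of the patch, and `void *` stands in for `PyObject *` to keep it self-contained.

    // Sketch only: element-count stride from a numpy byte stride.
    #include <cstddef>

    inline std::ptrdiff_t element_stride(std::ptrdiff_t byte_stride,
                                         bool is_pyobject,
                                         std::size_t sizeof_scalar) {
        // An object array stores one PyObject* slot per element, so that is
        // the width to divide by, regardless of sizeof(Scalar).
        const std::size_t stored = is_pyobject ? sizeof(void *) : sizeof_scalar;
        return byte_stride / static_cast<std::ptrdiff_t>(stored);
    }
    // e.g. a C-contiguous 2x3 object array on a 64-bit platform has byte
    // strides {24, 8}: element_stride(24, true, sizeof_scalar) == 3 for any
    // Scalar, while dividing by a larger sizeof(Scalar) would truncate to a
    // wrong stride.
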
@@ -207,11 +213,53 @@ template <typename Type_> struct EigenProps {
 template <typename props> handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
     constexpr ssize_t elem_size = sizeof(typename props::Scalar);
     array a;
-    if (props::vector)
-        a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);
-    else
-        a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },
-                  src.data(), base);
+    using Scalar = typename props::Type::Scalar;
+    bool is_pyobject = npy_format_descriptor<Scalar>::value == npy_api::NPY_OBJECT_;
+
+    if (!is_pyobject) {
+        if (props::vector)
+            a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);
+        else
+            a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },
+                      src.data(), base);
+    }
+    else {
+        if (props::vector) {
+            a = array(
+                npy_format_descriptor<Scalar>::dtype(),
+                { (size_t) src.size() },
+                nullptr,
+                base
+            );
+            auto policy = base ? return_value_policy::automatic_reference : return_value_policy::copy;
+            for (ssize_t i = 0; i < src.size(); ++i) {
+                const Scalar src_val = props::fixed_rows ? src(0, i) : src(i, 0);
+                auto value_ = reinterpret_steal<object>(make_caster<Scalar>::cast(src_val, policy, base));
+                if (!value_)
+                    return handle();
+                auto p = a.mutable_data(i);
+                PyArray_SETITEM(a.ptr(), p, value_.release().ptr());
+            }
+        }
+        else {
+            a = array(
+                npy_format_descriptor<Scalar>::dtype(),
+                { (size_t) src.rows(), (size_t) src.cols() },
+                nullptr,
+                base
+            );
+            auto policy = base ? return_value_policy::automatic_reference : return_value_policy::copy;
+            for (ssize_t i = 0; i < src.rows(); ++i) {
+                for (ssize_t j = 0; j < src.cols(); ++j) {
+                    auto value_ = reinterpret_steal<object>(make_caster<Scalar>::cast(src(i, j), policy, base));
+                    if (!value_)
+                        return handle();
+                    auto p = a.mutable_data(i, j);
+                    PyArray_SETITEM(a.ptr(), p, value_.release().ptr());
+                }
+            }
+        }
+    }

     if (!writeable)
         array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
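
For context, a sketch of what the NPY_OBJECT_ branch above enables at the binding level. `MyScalar` is a hypothetical user type; the patch presumes an `npy_format_descriptor<MyScalar>` specialization exists whose `value` is `npy_api::NPY_OBJECT_` and whose `dtype()` returns the numpy object dtype (that specialization is not shown here).

    #include <pybind11/pybind11.h>
    #include <pybind11/eigen.h>
    #include <Eigen/Dense>
    namespace py = pybind11;

    using MatrixXs = Eigen::Matrix<MyScalar, Eigen::Dynamic, Eigen::Dynamic>;

    PYBIND11_MODULE(example, m) {
        // Returning MatrixXs now takes the object branch of eigen_array_cast:
        // a dtype=object array is allocated with a null data pointer (so numpy
        // owns the buffer), then each element is converted through
        // make_caster<MyScalar>::cast and written with PyArray_SETITEM.
        m.def("make", [](Eigen::Index rows, Eigen::Index cols) {
            return MatrixXs(rows, cols);
        });
    }
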
@@ -265,14 +313,49 @@ struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
         auto fits = props::conformable(buf);
         if (!fits)
             return false;
-
+        int result = 0;
         // Allocate the new type, then build a numpy reference into it
         value = Type(fits.rows, fits.cols);
-        auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
-        if (dims == 1) ref = ref.squeeze();
-        else if (ref.ndim() == 1) buf = buf.squeeze();
-
-        int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
+        bool is_pyobject = npy_format_descriptor<Scalar>::value == npy_api::NPY_OBJECT_;
+
+        if (!is_pyobject) {
+            auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
+            if (dims == 1) ref = ref.squeeze();
+            else if (ref.ndim() == 1) buf = buf.squeeze();
+            result =
+                detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
+        }
+        else {
+            if (dims == 1) {
+                if (Type::RowsAtCompileTime == Eigen::Dynamic)
+                    value.resize(buf.shape(0), 1);
+                if (Type::ColsAtCompileTime == Eigen::Dynamic)
+                    value.resize(1, buf.shape(0));
+
+                for (ssize_t i = 0; i < buf.shape(0); ++i) {
+                    auto p = buf.mutable_data(i);
+                    make_caster<Scalar> conv_val;
+                    if (!conv_val.load(PyArray_GETITEM(buf.ptr(), p), convert))
+                        return false;
+                    value(i) = cast_op<Scalar>(conv_val);
+                }
+            } else {
+                if (Type::RowsAtCompileTime == Eigen::Dynamic || Type::ColsAtCompileTime == Eigen::Dynamic) {
+                    value.resize(buf.shape(0), buf.shape(1));
+                }
+                for (ssize_t i = 0; i < buf.shape(0); ++i) {
+                    for (ssize_t j = 0; j < buf.shape(1); ++j) {
+                        // p is the const void pointer to the item
+                        auto p = buf.mutable_data(i, j);
+                        make_caster<Scalar> conv_val;
+                        if (!conv_val.load(PyArray_GETITEM(buf.ptr(), p),
+                                           convert))
+                            return false;
+                        value(i, j) = cast_op<Scalar>(conv_val);
+                    }
+                }
+            }
+        }

         if (result < 0) { // Copy failed!
             PyErr_Clear();
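
One detail worth calling out in the dims == 1 branch above: a 1-D numpy input is mapped onto whichever vector shape the Eigen type allows, and for a fully dynamic matrix the second resize wins, yielding a 1 x n row. A minimal Eigen-only sketch of that rule (function and variable names are illustrative, not from the patch):

    #include <Eigen/Dense>
    #include <cassert>

    // Mirrors the patch's dims == 1 resize logic for a length-n input.
    template <typename Mat>
    void resize_for_1d(Mat &value, Eigen::Index n) {
        if (Mat::RowsAtCompileTime == Eigen::Dynamic)
            value.resize(n, 1);
        if (Mat::ColsAtCompileTime == Eigen::Dynamic)
            value.resize(1, n);
    }

    int main() {
        Eigen::Matrix<double, Eigen::Dynamic, 1> col;   // dynamic rows
        resize_for_1d(col, 4);
        assert(col.rows() == 4 && col.cols() == 1);

        Eigen::Matrix<double, 1, Eigen::Dynamic> row;   // dynamic cols
        resize_for_1d(row, 4);
        assert(row.rows() == 1 && row.cols() == 4);

        Eigen::MatrixXd both;                           // both dynamic
        resize_for_1d(both, 4);
        assert(both.rows() == 1 && both.cols() == 4);   // second resize wins
    }
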
@@ -424,13 +507,19 @@ struct type_caster<
     // storage order conversion.  (Note that we refuse to use this temporary copy when loading an
     // argument for a Ref<M> with M non-const, i.e. a read-write reference).
     Array copy_or_ref;
+    typename std::remove_cv<PlainObjectType>::type val;
 public:
     bool load(handle src, bool convert) {
         // First check whether what we have is already an array of the right type.  If not, we can't
         // avoid a copy (because the copy is also going to do type conversion).
         bool need_copy = !isinstance<Array>(src);

         EigenConformable<props::row_major> fits;
+        bool is_pyobject = false;
+        if (npy_format_descriptor<Scalar>::value == npy_api::NPY_OBJECT_) {
+            is_pyobject = true;
+            need_copy = true;
+        }
         if (!need_copy) {
             // We don't need a converting copy, but we also need to check whether the strides are
             // compatible with the Ref's stride requirements
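
The new `val` member gives the converted elements storage whose lifetime matches the caster itself. A rough sketch of the ownership chain the object-dtype path builds, with types simplified to MatrixXd (illustrative only): the caster owns `val`, and `ref` is reset to a Type constructed from it in the last hunk below, so the Eigen::Ref handed to the bound function never points into the numpy buffer.

    #include <Eigen/Dense>
    #include <memory>

    struct ref_caster_sketch {
        // Per-element converted copy (stands in for remove_cv<PlainObjectType>).
        Eigen::MatrixXd val;
        // What the caster ultimately hands to the bound function.
        std::unique_ptr<Eigen::Ref<const Eigen::MatrixXd>> ref;

        void finish_load() {
            // Analogue of `ref.reset(new Type(val))`: the Ref aliases val,
            // which stays alive as long as the caster does.
            ref.reset(new Eigen::Ref<const Eigen::MatrixXd>(val));
        }
    };
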
@@ -453,15 +542,55 @@ struct type_caster<
             // We need to copy: If we need a mutable reference, or we're not supposed to convert
             // (either because we're in the no-convert overload pass, or because we're explicitly
             // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading.
-            if (!convert || need_writeable) return false;
+            if (!is_pyobject && (!convert || need_writeable)) {
+                return false;
+            }

             Array copy = Array::ensure(src);
             if (!copy) return false;
             fits = props::conformable(copy);
-            if (!fits || !fits.template stride_compatible<props>())
+            if (!fits || !fits.template stride_compatible<props>()) {
                 return false;
-            copy_or_ref = std::move(copy);
-            loader_life_support::add_patient(copy_or_ref);
+            }
+
+            if (!is_pyobject) {
+                copy_or_ref = std::move(copy);
+                loader_life_support::add_patient(copy_or_ref);
+            }
+            else {
+                auto dims = copy.ndim();
+                if (dims == 1) {
+                    if (Type::RowsAtCompileTime == Eigen::Dynamic || Type::ColsAtCompileTime == Eigen::Dynamic) {
+                        val.resize(copy.shape(0), 1);
+                    }
+                    for (ssize_t i = 0; i < copy.shape(0); ++i) {
+                        auto p = copy.mutable_data(i);
+                        make_caster<Scalar> conv_val;
+                        if (!conv_val.load(PyArray_GETITEM(copy.ptr(), p),
+                                           convert))
+                            return false;
+                        val(i) = cast_op<Scalar>(conv_val);
+
+                    }
+                } else {
+                    if (Type::RowsAtCompileTime == Eigen::Dynamic || Type::ColsAtCompileTime == Eigen::Dynamic) {
+                        val.resize(copy.shape(0), copy.shape(1));
+                    }
+                    for (ssize_t i = 0; i < copy.shape(0); ++i) {
+                        for (ssize_t j = 0; j < copy.shape(1); ++j) {
+                            // p is the const void pointer to the item
+                            auto p = copy.mutable_data(i, j);
+                            make_caster<Scalar> conv_val;
+                            if (!conv_val.load(PyArray_GETITEM(copy.ptr(), p),
+                                               convert))
+                                return false;
+                            val(i, j) = cast_op<Scalar>(conv_val);
+                        }
+                    }
+                }
+                ref.reset(new Type(val));
+                return true;
+            }
         }

         ref.reset();
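
Finally, a usage note on the copy path above, reusing the hypothetical MyScalar/MatrixXs setup from the earlier sketch: with object dtype, need_copy is forced, so a Ref argument is always backed by the caster's `val` rather than by the numpy array. Note also that the `!is_pyobject` guard on the need_writeable check means even a writeable Ref now loads from the temporary copy, so in-place modifications are not reflected back to the caller's array.

    // Both overloads accept dtype=object input; neither can mutate the
    // caller's numpy array in place, because the Ref aliases a converted copy.
    m.def("rows", [](const Eigen::Ref<const MatrixXs> &m) { return m.rows(); });
    m.def("scale", [](Eigen::Ref<MatrixXs> m) { /* writes stay local */ });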