Implement .par_map_inplace() and .par_map(f) -> Array #255

Closed without merging (19 commits).
9 changes: 7 additions & 2 deletions Cargo.toml
@@ -39,19 +39,24 @@ optional = true
blas-sys = { version = "0.6.5", optional = true, default-features = false }
matrixmultiply = { version = "0.1.13" }

rayon = { version = "0.6.0", optional = true }

[dependencies.serde]
version = "0.8.20"
optional = true

[dev-dependencies]
num_cpus = "1.2"

[features]
blas = ["blas-sys"]

# These features are used for testing
blas-openblas-sys = ["blas"]
-test = ["blas-openblas-sys"]
+test = ["blas-openblas-sys", "rayon"]

# This feature is used for docs
-docs = ["rustc-serialize", "serde"]
+docs = ["rustc-serialize", "serde", "rayon"]

[profile.release]
[profile.bench]
5 changes: 5 additions & 0 deletions README.rst
@@ -69,6 +69,11 @@ your `Cargo.toml`.
Uses ``blas-sys`` for pluggable backend, which needs to be configured
separately.

- ``rayon``

- Optional, compatible with Rust stable
- Implements parallelization via rayon 0.6.

How to use with cargo::

[dependencies]
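Following up on the ``rayon`` feature entry above, here is a minimal usage sketch (not part of the diff) of what the feature provides once the PR is applied and the optional rayon dependency is enabled; the array size and the closures are arbitrary illustrations.

extern crate ndarray;
extern crate rayon;

use ndarray::prelude::*;
use rayon::prelude::*;

fn main() {
    let mut a = Array2::<f64>::zeros((1024, 1024));

    // Parallel iteration over a mutable view, as exercised in the benchmarks below.
    a.view_mut().into_par_iter().for_each(|x| *x = x.exp());

    // The two methods added by this PR wrap the same machinery.
    a.par_map_inplace(|x| *x = 1. + *x);
    let _b = a.par_map(|x| x.sqrt());
}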
144 changes: 144 additions & 0 deletions benches/rayon.rs
@@ -0,0 +1,144 @@

#![feature(test)]

extern crate num_cpus;
extern crate test;
use test::Bencher;

#[macro_use(s)]
extern crate ndarray;
use ndarray::prelude::*;

extern crate rayon;
use rayon::prelude::*;

const EXP_N: usize = 128;

use std::cmp::max;

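// Configure rayon's global thread pool to use half of the logical CPUs (at least one).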
fn set_threads() {
let n = max(1, num_cpus::get() / 2);
let cfg = rayon::Configuration::new().set_num_threads(n);
let _ = rayon::initialize(cfg);
}

#[bench]
fn map_exp_regular(bench: &mut Bencher)
{
let mut a = Array2::<f64>::zeros((EXP_N, EXP_N));
a.swap_axes(0, 1);
bench.iter(|| {
a.mapv_inplace(|x| x.exp());
});
}

#[bench]
fn rayon_exp_regular(bench: &mut Bencher)
{
set_threads();
let mut a = Array2::<f64>::zeros((EXP_N, EXP_N));
a.swap_axes(0, 1);
bench.iter(|| {
a.view_mut().into_par_iter().for_each(|x| *x = x.exp());
});
}

const FASTEXP: usize = 800;

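// Cheap stand-in for `exp`: approximates exp(x) as (1 + x/n)^n with n = 1024.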
#[inline]
fn fastexp(x: f64) -> f64 {
let x = 1. + x/1024.;
x.powi(1024)
}

#[bench]
fn map_fastexp_regular(bench: &mut Bencher)
{
let mut a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
bench.iter(|| {
a.mapv_inplace(|x| fastexp(x))
});
}

#[bench]
fn rayon_fastexp_regular(bench: &mut Bencher)
{
set_threads();
let mut a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
bench.iter(|| {
a.view_mut().into_par_iter().for_each(|x| *x = fastexp(*x));
});
}

#[bench]
fn map_fastexp_cut(bench: &mut Bencher)
{
let mut a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
let mut a = a.slice_mut(s![.., ..-1]);
bench.iter(|| {
a.mapv_inplace(|x| fastexp(x))
});
}

#[bench]
fn rayon_fastexp_cut(bench: &mut Bencher)
{
set_threads();
let mut a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
let mut a = a.slice_mut(s![.., ..-1]);
bench.iter(|| {
a.view_mut().into_par_iter().for_each(|x| *x = fastexp(*x));
});
}

#[bench]
fn map_fastexp_by_axis(bench: &mut Bencher)
{
let mut a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
bench.iter(|| {
for mut sheet in a.axis_iter_mut(Axis(0)) {
sheet.mapv_inplace(fastexp)
}
});
}

#[bench]
fn rayon_fastexp_by_axis(bench: &mut Bencher)
{
set_threads();
let mut a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
bench.iter(|| {
a.axis_iter_mut(Axis(0)).into_par_iter()
.for_each(|mut sheet| sheet.mapv_inplace(fastexp));
});
}

#[bench]
fn par_map_inplace_fastexp(bench: &mut Bencher)
{
set_threads();
let mut a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
bench.iter(|| {
a.par_map_inplace(|x| *x = fastexp(*x));
});
}

#[bench]
fn map_fastexp(bench: &mut Bencher)
{
set_threads();
let a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
bench.iter(|| {
a.map(|x| fastexp(*x))
});
}

#[bench]
fn par_map_fastexp(bench: &mut Bencher)
{
set_threads();
let a = Array2::<f64>::zeros((FASTEXP, FASTEXP));
bench.iter(|| {
a.par_map(|x| fastexp(*x))
});
}
55 changes: 55 additions & 0 deletions src/impl_methods.rs
@@ -23,6 +23,8 @@ use super::ZipExt;
use dimension::IntoDimension;
use dimension::{axes_of, Axes};

use rayon::prelude::*;

use {
NdIndex,
AxisChunksIter,
@@ -1214,6 +1216,51 @@ impl<A, S, D> ArrayBase<S, D> where S: Data<Elem=A>, D: Dimension
}
}

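/// Parallel version of `map`: apply `f` to each element on rayon's thread pool
/// and collect the results into a new owned array with the same shape.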
pub fn par_map<B, F>(&self, f: F) -> Array<B, D>
where F: Fn(&A) -> B + Sync,
D: RemoveAxis,
A: Sync,
B: Send + Sync + Copy,
{
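// Raw-pointer wrapper that is `Send` and `Sync`, so the base pointer of the
// output buffer can be captured by the closure and moved onto rayon's worker threads.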
struct SendPtr<T>(*mut T);
unsafe impl<T: Send> Send for SendPtr<T> { }
unsafe impl<T: Sync> Sync for SendPtr<T> { }
impl<T> Copy for SendPtr<T> { }
impl<T> Clone for SendPtr<T> { fn clone(&self) -> Self { *self } }

// FIXME
// Parallelizing along a single array axis (the one with the greatest stride)
// is not enough if that axis is short, or if it is the only axis in the array.

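// Allocate an uninitialized output buffer and share its base pointer with the
// workers; each subview along the chosen axis writes into its own contiguous,
// non-overlapping chunk.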
let mut result = Vec::with_capacity(self.len());
let ax = self.max_stride_axis();
let base_ptr = SendPtr(result.as_mut_ptr());
let f = &f;
self.axis_iter(ax)
.into_par_iter()
.enumerate()
.for_each(move |(i, sheet)| {
unsafe {
let ptr = base_ptr.0.offset((sheet.len() * i) as isize);
let mut j = 0;
for elt in sheet {
*ptr.offset(j) = f(elt);
j += 1;
}
}
});
unsafe {
result.set_len(self.len());

// the buffer was filled with axis `ax` outermost; build the array with that
// shape, then swap the axes back into their original order
let mut dim = self.dim.clone();
dim.slice_mut().swap(0, ax.axis());
let mut a = Array::from_shape_vec_unchecked(dim, result);
a.swap_axes(0, ax.axis());
a
}
}

/// Call `f` by **v**alue on each element and create a new array
/// with the new values.
///
@@ -1261,6 +1308,14 @@ impl<A, S, D> ArrayBase<S, D> where S: Data<Elem=A>, D: Dimension
self.unordered_foreach_mut(f);
}

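/// Parallel version of `map_inplace`: call `f` on a mutable reference to each
/// element, distributing the elements over rayon's thread pool.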
pub fn par_map_inplace<F>(&mut self, f: F)
where S: DataMut,
F: Fn(&mut A) + Sync,
A: Send + Sync,
{
self.view_mut().into_par_iter().for_each(f)
}

/// Modify the array in place by calling `f` by **v**alue on each element.
/// The array is updated with the new values.
///