5 changes: 5 additions & 0 deletions pom.xml
@@ -147,6 +147,11 @@ Authors of this POM file: iirekm, jheaton
   </build>

   <dependencies>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-math3</artifactId>
+      <version>3.0</version>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
src/main/java/org/encog/engine/network/activation/ActivationTANH.java
@@ -26,6 +26,7 @@
 import org.encog.ml.factory.MLActivationFactory;
 import org.encog.util.obj.ActivationUtil;

+import org.apache.commons.math3.util.FastMath;
 /**
  * The hyperbolic tangent activation function takes the curved shape of the
  * hyperbolic tangent. This activation function produces both positive and
@@ -59,7 +60,7 @@ public ActivationTANH() {
 	public final void activationFunction(final double[] x, final int start,
 			final int size) {
 		for (int i = start; i < start + size; i++) {
-			x[i] = Math.tanh(x[i]);
+			x[i] = FastMath.tanh(x[i]);
 		}
 	}
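Why `FastMath.tanh` over `Math.tanh`: commons-math3's FastMath computes tanh with its own table- and polynomial-based implementation instead of delegating to the JDK, which is typically faster on hot batch-activation loops like the one above. A minimal timing sketch (not part of this PR; the array size and the crude nanoTime harness are arbitrary choices, and a real comparison should use JMH):

```java
import org.apache.commons.math3.util.FastMath;

public class TanhBench {

    // Sums tanh over the whole array with one of the two implementations.
    private static double run(final double[] x, final boolean fast) {
        double sum = 0;
        for (final double v : x) {
            sum += fast ? FastMath.tanh(v) : Math.tanh(v);
        }
        return sum;
    }

    public static void main(final String[] args) {
        final double[] x = new double[1000000];
        for (int i = 0; i < x.length; i++) {
            x[i] = ((i % 2000) - 1000) / 100.0; // values in [-10, 10)
        }

        // Warm-up so the JIT compiles both paths before timing.
        double sink = run(x, true) + run(x, false);

        long t0 = System.nanoTime();
        sink += run(x, false);
        final long jdkNanos = System.nanoTime() - t0;

        t0 = System.nanoTime();
        sink += run(x, true);
        final long fastNanos = System.nanoTime() - t0;

        System.out.printf("Math.tanh: %d ms, FastMath.tanh: %d ms (sink=%f)%n",
                jdkNanos / 1000000, fastNanos / 1000000, sink);
    }
}
```

The `sink` accumulator is printed so the JIT cannot eliminate either loop as dead code.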

@@ -236,21 +236,27 @@ private void processLevel(final int currentLevel) {
 		final double currentFlatSpot = this.flatSpot[currentLevel + 1];

 		// handle weights
+		// array references are made method local to avoid one indirection
+		final double[] layerDelta = this.layerDelta;
+		final double[] weights = this.weights;
+		final double[] gradients = this.gradients;
+		final double[] layerOutput = this.layerOutput;
+		final double[] layerSums = this.layerSums;
 		int yi = fromLayerIndex;
 		for (int y = 0; y < fromLayerSize; y++) {
-			final double output = this.layerOutput[yi];
+			final double output = layerOutput[yi];
 			double sum = 0;
-			int xi = toLayerIndex;

 			int wi = index + y;
-			for (int x = 0; x < toLayerSize; x++) {
-				this.gradients[wi] += output * this.layerDelta[xi];
-				sum += this.weights[wi] * this.layerDelta[xi];
-				wi += fromLayerSize;
-				xi++;
+			final int loopEnd = toLayerIndex + toLayerSize;
+			for (int xi = toLayerIndex; xi < loopEnd; xi++, wi += fromLayerSize) {
+				gradients[wi] += output * layerDelta[xi];
+				sum += weights[wi] * layerDelta[xi];
 			}

-			this.layerDelta[yi] = sum
-					* (activation.derivativeFunction(this.layerSums[yi], this.layerOutput[yi]) + currentFlatSpot);
+			layerDelta[yi] = sum
+					* (activation.derivativeFunction(layerSums[yi], layerOutput[yi]) + currentFlatSpot);

 			yi++;
 		}
 	}
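Two micro-optimizations are at work in this processLevel hunk: the field arrays are copied into final locals, so the JIT loads each array reference once rather than re-reading `this.field` on every iteration, and the inner loop's two counters are fused into the loop header with a precomputed bound, removing two statements from the loop body. A stripped-down sketch of the same transformation; all names here (grad, w, delta, rowStart, cols, stride, wStart) are illustrative, not Encog's actual fields:

```java
// Sketch of the hoist-fields-to-locals plus fused-counter loop pattern.
class HoistAndFuseSketch {
    private final double[] grad;
    private final double[] w;
    private final double[] delta;

    HoistAndFuseSketch(final double[] grad, final double[] w, final double[] delta) {
        this.grad = grad;
        this.w = w;
        this.delta = delta;
    }

    double accumulate(final double output, final int rowStart,
            final int cols, final int stride, final int wStart) {
        // Hoist the field reads: after this, the loop body touches only
        // locals, which the JIT can keep in registers.
        final double[] grad = this.grad;
        final double[] w = this.w;
        final double[] delta = this.delta;

        double sum = 0;
        int wi = wStart;
        // Precompute the bound and advance both indices in the loop header,
        // mirroring the rewritten inner loop above.
        final int end = rowStart + cols;
        for (int xi = rowStart; xi < end; xi++, wi += stride) {
            grad[wi] += output * delta[xi];
            sum += w[wi] * delta[xi];
        }
        return sum;
    }
}
```

Whether this helps in practice depends on the JIT; HotSpot can often prove such fields loop-invariant on its own, so the gain should be confirmed by benchmarking rather than assumed.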
10 changes: 7 additions & 3 deletions src/main/java/org/encog/util/concurrency/DetermineWorkload.java
@@ -72,10 +72,14 @@ public DetermineWorkload(final int threads, final int workloadSize) {
 		if (threads == 0) {
 			int num = Runtime.getRuntime().availableProcessors();

+			// NOTE: This was tested on an Intel i7 (4 cores, 8 threads) with the
+			// Encog Benchmark. Ca. 15% higher performance with exactly 8 threads.
+
 			// if there is more than one processor, use processor count + 1
-			if (num != 1) {
-				num++;
-			}
+			// if (num != 1) {
+			//	num++;
+			// }

 			// if there is a single processor, just use one thread

 			// Now see how big the training sets are going to be.
src/main/java/org/encog/util/concurrency/EngineConcurrency.java
@@ -79,8 +79,10 @@ public static EngineConcurrency getInstance() {
 	public EngineConcurrency() {
 		Runtime runtime = Runtime.getRuntime();
 		int threads = runtime.availableProcessors();
-		if( threads>1 )
-			threads++;
+		// NOTE: This was tested on an Intel i7 (4 cores, 8 threads) with the
+		// Encog Benchmark. Ca. 15% higher performance with exactly 8 threads.
+		// if( threads>1 )
+		//	threads++;
 		this.executor = Executors.newFixedThreadPool(threads);
 	}

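Both concurrency changes make the same call: drop the old "processor count + 1" heuristic and size the pool to exactly `Runtime.getRuntime().availableProcessors()`, which on SMT hardware already reports logical threads (8 on the i7 from the benchmark note), not physical cores. A minimal sketch of the resulting pool construction, assuming nothing beyond the JDK:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PoolSizingSketch {
    public static void main(final String[] args) {
        // One worker per logical processor reported by the JVM; no extra
        // "+1" thread, matching the change in EngineConcurrency above.
        final int threads = Runtime.getRuntime().availableProcessors();
        final ExecutorService executor = Executors.newFixedThreadPool(threads);
        System.out.println("Pool size: " + threads);
        executor.shutdown();
    }
}
```

The "+1" heuristic is usually justified for workloads that occasionally block; for CPU-bound training where every worker stays runnable, an extra thread mostly adds context-switch overhead, which would be consistent with the roughly 15% improvement reported in the benchmark comment.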