From 8e81cb03313513551c8e3d07b284053b872dc9e8 Mon Sep 17 00:00:00 2001
From: J W
Date: Tue, 26 Feb 2019 21:28:13 -0500
Subject: [PATCH 1/6] Start updating Float to float

---
 src/Microsoft.ML.Core/Utilities/BinFinder.cs |   5 +-
 src/Microsoft.ML.Core/Utilities/MathUtils.cs | 179 +++++++++----------
 2 files changed, 91 insertions(+), 93 deletions(-)

diff --git a/src/Microsoft.ML.Core/Utilities/BinFinder.cs b/src/Microsoft.ML.Core/Utilities/BinFinder.cs
index d5456e2956..2120b503e3 100644
--- a/src/Microsoft.ML.Core/Utilities/BinFinder.cs
+++ b/src/Microsoft.ML.Core/Utilities/BinFinder.cs
@@ -4,7 +4,6 @@

 using System;
 using System.Collections.Generic;
-using Float = System.Single;

 namespace Microsoft.ML.Internal.Utilities
 {
@@ -318,7 +317,7 @@ public Peg(int index, int split)
         private HeapNode.Heap _pegHeap; // heap used for selecting the largest energy decrease
         private int[] _accum; // integral of counts
         private int[] _path; // current set of pegs
-        private Float _meanBinSize;
+        private float _meanBinSize;

         public GreedyBinFinder()
         {
@@ -338,7 +337,7 @@ protected override void FindBinsCore(List<int> counts, int[] path)
             _accum = new int[CountValues + 1];
             for (int i = 0; i < CountValues; i++)
                 _accum[i + 1] = _accum[i] + counts[i];
-            _meanBinSize = (Float)_accum[CountValues] / CountBins;
+            _meanBinSize = (float)_accum[CountValues] / CountBins;

             PlacePegs();

diff --git a/src/Microsoft.ML.Core/Utilities/MathUtils.cs b/src/Microsoft.ML.Core/Utilities/MathUtils.cs
index 7ab349da12..9dce866bb4 100644
--- a/src/Microsoft.ML.Core/Utilities/MathUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/MathUtils.cs
@@ -4,7 +4,6 @@

 using System;
 using System.Collections.Generic;
-using Float = System.Single;

 namespace Microsoft.ML.Internal.Utilities
 {
@@ -14,7 +13,7 @@ namespace Microsoft.ML.Internal.Utilities
     [BestFriend]
     internal static class MathUtils
     {
-        public static Float ToFloat(this Double dbl)
+        public static float ToFloat(this Double dbl)
         {
             return (Single)dbl;
         }
@@ -27,22 +26,22 @@ public static void ToFloat(this Single dbl)
             throw Contracts.Except();
         }

-        public static Float Sqrt(Float x)
+        public static float Sqrt(float x)
        {
             return Math.Sqrt(x).ToFloat();
         }

-        public static Float Log(Float x)
+        public static float Log(float x)
         {
             return Math.Log(x).ToFloat();
         }

-        public static Float Log(Float a, Float newBase)
+        public static float Log(float a, float newBase)
         {
             return Math.Log(a, newBase).ToFloat();
         }

-        public static Float Pow(Float x, Float y)
+        public static float Pow(float x, float y)
         {
             return Math.Pow(x, y).ToFloat();
         }
@@ -54,7 +53,7 @@ public static Float Pow(Float x, Float y)
         /// <param name="y">The y values.</param>
         /// <param name="a">The coefficient a.</param>
        /// <param name="b">The intercept b.</param>
-        public static void SimpleLinearRegression(Float[] x, Float[] y, out Float a, out Float b)
+        public static void SimpleLinearRegression(float[] x, float[] y, out float a, out float b)
         {
             Contracts.CheckValue(x, nameof(x));
             Contracts.CheckValue(y, nameof(y));
@@ -62,15 +61,15 @@ public static void SimpleLinearRegression(Float[] x, Float[] y, out Float a, out

             int m = x.Length;

-            Float sumSqX = 0;
-            Float sumX = 0;
-            Float sumXY = 0;
-            Float sumY = 0;
-            Float sumSqY = 0;
+            float sumSqX = 0;
+            float sumX = 0;
+            float sumXY = 0;
+            float sumY = 0;
+            float sumSqY = 0;
             for (int i = 0; i < m; i++)
             {
-                Float xVal = x[i];
-                Float yVal = y[i];
+                float xVal = x[i];
+                float yVal = y[i];
                 sumSqX += xVal * xVal;
                 sumX += xVal;
                 sumXY += xVal * yVal;
@@ -78,7 +77,7 @@ public static void SimpleLinearRegression(Float[] x, Float[] y, out Float a, out
                 sumSqY += yVal * yVal;
             }

-            Float denom = sumSqX * m - sumX * sumX;
+            float denom = sumSqX * m - sumX * sumX;
             a = (sumXY * m - sumY * sumX) / denom;
             b = (sumSqX * sumY - sumXY * sumX) / denom;
         }
@@ -107,10 +106,10 @@ public static int Product(int[] a)
         /// </summary>
         /// <param name="a">an array</param>
         /// <returns>the max element</returns>
-        public static Float Max(Float[] a)
+        public static float Max(float[] a)
         {
             Contracts.AssertValue(a);
-            Float result = Float.NegativeInfinity;
+            float result = float.NegativeInfinity;
             foreach (var x in a)
                 result = Math.Max(result, x);
             return result;
@@ -121,10 +120,10 @@ public static Float Max(Float[] a)
         /// </summary>
         /// <param name="a">an array</param>
         /// <returns>the minimum element</returns>
-        public static Float Min(Float[] a)
+        public static float Min(float[] a)
         {
             Contracts.AssertValue(a);
-            Float result = Float.PositiveInfinity;
+            float result = float.PositiveInfinity;
             foreach (var x in a)
                 result = Math.Min(result, x);
             return result;
@@ -140,13 +139,13 @@ public static Float Min(Float[] a)
         /// </summary>
         /// <param name="a">The span of floats.</param>
         /// <returns>the first index of the max element</returns>
-        public static int ArgMax(ReadOnlySpan<Float> a)
+        public static int ArgMax(ReadOnlySpan<float> a)
         {
             if (a.IsEmpty)
                 return -1;

             int amax = -1;
-            Float max = Float.NegativeInfinity;
+            float max = float.NegativeInfinity;
             for (int i = a.Length - 1; i >= 0; i--)
             {
                 if (max <= a[i])
                 {
                     amax = i;
                     max = a[i];
                 }
             }

             return amax;
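            // A usage sketch for the SimpleLinearRegression hunk above (illustrative values,
            // not taken from this patch): the closed form solves least squares for y ≈ a*x + b.
            //
            //     float[] x = { 0, 1, 2, 3 };
            //     float[] y = { 1, 3, 5, 7 };   // exactly y = 2x + 1
            //     MathUtils.SimpleLinearRegression(x, y, out float a, out float b);
            //     // a == 2 and b == 1 up to float rounding. Note denom = m*sumSqX - sumX^2
            //     // is zero when all x values are equal, so callers should guard that case.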
@@ -169,13 +168,13 @@ public static int ArgMax(ReadOnlySpan<Float> a)
         /// </summary>
         /// <param name="a">The span of floats.</param>
         /// <returns>the first index of the minimum element</returns>
-        public static int ArgMin(ReadOnlySpan<Float> a)
+        public static int ArgMin(ReadOnlySpan<float> a)
         {
             if (a.IsEmpty)
                 return -1;

             int amin = -1;
-            Float min = Float.PositiveInfinity;
+            float min = float.PositiveInfinity;
             for (int i = a.Length - 1; i >= 0; i--)
             {
                 if (min >= a[i])
                 {
                     amin = i;
                     min = a[i];
                 }
             }

             return amin;
         }
@@ -192,7 +191,7 @@
          * LOG FUNCTIONS  *
          *****************/

-        private const Float LogTolerance = 30;
+        private const float LogTolerance = 30;

         /// <summary>
         /// computes the "softmax" function: log sum_i exp x_i
         /// </summary>
         /// <param name="inputs">Span of numbers to softmax</param>
         /// <returns>the softmax of the numbers</returns>
         /// <remarks>may have slightly lower roundoff error if inputs are sorted, smallest first</remarks>
-        public static Float SoftMax(ReadOnlySpan<Float> inputs)
+        public static float SoftMax(ReadOnlySpan<float> inputs)
         {
             int maxIdx = 0;
-            Float max = Float.NegativeInfinity;
+            float max = float.NegativeInfinity;
             for (int i = 0; i < inputs.Length; i++)
             {
                 if (inputs[i] > max)
                 {
                     maxIdx = i;
                     max = inputs[i];
                 }
             }

-            if (Float.IsNegativeInfinity(max))
-                return Float.NegativeInfinity;
+            if (float.IsNegativeInfinity(max))
+                return float.NegativeInfinity;

             if (inputs.Length == 1)
                 return max;

             double intermediate = 0.0;
-            Float cutoff = max - LogTolerance;
+            float cutoff = max - LogTolerance;

             for (int i = 0; i < inputs.Length; i++)
             {
@@ -231,17 +230,17 @@ public static Float SoftMax(ReadOnlySpan<Float> inputs)
             }

             if (intermediate > 0.0)
-                return (Float)(max + Math.Log(1.0 + intermediate));
+                return (float)(max + Math.Log(1.0 + intermediate));
             return max;
         }

         /// <summary>
         /// computes "softmax" function of two arguments: log (exp x + exp y)
         /// </summary>
-        public static Float SoftMax(Float lx, Float ly)
+        public static float SoftMax(float lx, float ly)
         {
-            Float max;
-            Float negDiff;
+            float max;
+            float negDiff;
             if (lx > ly)
             {
                 max = lx;
                 negDiff = ly - lx;
             }
             else
             {
                 max = ly;
                 negDiff = lx - ly;
             }
-            if (Float.IsNegativeInfinity(max) || negDiff < -LogTolerance)
+            if (float.IsNegativeInfinity(max) || negDiff < -LogTolerance)
             {
                 return max;
             }
             else
             {
-                return (Float)(max + Math.Log(1.0 + Math.Exp(negDiff)));
+                return (float)(max + Math.Log(1.0 + Math.Exp(negDiff)));
             }
         }

         /*******************
          * OTHER FUNCTIONS *
          *******************/

-        public const Float DefaultMaxRelativeErr = (Float)1e-8;
-        public const Float DefaultMaxAbsErr = (Float)1e-12;
+        public const float DefaultMaxRelativeErr = (float)1e-8;
+        public const float DefaultMaxAbsErr = (float)1e-12;

         /// <summary>
-        /// true if two Float values are close (using relative comparison)
+        /// true if two float values are close (using relative comparison)
         /// </summary>
         /// <param name="a"></param>
         /// <param name="b"></param>
         /// <returns></returns>
-        public static bool AlmostEqual(Float a, Float b)
+        public static bool AlmostEqual(float a, float b)
         {
             return AlmostEqual(a, b, DefaultMaxRelativeErr, DefaultMaxAbsErr);
         }

-        public static bool AlmostEqual(Float a, Float b, Float maxRelErr, Float maxAbsError)
+        public static bool AlmostEqual(float a, float b, float maxRelErr, float maxAbsError)
         {
             Contracts.Assert(FloatUtils.IsFinite(maxRelErr));
             Contracts.Assert(FloatUtils.IsFinite(maxAbsError));

-            Float absDiff = Math.Abs(a - b);
+            float absDiff = Math.Abs(a - b);
             if (absDiff < maxAbsError)
                 return true;

-            Float maxAbs = Math.Max(Math.Abs(a), Math.Abs(b));
+            float maxAbs = Math.Max(Math.Abs(a), Math.Abs(b));
             return (absDiff / maxAbs) <= maxRelErr;
         }
@@ -479,7 +478,7 @@ private static Double Unclamp(Double val)

         /// <summary>
         /// The logistic sigmoid
function: 1 / (1 + e^(-x)). /// - public static Float Sigmoid(Float x) + public static float Sigmoid(float x) { #if SLOW_EXP return SigmoidSlow(x); @@ -491,7 +490,7 @@ public static Float Sigmoid(Float x) /// /// Hyperbolic tangent. /// - public static Float Tanh(Float x) + public static float Tanh(float x) { #if SLOW_EXP return TanhSlow(x); @@ -503,7 +502,7 @@ public static Float Tanh(Float x) /// /// The logistic sigmoid function: 1 / (1 + e^(-x)). /// - public static Float SigmoidSlow(Float x) + public static float SigmoidSlow(float x) { // The following two expressions are mathematically equivalent. Due to the potential of getting overflow we should // not call exp(x) for large positive x: instead, we modify the expression to compute exp(-x). @@ -519,7 +518,7 @@ public static Float SigmoidSlow(Float x) /// /// Hyperbolic tangent. /// - public static Float TanhSlow(Float x) + public static float TanhSlow(float x) { return Math.Tanh(x).ToFloat(); } @@ -527,20 +526,20 @@ public static Float TanhSlow(Float x) /// /// The exponential function: e^(x). /// - public static Float ExpSlow(Float x) + public static float ExpSlow(float x) { return Math.Exp(x).ToFloat(); } private const int ExpInf = 128; - private const Float Coef1 = (Float)0.013555747234814917704030793; - private const Float Coef2 = (Float)0.065588116243247810171479524; - private const Float Coef3 = (Float)0.3069678791803394491901401; + private const float Coef1 = (float)0.013555747234814917704030793; + private const float Coef2 = (float)0.065588116243247810171479524; + private const float Coef3 = (float)0.3069678791803394491901401; // 1 / ln(2). - private const Float RecipLn2 = (Float)1.44269504088896340735992468100; + private const float RecipLn2 = (float)1.44269504088896340735992468100; - private static Float PowerOfTwo(int exp) + private static float PowerOfTwo(int exp) { Contracts.Assert(0 <= exp && exp < ExpInf); return FloatUtils.GetPowerOfTwoSingle(exp); @@ -549,10 +548,10 @@ private static Float PowerOfTwo(int exp) /// /// The logistic sigmoid function: 1 / (1 + e^(-x)). /// - public static Float SigmoidFast(Float x) + public static float SigmoidFast(float x) { // This is a loose translation from SSE code - if (Float.IsNaN(x)) + if (float.IsNaN(x)) return x; bool neg = false; @@ -565,20 +564,20 @@ public static Float SigmoidFast(Float x) // Multiply by 1/ln(2). x *= RecipLn2; if (x >= ExpInf) - return neg ? (Float)0 : (Float)1; + return neg ? (float)0 : (float)1; // Get the floor and fractional part. int n = (int)x; Contracts.Assert(0 <= n && n < ExpInf); - Float f = x - n; + float f = x - n; Contracts.Assert(0 <= f && f < 1); // Get the integer power of two part. - Float r = PowerOfTwo(n); - Contracts.Assert(1 <= r && r < Float.PositiveInfinity); + float r = PowerOfTwo(n); + Contracts.Assert(1 <= r && r < float.PositiveInfinity); // This approximates 2^f for 0 <= f <= 1. Note that it is exact at the endpoints. - Float res = 1 + f + (f - 1) * f * ((Coef1 * f + Coef2) * f + Coef3); + float res = 1 + f + (f - 1) * f * ((Coef1 * f + Coef2) * f + Coef3); res = 1 / (1 + r * res); if (!neg) @@ -589,9 +588,9 @@ public static Float SigmoidFast(Float x) /// /// The hyperbolic tangent function. /// - public static Float TanhFast(Float x) + public static float TanhFast(float x) { - if (Float.IsNaN(x)) + if (float.IsNaN(x)) return x; bool neg = false; @@ -604,20 +603,20 @@ public static Float TanhFast(Float x) // Multiply by 2/ln(2). x *= 2 * RecipLn2; if (x >= ExpInf) - return neg ? (Float)(-1) : (Float)1; + return neg ? 
(float)(-1) : (float)1; // Get the floor and fractional part. int n = (int)x; Contracts.Assert(0 <= n && n < ExpInf); - Float f = x - n; + float f = x - n; Contracts.Assert(0 <= f && f < 1); // Get the integer power of two part. - Float r = PowerOfTwo(n); - Contracts.Assert(1 <= r && r < Float.PositiveInfinity); + float r = PowerOfTwo(n); + Contracts.Assert(1 <= r && r < float.PositiveInfinity); // This approximates 2^f - 1 for 0 <= f <= 1. Note that it is exact at the endpoints. - Float res = f + (f - 1) * f * ((Coef1 * f + Coef2) * f + Coef3); + float res = f + (f - 1) * f * ((Coef1 * f + Coef2) * f + Coef3); res *= r; res = (res + (r - 1)) / (res + (r + 1)); @@ -629,9 +628,9 @@ public static Float TanhFast(Float x) /// /// The exponential function: e^(x). /// - public static Float ExpFast(Float x) + public static float ExpFast(float x) { - if (Float.IsNaN(x)) + if (float.IsNaN(x)) return x; bool neg = false; @@ -644,20 +643,20 @@ public static Float ExpFast(Float x) // Multiply by 1/ln(2). Then we need to calculate 2^x. x *= RecipLn2; if (x >= ExpInf) - return neg ? (Float)0 : Float.PositiveInfinity; + return neg ? (float)0 : float.PositiveInfinity; // Get the floor and fractional part. int n = (int)x; Contracts.Assert(0 <= n && n < ExpInf); - Float f = x - n; + float f = x - n; Contracts.Assert(0 <= f && f < 1); // Get the integer power of two part. - Float r = PowerOfTwo(n); - Contracts.Assert(1 <= r && r < Float.PositiveInfinity); + float r = PowerOfTwo(n); + Contracts.Assert(1 <= r && r < float.PositiveInfinity); // This approximates 2^f for 0 <= f <= 1. Note that it is exact at the endpoints. - Float res = 1 + f + (f - 1) * f * ((Coef1 * f + Coef2) * f + Coef3); + float res = 1 + f + (f - 1) * f * ((Coef1 * f + Coef2) * f + Coef3); res *= r; if (neg) @@ -666,29 +665,29 @@ public static Float ExpFast(Float x) } /// - /// Apply a soft max on an array of Floats. Note that src and dst may be the same array. + /// Apply a soft max on an array of floats. Note that src and dst may be the same array. /// - public static void ApplySoftMax(Float[] src, Float[] dst) + public static void ApplySoftMax(float[] src, float[] dst) { Contracts.Assert(src.Length == dst.Length); ApplySoftMax(src, dst, 0, src.Length); } /// - /// Apply a soft max on a range within an array of Floats. Note that src and dst may be the same array. + /// Apply a soft max on a range within an array of floats. Note that src and dst may be the same array. /// - public static void ApplySoftMax(Float[] src, Float[] dst, int start, int end) + public static void ApplySoftMax(float[] src, float[] dst, int start, int end) { Contracts.Assert(src.Length == dst.Length); Contracts.Assert(0 <= start && start <= end && end <= src.Length); // Compute max output. - Float maxOut = Float.NegativeInfinity; + float maxOut = float.NegativeInfinity; for (int i = start; i < end; i++) maxOut = Math.Max(maxOut, src[i]); // Compute exp and sum. - Float sum = 0; + float sum = 0; for (int i = start; i < end; i++) { dst[i] = ExpFast(src[i] - maxOut); @@ -700,28 +699,28 @@ public static void ApplySoftMax(Float[] src, Float[] dst, int start, int end) dst[i] /= sum; } - public static Float GetMedianInPlace(Float[] src, int count) + public static float GetMedianInPlace(float[] src, int count) { Contracts.Assert(count >= 0); Contracts.Assert(Utils.Size(src) >= count); if (count == 0) - return Float.NaN; + return float.NaN; Array.Sort(src, 0, count); // Skip any NaNs. They sort to the low end. 
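            // (Comparer<float>.Default orders NaN before every other value, including
            // float.NegativeInfinity, so after Array.Sort any NaNs sit at the front of the range.)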
             int ivMin = 0;
             int ivLim = count;
-            while (ivMin < ivLim && Float.IsNaN(src[ivMin]))
+            while (ivMin < ivLim && float.IsNaN(src[ivMin]))
                 ivMin++;
             Contracts.Assert(ivMin <= ivLim);
             if (ivMin >= ivLim)
-                return Float.NaN;
+                return float.NaN;

             // This assert will fire if Array.Sort changes to put NaNs at the high end.
-            Contracts.Assert(!Float.IsNaN(src[ivLim - 1]));
+            Contracts.Assert(!float.IsNaN(src[ivLim - 1]));

             // If we're dealing with an odd number of things, just grab the middle item; otherwise,
             // average the two middle items.
@@ -732,7 +731,7 @@ public static Float GetMedianInPlace(Float[] src, int count)
             return (src[iv - 1] + src[iv]) / 2;
         }

-        public static Double CosineSimilarity(ReadOnlySpan<Float> a, ReadOnlySpan<Float> b, int aIdx, int bIdx, int len)
+        public static Double CosineSimilarity(ReadOnlySpan<float> a, ReadOnlySpan<float> b, int aIdx, int bIdx, int len)
         {
             const Double epsilon = 1e-12f;
             Contracts.Assert(len > 0);
@@ -794,17 +793,17 @@ public static Double CrossEntropy(Double probTrue, Double probPredicted, bool us
         /// when working with log probabilities and likelihoods.
         /// </summary>
         /// <param name="terms"></param>
-        public static Float LnSum(IEnumerable<Float> terms)
+        public static float LnSum(IEnumerable<float> terms)
         {
             // Two passes to find the overall max is a *lot* simpler,
             // but potentially more computationally intensive.
-            Float max = Float.NegativeInfinity;
+            float max = float.NegativeInfinity;
             Double soFar = 0;
-            foreach (Float term in terms)
+            foreach (float term in terms)
             {
                 // At this point, soFar is the sum over all *prior* terms of Math.Exp(x - max).
-                if (Float.IsNegativeInfinity(term))
+                if (float.IsNegativeInfinity(term))
                     continue;
                 if (!(term > max))
                     soFar += Math.Exp(term - max);
                 else
                 {
@@ -814,7 +813,7 @@ public static Float LnSum(IEnumerable<Float> terms)
                     max = term;
                 }
             }
-            return (Float)Math.Log(soFar) + max;
+            return (float)Math.Log(soFar) + max;
         }

         /// <summary>

From fdcfe2f1701cbb6625b5c6d033b8dea2ab8233fa Mon Sep 17 00:00:00 2001
From: J W
Date: Tue, 26 Feb 2019 21:42:35 -0500
Subject: [PATCH 2/6] Remove more Floats

---
 src/Microsoft.ML.Core/Utilities/Stats.cs      |  11 +-
 src/Microsoft.ML.CpuMath/AlignedMatrix.cs     |  45 ++++----
 src/Microsoft.ML.CpuMath/EigenUtils.cs        | 103 +++++++++---------
 src/Microsoft.ML.CpuMath/ICpuBuffer.cs        |   9 +-
 src/Microsoft.ML.Data/Data/Combiner.cs        |   7 +-
 .../Depricated/Vector/VBufferMathUtils.cs     |  41 ++++---
 .../UnitTests/TestLoss.cs                     |  12 +-
 7 files changed, 110 insertions(+), 118 deletions(-)

diff --git a/src/Microsoft.ML.Core/Utilities/Stats.cs b/src/Microsoft.ML.Core/Utilities/Stats.cs
index fad719e80d..35b21d8cf0 100644
--- a/src/Microsoft.ML.Core/Utilities/Stats.cs
+++ b/src/Microsoft.ML.Core/Utilities/Stats.cs
@@ -3,7 +3,6 @@
 // See the LICENSE file in the project root for more information.

 using System;
-using Float = System.Single;

 namespace Microsoft.ML.Internal.Utilities
 {
@@ -200,11 +199,11 @@ public static int SampleFromPoisson(Random rand, double lambda)

         // Mean refers to the mu parameter. Scale refers to the b parameter.
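         // Inverse-CDF sampling: for u drawn uniformly from (-0.5, 0.5], the branches below
         // compute mean - scale * sign(u) * ln(1 - 2|u|), which is Laplace(mean, scale) distributed.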
         // https://en.wikipedia.org/wiki/Laplace_distribution
-        public static Float SampleFromLaplacian(Random rand, Float mean, Float scale)
+        public static float SampleFromLaplacian(Random rand, float mean, float scale)
         {
-            Float u = rand.NextSingle();
+            float u = rand.NextSingle();
             u = u - 0.5f;
-            Float ret = mean;
+            float ret = mean;
             if (u >= 0)
                 ret -= scale * MathUtils.Log(1 - 2 * u);
             else
@@ -219,9 +218,9 @@
         /// </summary>
         /// <param name="rand"></param>
         /// <returns></returns>
-        public static Float SampleFromCauchy(Random rand)
+        public static float SampleFromCauchy(Random rand)
         {
-            return (Float)Math.Tan(Math.PI * (rand.NextSingle() - 0.5));
+            return (float)Math.Tan(Math.PI * (rand.NextSingle() - 0.5));
         }

         /// <summary>
diff --git a/src/Microsoft.ML.CpuMath/AlignedMatrix.cs b/src/Microsoft.ML.CpuMath/AlignedMatrix.cs
index d9dee9a868..4aa824d401 100644
--- a/src/Microsoft.ML.CpuMath/AlignedMatrix.cs
+++ b/src/Microsoft.ML.CpuMath/AlignedMatrix.cs
@@ -6,7 +6,6 @@
 using System.Collections;
 using System.Collections.Generic;
 using Microsoft.ML.Internal.CpuMath.Core;
-using Float = System.Single;

 namespace Microsoft.ML.Internal.CpuMath
 {
@@ -54,10 +53,10 @@ public CpuAlignedVector(int size, int cbAlign)
         {
             Contracts.Assert(0 < size);
             // cbAlign should be a power of two.
-            Contracts.Assert(sizeof(Float) <= cbAlign);
+            Contracts.Assert(sizeof(float) <= cbAlign);
             Contracts.Assert((cbAlign & (cbAlign - 1)) == 0);

-            int cfltAlign = cbAlign / sizeof(Float);
+            int cfltAlign = cbAlign / sizeof(float);
             int cflt = RoundUp(size, cfltAlign);
             _items = new AlignedArray(cflt, cbAlign);
             _size = size;
@@ -98,7 +97,7 @@ public int CbAlign
         /// </summary>
         /// <param name="index">The index</param>
         /// <returns>The value at the given index</returns>
-        public Float this[int index]
+        public float this[int index]
         {
             get
             {
@@ -117,7 +116,7 @@
         /// </summary>
         /// <param name="i">The index</param>
         /// <returns>The value at the given index</returns>
-        public Float GetValue(int i)
+        public float GetValue(int i)
         {
             Contracts.Assert(0 <= i && i < _size);
             return _items[i];
         }
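            // Worked numbers for the ctor's rounding above (illustrative, assuming cbAlign = 16):
            // cfltAlign = 16 / sizeof(float) = 4, so a vector of logical size 10 is backed by
            // RoundUp(10, 4) = 12 floats. The AlignedArray is thus always a whole number of
            // aligned blocks, which is what lets the vectorized code paths operate on full blocks.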
         /// <summary>
         /// Assign randomized values to the vector elements via the input function.
         /// </summary>
         /// <param name="rand">The input random function that takes no arguments and returns a float value</param>
-        public void Randomize(Func<Float> rand)
+        public void Randomize(Func<float> rand)
         {
             Contracts.AssertValue(rand);
             for (int i = 0; i < _size; i++)
@@ -147,7 +146,7 @@ public void Zero()
         /// </summary>
         /// <param name="dst">The destination array</param>
         /// <param name="ivDst">The starting index in the destination array</param>
-        public void CopyTo(Float[] dst, ref int ivDst)
+        public void CopyTo(float[] dst, ref int ivDst)
         {
             Contracts.AssertValue(dst);
             Contracts.Assert(0 <= ivDst && ivDst <= dst.Length - _size);
@@ -163,7 +162,7 @@
         /// <param name="dst">The destination array</param>
         /// <param name="ivDst">The starting index in the destination array</param>
         /// <param name="count">The number of elements to be copied</param>
-        public void CopyTo(int ivSrc, Float[] dst, int ivDst, int count)
+        public void CopyTo(int ivSrc, float[] dst, int ivDst, int count)
         {
             Contracts.AssertValue(dst);
             Contracts.Assert(0 <= count && count <= dst.Length);
@@ -177,7 +176,7 @@
         /// <param name="src">The source array</param>
         /// <param name="index">The starting index in the source array</param>
-        public void CopyFrom(Float[] src, ref int index)
+        public void CopyFrom(float[] src, ref int index)
         {
             Contracts.AssertValue(src);
             Contracts.Assert(0 <= index && index <= src.Length - _size);
@@ -193,7 +192,7 @@
         /// <param name="src">The source array</param>
         /// <param name="ivSrc">The starting index in the source array</param>
         /// <param name="count">The number of elements to be copied</param>
-        public void CopyFrom(int ivDst, Float[] src, int ivSrc, int count)
+        public void CopyFrom(int ivDst, float[] src, int ivSrc, int count)
         {
             Contracts.AssertValue(src);
             Contracts.Assert(0 <= count && count <= src.Length);
@@ -216,7 +215,7 @@ public void CopyFrom(CpuAlignedVector src)
         /// <summary>
         /// Get the underlying AlignedArray as IEnumerator<Float>.
         /// </summary>
-        public IEnumerator<Float> GetEnumerator()
+        public IEnumerator<float> GetEnumerator()
         {
             for (int i = 0; i < _size; i++)
                 yield return _items[i];
@@ -305,13 +304,13 @@ protected CpuAlignedMatrixBase(int runLen, int runCnt, int cbAlign)
             Contracts.Assert(0 < runLen);
             Contracts.Assert(0 < runCnt);
             // cbAlign should be a power of two.
-            Contracts.Assert(sizeof(Float) <= cbAlign);
+            Contracts.Assert(sizeof(float) <= cbAlign);
             Contracts.Assert((cbAlign & (cbAlign - 1)) == 0);

             RunLen = runLen;
             RunCnt = runCnt;

-            FloatAlign = cbAlign / sizeof(Float);
+            FloatAlign = cbAlign / sizeof(float);
             Shift = GeneralUtils.CbitLowZero((uint)FloatAlign);
             Mask = FloatAlign - 1;
@@ -354,7 +353,7 @@ public void Dispose()
         /// <summary>
         /// Assign randomized values to the matrix elements via the input function.
         /// </summary>
         /// <param name="rand">The input random function that takes no arguments and returns a float value</param>
-        public void Randomize(Func<Float> rand)
+        public void Randomize(Func<float> rand)
         {
             Contracts.AssertValue(rand);
             for (int i = 0, k = 0; i < RunCnt; i++)
@@ -396,7 +395,7 @@ public void CopyFrom(CpuAlignedMatrixBase src)
     /// The ctor takes an alignment value, which must be a power of two at least sizeof(Float).
     /// </summary>
     [BestFriend]
-    internal abstract class CpuAlignedMatrixRowBase : CpuAlignedMatrixBase, ICpuBuffer<Float>
+    internal abstract class CpuAlignedMatrixRowBase : CpuAlignedMatrixBase, ICpuBuffer<float>
     {
         protected CpuAlignedMatrixRowBase(int crow, int ccol, int cbAlign)
             : base(ccol, crow, cbAlign)
@@ -428,7 +427,7 @@ protected CpuAlignedMatrixRowBase(int crow, int ccol, int cbAlign)
         /// </summary>
         /// <param name="dst">The destination array</param>
         /// <param name="ivDst">The starting index in the destination array</param>
-        public void CopyTo(Float[] dst, ref int ivDst)
+        public void CopyTo(float[] dst, ref int ivDst)
         {
             Contracts.AssertValue(dst);
             Contracts.Assert(0 <= ivDst && ivDst <= dst.Length - ValueCount);
@@ -457,7 +456,7 @@
         /// <param name="src">The source array</param>
         /// <param name="ivSrc">The starting index in the source array</param>
-        public void CopyFrom(Float[] src, ref int ivSrc)
+        public void CopyFrom(float[] src, ref int ivSrc)
         {
             Contracts.AssertValue(src);
             Contracts.Assert(0 <= ivSrc && ivSrc <= src.Length - ValueCount);
@@ -480,7 +479,7 @@
         /// <summary>
         /// Get the underlying AlignedArray as IEnumerator<Float>.
         /// </summary>
-        public IEnumerator<Float> GetEnumerator()
+        public IEnumerator<float> GetEnumerator()
         {
             for (int row = 0; row < RowCount; row++)
             {
@@ -534,7 +533,7 @@ public CpuAlignedMatrixRow(int crow, int ccol, int cbAlign)
         /// </summary>
         /// <param name="row">The starting row in this matrix</param>
         /// <param name="dst">The destination array</param>
         /// <param name="ivDst">The starting index in the destination array</param>
-        public void CopyTo(int row, Float[] dst, ref int ivDst)
+        public void CopyTo(int row, float[] dst, ref int ivDst)
         {
             Contracts.AssertValue(dst);
             Contracts.Assert(0 <= row && row < RowCount);
@@ -598,7 +597,7 @@ public CpuAlignedMatrixCol(int crow, int ccol, int cbAlign)
         /// </summary>
         /// <param name="dst">The destination array</param>
         /// <param name="ivDst">The starting index in the destination array</param>
-        public void CopyTo(Float[] dst, ref int ivDst)
+        public void CopyTo(float[] dst, ref int ivDst)
         {
             Contracts.AssertValue(dst);
             Contracts.Assert(0 <= ivDst && ivDst <= dst.Length - ValueCount);
@@ -616,7 +615,7 @@
         /// <param name="row">The starting row in this matrix</param>
         /// <param name="dst">The destination array</param>
         /// <param name="ivDst">The starting index in the destination array</param>
-        public void CopyTo(int row, Float[] dst, ref int ivDst)
+        public void CopyTo(int row, float[] dst, ref int ivDst)
         {
             Contracts.AssertValue(dst);
             Contracts.Assert(0 <= row && row < RowCount);
@@ -631,7 +630,7 @@
         /// <param name="src">The source array</param>
         /// <param name="ivSrc">The starting index in the source array</param>
-        public void CopyFrom(Float[] src, ref int ivSrc)
+        public void CopyFrom(float[] src, ref int ivSrc)
         {
             Contracts.AssertValue(src);
             Contracts.Assert(0 <= ivSrc && ivSrc <= src.Length - ValueCount);
@@ -663,7 +662,7 @@ public void ZeroItems(int[] indices)
         /// <summary>
         /// Get the underlying AlignedArray as IEnumerator<Float>.
         /// </summary>
-        public IEnumerator<Float> GetEnumerator()
+        public IEnumerator<float> GetEnumerator()
         {
             for (int row = 0; row < RowCount; row++)
             {
diff --git a/src/Microsoft.ML.CpuMath/EigenUtils.cs b/src/Microsoft.ML.CpuMath/EigenUtils.cs
index 94d8ee1a88..9b12e4e58b 100644
--- a/src/Microsoft.ML.CpuMath/EigenUtils.cs
+++ b/src/Microsoft.ML.CpuMath/EigenUtils.cs
@@ -4,7 +4,6 @@

 using System;
 using Microsoft.ML.Internal.CpuMath.Core;
-using Float = System.Single;

 namespace Microsoft.ML.Internal.CpuMath
 {
@@ -14,21 +13,21 @@ internal static class EigenUtils
     {
         //Compute the Eigen-decomposition of a symmetric matrix
         // REVIEW: use matrix/vector operations, not Array Math
-        public static void EigenDecomposition(Float[] a, out Float[] eigenvalues, out Float[] eigenvectors)
+        public static void EigenDecomposition(float[] a, out float[] eigenvalues, out float[] eigenvectors)
         {
             var count = a.Length;
             var n = (int)Math.Sqrt(count);
             Contracts.Assert(n * n == count);

-            eigenvectors = new Float[count];
-            eigenvalues = new Float[n];
+            eigenvectors = new float[count];
+            eigenvalues = new float[n];

             //Reduce A to tridiagonal form
             // REVIEW: it's not ideal to keep using the same variable name for different purposes
             //  - After the operation, "eigenvalues" means the diagonal elements of the reduced matrix
             //and "eigenvectors" means the orthogonal similarity transformation matrix
             // - Consider aliasing variables
-            var w = new Float[n];
+            var w = new float[n];
             Tred(a, eigenvalues, w, eigenvectors, n);

             //Eigen-decomposition of the tridiagonal matrix
@@ -36,10 +35,10 @@
             Imtql(eigenvalues, w, eigenvectors, n);

             for (int i = 0; i < n; i++)
-                eigenvalues[i] = eigenvalues[i] <= 0 ? (Float)(0.0) : (Float)Math.Sqrt(eigenvalues[i]);
+                eigenvalues[i] = eigenvalues[i] <= 0 ? (float)(0.0) : (float)Math.Sqrt(eigenvalues[i]);
         }

-        private static Float Hypot(Float x, Float y)
+        private static float Hypot(float x, float y)
         {
             x = Math.Abs(x);
             y = Math.Abs(y);
@@ -49,23 +48,23 @@

             if (x < y)
             {
-                Float t = x / y;
-                return y * (Float)Math.Sqrt(1 + t * t);
+                float t = x / y;
+                return y * (float)Math.Sqrt(1 + t * t);
             }
             else
             {
-                Float t = y / x;
-                return x * (Float)Math.Sqrt(1 + t * t);
+                float t = y / x;
+                return x * (float)Math.Sqrt(1 + t * t);
             }
         }

-        private static Float CopySign(Float x, Float y)
+        private static float CopySign(float x, float y)
         {
-            Float xx = Math.Abs(x);
+            float xx = Math.Abs(x);
             return y < 0 ? -xx : xx;
         }

-        private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n)
+        private static void Tred(float[] a, float[] d, float[] e, float[] z, int n)
         {
             Double g;
             Double h;
@@ -74,7 +73,7 @@
             int k;
             int l;

-            /* this subroutine reduces a Float symmetric matrix to a */
+            /* this subroutine reduces a float symmetric matrix to a */
             /* symmetric tridiagonal matrix using and accumulating */
             /* orthogonal similarity transformations. */

             /* on input */

             /* n is the order of the matrix. */

-            /* a contains the Float symmetric input matrix. only the */
+            /* a contains the float symmetric input matrix. only the */
             /* lower triangle of the matrix need be supplied. */

             /* on output */
@@ -126,7 +125,7 @@
                     d[0] = z[0];
                     z[1] = 0;
                     z[n] = 0;
-                    d[1] = (Float)h;
+                    d[1] = (float)h;
                     continue;
                 }
                 // ..........
scale row .......... @@ -145,21 +144,21 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n) z[i + j * n] = 0; z[j + i * n] = 0; } - d[i] = (Float)h; + d[i] = (float)h; continue; } for (k = 0; k < i; ++k) { - d[k] = (Float) (d[k]/scale); + d[k] = (float) (d[k]/scale); h += d[k] * d[k]; } Double f = d[l]; - g = CopySign((Float)Math.Sqrt(h), (Float)f); - e[i] = (Float)(scale * g); + g = CopySign((float)Math.Sqrt(h), (float)f); + e[i] = (float)(scale * g); h -= f * g; - d[l] = (Float)(f - g); + d[l] = (float)(f - g); // .......... form a*u .......... for (j = 0; j < i; ++j) { @@ -169,28 +168,28 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n) for (j = 0; j < i; ++j) { f = d[j]; - z[j + i * n] = (Float)f; + z[j + i * n] = (float)f; g = e[j] + z[j + j * n] * f; if (j + 1 == i) { - e[j] = (Float)g; + e[j] = (float)g; continue; } for (k = j + 1; k < i; ++k) { g += z[k + j * n] * d[k]; - e[k] = (Float)(e[k] + z[k + j * n] * f); + e[k] = (float)(e[k] + z[k + j * n] * f); } - e[j] = (Float)g; + e[j] = (float)g; } // .......... form p .......... f = 0; for (j = 0; j < i; ++j) { - e[j] = (Float)(e[j] / h); + e[j] = (float)(e[j] / h); f += e[j] * d[j]; } @@ -198,7 +197,7 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n) // .......... form q .......... for (j = 0; j < i; ++j) { - e[j] = (Float)(e[j] - hh * d[j]); + e[j] = (float)(e[j] - hh * d[j]); } // .......... form reduced a .......... for (j = 0; j < i; ++j) @@ -208,14 +207,14 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n) for (k = j; k < i; ++k) { - z[k + j * n] = (Float)(z[k + j * n] - f * e[k] - g * d[k]); + z[k + j * n] = (float)(z[k + j * n] - f * e[k] - g * d[k]); } d[j] = z[l + j * n]; z[i + j * n] = 0; } - d[i] = (Float)h; + d[i] = (float)h; } // .......... accumulation of transformation matrices .......... @@ -230,7 +229,7 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n) { for (k = 0; k < i; ++k) { - d[k] = (Float)(z[k + i * n] / h); + d[k] = (float)(z[k + i * n] / h); } for (j = 0; j < i; ++j) @@ -244,7 +243,7 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n) for (k = 0; k < i; ++k) { - z[k + j * n] = (Float)(z[k + j * n] - g * d[k]); + z[k + j * n] = (float)(z[k + j * n] - g * d[k]); } } } @@ -265,23 +264,23 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n) } /* Tred */ /* Subroutine */ - private static int Imtql(Float[] d, Float[] e, Float[] z, int n) + private static int Imtql(float[] d, float[] e, float[] z, int n) { /* Local variables */ - Float b; - Float c; - Float f; - Float g; + float b; + float c; + float f; + float g; int i; int j; int k; int l; int m; - Float p; - Float r; - Float s; - Float tst1; - Float tst2; + float p; + float r; + float s; + float tst1; + float tst2; /* this subroutine is a translation of the algol procedure imtql2, */ /* num. math. 12, 377-383(1968) by martin and wilkinson, */ @@ -346,7 +345,7 @@ private static int Imtql(Float[] d, Float[] e, Float[] z, int n) { e[i - 1] = e[i]; } - e[n - 1] = (Float)(0.0); + e[n - 1] = (float)(0.0); for (l = 0; l < n; ++l) { @@ -369,12 +368,12 @@ private static int Imtql(Float[] d, Float[] e, Float[] z, int n) return l; } /* .......... form shift .......... 
 */
-                        g = (d[l + 1] - p) / (e[l] * (Float)(2.0));
-                        r = Hypot(g, (Float)(1.0));
+                        g = (d[l + 1] - p) / (e[l] * (float)(2.0));
+                        r = Hypot(g, (float)(1.0));
                         g = d[m] - p + e[l] / (g + CopySign(r, g));
-                        s = (Float)(1.0);
-                        c = (Float)(1.0);
-                        p = (Float)(0.0);
+                        s = (float)(1.0);
+                        c = (float)(1.0);
+                        p = (float)(0.0);
                         /* .......... for i=m-1 step -1 until l do -- .......... */
                         for (i = m - 1; i >= l; i--)
                         {
@@ -382,7 +381,7 @@
                             b = c * e[i];
                             r = Hypot(f, g);
                             e[i + 1] = r;
-                            if (r == (Float)(0.0))
+                            if (r == (float)(0.0))
                             {
                                 /* .......... recover from underflow .......... */
                                 d[i + 1] -= p;
@@ -392,7 +391,7 @@
                             s = f / r;
                             c = g / r;
                             g = d[i + 1] - p;
-                            r = (d[i] - g) * s + c * (Float)(2.0) * b;
+                            r = (d[i] - g) * s + c * (float)(2.0) * b;
                             p = s * r;
                             d[i + 1] = g + p;
                             g = c * r - b;
@@ -404,11 +403,11 @@
                                 z[k + i * n] = c * z[k + i * n] - s * f;
                             }
                         }
-                        if (r == (Float)(0.0) && i >= l)
+                        if (r == (float)(0.0) && i >= l)
                             continue;
                         d[l] -= p;
                         e[l] = g;
-                        e[m] = (Float)(0.0);
+                        e[m] = (float)(0.0);
                     }
                 } while (m != l);
             }
diff --git a/src/Microsoft.ML.CpuMath/ICpuBuffer.cs b/src/Microsoft.ML.CpuMath/ICpuBuffer.cs
index 3a3e76d6c6..5194a4bb42 100644
--- a/src/Microsoft.ML.CpuMath/ICpuBuffer.cs
+++ b/src/Microsoft.ML.CpuMath/ICpuBuffer.cs
@@ -5,7 +5,6 @@

 using System;
 using System.Collections.Generic;
 using Microsoft.ML.Internal.CpuMath.Core;
-using Float = System.Single;

 namespace Microsoft.ML.Internal.CpuMath
 {
@@ -40,7 +39,7 @@ internal interface ICpuBuffer<T> : IEnumerable<T>, IDisposable

     /// <summary>
     /// A logical math vector.
     /// </summary>
     [BestFriend]
-    internal interface ICpuVector : ICpuBuffer<Float>
+    internal interface ICpuVector : ICpuBuffer<float>
     {
         /// <summary>
         /// The vector size
         /// </summary>
         int VectorSize { get; }

         /// <summary>
         /// Get the i'th component of the vector.
         /// </summary>
-        Float GetValue(int i);
+        float GetValue(int i);
     }

     [BestFriend]
-    internal interface ICpuMatrix : ICpuBuffer<Float>
+    internal interface ICpuMatrix : ICpuBuffer<float>
     {
         /// <summary>
         /// The row count
         /// </summary>
@@ -76,7 +75,7 @@ internal interface ICpuFullMatrix : ICpuMatrix
     {
         /// <summary>
         /// Copy the values for the given row into dst, starting at slot ivDst.
         /// </summary>
-        void CopyTo(int row, Float[] dst, ref int ivDst);
+        void CopyTo(int row, float[] dst, ref int ivDst);

         /// <summary>
         /// Zero out the items with the given indices.
diff --git a/src/Microsoft.ML.Data/Data/Combiner.cs b/src/Microsoft.ML.Data/Data/Combiner.cs
index 88cde05bbe..4824409a6c 100644
--- a/src/Microsoft.ML.Data/Data/Combiner.cs
+++ b/src/Microsoft.ML.Data/Data/Combiner.cs
@@ -4,7 +4,6 @@

 using System;
 using System.Threading;
-using Float = System.Single;

 namespace Microsoft.ML.Data
 {
@@ -43,7 +42,7 @@ public override void Combine(ref ReadOnlyMemory<char> dst, ReadOnlyMemory<char>
     }

     [BestFriend]
-    internal sealed class FloatAdder : Combiner<Float>
+    internal sealed class FloatAdder : Combiner<float>
     {
         private static volatile FloatAdder _instance;
         public static FloatAdder Instance
         {
@@ -60,8 +59,8 @@ private FloatAdder()
         {
         }

-        public override bool IsDefault(Float value) { return value == 0; }
-        public override void Combine(ref Float dst, Float src) { dst += src; }
+        public override bool IsDefault(float value) { return value == 0; }
+        public override void Combine(ref float dst, float src) { dst += src; }
     }

     [BestFriend]
diff --git a/src/Microsoft.ML.Data/Depricated/Vector/VBufferMathUtils.cs b/src/Microsoft.ML.Data/Depricated/Vector/VBufferMathUtils.cs
index 52b8094fdb..2591b11b56 100644
--- a/src/Microsoft.ML.Data/Depricated/Vector/VBufferMathUtils.cs
+++ b/src/Microsoft.ML.Data/Depricated/Vector/VBufferMathUtils.cs
@@ -9,16 +9,13 @@

 namespace Microsoft.ML.Numeric
 {
-    // REVIEW: Once we do the conversions from Vector/WritableVector, review names of methods,
-    // parameters, parameter order, etc.
-    using Float = System.Single;

     internal static partial class VectorUtils
     {
         /// <summary>
         /// Returns the L2 norm squared of the vector (sum of squares of the components).
         /// </summary>
-        public static Float NormSquared(in VBuffer<Float> a)
+        public static float NormSquared(in VBuffer<float> a)
         {
             var aValues = a.GetValues();
             if (aValues.Length == 0)
@@ -29,7 +26,7 @@
         /// <summary>
         /// Returns the L2 norm squared of the vector (sum of squares of the components).
         /// </summary>
-        public static Float NormSquared(ReadOnlySpan<Float> a)
+        public static float NormSquared(ReadOnlySpan<float> a)
         {
             return CpuMathUtils.SumSq(a);
         }
@@ -38,7 +35,7 @@
         /// <summary>
         /// Returns the L2 norm of the vector.
         /// </summary>
         /// <returns>L2 norm of the vector</returns>
-        public static Float Norm(in VBuffer<Float> a)
+        public static float Norm(in VBuffer<float> a)
         {
             return MathUtils.Sqrt(NormSquared(in a));
         }
@@ -47,7 +44,7 @@
         /// <summary>
         /// Returns the L1 norm of the vector.
         /// </summary>
         /// <returns>L1 norm of the vector</returns>
-        public static Float L1Norm(in VBuffer<Float> a)
+        public static float L1Norm(in VBuffer<float> a)
         {
             var aValues = a.GetValues();
             if (aValues.Length == 0)
@@ -59,7 +56,7 @@
         /// <summary>
         /// Returns the L-infinity norm of the vector (i.e., the maximum absolute value).
         /// </summary>
         /// <returns>L-infinity norm of the vector</returns>
-        public static Float MaxNorm(in VBuffer<Float> a)
+        public static float MaxNorm(in VBuffer<float> a)
         {
             var aValues = a.GetValues();
             if (aValues.Length == 0)
@@ -70,7 +67,7 @@
         /// <summary>
         /// Returns the sum of elements in the vector.
         /// </summary>
-        public static Float Sum(in VBuffer<Float> a)
+        public static float Sum(in VBuffer<float> a)
         {
             var aValues = a.GetValues();
             if (aValues.Length == 0)
@@ -83,7 +80,7 @@
         /// <param name="dst">Incoming vector</param>
         /// <param name="c">Value to multiply vector with</param>
-        public static void ScaleBy(ref VBuffer<Float> dst, Float c)
+        public static void ScaleBy(ref VBuffer<float> dst, float c)
         {
             if (c == 1 || dst.GetValues().Length == 0)
                 return;
@@ -99,7 +96,7 @@
         /// <summary>
         /// Scales the vector by a real value.
         /// <paramref name="dst"/> = <paramref name="c"/> * <paramref name="src"/>
         /// </summary>
-        public static void ScaleBy(in VBuffer<Float> src, ref VBuffer<Float> dst, Float c)
+        public static void ScaleBy(in VBuffer<float> src, ref VBuffer<float> dst, float c)
         {
             int length = src.Length;
             var srcValues = src.GetValues();
@@ -138,7 +135,7 @@ public static void ScaleBy(in VBuffer<Float> src, ref VBuffer<Float> dst, Float
         /// <summary>
         /// Perform in-place vector addition <paramref name="dst"/> += <paramref name="src"/>.
         /// </summary>
-        public static void Add(in VBuffer<Float> src, ref VBuffer<Float> dst)
+        public static void Add(in VBuffer<float> src, ref VBuffer<float> dst)
         {
             Contracts.Check(src.Length == dst.Length, "Vectors must have the same dimensionality.");
@@ -156,7 +153,7 @@
                 return;
             }
             // REVIEW: Should we use SSE for any of these possibilities?
-            VBufferUtils.ApplyWith(in src, ref dst, (int i, Float v1, ref Float v2) => v2 += v1);
+            VBufferUtils.ApplyWith(in src, ref dst, (int i, float v1, ref float v2) => v2 += v1);
         }

         // REVIEW: Rename all instances of AddMult to AddScale, as soon as conversion concerns are no more.

         /// <summary>
         /// If either vector is dense, <paramref name="dst"/> will be dense, unless
         /// <paramref name="c"/> is 0 in which case this method does nothing.
         /// </summary>
-        public static void AddMult(in VBuffer<Float> src, Float c, ref VBuffer<Float> dst)
+        public static void AddMult(in VBuffer<float> src, float c, ref VBuffer<float> dst)
         {
             Contracts.Check(src.Length == dst.Length, "Vectors must have the same dimensionality.");
@@ -184,14 +181,14 @@
                 return;
             }
             // REVIEW: Should we use SSE for any of these possibilities?
-            VBufferUtils.ApplyWith(in src, ref dst, (int i, Float v1, ref Float v2) => v2 += c * v1);
+            VBufferUtils.ApplyWith(in src, ref dst, (int i, float v1, ref float v2) => v2 += c * v1);
         }

         /// <summary>
         /// Perform scalar vector addition
         /// <paramref name="res"/> = <paramref name="c"/> * <paramref name="src"/> + <paramref name="dst"/>
         /// </summary>
-        public static void AddMult(in VBuffer<Float> src, Float c, ref VBuffer<Float> dst, ref VBuffer<Float> res)
+        public static void AddMult(in VBuffer<float> src, float c, ref VBuffer<float> dst, ref VBuffer<float> res)
         {
             Contracts.Check(src.Length == dst.Length, "Vectors must have the same dimensionality.");
             int length = src.Length;
@@ -213,7 +210,7 @@
                 return;
             }

-            VBufferUtils.ApplyWithCopy(in src, ref dst, ref res, (int i, Float v1, Float v2, ref Float v3) => v3 = v2 + c * v1);
+            VBufferUtils.ApplyWithCopy(in src, ref dst, ref res, (int i, float v1, float v2, ref float v3) => v3 = v2 + c * v1);
         }

         /// <summary>
         /// Perform scalar vector addition
         /// <paramref name="a"/> + <paramref name="c"/> * <paramref name="b"/>
         /// and store the result in <paramref name="dst"/>.
         /// </summary>
-        public static void AddMultInto(in VBuffer<Float> a, Float c, in VBuffer<Float> b, ref VBuffer<Float> dst)
+        public static void AddMultInto(in VBuffer<float> a, float c, in VBuffer<float> b, ref VBuffer<float> dst)
         {
             Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality.");
@@ -239,7 +236,7 @@
         /// except that this takes place in the section of <paramref name="dst"/> starting
         /// at slot <paramref name="offset"/>.
         /// </summary>
-        public static void AddMultWithOffset(in VBuffer<Float> src, Float c, ref VBuffer<Float> dst, int offset)
+        public static void AddMultWithOffset(in VBuffer<float> src, float c, ref VBuffer<float> dst, int offset)
         {
             Contracts.CheckParam(0 <= offset && offset <= dst.Length, nameof(offset));
             Contracts.CheckParam(src.Length <= dst.Length - offset, nameof(offset));
@@ -380,7 +377,7 @@ public static void AddMultWithOffset(in VBuffer<Float> src, Float c, ref VBuffer
         /// <paramref name="src"/> is sparse, <paramref name="dst"/> will have a count of zero, instead of the
         /// same count as <paramref name="src"/>.
         /// </summary>
-        public static void ScaleInto(in VBuffer<Float> src, Float c, ref VBuffer<Float> dst)
+        public static void ScaleInto(in VBuffer<float> src, float c, ref VBuffer<float> dst)
         {
             // REVIEW: The analogous WritableVector method insisted on
             // equal lengths, but I assume I don't care here.
@@ -407,7 +404,7 @@ public static void ScaleInto(in VBuffer<Float> src, Float c, ref VBuffer<Float>
                 VBufferUtils.ApplyIntoEitherDefined(in src, ref dst, (i, v) => c * v);
         }

-        public static int ArgMax(in VBuffer<Float> src)
+        public static int ArgMax(in VBuffer<float> src)
         {
             if (src.Length == 0)
                 return -1;
@@ -443,7 +440,7 @@ public static int ArgMax(in VBuffer<Float> src)
             return ind;
         }

-        public static int ArgMin(in VBuffer<Float> src)
+        public static int ArgMin(in VBuffer<float> src)
         {
             if (src.Length == 0)
                 return -1;
diff --git a/test/Microsoft.ML.Core.Tests/UnitTests/TestLoss.cs b/test/Microsoft.ML.Core.Tests/UnitTests/TestLoss.cs
index 8a10eb6051..4daa225d92 100644
--- a/test/Microsoft.ML.Core.Tests/UnitTests/TestLoss.cs
+++ b/test/Microsoft.ML.Core.Tests/UnitTests/TestLoss.cs
@@ -4,7 +4,7 @@

 using System;
 using Xunit;
-using Float = System.Single;
+
 namespace Microsoft.ML.RunTests
 {
     /// <summary>
@@ -14,7 +14,7 @@ public class TestLoss
     {
         private const string _category = "Loss";

-        private const Float _epsilon = 1e-4f;
+        private const float _epsilon = 1e-4f;

         /// <summary>
         /// A small helper for comparing a loss's computations to expected values.
@@ -30,8 +30,8 @@
         /// w.r.t. the output in the vicinity of the output value</param>
         private void TestHelper(IScalarOutputLoss lossFunc, double label, double output, double expectedLoss, double expectedUpdate, bool differentiable = true)
         {
-            Double loss = lossFunc.Loss((Float)output, (Float)label);
-            Float derivative = lossFunc.Derivative((Float)output, (Float)label);
+            Double loss = lossFunc.Loss((float)output, (float)label);
+            float derivative = lossFunc.Derivative((float)output, (float)label);

             Assert.Equal(expectedLoss, loss, 5);
             Assert.Equal(expectedUpdate, -derivative, 5);

             if (differentiable)
             {
                 // In principle, the update should be the negative of the first derivative of the loss.
                 // Use a simple finite difference method to see if it's in the right ballpark.
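                 // (That is, derivative ≈ (Loss(output + eps) - Loss(output)) / eps for a small step;
                 // the Math.Max below chooses a step that remains significant when |output| is large.)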
-                Float almostOutput = Math.Max((Float)output * (1 + _epsilon), (Float)output + _epsilon);
-                Double almostLoss = lossFunc.Loss(almostOutput, (Float)label);
+                float almostOutput = Math.Max((float)output * (1 + _epsilon), (float)output + _epsilon);
+                Double almostLoss = lossFunc.Loss(almostOutput, (float)label);
                 Assert.Equal((almostLoss - loss) / (almostOutput - output), derivative, 1);
             }
         }

From 971bf3281badca55adc4c49bf30ae8793bacd2cb Mon Sep 17 00:00:00 2001
From: J W
Date: Tue, 26 Feb 2019 21:47:03 -0500
Subject: [PATCH 3/6] Remove Float from files

---
 .../Depricated/Vector/VectorUtils.cs          | 73 +++++++--------
 .../Dirty/PredictionUtils.cs                  |  4 +-
 .../MultiOutputRegressionEvaluator.cs         | 48 ++++------
 .../Evaluators/QuantileRegressionEvaluator.cs | 27 +++---
 .../Evaluators/RegressionEvaluator.cs         | 19 +++--
 5 files changed, 82 insertions(+), 89 deletions(-)

diff --git a/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs b/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs
index bb8b6f69b2..e43cb95406 100644
--- a/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs
+++ b/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs
@@ -7,7 +7,6 @@
 using Microsoft.ML.Data;
 using Microsoft.ML.Internal.CpuMath;
 using Microsoft.ML.Internal.Utilities;
-using Float = System.Single;

 namespace Microsoft.ML.Numeric
 {
@@ -20,14 +19,14 @@
     [BestFriend]
     internal static partial class VectorUtils
     {
-        public static Float DotProduct(Float[] a, Float[] b)
+        public static float DotProduct(float[] a, float[] b)
         {
             Contracts.Check(Utils.Size(a) == Utils.Size(b), "Arrays must have the same length");
             Contracts.Check(Utils.Size(a) > 0);
             return CpuMathUtils.DotProductDense(a, b, a.Length);
         }

-        public static Float DotProduct(Float[] a, in VBuffer<Float> b)
+        public static float DotProduct(float[] a, in VBuffer<float> b)
         {
             Contracts.Check(Utils.Size(a) == b.Length, "Vectors must have the same dimensionality.");
             var bValues = b.GetValues();
@@ -38,7 +37,7 @@
             return CpuMathUtils.DotProductSparse(a, bValues, b.GetIndices(), bValues.Length);
         }

-        public static Float DotProduct(in VBuffer<Float> a, in VBuffer<Float> b)
+        public static float DotProduct(in VBuffer<float> a, in VBuffer<float> b)
         {
             Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality.");
@@ -67,15 +66,15 @@
         /// <param name="bottom">How many bottom (negative) elements to preserve after sparsification.</param>
         /// <param name="normalize">Whether to normalize results to [-1,1] range.</param>
-        public static void SparsifyNormalize(ref VBuffer<Float> a, int top, int bottom, bool normalize)
+        public static void SparsifyNormalize(ref VBuffer<float> a, int top, int bottom, bool normalize)
         {
             Contracts.CheckParam(top >= 0, nameof(top), "Top count needs to be non-negative");
             Contracts.CheckParam(bottom >= 0, nameof(bottom), "Bottom count needs to be non-negative");
-            Float absMax = 0;
+            float absMax = 0;

             // In the top heap, we pop the smallest values, so that the 'top' largest remain.
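            // (Bounded-selection idiom: each heap may grow to k + 1 entries: push the candidate,
            // then pop the root once the size exceeds k. For the top heap the root is the smallest
            // retained value, so a full pass leaves exactly the k largest elements behind.)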
-            var topHeap = new Heap<KeyValuePair<int, Float>>((left, right) => right.Value < left.Value, top + 1);
-            var bottomHeap = new Heap<KeyValuePair<int, Float>>((left, right) => right.Value > left.Value, bottom + 1);
+            var topHeap = new Heap<KeyValuePair<int, float>>((left, right) => right.Value < left.Value, top + 1);
+            var bottomHeap = new Heap<KeyValuePair<int, float>>((left, right) => right.Value > left.Value, bottom + 1);
             bool isDense = a.IsDense;

             var aValues = a.GetValues();
@@ -157,7 +156,7 @@ public static void SparsifyNormalize(ref VBuffer<Float> a, int top, int bottom,
         /// <summary>
         /// Multiplies arrays Dst *= A element by element and returns the result in <paramref name="dst"/> (Hadamard product).
         /// </summary>
-        public static void MulElementWise(in VBuffer<Float> a, ref VBuffer<Float> dst)
+        public static void MulElementWise(in VBuffer<float> a, ref VBuffer<float> dst)
         {
             Contracts.Check(a.Length == dst.Length, "Vectors must have the same dimensionality.");
@@ -167,22 +166,22 @@
                 CpuMathUtils.MulElementWise(a.GetValues(), dst.GetValues(), editor.Values, a.Length);
             }
             else
-                VBufferUtils.ApplyWithEitherDefined(in a, ref dst, (int ind, Float v1, ref Float v2) => { v2 *= v1; });
+                VBufferUtils.ApplyWithEitherDefined(in a, ref dst, (int ind, float v1, ref float v2) => { v2 *= v1; });
         }

-        private static Float L2DistSquaredSparse(ReadOnlySpan<Float> valuesA, ReadOnlySpan<int> indicesA, ReadOnlySpan<Float> valuesB, ReadOnlySpan<int> indicesB)
+        private static float L2DistSquaredSparse(ReadOnlySpan<float> valuesA, ReadOnlySpan<int> indicesA, ReadOnlySpan<float> valuesB, ReadOnlySpan<int> indicesB)
         {
             Contracts.Assert(valuesA.Length == indicesA.Length);
             Contracts.Assert(valuesB.Length == indicesB.Length);

-            Float res = 0;
+            float res = 0;

             int ia = 0;
             int ib = 0;
             while (ia < indicesA.Length && ib < indicesB.Length)
             {
                 int diff = indicesA[ia] - indicesB[ib];
-                Float d;
+                float d;
                 if (diff == 0)
                 {
                     d = valuesA[ia] - valuesB[ib];
@@ -219,7 +218,7 @@
             return res;
         }

-        private static Float L2DistSquaredHalfSparse(ReadOnlySpan<Float> valuesA, ReadOnlySpan<Float> valuesB, ReadOnlySpan<int> indicesB)
+        private static float L2DistSquaredHalfSparse(ReadOnlySpan<float> valuesA, ReadOnlySpan<float> valuesB, ReadOnlySpan<int> indicesB)
         {
             var normA = CpuMathUtils.SumSq(valuesA);
             if (valuesB.Length == 0)
@@ -230,7 +229,7 @@
             return res < 0 ?
 0 : res;
         }

-        private static Float L2DiffSquaredDense(ReadOnlySpan<Float> valuesA, ReadOnlySpan<Float> valuesB, int length)
+        private static float L2DiffSquaredDense(ReadOnlySpan<float> valuesA, ReadOnlySpan<float> valuesB, int length)
         {
             Contracts.Assert(0 <= length && length <= valuesA.Length);
             Contracts.Assert(0 <= length && length <= valuesB.Length);
@@ -248,7 +247,7 @@
         /// <param name="b">the second array (given as a VBuffer)</param>
         /// <param name="offset">offset in 'a'</param>
         /// <returns>the dot product</returns>
-        public static Float DotProductWithOffset(in VBuffer<Float> a, int offset, in VBuffer<Float> b)
+        public static float DotProductWithOffset(in VBuffer<float> a, int offset, in VBuffer<float> b)
         {
             Contracts.Check(0 <= offset && offset <= a.Length);
             Contracts.Check(b.Length <= a.Length - offset, "VBuffer b must be no longer than a.Length - offset.");
@@ -265,7 +264,7 @@
             }
             else
             {
-                Float result = 0;
+                float result = 0;
                 var aIndices = a.GetIndices();
                 int aMin = Utils.FindIndexSorted(aIndices, 0, aIndices.Length, offset);
                 int aLim = Utils.FindIndexSorted(aIndices, 0, aIndices.Length, offset + b.Length);
@@ -300,7 +299,7 @@
         /// <param name="b">the second array (given as a VBuffer)</param>
         /// <param name="offset">offset in 'a'</param>
         /// <returns>the dot product</returns>
-        public static Float DotProductWithOffset(Float[] a, int offset, in VBuffer<Float> b)
+        public static float DotProductWithOffset(float[] a, int offset, in VBuffer<float> b)
         {
             Contracts.Check(0 <= offset && offset <= a.Length);
             Contracts.Check(b.Length <= a.Length - offset, "VBuffer b must be no longer than a.Length - offset.");
@@ -314,7 +313,7 @@
         }

-        private static Float DotProductSparse(ReadOnlySpan<Float> aValues, ReadOnlySpan<int> aIndices, int ia, int iaLim, ReadOnlySpan<Float> bValues, ReadOnlySpan<int> bIndices, int ib, int ibLim)
+        private static float DotProductSparse(ReadOnlySpan<float> aValues, ReadOnlySpan<int> aIndices, int ia, int iaLim, ReadOnlySpan<float> bValues, ReadOnlySpan<int> bIndices, int ib, int ibLim)
         {
             Contracts.AssertNonEmpty(aValues);
             Contracts.AssertNonEmpty(aIndices);
@@ -323,7 +322,7 @@
             Contracts.Assert(0 <= ia && ia < iaLim && iaLim <= aIndices.Length);
             Contracts.Assert(0 <= ib && ib < ibLim && ibLim <= bIndices.Length);

-            Float res = 0;
+            float res = 0;

             // Do binary searches when the indices mismatch by more than this.
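            // (Presumably a merge over the two sorted index lists: advance whichever side has the
            // smaller current index, and when one side trails the other by more than thresh entries,
            // find the next candidate with a binary search instead of stepping one slot at a time.)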
             const int thresh = 20;
@@ -366,9 +365,9 @@
         /// <param name="a">one VBuffer</param>
         /// <param name="b">another VBuffer</param>
         /// <returns>L1 Distance from a to b</returns>
-        public static Float L1Distance(in VBuffer<Float> a, in VBuffer<Float> b)
+        public static float L1Distance(in VBuffer<float> a, in VBuffer<float> b)
         {
-            Float res = 0;
+            float res = 0;
             VBufferUtils.ForEachEitherDefined(in a, in b,
                 (slot, val1, val2) => res += Math.Abs(val1 - val2));
             return res;
@@ -380,7 +379,7 @@
         /// <param name="a">one VBuffer</param>
         /// <param name="b">another VBuffer</param>
         /// <returns>Distance from a to b</returns>
-        public static Float Distance(in VBuffer<Float> a, in VBuffer<Float> b)
+        public static float Distance(in VBuffer<float> a, in VBuffer<float> b)
         {
             return MathUtils.Sqrt(L2DistSquared(in a, in b));
         }
@@ -391,7 +390,7 @@
         /// <param name="a">one VBuffer</param>
         /// <param name="b">another VBuffer</param>
         /// <returns>Distance from a to b</returns>
-        public static Float L2DistSquared(in VBuffer<Float> a, in VBuffer<Float> b)
+        public static float L2DistSquared(in VBuffer<float> a, in VBuffer<float> b)
         {
             Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality.");
             if (a.IsDense)
@@ -409,9 +408,9 @@
         /// Given two vectors a and b, calculate their L2 distance squared (|a-b|^2).
         /// </summary>
         /// <param name="a">The first vector, given as an array</param>
-        /// <param name="b">The second vector, given as a VBuffer{Float}</param>
+        /// <param name="b">The second vector, given as a VBuffer{float}</param>
         /// <returns>The squared L2 distance between a and b</returns>
-        public static Float L2DistSquared(Float[] a, in VBuffer<Float> b)
+        public static float L2DistSquared(float[] a, in VBuffer<float> b)
         {
             Contracts.CheckValue(a, nameof(a));
             Contracts.Check(Utils.Size(a) == b.Length, "Vectors must have the same dimensionality.");
@@ -423,7 +422,7 @@
         /// <summary>
         /// Perform in-place vector addition <paramref name="dst"/> += <paramref name="src"/>.
         /// </summary>
-        public static void Add(Float[] src, Float[] dst)
+        public static void Add(float[] src, float[] dst)
         {
             Contracts.CheckValue(src, nameof(src));
             Contracts.CheckValue(dst, nameof(dst));
@@ -434,12 +433,12 @@
         }

         /// <summary>
         /// Adds a multiple of a VBuffer to a float array.
         /// </summary>
         /// <param name="src">Buffer to add</param>
         /// <param name="dst">Span to add to</param>
         /// <param name="c">Coefficient</param>
-        public static void AddMult(in VBuffer<Float> src, Span<Float> dst, Float c)
+        public static void AddMult(in VBuffer<float> src, Span<float> dst, float c)
         {
             Contracts.CheckParam(src.Length == dst.Length, nameof(dst), "Arrays must have the same dimensionality.");
@@ -458,14 +457,14 @@
         }

         /// <summary>
         /// Adds a multiple of a VBuffer to a float array, with an offset into the destination.
         /// </summary>
         /// <param name="src">Buffer to add</param>
         /// <param name="dst">Array to add to</param>
         /// <param name="offset">The offset into <paramref name="dst"/> at which to add</param>
         /// <param name="c">Coefficient</param>
-        public static void AddMultWithOffset(in VBuffer<Float> src, Float[] dst, int offset, Float c)
+        public static void AddMultWithOffset(in VBuffer<float> src, float[] dst, int offset, float c)
         {
             Contracts.CheckValue(dst, nameof(dst));
             Contracts.Check(0 <= offset && offset <= dst.Length);
@@ -494,7 +493,7 @@
         /// <param name="src">Array to add</param>
         /// <param name="dst">Array to add to</param>
         /// <param name="c">Multiple</param>
-        public static void AddMult(Float[] src, Float[] dst, Float c)
+        public static void AddMult(float[] src, float[] dst, float c)
         {
             Contracts.Check(src.Length == dst.Length, "Arrays must have the same dimensionality.");
@@ -507,7 +506,7 @@
         /// <summary>
         /// Returns the L2 norm of the vector (sum of squares of the components).
         /// </summary>
-        public static Float Norm(Float[] a)
+        public static float Norm(float[] a)
         {
             return MathUtils.Sqrt(CpuMathUtils.SumSq(a));
         }
@@ -515,7 +514,7 @@
         /// <summary>
         /// Returns sum of elements in array
         /// </summary>
-        public static Float Sum(Float[] a)
+        public static float Sum(float[] a)
         {
             if (a == null || a.Length == 0)
                 return 0;
@@ -527,7 +526,7 @@
         /// <param name="dst">The array</param>
         /// <param name="c">Value to multiply vector with</param>
-        public static void ScaleBy(Float[] dst, Float c)
+        public static void ScaleBy(float[] dst, float c)
         {
             if (c == 1)
                 return;
@@ -538,11 +537,11 @@
                 Array.Clear(dst, 0, dst.Length);
         }

-        public static Float Distance(Float[] a, Float[] b)
+        public static float Distance(float[] a, float[] b)
         {
             Contracts.Check(a.Length == b.Length, "Arrays must have the same dimensionality.");

-            Float res = 0;
+            float res = 0;
             for (int i = 0; i < a.Length; i++)
             {
                 var diff = a[i] - b[i];
diff --git a/src/Microsoft.ML.Data/Dirty/PredictionUtils.cs b/src/Microsoft.ML.Data/Dirty/PredictionUtils.cs
index 6ade2b5e1d..f5ffc98082 100644
--- a/src/Microsoft.ML.Data/Dirty/PredictionUtils.cs
+++ b/src/Microsoft.ML.Data/Dirty/PredictionUtils.cs
@@ -11,8 +11,6 @@

 namespace Microsoft.ML.Internal.Internallearn
 {
-    using Float = System.Single;
-
     /// <summary>
     /// Various utilities
     /// </summary>
@@ -92,7 +90,7 @@ public static string[] SplitOnSemis(string[] args)
         /// <summary>
         /// Make a string representation of an array
         /// </summary>
-        public static string Array2String(Float[] a, string sep)
+        public static string Array2String(float[] a, string sep)
         {
             StringBuilder sb = new StringBuilder();
             if (a.Length == 0)
diff --git a/src/Microsoft.ML.Data/Evaluators/MultiOutputRegressionEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/MultiOutputRegressionEvaluator.cs
index 554426b5eb..bf0af4bd1d 100644
--- a/src/Microsoft.ML.Data/Evaluators/MultiOutputRegressionEvaluator.cs
+++ b/src/Microsoft.ML.Data/Evaluators/MultiOutputRegressionEvaluator.cs
@@ -12,9 +12,7 @@
 using Microsoft.ML.Data;
 using Microsoft.ML.EntryPoints;
 using Microsoft.ML.Internal.Utilities;
-using Microsoft.ML.Model;
 using Microsoft.ML.Numeric;
-using Float = System.Single;

 [assembly: LoadableClass(typeof(MultiOutputRegressionEvaluator), typeof(MultiOutputRegressionEvaluator), typeof(MultiOutputRegressionEvaluator.Arguments), typeof(SignatureEvaluator),
     "Multi Output Regression Evaluator", MultiOutputRegressionEvaluator.LoadName, "MultiOutputRegression", "MRE")]
@@ -246,7 +244,7 @@ public Counters(IRegressionLoss lossFunction, int size)
                 _fnLoss = new double[size];
             }
+            public void Update(ReadOnlySpan<float> score, ReadOnlySpan<float> label, int length, float weight)
             {
                 Contracts.Assert(length == _l1Loss.Length);
                 Contracts.Assert(score.Length >= length);
@@ -271,16 +269,16 @@ public void Update(ReadOnlySpan<Float> score, ReadOnlySpan<Float> label, int len
                 }
             }
 
-            private ValueGetter<VBuffer<Float>> _labelGetter;
-            private ValueGetter<VBuffer<Float>> _scoreGetter;
-            private ValueGetter<Float> _weightGetter;
+            private ValueGetter<VBuffer<float>> _labelGetter;
+            private ValueGetter<VBuffer<float>> _scoreGetter;
+            private ValueGetter<float> _weightGetter;
             private readonly int _size;
-            private VBuffer<Float> _label;
-            private VBuffer<Float> _score;
-            private readonly Float[] _labelArr;
-            private readonly Float[] _scoreArr;
+            private VBuffer<float> _label;
+            private VBuffer<float> _score;
+            private readonly float[] _labelArr;
+            private readonly float[] _scoreArr;
             public readonly Counters UnweightedCounters;
             public readonly Counters WeightedCounters;
@@ -293,8 +291,8 @@ public Aggregator(IHostEnvironment env, IRegressionLoss lossFunction, int size,
                 Host.Assert(size > 0);
                 _size = size;
-                _labelArr = new Float[_size];
-                _scoreArr = new Float[_size];
+                _labelArr = new float[_size];
+                _scoreArr = new float[_size];
                 UnweightedCounters = new Counters(lossFunction, _size);
                 Weighted = weighted;
                 WeightedCounters = Weighted ? new Counters(lossFunction, _size) : null;
@@ -307,13 +305,13 @@ internal override void InitializeNextPass(DataViewRow row, RoleMappedSchema sche
                 var score = schema.GetUniqueColumn(AnnotationUtils.Const.ScoreValueKind.Score);
 
-                _labelGetter = RowCursorUtils.GetVecGetterAs<Float>(NumberDataViewType.Single, row, schema.Label.Value.Index);
-                _scoreGetter = row.GetGetter<VBuffer<Float>>(score.Index);
+                _labelGetter = RowCursorUtils.GetVecGetterAs<float>(NumberDataViewType.Single, row, schema.Label.Value.Index);
+                _scoreGetter = row.GetGetter<VBuffer<float>>(score.Index);
                 Contracts.AssertValue(_labelGetter);
                 Contracts.AssertValue(_scoreGetter);
                 if (schema.Weight.HasValue)
-                    _weightGetter = row.GetGetter<Float>(schema.Weight.Value.Index);
+                    _weightGetter = row.GetGetter<float>(schema.Weight.Value.Index);
             }
 
             public override void ProcessRow()
             {
@@ -329,7 +327,7 @@ public override void ProcessRow()
                     return;
                 }
 
-                Float weight = 1;
+                float weight = 1;
                 if (_weightGetter != null)
                 {
                     _weightGetter(ref weight);
@@ -466,15 +464,15 @@ private protected override Delegate[] CreateGettersCore(DataViewRow input, Func<
             disposer = null;
             long cachedPosition = -1;
-            var label = default(VBuffer<Float>);
-            var score = default(VBuffer<Float>);
+            var label = default(VBuffer<float>);
+            var score = default(VBuffer<float>);
 
-            ValueGetter<VBuffer<Float>> nullGetter = (ref VBuffer<Float> vec) => vec = default(VBuffer<Float>);
+            ValueGetter<VBuffer<float>> nullGetter = (ref VBuffer<float> vec) => vec = default(VBuffer<float>);
             var labelGetter = activeCols(LabelOutput) || activeCols(L1Output) || activeCols(L2Output) || activeCols(DistCol)
-                ? RowCursorUtils.GetVecGetterAs<Float>(NumberDataViewType.Single, input, LabelIndex)
+                ? RowCursorUtils.GetVecGetterAs<float>(NumberDataViewType.Single, input, LabelIndex)
                 : nullGetter;
             var scoreGetter = activeCols(ScoreOutput) || activeCols(L1Output) || activeCols(L2Output) || activeCols(DistCol)
-                ?
input.GetGetter>(ScoreIndex) : nullGetter; Action updateCacheIfNeeded = () => @@ -490,8 +488,8 @@ private protected override Delegate[] CreateGettersCore(DataViewRow input, Func< var getters = new Delegate[5]; if (activeCols(LabelOutput)) { - ValueGetter> labelFn = - (ref VBuffer dst) => + ValueGetter> labelFn = + (ref VBuffer dst) => { updateCacheIfNeeded(); label.CopyTo(ref dst); @@ -500,8 +498,8 @@ private protected override Delegate[] CreateGettersCore(DataViewRow input, Func< } if (activeCols(ScoreOutput)) { - ValueGetter> scoreFn = - (ref VBuffer dst) => + ValueGetter> scoreFn = + (ref VBuffer dst) => { updateCacheIfNeeded(); score.CopyTo(ref dst); diff --git a/src/Microsoft.ML.Data/Evaluators/QuantileRegressionEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/QuantileRegressionEvaluator.cs index 137af482d7..7287a346c5 100644 --- a/src/Microsoft.ML.Data/Evaluators/QuantileRegressionEvaluator.cs +++ b/src/Microsoft.ML.Data/Evaluators/QuantileRegressionEvaluator.cs @@ -11,7 +11,6 @@ using Microsoft.ML.EntryPoints; using Microsoft.ML.Internal.Utilities; using Microsoft.ML.Model; -using Float = System.Single; [assembly: LoadableClass(typeof(QuantileRegressionEvaluator), typeof(QuantileRegressionEvaluator), typeof(QuantileRegressionEvaluator.Arguments), typeof(SignatureEvaluator), "Quantile Regression Evaluator", QuantileRegressionEvaluator.LoadName, "QuantileRegression")] @@ -27,7 +26,7 @@ namespace Microsoft.ML.Data { [BestFriend] internal sealed class QuantileRegressionEvaluator : - RegressionEvaluatorBase, VBuffer> + RegressionEvaluatorBase, VBuffer> { public sealed class Arguments : ArgumentsBase { @@ -130,13 +129,13 @@ public Counters(int size) TotalLoss = VBufferUtils.CreateDense(size); } - protected override void UpdateCore(Float label, in VBuffer score, in VBuffer loss, Float weight) + protected override void UpdateCore(float label, in VBuffer score, in VBuffer loss, float weight) { AddL1AndL2Loss(label, in score, weight); AddCustomLoss(weight, in loss); } - private void AddL1AndL2Loss(Float label, in VBuffer score, Float weight) + private void AddL1AndL2Loss(float label, in VBuffer score, float weight) { Contracts.Check(score.Length == TotalL1Loss.Length, "Vectors must have the same dimensionality."); @@ -168,7 +167,7 @@ private void AddL1AndL2Loss(Float label, in VBuffer score, Float weight) } } - private void AddCustomLoss(Float weight, in VBuffer loss) + private void AddCustomLoss(float weight, in VBuffer loss) { Contracts.Check(loss.Length == TotalL1Loss.Length, "Vectors must have the same dimensionality."); @@ -233,12 +232,12 @@ public Aggregator(IHostEnvironment env, IRegressionLoss lossFunction, bool weigh protected override void ApplyLossFunction(in VBuffer score, float label, ref VBuffer loss) { - VBufferUtils.PairManipulator lossFn = - (int slot, Float src, ref Double dst) => dst = LossFunction.Loss(src, label); + VBufferUtils.PairManipulator lossFn = + (int slot, float src, ref Double dst) => dst = LossFunction.Loss(src, label); VBufferUtils.ApplyWith(in score, ref loss, lossFn); } - protected override bool IsNaN(in VBuffer score) + protected override bool IsNaN(in VBuffer score) { return VBufferUtils.HasNaNs(in score); } @@ -386,17 +385,17 @@ private protected override Delegate[] CreateGettersCore(DataViewRow input, Func< disposer = null; long cachedPosition = -1; - Float label = 0; - var score = default(VBuffer); + float label = 0; + var score = default(VBuffer); var l1 = VBufferUtils.CreateDense(_scoreSize); - ValueGetter nanGetter = (ref Float value) => value = 
Single.NaN; + ValueGetter nanGetter = (ref float value) => value = Single.NaN; var labelGetter = activeCols(L1Col) || activeCols(L2Col) ? RowCursorUtils.GetLabelGetter(input, LabelIndex) : nanGetter; - ValueGetter> scoreGetter; + ValueGetter> scoreGetter; if (activeCols(L1Col) || activeCols(L2Col)) - scoreGetter = input.GetGetter>(ScoreIndex); + scoreGetter = input.GetGetter>(ScoreIndex); else - scoreGetter = (ref VBuffer dst) => dst = default(VBuffer); + scoreGetter = (ref VBuffer dst) => dst = default(VBuffer); Action updateCacheIfNeeded = () => { diff --git a/src/Microsoft.ML.Data/Evaluators/RegressionEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/RegressionEvaluator.cs index cfc59f5edd..c329cc66d1 100644 --- a/src/Microsoft.ML.Data/Evaluators/RegressionEvaluator.cs +++ b/src/Microsoft.ML.Data/Evaluators/RegressionEvaluator.cs @@ -10,7 +10,6 @@ using Microsoft.ML.Data; using Microsoft.ML.EntryPoints; using Microsoft.ML.Model; -using Float = System.Single; [assembly: LoadableClass(typeof(RegressionEvaluator), typeof(RegressionEvaluator), typeof(RegressionEvaluator.Arguments), typeof(SignatureEvaluator), "Regression Evaluator", RegressionEvaluator.LoadName, "Regression")] @@ -26,7 +25,7 @@ namespace Microsoft.ML.Data { [BestFriend] internal sealed class RegressionEvaluator : - RegressionEvaluatorBase + RegressionEvaluatorBase { public sealed class Arguments : ArgumentsBase { @@ -107,7 +106,7 @@ public override double RSquared } } - protected override void UpdateCore(Float label, in float score, in double loss, Float weight) + protected override void UpdateCore(float label, in float score, in double loss, float weight) { Double currL1Loss = Math.Abs((Double)label - score); TotalL1Loss += currL1Loss * weight; @@ -145,9 +144,9 @@ protected override void ApplyLossFunction(in float score, float label, ref doubl loss = LossFunction.Loss(score, label); } - protected override bool IsNaN(in Float score) + protected override bool IsNaN(in float score) { - return Float.IsNaN(score); + return float.IsNaN(score); } public override void AddColumn(ArrayDataViewBuilder dvBldr, string metricName, params double[] metric) @@ -267,14 +266,14 @@ private protected override Delegate[] CreateGettersCore(DataViewRow input, Func< disposer = null; long cachedPosition = -1; - Float label = 0; - Float score = 0; + float label = 0; + float score = 0; - ValueGetter nan = (ref Float value) => value = Single.NaN; + ValueGetter nan = (ref float value) => value = Single.NaN; var labelGetter = activeCols(L1Col) || activeCols(L2Col) ? 
RowCursorUtils.GetLabelGetter(input, LabelIndex) : nan; - ValueGetter scoreGetter; + ValueGetter scoreGetter; if (activeCols(L1Col) || activeCols(L2Col)) - scoreGetter = input.GetGetter(ScoreIndex); + scoreGetter = input.GetGetter(ScoreIndex); else scoreGetter = nan; Action updateCacheIfNeeded = From 5573595fe4bd8225409991f3d368a7d6b41a3538 Mon Sep 17 00:00:00 2001 From: J W Date: Tue, 26 Feb 2019 21:49:50 -0500 Subject: [PATCH 4/6] Remove Float from more files --- .../Scorers/BinaryClassifierScorer.cs | 28 +++++++++---------- .../Scorers/ClusteringScorer.cs | 9 +++--- .../Scorers/MultiClassClassifierScorer.cs | 9 +++--- .../Scorers/PredictedLabelScorerBase.cs | 4 +-- 4 files changed, 22 insertions(+), 28 deletions(-) diff --git a/src/Microsoft.ML.Data/Scorers/BinaryClassifierScorer.cs b/src/Microsoft.ML.Data/Scorers/BinaryClassifierScorer.cs index 7a4162f298..5f943f1afd 100644 --- a/src/Microsoft.ML.Data/Scorers/BinaryClassifierScorer.cs +++ b/src/Microsoft.ML.Data/Scorers/BinaryClassifierScorer.cs @@ -7,11 +7,9 @@ using Microsoft.ML; using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; -using Microsoft.ML.Model; using Microsoft.ML.Model.OnnxConverter; using Microsoft.ML.Model.Pfa; using Newtonsoft.Json.Linq; -using Float = System.Single; [assembly: LoadableClass(typeof(BinaryClassifierScorer), typeof(BinaryClassifierScorer.Arguments), typeof(SignatureDataScorer), "Binary Classifier Scorer", "BinaryClassifierScorer", "BinaryClassifier", "Binary", @@ -46,7 +44,7 @@ private static VersionInfo GetVersionInfo() private const string RegistrationName = "BinaryClassifierScore"; - private readonly Float _threshold; + private readonly float _threshold; /// /// This function performs a number of checks on the inputs and, if appropriate and possible, will produce @@ -148,11 +146,11 @@ private BinaryClassifierScorer(IHost host, ModelLoadContext ctx, IDataView input // *** Binary format *** // - // int: sizeof(Float) - // Float: threshold + // int: sizeof(float) + // float: threshold int cbFloat = ctx.Reader.ReadInt32(); - Contracts.CheckDecode(cbFloat == sizeof(Float)); + Contracts.CheckDecode(cbFloat == sizeof(float)); _threshold = ctx.Reader.ReadFloat(); } @@ -174,11 +172,11 @@ private protected override void SaveCore(ModelSaveContext ctx) // *** Binary format *** // - // int: sizeof(Float) - // Float: threshold + // int: sizeof(float) + // float: threshold base.SaveCore(ctx); - ctx.Writer.Write(sizeof(Float)); + ctx.Writer.Write(sizeof(float)); ctx.Writer.Write(_threshold); } @@ -224,13 +222,13 @@ protected override Delegate GetPredictedLabelGetter(DataViewRow output, out Dele Host.Assert(output.Schema == Bindings.RowMapper.OutputSchema); Host.Assert(output.IsColumnActive(Bindings.ScoreColumnIndex)); - ValueGetter mapperScoreGetter = output.GetGetter(Bindings.ScoreColumnIndex); + ValueGetter mapperScoreGetter = output.GetGetter(Bindings.ScoreColumnIndex); long cachedPosition = -1; - Float score = 0; + float score = 0; - ValueGetter scoreFn = - (ref Float dst) => + ValueGetter scoreFn = + (ref float dst) => { EnsureCachedPosition(ref cachedPosition, ref score, output, mapperScoreGetter); dst = score; @@ -257,13 +255,13 @@ protected override Delegate GetPredictedLabelGetter(DataViewRow output, out Dele return predFn; } - private void GetPredictedLabelCore(Float score, ref bool value) + private void GetPredictedLabelCore(float score, ref bool value) { //Behavior for NA values is undefined. 
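// Note (inferred, not a line in the source): under IEEE 754 every comparison against
// float.NaN is false, so a NaN score produces 'false' in the assignment below and 0,
// the missing key value, in the key-typed variant that follows it.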
value = score > _threshold; } - private void GetPredictedLabelCoreAsKey(Float score, ref uint value) + private void GetPredictedLabelCoreAsKey(float score, ref uint value) { value = (uint)(score > _threshold ? 2 : score <= _threshold ? 1 : 0); } diff --git a/src/Microsoft.ML.Data/Scorers/ClusteringScorer.cs b/src/Microsoft.ML.Data/Scorers/ClusteringScorer.cs index ff05f1427c..a6bdfc289a 100644 --- a/src/Microsoft.ML.Data/Scorers/ClusteringScorer.cs +++ b/src/Microsoft.ML.Data/Scorers/ClusteringScorer.cs @@ -11,7 +11,6 @@ using Microsoft.ML.Model.Pfa; using Microsoft.ML.Numeric; using Newtonsoft.Json.Linq; -using Float = System.Single; [assembly: LoadableClass(typeof(ClusteringScorer), typeof(ClusteringScorer.Arguments), typeof(SignatureDataScorer), "Clustering Scorer", "ClusteringScorer", AnnotationUtils.Const.ScoreColumnKind.Clustering)] @@ -98,10 +97,10 @@ protected override Delegate GetPredictedLabelGetter(DataViewRow output, out Dele Contracts.Assert(output.Schema == Bindings.RowMapper.OutputSchema); Contracts.Assert(output.IsColumnActive(Bindings.ScoreColumnIndex)); - ValueGetter> mapperScoreGetter = output.GetGetter>(Bindings.ScoreColumnIndex); + ValueGetter> mapperScoreGetter = output.GetGetter>(Bindings.ScoreColumnIndex); long cachedPosition = -1; - VBuffer score = default(VBuffer); + VBuffer score = default(VBuffer); int keyCount = Bindings.PredColType is KeyType key ? key.GetCountAsInt32(Host) : 0; int scoreLength = keyCount; @@ -116,8 +115,8 @@ protected override Delegate GetPredictedLabelGetter(DataViewRow output, out Dele else dst = (uint)index + 1; }; - ValueGetter> scoreFn = - (ref VBuffer dst) => + ValueGetter> scoreFn = + (ref VBuffer dst) => { EnsureCachedPosition(ref cachedPosition, ref score, output, mapperScoreGetter); Contracts.Check(score.Length == scoreLength); diff --git a/src/Microsoft.ML.Data/Scorers/MultiClassClassifierScorer.cs b/src/Microsoft.ML.Data/Scorers/MultiClassClassifierScorer.cs index 8c8976900e..c1db01f626 100644 --- a/src/Microsoft.ML.Data/Scorers/MultiClassClassifierScorer.cs +++ b/src/Microsoft.ML.Data/Scorers/MultiClassClassifierScorer.cs @@ -15,7 +15,6 @@ using Microsoft.ML.Model.Pfa; using Microsoft.ML.Numeric; using Newtonsoft.Json.Linq; -using Float = System.Single; [assembly: LoadableClass(typeof(MultiClassClassifierScorer), typeof(MultiClassClassifierScorer.Arguments), typeof(SignatureDataScorer), @@ -502,10 +501,10 @@ protected override Delegate GetPredictedLabelGetter(DataViewRow output, out Dele Host.Assert(output.Schema == Bindings.RowMapper.OutputSchema); Host.Assert(output.IsColumnActive(Bindings.ScoreColumnIndex)); - ValueGetter> mapperScoreGetter = output.GetGetter>(Bindings.ScoreColumnIndex); + ValueGetter> mapperScoreGetter = output.GetGetter>(Bindings.ScoreColumnIndex); long cachedPosition = -1; - VBuffer score = default; + VBuffer score = default; int scoreLength = Bindings.PredColType.GetKeyCountAsInt32(Host); ValueGetter predFn = @@ -519,8 +518,8 @@ protected override Delegate GetPredictedLabelGetter(DataViewRow output, out Dele else dst = (uint)index + 1; }; - ValueGetter> scoreFn = - (ref VBuffer dst) => + ValueGetter> scoreFn = + (ref VBuffer dst) => { EnsureCachedPosition(ref cachedPosition, ref score, output, mapperScoreGetter); Host.Check(score.Length == scoreLength); diff --git a/src/Microsoft.ML.Data/Scorers/PredictedLabelScorerBase.cs b/src/Microsoft.ML.Data/Scorers/PredictedLabelScorerBase.cs index 86714d69a9..5708ca7ceb 100644 --- a/src/Microsoft.ML.Data/Scorers/PredictedLabelScorerBase.cs +++ 
b/src/Microsoft.ML.Data/Scorers/PredictedLabelScorerBase.cs @@ -7,11 +7,9 @@ using Microsoft.Data.DataView; using Microsoft.ML.CommandLine; using Microsoft.ML.Internal.Utilities; -using Microsoft.ML.Model; using Microsoft.ML.Model.OnnxConverter; using Microsoft.ML.Model.Pfa; using Newtonsoft.Json.Linq; -using Float = System.Single; namespace Microsoft.ML.Data { @@ -24,7 +22,7 @@ internal abstract class PredictedLabelScorerBase : RowToRowScorerBase, ITransfor public abstract class ThresholdArgumentsBase : ScorerArgumentsBase { [Argument(ArgumentType.AtMostOnce, HelpText = "Value for classification thresholding", ShortName = "t")] - public Float Threshold; + public float Threshold; [Argument(ArgumentType.AtMostOnce, HelpText = "Specify which predictor output to use for classification thresholding", ShortName = "tcol")] public string ThresholdColumn = AnnotationUtils.Const.ScoreValueKind.Score; From 5183c1d997f1069d87f97954b0b36456c4ecdaea Mon Sep 17 00:00:00 2001 From: J W Date: Tue, 26 Feb 2019 23:09:09 -0500 Subject: [PATCH 5/6] Change other Floats to floats --- .../Scorers/SchemaBindablePredictorWrapper.cs | 43 +++-- .../Transforms/GenerateNumberTransform.cs | 19 +- .../Transforms/LabelConvertTransform.cs | 6 +- src/Microsoft.ML.Data/Transforms/NAFilter.cs | 4 +- .../Transforms/RangeFilter.cs | 6 +- src/Microsoft.ML.Data/Utils/LossFunctions.cs | 123 ++++++------- src/Microsoft.ML.FastTree/BoostingFastTree.cs | 6 +- src/Microsoft.ML.FastTree/FastTree.cs | 93 +++++----- .../Training/EarlyStoppingCriteria.cs | 57 +++--- .../InternalQuantileRegressionTree.cs | 11 +- .../KMeansModelParameters.cs | 35 ++-- .../ResultProcessor.cs | 45 +++-- .../Optimizer/DifferentiableFunction.cs | 133 +++++++------- .../Optimizer/L1Optimizer.cs | 39 ++-- .../Optimizer/LineSearch.cs | 173 +++++++++--------- .../Optimizer/OptimizationMonitor.cs | 67 ++++--- .../Optimizer/Optimizer.cs | 105 ++++++----- .../Optimizer/SgdOptimizer.cs | 79 ++++---- .../Standard/LinearPredictorUtils.cs | 15 +- .../Standard/SdcaMultiClass.cs | 37 ++-- .../Algorithms/KdoSweeper.cs | 15 +- .../Algorithms/NelderMead.cs | 75 ++++---- .../Algorithms/SmacSweeper.cs | 21 +-- src/Microsoft.ML.Sweeper/ISweeper.cs | 13 +- src/Microsoft.ML.Sweeper/Parameters.cs | 61 +++--- src/Microsoft.ML.TimeSeries/EigenUtils.cs | 57 +++--- .../MissingValueIndicatorTransform.cs | 27 ++- .../Text/NgramUtils.cs | 7 +- .../TestPredictors.cs | 98 +++++----- .../DataPipe/TestDataPipe.cs | 67 ++++--- 30 files changed, 750 insertions(+), 787 deletions(-) diff --git a/src/Microsoft.ML.Data/Scorers/SchemaBindablePredictorWrapper.cs b/src/Microsoft.ML.Data/Scorers/SchemaBindablePredictorWrapper.cs index 2781c05fe8..1d1e16caba 100644 --- a/src/Microsoft.ML.Data/Scorers/SchemaBindablePredictorWrapper.cs +++ b/src/Microsoft.ML.Data/Scorers/SchemaBindablePredictorWrapper.cs @@ -16,7 +16,6 @@ using Microsoft.ML.Model.OnnxConverter; using Microsoft.ML.Model.Pfa; using Newtonsoft.Json.Linq; -using Float = System.Single; [assembly: LoadableClass(typeof(SchemaBindablePredictorWrapper), null, typeof(SignatureLoadModel), "Bindable Mapper", SchemaBindablePredictorWrapper.LoaderSignature)] @@ -440,7 +439,7 @@ private protected override bool SaveAsOnnxCore(OnnxContext ctx, RoleMappedSchema private void CheckValid(out IValueMapperDist distMapper) { - Contracts.Check(ScoreType == NumberDataViewType.Single, "Expected predictor result type to be Float"); + Contracts.Check(ScoreType == NumberDataViewType.Single, "Expected predictor result type to be float"); distMapper = Predictor as 
IValueMapperDist; if (distMapper == null) @@ -467,7 +466,7 @@ private protected override ISchemaBoundMapper BindCore(IChannel ch, RoleMappedSc /// /// The implementation for distribution predictor wrappers that produce - /// two Float-valued output columns. Note that the Bindable wrapper does input schema validation. + /// two float-valued output columns. Note that the Bindable wrapper does input schema validation. /// private sealed class CalibratedRowMapper : ISchemaBoundRowMapper { @@ -525,18 +524,18 @@ private Delegate[] CreateGetters(DataViewRow input, bool[] active) if (active[0] || active[1]) { // Put all captured locals at this scope. - var featureGetter = InputRoleMappedSchema.Feature?.Index is int idx ? input.GetGetter>(idx) : null; - Float prob = 0; - Float score = 0; + var featureGetter = InputRoleMappedSchema.Feature?.Index is int idx ? input.GetGetter>(idx) : null; + float prob = 0; + float score = 0; long cachedPosition = -1; - var features = default(VBuffer); - ValueMapper, Float, Float> mapper; + var features = default(VBuffer); + ValueMapper, float, float> mapper; - mapper = _parent._distMapper.GetMapper, Float, Float>(); + mapper = _parent._distMapper.GetMapper, float, float>(); if (active[0]) { - ValueGetter getScore = - (ref Float dst) => + ValueGetter getScore = + (ref float dst) => { EnsureCachedResultValueMapper(mapper, ref cachedPosition, featureGetter, ref features, ref score, ref prob, input); dst = score; @@ -545,8 +544,8 @@ private Delegate[] CreateGetters(DataViewRow input, bool[] active) } if (active[1]) { - ValueGetter getProb = - (ref Float dst) => + ValueGetter getProb = + (ref float dst) => { EnsureCachedResultValueMapper(mapper, ref cachedPosition, featureGetter, ref features, ref score, ref prob, input); dst = prob; @@ -557,9 +556,9 @@ private Delegate[] CreateGetters(DataViewRow input, bool[] active) return getters; } - private static void EnsureCachedResultValueMapper(ValueMapper, Float, Float> mapper, - ref long cachedPosition, ValueGetter> featureGetter, ref VBuffer features, - ref Float score, ref Float prob, DataViewRow input) + private static void EnsureCachedResultValueMapper(ValueMapper, float, float> mapper, + ref long cachedPosition, ValueGetter> featureGetter, ref VBuffer features, + ref float score, ref float prob, DataViewRow input) { Contracts.AssertValue(mapper); if (cachedPosition != input.Position) @@ -672,17 +671,17 @@ protected override Delegate GetPredictionGetter(DataViewRow input, int colSrc) typeSrc.Size == ValueMapper.InputType.GetVectorSize() || ValueMapper.InputType.GetVectorSize() == 0); Contracts.Assert(Utils.Size(_quantiles) > 0); - var featureGetter = input.GetGetter>(colSrc); + var featureGetter = input.GetGetter>(colSrc); var featureCount = ValueMapper != null ? 
ValueMapper.InputType.GetVectorSize() : 0; - var quantiles = new Float[_quantiles.Length]; + var quantiles = new float[_quantiles.Length]; for (int i = 0; i < quantiles.Length; i++) - quantiles[i] = (Float)_quantiles[i]; + quantiles[i] = (float)_quantiles[i]; var map = _qpred.GetMapper(quantiles); - var features = default(VBuffer); - ValueGetter> del = - (ref VBuffer value) => + var features = default(VBuffer); + ValueGetter> del = + (ref VBuffer value) => { featureGetter(ref features); Contracts.Check(features.Length == featureCount || featureCount == 0); diff --git a/src/Microsoft.ML.Data/Transforms/GenerateNumberTransform.cs b/src/Microsoft.ML.Data/Transforms/GenerateNumberTransform.cs index 91d798d2a7..1336b95e65 100644 --- a/src/Microsoft.ML.Data/Transforms/GenerateNumberTransform.cs +++ b/src/Microsoft.ML.Data/Transforms/GenerateNumberTransform.cs @@ -13,7 +13,6 @@ using Microsoft.ML.Internal.Utilities; using Microsoft.ML.Model; using Microsoft.ML.Transforms; -using Float = System.Single; [assembly: LoadableClass(GenerateNumberTransform.Summary, typeof(GenerateNumberTransform), typeof(GenerateNumberTransform.Options), typeof(SignatureDataTransform), GenerateNumberTransform.UserName, GenerateNumberTransform.LoadName, "GenerateNumber", GenerateNumberTransform.ShortName)] @@ -292,10 +291,10 @@ private GenerateNumberTransform(IHost host, ModelLoadContext ctx, IDataView inpu Host.AssertValue(ctx); // *** Binary format *** - // int: sizeof(Float) + // int: sizeof(float) // bindings int cbFloat = ctx.Reader.ReadInt32(); - Host.CheckDecode(cbFloat == sizeof(Float)); + Host.CheckDecode(cbFloat == sizeof(float)); _bindings = Bindings.Create(ctx, Source.Schema); } @@ -316,9 +315,9 @@ private protected override void SaveModel(ModelSaveContext ctx) ctx.SetVersionInfo(GetVersionInfo()); // *** Binary format *** - // int: sizeof(Float) + // int: sizeof(float) // bindings - ctx.Writer.Write(sizeof(Float)); + ctx.Writer.Write(sizeof(float)); _bindings.Save(ctx); } @@ -385,7 +384,7 @@ private sealed class Cursor : SynchronizedCursorBase private readonly Bindings _bindings; private readonly bool[] _active; private readonly Delegate[] _getters; - private readonly Float[] _values; + private readonly float[] _values; private readonly TauswortheHybrid[] _rngs; private readonly long[] _lastCounters; @@ -400,7 +399,7 @@ public Cursor(IChannelProvider provider, Bindings bindings, DataViewRowCursor in _active = active; var length = _bindings.InfoCount; _getters = new Delegate[length]; - _values = new Float[length]; + _values = new float[length]; _rngs = new TauswortheHybrid[length]; _lastCounters = new long[length]; for (int iinfo = 0; iinfo < length; iinfo++) @@ -447,7 +446,7 @@ private ValueGetter MakeGetter() }; } - private void EnsureValue(ref long lastCounter, ref Float value, TauswortheHybrid rng) + private void EnsureValue(ref long lastCounter, ref float value, TauswortheHybrid rng) { Ch.Assert(lastCounter <= Input.Position); while (lastCounter < Input.Position) @@ -457,9 +456,9 @@ private void EnsureValue(ref long lastCounter, ref Float value, TauswortheHybrid } } - private ValueGetter MakeGetter(int iinfo) + private ValueGetter MakeGetter(int iinfo) { - return (ref Float value) => + return (ref float value) => { Ch.Check(IsGood, RowCursorUtils.FetchValueStateError); Ch.Assert(!_bindings.UseCounter[iinfo]); diff --git a/src/Microsoft.ML.Data/Transforms/LabelConvertTransform.cs b/src/Microsoft.ML.Data/Transforms/LabelConvertTransform.cs index dd30b70bd0..4b49721bb3 100644 --- 
a/src/Microsoft.ML.Data/Transforms/LabelConvertTransform.cs +++ b/src/Microsoft.ML.Data/Transforms/LabelConvertTransform.cs @@ -10,9 +10,7 @@ using Microsoft.ML.CommandLine; using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; -using Microsoft.ML.Model; using Microsoft.ML.Transforms; -using Float = System.Single; [assembly: LoadableClass(LabelConvertTransform.Summary, typeof(LabelConvertTransform), typeof(LabelConvertTransform.Arguments), typeof(SignatureDataTransform), "", "LabelConvert", "LabelConvertTransform")] @@ -116,7 +114,7 @@ public static LabelConvertTransform Create(IHostEnvironment env, ModelLoadContex // int: sizeof(Float) // int cbFloat = ctx.Reader.ReadInt32(); - h.CheckDecode(cbFloat == sizeof(Float)); + h.CheckDecode(cbFloat == sizeof(float)); return new LabelConvertTransform(h, ctx, input); }); } @@ -131,7 +129,7 @@ private protected override void SaveModel(ModelSaveContext ctx) // int: sizeof(Float) // Host.AssertNonEmpty(Infos); - ctx.Writer.Write(sizeof(Float)); + ctx.Writer.Write(sizeof(float)); SaveBase(ctx); } diff --git a/src/Microsoft.ML.Data/Transforms/NAFilter.cs b/src/Microsoft.ML.Data/Transforms/NAFilter.cs index 725f37aa3d..bf8a394855 100644 --- a/src/Microsoft.ML.Data/Transforms/NAFilter.cs +++ b/src/Microsoft.ML.Data/Transforms/NAFilter.cs @@ -13,9 +13,7 @@ using Microsoft.ML.Data; using Microsoft.ML.EntryPoints; using Microsoft.ML.Internal.Utilities; -using Microsoft.ML.Model; using Microsoft.ML.Transforms; -using Float = System.Single; [assembly: LoadableClass(NAFilter.Summary, typeof(NAFilter), typeof(NAFilter.Arguments), typeof(SignatureDataTransform), NAFilter.FriendlyName, NAFilter.ShortName, "MissingValueFilter", "MissingFilter")] @@ -179,7 +177,7 @@ private protected override void SaveModel(ModelSaveContext ctx) // int: sizeof(Float) // int: number of columns // int[]: ids of column names - ctx.Writer.Write(sizeof(Float)); + ctx.Writer.Write(sizeof(float)); Host.Assert(_infos.Length > 0); ctx.Writer.Write(_infos.Length); foreach (var info in _infos) diff --git a/src/Microsoft.ML.Data/Transforms/RangeFilter.cs b/src/Microsoft.ML.Data/Transforms/RangeFilter.cs index fb08fc34d2..10a7f4d106 100644 --- a/src/Microsoft.ML.Data/Transforms/RangeFilter.cs +++ b/src/Microsoft.ML.Data/Transforms/RangeFilter.cs @@ -12,9 +12,7 @@ using Microsoft.ML.Data; using Microsoft.ML.EntryPoints; using Microsoft.ML.Internal.Utilities; -using Microsoft.ML.Model; using Microsoft.ML.Transforms; -using Float = System.Single; [assembly: LoadableClass(RangeFilter.Summary, typeof(RangeFilter), typeof(RangeFilter.Options), typeof(SignatureDataTransform), RangeFilter.UserName, "RangeFilter")] @@ -147,7 +145,7 @@ private RangeFilter(IHost host, ModelLoadContext ctx, IDataView input) // double: max // byte: complement int cbFloat = ctx.Reader.ReadInt32(); - Host.CheckDecode(cbFloat == sizeof(Float)); + Host.CheckDecode(cbFloat == sizeof(float)); var column = ctx.LoadNonEmptyString(); var schema = Source.Schema; @@ -191,7 +189,7 @@ private protected override void SaveModel(ModelSaveContext ctx) // byte: complement // byte: includeMin // byte: includeMax - ctx.Writer.Write(sizeof(Float)); + ctx.Writer.Write(sizeof(float)); ctx.SaveNonEmptyString(Source.Schema[_index].Name); Host.Assert(_min < _max); ctx.Writer.Write(_min); diff --git a/src/Microsoft.ML.Data/Utils/LossFunctions.cs b/src/Microsoft.ML.Data/Utils/LossFunctions.cs index bf3eec4af7..a48b05c1be 100644 --- a/src/Microsoft.ML.Data/Utils/LossFunctions.cs +++ b/src/Microsoft.ML.Data/Utils/LossFunctions.cs @@ -7,7 +7,6 
@@ using Microsoft.ML.CommandLine; using Microsoft.ML.EntryPoints; using Microsoft.ML.Internal.Utilities; -using Float = System.Single; [assembly: LoadableClass(LogLoss.Summary, typeof(LogLoss), null, typeof(SignatureClassificationLoss), "Log Loss", "LogLoss", "Logistic", "CrossEntropy")] @@ -52,7 +51,7 @@ public interface ISupportSdcaLoss : IScalarOutputLoss // - lambda is the L2 const // - n is the number of instances // Note that if we are going to implement Online-DCA then n = t and varies. - Float ComputeDualUpdateInvariant(Float scaledFeaturesNormSquared); + float ComputeDualUpdateInvariant(float scaledFeaturesNormSquared); /// /// Compute the dual update (\Delta\alpha_i) in SDCA @@ -60,7 +59,7 @@ public interface ISupportSdcaLoss : IScalarOutputLoss /// - lambdaN: L2 const x number of instances /// - cached invariant, hinted by the method above /// - Float DualUpdate(Float output, Float label, Float dual, Float invariant, int maxNumThreads); + float DualUpdate(float output, float label, float dual, float invariant, int maxNumThreads); /// /// The dual loss function for a training example. @@ -70,7 +69,7 @@ public interface ISupportSdcaLoss : IScalarOutputLoss /// /// The label of the example. /// The dual variable of the example. - Double DualLoss(Float label, Double dual); + Double DualLoss(float label, Double dual); } public interface ISupportSdcaClassificationLoss : ISupportSdcaLoss, IClassificationLoss @@ -104,30 +103,30 @@ public sealed class LogLossFactory : ISupportSdcaClassificationLossFactory, ISup public sealed class LogLoss : ISupportSdcaClassificationLoss { internal const string Summary = "The log loss function for classification. Supported by SDCA."; - private const Float Threshold = 0.5f; + private const float Threshold = 0.5f; - public Double Loss(Float output, Float label) + public Double Loss(float output, float label) { - Float prediction = MathUtils.Sigmoid(output); + float prediction = MathUtils.Sigmoid(output); return label > 0 ? -Log(prediction) : -Log(1 - prediction); } - public Float Derivative(Float output, Float label) + public float Derivative(float output, float label) { - Float prediction = MathUtils.Sigmoid(output); + float prediction = MathUtils.Sigmoid(output); return label > 0 ? prediction - 1 : prediction; } - public Float ComputeDualUpdateInvariant(Float scaledFeaturesNormSquared) + public float ComputeDualUpdateInvariant(float scaledFeaturesNormSquared) { - return 1 / Math.Max(1, (Float)0.25 + scaledFeaturesNormSquared); + return 1 / Math.Max(1, (float)0.25 + scaledFeaturesNormSquared); } // REVIEW: this dual update uses a different log loss formulation, //although the two are equivalents if the labels are restricted to 0 and 1 //Need to update so that it can handle probability label and true to the //definition, which is a smooth loss function - public Float DualUpdate(Float output, Float label, Float dual, Float invariant, int maxNumThreads) + public float DualUpdate(float output, float label, float dual, float invariant, int maxNumThreads) { label = label > 0 ? 1 : -1; @@ -137,7 +136,7 @@ public Float DualUpdate(Float output, Float label, Float dual, Float invariant, return maxNumThreads >= 2 && Math.Abs(fullUpdate) > Threshold ? fullUpdate / maxNumThreads : fullUpdate; } - public Double DualLoss(Float label, Double dual) + public Double DualLoss(float label, Double dual) { // Normalize the dual with label. 
if (label <= 0) @@ -165,7 +164,7 @@ public sealed class HingeLoss : ISupportSdcaClassificationLoss public sealed class Options : ISupportSdcaClassificationLossFactory, ISupportClassificationLossFactory { [Argument(ArgumentType.AtMostOnce, HelpText = "Margin value", ShortName = "marg")] - public Float Margin = Defaults.Margin; + public float Margin = Defaults.Margin; public ISupportSdcaClassificationLoss CreateComponent(IHostEnvironment env) => new HingeLoss(this); @@ -173,8 +172,8 @@ public sealed class Options : ISupportSdcaClassificationLossFactory, ISupportCla } internal const string Summary = "The Hinge loss function for classification. Supported by SDCA."; - private const Float Threshold = 0.5f; - private readonly Float _margin; + private const float Threshold = 0.5f; + private readonly float _margin; internal HingeLoss(Options options) { @@ -191,33 +190,33 @@ public HingeLoss(float margin = Defaults.Margin) { } - public Double Loss(Float output, Float label) + public Double Loss(float output, float label) { - Float truth = label > 0 ? 1 : -1; - Float loss = _margin - truth * output; + float truth = label > 0 ? 1 : -1; + float loss = _margin - truth * output; return loss > 0 ? loss : 0; } - public Float Derivative(Float output, Float label) + public float Derivative(float output, float label) { - Float truth = label > 0 ? 1 : -1; + float truth = label > 0 ? 1 : -1; return _margin > truth * output ? -truth : 0; } - public Float ComputeDualUpdateInvariant(Float scaledFeaturesNormSquared) + public float ComputeDualUpdateInvariant(float scaledFeaturesNormSquared) { return 1 / scaledFeaturesNormSquared; } - public Float DualUpdate(Float output, Float label, Float alpha, Float invariant, int maxNumThreads) + public float DualUpdate(float output, float label, float alpha, float invariant, int maxNumThreads) { - Float truth = label > 0 ? 1 : -1; + float truth = label > 0 ? 1 : -1; var tmp = (_margin - output * truth) * invariant + alpha * truth; var fullUpdate = truth * Math.Max(0, Math.Min(1, tmp)) - alpha; return maxNumThreads >= 2 && Math.Abs(fullUpdate) > Threshold ? fullUpdate / maxNumThreads : fullUpdate; } - public Double DualLoss(Float label, Double dual) + public Double DualLoss(float label, Double dual) { if (label <= 0) dual = -dual; @@ -237,7 +236,7 @@ public sealed class SmoothedHingeLoss : ISupportSdcaClassificationLoss public sealed class Options : ISupportSdcaClassificationLossFactory, ISupportClassificationLossFactory { [Argument(ArgumentType.AtMostOnce, HelpText = "Smoothing constant", ShortName = "smooth")] - public Float SmoothingConst = Defaults.SmoothingConst; + public float SmoothingConst = Defaults.SmoothingConst; public ISupportSdcaClassificationLoss CreateComponent(IHostEnvironment env) => new SmoothedHingeLoss(env, this); @@ -245,9 +244,9 @@ public sealed class Options : ISupportSdcaClassificationLossFactory, ISupportCla } internal const string Summary = "The smooth Hinge loss function for classification. 
Supported by SDCA."; - private const Float Threshold = 0.5f; + private const float Threshold = 0.5f; // The smoothed Hinge loss is 1/(_SmoothParam) smooth (its definition can be found in http://jmlr.org/papers/volume14/shalev-shwartz13a/shalev-shwartz13a.pdf (page 568 Definition 1) - private readonly Float _smoothConst; + private readonly float _smoothConst; private readonly Double _halfSmoothConst; private readonly Double _doubleSmoothConst; @@ -273,10 +272,10 @@ private SmoothedHingeLoss(IHostEnvironment env, Options options) { } - public Double Loss(Float output, Float label) + public Double Loss(float output, float label) { - Float truth = label > 0 ? 1 : -1; - Float u = 1 - truth * output; + float truth = label > 0 ? 1 : -1; + float u = 1 - truth * output; if (u < 0) return 0; @@ -287,10 +286,10 @@ public Double Loss(Float output, Float label) return u - _halfSmoothConst; } - public Float Derivative(Float output, Float label) + public float Derivative(float output, float label) { - Float truth = label > 0 ? 1 : -1; - Float u = 1 - truth * output; + float truth = label > 0 ? 1 : -1; + float u = 1 - truth * output; if (u < 0) return 0; @@ -301,20 +300,20 @@ public Float Derivative(Float output, Float label) return -truth; } - public Float ComputeDualUpdateInvariant(Float scaledFeaturesNormSquared) + public float ComputeDualUpdateInvariant(float scaledFeaturesNormSquared) { return 1 / (scaledFeaturesNormSquared + _smoothConst); } - public Float DualUpdate(Float output, Float label, Float alpha, Float invariant, int maxNumThreads) + public float DualUpdate(float output, float label, float alpha, float invariant, int maxNumThreads) { - Float truth = label > 0 ? 1 : -1; + float truth = label > 0 ? 1 : -1; var tmp = (1 - output * truth - _smoothConst * alpha * truth) * invariant + alpha * truth; var fullUpdate = truth * Math.Max(0, Math.Min(1, tmp)) - alpha; return maxNumThreads >= 2 && Math.Abs(fullUpdate) > Threshold ? fullUpdate / maxNumThreads : fullUpdate; } - public Double DualLoss(Float label, Double dual) + public Double DualLoss(float label, Double dual) { if (label <= 0) dual = -dual; @@ -336,30 +335,30 @@ public sealed class ExpLoss : IClassificationLoss public sealed class Options : ISupportClassificationLossFactory { [Argument(ArgumentType.AtMostOnce, HelpText = "Beta (dilation)", ShortName = "beta")] - public Float Beta = 1; + public float Beta = 1; public IClassificationLoss CreateComponent(IHostEnvironment env) => new ExpLoss(this); } internal const string Summary = "The exponential loss function for classification."; - private readonly Float _beta; + private readonly float _beta; public ExpLoss(Options options) { _beta = options.Beta; } - public Double Loss(Float output, Float label) + public Double Loss(float output, float label) { - Float truth = label > 0 ? 1 : -1; + float truth = label > 0 ? 1 : -1; return MathUtils.ExpSlow(-_beta * truth * output); } - public Float Derivative(Float output, Float label) + public float Derivative(float output, float label) { - Float truth = label > 0 ? 1 : -1; - Float factor = -_beta * truth; + float truth = label > 0 ? 
1 : -1; + float factor = -_beta * truth; return factor * MathUtils.ExpSlow(factor * output); } } @@ -376,30 +375,30 @@ public sealed class SquaredLoss : ISupportSdcaRegressionLoss { internal const string Summary = "The squared loss function for regression."; - public Double Loss(Float output, Float label) + public Double Loss(float output, float label) { - Float diff = output - label; + float diff = output - label; return diff * diff; } - public Float Derivative(Float output, Float label) + public float Derivative(float output, float label) { - Float diff = output - label; + float diff = output - label; return 2 * diff; } - public Float ComputeDualUpdateInvariant(Float scaledFeaturesNormSquared) + public float ComputeDualUpdateInvariant(float scaledFeaturesNormSquared) { - return 1 / ((Float)0.5 + scaledFeaturesNormSquared); + return 1 / ((float)0.5 + scaledFeaturesNormSquared); } - public Float DualUpdate(Float output, Float label, Float dual, Float invariant, int maxNumThreads) + public float DualUpdate(float output, float label, float dual, float invariant, int maxNumThreads) { - var fullUpdate = (label - output - (Float)0.5 * dual) * invariant; + var fullUpdate = (label - output - (float)0.5 * dual) * invariant; return maxNumThreads >= 2 ? fullUpdate / maxNumThreads : fullUpdate; } - public Double DualLoss(Float label, Double dual) + public Double DualLoss(float label, Double dual) { return -dual * (dual / 4 - label); } @@ -418,7 +417,7 @@ public sealed class PoissonLoss : IRegressionLoss { internal const string Summary = "The Poisson loss function for regression."; - public Double Loss(Float output, Float label) + public Double Loss(float output, float label) { // REVIEW: This is stupid and leads to error whenever this loss is used in an evaluator. // The output is in the log-space, while the label is in the original space, while the evaluator @@ -426,9 +425,9 @@ public Double Loss(Float output, Float label) return Math.Exp(output) - label * output; } - public Float Derivative(Float output, Float label) + public float Derivative(float output, float label) { - return (Float)Math.Exp(output) - label; + return (float)Math.Exp(output) - label; } } @@ -475,14 +474,14 @@ public TweedieLoss(double index = 1.5) _index2 = 2 - _index; } - private static void Clamp(ref Float val) + private static void Clamp(ref float val) { - const Float eps = (Float)1e-7; + const float eps = (float)1e-7; if (val < eps) // I tawt I taw a negwawive wowue. val = eps; // I did! I did taw a negwawive wowue!! 
         }
 
-        public Double Loss(Float output, Float label)
+        public Double Loss(float output, float label)
         {
             Clamp(ref output);
             Clamp(ref label);
@@ -502,14 +501,14 @@ public Double Loss(Float output, Float label)
                 - (Math.Pow(label, _index2) / _index2 - label * Math.Pow(label, _index1) / _index1);
         }
 
-        public Float Derivative(Float output, Float label)
+        public float Derivative(float output, float label)
         {
             Clamp(ref output);
             Clamp(ref label);
             if (_index1 == 0)
                 return output - label;
 
-            return (Float)(Math.Pow(output, _index2) - label * Math.Pow(output, _index1));
+            return (float)(Math.Pow(output, _index2) - label * Math.Pow(output, _index1));
         }
     }
 }
\ No newline at end of file
diff --git a/src/Microsoft.ML.FastTree/BoostingFastTree.cs b/src/Microsoft.ML.FastTree/BoostingFastTree.cs
index 847e9b6208..0281b8ab88 100644
--- a/src/Microsoft.ML.FastTree/BoostingFastTree.cs
+++ b/src/Microsoft.ML.FastTree/BoostingFastTree.cs
@@ -4,8 +4,6 @@
 using System;
 using System.Linq;
-using Microsoft.ML.Internal.Internallearn;
-using Float = System.Single;
 
 namespace Microsoft.ML.Trainers.FastTree
 {
@@ -138,8 +136,8 @@ private protected override bool ShouldStop(IChannel ch, ref IEarlyStoppingCriter
                 }
 
                 bool isBestCandidate;
-                bool shouldStop = earlyStoppingRule.CheckScore((Float)validationResult.FinalValue,
-                    (Float)trainingResult.FinalValue, out isBestCandidate);
+                bool shouldStop = earlyStoppingRule.CheckScore((float)validationResult.FinalValue,
+                    (float)trainingResult.FinalValue, out isBestCandidate);
 
                 if (isBestCandidate)
                     bestIteration = Ensemble.NumTrees;
diff --git a/src/Microsoft.ML.FastTree/FastTree.cs b/src/Microsoft.ML.FastTree/FastTree.cs
index 3a532f2506..2436d90124 100644
--- a/src/Microsoft.ML.FastTree/FastTree.cs
+++ b/src/Microsoft.ML.FastTree/FastTree.cs
@@ -24,7 +24,6 @@
 using Microsoft.ML.Transforms.Conversions;
 using Microsoft.ML.TreePredictor;
 using Newtonsoft.Json.Linq;
-using Float = System.Single;
 
 // All of these reviews apply in general to fast tree and random forest implementations.
 //REVIEW: Decouple train method in Application.cs to have boosting and random forest logic separate.
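Note: the LossFunctions.cs hunks above only respell the type; the loss surface itself (Loss/Derivative plus the SDCA helpers) is unchanged. As orientation, a minimal sketch of that surface after the rename; the inputs and the explicit margin below are invented for illustration, not taken from this patch:

    IClassificationLoss loss = new HingeLoss(margin: 1);
    float output = 0.3f, label = 1f;             // raw score, binary label
    Double cost = loss.Loss(output, label);      // hinge: max(0, 1 - (+1) * 0.3) = 0.7
    float grad = loss.Derivative(output, label); // -1 inside the margin, 0 otherwise

The split in return types is deliberate in this design: losses are accumulated in double precision (the evaluators' counters are double-based), while derivatives feed float-typed training code.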
@@ -171,9 +170,9 @@ private protected FastTreeTrainerBase(IHostEnvironment env, TOptions options, Sc private protected abstract ObjectiveFunctionBase ConstructObjFunc(IChannel ch); - private protected virtual Float GetMaxLabel() + private protected virtual float GetMaxLabel() { - return Float.PositiveInfinity; + return float.PositiveInfinity; } private void Initialize(IHostEnvironment env) @@ -895,7 +894,7 @@ internal abstract class DataConverter private protected readonly int NumFeatures; public abstract int NumExamples { get; } - private protected readonly Float MaxLabel; + private protected readonly float MaxLabel; private protected readonly PredictionKind PredictionKind; @@ -926,10 +925,10 @@ internal abstract class DataConverter private protected bool UsingMaxLabel { - get { return MaxLabel != Float.PositiveInfinity; } + get { return MaxLabel != float.PositiveInfinity; } } - private DataConverter(RoleMappedData data, IHost host, Double[][] binUpperBounds, Float maxLabel, + private DataConverter(RoleMappedData data, IHost host, Double[][] binUpperBounds, float maxLabel, PredictionKind kind, int[] categoricalFeatureIndices, bool categoricalSplit) { Contracts.AssertValue(host, "host"); @@ -956,7 +955,7 @@ private DataConverter(RoleMappedData data, IHost host, Double[][] binUpperBounds } public static DataConverter Create(RoleMappedData data, IHost host, int maxBins, - Float maxLabel, bool diskTranspose, bool noFlocks, int minDocsPerLeaf, PredictionKind kind, + float maxLabel, bool diskTranspose, bool noFlocks, int minDocsPerLeaf, PredictionKind kind, IParallelTraining parallelTraining, int[] categoricalFeatureIndices, bool categoricalSplit) { Contracts.AssertValue(host, "host"); @@ -975,7 +974,7 @@ public static DataConverter Create(RoleMappedData data, IHost host, int maxBins, } public static DataConverter Create(RoleMappedData data, IHost host, Double[][] binUpperBounds, - Float maxLabel, bool diskTranspose, bool noFlocks, PredictionKind kind, int[] categoricalFeatureIndices, bool categoricalSplit) + float maxLabel, bool diskTranspose, bool noFlocks, PredictionKind kind, int[] categoricalFeatureIndices, bool categoricalSplit) { Contracts.AssertValue(host, "host"); host.AssertValue(data); @@ -1285,7 +1284,7 @@ private sealed class DiskImpl : DataConverter public override int NumExamples { get { return _numExamples; } } - public DiskImpl(RoleMappedData data, IHost host, int maxBins, Float maxLabel, PredictionKind kind, + public DiskImpl(RoleMappedData data, IHost host, int maxBins, float maxLabel, PredictionKind kind, IParallelTraining parallelTraining, int[] categoricalFeatureIndices, bool categoricalSplit) : base(data, host, null, maxLabel, kind, categoricalFeatureIndices, categoricalSplit) { @@ -1295,7 +1294,7 @@ public DiskImpl(RoleMappedData data, IHost host, int maxBins, Float maxLabel, Pr } public DiskImpl(RoleMappedData data, IHost host, - double[][] binUpperBounds, Float maxLabel, PredictionKind kind, int[] categoricalFeatureIndices, bool categoricalSplit) + double[][] binUpperBounds, float maxLabel, PredictionKind kind, int[] categoricalFeatureIndices, bool categoricalSplit) : base(data, host, binUpperBounds, maxLabel, kind, categoricalFeatureIndices, categoricalSplit) { _dataset = Construct(data, ref _numExamples, -1, null); @@ -1397,7 +1396,7 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB BinFinder finder = new BinFinder(); FeaturesToContentMap fmap = new FeaturesToContentMap(examples.Schema); - var hasMissingPred = 
Conversions.Instance.GetHasMissingPredicate(((ITransposeDataView)trans).GetSlotType(featIdx)); + var hasMissingPred = Conversions.Instance.GetHasMissingPredicate(((ITransposeDataView)trans).GetSlotType(featIdx)); // There is no good mechanism to filter out rows with missing feature values on transposed data. // So, we instead perform one featurization pass which, if successful, will remain one pass but, // if we ever encounter missing values will become a "detect missing features" pass, which will @@ -1416,12 +1415,12 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB using (var cursor = trans.GetSlotCursor(featIdx)) { HashSet constructed = new HashSet(); - var getter = SubsetGetter(cursor.GetGetter(), slotDropper); + var getter = SubsetGetter(cursor.GetGetter(), slotDropper); numExamples = slotDropper?.DstLength ?? trans.RowCount; // Perhaps we should change the binning to just work over singles. VBuffer doubleTemp = default(VBuffer); - var copier = GetCopier(NumberDataViewType.Single, NumberDataViewType.Double); + var copier = GetCopier(NumberDataViewType.Single, NumberDataViewType.Double); int iFeature = 0; pch.SetHeader(new ProgressHeader("features"), e => e.SetProgress(0, iFeature, features.Length)); while (cursor.MoveNext()) @@ -1482,15 +1481,15 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB using (var cursor = trans.GetSlotCursor(featIdx)) using (var catCursor = trans.GetSlotCursor(featIdx)) { - var getter = SubsetGetter(cursor.GetGetter(), slotDropper); - var catGetter = SubsetGetter(catCursor.GetGetter(), slotDropper); + var getter = SubsetGetter(cursor.GetGetter(), slotDropper); + var catGetter = SubsetGetter(catCursor.GetGetter(), slotDropper); numExamples = slotDropper?.DstLength ?? trans.RowCount; // Perhaps we should change the binning to just work over singles. 
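// Note (inferred from this hunk, not a line in the source): the bin finder works in
// double precision. Feature slots are read as singles and widened through the
// Single-to-Double copier below before the bin upper bounds (double[][]) are computed,
// which is why doubleTemp is a buffer of doubles; the REVIEW above asks to collapse
// this to singles.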
VBuffer doubleTemp = default(VBuffer); int[] binnedValues = new int[numExamples]; - var copier = GetCopier(NumberDataViewType.Single, NumberDataViewType.Double); + var copier = GetCopier(NumberDataViewType.Single, NumberDataViewType.Double); int iFeature = 0; if (CategoricalSplit && CategoricalFeatureIndices != null) { @@ -1600,7 +1599,7 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB if (labelIdx >= 0) { - trans.GetSingleSlotValue(labelIdx, ref temp); + trans.GetSingleSlotValue(labelIdx, ref temp); slotDropper?.DropSlots(ref temp, ref temp); var tempValues = temp.GetValues(); @@ -1697,7 +1696,7 @@ private static SlotDropper ConstructDropSlotRanges(SlotCursor cursor, { foreach (var kv in temp.Items()) { - if (Float.IsNaN(kv.Value)) + if (float.IsNaN(kv.Value)) rowHasMissing.Set(kv.Key, true); } if (!cursor.MoveNext()) @@ -1773,7 +1772,7 @@ public override int NumExamples get { return _numExamples; } } - private MemImpl(RoleMappedData data, IHost host, double[][] binUpperBounds, Float maxLabel, bool dummy, + private MemImpl(RoleMappedData data, IHost host, double[][] binUpperBounds, float maxLabel, bool dummy, bool noFlocks, PredictionKind kind, int[] categoricalFeatureIndices, bool categoricalSplit) : base(data, host, binUpperBounds, maxLabel, kind, categoricalFeatureIndices, categoricalSplit) { @@ -1795,7 +1794,7 @@ private MemImpl(RoleMappedData data, IHost host, double[][] binUpperBounds, Floa _numExamples = (int)numInstances; } - public MemImpl(RoleMappedData data, IHost host, int maxBins, Float maxLabel, bool noFlocks, int minDocsPerLeaf, + public MemImpl(RoleMappedData data, IHost host, int maxBins, float maxLabel, bool noFlocks, int minDocsPerLeaf, PredictionKind kind, IParallelTraining parallelTraining, int[] categoricalFeatureIndices, bool categoricalSplit) : this(data, host, null, maxLabel, dummy: true, noFlocks: noFlocks, kind: kind, categoricalFeatureIndices: categoricalFeatureIndices, categoricalSplit: categoricalSplit) @@ -1805,7 +1804,7 @@ public MemImpl(RoleMappedData data, IHost host, int maxBins, Float maxLabel, boo InitializeBins(maxBins, parallelTraining); } - public MemImpl(RoleMappedData data, IHost host, double[][] binUpperBounds, Float maxLabel, + public MemImpl(RoleMappedData data, IHost host, double[][] binUpperBounds, float maxLabel, bool noFlocks, PredictionKind kind, int[] categoricalFeatureIndices, bool categoricalSplit) : this(data, host, binUpperBounds, maxLabel, dummy: true, noFlocks: noFlocks, kind: kind, categoricalFeatureIndices: categoricalFeatureIndices, categoricalSplit: categoricalSplit) @@ -2380,7 +2379,7 @@ private Dataset.DatasetSkeleton CreateDatasetSkeleton() } } - // REVIEW: Change this, as well as the bin finding code and bin upper bounds, to be Float instead of Double. + // REVIEW: Change this, as well as the bin finding code and bin upper bounds, to be float instead of Double. /// /// A mutable list of index,value that may be kept sparse or dense. 
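Note: the hunks that follow move the tree-ensemble scoring surface (TreeEnsembleModelParameters and its IValueMapper implementation) to float as well. For orientation, a minimal sketch of how a caller consumes that surface once this lands; `predictor` is a stand-in for any tree-ensemble model instance, not something this patch constructs:

    // GetMapper yields a ValueMapper<VBuffer<float>, float> delegate; invoking it
    // runs Map, i.e. dst = (float)TrainedEnsemble.GetOutput(in src).
    var mapper = ((IValueMapper)predictor).GetMapper<VBuffer<float>, float>();
    VBuffer<float> features = default;   // caller-supplied feature vector
    float score = 0;
    mapper(in features, ref score);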
@@ -2730,7 +2729,7 @@ public ForwardIndexer(ValuesList[] values, int[] features, ref int[] workArray) internal sealed class ExamplesToFastTreeBins { private readonly int _maxBins; - private readonly Float _maxLabel; + private readonly float _maxLabel; private readonly IHost _host; private readonly bool _diskTranspose; private readonly bool _noFlocks; @@ -2745,7 +2744,7 @@ public double[][] BinUpperBounds public int[] FeatureMap { get; private set; } - public ExamplesToFastTreeBins(IHostEnvironment env, int maxBins, bool diskTranspose, bool noFlocks, int minDocsPerLeaf, Float maxLabel) + public ExamplesToFastTreeBins(IHostEnvironment env, int maxBins, bool diskTranspose, bool noFlocks, int minDocsPerLeaf, float maxLabel) { Contracts.AssertValue(env); _host = env.Register("Converter"); @@ -2786,7 +2785,7 @@ public Dataset GetCompatibleDataset(RoleMappedData data, PredictionKind kind, in } public abstract class TreeEnsembleModelParameters : - ModelParametersBase, + ModelParametersBase, IValueMapper, ICanSaveInTextFormat, ICanSaveInIniFormat, @@ -2794,7 +2793,7 @@ public abstract class TreeEnsembleModelParameters : ICanSaveSummary, ICanGetSummaryInKeyValuePairs, ITreeEnsemble, - IPredictorWithFeatureWeights, + IPredictorWithFeatureWeights, IFeatureContributionMapper, ICalculateFeatureContribution, ICanGetSummaryAsIRow, @@ -2925,14 +2924,14 @@ private protected override void SaveCore(ModelSaveContext ctx) ValueMapper IValueMapper.GetMapper() { - Host.Check(typeof(TIn) == typeof(VBuffer)); - Host.Check(typeof(TOut) == typeof(Float)); + Host.Check(typeof(TIn) == typeof(VBuffer)); + Host.Check(typeof(TOut) == typeof(float)); - ValueMapper, Float> del = Map; + ValueMapper, float> del = Map; return (ValueMapper)(Delegate)del; } - private protected virtual void Map(in VBuffer src, ref Float dst) + private protected virtual void Map(in VBuffer src, ref float dst) { int inputVectorSize = InputType.GetVectorSize(); if (inputVectorSize > 0) @@ -2940,27 +2939,27 @@ private protected virtual void Map(in VBuffer src, ref Float dst) else Host.Check(src.Length > MaxSplitFeatIdx); - dst = (Float)TrainedEnsemble.GetOutput(in src); + dst = (float)TrainedEnsemble.GetOutput(in src); } - ValueMapper> IFeatureContributionMapper.GetFeatureContributionMapper(int top, int bottom, bool normalize) + ValueMapper> IFeatureContributionMapper.GetFeatureContributionMapper(int top, int bottom, bool normalize) { - Host.Check(typeof(TSrc) == typeof(VBuffer)); - Host.Check(typeof(TDst) == typeof(VBuffer)); + Host.Check(typeof(TSrc) == typeof(VBuffer)); + Host.Check(typeof(TDst) == typeof(VBuffer)); Host.Check(top >= 0, "top must be non-negative"); Host.Check(bottom >= 0, "bottom must be non-negative"); - BufferBuilder builder = null; - ValueMapper, VBuffer> del = - (in VBuffer src, ref VBuffer dst) => + BufferBuilder builder = null; + ValueMapper, VBuffer> del = + (in VBuffer src, ref VBuffer dst) => { FeatureContributionMap(in src, ref dst, ref builder); Numeric.VectorUtils.SparsifyNormalize(ref dst, top, bottom, normalize); }; - return (ValueMapper>)(Delegate)del; + return (ValueMapper>)(Delegate)del; } - private void FeatureContributionMap(in VBuffer src, ref VBuffer dst, ref BufferBuilder builder) + private void FeatureContributionMap(in VBuffer src, ref VBuffer dst, ref BufferBuilder builder) { int inputVectorSize = InputType.GetVectorSize(); if (inputVectorSize > 0) @@ -3232,7 +3231,7 @@ private void ToCSharp(InternalRegressionTree tree, TextWriter writer, int node, } } - public void GetFeatureWeights(ref VBuffer weights) + 
public void GetFeatureWeights(ref VBuffer weights) { var numFeatures = Math.Max(NumFeatures, MaxSplitFeatIdx + 1); FeatureToGainMap gainMap = new FeatureToGainMap(TrainedEnsemble.Trees.ToList(), normalize: true); @@ -3246,10 +3245,10 @@ public void GetFeatureWeights(ref VBuffer weights) Double max = gainMap.Values.Max(); Double normFactor = max == 0 ? 1.0 : (1.0 / Math.Sqrt(max)); - var bldr = new BufferBuilder(R4Adder.Instance); + var bldr = new BufferBuilder(R4Adder.Instance); bldr.Reset(numFeatures, false); foreach (var pair in gainMap) - bldr.AddFeature(pair.Key, (Float)(Math.Sqrt(pair.Value) * normFactor)); + bldr.AddFeature(pair.Key, (float)(Math.Sqrt(pair.Value) * normFactor)); bldr.GetResult(ref weights); } @@ -3258,9 +3257,9 @@ ITree[] ITreeEnsemble.GetTrees() return TrainedEnsemble.Trees.Select(k => new Tree(k)).ToArray(); } - public Float GetLeafValue(int treeId, int leafId) + public float GetLeafValue(int treeId, int leafId) { - return (Float)TrainedEnsemble.GetTreeAt(treeId).LeafValue(leafId); + return (float)TrainedEnsemble.GetTreeAt(treeId).LeafValue(leafId); } /// @@ -3268,7 +3267,7 @@ public Float GetLeafValue(int treeId, int leafId) /// internal nodes in the path from the root to that leaf. If 'path' is null a new list is initialized. All elements /// in 'path' are cleared before filling in the current path nodes. /// - public int GetLeaf(int treeId, in VBuffer features, ref List path) + public int GetLeaf(int treeId, in VBuffer features, ref List path) { return TrainedEnsemble.GetTreeAt(treeId).GetLeaf(in features, ref path); } @@ -3293,7 +3292,7 @@ DataViewRow ICanGetSummaryAsIRow.GetStatsIRowOrNull(RoleMappedSchema schema) return null; } - private sealed class Tree : ITree> + private sealed class Tree : ITree> { private readonly InternalRegressionTree _regTree; @@ -3310,7 +3309,7 @@ public Tree(InternalRegressionTree regTree) public int NumLeaves => _regTree.NumLeaves; - public int GetLeaf(in VBuffer feat) + public int GetLeaf(in VBuffer feat) { return _regTree.GetLeaf(in feat); } diff --git a/src/Microsoft.ML.FastTree/Training/EarlyStoppingCriteria.cs b/src/Microsoft.ML.FastTree/Training/EarlyStoppingCriteria.cs index f430b76427..e01a6b25c7 100644 --- a/src/Microsoft.ML.FastTree/Training/EarlyStoppingCriteria.cs +++ b/src/Microsoft.ML.FastTree/Training/EarlyStoppingCriteria.cs @@ -7,7 +7,6 @@ using Microsoft.ML.CommandLine; using Microsoft.ML.EntryPoints; using Microsoft.ML.Trainers.FastTree; -using Float = System.Single; [assembly: LoadableClass(typeof(TolerantEarlyStoppingCriterion), typeof(TolerantEarlyStoppingCriterion.Options), typeof(SignatureEarlyStoppingCriterion), "Tolerant (TR)", "tr")] [assembly: LoadableClass(typeof(GLEarlyStoppingCriterion), typeof(GLEarlyStoppingCriterion.Options), typeof(SignatureEarlyStoppingCriterion), "Loss of Generality (GL)", "gl")] @@ -35,7 +34,7 @@ public abstract class IEarlyStoppingCriterion /// A non negative number. Higher score means better result unless "_lowerIsBetter" is true. /// True if the current result is the best ever. /// If true, the learning should stop. 
-        public abstract bool CheckScore(Float validationScore, Float trainingScore, out bool isBestCandidate);
+        public abstract bool CheckScore(float validationScore, float trainingScore, out bool isBestCandidate);
    }

    [TlcModule.ComponentKind("EarlyStoppingCriterion")]
@@ -49,11 +48,11 @@ public abstract class EarlyStoppingCriterion<TOptions> : IEarlyStoppingCriterion
    {
        public abstract class OptionsBase { }

-        private Float _bestScore;
+        private float _bestScore;
        protected readonly TOptions EarlyStoppingCriterionOptions;
        protected readonly bool LowerIsBetter;

-        protected Float BestScore {
+        protected float BestScore {
            get { return _bestScore; }
            set {
@@ -66,7 +65,7 @@ internal EarlyStoppingCriterion(TOptions options, bool lowerIsBetter)
        {
            EarlyStoppingCriterionOptions = options;
            LowerIsBetter = lowerIsBetter;
-            _bestScore = LowerIsBetter ? Float.PositiveInfinity : Float.NegativeInfinity;
+            _bestScore = LowerIsBetter ? float.PositiveInfinity : float.NegativeInfinity;
        }

        ///
@@ -74,7 +73,7 @@ internal EarlyStoppingCriterion(TOptions options, bool lowerIsBetter)
        ///
        /// The latest score
        /// True if the given score is the best ever.
-        protected bool CheckBestScore(Float score)
+        protected bool CheckBestScore(float score)
        {
            bool isBestEver = ((score > BestScore) != LowerIsBetter);
            if (isBestEver)
@@ -105,7 +104,7 @@ public TolerantEarlyStoppingCriterion(Options options, bool lowerIsBetter)
            Contracts.CheckUserArg(EarlyStoppingCriterionOptions.Threshold >= 0, nameof(options.Threshold), "Must be non-negative.");
        }

-        public override bool CheckScore(Float validationScore, Float trainingScore, out bool isBestCandidate)
+        public override bool CheckScore(float validationScore, float trainingScore, out bool isBestCandidate)
        {
            Contracts.Assert(validationScore >= 0);

@@ -128,14 +127,14 @@ public class Options : OptionsBase
        {
            [Argument(ArgumentType.AtMostOnce, HelpText = "Threshold in range [0,1].", ShortName = "th")]
            [TlcModule.Range(Min = 0.0f, Max = 1.0f)]
-            public Float Threshold = 0.01f;
+            public float Threshold = 0.01f;

            [Argument(ArgumentType.AtMostOnce, HelpText = "The window size.", ShortName = "w")]
            [TlcModule.Range(Inf = 0)]
            public int WindowSize = 5;
        }

-        protected Queue<Float> PastScores;
+        protected Queue<float> PastScores;

        private protected MovingWindowEarlyStoppingCriterion(Options args, bool lowerIsBetter)
            : base(args, lowerIsBetter)
        {
@@ -143,18 +142,18 @@ private protected MovingWindowEarlyStoppingCriterion(Options args, bool lowerIsB
            Contracts.CheckUserArg(0 <= EarlyStoppingCriterionOptions.Threshold && args.Threshold <= 1, nameof(args.Threshold), "Must be in range [0,1].");
            Contracts.CheckUserArg(EarlyStoppingCriterionOptions.WindowSize > 0, nameof(args.WindowSize), "Must be positive.");

-            PastScores = new Queue<Float>(EarlyStoppingCriterionOptions.WindowSize);
+            PastScores = new Queue<float>(EarlyStoppingCriterionOptions.WindowSize);
        }

        ///
        /// Calculate the average score in the given list of scores.
        ///
        /// The moving average.
-        private Float GetRecentAvg(Queue<Float> recentScores)
+        private float GetRecentAvg(Queue<float> recentScores)
        {
-            Float avg = 0;
+            float avg = 0;

-            foreach (Float score in recentScores)
+            foreach (float score in recentScores)
                avg += score;

            Contracts.Assert(recentScores.Count > 0);
@@ -166,10 +165,10 @@ private Float GetRecentAvg(Queue<Float> recentScores)
        ///
        /// The list of scores.
        /// The best score.
-        private Float GetRecentBest(IEnumerable<Float> recentScores)
+        private float GetRecentBest(IEnumerable<float> recentScores)
        {
-            Float recentBestScore = LowerIsBetter ?
Float.PositiveInfinity : Float.NegativeInfinity; - foreach (Float score in recentScores) + float recentBestScore = LowerIsBetter ? float.PositiveInfinity : float.NegativeInfinity; + foreach (float score in recentScores) { if ((score > recentBestScore) != LowerIsBetter) recentBestScore = score; @@ -178,7 +177,7 @@ private Float GetRecentBest(IEnumerable recentScores) return recentBestScore; } - protected bool CheckRecentScores(Float score, int windowSize, out Float recentBest, out Float recentAverage) + protected bool CheckRecentScores(float score, int windowSize, out float recentBest, out float recentAverage) { if (PastScores.Count >= windowSize) { @@ -191,8 +190,8 @@ protected bool CheckRecentScores(Float score, int windowSize, out Float recentBe else { PastScores.Enqueue(score); - recentBest = default(Float); - recentAverage = default(Float); + recentBest = default(float); + recentAverage = default(float); return false; } } @@ -223,7 +222,7 @@ public GLEarlyStoppingCriterion(Options options, bool lowerIsBetter) Contracts.CheckUserArg(0 <= EarlyStoppingCriterionOptions.Threshold && options.Threshold <= 1, nameof(options.Threshold), "Must be in range [0,1]."); } - public override bool CheckScore(Float validationScore, Float trainingScore, out bool isBestCandidate) + public override bool CheckScore(float validationScore, float trainingScore, out bool isBestCandidate) { Contracts.Assert(validationScore >= 0); @@ -254,15 +253,15 @@ public IEarlyStoppingCriterion CreateComponent(IHostEnvironment env, bool lowerI public LPEarlyStoppingCriterion(Options options, bool lowerIsBetter) : base(options, lowerIsBetter) { } - public override bool CheckScore(Float validationScore, Float trainingScore, out bool isBestCandidate) + public override bool CheckScore(float validationScore, float trainingScore, out bool isBestCandidate) { Contracts.Assert(validationScore >= 0); Contracts.Assert(trainingScore >= 0); isBestCandidate = CheckBestScore(validationScore); - Float recentBest; - Float recentAverage; + float recentBest; + float recentAverage; if (CheckRecentScores(trainingScore, EarlyStoppingCriterionOptions.WindowSize, out recentBest, out recentAverage)) { if (LowerIsBetter) @@ -292,15 +291,15 @@ public IEarlyStoppingCriterion CreateComponent(IHostEnvironment env, bool lowerI public PQEarlyStoppingCriterion(Options options, bool lowerIsBetter) : base(options, lowerIsBetter) { } - public override bool CheckScore(Float validationScore, Float trainingScore, out bool isBestCandidate) + public override bool CheckScore(float validationScore, float trainingScore, out bool isBestCandidate) { Contracts.Assert(validationScore >= 0); Contracts.Assert(trainingScore >= 0); isBestCandidate = CheckBestScore(validationScore); - Float recentBest; - Float recentAverage; + float recentBest; + float recentAverage; if (CheckRecentScores(trainingScore, EarlyStoppingCriterionOptions.WindowSize, out recentBest, out recentAverage)) { if (LowerIsBetter) @@ -333,17 +332,17 @@ public IEarlyStoppingCriterion CreateComponent(IHostEnvironment env, bool lowerI } private int _count; - private Float _prevScore; + private float _prevScore; public UPEarlyStoppingCriterion(Options options, bool lowerIsBetter) : base(options, lowerIsBetter) { Contracts.CheckUserArg(EarlyStoppingCriterionOptions.WindowSize > 0, nameof(options.WindowSize), "Must be positive"); - _prevScore = LowerIsBetter ? Float.PositiveInfinity : Float.NegativeInfinity; + _prevScore = LowerIsBetter ? 
float.PositiveInfinity : float.NegativeInfinity; } - public override bool CheckScore(Float validationScore, Float trainingScore, out bool isBestCandidate) + public override bool CheckScore(float validationScore, float trainingScore, out bool isBestCandidate) { Contracts.Assert(validationScore >= 0); diff --git a/src/Microsoft.ML.FastTree/TreeEnsemble/InternalQuantileRegressionTree.cs b/src/Microsoft.ML.FastTree/TreeEnsemble/InternalQuantileRegressionTree.cs index 7a8d5dcf23..73c308be8f 100644 --- a/src/Microsoft.ML.FastTree/TreeEnsemble/InternalQuantileRegressionTree.cs +++ b/src/Microsoft.ML.FastTree/TreeEnsemble/InternalQuantileRegressionTree.cs @@ -5,7 +5,6 @@ using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; using Microsoft.ML.Model; -using Float = System.Single; namespace Microsoft.ML.Trainers.FastTree { @@ -63,13 +62,13 @@ internal override void Save(ModelSaveContext ctx) /// Loads the sampled labels of this tree to the distribution array for the sparse instance type. /// By calling for all the trees, the distribution array will have all the samples from all the trees /// - public void LoadSampledLabels(in VBuffer feat, Float[] distribution, Float[] weights, int sampleCount, int destinationIndex) + public void LoadSampledLabels(in VBuffer feat, float[] distribution, float[] weights, int sampleCount, int destinationIndex) { int leaf = GetLeaf(in feat); LoadSampledLabels(distribution, weights, sampleCount, destinationIndex, leaf); } - private void LoadSampledLabels(Float[] distribution, Float[] weights, int sampleCount, int destinationIndex, int leaf) + private void LoadSampledLabels(float[] distribution, float[] weights, int sampleCount, int destinationIndex, int leaf) { Contracts.Check(sampleCount == _labelsDistribution.Length / NumLeaves, "Bad quantile sample count"); Contracts.Check(_instanceWeights == null || sampleCount == _instanceWeights.Length / NumLeaves, "Bad quantile weight count"); @@ -78,14 +77,14 @@ private void LoadSampledLabels(Float[] distribution, Float[] weights, int sample { for (int i = 0, j = sampleCount * leaf, k = destinationIndex; i < sampleCount; i++, j++, k++) { - distribution[k] = (Float)_labelsDistribution[j]; - weights[k] = (Float)_instanceWeights[j]; + distribution[k] = (float)_labelsDistribution[j]; + weights[k] = (float)_instanceWeights[j]; } } else { for (int i = 0, j = sampleCount * leaf, k = destinationIndex; i < sampleCount; i++, j++, k++) - distribution[k] = (Float)_labelsDistribution[j]; + distribution[k] = (float)_labelsDistribution[j]; } } diff --git a/src/Microsoft.ML.KMeansClustering/KMeansModelParameters.cs b/src/Microsoft.ML.KMeansClustering/KMeansModelParameters.cs index 20de43d068..40f0ed5ca3 100644 --- a/src/Microsoft.ML.KMeansClustering/KMeansModelParameters.cs +++ b/src/Microsoft.ML.KMeansClustering/KMeansModelParameters.cs @@ -14,7 +14,6 @@ using Microsoft.ML.Model.OnnxConverter; using Microsoft.ML.Numeric; using Microsoft.ML.Trainers.KMeans; -using Float = System.Single; [assembly: LoadableClass(typeof(KMeansModelParameters), null, typeof(SignatureLoadModel), "KMeans predictor", KMeansModelParameters.LoaderSignature)] @@ -28,7 +27,7 @@ namespace Microsoft.ML.Trainers.KMeans /// ]]> /// public sealed class KMeansModelParameters : - ModelParametersBase>, + ModelParametersBase>, IValueMapper, ICanSaveInTextFormat, ISingleCanSaveOnnx @@ -61,8 +60,8 @@ private static VersionInfo GetVersionInfo() private readonly int _dimensionality; private readonly int _k; - private readonly VBuffer[] _centroids; - private readonly Float[] 
_centroidL2s; // L2 norms of the centroids
+        private readonly VBuffer<float>[] _centroids;
+        private readonly float[] _centroidL2s; // L2 norms of the centroids

        ///
        /// Initialize predictor with a trained model.
@@ -84,8 +83,8 @@ internal KMeansModelParameters(IHostEnvironment env, int k, VBuffer<Float>[] cen
            _k = k;
            _dimensionality = centroids[0].Length;

-            _centroidL2s = new Float[_k];
-            _centroids = new VBuffer<Float>[_k];
+            _centroidL2s = new float[_k];
+            _centroids = new VBuffer<float>[_k];
            for (int i = 0; i < _k; i++)
            {
                Host.CheckParam(centroids[i].Length == _dimensionality,
@@ -125,8 +124,8 @@ private KMeansModelParameters(IHostEnvironment env, ModelLoadContext ctx)
            _dimensionality = ctx.Reader.ReadInt32();
            Host.CheckDecode(_dimensionality > 0);

-            _centroidL2s = new Float[_k];
-            _centroids = new VBuffer<Float>[_k];
+            _centroidL2s = new float[_k];
+            _centroids = new VBuffer<float>[_k];
            for (int i = 0; i < _k; i++)
            {
                // Prior to allowing sparse vectors, count was not written and was implicitly
@@ -136,7 +135,7 @@ private KMeansModelParameters(IHostEnvironment env, ModelLoadContext ctx)
                var indices = count < _dimensionality ? ctx.Reader.ReadIntArray(count) : null;
                var values = ctx.Reader.ReadFloatArray(count);
                Host.CheckDecode(FloatUtils.IsFinite(values));
-                _centroids[i] = new VBuffer<Float>(_dimensionality, count, values, indices);
+                _centroids[i] = new VBuffer<float>(_dimensionality, count, values, indices);
            }
            WarnOnOldNormalizer(ctx, GetType(), Host);
@@ -148,11 +147,11 @@ private KMeansModelParameters(IHostEnvironment env, ModelLoadContext ctx)

        ValueMapper<TIn, TOut> IValueMapper.GetMapper<TIn, TOut>()
        {
-            Host.Check(typeof(TIn) == typeof(VBuffer<Float>));
-            Host.Check(typeof(TOut) == typeof(VBuffer<Float>));
+            Host.Check(typeof(TIn) == typeof(VBuffer<float>));
+            Host.Check(typeof(TOut) == typeof(VBuffer<float>));

-            ValueMapper<VBuffer<Float>, VBuffer<Float>> del =
-                (in VBuffer<Float> src, ref VBuffer<Float> dst) =>
+            ValueMapper<VBuffer<float>, VBuffer<float>> del =
+                (in VBuffer<float> src, ref VBuffer<float> dst) =>
                {
                    if (src.Length != _dimensionality)
                        throw Host.Except($"Incorrect number of features: expected {_dimensionality}, got {src.Length}");
@@ -164,14 +163,14 @@ ValueMapper<TIn, TOut> IValueMapper.GetMapper<TIn, TOut>()
            return (ValueMapper<TIn, TOut>)(Delegate)del;
        }

-        private void Map(in VBuffer<Float> src, Span<Float> distances)
+        private void Map(in VBuffer<float> src, Span<float> distances)
        {
            Host.Assert(distances.Length >= _k);

-            Float instanceL2 = VectorUtils.NormSquared(in src);
+            float instanceL2 = VectorUtils.NormSquared(in src);
            for (int i = 0; i < _k; i++)
            {
-                Float distance = Math.Max(0,
+                float distance = Math.Max(0,
                    -2 * VectorUtils.DotProduct(in _centroids[i], in src) + _centroidL2s[i] + instanceL2);
                distances[i] = distance;
            }
@@ -279,7 +278,7 @@ private void InitPredictor()
        /// an appropriate length, if necessary.
        /// The number of clusters, corresponding to the logical size of
        /// .
-        public void GetClusterCentroids(ref VBuffer<Float>[] centroids, out int k)
+        public void GetClusterCentroids(ref VBuffer<float>[] centroids, out int k)
        {
            Contracts.Assert(_centroids.Length == _k);
            Utils.EnsureSize(ref centroids, _k, _k);
@@ -338,7 +337,7 @@ bool ISingleCanSaveOnnx.SaveAsOnnx(OnnxContext ctx, string[] outputNames, string
            // Compute -2XC^T. Note that Gemm always takes three inputs. Since we only have two here,
            // a dummy one, named zero, is created.
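Aside: both Map above and this ONNX export rest on the same expansion of the squared Euclidean distance, ||x - c||^2 = ||x||^2 - 2*x.c + ||c||^2, which reduces the per-centroid work to a single dot product against cached norms. A minimal standalone sketch of that identity over plain float arrays (illustrative only; this helper is not part of ML.NET):

    static float SquaredDistanceExpanded(float[] x, float[] c)
    {
        float xx = 0, cc = 0, xc = 0;
        for (int i = 0; i < x.Length; i++)
        {
            xx += x[i] * x[i];   // ||x||^2
            cc += c[i] * c[i];   // ||c||^2
            xc += x[i] * c[i];   // x.c
        }
        // Same clamp as Map: rounding can push the expanded form slightly negative.
        return Math.Max(0, xx - 2 * xc + cc);
    }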
- var zeroName = ctx.AddInitializer(new Float[] { 0f }, null, "zero"); + var zeroName = ctx.AddInitializer(new float[] { 0f }, null, "zero"); var nameXC2 = ctx.AddIntermediateVariable(null, "XC2", true); var gemmNodeXC2 = ctx.CreateNode("Gemm", new[] { nameX, nameC, zeroName}, new[] { nameXC2 }, ctx.GetNodeName("Gemm"), ""); gemmNodeXC2.AddAttribute("alpha", -2f); diff --git a/src/Microsoft.ML.ResultProcessor/ResultProcessor.cs b/src/Microsoft.ML.ResultProcessor/ResultProcessor.cs index ba7f0b2fce..eb1b390825 100644 --- a/src/Microsoft.ML.ResultProcessor/ResultProcessor.cs +++ b/src/Microsoft.ML.ResultProcessor/ResultProcessor.cs @@ -23,7 +23,6 @@ namespace Microsoft.ML.ResultProcessor { - using Float = System.Single; /// /// The processed Results of a particular Learner /// @@ -193,16 +192,16 @@ public void Initialize(ExperimentItemResult result) [Serializable] public class ResultMetric { - public Float MetricValue { get; set; } - public Float Deviation { get; set; } - public Float[] AllValues { get; set; } + public float MetricValue { get; set; } + public float Deviation { get; set; } + public float[] AllValues { get; set; } /// /// Constructor initializing the object. /// /// metric value /// Deviation, 0.0 if not passed - public ResultMetric(Float metricValue, Float deviation = 0) + public ResultMetric(float metricValue, float deviation = 0) { MetricValue = metricValue; Deviation = deviation; @@ -798,8 +797,8 @@ private static Dictionary ParseResultLines(List fi { string name = matchNameValueDeviation.Groups["name"].Value; Double doubleValue = Double.Parse(matchNameValueDeviation.Groups["value"].Value, CultureInfo.InvariantCulture); - Float value = (Float)doubleValue; - Float deviation = (Float)Double.Parse(matchNameValueDeviation.Groups["deviation"].Value, CultureInfo.InvariantCulture); + float value = (float)doubleValue; + float deviation = (float)Double.Parse(matchNameValueDeviation.Groups["deviation"].Value, CultureInfo.InvariantCulture); if (name == metricName) metricValue = value; @@ -817,7 +816,7 @@ private static Dictionary ParseResultLines(List fi if (matchNameValue.Success) { string name = matchNameValue.Groups["name"].Value; - Float value = Float.Parse(matchNameValue.Groups["value"].Value, CultureInfo.InvariantCulture); + float value = float.Parse(matchNameValue.Groups["value"].Value, CultureInfo.InvariantCulture); runResults[name] = new ResultMetric(value); continue; @@ -890,7 +889,7 @@ protected static Dictionary GetPerFoldResults(IList perFoldMetrics = new Dictionary(); - Dictionary> foldResults = new Dictionary>(); + Dictionary> foldResults = new Dictionary>(); int i = 0; while (i < lines.Count) { @@ -925,7 +924,7 @@ protected static Dictionary GetPerFoldResults(IList>(); + var metricToFoldValuesDict = new Dictionary>(); List allFoldIndices = new List(foldResults.Keys); allFoldIndices.Sort(); foreach (var kvp in foldResults) @@ -933,10 +932,10 @@ protected static Dictionary GetPerFoldResults(IList metricDict = null; + Dictionary metricDict = null; if (!metricToFoldValuesDict.TryGetValue(kvp1.Key, out metricDict)) { - metricDict = new Dictionary(); + metricDict = new Dictionary(); metricToFoldValuesDict[kvp1.Key] = metricDict; } metricDict[foldIdx] = kvp1.Value; @@ -945,9 +944,9 @@ protected static Dictionary GetPerFoldResults(IList(from kvp in metricValues.Value + AllValues = new List(from kvp in metricValues.Value orderby kvp.Key ascending select kvp.Value).ToArray() }; @@ -959,20 +958,20 @@ orderby kvp.Key ascending /// /// Given output for a single fold, add its 
results /// - protected static KeyValuePair> AddFoldResults(IList lines) + protected static KeyValuePair> AddFoldResults(IList lines) { int foldIdx = -1; string[] foldLineCols = lines[0].Split(); if (foldLineCols.Length < 2) { Console.Error.WriteLine("Couldn't parse fold index line: " + lines[0]); - return new KeyValuePair>(-1, null); + return new KeyValuePair>(-1, null); } if (!int.TryParse(foldLineCols[foldLineCols.Length - 1], out foldIdx)) { Console.Error.WriteLine("Couldn't parse fold index line: " + lines[0]); - return new KeyValuePair>(-1, null); + return new KeyValuePair>(-1, null); } // if run index is in front of fold index, account for it @@ -983,7 +982,7 @@ protected static KeyValuePair> AddFoldResults(ILi foldIdx += (int)(foldIdxExtra * Math.Pow(1000, j)); } - Dictionary valuesDict = new Dictionary(); + Dictionary valuesDict = new Dictionary(); for (int i = 1; i < lines.Count; i++) { if (lines[i].IndexOf(':') < 0) @@ -993,12 +992,12 @@ protected static KeyValuePair> AddFoldResults(ILi continue; if (nameValCols[1].EndsWith("%")) nameValCols[1] = nameValCols[1].Substring(0, nameValCols[1].Length - 1); - Float value = 0; - if (!Float.TryParse(nameValCols[1], out value)) + float value = 0; + if (!float.TryParse(nameValCols[1], out value)) continue; valuesDict[nameValCols[0]] = value; } - return new KeyValuePair>(foldIdx, valuesDict); + return new KeyValuePair>(foldIdx, valuesDict); } /// @@ -1306,9 +1305,9 @@ protected static void Run(IHostEnvironment env, string[] args) { foreach (var kvp in result.PerFoldResults) { - if (Float.IsNaN(kvp.Value.MetricValue) && kvp.Value.AllValues != null) + if (float.IsNaN(kvp.Value.MetricValue) && kvp.Value.AllValues != null) outStream.Write("\t" + kvp.Key + ":" - + string.Join(cmd.PerFoldResultSeparator, new List(new List(kvp.Value.AllValues).Select(d => "" + d)))); + + string.Join(cmd.PerFoldResultSeparator, new List(new List(kvp.Value.AllValues).Select(d => "" + d)))); } } diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs b/src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs index b033feea89..ae74f51e59 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs @@ -7,7 +7,6 @@ using System.Threading; using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; -using Float = System.Single; namespace Microsoft.ML.Numeric { @@ -18,7 +17,7 @@ namespace Microsoft.ML.Numeric /// The gradient vector, which must be filled in (its initial contents are undefined) /// The progress channel provider that can be used to report calculation progress. Can be null. /// The value of the function - internal delegate Float DifferentiableFunction(in VBuffer input, ref VBuffer gradient, IProgressChannelProvider progress); + internal delegate float DifferentiableFunction(in VBuffer input, ref VBuffer gradient, IProgressChannelProvider progress); /// /// A delegate for indexed sets of functions with gradients. 
@@ -30,7 +29,7 @@ namespace Microsoft.ML.Numeric
    /// The point at which to evaluate the function
    /// The gradient vector, which must be filled in (its initial contents are undefined)
    /// The value of the function
-    internal delegate Float IndexedDifferentiableFunction(int index, in VBuffer<Float> input, ref VBuffer<Float> gradient);
+    internal delegate float IndexedDifferentiableFunction(int index, in VBuffer<float> input, ref VBuffer<float> gradient);

    ///
    /// Class to aggregate an indexed differentiable function into a single function, in parallel
@@ -42,9 +41,9 @@ internal class DifferentiableFunctionAggregator
        private readonly int _threads;
        private readonly int _dim;

-        private readonly VBuffer<Float>[] _tempGrads;
-        private VBuffer<Float> _input;
-        private readonly Float[] _tempVals;
+        private readonly VBuffer<float>[] _tempGrads;
+        private VBuffer<float> _input;
+        private readonly float[] _tempVals;
        private readonly AutoResetEvent[] _threadFinished;

        ///
@@ -67,12 +66,12 @@ public DifferentiableFunctionAggregator(IndexedDifferentiableFunction func, int
                threads = 64;
            _threads = threads;

-            _tempGrads = new VBuffer<Float>[threads];
+            _tempGrads = new VBuffer<float>[threads];
            _threadFinished = new AutoResetEvent[threads];
            for (int i = 0; i < threads; ++i)
                _threadFinished[i] = new AutoResetEvent(false);

-            _tempVals = new Float[threads];
+            _tempVals = new float[threads];
        }

        private void Eval(object chunkIndexObj)
@@ -97,7 +96,7 @@ private void Eval(object chunkIndexObj)
            _tempVals[chunkIndex] = 0;
            VectorUtils.ScaleBy(ref _tempGrads[chunkIndex], 0);

-            VBuffer<Float> tempGrad = default(VBuffer<Float>);
+            VBuffer<float> tempGrad = default(VBuffer<float>);
            for (int i = from; i < to; ++i)
            {
                VBufferUtils.Resize(ref tempGrad, 0, 0);
@@ -117,7 +116,7 @@ private void Eval(object chunkIndexObj)
        /// The point at which to evaluate the function
        /// The gradient vector, which must be filled in (its initial contents are undefined)
        /// Function value
-        public Float Eval(in VBuffer<Float> input, ref VBuffer<Float> gradient)
+        public float Eval(in VBuffer<float> input, ref VBuffer<float> gradient)
        {
            _input = input;

@@ -129,7 +128,7 @@ public Float Eval(in VBuffer<Float> input, ref VBuffer<Float> gradient)
            AutoResetEvent.WaitAll(_threadFinished);

            VectorUtils.ScaleBy(ref gradient, 0);
-            Float value = 0;
+            float value = 0;
            for (int c = 0; c < _threads; ++c)
            {
                if (gradient.Length == 0)
@@ -158,7 +157,7 @@ internal static class GradientTester
    {
        // approximately u^(1/3), where u is the unit roundoff ~ 1.1e-16.
        // the optimal value of eps for the central difference approximation, Nocedal & Wright
-        private const Float Eps = (Float)4.79e-6;
+        private const float Eps = (float)4.79e-6;

        private static Random _r = new Random(5);

        ///
@@ -168,7 +167,7 @@ internal static class GradientTester
        /// function to test
        /// point at which to test
        /// maximum normalized difference between analytic and numeric directional derivative over multiple tests
-        public static Float Test(DifferentiableFunction f, in VBuffer<Float> x)
+        public static float Test(DifferentiableFunction f, in VBuffer<float> x)
        {
            // REVIEW: Delete this method?
            return Test(f, in x, false);
@@ -181,19 +180,19 @@ public static Float Test(DifferentiableFunction f, in VBuffer<Float> x)
        /// point at which to test
        /// If false, outputs detailed info.
        /// maximum normalized difference between analytic and numeric directional derivative over multiple tests
-        public static Float Test(DifferentiableFunction f, in VBuffer<Float> x, bool quiet)
+        public static float Test(DifferentiableFunction f, in VBuffer<float> x, bool quiet)
        {
            // REVIEW: Delete this method?
-            VBuffer<Float> grad = default(VBuffer<Float>);
-            VBuffer<Float> newGrad = default(VBuffer<Float>);
-            VBuffer<Float> newX = default(VBuffer<Float>);
-            Float normX = VectorUtils.Norm(x);
+            VBuffer<float> grad = default(VBuffer<float>);
+            VBuffer<float> newGrad = default(VBuffer<float>);
+            VBuffer<float> newX = default(VBuffer<float>);
+            float normX = VectorUtils.Norm(x);

            f(in x, ref grad, null);

            if (!quiet)
                Console.WriteLine(Header);

-            Float maxNormDiff = Float.NegativeInfinity;
+            float maxNormDiff = float.NegativeInfinity;

            int numIters = Math.Min((int)x.Length, 10);
            int maxDirCount = Math.Min((int)x.Length / 2, 100);
@@ -202,7 +201,7 @@ public static Float Test(DifferentiableFunction f, in VBuffer<Float> x, bool qui
            {
                int dirCount = Math.Min(n * 10, maxDirCount);
                List<int> indices = new List<int>(dirCount);
-                List<Float> values = new List<Float>(dirCount);
+                List<float> values = new List<float>(dirCount);
                for (int i = 0; i < dirCount; i++)
                {
                    int index = _r.Next((int)x.Length);
@@ -211,22 +210,22 @@ public static Float Test(DifferentiableFunction f, in VBuffer<Float> x, bool qui
                    indices.Add(index);
                    values.Add(SampleFromGaussian(_r));
                }
-                VBuffer<Float> dir = new VBuffer<Float>(x.Length, values.Count, values.ToArray(), indices.ToArray());
+                VBuffer<float> dir = new VBuffer<float>(x.Length, values.Count, values.ToArray(), indices.ToArray());

-                Float norm = VectorUtils.Norm(dir);
+                float norm = VectorUtils.Norm(dir);
                VectorUtils.ScaleBy(ref dir, 1 / norm);

                VectorUtils.AddMultInto(in x, Eps, in dir, ref newX);
-                Float rVal = f(in newX, ref newGrad, null);
+                float rVal = f(in newX, ref newGrad, null);

                VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX);
-                Float lVal = f(in newX, ref newGrad, null);
+                float lVal = f(in newX, ref newGrad, null);

-                Float dirDeriv = VectorUtils.DotProduct(in grad, in dir);
-                Float numDeriv = (rVal - lVal) / (2 * Eps);
+                float dirDeriv = VectorUtils.DotProduct(in grad, in dir);
+                float numDeriv = (rVal - lVal) / (2 * Eps);

-                Float normDiff = Math.Abs(1 - numDeriv / dirDeriv);
-                Float diff = numDeriv - dirDeriv;
+                float normDiff = Math.Abs(1 - numDeriv / dirDeriv);
+                float diff = numDeriv - dirDeriv;
                if (!quiet)
                    Console.WriteLine("{0,-9}{1,-18:0.0000e0}{2,-18:0.0000e0}{3,-15:0.0000e0}{4,0:0.0000e0}", n, numDeriv, dirDeriv, diff, normDiff);
@@ -246,34 +245,34 @@ public static Float Test(DifferentiableFunction f, in VBuffer<Float> x, bool qui
        ///
        ///
        ///
-        public static void TestAllCoords(DifferentiableFunction f, in VBuffer<Float> x)
+        public static void TestAllCoords(DifferentiableFunction f, in VBuffer<float> x)
        {
            // REVIEW: Delete this method?
- VBuffer grad = default(VBuffer); - VBuffer newGrad = default(VBuffer); - VBuffer newX = default(VBuffer); - Float val = f(in x, ref grad, null); - Float normX = VectorUtils.Norm(x); + VBuffer grad = default(VBuffer); + VBuffer newGrad = default(VBuffer); + VBuffer newX = default(VBuffer); + float val = f(in x, ref grad, null); + float normX = VectorUtils.Norm(x); Console.WriteLine(Header); Random r = new Random(5); - VBuffer dir = new VBuffer(x.Length, 1, new Float[] { 1 }, new int[] { 0 }); + VBuffer dir = new VBuffer(x.Length, 1, new float[] { 1 }, new int[] { 0 }); for (int n = 0; n < x.Length; n++) { VBufferEditor.CreateFromBuffer(ref dir).Values[0] = n; VectorUtils.AddMultInto(in x, Eps, in dir, ref newX); - Float rVal = f(in newX, ref newGrad, null); + float rVal = f(in newX, ref newGrad, null); VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX); - Float lVal = f(in newX, ref newGrad, null); + float lVal = f(in newX, ref newGrad, null); - Float dirDeriv = VectorUtils.DotProduct(in grad, in dir); - Float numDeriv = (rVal - lVal) / (2 * Eps); + float dirDeriv = VectorUtils.DotProduct(in grad, in dir); + float numDeriv = (rVal - lVal) / (2 * Eps); - Float normDiff = Math.Abs(1 - numDeriv / dirDeriv); - Float diff = numDeriv - dirDeriv; + float normDiff = Math.Abs(1 - numDeriv / dirDeriv); + float diff = numDeriv - dirDeriv; if (diff != 0) Console.WriteLine("{0,-9}{1,-18:0.0000e0}{2,-18:0.0000e0}{3,-15:0.0000e0}{4,0:0.0000e0}", n, numDeriv, dirDeriv, diff, normDiff); } @@ -285,34 +284,34 @@ public static void TestAllCoords(DifferentiableFunction f, in VBuffer x) /// Function to test /// Point at which to test /// List of coordinates to test - public static void TestCoords(DifferentiableFunction f, in VBuffer x, IList coords) + public static void TestCoords(DifferentiableFunction f, in VBuffer x, IList coords) { // REVIEW: Delete this method? 
- VBuffer grad = default(VBuffer); - VBuffer newGrad = default(VBuffer); - VBuffer newX = default(VBuffer); - Float val = f(in x, ref grad, null); - Float normX = VectorUtils.Norm(x); + VBuffer grad = default(VBuffer); + VBuffer newGrad = default(VBuffer); + VBuffer newX = default(VBuffer); + float val = f(in x, ref grad, null); + float normX = VectorUtils.Norm(x); Console.WriteLine(Header); Random r = new Random(5); - VBuffer dir = new VBuffer(x.Length, 1, new Float[] { 1 }, new int[] { 0 }); + VBuffer dir = new VBuffer(x.Length, 1, new float[] { 1 }, new int[] { 0 }); foreach (int n in coords) { VBufferEditor.CreateFromBuffer(ref dir).Values[0] = n; VectorUtils.AddMultInto(in x, Eps, in dir, ref newX); - Float rVal = f(in newX, ref newGrad, null); + float rVal = f(in newX, ref newGrad, null); VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX); - Float lVal = f(in newX, ref newGrad, null); + float lVal = f(in newX, ref newGrad, null); - Float dirDeriv = VectorUtils.DotProduct(in grad, in dir); - Float numDeriv = (rVal - lVal) / (2 * Eps); + float dirDeriv = VectorUtils.DotProduct(in grad, in dir); + float numDeriv = (rVal - lVal) / (2 * Eps); - Float normDiff = Math.Abs(1 - numDeriv / dirDeriv); - Float diff = numDeriv - dirDeriv; + float normDiff = Math.Abs(1 - numDeriv / dirDeriv); + float diff = numDeriv - dirDeriv; Console.WriteLine("{0,-9}{1,-18:0.0000e0}{2,-18:0.0000e0}{3,-15:0.0000e0}{4,0:0.0000e0}", n, numDeriv, dirDeriv, diff, normDiff); } } @@ -327,37 +326,37 @@ public static void TestCoords(DifferentiableFunction f, in VBuffer x, ILi /// This is a reusable working buffer for intermediate calculations /// This is a reusable working buffer for intermediate calculations /// Normalized difference between analytic and numeric directional derivative - public static Float Test(DifferentiableFunction f, in VBuffer x, ref VBuffer dir, bool quiet, - ref VBuffer newGrad, ref VBuffer newX) + public static float Test(DifferentiableFunction f, in VBuffer x, ref VBuffer dir, bool quiet, + ref VBuffer newGrad, ref VBuffer newX) { - Float normDir = VectorUtils.Norm(dir); + float normDir = VectorUtils.Norm(dir); - Float val = f(in x, ref newGrad, null); - Float dirDeriv = VectorUtils.DotProduct(in newGrad, in dir); + float val = f(in x, ref newGrad, null); + float dirDeriv = VectorUtils.DotProduct(in newGrad, in dir); - Float scaledEps = Eps / normDir; + float scaledEps = Eps / normDir; VectorUtils.AddMultInto(in x, scaledEps, in dir, ref newX); - Float rVal = f(in newX, ref newGrad, null); + float rVal = f(in newX, ref newGrad, null); VectorUtils.AddMultInto(in x, -scaledEps, in dir, ref newX); - Float lVal = f(in newX, ref newGrad, null); + float lVal = f(in newX, ref newGrad, null); - Float numDeriv = (rVal - lVal) / (2 * scaledEps); + float numDeriv = (rVal - lVal) / (2 * scaledEps); - Float normDiff = Math.Abs(1 - numDeriv / dirDeriv); - Float diff = numDeriv - dirDeriv; + float normDiff = Math.Abs(1 - numDeriv / dirDeriv); + float diff = numDeriv - dirDeriv; if (!quiet) Console.WriteLine("{0,-18:0.0000e0}{1,-18:0.0000e0}{2,-15:0.0000e0}{3,0:0.0000e0}", numDeriv, dirDeriv, diff, normDiff); return normDiff; } - private static Float SampleFromGaussian(Random r) + private static float SampleFromGaussian(Random r) { double a = r.NextDouble(); double b = r.NextDouble(); - return (Float)(Math.Sqrt(-2 * Math.Log(a)) * MathUtils.Cos(2 * Math.PI * b)); + return (float)(Math.Sqrt(-2 * Math.Log(a)) * MathUtils.Cos(2 * Math.PI * b)); } } } diff --git 
a/src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs b/src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs index fa52df727e..bcbd54ed71 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs @@ -5,7 +5,6 @@ using System; using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; -using Float = System.Single; namespace Microsoft.ML.Numeric { @@ -19,7 +18,7 @@ internal sealed class L1Optimizer : Optimizer { // Biases do not contribute to the L1 norm and are assumed to be at the beginning of the weights. private readonly int _biasCount; - private readonly Float _l1weight; + private readonly float _l1weight; /// /// Create an L1Optimizer with the supplied value of M and termination criterion @@ -31,7 +30,7 @@ internal sealed class L1Optimizer : Optimizer /// Whether the optimizer will keep its internal state dense /// Termination criterion /// The flag enforcing the non-negativity constraint - public L1Optimizer(IHostEnvironment env, int biasCount, Float l1weight, int m = 20, bool keepDense = false, + public L1Optimizer(IHostEnvironment env, int biasCount, float l1weight, int m = 20, bool keepDense = false, ITerminationCriterion term = null, bool enforceNonNegativity = false) : base(env, m, keepDense, term, enforceNonNegativity) { @@ -41,14 +40,14 @@ public L1Optimizer(IHostEnvironment env, int biasCount, Float l1weight, int m = _l1weight = l1weight; } - internal override OptimizerState MakeState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, ref VBuffer initial) + internal override OptimizerState MakeState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, ref VBuffer initial) { Contracts.AssertValue(ch); ch.AssertValue(progress); if (EnforceNonNegativity) { - VBufferUtils.Apply(ref initial, delegate(int ind, ref Float initialVal) + VBufferUtils.Apply(ref initial, delegate(int ind, ref float initialVal) { if (initialVal < 0.0 && ind >= _biasCount) initialVal = 0; @@ -65,15 +64,15 @@ internal override OptimizerState MakeState(IChannel ch, IProgressChannelProvider /// public sealed class L1OptimizerState : OptimizerState { - private const Float Gamma = (Float)1e-4; + private const float Gamma = (float)1e-4; private const int MaxLineSearch = 8; private readonly DifferentiableFunction _function; private readonly int _biasCount; - private readonly Float _l1weight; + private readonly float _l1weight; - internal L1OptimizerState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, in VBuffer initial, int m, long totalMemLimit, - int biasCount, Float l1Weight, bool keepDense, bool enforceNonNegativity) + internal L1OptimizerState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, in VBuffer initial, int m, long totalMemLimit, + int biasCount, float l1Weight, bool keepDense, bool enforceNonNegativity) : base(ch, progress, in initial, m, totalMemLimit, keepDense, enforceNonNegativity) { Contracts.AssertValue(ch); @@ -95,10 +94,10 @@ public override DifferentiableFunction Function /// /// This is the original differentiable function with the injected L1 term. /// - private Float EvalCore(in VBuffer input, ref VBuffer gradient, IProgressChannelProvider progress) + private float EvalCore(in VBuffer input, ref VBuffer gradient, IProgressChannelProvider progress) { // REVIEW: Leverage Vector methods that use SSE. 
- Float res = 0; + float res = 0; if (!EnforceNonNegativity) { @@ -120,7 +119,7 @@ private Float EvalCore(in VBuffer input, ref VBuffer gradient, IPr return res; } - public override Float Eval(in VBuffer input, ref VBuffer gradient) + public override float Eval(in VBuffer input, ref VBuffer gradient) { return EvalCore(in input, ref gradient, ProgressProvider); } @@ -161,14 +160,14 @@ private void MakeSteepestDescDir() _steepestDescDir.CopyTo(ref _dir); } - private void GetNextPoint(Float alpha) + private void GetNextPoint(float alpha) { VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX); if (!EnforceNonNegativity) { VBufferUtils.ApplyWith(in _x, ref _newX, - delegate(int ind, Float xVal, ref Float newXval) + delegate(int ind, float xVal, ref float newXval) { if (xVal*newXval < 0.0 && ind >= _biasCount) newXval = 0; @@ -176,7 +175,7 @@ private void GetNextPoint(Float alpha) } else { - VBufferUtils.Apply(ref _newX, delegate(int ind, ref Float newXval) + VBufferUtils.Apply(ref _newX, delegate(int ind, ref float newXval) { if (newXval < 0.0 && ind >= _biasCount) newXval = 0; @@ -196,7 +195,7 @@ internal override void UpdateDir() /// internal override bool LineSearch(IChannel ch, bool force) { - Float dirDeriv = -VectorUtils.DotProduct(in _dir, in _steepestDescDir); + float dirDeriv = -VectorUtils.DotProduct(in _dir, in _steepestDescDir); if (dirDeriv == 0) throw ch.Process(new PrematureConvergenceException(this, "Directional derivative is zero. You may be sitting on the optimum.")); @@ -206,13 +205,13 @@ internal override bool LineSearch(IChannel ch, bool force) // It may also indicate that your function is not convex. ch.Check(dirDeriv < 0, "L-BFGS chose a non-descent direction."); - Float alpha = (Iter == 1 ? (1 / VectorUtils.Norm(_dir)) : 1); + float alpha = (Iter == 1 ? 
(1 / VectorUtils.Norm(_dir)) : 1); GetNextPoint(alpha); - Float unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x); + float unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x); if (unnormCos < 0) { VBufferUtils.ApplyWith(in _steepestDescDir, ref _dir, - (int ind, Float sdVal, ref Float dirVal) => + (int ind, float sdVal, ref float dirVal) => { if (sdVal * dirVal < 0 && ind >= _biasCount) dirVal = 0; @@ -237,7 +236,7 @@ internal override bool LineSearch(IChannel ch, bool force) return false; } - alpha *= (Float)0.25; + alpha *= (float)0.25; GetNextPoint(alpha); unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x); } diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs b/src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs index 98962b2d47..4f8a49a2e3 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs @@ -5,7 +5,6 @@ using System; using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; -using Float = System.Single; namespace Microsoft.ML.Numeric { @@ -19,7 +18,7 @@ internal interface ILineSearch : IDiffLineSearch /// /// Function to minimize /// Minimizing value - Float Minimize(Func func); + float Minimize(Func func); } /// @@ -28,7 +27,7 @@ internal interface ILineSearch : IDiffLineSearch /// Point to evaluate /// Derivative at that point /// - internal delegate Float DiffFunc1D(Float x, out Float deriv); + internal delegate float DiffFunc1D(float x, out float deriv); /// /// Line search that uses derivatives @@ -42,7 +41,7 @@ internal interface IDiffLineSearch /// Value of function at 0 /// Derivative of function at 0 /// Minimizing value - Float Minimize(DiffFunc1D func, Float initValue, Float initDeriv); + float Minimize(DiffFunc1D func, float initValue, float initDeriv); } /// @@ -50,8 +49,8 @@ internal interface IDiffLineSearch /// internal sealed class CubicInterpLineSearch : IDiffLineSearch { - private Float _step; - private const Float _minProgress = (Float)0.01; + private float _step; + private const float _minProgress = (float)0.01; /// /// Gets or sets maximum number of steps. @@ -61,12 +60,12 @@ internal sealed class CubicInterpLineSearch : IDiffLineSearch /// /// Gets or sets the minimum relative size of bounds around solution. 
/// - public Float MinWindow { get; set; } + public float MinWindow { get; set; } /// /// Gets or sets maximum step size /// - public Float MaxStep { get; set; } + public float MaxStep { get; set; } /// /// Makes a CubicInterpLineSearch @@ -74,7 +73,7 @@ internal sealed class CubicInterpLineSearch : IDiffLineSearch /// Maximum number of steps before terminating public CubicInterpLineSearch(int maxNumSteps) { - MaxStep = Float.PositiveInfinity; + MaxStep = float.PositiveInfinity; MaxNumSteps = maxNumSteps; _step = 1; } @@ -83,9 +82,9 @@ public CubicInterpLineSearch(int maxNumSteps) /// Makes a CubicInterpLineSearch /// /// Minimum relative size of bounds around solution - public CubicInterpLineSearch(Float minWindow) + public CubicInterpLineSearch(float minWindow) { - MaxStep = Float.PositiveInfinity; + MaxStep = float.PositiveInfinity; MinWindow = minWindow; _step = 1; } @@ -96,12 +95,12 @@ public CubicInterpLineSearch(Float minWindow) /// first point, with value and derivative /// second point, with value and derivative /// local minimum of interpolating cubic polynomial - private static Float CubicInterp(StepValueDeriv a, StepValueDeriv b) + private static float CubicInterp(StepValueDeriv a, StepValueDeriv b) { - Float t1 = a.Deriv + b.Deriv - 3 * (a.Value - b.Value) / (a.Step - b.Step); - Float t2 = Math.Sign(b.Step - a.Step) * MathUtils.Sqrt(t1 * t1 - a.Deriv * b.Deriv); - Float num = b.Deriv + t2 - t1; - Float denom = b.Deriv - a.Deriv + 2 * t2; + float t1 = a.Deriv + b.Deriv - 3 * (a.Value - b.Value) / (a.Step - b.Step); + float t2 = Math.Sign(b.Step - a.Step) * MathUtils.Sqrt(t1 * t1 - a.Deriv * b.Deriv); + float num = b.Deriv + t2 - t1; + float denom = b.Deriv - a.Deriv + 2 * t2; return b.Step - (b.Step - a.Step) * num / denom; } @@ -114,13 +113,13 @@ public StepValueDeriv(DiffFunc1D func) _func = func; } - public StepValueDeriv(DiffFunc1D func, Float initStep) + public StepValueDeriv(DiffFunc1D func, float initStep) { _func = func; Step = initStep; } - public StepValueDeriv(DiffFunc1D func, Float initStep, Float initVal, Float initDeriv) + public StepValueDeriv(DiffFunc1D func, float initStep, float initVal, float initDeriv) { _func = func; _step = initStep; @@ -128,18 +127,18 @@ public StepValueDeriv(DiffFunc1D func, Float initStep, Float initVal, Float init _deriv = initDeriv; } - private Float _step; - private Float _value; - private Float _deriv; + private float _step; + private float _value; + private float _deriv; - public Float Step { + public float Step { get { return _step; } set { _step = value; _value = _func(value, out _deriv); } } - public Float Value => _value; + public float Value => _value; - public Float Deriv => _deriv; + public float Deriv => _deriv; } private static void Swap(ref T a, ref T b) @@ -156,13 +155,13 @@ private static void Swap(ref T a, ref T b) /// Value of function at 0 /// Derivative of function at 0 /// Minimizing value - public Float Minimize(DiffFunc1D func, Float initValue, Float initDeriv) + public float Minimize(DiffFunc1D func, float initValue, float initDeriv) { _step = FindMinimum(func, initValue, initDeriv); return Math.Min(_step, MaxStep); } - private Float FindMinimum(DiffFunc1D func, Float initValue, Float initDeriv) + private float FindMinimum(DiffFunc1D func, float initValue, float initDeriv) { Contracts.CheckParam(initDeriv < 0, nameof(initDeriv), "Cannot search in direction of ascent!"); @@ -178,21 +177,21 @@ private Float FindMinimum(DiffFunc1D func, Float initValue, Float initDeriv) hi.Step = lo.Step * 2; } - Float window = 1; 
+ float window = 1; StepValueDeriv mid = new StepValueDeriv(func); for (int numSteps = 1; ; ++numSteps) { - Float interp = CubicInterp(lo, hi); + float interp = CubicInterp(lo, hi); if (window <= MinWindow || numSteps == MaxNumSteps) return interp; // insure minimal progress to narrow interval - Float minProgressStep = _minProgress * (hi.Step - lo.Step); - Float maxMid = hi.Step - minProgressStep; + float minProgressStep = _minProgress * (hi.Step - lo.Step); + float maxMid = hi.Step - minProgressStep; if (interp > maxMid) interp = maxMid; - Float minMid = lo.Step + minProgressStep; + float minMid = lo.Step + minProgressStep; if (interp < minMid) interp = minMid; @@ -219,8 +218,8 @@ private Float FindMinimum(DiffFunc1D func, Float initValue, Float initDeriv) /// internal sealed class GoldenSectionSearch : ILineSearch { - private Float _step; - private static readonly Float _phi = (1 + MathUtils.Sqrt(5)) / 2; + private float _step; + private static readonly float _phi = (1 + MathUtils.Sqrt(5)) / 2; /// /// Gets or sets maximum number of steps before terminating. @@ -230,12 +229,12 @@ internal sealed class GoldenSectionSearch : ILineSearch /// /// Gets or sets minimum relative size of bounds around solution. /// - public Float MinWindow { get; set; } + public float MinWindow { get; set; } /// /// Gets or sets maximum step size. /// - public Float MaxStep { get; set; } + public float MaxStep { get; set; } /// /// Makes a new GoldenSectionSearch @@ -243,7 +242,7 @@ internal sealed class GoldenSectionSearch : ILineSearch /// Maximum number of steps before terminating (not including bracketing) public GoldenSectionSearch(int maxNumSteps) { - MaxStep = Float.PositiveInfinity; + MaxStep = float.PositiveInfinity; MaxNumSteps = maxNumSteps; _step = 1; } @@ -252,9 +251,9 @@ public GoldenSectionSearch(int maxNumSteps) /// Makes a new GoldenSectionSearch /// /// Minimum relative size of bounds around solution - public GoldenSectionSearch(Float minWindow) + public GoldenSectionSearch(float minWindow) { - MaxStep = Float.PositiveInfinity; + MaxStep = float.PositiveInfinity; MaxNumSteps = int.MaxValue; MinWindow = minWindow; _step = 1; @@ -270,10 +269,10 @@ private static void Rotate(ref T a, ref T b, ref T c) private sealed class StepAndValue { - private readonly Func _func; - private Float _step; + private readonly Func _func; + private float _step; - public Float Step { + public float Step { get { return _step; } set { _step = value; @@ -281,15 +280,15 @@ public Float Step { } } - public Float Value { get; private set; } + public float Value { get; private set; } - public StepAndValue(Func func) + public StepAndValue(Func func) { _func = func; - _step = Value = Float.NaN; + _step = Value = float.NaN; } - public StepAndValue(Func func, Float initStep) + public StepAndValue(Func func, float initStep) : this(func) { Step = initStep; @@ -303,7 +302,7 @@ public StepAndValue(Func func, Float initStep) /// Value of function at 0 /// Derivative of function at 0 /// Minimizing value - public Float Minimize(DiffFunc1D f, Float initVal, Float initDeriv) + public float Minimize(DiffFunc1D f, float initVal, float initDeriv) { return Minimize(f); } @@ -313,9 +312,9 @@ public Float Minimize(DiffFunc1D f, Float initVal, Float initDeriv) /// /// Function to minimize /// Minimizing value - public Float Minimize(DiffFunc1D func) + public float Minimize(DiffFunc1D func) { - Float d; + float d; return Minimize(x => func(x, out d)); } @@ -324,13 +323,13 @@ public Float Minimize(DiffFunc1D func) /// /// Function to minimize 
/// Minimizing value - public Float Minimize(Func func) + public float Minimize(Func func) { _step = FindMinimum(func); return Math.Min(_step, MaxStep); } - private Float FindMinimum(Func func) + private float FindMinimum(Func func) { StepAndValue lo = new StepAndValue(func, _step / _phi); StepAndValue left = new StepAndValue(func, _step); @@ -358,7 +357,7 @@ private Float FindMinimum(Func func) } } - Float window = 1 - 1 / (_phi * _phi); + float window = 1 - 1 / (_phi * _phi); int numSteps = 0; if (window <= MinWindow || numSteps == MaxNumSteps) @@ -398,14 +397,14 @@ private Float FindMinimum(Func func) /// internal sealed class BacktrackingLineSearch : IDiffLineSearch { - private Float _step; - private Float _c1; + private float _step; + private float _c1; /// /// Makes a backtracking line search /// /// Parameter for Armijo condition - public BacktrackingLineSearch(Float c1 = (Float)1e-4) + public BacktrackingLineSearch(float c1 = (float)1e-4) { _step = 1; _c1 = c1; @@ -418,14 +417,14 @@ public BacktrackingLineSearch(Float c1 = (Float)1e-4) /// Value of function at 0 /// Derivative of function at 0 /// Minimizing value - public Float Minimize(DiffFunc1D f, Float initVal, Float initDeriv) + public float Minimize(DiffFunc1D f, float initVal, float initDeriv) { Contracts.Check(initDeriv < 0, "Cannot search in direction of ascent!"); - Float dummy; + float dummy; for (_step *= 2; ; _step /= 2) { - Float newVal = f(_step, out dummy); + float newVal = f(_step, out dummy); if (newVal <= initVal + _c1 * _step * initDeriv) return _step; } @@ -436,45 +435,45 @@ public Float Minimize(DiffFunc1D f, Float initVal, Float initDeriv) // possibly something we should put into our unit tests? internal static class Test { - private static VBuffer _c1; - private static VBuffer _c2; - private static VBuffer _c3; + private static VBuffer _c1; + private static VBuffer _c2; + private static VBuffer _c3; - private static Float QuadTest(Float x, out Float deriv) + private static float QuadTest(float x, out float deriv) { - const Float a = (Float)1.32842; - const Float b = (Float)(-28.38092); - const Float c = 93; + const float a = (float)1.32842; + const float b = (float)(-28.38092); + const float c = 93; deriv = a * x + b; - return (Float)0.5 * a * x * x + b * x + c; + return (float)0.5 * a * x * x + b * x + c; } - private static Float LogTest(Float x, out Float deriv) + private static float LogTest(float x, out float deriv) { double e = Math.Exp(x); - deriv = (Float)(-1.0 / (1.0 + e) + e / (1.0 + e) - 0.5); - return (Float)(Math.Log(1 + 1.0 / e) + Math.Log(1 + e) - 0.5 * x); + deriv = (float)(-1.0 / (1.0 + e) + e / (1.0 + e) - 0.5); + return (float)(Math.Log(1 + 1.0 / e) + Math.Log(1 + e) - 0.5 * x); } - private static Float QuadTest2D(in VBuffer x, ref VBuffer grad, IProgressChannelProvider progress = null) + private static float QuadTest2D(in VBuffer x, ref VBuffer grad, IProgressChannelProvider progress = null) { - Float d1 = VectorUtils.DotProduct(in x, in _c1); - Float d2 = VectorUtils.DotProduct(in x, in _c2); - Float d3 = VectorUtils.DotProduct(in x, in _c3); + float d1 = VectorUtils.DotProduct(in x, in _c1); + float d2 = VectorUtils.DotProduct(in x, in _c2); + float d3 = VectorUtils.DotProduct(in x, in _c3); _c3.CopyTo(ref grad); VectorUtils.AddMult(in _c1, d1, ref grad); VectorUtils.AddMult(in _c2, d2, ref grad); - return (Float)0.5 * (d1 * d1 + d2 * d2) + d3 + 55; + return (float)0.5 * (d1 * d1 + d2 * d2) + d3 + 55; } - private static void StochasticQuadTest2D(in VBuffer x, ref VBuffer grad) + private 
static void StochasticQuadTest2D(in VBuffer<float> x, ref VBuffer<float> grad)
        {
            QuadTest2D(in x, ref grad);
        }

-        private static void CreateWrapped(out VBuffer<Float> vec, params Float[] values)
+        private static void CreateWrapped(out VBuffer<float> vec, params float[] values)
        {
-            vec = new VBuffer<Float>(Utils.Size(values), values);
+            vec = new VBuffer<float>(Utils.Size(values), values);
        }

        static Test()
@@ -486,14 +485,14 @@ static Test()

        private static void RunTest(DiffFunc1D f)
        {
-            CubicInterpLineSearch cils = new CubicInterpLineSearch((Float)1e-8);
-            Float val;
-            Float deriv;
+            CubicInterpLineSearch cils = new CubicInterpLineSearch((float)1e-8);
+            float val;
+            float deriv;
            val = f(0, out deriv);
-            Float min = cils.Minimize(f, val, deriv);
+            float min = cils.Minimize(f, val, deriv);
            val = f(min, out deriv);
            Console.WriteLine(deriv);
-            GoldenSectionSearch gss = new GoldenSectionSearch((Float)1e-8);
+            GoldenSectionSearch gss = new GoldenSectionSearch((float)1e-8);
            min = gss.Minimize(f);
            val = f(min, out deriv);
            Console.WriteLine(deriv);
@@ -504,22 +503,22 @@ public static void Main(string[] argv)
            RunTest(QuadTest);
            RunTest(LogTest);

-            VBuffer<Float> grad = VBufferUtils.CreateEmpty<Float>(2);
+            VBuffer<float> grad = VBufferUtils.CreateEmpty<float>(2);
            int n = 0;
            bool print = false;
            DTerminate term =
-                (in VBuffer<Float> x) =>
+                (in VBuffer<float> x) =>
                {
                    QuadTest2D(in x, ref grad);
-                    Float norm = VectorUtils.Norm(grad);
+                    float norm = VectorUtils.Norm(grad);
                    if (++n % 1000 == 0 || print)
                        Console.WriteLine("{0}\t{1}", n, norm);
                    return (norm < 1e-5);
                };
-            SgdOptimizer sgdo = new SgdOptimizer(term, SgdOptimizer.RateScheduleType.Constant, false, 100, 1, (Float)0.99);
-            VBuffer<Float> init;
+            SgdOptimizer sgdo = new SgdOptimizer(term, SgdOptimizer.RateScheduleType.Constant, false, 100, 1, (float)0.99);
+            VBuffer<float> init;
            CreateWrapped(out init, 0, 0);
-            VBuffer<Float> ans = default(VBuffer<Float>);
+            VBuffer<float> ans = default(VBuffer<float>);
            sgdo.Minimize(StochasticQuadTest2D, ref init, ref ans);
            QuadTest2D(in ans, ref grad);
            Console.WriteLine(VectorUtils.Norm(grad));
diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs b/src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs
index d5e4e41a80..ffae097913 100644
--- a/src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs
+++ b/src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs
@@ -6,7 +6,6 @@
 using System.Collections.Generic;
 using Microsoft.ML.Data;
 using Microsoft.ML.Internal.Utilities;
-using Float = System.Single;

namespace Microsoft.ML.Numeric
{
@@ -43,8 +42,8 @@ internal sealed class GradientCheckingMonitor : ITerminationCriterion
        private readonly ITerminationCriterion _termCrit;
        private readonly int _gradCheckInterval;
        // Reusable vectors utilized by the gradient tester.
-        private VBuffer<Float> _newGrad;
-        private VBuffer<Float> _newX;
+        private VBuffer<float> _newGrad;
+        private VBuffer<float> _newX;

        ///
        /// Initializes a new instance of the class.
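The monitor defers to GradientTester, which compares the analytic directional derivative grad.d against the symmetric difference (f(x + eps*d) - f(x - eps*d)) / (2*eps). A minimal scalar sketch of the same check (illustrative only; f and df here are hypothetical stand-ins for a function and its hand-coded derivative, not ML.NET APIs):

    static float CheckGradient(Func<float, float> f, Func<float, float> df, float x)
    {
        const float eps = 4.79e-6f; // same order as GradientTester.Eps, roughly the cube root of the unit roundoff
        float numeric = (f(x + eps) - f(x - eps)) / (2 * eps);
        float analytic = df(x);
        return Math.Abs(1 - numeric / analytic); // small means gradient and function agree
    }

For example, CheckGradient(v => v * v, v => 2 * v, 3f) returns a value near zero, while a wrong derivative pushes it toward one.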
@@ -80,13 +79,13 @@ public bool Terminate(Optimizer.OptimizerState state, out string message) return terminate; } - private Float Check(Optimizer.OptimizerState state) + private float Check(Optimizer.OptimizerState state) { Console.Error.Write(_checkingMessage); Console.Error.Flush(); VBuffer x = state.X; var lastDir = state.LastDir; - Float checkResult = GradientTester.Test(state.Function, in x, ref lastDir, true, ref _newGrad, ref _newX); + float checkResult = GradientTester.Test(state.Function, in x, ref lastDir, true, ref _newGrad, ref _newX); for (int i = 0; i < _checkingMessage.Length; i++) Console.Error.Write('\b'); return checkResult; @@ -129,10 +128,10 @@ public void Reset() { } /// internal sealed class MeanImprovementCriterion : ITerminationCriterion { - private readonly Float _tol; - private readonly Float _lambda; + private readonly float _tol; + private readonly float _lambda; private readonly int _maxIterations; - private Float _unnormMeanImprovement; + private float _unnormMeanImprovement; /// /// Initializes a new instance of the class. @@ -140,7 +139,7 @@ internal sealed class MeanImprovementCriterion : ITerminationCriterion /// The tolerance parameter /// The geometric weighting factor. Higher means more heavily weighted toward older values. /// Maximum amount of iteration - public MeanImprovementCriterion(Float tol = (Float)1e-4, Float lambda = (Float)0.5, int maxIterations = int.MaxValue) + public MeanImprovementCriterion(float tol = (float)1e-4, float lambda = (float)0.5, int maxIterations = int.MaxValue) { _tol = tol; _lambda = lambda; @@ -150,7 +149,7 @@ public MeanImprovementCriterion(Float tol = (Float)1e-4, Float lambda = (Float)0 /// /// When criterion drops below this value, optimization is terminated /// - public Float Tolerance + public float Tolerance { get { return _tol; } } @@ -169,7 +168,7 @@ public bool Terminate(Optimizer.OptimizerState state, out string message) { _unnormMeanImprovement = (state.LastValue - state.Value) + _lambda * _unnormMeanImprovement; - Float crit = _unnormMeanImprovement * (1 - _lambda) / (1 - MathUtils.Pow(_lambda, state.Iter)); + float crit = _unnormMeanImprovement * (1 - _lambda) / (1 - MathUtils.Pow(_lambda, state.Iter)); message = string.Format("{0:0.000e0}", crit); return (crit < _tol || state.Iter >= _maxIterations); } @@ -193,14 +192,14 @@ public void Reset() internal sealed class MeanRelativeImprovementCriterion : ITerminationCriterion { private readonly int _n; - private readonly Float _tol; + private readonly float _tol; private readonly int _maxIterations; - private Queue _pastValues; + private Queue _pastValues; /// /// When criterion drops below this value, optimization is terminated /// - public Float Tolerance + public float Tolerance { get { return _tol; } } @@ -219,12 +218,12 @@ public int Iters /// tolerance level /// number of past iterations to average over /// Maximum amount of iteration - public MeanRelativeImprovementCriterion(Float tol = (Float)1e-4, int n = 5, int maxIterations = int.MaxValue) + public MeanRelativeImprovementCriterion(float tol = (float)1e-4, int n = 5, int maxIterations = int.MaxValue) { _tol = tol; _n = n; _maxIterations = maxIterations; - _pastValues = new Queue(n); + _pastValues = new Queue(n); } public string FriendlyName { get { return ToString(); } } @@ -238,7 +237,7 @@ public MeanRelativeImprovementCriterion(Float tol = (Float)1e-4, int n = 5, int /// true if criterion is less than tolerance public bool Terminate(Optimizer.OptimizerState state, out string message) { - Float value 
= state.Value; + float value = state.Value; if (_pastValues.Count < _n) { @@ -247,9 +246,9 @@ public bool Terminate(Optimizer.OptimizerState state, out string message) return false; } - Float avgImprovement = (_pastValues.Dequeue() - value) / _n; + float avgImprovement = (_pastValues.Dequeue() - value) / _n; _pastValues.Enqueue(value); - Float val = avgImprovement / Math.Abs(value); + float val = avgImprovement / Math.Abs(value); message = string.Format("{0,0:0.0000e0}", val); return (val < _tol || state.Iter >= _maxIterations); } @@ -282,14 +281,14 @@ public void Reset() /// internal sealed class UpperBoundOnDistanceWithL2 : StaticTerminationCriterion { - private readonly Float _sigmaSq; - private readonly Float _tol; - private Float _bestBoundOnMin; + private readonly float _sigmaSq; + private readonly float _tol; + private float _bestBoundOnMin; /// /// When criterion drops below this value, optimization is terminated /// - public Float Tolerance + public float Tolerance { get { return _tol; } } @@ -299,13 +298,13 @@ public Float Tolerance /// /// value of sigmaSq in L2 regularizer /// tolerance level - public UpperBoundOnDistanceWithL2(Float sigmaSq = 1, Float tol = (Float)1e-2) + public UpperBoundOnDistanceWithL2(float sigmaSq = 1, float tol = (float)1e-2) { _sigmaSq = sigmaSq; _tol = tol; // REVIEW: Why shouldn't this be "Reset"? - _bestBoundOnMin = Float.NegativeInfinity; + _bestBoundOnMin = float.NegativeInfinity; } public override string FriendlyName { get { return ToString(); } } @@ -320,11 +319,11 @@ public UpperBoundOnDistanceWithL2(Float sigmaSq = 1, Float tol = (Float)1e-2) public override bool Terminate(Optimizer.OptimizerState state, out string message) { var gradient = state.Grad; - Float gradientNormSquared = VectorUtils.NormSquared(gradient); - Float value = state.Value; - Float newBoundOnMin = value - (Float)0.5 * _sigmaSq * gradientNormSquared; + float gradientNormSquared = VectorUtils.NormSquared(gradient); + float value = state.Value; + float newBoundOnMin = value - (float)0.5 * _sigmaSq * gradientNormSquared; _bestBoundOnMin = Math.Max(_bestBoundOnMin, newBoundOnMin); - Float val = (value - _bestBoundOnMin) / Math.Abs(value); + float val = (value - _bestBoundOnMin) / Math.Abs(value); message = string.Format("{0,0:0.0000e0}", val); return (val < _tol); } @@ -347,12 +346,12 @@ public override string ToString() /// internal sealed class RelativeNormGradient : StaticTerminationCriterion { - private readonly Float _tol; + private readonly float _tol; /// /// When criterion drops below this value, optimization is terminated /// - public Float Tolerance + public float Tolerance { get { return _tol; } } @@ -361,7 +360,7 @@ public Float Tolerance /// Create a RelativeNormGradient with the supplied tolerance /// /// tolerance level - public RelativeNormGradient(Float tol = (Float)1e-4) + public RelativeNormGradient(float tol = (float)1e-4) { _tol = tol; } @@ -377,8 +376,8 @@ public RelativeNormGradient(Float tol = (Float)1e-4) public override bool Terminate(Optimizer.OptimizerState state, out string message) { var grad = state.Grad; - Float norm = VectorUtils.Norm(grad); - Float val = norm / Math.Abs(state.Value); + float norm = VectorUtils.Norm(grad); + float val = norm / Math.Abs(state.Value); message = string.Format("{0,0:0.0000e0}", val); return val < _tol; } diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs b/src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs index 45d33e1de2..0a9c2061bd 100644 --- 
a/src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs @@ -6,7 +6,6 @@ using System.Collections.Generic; using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; -using Float = System.Single; namespace Microsoft.ML.Numeric { @@ -90,7 +89,7 @@ internal OptimizerException(OptimizerState state, string message) } } - internal virtual OptimizerState MakeState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, ref VBuffer initial) + internal virtual OptimizerState MakeState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, ref VBuffer initial) { return new FunctionOptimizerState(ch, progress, function, in initial, M, TotalMemoryLimit, KeepDense, EnforceNonNegativity); } @@ -99,7 +98,7 @@ internal sealed class FunctionOptimizerState : OptimizerState { public override DifferentiableFunction Function { get; } - internal FunctionOptimizerState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, in VBuffer initial, int m, + internal FunctionOptimizerState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, in VBuffer initial, int m, long totalMemLimit, bool keepDense, bool enforceNonNegativity) : base(ch, progress, in initial, m, totalMemLimit, keepDense, enforceNonNegativity) { @@ -107,7 +106,7 @@ internal FunctionOptimizerState(IChannel ch, IProgressChannelProvider progress, Init(); } - public override Float Eval(in VBuffer input, ref VBuffer gradient) + public override float Eval(in VBuffer input, ref VBuffer gradient) { return Function(in input, ref gradient, ProgressProvider); } @@ -119,12 +118,12 @@ public override Float Eval(in VBuffer input, ref VBuffer gradient) public abstract class OptimizerState { #pragma warning disable MSML_GeneralName // Too annoying in this case. Consider fixing later. 
- protected internal VBuffer _x; - protected internal VBuffer _grad; - protected internal VBuffer _newX; - protected internal VBuffer _newGrad; - protected internal VBuffer _dir; - protected internal VBuffer _steepestDescDir; + protected internal VBuffer _x; + protected internal VBuffer _grad; + protected internal VBuffer _newX; + protected internal VBuffer _newGrad; + protected internal VBuffer _dir; + protected internal VBuffer _steepestDescDir; #pragma warning restore MSML_GeneralName /// @@ -140,32 +139,32 @@ public abstract class OptimizerState /// The function being optimized /// public abstract DifferentiableFunction Function { get; } - public abstract Float Eval(in VBuffer input, ref VBuffer gradient); + public abstract float Eval(in VBuffer input, ref VBuffer gradient); /// /// The current point being explored /// - public VBuffer X { get { return _newX; } } + public VBuffer X { get { return _newX; } } /// /// The gradient at the current point /// - public VBuffer Grad { get { return _newGrad; } } + public VBuffer Grad { get { return _newGrad; } } /// /// The direction of search that led to the current point /// - public VBuffer LastDir { get { return _dir; } } + public VBuffer LastDir { get { return _dir; } } /// /// The current function value /// - public Float Value { get; protected internal set; } + public float Value { get; protected internal set; } /// /// The function value at the last point /// - public Float LastValue { get; protected internal set; } + public float LastValue { get; protected internal set; } /// /// The number of iterations so far @@ -186,14 +185,14 @@ public abstract class OptimizerState /// private readonly bool _keepDense; - private readonly VBuffer[] _sList; - private readonly VBuffer[] _yList; - private readonly List _roList; + private readonly VBuffer[] _sList; + private readonly VBuffer[] _yList; + private readonly List _roList; private int _m; private readonly long _totalMemLimit; - protected internal OptimizerState(IChannel ch, IProgressChannelProvider progress, in VBuffer initial, + protected internal OptimizerState(IChannel ch, IProgressChannelProvider progress, in VBuffer initial, int m, long totalMemLimit, bool keepDense, bool enforceNonNegativity) { Contracts.AssertValue(ch); @@ -217,9 +216,9 @@ protected internal OptimizerState(IChannel ch, IProgressChannelProvider progress _newGrad = CreateWorkingVector(); _steepestDescDir = CreateWorkingVector(); - _sList = new VBuffer[_m]; - _yList = new VBuffer[_m]; - _roList = new List(); + _sList = new VBuffer[_m]; + _yList = new VBuffer[_m]; + _roList = new List(); EnforceNonNegativity = enforceNonNegativity; } @@ -228,11 +227,11 @@ protected internal OptimizerState(IChannel ch, IProgressChannelProvider progress /// Convenience function to construct a working vector of length Dim. /// /// - protected VBuffer CreateWorkingVector() + protected VBuffer CreateWorkingVector() { // Owing to the way the operations are structured, if the "x", "newX", and "dir" vectors // start out (or somehow naturally become) dense, they will remain dense. - return _keepDense ? VBufferUtils.CreateDense(Dim) : VBufferUtils.CreateEmpty(Dim); + return _keepDense ? VBufferUtils.CreateDense(Dim) : VBufferUtils.CreateEmpty(Dim); } // Leaf constructors must call this once they are fully initialized. 
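[Editor's aside between hunks, not part of the patch: the `_sList`, `_yList`, and `_roList` members renamed above are the L-BFGS memory, holding the last m position deltas s, gradient deltas y, and products ro = s·y. The `MapDirByInverseHessian` hunk that follows applies the standard two-loop recursion over that memory. Below is a minimal standalone sketch of that recursion over plain float[] vectors, with assumed helper names (`Dot`, `AddMult`) and without ML.NET's convention of storing the negated search direction; it is illustrative only, not the library's implementation.]

    // Illustrative two-loop recursion: computes H*g, where H approximates the inverse
    // Hessian from the m most recent (s, y) pairs; negate the result for a descent direction.
    static float[] ApplyInverseHessian(float[] g, float[][] s, float[][] y, float[] ro)
    {
        int m = ro.Length;
        var dir = (float[])g.Clone();
        var alphas = new float[m];
        for (int i = m - 1; i >= 0; i--)            // first loop: newest to oldest pair
        {
            alphas[i] = Dot(s[i], dir) / ro[i];
            AddMult(y[i], -alphas[i], dir);
        }
        if (m > 0)
        {
            // Initial scaling H0 = ((s·y)/(y·y)) * I, matching the yDotY step in the hunk below.
            float gamma = ro[m - 1] / Dot(y[m - 1], y[m - 1]);
            for (int j = 0; j < dir.Length; j++)
                dir[j] *= gamma;
        }
        for (int i = 0; i < m; i++)                 // second loop: oldest to newest pair
            AddMult(s[i], alphas[i] - Dot(y[i], dir) / ro[i], dir);
        return dir;
    }

    static float Dot(float[] a, float[] b)
    {
        float d = 0;
        for (int i = 0; i < a.Length; i++)
            d += a[i] * b[i];
        return d;
    }

    static void AddMult(float[] src, float c, float[] dst)
    {
        for (int i = 0; i < dst.Length; i++)
            dst[i] += c * src[i];
    }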
@@ -250,7 +249,7 @@ internal void MapDirByInverseHessian() if (count != 0) { - Float[] alphas = new Float[count]; + float[] alphas = new float[count]; int lastGoodRo = -1; @@ -269,14 +268,14 @@ internal void MapDirByInverseHessian() if (lastGoodRo == -1) return; - Float yDotY = VectorUtils.DotProduct(in _yList[lastGoodRo], in _yList[lastGoodRo]); + float yDotY = VectorUtils.DotProduct(in _yList[lastGoodRo], in _yList[lastGoodRo]); VectorUtils.ScaleBy(ref _dir, _roList[lastGoodRo] / yDotY); for (int i = 0; i <= lastGoodRo; i++) { if (_roList[i] > 0) { - Float beta = VectorUtils.DotProduct(in _yList[i], in _dir) / _roList[i]; + float beta = VectorUtils.DotProduct(in _yList[i], in _dir) / _roList[i]; VectorUtils.AddMult(in _sList[i], -alphas[i] - beta, ref _dir); } } @@ -293,7 +292,7 @@ internal void DiscardOldVectors() protected void FixDirZeros() { VBufferUtils.ApplyWithEitherDefined(in _steepestDescDir, ref _dir, - (int i, Float sdVal, ref Float dirVal) => + (int i, float sdVal, ref float dirVal) => { if (sdVal == 0) dirVal = 0; @@ -335,8 +334,8 @@ internal void Shift() } } - VBuffer nextS; - VBuffer nextY; + VBuffer nextS; + VBuffer nextY; if (_roList.Count == _m) { @@ -356,7 +355,7 @@ internal void Shift() VectorUtils.AddMultInto(in _newX, -1, in _x, ref nextS); VectorUtils.AddMultInto(in _newGrad, -1, in _grad, ref nextY); - Float ro = VectorUtils.DotProduct(in nextS, in nextY); + float ro = VectorUtils.DotProduct(in nextS, in nextY); if (ro == 0) throw Ch.Process(new PrematureConvergenceException(this, "ro equals zero. Is your function linear?")); @@ -380,7 +379,7 @@ internal void Shift() internal virtual bool LineSearch(IChannel ch, bool force) { Contracts.AssertValue(ch); - Float dirDeriv = VectorUtils.DotProduct(in _dir, in _grad); + float dirDeriv = VectorUtils.DotProduct(in _dir, in _grad); if (dirDeriv == 0) throw ch.Process(new PrematureConvergenceException(this, "Directional derivative is zero. You may be sitting on the optimum.")); @@ -389,10 +388,10 @@ internal virtual bool LineSearch(IChannel ch, bool force) // The most likely reasons for this is a bug in your function's gradient computation, ch.Check(dirDeriv < 0, "L-BFGS chose a non-descent direction."); - Float c1 = (Float)1e-4 * dirDeriv; - Float c2 = (Float)0.9 * dirDeriv; + float c1 = (float)1e-4 * dirDeriv; + float c2 = (float)0.9 * dirDeriv; - Float alpha = (Iter == 1 ? (1 / VectorUtils.Norm(_dir)) : 1); + float alpha = (Iter == 1 ? (1 / VectorUtils.Norm(_dir)) : 1); PointValueDeriv last = new PointValueDeriv(0, LastValue, dirDeriv); PointValueDeriv aLo = new PointValueDeriv(); @@ -404,7 +403,7 @@ internal virtual bool LineSearch(IChannel ch, bool force) VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX); if (EnforceNonNegativity) { - VBufferUtils.Apply(ref _newX, delegate(int ind, ref Float newXval) + VBufferUtils.Apply(ref _newX, delegate(int ind, ref float newXval) { if (newXval < 0.0) newXval = 0; @@ -413,7 +412,7 @@ internal virtual bool LineSearch(IChannel ch, bool force) Value = Eval(in _newX, ref _newGrad); GradientCalculations++; - if (Float.IsPositiveInfinity(Value)) + if (float.IsPositiveInfinity(Value)) { alpha /= 2; continue; @@ -444,12 +443,12 @@ internal virtual bool LineSearch(IChannel ch, bool force) last = curr; if (alpha == 0) - alpha = Float.Epsilon; // Robust to divisional underflow. + alpha = float.Epsilon; // Robust to divisional underflow. 
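                    // (Editor's note, illustrative rather than part of the patch: c1 = 1e-4 * dirDeriv
                    // and c2 = 0.9 * dirDeriv above are the classic Wolfe constants. A step size a along
                    // direction d is acceptable when it gives sufficient decrease,
                    //     f(x + a*d) <= f(x) + a * c1,
                    // and enough curvature,
                    //     grad(x + a*d)·d >= c2.
                    // Until a bracket around such a step is found, alpha keeps doubling; the "zoom" loop
                    // below then narrows the bracket, as in Nocedal & Wright, chapter 3.)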
else alpha *= 2; } - Float minChange = (Float)0.01; + float minChange = (float)0.01; int maxSteps = 10; // this loop is the "zoom" procedure described in Nocedal & Wright @@ -469,23 +468,23 @@ internal virtual bool LineSearch(IChannel ch, bool force) else { alpha = CubicInterp(aLo, aHi); - if (Float.IsNaN(alpha) || Float.IsInfinity(alpha)) + if (float.IsNaN(alpha) || float.IsInfinity(alpha)) alpha = (aLo.A + aHi.A) / 2; } // this is to ensure that the new point is within bounds // and that the change is reasonably sized - Float ub = (minChange * left.A + (1 - minChange) * right.A); + float ub = (minChange * left.A + (1 - minChange) * right.A); if (alpha > ub) alpha = ub; - Float lb = (minChange * right.A + (1 - minChange) * left.A); + float lb = (minChange * right.A + (1 - minChange) * left.A); if (alpha < lb) alpha = lb; VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX); if (EnforceNonNegativity) { - VBufferUtils.Apply(ref _newX, delegate(int ind, ref Float newXval) + VBufferUtils.Apply(ref _newX, delegate(int ind, ref float newXval) { if (newXval < 0.0) newXval = 0; @@ -537,22 +536,22 @@ internal virtual bool LineSearch(IChannel ch, bool force) /// first point, with value and derivative /// second point, with value and derivative /// local minimum of interpolating cubic polynomial - private static Float CubicInterp(PointValueDeriv p0, PointValueDeriv p1) + private static float CubicInterp(PointValueDeriv p0, PointValueDeriv p1) { double t1 = p0.D + p1.D - 3 * (p0.V - p1.V) / (p0.A - p1.A); double t2 = Math.Sign(p1.A - p0.A) * Math.Sqrt(t1 * t1 - p0.D * p1.D); double num = p1.D + t2 - t1; double denom = p1.D - p0.D + 2 * t2; - return (Float)(p1.A - (p1.A - p0.A) * num / denom); + return (float)(p1.A - (p1.A - p0.A) * num / denom); } private readonly struct PointValueDeriv { - public readonly Float A; - public readonly Float V; - public readonly Float D; + public readonly float A; + public readonly float V; + public readonly float D; - public PointValueDeriv(Float a, Float value, Float deriv) + public PointValueDeriv(float a, float value, float deriv) { A = a; V = value; @@ -570,7 +569,7 @@ public PointValueDeriv(Float a, Float value, Float deriv) /// The point at the optimum /// The optimum function value /// Thrown if successive points are within numeric precision of each other, but termination condition is still unsatisfied. - public void Minimize(DifferentiableFunction function, ref VBuffer initial, Float tolerance, ref VBuffer result, out Float optimum) + public void Minimize(DifferentiableFunction function, ref VBuffer initial, float tolerance, ref VBuffer result, out float optimum) { ITerminationCriterion term = new MeanRelativeImprovementCriterion(tolerance); Minimize(function, ref initial, term, ref result, out optimum); @@ -584,7 +583,7 @@ public void Minimize(DifferentiableFunction function, ref VBuffer initial /// The point at the optimum /// The optimum function value /// Thrown if successive points are within numeric precision of each other, but termination condition is still unsatisfied. 
- public void Minimize(DifferentiableFunction function, ref VBuffer initial, ref VBuffer result, out Float optimum) + public void Minimize(DifferentiableFunction function, ref VBuffer initial, ref VBuffer result, out float optimum) { Minimize(function, ref initial, _staticTerm, ref result, out optimum); } @@ -598,7 +597,7 @@ public void Minimize(DifferentiableFunction function, ref VBuffer initial /// The point at the optimum /// The optimum function value /// Thrown if successive points are within numeric precision of each other, but termination condition is still unsatisfied. - public void Minimize(DifferentiableFunction function, ref VBuffer initial, ITerminationCriterion term, ref VBuffer result, out Float optimum) + public void Minimize(DifferentiableFunction function, ref VBuffer initial, ITerminationCriterion term, ref VBuffer result, out float optimum) { const string computationName = "LBFGS Optimizer"; using (var pch = Env.StartProgressChannel(computationName)) diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs b/src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs index d44d6db16e..0813f2fb79 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs @@ -5,7 +5,6 @@ using System; using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; -using Float = System.Single; namespace Microsoft.ML.Numeric { @@ -14,7 +13,7 @@ namespace Microsoft.ML.Numeric /// /// Current iterate /// True if search should terminate - internal delegate bool DTerminate(in VBuffer x); + internal delegate bool DTerminate(in VBuffer x); /// /// Stochastic gradient descent with variations (minibatch, momentum, averaging). @@ -34,12 +33,12 @@ public int BatchSize { } } - private Float _momentum; + private float _momentum; /// /// Momentum parameter /// - public Float Momentum { + public float Momentum { get { return _momentum; } set { Contracts.Check(0 <= value && value < 1); @@ -47,12 +46,12 @@ public Float Momentum { } } - private Float _t0; + private float _t0; /// /// Base of step size schedule s_t = 1 / (t0 + f(t)) /// - public Float T0 { + public float T0 { get { return _t0; } set { Contracts.Check(value >= 0); @@ -129,7 +128,7 @@ public enum RateScheduleType /// Average this number of stochastic gradients for each update /// Momentum parameter /// Maximum number of updates (0 for no max) - public SgdOptimizer(DTerminate terminate, RateScheduleType rateSchedule = RateScheduleType.Sqrt, bool averaging = false, Float t0 = 1, int batchSize = 1, Float momentum = 0, int maxSteps = 0) + public SgdOptimizer(DTerminate terminate, RateScheduleType rateSchedule = RateScheduleType.Sqrt, bool averaging = false, float t0 = 1, int batchSize = 1, float momentum = 0, int maxSteps = 0) { _terminate = terminate; _rateSchedule = rateSchedule; @@ -145,7 +144,7 @@ public SgdOptimizer(DTerminate terminate, RateScheduleType rateSchedule = RateSc /// /// Point at which to evaluate /// Vector to be filled in with gradient - public delegate void DStochasticGradient(in VBuffer x, ref VBuffer grad); + public delegate void DStochasticGradient(in VBuffer x, ref VBuffer grad); /// /// Minimize the function represented by . 
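[Editor's aside between hunks: the SgdOptimizer options renamed above (T0, Momentum, BatchSize, averaging, rate schedule) combine into a fairly small update rule. The sketch below is a hedged, standalone illustration with assumed names, using the Sqrt schedule s_t = 1 / (t0 + sqrt(t)) and a momentum-smoothed step over plain float[] vectors (assuming `using System;`); it is not ML.NET's VBuffer-based implementation.]

    // Minimal SGD-with-momentum loop mirroring the options above.
    static float[] SgdSketch(Action<float[], float[]> stochasticGrad, float[] x0,
        float t0, float momentum, int maxSteps)
    {
        var x = (float[])x0.Clone();
        var grad = new float[x.Length];
        var step = new float[x.Length];
        for (int n = 0; n < maxSteps; n++)
        {
            Array.Clear(grad, 0, grad.Length);
            stochasticGrad(x, grad);                           // fills grad at the current x
            float rate = 1f / (t0 + (float)Math.Sqrt(n + 1));  // RateScheduleType.Sqrt
            for (int i = 0; i < x.Length; i++)
            {
                step[i] = momentum * step[i] + (1 - momentum) * grad[i];
                x[i] -= rate * step[i];
            }
        }
        return x;
    }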
@@ -153,17 +152,17 @@ public SgdOptimizer(DTerminate terminate, RateScheduleType rateSchedule = RateSc /// Stochastic gradients of function to minimize /// Initial point /// Approximate minimum of - public void Minimize(DStochasticGradient f, ref VBuffer initial, ref VBuffer result) + public void Minimize(DStochasticGradient f, ref VBuffer initial, ref VBuffer result) { Contracts.Check(FloatUtils.IsFinite(initial.GetValues()), "The initial vector contains NaNs or infinite values."); int dim = initial.Length; - VBuffer grad = VBufferUtils.CreateEmpty(dim); - VBuffer step = VBufferUtils.CreateEmpty(dim); - VBuffer x = default(VBuffer); + VBuffer grad = VBufferUtils.CreateEmpty(dim); + VBuffer step = VBufferUtils.CreateEmpty(dim); + VBuffer x = default(VBuffer); initial.CopyTo(ref x); - VBuffer prev = default(VBuffer); - VBuffer avg = VBufferUtils.CreateEmpty(dim); + VBuffer prev = default(VBuffer); + VBuffer avg = VBufferUtils.CreateEmpty(dim); for (int n = 0; _maxSteps == 0 || n < _maxSteps; ++n) { @@ -172,7 +171,7 @@ public void Minimize(DStochasticGradient f, ref VBuffer initial, ref VBuf else VectorUtils.ScaleBy(ref step, _momentum); - Float stepSize; + float stepSize; switch (_rateSchedule) { case RateScheduleType.Constant: @@ -188,7 +187,7 @@ public void Minimize(DStochasticGradient f, ref VBuffer initial, ref VBuf throw Contracts.Except(); } - Float scale = (1 - _momentum) / _batchSize; + float scale = (1 - _momentum) / _batchSize; for (int i = 0; i < _batchSize; ++i) { f(in x, ref grad); @@ -198,9 +197,9 @@ public void Minimize(DStochasticGradient f, ref VBuffer initial, ref VBuf if (_averaging) { Utils.Swap(ref avg, ref prev); - VectorUtils.ScaleBy(prev, ref avg, (Float)n / (n + 1)); + VectorUtils.ScaleBy(prev, ref avg, (float)n / (n + 1)); VectorUtils.AddMult(in step, -stepSize, ref x); - VectorUtils.AddMult(in x, (Float)1 / (n + 1), ref avg); + VectorUtils.AddMult(in x, (float)1 / (n + 1), ref avg); if ((n > 0 && TerminateTester.ShouldTerminate(in avg, in prev)) || _terminate(in avg)) { @@ -270,7 +269,7 @@ public GDOptimizer(DTerminate terminate, IDiffLineSearch lineSearch = null, bool if (LineSearch == null) { if (useCG) - LineSearch = new CubicInterpLineSearch((Float)0.01); + LineSearch = new CubicInterpLineSearch((float)0.01); else LineSearch = new BacktrackingLineSearch(); } @@ -284,24 +283,24 @@ private class LineFunc { private bool _useCG; - private VBuffer _point; - private VBuffer _newPoint; - private VBuffer _grad; - private VBuffer _newGrad; - private VBuffer _dir; + private VBuffer _point; + private VBuffer _newPoint; + private VBuffer _grad; + private VBuffer _newGrad; + private VBuffer _dir; - public VBuffer NewPoint => _newPoint; + public VBuffer NewPoint => _newPoint; - private Float _value; - private Float _newValue; + private float _value; + private float _newValue; - public Float Value => _value; + public float Value => _value; private DifferentiableFunction _func; - public Float Deriv => VectorUtils.DotProduct(in _dir, in _grad); + public float Deriv => VectorUtils.DotProduct(in _dir, in _grad); - public LineFunc(DifferentiableFunction function, in VBuffer initial, bool useCG = false) + public LineFunc(DifferentiableFunction function, in VBuffer initial, bool useCG = false) { int dim = initial.Length; @@ -314,7 +313,7 @@ public LineFunc(DifferentiableFunction function, in VBuffer initial, bool _useCG = useCG; } - public Float Eval(Float step, out Float deriv) + public float Eval(float step, out float deriv) { VectorUtils.AddMultInto(in _point, step, in _dir, ref 
_newPoint); _newValue = _func(in _newPoint, ref _newGrad, null); @@ -326,11 +325,11 @@ public void ChangeDir() { if (_useCG) { - Float newByNew = VectorUtils.NormSquared(_newGrad); - Float newByOld = VectorUtils.DotProduct(in _newGrad, in _grad); - Float oldByOld = VectorUtils.NormSquared(_grad); - Float betaPR = (newByNew - newByOld) / oldByOld; - Float beta = Math.Max(0, betaPR); + float newByNew = VectorUtils.NormSquared(_newGrad); + float newByOld = VectorUtils.DotProduct(in _newGrad, in _grad); + float oldByOld = VectorUtils.NormSquared(_grad); + float betaPR = (newByNew - newByOld) / oldByOld; + float beta = Math.Max(0, betaPR); VectorUtils.ScaleBy(ref _dir, beta); VectorUtils.AddMult(in _newGrad, -1, ref _dir); } @@ -348,16 +347,16 @@ public void ChangeDir() /// Function to minimize /// Initial point /// Approximate minimum - public void Minimize(DifferentiableFunction function, in VBuffer initial, ref VBuffer result) + public void Minimize(DifferentiableFunction function, in VBuffer initial, ref VBuffer result) { Contracts.Check(FloatUtils.IsFinite(initial.GetValues()), "The initial vector contains NaNs or infinite values."); LineFunc lineFunc = new LineFunc(function, in initial, UseCG); - VBuffer prev = default(VBuffer); + VBuffer prev = default(VBuffer); initial.CopyTo(ref prev); for (int n = 0; _maxSteps == 0 || n < _maxSteps; ++n) { - Float step = LineSearch.Minimize(lineFunc.Eval, lineFunc.Value, lineFunc.Deriv); + float step = LineSearch.Minimize(lineFunc.Eval, lineFunc.Value, lineFunc.Deriv); var newPoint = lineFunc.NewPoint; bool terminateNow = n > 0 && TerminateTester.ShouldTerminate(in newPoint, in prev); if (terminateNow || Terminate(in newPoint)) @@ -381,7 +380,7 @@ internal static class TerminateTester /// The current value. /// The value from the previous iteration. /// True if the optimization routine should terminate at this iteration. - internal static bool ShouldTerminate(in VBuffer x, in VBuffer xprev) + internal static bool ShouldTerminate(in VBuffer x, in VBuffer xprev) { Contracts.Assert(x.Length == xprev.Length, "Vectors must have the same dimensionality."); Contracts.Assert(FloatUtils.IsFinite(xprev.GetValues())); diff --git a/src/Microsoft.ML.StandardLearners/Standard/LinearPredictorUtils.cs b/src/Microsoft.ML.StandardLearners/Standard/LinearPredictorUtils.cs index 454522b086..bde1ee0dfe 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LinearPredictorUtils.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LinearPredictorUtils.cs @@ -11,7 +11,6 @@ using Microsoft.ML.Calibrators; using Microsoft.ML.Data; using Microsoft.ML.Internal.Utilities; -using Float = System.Single; namespace Microsoft.ML.Trainers { @@ -23,12 +22,12 @@ internal static class LinearPredictorUtils // Epsilon for 0-comparisons. // REVIEW: Why is this doing any thresholding? Shouldn't it faithfully // represent what is in the binary model? 
- private const Float Epsilon = (Float)1e-15; + private const float Epsilon = (float)1e-15; /// /// print the linear model as code /// - public static void SaveAsCode(TextWriter writer, in VBuffer weights, Float bias, + public static void SaveAsCode(TextWriter writer, in VBuffer weights, float bias, RoleMappedSchema schema, string codeVariable = "output") { Contracts.CheckValue(writer, nameof(writer)); @@ -93,7 +92,7 @@ private static string FeatureNameAsCode(string featureName, int idx) /// /// Build a Bing TreeEnsemble .ini representation of the given predictor /// - public static string LinearModelAsIni(in VBuffer weights, Float bias, IPredictor predictor = null, + public static string LinearModelAsIni(in VBuffer weights, float bias, IPredictor predictor = null, RoleMappedSchema schema = null, PlattCalibrator calibrator = null) { // TODO: Might need to consider a max line length for the Weights list, requiring us to split it up into @@ -175,7 +174,7 @@ public static string LinearModelAsIni(in VBuffer weights, Float bias, IPr /// Output the weights of a linear model to a given writer /// public static string LinearModelAsText( - string userName, string loadName, string settings, in VBuffer weights, Float bias, + string userName, string loadName, string settings, in VBuffer weights, float bias, RoleMappedSchema schema = null, PlattCalibrator calibrator = null) { // Review: added a text description for each calibrator (not only Platt), would be nice to add to this method. @@ -197,8 +196,8 @@ public static string LinearModelAsText( SaveLinearModelWeightsInKeyValuePairs(in weights, bias, schema, weightValues); foreach (var weightValue in weightValues) { - Contracts.Assert(weightValue.Value is Float); - b.AppendLine().AppendFormat("{0}\t{1}", weightValue.Key, (Float)weightValue.Value); + Contracts.Assert(weightValue.Value is float); + b.AppendLine().AppendFormat("{0}\t{1}", weightValue.Key, (float)weightValue.Value); } return b.ToString(); @@ -226,7 +225,7 @@ public static IEnumerable> GetSortedLinearModelFeat /// Output the weights of a linear model to key value pairs. 
/// public static void SaveLinearModelWeightsInKeyValuePairs( - in VBuffer weights, Float bias, RoleMappedSchema schema, List> results) + in VBuffer weights, float bias, RoleMappedSchema schema, List> results) { var names = default(VBuffer>); AnnotationUtils.GetSlotNames(schema, RoleMappedSchema.ColumnRole.Feature, weights.Length, ref names); diff --git a/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs b/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs index e7f2f8e1fa..2576512961 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs @@ -15,7 +15,6 @@ using Microsoft.ML.Model; using Microsoft.ML.Numeric; using Microsoft.ML.Trainers; -using Float = System.Single; [assembly: LoadableClass(SdcaMultiClassTrainer.Summary, typeof(SdcaMultiClassTrainer), typeof(SdcaMultiClassTrainer.Options), new[] { typeof(SignatureMultiClassClassifierTrainer), typeof(SignatureTrainer), typeof(SignatureFeatureScorerTrainer) }, @@ -117,8 +116,8 @@ private protected override void CheckLabelCompatible(SchemaShape.Column labelCol /// private protected override void TrainWithoutLock(IProgressChannelProvider progress, FloatLabelCursor.Factory cursorFactory, Random rand, - IdToIdxLookup idToIdx, int numThreads, DualsTableBase duals, Float[] biasReg, Float[] invariants, Float lambdaNInv, - VBuffer[] weights, Float[] biasUnreg, VBuffer[] l1IntermediateWeights, Float[] l1IntermediateBias, Float[] featureNormSquared) + IdToIdxLookup idToIdx, int numThreads, DualsTableBase duals, float[] biasReg, float[] invariants, float lambdaNInv, + VBuffer[] weights, float[] biasUnreg, VBuffer[] l1IntermediateWeights, float[] l1IntermediateBias, float[] featureNormSquared) { Contracts.AssertValueOrNull(progress); Contracts.Assert(SdcaTrainerOptions.L1Threshold.HasValue); @@ -149,8 +148,8 @@ private protected override void TrainWithoutLock(IProgressChannelProvider progre long dualIndexInitPos = idx * numClasses; var features = cursor.Features; var label = (int)cursor.Label; - Float invariant; - Float normSquared; + float invariant; + float normSquared; if (invariants != null) { invariant = invariants[idx]; @@ -171,13 +170,13 @@ private protected override void TrainWithoutLock(IProgressChannelProvider progre var instanceWeight = GetInstanceWeight(cursor); // This will be the new dual variable corresponding to the label class. - Float labelDual = 0; + float labelDual = 0; // This will be used to update the weights and regularized bias corresponding to the label class. - Float labelPrimalUpdate = 0; + float labelPrimalUpdate = 0; // This will be used to update the unregularized bias corresponding to the label class. - Float labelAdjustment = 0; + float labelAdjustment = 0; // Iterates through all classes. for (int iClass = 0; iClass < numClasses; iClass++) @@ -206,7 +205,7 @@ private protected override void TrainWithoutLock(IProgressChannelProvider progre var adjustment = l1ThresholdZero ? 
lr * biasReg[iClass] : lr * l1IntermediateBias[iClass]; dualUpdate -= adjustment; bool success = false; - duals.ApplyAt(dualIndex, (long index, ref Float value) => + duals.ApplyAt(dualIndex, (long index, ref float value) => success = Interlocked.CompareExchange(ref value, dual + dualUpdate, dual) == dual); if (success) @@ -287,12 +286,12 @@ private protected override bool CheckConvergence( FloatLabelCursor.Factory cursorFactory, DualsTableBase duals, IdToIdxLookup idToIdx, - VBuffer[] weights, - VBuffer[] bestWeights, - Float[] biasUnreg, - Float[] bestBiasUnreg, - Float[] biasReg, - Float[] bestBiasReg, + VBuffer[] weights, + VBuffer[] bestWeights, + float[] biasUnreg, + float[] bestBiasUnreg, + float[] biasReg, + float[] bestBiasReg, long count, Double[] metrics, ref Double bestPrimalLoss, @@ -407,7 +406,7 @@ private protected override bool CheckConvergence( return converged; } - private protected override MulticlassLogisticRegressionModelParameters CreatePredictor(VBuffer[] weights, Float[] bias) + private protected override MulticlassLogisticRegressionModelParameters CreatePredictor(VBuffer[] weights, float[] bias) { Host.CheckValue(weights, nameof(weights)); Host.CheckValue(bias, nameof(bias)); @@ -422,13 +421,13 @@ private protected override void CheckLabel(RoleMappedData examples, out int weig examples.CheckMultiClassLabel(out weightSetCount); } - private protected override Float[] InitializeFeatureNormSquared(int length) + private protected override float[] InitializeFeatureNormSquared(int length) { Contracts.Assert(0 < length & length <= Utils.ArrayMaxSize); - return new Float[length]; + return new float[length]; } - private protected override Float GetInstanceWeight(FloatLabelCursor cursor) + private protected override float GetInstanceWeight(FloatLabelCursor cursor) { return cursor.Weight; } diff --git a/src/Microsoft.ML.Sweeper/Algorithms/KdoSweeper.cs b/src/Microsoft.ML.Sweeper/Algorithms/KdoSweeper.cs index cf413f5aad..8d0cdb86df 100644 --- a/src/Microsoft.ML.Sweeper/Algorithms/KdoSweeper.cs +++ b/src/Microsoft.ML.Sweeper/Algorithms/KdoSweeper.cs @@ -10,7 +10,6 @@ using Microsoft.ML.Internal.Utilities; using Microsoft.ML.Sweeper.Algorithms; using Microsoft.ML.Trainers.FastTree; -using Float = System.Single; [assembly: LoadableClass(typeof(KdoSweeper), typeof(KdoSweeper.Options), typeof(SignatureSweeper), "KDO Sweeper", "KDOSweeper", "KDO")] @@ -82,7 +81,7 @@ public sealed class Options private readonly IValueGenerator[] _sweepParameters; private readonly SweeperProbabilityUtils _spu; - private readonly SortedSet _alreadySeenConfigs; + private readonly SortedSet _alreadySeenConfigs; private readonly List _randomParamSets; public KdoSweeper(IHostEnvironment env, Options options) @@ -102,7 +101,7 @@ public KdoSweeper(IHostEnvironment env, Options options) _randomSweeper = new UniformRandomSweeper(env, new SweeperBase.OptionsBase(), _sweepParameters); _redundantSweeper = new UniformRandomSweeper(env, new SweeperBase.OptionsBase { Retries = 0 }, _sweepParameters); _spu = new SweeperProbabilityUtils(_host); - _alreadySeenConfigs = new SortedSet(new FloatArrayComparer()); + _alreadySeenConfigs = new SortedSet(new FloatArrayComparer()); _randomParamSets = new List(); } @@ -201,7 +200,7 @@ private ParameterSet[] GenerateChildConfigurations(IRunResult[] history, int[] p /// A mutated version of parent (i.e., point sampled near parent). 
private ParameterSet SampleChild(ParameterSet parent, double fitness, int n, IRunResult[] previousRuns, double rMean, double rVar, bool isMetricMaximizing) { - Float[] child = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, parent, false); + float[] child = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, parent, false); List numericParamIndices = new List(); List numericParamValues = new List(); int loopCount = 0; @@ -252,7 +251,7 @@ private ParameterSet SampleChild(ParameterSet parent, double fitness, int n, IRu double[][] bandwidthMatrix = BuildBandwidthMatrix(n, stddevs); double[] sampledPoint = SampleDiagonalCovMultivariateGaussian(1, mu, bandwidthMatrix)[0]; for (int j = 0; j < sampledPoint.Length; j++) - child[numericParamIndices[j]] = (Float)Corral(sampledPoint[j]); + child[numericParamIndices[j]] = (float)Corral(sampledPoint[j]); } else { @@ -264,7 +263,7 @@ private ParameterSet SampleChild(ParameterSet parent, double fitness, int n, IRu const double epsCutoff = 1e-10; double eps = Math.Min(Math.Max(child[index], epsCutoff), 1 - epsCutoff); double beta = alpha / eps - alpha; - child[index] = (Float)Stats.SampleFromBeta(rng, alpha, beta); + child[index] = (float)Stats.SampleFromBeta(rng, alpha, beta); } } } @@ -495,9 +494,9 @@ private double[] VectorTransformAdd(double[] m, double[] z, double[][] a) return result; } - private sealed class FloatArrayComparer : IComparer + private sealed class FloatArrayComparer : IComparer { - public int Compare(Float[] x, Float[] y) + public int Compare(float[] x, float[] y) { if (x.Length != y.Length) return x.Length > y.Length ? 1 : -1; diff --git a/src/Microsoft.ML.Sweeper/Algorithms/NelderMead.cs b/src/Microsoft.ML.Sweeper/Algorithms/NelderMead.cs index 14c58ca49a..7687439ead 100644 --- a/src/Microsoft.ML.Sweeper/Algorithms/NelderMead.cs +++ b/src/Microsoft.ML.Sweeper/Algorithms/NelderMead.cs @@ -9,7 +9,6 @@ using Microsoft.ML.CommandLine; using Microsoft.ML.Numeric; using Microsoft.ML.Sweeper; -using Float = System.Single; [assembly: LoadableClass(typeof(NelderMeadSweeper), typeof(NelderMeadSweeper.Options), typeof(SignatureSweeper), "Nelder Mead Sweeper", "NelderMeadSweeper", "NelderMead", "NM")] @@ -30,7 +29,7 @@ public sealed class Options public int RandomSeed; [Argument(ArgumentType.LastOccurenceWins, HelpText = "Simplex diameter for stopping", ShortName = "dstop")] - public Float StoppingSimplexDiameter = (Float)0.001; + public float StoppingSimplexDiameter = (float)0.001; [Argument(ArgumentType.LastOccurenceWins, HelpText = "If iteration point is outside parameter definitions, should it be projected?", ShortName = "project")] @@ -38,19 +37,19 @@ public sealed class Options #region Core algorithm constants [Argument(ArgumentType.LastOccurenceWins, HelpText = "Reflection parameter", ShortName = "dr")] - public Float DeltaReflection = (Float)1.0; + public float DeltaReflection = (float)1.0; [Argument(ArgumentType.LastOccurenceWins, HelpText = "Expansion parameter", ShortName = "de")] - public Float DeltaExpansion = (Float)1.5; + public float DeltaExpansion = (float)1.5; [Argument(ArgumentType.LastOccurenceWins, HelpText = "Inside contraction parameter", ShortName = "dic")] - public Float DeltaInsideContraction = -(Float)0.5; + public float DeltaInsideContraction = -(float)0.5; [Argument(ArgumentType.LastOccurenceWins, HelpText = "Outside contraction parameter", ShortName = "doc")] - public Float DeltaOutsideContraction = (Float)0.5; + public float DeltaOutsideContraction = (float)0.5; 
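        // (Editor's note, illustrative only and not part of this patch: each delta above
        // parameterizes a Nelder-Mead trial point
        //     x_trial = centroid + delta * (centroid - worstVertex),
        // where the centroid averages every simplex vertex except the worst. So 1.0
        // reflects the worst vertex through the centroid, 1.5 expands beyond the
        // reflection, -0.5 contracts inside the simplex, 0.5 contracts outside, and
        // GammaShrink below pulls all non-best vertices halfway toward the best one
        // when no trial point improves on it.)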
[Argument(ArgumentType.LastOccurenceWins, HelpText = "Shrinkage parameter", ShortName = "ds")] - public Float GammaShrink = (Float)0.5; + public float GammaShrink = (float)0.5; #endregion } @@ -68,19 +67,19 @@ private enum OptimizationStage private readonly ISweeper _initSweeper; private readonly Options _args; - private SortedList _simplexVertices; + private SortedList _simplexVertices; private readonly int _dim; private OptimizationStage _stage; - private readonly List> _pendingSweeps; - private Queue> _pendingSweepsNotSubmitted; - private KeyValuePair _lastReflectionResult; + private readonly List> _pendingSweeps; + private Queue> _pendingSweepsNotSubmitted; + private KeyValuePair _lastReflectionResult; - private KeyValuePair _worst; - private KeyValuePair _secondWorst; - private KeyValuePair _best; + private KeyValuePair _worst; + private KeyValuePair _secondWorst; + private KeyValuePair _best; - private Float[] _centroid; + private float[] _centroid; private readonly List _sweepParameters; @@ -116,10 +115,10 @@ public NelderMeadSweeper(IHostEnvironment env, Options options) _dim = _sweepParameters.Count; env.CheckUserArg(_dim > 1, nameof(options.SweptParameters), "Nelder-Mead sweeper needs at least two parameters to sweep over."); - _simplexVertices = new SortedList(new SimplexVertexComparer()); + _simplexVertices = new SortedList(new SimplexVertexComparer()); _stage = OptimizationStage.NeedReflectionPoint; - _pendingSweeps = new List>(); - _pendingSweepsNotSubmitted = new Queue>(); + _pendingSweeps = new List>(); + _pendingSweepsNotSubmitted = new Queue>(); } public ParameterSet[] ProposeSweeps(int maxSweeps, IEnumerable previousRuns = null) @@ -163,7 +162,7 @@ public ParameterSet[] ProposeSweeps(int maxSweeps, IEnumerable previ } else _stage = OptimizationStage.WaitingForReflectionResult; - _pendingSweeps.Add(new KeyValuePair(FloatArrayAsParameterSet(nextPoint), nextPoint)); + _pendingSweeps.Add(new KeyValuePair(FloatArrayAsParameterSet(nextPoint), nextPoint)); if (previousRuns.Any(runResult => runResult.ParameterSet.Equals(_pendingSweeps[0].Key))) { _stage = OptimizationStage.WaitingForReductionResult; @@ -213,7 +212,7 @@ public ParameterSet[] ProposeSweeps(int maxSweeps, IEnumerable previ _stage = OptimizationStage.WaitingForInnerContractionResult; } _pendingSweeps.Clear(); - _pendingSweeps.Add(new KeyValuePair(FloatArrayAsParameterSet(nextPoint), nextPoint)); + _pendingSweeps.Add(new KeyValuePair(FloatArrayAsParameterSet(nextPoint), nextPoint)); if (previousRuns.Any(runResult => runResult.ParameterSet.Equals(_pendingSweeps[0].Key))) { _stage = OptimizationStage.WaitingForReductionResult; @@ -295,7 +294,7 @@ public ParameterSet[] ProposeSweeps(int maxSweeps, IEnumerable previ } } - private void UpdateSimplex(IRunResult newVertexResult, Float[] newVertex) + private void UpdateSimplex(IRunResult newVertexResult, float[] newVertex) { Contracts.Assert(_centroid != null); Contracts.Assert(_simplexVertices.Count == _dim + 1); @@ -314,9 +313,9 @@ private void ComputeExtremes() _centroid = GetCentroid(); } - private Float SimplexDiameter() + private float SimplexDiameter() { - Float maxDistance = Float.MinValue; + float maxDistance = float.MinValue; var simplexVertices = _simplexVertices.ToArray(); for (int i = 0; i < simplexVertices.Length; i++) @@ -333,7 +332,7 @@ private Float SimplexDiameter() return maxDistance; } - private bool OutOfBounds(Float[] point) + private bool OutOfBounds(float[] point) { Contracts.Assert(point.Length == _sweepParameters.Count); for (int i = 0; i < 
_sweepParameters.Count; i++) @@ -348,7 +347,7 @@ private bool OutOfBounds(Float[] point) private void ReplaceSimplexVertices(IEnumerable previousRuns) { var results = FindRunResult(previousRuns); - var newSimplexVertices = new SortedList(new SimplexVertexComparer()); + var newSimplexVertices = new SortedList(new SimplexVertexComparer()); foreach (var result in results) newSimplexVertices.Add(result.Key, result.Value); newSimplexVertices.Add(_best.Key, _best.Value); @@ -358,16 +357,16 @@ private void ReplaceSimplexVertices(IEnumerable previousRuns) } // given some ParameterSets, find their results. - private List> FindRunResult(IEnumerable previousRuns) + private List> FindRunResult(IEnumerable previousRuns) { - var result = new List>(); + var result = new List>(); foreach (var sweep in _pendingSweeps) { foreach (var run in previousRuns) { if (run.ParameterSet.Equals(sweep.Key)) { - result.Add(new KeyValuePair(run, sweep.Value)); + result.Add(new KeyValuePair(run, sweep.Value)); break; } } @@ -378,18 +377,18 @@ private List> FindRunResult(IEnumerable previo if (previousRuns.Any(runResult => runResult.ParameterSet.Equals(newParameterSet))) return false; if (i < numPoints) - _pendingSweeps.Add(new KeyValuePair(newParameterSet, newPoint)); + _pendingSweeps.Add(new KeyValuePair(newParameterSet, newPoint)); else - _pendingSweepsNotSubmitted.Enqueue(new KeyValuePair(FloatArrayAsParameterSet(newPoint), newPoint)); + _pendingSweepsNotSubmitted.Enqueue(new KeyValuePair(FloatArrayAsParameterSet(newPoint), newPoint)); } return true; } @@ -428,11 +427,11 @@ private ParameterSet[] SubmitMoreReductionPoints(int maxSweeps) return result; } - private Float[] ParameterSetAsFloatArray(ParameterSet parameterSet) + private float[] ParameterSetAsFloatArray(ParameterSet parameterSet) { Contracts.Assert(parameterSet.Count == _sweepParameters.Count); - var result = new List(); + var result = new List(); for (int i = 0; i < _sweepParameters.Count; i++) { Contracts.AssertValue(parameterSet[_sweepParameters[i].Name]); @@ -442,7 +441,7 @@ private Float[] ParameterSetAsFloatArray(ParameterSet parameterSet) return result.ToArray(); } - private ParameterSet FloatArrayAsParameterSet(Float[] array) + private ParameterSet FloatArrayAsParameterSet(float[] array) { Contracts.Assert(array.Length == _sweepParameters.Count); diff --git a/src/Microsoft.ML.Sweeper/Algorithms/SmacSweeper.cs b/src/Microsoft.ML.Sweeper/Algorithms/SmacSweeper.cs index 6053bdc4bd..472ffcd634 100644 --- a/src/Microsoft.ML.Sweeper/Algorithms/SmacSweeper.cs +++ b/src/Microsoft.ML.Sweeper/Algorithms/SmacSweeper.cs @@ -13,7 +13,6 @@ using Microsoft.ML.Sweeper; using Microsoft.ML.Sweeper.Algorithms; using Microsoft.ML.Trainers.FastTree; -using Float = System.Single; [assembly: LoadableClass(typeof(SmacSweeper), typeof(SmacSweeper.Options), typeof(SignatureSweeper), "SMAC Sweeper", "SMACSweeper", "SMAC")] @@ -51,10 +50,10 @@ public sealed class Options public int NumRandomEISearchConfigurations = 10000; [Argument(ArgumentType.LastOccurenceWins, HelpText = "Fraction of eligible dimensions to split on (i.e., split ratio)", ShortName = "sr")] - public Float SplitRatio = (Float)0.8; + public float SplitRatio = (float)0.8; [Argument(ArgumentType.LastOccurenceWins, HelpText = "Epsilon threshold for ending local searches", ShortName = "eps")] - public Float Epsilon = (Float)0.00001; + public float Epsilon = (float)0.00001; [Argument(ArgumentType.LastOccurenceWins, HelpText = "Number of neighbors to sample for locally searching each numerical parameter", ShortName = 
"nnnp")] public int NumNeighborsForNumericalParams = 4; @@ -119,7 +118,7 @@ private FastForestRegressionModelParameters FitModel(IEnumerable pre foreach (RunResult r in previousRuns) { features[i] = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, r.ParameterSet, true); - targets[i] = (Float)r.MetricValue; + targets[i] = (float)r.MetricValue; i++; } @@ -285,7 +284,7 @@ private ParameterSet[] GetOneMutationNeighborhood(ParameterSet parent) if (parameterDiscrete != null) { // Create one neighbor for every discrete parameter. - Float[] neighbor = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, parent, false); + float[] neighbor = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, parent, false); int hotIndex = -1; for (int j = 0; j < parameterDiscrete.Count; j++) @@ -313,11 +312,11 @@ private ParameterSet[] GetOneMutationNeighborhood(ParameterSet parent) // Create k neighbors (typically 4) for every numerical parameter. for (int j = 0; j < _args.NumNeighborsForNumericalParams; j++) { - Float[] neigh = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, parent, false); + float[] neigh = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, parent, false); double newVal = spu.NormalRVs(1, neigh[i], 0.2)[0]; while (newVal <= 0.0 || newVal >= 1.0) newVal = spu.NormalRVs(1, neigh[i], 0.2)[0]; - neigh[i] = (Float)newVal; + neigh[i] = (float)newVal; ParameterSet neighbor = SweeperProbabilityUtils.FloatArrayAsParameterSet(_host, _sweepParameters, neigh, false); neighbors.Add(neighbor); } @@ -341,9 +340,9 @@ private double[][] GetForestRegressionLeafValues(FastForestRegressionModelParame List leafValues = new List(); foreach (InternalRegressionTree t in e.Trees) { - Float[] transformedParams = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, config, true); - VBuffer features = new VBuffer(transformedParams.Length, transformedParams); - leafValues.Add((Float)t.LeafValues[t.GetLeaf(in features)]); + float[] transformedParams = SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, config, true); + VBuffer features = new VBuffer(transformedParams.Length, transformedParams); + leafValues.Add((float)t.LeafValues[t.GetLeaf(in features)]); } datasetLeafValues.Add(leafValues.ToArray()); } @@ -435,7 +434,7 @@ private ParameterSet UpdateParameterSet(ParameterSet original, IParameterValue n return new ParameterSet(parameters); } - private Float ParameterAsFloat(ParameterSet parameterSet, int index) + private float ParameterAsFloat(ParameterSet parameterSet, int index) { _host.Assert(parameterSet.Count == _sweepParameters.Length); _host.Assert(index >= 0 && index <= _sweepParameters.Length); diff --git a/src/Microsoft.ML.Sweeper/ISweeper.cs b/src/Microsoft.ML.Sweeper/ISweeper.cs index f81318fd7f..c40e43a396 100644 --- a/src/Microsoft.ML.Sweeper/ISweeper.cs +++ b/src/Microsoft.ML.Sweeper/ISweeper.cs @@ -7,7 +7,6 @@ using System.Collections.Generic; using System.Linq; using Microsoft.ML.Internal.Utilities; -using Float = System.Single; namespace Microsoft.ML { @@ -272,10 +271,10 @@ IComparable IRunResult.MetricValue /// public sealed class RunMetric { - private readonly Float _primaryMetric; - private readonly Float[] _metricDistribution; + private readonly float _primaryMetric; + private readonly float[] _metricDistribution; - public RunMetric(Float primaryMetric, IEnumerable metricDistribution = null) + public RunMetric(float primaryMetric, IEnumerable 
metricDistribution = null) { _primaryMetric = primaryMetric; if (metricDistribution != null) @@ -288,7 +287,7 @@ public RunMetric(Float primaryMetric, IEnumerable metricDistribution = nu /// By default, smart sweeping algorithms will maximize this metric. /// If you want to minimize, either negate this value or change the option in the arguments of the sweeper constructor. /// - public Float PrimaryMetric + public float PrimaryMetric { get { return _primaryMetric; } } @@ -297,11 +296,11 @@ public Float PrimaryMetric /// The (optional) distribution of the metric. /// This distribution can be a secondary measure of how good a run was, e.g per-fold AUC, per-fold accuracy, (sampled) per-instance log loss etc. /// - public Float[] GetMetricDistribution() + public float[] GetMetricDistribution() { if (_metricDistribution == null) return null; - var result = new Float[_metricDistribution.Length]; + var result = new float[_metricDistribution.Length]; Array.Copy(_metricDistribution, result, _metricDistribution.Length); return result; } diff --git a/src/Microsoft.ML.Sweeper/Parameters.cs b/src/Microsoft.ML.Sweeper/Parameters.cs index 6f484c3dee..276b0e45cc 100644 --- a/src/Microsoft.ML.Sweeper/Parameters.cs +++ b/src/Microsoft.ML.Sweeper/Parameters.cs @@ -11,12 +11,11 @@ using Microsoft.ML.CommandLine; using Microsoft.ML.Internal.Utilities; using Microsoft.ML.Sweeper; -using Float = System.Single; [assembly: LoadableClass(typeof(LongValueGenerator), typeof(LongParamOptions), typeof(SignatureSweeperParameter), "Long parameter", "lp")] [assembly: LoadableClass(typeof(FloatValueGenerator), typeof(FloatParamOptions), typeof(SignatureSweeperParameter), - "Float parameter", "fp")] + "float parameter", "fp")] [assembly: LoadableClass(typeof(DiscreteValueGenerator), typeof(DiscreteParamOptions), typeof(SignatureSweeperParameter), "Discrete parameter", "dp")] @@ -45,10 +44,10 @@ public abstract class NumericParamOptions : BaseParamOptions public class FloatParamOptions : NumericParamOptions { [Argument(ArgumentType.Required, HelpText = "Minimum value")] - public Float Min; + public float Min; [Argument(ArgumentType.Required, HelpText = "Maximum value")] - public Float Max; + public float Max; } public class LongParamOptions : NumericParamOptions @@ -111,11 +110,11 @@ public override int GetHashCode() } } - public sealed class FloatParameterValue : IParameterValue + public sealed class FloatParameterValue : IParameterValue { private readonly string _name; private readonly string _valueText; - private readonly Float _value; + private readonly float _value; public string Name { @@ -127,14 +126,14 @@ public string ValueText get { return _valueText; } } - public Float Value + public float Value { get { return _value; } } - public FloatParameterValue(string name, Float value) + public FloatParameterValue(string name, float value) { - Contracts.Check(!Float.IsNaN(value)); + Contracts.Check(!float.IsNaN(value)); _name = name; _value = value; _valueText = _value.ToString("R"); @@ -202,7 +201,7 @@ public override int GetHashCode() public interface INumericValueGenerator : IValueGenerator { - Float NormalizeValue(IParameterValue value); + float NormalizeValue(IParameterValue value); bool InRange(IParameterValue value); } @@ -226,7 +225,7 @@ public LongValueGenerator(LongParamOptions options) _options = options; } - // REVIEW: Is Float accurate enough? + // REVIEW: Is float accurate enough? 
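        // (Editor's illustration, not part of the patch: with numSteps points on a log
        // scale, the long and float value generators use logBase b = (max/min)^(1/(numSteps-1)),
        // map a raw value v into [0, 1] via
        //     norm(v) = (log_b v - log_b min) / (log_b max - log_b min),
        // and invert with v = min * b^(norm * (log_b max - log_b min)). For example,
        // min = 0.01, max = 1, numSteps = 3 gives b = 10, so NormalizeValue(0.1) = 0.5
        // and CreateFromNormalized(0.5) recovers 0.1.)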
public IParameterValue CreateFromNormalized(Double normalizedValue) { long val; @@ -310,7 +309,7 @@ public int Count } } - public Float NormalizeValue(IParameterValue value) + public float NormalizeValue(IParameterValue value) { var valueTyped = value as LongParameterValue; Contracts.Check(valueTyped != null, "LongValueGenerator could not normalized parameter because it is not of the correct type"); @@ -318,11 +317,11 @@ public Float NormalizeValue(IParameterValue value) if (_options.LogBase) { - Float logBase = (Float)(_options.StepSize ?? Math.Pow(1.0 * _options.Max / _options.Min, 1.0 / (_options.NumSteps - 1))); - return (Float)((Math.Log(valueTyped.Value, logBase) - Math.Log(_options.Min, logBase)) / (Math.Log(_options.Max, logBase) - Math.Log(_options.Min, logBase))); + float logBase = (float)(_options.StepSize ?? Math.Pow(1.0 * _options.Max / _options.Min, 1.0 / (_options.NumSteps - 1))); + return (float)((Math.Log(valueTyped.Value, logBase) - Math.Log(_options.Min, logBase)) / (Math.Log(_options.Max, logBase) - Math.Log(_options.Min, logBase))); } else - return (Float)(valueTyped.Value - _options.Min) / (_options.Max - _options.Min); + return (float)(valueTyped.Value - _options.Min) / (_options.Max - _options.Min); } public bool InRange(IParameterValue value) @@ -358,10 +357,10 @@ public FloatValueGenerator(FloatParamOptions options) _options = options; } - // REVIEW: Is Float accurate enough? + // REVIEW: Is float accurate enough? public IParameterValue CreateFromNormalized(Double normalizedValue) { - Float val; + float val; if (_options.LogBase) { // REVIEW: review the math below, it only works for positive Min and Max @@ -370,10 +369,10 @@ public IParameterValue CreateFromNormalized(Double normalizedValue) : _options.StepSize.Value; var logMax = Math.Log(_options.Max, logBase); var logMin = Math.Log(_options.Min, logBase); - val = (Float)(_options.Min * Math.Pow(logBase, normalizedValue * (logMax - logMin))); + val = (float)(_options.Min * Math.Pow(logBase, normalizedValue * (logMax - logMin))); } else - val = (Float)(_options.Min + normalizedValue * (_options.Max - _options.Min)); + val = (float)(_options.Min + normalizedValue * (_options.Max - _options.Min)); return new FloatParameterValue(_options.Name, val); } @@ -389,11 +388,11 @@ private void EnsureParameterValues() // REVIEW: review the math below, it only works for positive Min and Max var logBase = _options.StepSize ?? Math.Pow(1.0 * _options.Max / _options.Min, 1.0 / (_options.NumSteps - 1)); - Float prevValue = Float.NegativeInfinity; + float prevValue = float.NegativeInfinity; var maxPlusEpsilon = _options.Max * Math.Sqrt(logBase); for (Double value = _options.Min; value <= maxPlusEpsilon; value *= logBase) { - var floatValue = (Float)value; + var floatValue = (float)value; if (floatValue > prevValue) result.Add(new FloatParameterValue(_options.Name, floatValue)); prevValue = floatValue; @@ -402,11 +401,11 @@ private void EnsureParameterValues() else { var stepSize = _options.StepSize ?? 
(Double)(_options.Max - _options.Min) / (_options.NumSteps - 1); - Float prevValue = Float.NegativeInfinity; + float prevValue = float.NegativeInfinity; var maxPlusEpsilon = _options.Max + stepSize / 2; for (Double value = _options.Min; value <= maxPlusEpsilon; value += stepSize) { - var floatValue = (Float)value; + var floatValue = (float)value; if (floatValue > prevValue) result.Add(new FloatParameterValue(_options.Name, floatValue)); prevValue = floatValue; @@ -434,7 +433,7 @@ public int Count } } - public Float NormalizeValue(IParameterValue value) + public float NormalizeValue(IParameterValue value) { var valueTyped = value as FloatParameterValue; Contracts.Check(valueTyped != null, "FloatValueGenerator could not normalized parameter because it is not of the correct type"); @@ -442,8 +441,8 @@ public Float NormalizeValue(IParameterValue value) if (_options.LogBase) { - Float logBase = (Float)(_options.StepSize ?? Math.Pow(1.0 * _options.Max / _options.Min, 1.0 / (_options.NumSteps - 1))); - return (Float)((Math.Log(valueTyped.Value, logBase) - Math.Log(_options.Min, logBase)) / (Math.Log(_options.Max, logBase) - Math.Log(_options.Min, logBase))); + float logBase = (float)(_options.StepSize ?? Math.Pow(1.0 * _options.Max / _options.Min, 1.0 / (_options.NumSteps - 1))); + return (float)((Math.Log(valueTyped.Value, logBase) - Math.Log(_options.Min, logBase)) / (Math.Log(_options.Max, logBase) - Math.Log(_options.Min, logBase))); } else return (valueTyped.Value - _options.Min) / (_options.Max - _options.Min); @@ -477,7 +476,7 @@ public DiscreteValueGenerator(DiscreteParamOptions options) _options = options; } - // REVIEW: Is Float accurate enough? + // REVIEW: Is float accurate enough? public IParameterValue CreateFromNormalized(Double normalizedValue) { return new StringParameterValue(_options.Name, _options.Values[(int)(_options.Values.Length * normalizedValue)]); @@ -555,7 +554,7 @@ public bool TryParseParameter(string paramValue, Type paramType, string paramNam // Extract the minimum, and the maximum value of the list of suggested sweeps. // Positive lookahead splitting at the '-' character. - // It is used for the Float and Long param types. + // It is used for the float and Long param types. // Example format: "0.02-0.1;steps:5". 
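            // (Editor's note, illustrative: "(?<=[^eE])-" is a positive lookbehind, not a
            // lookahead, so the split only fires on a '-' that does not follow an exponent
            // marker; "0.02-0.1" yields ["0.02", "0.1"] and "1e-4-1e-2" yields ["1e-4", "1e-2"].)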
         string[] minMaxRegex = Regex.Split(paramValue, "(?<=[^eE])-");
         if (minMaxRegex.Length != 2)
@@ -639,9 +638,9 @@ public bool TryParseParameter(string paramValue, Type paramType, string paramNam
             }
             else
             {
-                Float minF;
-                Float maxF;
-                if (!Float.TryParse(minStr, out minF) || !Float.TryParse(maxStr, out maxF))
+                float minF;
+                float maxF;
+                if (!float.TryParse(minStr, out minF) || !float.TryParse(maxStr, out maxF))
                     return false;
                 var floatOptions = new FloatParamOptions();
                 floatOptions.Name = paramName;
diff --git a/src/Microsoft.ML.TimeSeries/EigenUtils.cs b/src/Microsoft.ML.TimeSeries/EigenUtils.cs
index fc8224b94e..48c211a608 100644
--- a/src/Microsoft.ML.TimeSeries/EigenUtils.cs
+++ b/src/Microsoft.ML.TimeSeries/EigenUtils.cs
@@ -6,7 +6,6 @@
 using System.Runtime.InteropServices;
 using System.Security;
 using Microsoft.ML.Internal.Utilities;
-using Float = System.Single;
 
 namespace Microsoft.ML.Transforms.TimeSeries
 {
@@ -15,21 +14,21 @@ internal static class EigenUtils
     {
         //Compute the Eigen-decomposition of a symmetric matrix
         //REVIEW: use matrix/vector operations, not Array Math
-        public static void EigenDecomposition(Float[] a, out Float[] eigenvalues, out Float[] eigenvectors)
+        public static void EigenDecomposition(float[] a, out float[] eigenvalues, out float[] eigenvectors)
         {
             var count = a.Length;
             var n = (int)Math.Sqrt(count);
             Contracts.Assert(n * n == count);
 
-            eigenvectors = new Float[count];
-            eigenvalues = new Float[n];
+            eigenvectors = new float[count];
+            eigenvalues = new float[n];
 
             //Reduce A to tridiagonal form
             //REVIEW: it's not ideal to keep using the same variable name for different purposes
             // - After the operation, "eigenvalues" means the diagonal elements of the reduced matrix
             //and "eigenvectors" means the orthogonal similarity transformation matrix
             // - Consider aliasing variables
-            var w = new Float[n];
+            var w = new float[n];
             Tred(a, eigenvalues, w, eigenvectors, n);
 
             //Eigen-decomposition of the tridiagonal matrix
@@ -37,10 +36,10 @@ public static void EigenDecomposition(Float[] a, out Float[] eigenvalues, out Fl
             Imtql(eigenvalues, w, eigenvectors, n);
 
             for (int i = 0; i < n; i++)
-                eigenvalues[i] = eigenvalues[i] <= 0 ? (Float)(0.0) : (Float)Math.Sqrt(eigenvalues[i]);
+                eigenvalues[i] = eigenvalues[i] <= 0 ? (float)(0.0) : (float)Math.Sqrt(eigenvalues[i]);
         }
 
-        private static Float Hypot(Float x, Float y)
+        private static float Hypot(float x, float y)
         {
             x = Math.Abs(x);
             y = Math.Abs(y);
@@ -51,22 +50,22 @@ private static Float Hypot(Float x, Float y)
             if (x < y)
             {
                 double t = x / y;
-                return y * (Float)Math.Sqrt(1 + t * t);
+                return y * (float)Math.Sqrt(1 + t * t);
             }
             else
             {
                 double t = y / x;
-                return x * (Float)Math.Sqrt(1 + t * t);
+                return x * (float)Math.Sqrt(1 + t * t);
             }
         }
 
-        private static Float CopySign(Float x, Float y)
+        private static float CopySign(float x, float y)
         {
-            Float xx = Math.Abs(x);
+            float xx = Math.Abs(x);
             return y < 0 ? -xx : xx;
         }
 
-        private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n)
+        private static void Tred(float[] a, float[] d, float[] e, float[] z, int n)
         {
             float g;
             float h;
@@ -75,7 +74,7 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n)
             int k;
             int l;
 
-            /* this subroutine reduces a Float symmetric matrix to a */
+            /* this subroutine reduces a float symmetric matrix to a */
             /* symmetric tridiagonal matrix using and accumulating */
             /* orthogonal similarity transformations. */
@@ -83,7 +82,7 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n)
 
             /* n is the order of the matrix. */
 
-            /* a contains the Float symmetric input matrix. only the */
+            /* a contains the float symmetric input matrix. only the */
             /* lower triangle of the matrix need be supplied. */
 
             /* on output */
@@ -120,7 +119,7 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n)
             {
                 l = i - 1;
                 h = 0;
-                Float scale = 0;
+                float scale = 0;
                 if (l == 0)
                 {
                     e[1] = d[0];
@@ -156,8 +155,8 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n)
                     h += d[k] * d[k];
                 }
 
-                Float f = d[l];
-                g = CopySign((Float)Math.Sqrt(h), f);
+                float f = d[l];
+                g = CopySign((float)Math.Sqrt(h), f);
                 e[i] = scale * g;
                 h -= f * g;
                 d[l] = f - g;
@@ -195,7 +194,7 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n)
                     f += e[j] * d[j];
                 }
 
-                Float hh = f / (h + h);
+                float hh = f / (h + h);
                 // .......... form q ..........
                 for (j = 0; j < i; ++j)
                 {
@@ -266,7 +265,7 @@ private static void Tred(Float[] a, Float[] d, Float[] e, Float[] z, int n)
         } /* Tred */
 
         /* Subroutine */
-        private static int Imtql(Float[] d, Float[] e, Float[] z, int n)
+        private static int Imtql(float[] d, float[] e, float[] z, int n)
        {
             /* Local variables */
             double b;
@@ -347,7 +346,7 @@ private static int Imtql(Float[] d, Float[] e, Float[] z, int n)
             {
                 e[i - 1] = e[i];
             }
-            e[n - 1] = (Float)(0.0);
+            e[n - 1] = (float)(0.0);
 
             for (l = 0; l < n; ++l)
             {
@@ -370,12 +369,12 @@ private static int Imtql(Float[] d, Float[] e, Float[] z, int n)
                     return l;
                 }
                 /* .......... form shift .......... */
-                g = (d[l + 1] - p) / (e[l] * (Float)(2.0));
-                r = Hypot((float)g, (Float)(1.0));
+                g = (d[l + 1] - p) / (e[l] * (float)(2.0));
+                r = Hypot((float)g, (float)(1.0));
                 g = d[m] - p + e[l] / (g + CopySign((float)r, (float)g));
-                s = (Float)(1.0);
-                c = (Float)(1.0);
-                p = (Float)(0.0);
+                s = (float)(1.0);
+                c = (float)(1.0);
+                p = (float)(0.0);
                 /* .......... for i=m-1 step -1 until l do -- .......... */
                 for (i = m - 1; i >= l; i--)
                 {
@@ -383,7 +382,7 @@ private static int Imtql(Float[] d, Float[] e, Float[] z, int n)
                     b = c * e[i];
                     r = Hypot((float)f, (float)g);
                     e[i + 1] = (float)r;
-                    if (r == (Float)(0.0))
+                    if (r == (float)(0.0))
                     {
                         /* .......... recover from underflow .......... */
                         d[i + 1] -= (float)p;
@@ -393,7 +392,7 @@ private static int Imtql(Float[] d, Float[] e, Float[] z, int n)
                     s = f / r;
                     c = g / r;
                     g = d[i + 1] - p;
-                    r = (d[i] - g) * s + c * (Float)(2.0) * b;
+                    r = (d[i] - g) * s + c * (float)(2.0) * b;
                     p = s * r;
                     d[i + 1] = (float)(g + p);
                     g = c * r - b;
@@ -405,11 +404,11 @@ private static int Imtql(Float[] d, Float[] e, Float[] z, int n)
                         z[k + i * n] = (float)(c * z[k + i * n] - s * f);
                     }
                 }
-                if (r == (Float)(0.0) && i >= l)
+                if (r == (float)(0.0) && i >= l)
                     continue;
                 d[l] -= (float)p;
                 e[l] = (float)g;
-                e[m] = (Float)(0.0);
+                e[m] = (float)(0.0);
             }
         } while (m != l);
     }
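A note on the Hypot helper patched above: it deliberately avoids computing Math.Sqrt(x * x + y * y) directly, because x * x overflows single precision long before the true hypotenuse does. The standalone sketch below (not part of the patch; the zero guard and demo values are illustrative additions) shows the same factor-out-the-larger-magnitude shape and why it matters.

using System;

internal static class HypotDemo
{
    // Same shape as the patched EigenUtils.Hypot: divide by the larger
    // magnitude so the squared intermediate stays near 1 and cannot overflow.
    private static float Hypot(float x, float y)
    {
        x = Math.Abs(x);
        y = Math.Abs(y);
        if (x == 0 && y == 0)
            return 0; // guard the 0/0 case in this standalone version
        if (x < y)
        {
            double t = x / y;
            return y * (float)Math.Sqrt(1 + t * t);
        }
        double u = y / x;
        return x * (float)Math.Sqrt(1 + u * u);
    }

    private static void Main()
    {
        float big = 1e20f;
        // Naive form: big * big exceeds float.MaxValue and becomes infinite.
        Console.WriteLine(float.IsInfinity(big * big + big * big)); // True
        // Scaled form stays finite: roughly 1.41421e20.
        Console.WriteLine(Hypot(big, big));
    }
}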
diff --git a/src/Microsoft.ML.Transforms/MissingValueIndicatorTransform.cs b/src/Microsoft.ML.Transforms/MissingValueIndicatorTransform.cs
index 3ce1e3b6cb..6a352cf6a1 100644
--- a/src/Microsoft.ML.Transforms/MissingValueIndicatorTransform.cs
+++ b/src/Microsoft.ML.Transforms/MissingValueIndicatorTransform.cs
@@ -11,7 +11,6 @@
 using Microsoft.ML.Internal.Utilities;
 using Microsoft.ML.Model;
 using Microsoft.ML.Transforms;
-using Float = System.Single;
 
 [assembly: LoadableClass(typeof(MissingValueIndicatorTransform), typeof(MissingValueIndicatorTransform.Arguments), typeof(SignatureDataTransform),
     "", "MissingValueIndicatorTransform", "MissingValueTransform", "MissingTransform", "Missing")]
@@ -111,7 +110,7 @@ public static MissingValueIndicatorTransform Create(IHostEnvironment env, ModelL
                     // int: sizeof(Float)
                     //
                     int cbFloat = ctx.Reader.ReadInt32();
-                    ch.CheckDecode(cbFloat == sizeof(Float));
+                    ch.CheckDecode(cbFloat == sizeof(float));
                     return new MissingValueIndicatorTransform(h, ctx, input);
                 });
         }
@@ -125,7 +124,7 @@ private protected override void SaveModel(ModelSaveContext ctx)
             // *** Binary format ***
             // int: sizeof(Float)
             //
-            ctx.Writer.Write(sizeof(Float));
+            ctx.Writer.Write(sizeof(float));
             SaveBase(ctx);
         }
 
@@ -243,12 +242,12 @@ protected override Delegate GetGetterCore(IChannel ch, DataViewRow input, int ii
             Host.Assert(0 <= iinfo && iinfo < Infos.Length);
             disposer = null;
 
-            ValueGetter<VBuffer<Float>> del;
+            ValueGetter<VBuffer<float>> del;
             if (Infos[iinfo].TypeSrc is VectorType)
             {
-                var getSrc = GetSrcGetter<VBuffer<Float>>(input, iinfo);
+                var getSrc = GetSrcGetter<VBuffer<float>>(input, iinfo);
                 del =
-                    (ref VBuffer<Float> dst) =>
+                    (ref VBuffer<float> dst) =>
                     {
                         getSrc(ref dst);
                         FillValues(Host, ref dst);
@@ -256,11 +255,11 @@ protected override Delegate GetGetterCore(IChannel ch, DataViewRow input, int ii
             }
             else
             {
-                var getSrc = GetSrcGetter<Float>(input, iinfo);
+                var getSrc = GetSrcGetter<float>(input, iinfo);
                 del =
-                    (ref VBuffer<Float> dst) =>
+                    (ref VBuffer<float> dst) =>
                     {
-                        var src = default(Float);
+                        var src = default(float);
                         getSrc(ref src);
                         FillValues(src, ref dst);
                         Host.Assert(dst.Length == 2);
@@ -269,7 +268,7 @@ protected override Delegate GetGetterCore(IChannel ch, DataViewRow input, int ii
             return del;
         }
 
-        private static void FillValues(Float input, ref VBuffer<Float> result)
+        private static void FillValues(float input, ref VBuffer<float> result)
         {
             if (input == 0)
             {
@@ -278,7 +277,7 @@ private static void FillValues(Float input, ref VBuffer<Float> result)
             }
 
             var editor = VBufferEditor.Create(ref result, 2, 1);
-            if (Float.IsNaN(input))
+            if (float.IsNaN(input))
             {
                 editor.Values[0] = 1;
                 editor.Indices[0] = 1;
@@ -293,7 +292,7 @@ private static void FillValues(Float input, ref VBuffer<Float> result)
         }
 
         // This converts in place.
-        private static void FillValues(IExceptionContext ectx, ref VBuffer<Float> buffer)
+        private static void FillValues(IExceptionContext ectx, ref VBuffer<float> buffer)
         {
             int size = buffer.Length;
             ectx.Check(0 <= size & size < int.MaxValue / 2);
@@ -311,7 +310,7 @@ private static void FillValues(IExceptionContext ectx, ref VBuffer<Float> buffer
                     var val = values[ivSrc];
                     if (val == 0)
                         continue;
-                    if (Float.IsNaN(val))
+                    if (float.IsNaN(val))
                     {
                         editor.Values[iivDst] = 1;
                         editor.Indices[iivDst] = 2 * ivSrc + 1;
@@ -339,7 +338,7 @@ private static void FillValues(IExceptionContext ectx, ref VBuffer<Float> buffer
                     int iv = indices[iivSrc];
                     ectx.Assert(ivPrev < iv & iv < size);
                     ivPrev = iv;
-                    if (Float.IsNaN(val))
+                    if (float.IsNaN(val))
                     {
                         editor.Values[iivDst] = 1;
                         editor.Indices[iivDst] = 2 * iv + 1;
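The scalar FillValues patched above doubles each input slot into a (value, missing-indicator) pair: slot 0 carries the value, slot 1 fires only for NaN, and a zero input stays an empty sparse vector. A minimal sketch of that mapping, assuming a dense pair for readability (the transform itself emits a sparse VBuffer with at most one non-zero entry):

using System;

internal static class IndicatorDemo
{
    // Mirrors the branch structure of the scalar FillValues above.
    private static (float Value, float Missing) Expand(float input)
    {
        if (input == 0)
            return (0, 0);          // zero input stays an all-sparse vector
        if (float.IsNaN(input))
            return (0, 1);          // NaN: value slot empty, indicator fires
        return (input, 0);          // ordinary value: indicator stays zero
    }

    private static void Main()
    {
        Console.WriteLine(Expand(3.5f));      // (3.5, 0)
        Console.WriteLine(Expand(0f));        // (0, 0)
        Console.WriteLine(Expand(float.NaN)); // (0, 1)
    }
}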
diff --git a/src/Microsoft.ML.Transforms/Text/NgramUtils.cs b/src/Microsoft.ML.Transforms/Text/NgramUtils.cs
index 4614240942..cf3f0f3d09 100644
--- a/src/Microsoft.ML.Transforms/Text/NgramUtils.cs
+++ b/src/Microsoft.ML.Transforms/Text/NgramUtils.cs
@@ -4,7 +4,6 @@
 
 using System;
 using Microsoft.ML.Internal.Utilities;
-using Float = System.Single;
 
 namespace Microsoft.ML.Data
 {
@@ -26,7 +25,7 @@ internal sealed class NgramBufferBuilder
     {
         // This buffer builder maintains the vector of ngram-counts.
-        private readonly BufferBuilder<Float> _bldr;
+        private readonly BufferBuilder<float> _bldr;
         // A queue that holds _ngramLength+_skipLength keys, so that it contains all the ngrams starting with the
         // first key in the ngram.
         private readonly FixedSizeQueue<uint> _queue;
@@ -57,7 +56,7 @@ public NgramBufferBuilder(int ngramLength, int skipLength, int slotLim, NgramIdF
             _ngram = new uint[_ngramLength];
             _queue = new FixedSizeQueue<uint>(_ngramLength + _skipLength);
-            _bldr = BufferBuilder<Float>.CreateDefault();
+            _bldr = BufferBuilder<float>.CreateDefault();
             _finder = finder;
         }
 
@@ -130,7 +129,7 @@ public bool AddNgrams(in VBuffer<uint> src, int icol, uint keyMax)
             return true;
         }
 
-        public void GetResult(ref VBuffer<Float> dst)
+        public void GetResult(ref VBuffer<float> dst)
         {
             _bldr.GetResult(ref dst);
         }
diff --git a/test/Microsoft.ML.Predictor.Tests/TestPredictors.cs b/test/Microsoft.ML.Predictor.Tests/TestPredictors.cs
index 1493327d20..eabd4c5a15 100644
--- a/test/Microsoft.ML.Predictor.Tests/TestPredictors.cs
+++ b/test/Microsoft.ML.Predictor.Tests/TestPredictors.cs
@@ -6,7 +6,6 @@
 using System.Collections.Generic;
 using System.IO;
 using Microsoft.ML.TestFramework.Attributes;
-using Float = System.Single;
 
 namespace Microsoft.ML.RunTests
 {
@@ -20,7 +19,6 @@ namespace Microsoft.ML.RunTests
     using Microsoft.ML.LightGBM;
     using Microsoft.ML.TestFramework;
     using Microsoft.ML.Trainers;
-    using Microsoft.ML.Trainers.Ensemble;
     using Microsoft.ML.Trainers.FastTree;
     using Microsoft.ML.Trainers.HalLearners;
     using Microsoft.ML.Trainers.Online;
@@ -1199,8 +1197,8 @@ public void RegressorSdcaTest()
         public void RegressorSyntheticOlsTest()
         {
             const int featureCount = 15;
-            const Float scale = 2;
-            Float[] model = new Float[featureCount + 1];
+            const float scale = 2;
+            float[] model = new float[featureCount + 1];
             Random rgen = new Random(0);
             for (int i = 0; i < model.Length; ++i)
                 model[i] = scale * (2 * rgen.NextFloat() - 1);
@@ -1208,12 +1206,12 @@ public void RegressorSyntheticOlsTest()
             ListInstances instances = new ListInstances();
             for (int id = 0; id < 10 * model.Length; ++id)
             {
-                Float label = model[featureCount];
+                float label = model[featureCount];
                 WritableVector vec;
                 if (rgen.Next(2) == 1)
                 {
                     // Dense
-                    Float[] features = new Float[featureCount];
+                    float[] features = new float[featureCount];
                     for (int i = 0; i < features.Length; ++i)
                         label += model[i] * (features[i] = scale * (2 * rgen.NextFloat() - 1));
                     vec = WritableVector.CreateDense(features, false);
@@ -1223,7 +1221,7 @@ public void RegressorSyntheticOlsTest()
                     // Sparse
                     int entryCount = rgen.Next(featureCount);
                     int[] indices = Utils.GetRandomPermutation(rgen, featureCount).Take(entryCount).OrderBy(x => x).ToArray();
-                    Float[] features = new Float[indices.Length];
+                    float[] features = new float[indices.Length];
                     for (int ii = 0; ii < indices.Length; ++ii)
                         label += model[indices[ii]] * (features[ii] = scale * (2 * rgen.NextFloat() - 1));
                     vec = WritableVector.CreateSparse(featureCount, indices, features, false);
@@ -1255,7 +1253,7 @@ public void RegressorSyntheticOlsTest()
                     Assert.AreEqual(inst.Label, pred.Predict(inst), tol, "Mismatch on example id {0}", inst.Id);
             }
 
-            Float finalNorm;
+            float finalNorm;
             {
                 // Overdetermined but still exact case.
                 Log("Train using more examples with non-noised label, so we have an exact solution, and statistics.");
@@ -1296,9 +1294,9 @@ public void RegressorSyntheticOlsTest()
                     Assert.AreEqual(w1[i], w2[i]);
             }
 
-            Float[] regularizationParams = new Float[] { 0, (Float)0.01, (Float)0.1 };
+            float[] regularizationParams = new float[] { 0, (float)0.01, (float)0.1 };
 
-            foreach (Float regParam in regularizationParams)
+            foreach (float regParam in regularizationParams)
             {
                 foreach (bool subdefined in new bool[] { true, false })
                 {
@@ -1306,12 +1304,12 @@ public void RegressorSyntheticOlsTest()
                     Log("");
                     Log("Train using noised label, reg param {0}, so solution is no longer exact", regParam);
                     ListInstances noisyInstances = new ListInstances();
-                    Float boundCost = 0;
+                    float boundCost = 0;
                     foreach (Instance inst in instances)
                     {
                         // When we noise the label, we do it on an appreciable but still relatively small scale,
                         // compared to the regular distribution of the labels.
-                        Float diff = scale * (2 * rgen.NextFloat() - 1) / 3;
+                        float diff = scale * (2 * rgen.NextFloat() - 1) / 3;
                         boundCost += diff * diff;
                         noisyInstances.Add(new Instance(inst.Features, inst.Label + diff, inst.Name, false) { Id = inst.Id });
                         // Make sure this solver also works, when we have
@@ -1348,30 +1346,30 @@ public void RegressorSyntheticOlsTest()
                     Assert.AreEqual(featureCount, pred.InputType.VectorSize, "Unexpected input size");
                     Assert.IsTrue(0 <= pred.RSquared && pred.RSquared < 1, "R-squared not in expected range");
 
-                    Func<Func<Instance, Float>, Float> getError = p =>
+                    Func<Func<Instance, float>, float> getError = p =>
                         noisyInstances.Select(inst => inst.Label - p(inst)).Sum(e => e * e);
 
                     // In principle there should be no "better" solution with a lower L2 weight. Wiggle the parameters
                     // with a finite difference, and evaluate the change in error.
                     var referenceNorm = pred.Weights.Sum(x => x * x);
-                    Float referenceError = getError(pred.Predict);
-                    Float referenceCost = referenceError + regParam2 * referenceNorm;
-                    Float smoothing = (Float)(referenceCost * 5e-6);
+                    float referenceError = getError(pred.Predict);
+                    float referenceCost = referenceError + regParam2 * referenceNorm;
+                    float smoothing = (float)(referenceCost * 5e-6);
 
                     Log("Reference cost is {0} + {1} * {2} = {3}, upper bound was {4}", referenceError, regParam2, referenceNorm, referenceCost, boundCost);
                     Assert.IsTrue(boundCost > referenceCost, "Reference cost {0} was above theoretical upper bound {1}", referenceCost, boundCost);
 
-                    Float lastCost = 0;
+                    float lastCost = 0;
                     var weights = pred.Weights.Sum(x => x * x);
                     for (int trial = 0; trial < model.Length * 2; ++trial)
                     {
                         int param = trial / 2;
                         bool up = (trial & 1) == 1;
-                        Float[] w = pred.Weights.ToArray();
+                        float[] w = pred.Weights.ToArray();
                         Assert.AreEqual(featureCount, w.Length);
-                        Float b = pred.Bias;
+                        float b = pred.Bias;
                         bool isBias = param == featureCount;
-                        Float normDelta;
-                        Float origValue;
-                        Float newValue;
+                        float normDelta;
+                        float origValue;
+                        float newValue;
                         if (isBias)
                         {
                             origValue = OlsWiggle(ref b, out normDelta, up);
@@ -1384,18 +1382,18 @@ public void RegressorSyntheticOlsTest()
                             origValue = OlsWiggle(ref w[param], out normDelta, up);
                             newValue = w[param];
                         }
-                        Func<Instance, Float> del = inst => b + inst.Features.AllValues.Select((v, i) => w[i] * v).Sum();
-                        Float wiggledCost = getError(del) + regParam2 * (referenceNorm + normDelta);
+                        Func<Instance, float> del = inst => b + inst.Features.AllValues.Select((v, i) => w[i] * v).Sum();
+                        float wiggledCost = getError(del) + regParam2 * (referenceNorm + normDelta);
                         string desc = string.Format("after wiggling {0} {1} from {2} to {3}",
                             isBias ? "bias" : string.Format("weight[{0}]", param), up ? "up" : "down", origValue, newValue);
                         Log("Finite difference cost is {0} ({1}), {2}", wiggledCost, wiggledCost - referenceCost, desc);
-                        Assert.IsTrue(wiggledCost > referenceCost * (Float)(1 - 5e-7), "Finite difference cost {0} not higher than reference cost {1}, {2}",
+                        Assert.IsTrue(wiggledCost > referenceCost * (float)(1 - 5e-7), "Finite difference cost {0} not higher than reference cost {1}, {2}",
                             wiggledCost, referenceCost, desc);
                         if (up)
                         {
                             // If the solution to the problem really does like at the base of the quadratic, then wiggling
                             // equal amounts up and down should lead to *roughly* the same error.
-                            Float ratio = 1 - (lastCost - referenceCost + smoothing) / (wiggledCost - referenceCost + smoothing);
+                            float ratio = 1 - (lastCost - referenceCost + smoothing) / (wiggledCost - referenceCost + smoothing);
                             Log("Wiggled up had a relative difference of {0:0.0%} vs. wiggled down", ratio);
                             Assert.IsTrue(0.1 > Math.Abs(ratio), "Ratio {0} of up/down too high, {1}", ratio, desc);
                         }
@@ -1407,10 +1405,10 @@ public void RegressorSyntheticOlsTest()
             Done();
         }
 
-        private Float OlsWiggle(ref Float value, out Float deltaNorm, bool up)
+        private float OlsWiggle(ref float value, out float deltaNorm, bool up)
        {
-            Float origValue = value;
-            Float wiggle = (Float)Math.Max(1e-7, Math.Abs(1e-3 * value));
+            float origValue = value;
+            float wiggle = (float)Math.Max(1e-7, Math.Abs(1e-3 * value));
             value += up ? wiggle : -wiggle;
             deltaNorm = value * value - origValue * origValue;
             return origValue;
@@ -1435,8 +1433,8 @@ public void RegressorSyntheticDuplicatedOlsTest()
             // OLS should result in the same predictor if we just simply duplicate data.
             // Make certain that ridge regression works.
             const int featureCount = 10;
-            const Float scale = 2;
-            Float[] model = new Float[featureCount + 1];
+            const float scale = 2;
+            float[] model = new float[featureCount + 1];
             Random rgen = new Random(1);
             for (int i = 0; i < model.Length; ++i)
                 model[i] = scale * (2 * rgen.NextFloat() - 1);
@@ -1444,12 +1442,12 @@ public void RegressorSyntheticDuplicatedOlsTest()
             ListInstances instances = new ListInstances();
             for (int id = 0; id < 2 * model.Length; ++id)
             {
-                Float label = model[featureCount];
+                float label = model[featureCount];
                 WritableVector vec;
                 if (rgen.Next(2) == 1)
                 {
                     // Dense
-                    Float[] features = new Float[featureCount];
+                    float[] features = new float[featureCount];
                     for (int i = 0; i < features.Length; ++i)
                         label += model[i] * (features[i] = scale * (2 * rgen.NextFloat() - 1));
                     vec = WritableVector.CreateDense(features, false);
@@ -1459,12 +1457,12 @@ public void RegressorSyntheticDuplicatedOlsTest()
                     // Sparse
                     int entryCount = rgen.Next(featureCount);
                     int[] indices = Utils.GetRandomPermutation(rgen, featureCount).Take(entryCount).OrderBy(x => x).ToArray();
-                    Float[] features = new Float[indices.Length];
+                    float[] features = new float[indices.Length];
                     for (int ii = 0; ii < indices.Length; ++ii)
                         label += model[indices[ii]] * (features[ii] = scale * (2 * rgen.NextFloat() - 1));
                     vec = WritableVector.CreateSparse(featureCount, indices, features, false);
                 }
-                Float diff = scale * (2 * rgen.NextFloat() - 1) / 5;
+                float diff = scale * (2 * rgen.NextFloat() - 1) / 5;
                 instances.Add(new Instance(vec, label + diff, "", false) { Id = id });
             }
 
@@ -1475,7 +1473,7 @@ public void RegressorSyntheticDuplicatedOlsTest()
                 instances2.Add(new Instance(inst.Features, inst.Label, inst.Name, false) { Id = 2 * inst.Id + 1 });
             }
             OlsLinearRegressionTrainer.OldArguments args = new OlsLinearRegressionTrainer.OldArguments();
-            args.l2Weight = (Float)1;
+            args.l2Weight = (float)1;
             TrainHost host = new TrainHost(new Random(0));
             var trainer = new OlsLinearRegressionTrainer(args, host);
             trainer.Train(instances);
@@ -1854,23 +1852,23 @@ private void CompareSvmToLibSvmCore(string kernelType, string kernel, IHostEnvir
             trainer2.TrainCore(trainData, out predictor2);
             LibSvmInterface.ChangeSvmType(predictor2, 4);
 
-            var predictions1 = new List<Float>();
-            var predictions2 = new List<Float>();
+            var predictions1 = new List<float>();
+            var predictions2 = new List<float>();
 
             int instanceNum = 0;
             int colFeat = testData.Schema.Feature.Index;
             using (var cursor = testData.Data.GetRowCursor(col => col == colFeat))
             {
-                Float res1 = 0;
-                var buf = default(VBuffer<Float>);
-                var getter = cursor.GetGetter<VBuffer<Float>>(colFeat);
-                var map1 = predictor1.GetMapper<VBuffer<Float>, Float>();
+                float res1 = 0;
+                var buf = default(VBuffer<float>);
+                var getter = cursor.GetGetter<VBuffer<float>>(colFeat);
+                var map1 = predictor1.GetMapper<VBuffer<float>, float>();
                 while (cursor.MoveNext())
                 {
                     getter(ref buf);
                     map1(ref buf, ref res1);
 
-                    Float res2;
+                    float res2;
                     unsafe
                     {
                         if (buf.IsDense)
@@ -1908,22 +1906,22 @@ private void CompareSvmToLibSvmCore(string kernelType, string kernel, IHostEnvir
             }
         }
#endif
-        private bool IsLessThanOrEqual(Float a, Float b, Float maxRelError, Float maxAbsError)
+        private bool IsLessThanOrEqual(float a, float b, float maxRelError, float maxAbsError)
         {
             if (a <= b)
                 return true;
-            Float diff = a - b;
+            float diff = a - b;
             if (diff <= maxAbsError)
                 return true;
             return diff <= maxRelError * a;
         }
 
-        private bool AreEqual(Float a, Float b, Float maxRelError, Float maxAbsError)
+        private bool AreEqual(float a, float b, float maxRelError, float maxAbsError)
         {
-            Float diff = Math.Abs(a - b);
+            float diff = Math.Abs(a - b);
             if (diff <= maxAbsError)
                 return true;
-            Float largest = Math.Max(Math.Abs(a), Math.Abs(b));
+            float largest = Math.Max(Math.Abs(a), Math.Abs(b));
             return diff < largest * maxRelError;
         }
     }
@@ -1981,11 +1979,11 @@ public void StreamingTimeSeriesAnomalyDetectorTest()
             var dataset = TestDatasets.AppFailure;
             var instances = new TlcTextInstances(instArgs, GetDataPath(dataset.trainFilename));
 
-            var predictor = new OLSAnomalyDetector(45, (Float)0.1);
+            var predictor = new OLSAnomalyDetector(45, (float)0.1);
             var sb = new StringBuilder().AppendLine("Instance\tAnomaly Score\tBad anomaly?");
             foreach (var instance in instances)
             {
-                Float score, trend;
+                float score, trend;
                 if (predictor.Classify(instance.Features[0], out score, out trend))
                     sb.AppendFormat("{0}\t{1:G4}\t{2}", instance.Name, score, trend > 0).AppendLine(); // trigger alert
             }
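The finite-difference "wiggle" in RegressorSyntheticOlsTest above rests on one idea: at a least-squares minimum, perturbing any coefficient slightly up or down can only increase the regularized cost. The sketch below is a self-contained toy version of that check, not the test's code; the one-feature problem and all names are illustrative.

using System;
using System.Linq;

internal static class WiggleDemo
{
    // Squared error plus L2 penalty: sum_i (y_i - w.x_i)^2 + l2 * ||w||^2.
    private static float Cost(float[] w, float[][] x, float[] y, float l2)
    {
        float err = 0;
        for (int i = 0; i < x.Length; i++)
        {
            float pred = 0;
            for (int j = 0; j < w.Length; j++)
                pred += w[j] * x[i][j];
            err += (y[i] - pred) * (y[i] - pred);
        }
        return err + l2 * w.Sum(v => v * v);
    }

    private static void Main()
    {
        // Tiny exactly-solvable problem: y = 2x, so w = {2} is the optimum.
        var x = new[] { new[] { 1f }, new[] { 2f }, new[] { 3f } };
        var y = new[] { 2f, 4f, 6f };
        var w = new[] { 2f };

        float reference = Cost(w, x, y, 0f);
        foreach (float delta in new[] { -1e-3f, 1e-3f })
        {
            // Both directions must cost at least as much as the optimum.
            var wiggled = new[] { w[0] + delta };
            bool noBetter = Cost(wiggled, x, y, 0f) >= reference;
            Console.WriteLine("wiggle " + delta + ": no improvement = " + noBetter);
        }
    }
}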
diff --git a/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipe.cs b/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipe.cs
index ffe85c392a..def5caa3e5 100644
--- a/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipe.cs
+++ b/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipe.cs
@@ -9,17 +9,14 @@
 using Microsoft.ML.CommandLine;
 using Microsoft.ML.Data;
 using Microsoft.ML.Internal.Utilities;
-using Microsoft.ML.Transforms;
 using Microsoft.ML.Transforms.Conversions;
-using Microsoft.ML.Transforms.Text;
 using Xunit;
-using Float = System.Single;
 
 namespace Microsoft.ML.RunTests
 {
     public sealed partial class TestDataPipe : TestDataPipeBase
     {
-        private static Float[] dataFloat = new Float[] { -0.0f, 0, 1, -1, 2, -2, Single.NaN, Single.MinValue,
+        private static float[] dataFloat = new float[] { -0.0f, 0, 1, -1, 2, -2, Single.NaN, Single.MinValue,
             Single.MaxValue, Single.Epsilon, Single.NegativeInfinity, Single.PositiveInfinity };
         private static uint[] resultsFloat = new uint[] { 21, 21, 16, 16, 31, 17, 0, 23, 24, 15, 10, 7 };
 
@@ -215,18 +212,18 @@ public void SavePipeKeyToVec()
             using (var c = pipe.GetRowCursorForAllColumns())
             {
                 var cols = new[] { "MarVec", "MarVecU8", "CombBagVec", "CombBagVecU1", "CombIndVec", "CombIndVecU1" };
-                var getters = new ValueGetter<VBuffer<Float>>[cols.Length];
+                var getters = new ValueGetter<VBuffer<float>>[cols.Length];
                 for (int i = 0; i < cols.Length; i++)
                 {
                     int col;
                     if (!Check(c.Schema.TryGetColumnIndex(cols[i], out col), "{0} not found!", cols[i]))
                         return;
-                    getters[i] = c.GetGetter<VBuffer<Float>>(col);
+                    getters[i] = c.GetGetter<VBuffer<float>>(col);
                 }
 
-                Func<Float, Float, bool> fn = (x, y) => FloatUtils.GetBits(x) == FloatUtils.GetBits(y);
-                var v1 = default(VBuffer<Float>);
-                var v2 = default(VBuffer<Float>);
+                Func<float, float, bool> fn = (x, y) => FloatUtils.GetBits(x) == FloatUtils.GetBits(y);
+                var v1 = default(VBuffer<float>);
+                var v2 = default(VBuffer<float>);
                 while (c.MoveNext())
                 {
                     for (int i = 0; i < cols.Length; i += 2)
@@ -583,10 +580,10 @@ public void SavePipeWordHash()
                     if (!Check(tmp2, "Column F23 not found!"))
                         return;
-                    var get1 = c.GetGetter<VBuffer<Float>>(col1);
-                    var get2 = c.GetGetter<VBuffer<Float>>(col2);
-                    VBuffer<Float> bag1 = default;
-                    VBuffer<Float> bag2 = default;
+                    var get1 = c.GetGetter<VBuffer<float>>(col1);
+                    var get2 = c.GetGetter<VBuffer<float>>(col2);
+                    VBuffer<float> bag1 = default;
+                    VBuffer<float> bag2 = default;
                     while (c.MoveNext())
                     {
                         get1(ref bag1);
@@ -627,10 +624,10 @@ public void SavePipeWordHashUnordered()
                     if (!Check(tmp2, "Column F2 not found!"))
                         return;
-                    var get1 = c.GetGetter<VBuffer<Float>>(col1);
-                    var get2 = c.GetGetter<VBuffer<Float>>(col2);
-                    VBuffer<Float> bag1 = default;
-                    VBuffer<Float> bag2 = default;
+                    var get1 = c.GetGetter<VBuffer<float>>(col1);
+                    var get2 = c.GetGetter<VBuffer<float>>(col2);
+                    VBuffer<float> bag1 = default;
+                    VBuffer<float> bag2 = default;
                     while (c.MoveNext())
                     {
                         get1(ref bag1);
@@ -772,16 +769,16 @@ public void SavePipeWordBagManyToOne()
             // Verify that WB2 = 2 * WB1
             using (var c = pipe.GetRowCursorForAllColumns())
             {
-                var b1 = default(VBuffer<Float>);
-                var b2 = default(VBuffer<Float>);
+                var b1 = default(VBuffer<float>);
+                var b2 = default(VBuffer<float>);
                 int col1, col2;
                 if (!c.Schema.TryGetColumnIndex("WB1", out col1) || !c.Schema.TryGetColumnIndex("WB2", out col2))
                 {
                     Fail("Did not find expected columns");
                     return;
                 }
-                var get1 = c.GetGetter<VBuffer<Float>>(col1);
-                var get2 = c.GetGetter<VBuffer<Float>>(col2);
+                var get1 = c.GetGetter<VBuffer<float>>(col1);
+                var get2 = c.GetGetter<VBuffer<float>>(col2);
                 while (c.MoveNext())
                 {
                     get1(ref b1);
@@ -1158,9 +1155,9 @@ public void ArrayDataViewBuilder()
             ArrayDataViewBuilder builder = new ArrayDataViewBuilder(Env);
             const int rows = 100;
             Random rgen = new Random(0);
-            Float[] values = new Float[rows];
+            float[] values = new float[rows];
             for (int i = 0; i < values.Length; ++i)
-                values[i] = (Float)(2 * rgen.NextDouble() - 1);
+                values[i] = (float)(2 * rgen.NextDouble() - 1);
             builder.AddColumn("Foo", NumberDataViewType.Single, values);
 
             int[][] barValues = new int[rows][];
@@ -1198,10 +1195,10 @@ public void ArrayDataViewBuilder()
             using (DataViewRowCursor cursor = view.GetRowCursorForAllColumns())
             {
-                var del = cursor.GetGetter<Float>(0);
+                var del = cursor.GetGetter<float>(0);
                 var del2 = cursor.GetGetter<VBuffer<int>>(1);
                 var del3 = cursor.GetGetter<bool>(2);
-                Float value = 0;
+                float value = 0;
                 VBuffer<int> value2 = default(VBuffer<int>);
                 bool value3 = default(bool);
                 int row = 0;
@@ -1316,9 +1313,9 @@ public void TestLDATransform()
             var builder = new ArrayDataViewBuilder(Env);
             var data = new[]
             {
-                new[] { (Float)1.0, (Float)0.0, (Float)0.0 },
-                new[] { (Float)0.0, (Float)1.0, (Float)0.0 },
-                new[] { (Float)0.0, (Float)0.0, (Float)1.0 },
+                new[] { (float)1.0, (float)0.0, (float)0.0 },
+                new[] { (float)0.0, (float)1.0, (float)0.0 },
+                new[] { (float)0.0, (float)0.0, (float)1.0 },
             };
 
             builder.AddColumn("F1V", NumberDataViewType.Single, data);
@@ -1330,10 +1327,10 @@ public void TestLDATransform()
             using (var cursor = transformedData.GetRowCursorForAllColumns())
             {
-                var resultGetter = cursor.GetGetter<VBuffer<Float>>(1);
-                VBuffer<Float> resultFirstRow = new VBuffer<Float>();
-                VBuffer<Float> resultSecondRow = new VBuffer<Float>();
-                VBuffer<Float> resultThirdRow = new VBuffer<Float>();
+                var resultGetter = cursor.GetGetter<VBuffer<float>>(1);
+                VBuffer<float> resultFirstRow = new VBuffer<float>();
+                VBuffer<float> resultSecondRow = new VBuffer<float>();
+                VBuffer<float> resultThirdRow = new VBuffer<float>();
 
                 Assert.True(cursor.MoveNext());
                 resultGetter(ref resultFirstRow);
@@ -1365,9 +1362,9 @@ public void TestLdaTransformerEmptyDocumentException()
             string colName = "Zeros";
             var data = new[]
             {
-                new[] { (Float)0.0, (Float)0.0, (Float)0.0 },
-                new[] { (Float)0.0, (Float)0.0, (Float)0.0 },
-                new[] { (Float)0.0, (Float)0.0, (Float)0.0 },
+                new[] { (float)0.0, (float)0.0, (float)0.0 },
+                new[] { (float)0.0, (float)0.0, (float)0.0 },
+                new[] { (float)0.0, (float)0.0, (float)0.0 },
             };
 
             builder.AddColumn(colName, NumberDataViewType.Single, data);
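The tests above compare floats with FloatUtils.GetBits rather than ==, since dataFloat deliberately includes NaN and -0.0f. Bitwise comparison treats a NaN as equal to itself and distinguishes the two zeros, which == does not. A standalone illustration (not part of the patch); BitConverter.SingleToInt32Bits stands in for FloatUtils.GetBits here:

using System;

internal static class BitsDemo
{
    private static bool SameBits(float x, float y)
        => BitConverter.SingleToInt32Bits(x) == BitConverter.SingleToInt32Bits(y);

    private static void Main()
    {
        Console.WriteLine(float.NaN == float.NaN);         // False: == never matches NaN
        Console.WriteLine(SameBits(float.NaN, float.NaN)); // True (same NaN payload)
        Console.WriteLine(-0.0f == 0.0f);                  // True: == ignores zero's sign
        Console.WriteLine(SameBits(-0.0f, 0.0f));          // False: sign bit differs
    }
}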
From ee25f1487a55f6f27f16ae47006434b15bbf01da Mon Sep 17 00:00:00 2001
From: J W
Date: Wed, 27 Feb 2019 13:37:42 -0500
Subject: [PATCH 6/6] Fix PR comments

---
 src/Microsoft.ML.Sweeper/Parameters.cs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Microsoft.ML.Sweeper/Parameters.cs b/src/Microsoft.ML.Sweeper/Parameters.cs
index 276b0e45cc..e0c95dab12 100644
--- a/src/Microsoft.ML.Sweeper/Parameters.cs
+++ b/src/Microsoft.ML.Sweeper/Parameters.cs
@@ -15,7 +15,7 @@
 [assembly: LoadableClass(typeof(LongValueGenerator), typeof(LongParamOptions), typeof(SignatureSweeperParameter),
     "Long parameter", "lp")]
 [assembly: LoadableClass(typeof(FloatValueGenerator), typeof(FloatParamOptions), typeof(SignatureSweeperParameter),
-    "float parameter", "fp")]
+    "Float parameter", "fp")]
 
 [assembly: LoadableClass(typeof(DiscreteValueGenerator), typeof(DiscreteParamOptions), typeof(SignatureSweeperParameter),
     "Discrete parameter", "dp")]
@@ -554,7 +554,7 @@ public bool TryParseParameter(string paramValue, Type paramType, string paramNam
 
             // Extract the minimum, and the maximum value of the list of suggested sweeps.
             // Positive lookahead splitting at the '-' character.
-            // It is used for the float and Long param types.
+            // It is used for the Float and Long param types.
             // Example format: "0.02-0.1;steps:5".
             string[] minMaxRegex = Regex.Split(paramValue, "(?<=[^eE])-");
             if (minMaxRegex.Length != 2)
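One closing note on the split pattern in this hunk: "(?<=[^eE])-" is, strictly speaking, a positive lookbehind on [^eE] (the code comment calls it a lookahead). It matches a '-' only when the preceding character is not an exponent marker, so a range like "1e-2-1e-1" splits at the separator while each exponent's minus sign survives. A quick standalone check, not part of the patch:

using System;
using System.Text.RegularExpressions;

internal static class SplitDemo
{
    private static void Main()
    {
        // Plain range: splits into "0.02" and "0.1".
        Console.WriteLine(string.Join(" | ", Regex.Split("0.02-0.1", "(?<=[^eE])-")));

        // Scientific notation: the '-' in each exponent is preceded by 'e',
        // so only the separator between the two bounds splits the string.
        Console.WriteLine(string.Join(" | ", Regex.Split("1e-2-1e-1", "(?<=[^eE])-")));
        // Output: 1e-2 | 1e-1
    }
}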