diff --git a/src/Microsoft.ML.Core/Data/VBuffer.cs b/src/Microsoft.ML.Core/Data/VBuffer.cs
index 133bf93e4b..6560d82a55 100644
--- a/src/Microsoft.ML.Core/Data/VBuffer.cs
+++ b/src/Microsoft.ML.Core/Data/VBuffer.cs
@@ -14,7 +14,7 @@ namespace Microsoft.ML.Runtime.Data
     /// is passed to a row cursor getter, the callee is free to take ownership of
     /// and re-use the arrays (Values and Indices).
     /// </summary>
-    public struct VBuffer<T>
+    public readonly struct VBuffer<T>
     {
         /// <summary>
         /// The logical length of the buffer.
@@ -424,11 +424,6 @@ public static void Copy(T[] src, int srcIndex, ref VBuffer<T> dst, int length)
             dst = new VBuffer<T>(length, values, dst.Indices);
         }
 
-        public static void Copy(ref VBuffer<T> src, ref VBuffer<T> dst)
-        {
-            src.CopyTo(ref dst);
-        }
-
         public IEnumerable<KeyValuePair<int, T>> Items(bool all = false)
         {
             return VBufferUtils.Items(Values, Indices, Length, Count, all);
diff --git a/src/Microsoft.ML.Core/Prediction/ITree.cs b/src/Microsoft.ML.Core/Prediction/ITree.cs
index 971407d3fb..9014eadd68 100644
--- a/src/Microsoft.ML.Core/Prediction/ITree.cs
+++ b/src/Microsoft.ML.Core/Prediction/ITree.cs
@@ -71,7 +71,7 @@ public interface ITree<TFeatures> : ITree
        /// <param name="features">Type of features container (instance) on which to make predictions</param>
        /// <returns>node id</returns>
        /// <typeparam name="TFeatures">Type of features container (instance) on which to make predictions</typeparam>
-        int GetLeaf(ref TFeatures features);
+        int GetLeaf(in TFeatures features);
     }
 
     /// <summary>
diff --git a/src/Microsoft.ML.Core/Utilities/VBufferUtils.cs b/src/Microsoft.ML.Core/Utilities/VBufferUtils.cs
index ca360b669e..8b3bfb2330 100644
--- a/src/Microsoft.ML.Core/Utilities/VBufferUtils.cs
+++ b/src/Microsoft.ML.Core/Utilities/VBufferUtils.cs
@@ -91,7 +91,7 @@ internal static IEnumerable<T> DenseValues<T>(T[] values, int[] indices, int len
             }
         }
 
-        public static bool HasNaNs(ref VBuffer<Single> buffer)
+        public static bool HasNaNs(in VBuffer<Single> buffer)
        {
            for (int i = 0; i < buffer.Count; i++)
            {
@@ -101,7 +101,7 @@ public static bool HasNaNs(ref VBuffer<Single> buffer)
            }
            return false;
        }
-        public static bool HasNaNs(ref VBuffer<Double> buffer)
+        public static bool HasNaNs(in VBuffer<Double> buffer)
        {
            for (int i = 0; i < buffer.Count; i++)
            {
@@ -111,7 +111,7 @@ public static bool HasNaNs(ref VBuffer<Double> buffer)
            }
            return false;
        }
-        public static bool HasNonFinite(ref VBuffer<Single> buffer)
+        public static bool HasNonFinite(in VBuffer<Single> buffer)
        {
            for (int i = 0; i < buffer.Count; i++)
            {
@@ -121,7 +121,7 @@ public static bool HasNonFinite(ref VBuffer<Single> buffer)
            }
            return false;
        }
-        public static bool HasNonFinite(ref VBuffer<Double> buffer)
+        public static bool HasNonFinite(in VBuffer<Double> buffer)
        {
            for (int i = 0; i < buffer.Count; i++)
            {
@@ -147,7 +147,7 @@ public static VBuffer<T> CreateDense<T>(int length)
        /// Applies <paramref name="visitor"/> to every explicitly defined element of the vector,
        /// in order of index.
        /// </summary>
-        public static void ForEachDefined<T>(ref VBuffer<T> a, Action<int, T> visitor)
+        public static void ForEachDefined<T>(in VBuffer<T> a, Action<int, T> visitor)
        {
            Contracts.CheckValue(visitor, nameof(visitor));
 
@@ -175,7 +175,7 @@ public static void ForEachDefined<T>(ref VBuffer<T> a, Action<int, T> visitor)
        /// <param name="b">The second vector</param>
        /// <param name="visitor">Delegate to apply to each pair of non-zero values.
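The heart of this change is C# 7.2 readonly references: `VBuffer<T>` becomes a `readonly struct`, and parameters that were only ever read switch from `ref` to `in`, which passes a readonly reference (no copy on call, no mutation in the callee). A minimal sketch of those semantics with a toy type, not the ML.NET API:

```csharp
using System;

// A readonly struct can be passed by "in" without defensive copies,
// because the compiler knows its methods cannot mutate the instance.
public readonly struct Point
{
    public readonly double X;
    public readonly double Y;
    public Point(double x, double y) { X = x; Y = y; }
    public double Norm() => Math.Sqrt(X * X + Y * Y);
}

public static class InDemo
{
    // "in" passes a readonly reference: no copy on call, no writes allowed inside.
    public static double Norm(in Point p) => p.Norm();

    public static void Main()
    {
        var p = new Point(3, 4);
        // Callers may pass the argument plainly or with an explicit "in".
        Console.WriteLine(Norm(p));    // 5
        Console.WriteLine(Norm(in p)); // 5
    }
}
```

Marking the struct `readonly` matters here: calling a method on an `in` parameter of a non-readonly struct forces the compiler to emit a defensive copy first, which would defeat the point of the whole migration.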
/// This is passed the index, and two values - public static void ForEachBothDefined(ref VBuffer a, ref VBuffer b, Action visitor) + public static void ForEachBothDefined(in VBuffer a, in VBuffer b, Action visitor) { Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality."); Contracts.CheckValue(visitor, nameof(visitor)); @@ -220,7 +220,7 @@ public static void ForEachBothDefined(ref VBuffer a, ref VBuffer b, Act /// a vector /// another vector /// Function to apply to each pair of non-zero values - passed the index, and two values - public static void ForEachEitherDefined(ref VBuffer a, ref VBuffer b, Action visitor) + public static void ForEachEitherDefined(in VBuffer a, in VBuffer b, Action visitor) { Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality."); Contracts.CheckValue(visitor, nameof(visitor)); @@ -492,7 +492,7 @@ public static void DensifyFirst(ref VBuffer dst, int denseCount) /// Creates a maybe sparse copy of a VBuffer. /// Whether the created copy is sparse or not is determined by the proportion of non-default entries compared to the sparsity parameter. /// - public static void CreateMaybeSparseCopy(ref VBuffer src, ref VBuffer dst, InPredicate isDefaultPredicate, float sparsityThreshold = SparsityThreshold) + public static void CreateMaybeSparseCopy(in VBuffer src, ref VBuffer dst, InPredicate isDefaultPredicate, float sparsityThreshold = SparsityThreshold) { Contracts.CheckParam(0 < sparsityThreshold && sparsityThreshold < 1, nameof(sparsityThreshold)); if (!src.IsDense || src.Length < 20) @@ -573,9 +573,9 @@ public static void CreateMaybeSparseCopy(ref VBuffer src, ref VBuffer d /// Argument vector, whose elements are only read /// Argument vector, that could change /// Function to apply to each pair of elements - public static void ApplyWith(ref VBuffer src, ref VBuffer dst, PairManipulator manip) + public static void ApplyWith(in VBuffer src, ref VBuffer dst, PairManipulator manip) { - ApplyWithCore(ref src, ref dst, manip, outer: false); + ApplyWithCore(in src, ref dst, manip, outer: false); } /// @@ -589,12 +589,13 @@ public static void ApplyWith(ref VBuffer src, ref VBuffer /// Argument vector, whose elements are only read - /// Argument vector, whose elements are only read + /// Argument vector, whose elements are read in most cases. But in some + /// cases may be densified. /// Result vector /// Function to apply to each pair of elements - public static void ApplyWithCopy(ref VBuffer src, ref VBuffer dst, ref VBuffer res, PairManipulatorCopy manip) + public static void ApplyWithCopy(in VBuffer src, ref VBuffer dst, ref VBuffer res, PairManipulatorCopy manip) { - ApplyWithCoreCopy(ref src, ref dst, ref res, manip, outer: false); + ApplyWithCoreCopy(in src, ref dst, ref res, manip, outer: false); } /// @@ -608,9 +609,9 @@ public static void ApplyWithCopy(ref VBuffer src, ref VBuffer< /// Argument vector, whose elements are only read /// Argument vector, that could change /// Function to apply to each pair of elements - public static void ApplyWithEitherDefined(ref VBuffer src, ref VBuffer dst, PairManipulator manip) + public static void ApplyWithEitherDefined(in VBuffer src, ref VBuffer dst, PairManipulator manip) { - ApplyWithCore(ref src, ref dst, manip, outer: true); + ApplyWithCore(in src, ref dst, manip, outer: true); } /// @@ -622,12 +623,13 @@ public static void ApplyWithEitherDefined(ref VBuffer src, ref /// there is any slot that is not explicitly represented in either vector. 
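`ForEachBothDefined` visits only the slots explicitly represented in both vectors (the inner join of the index sets), while `ForEachEitherDefined` takes the outer join. For sparse inputs the inner join is a classic two-pointer merge over the sorted index arrays; a sketch of the idea with plain arrays standing in for the buffers (VBuffer indices are strictly increasing, which this assumes):

```csharp
using System;

public static class SparseJoin
{
    // Visit (slot, a-value, b-value) for every slot present in BOTH sparse vectors.
    public static void ForEachBothDefined(
        int[] indicesA, double[] valuesA,
        int[] indicesB, double[] valuesB,
        Action<int, double, double> visitor)
    {
        int ia = 0, ib = 0;
        while (ia < indicesA.Length && ib < indicesB.Length)
        {
            if (indicesA[ia] < indicesB[ib])
                ia++;                       // slot only in A: skipped by the inner join
            else if (indicesA[ia] > indicesB[ib])
                ib++;                       // slot only in B: skipped
            else
            {
                visitor(indicesA[ia], valuesA[ia], valuesB[ib]);
                ia++;
                ib++;
            }
        }
    }

    public static void Main()
    {
        ForEachBothDefined(
            new[] { 1, 4, 7 }, new[] { 1.0, 2.0, 3.0 },
            new[] { 4, 5, 7 }, new[] { 10.0, 20.0, 30.0 },
            (slot, a, b) => Console.WriteLine($"{slot}: {a} * {b}"));
        // Prints slots 4 and 7 only.
    }
}
```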
/// /// Argument vector, whose elements are only read - /// Argument vector, whose elements are only read + /// Argument vector, whose elements are read in most cases. But in some + /// cases may be densified. /// Result vector /// Function to apply to each pair of elements - public static void ApplyWithEitherDefinedCopy(ref VBuffer src, ref VBuffer dst, ref VBuffer res, PairManipulatorCopy manip) + public static void ApplyWithEitherDefinedCopy(in VBuffer src, ref VBuffer dst, ref VBuffer res, PairManipulatorCopy manip) { - ApplyWithCoreCopy(ref src, ref dst, ref res, manip, outer: true); + ApplyWithCoreCopy(in src, ref dst, ref res, manip, outer: true); } /// @@ -636,7 +638,7 @@ public static void ApplyWithEitherDefinedCopy(ref VBuffer src, /// where necessary depending on whether this is an inner or outer join of the /// indices of on . /// - private static void ApplyWithCore(ref VBuffer src, ref VBuffer dst, PairManipulator manip, bool outer) + private static void ApplyWithCore(in VBuffer src, ref VBuffer dst, PairManipulator manip, bool outer) { Contracts.Check(src.Length == dst.Length, "Vectors must have the same dimensionality."); Contracts.CheckValue(manip, nameof(manip)); @@ -773,7 +775,7 @@ private static void ApplyWithCore(ref VBuffer src, ref VBuffer // This is unnecessary -- falling through to the sparse code will // actually handle this case just fine -- but it is more efficient. Densify(ref dst); - ApplyWithCore(ref src, ref dst, manip, outer); + ApplyWithCore(in src, ref dst, manip, outer); return; } @@ -908,7 +910,7 @@ private static void ApplyWithCore(ref VBuffer src, ref VBuffer /// where necessary depending on whether this is an inner or outer join of the /// indices of on . /// - private static void ApplyWithCoreCopy(ref VBuffer src, ref VBuffer dst, ref VBuffer res, PairManipulatorCopy manip, bool outer) + private static void ApplyWithCoreCopy(in VBuffer src, ref VBuffer dst, ref VBuffer res, PairManipulatorCopy manip, bool outer) { Contracts.Check(src.Length == dst.Length, "Vectors must have the same dimensionality."); Contracts.CheckValue(manip, nameof(manip)); @@ -1090,7 +1092,7 @@ private static void ApplyWithCoreCopy(ref VBuffer src, ref VBu // This is unnecessary -- falling through to the sparse code will // actually handle this case just fine -- but it is more efficient. Densify(ref dst); - ApplyWithCoreCopy(ref src, ref dst, ref res, manip, outer); + ApplyWithCoreCopy(in src, ref dst, ref res, manip, outer); } else { @@ -1152,7 +1154,7 @@ private static void ApplyWithCoreCopy(ref VBuffer src, ref VBu /// /// /// - public static void ApplyIntoEitherDefined(ref VBuffer src, ref VBuffer dst, Func func) + public static void ApplyIntoEitherDefined(in VBuffer src, ref VBuffer dst, Func func) { Contracts.CheckValue(func, nameof(func)); @@ -1189,7 +1191,7 @@ public static void ApplyIntoEitherDefined(ref VBuffer src, ref /// necessarily be dense. Otherwise, if both are sparse, the output will be sparse iff /// there is any slot that is not explicitly represented in either vector. 
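The doc-comment fix above is the subtle part of this hunk: `dst` is no longer promised to be read-only, because `ApplyWithCoreCopy` may densify it when that is cheaper than merging two sparse index sets. Densifying simply means expanding to the full logical length, with implicit entries becoming explicit zeros; a toy sketch, arrays in place of VBuffer:

```csharp
using System;

public static class DensifyDemo
{
    // Expand a sparse representation (values at the given indices, zeros elsewhere)
    // into a dense array of the full logical length.
    public static double[] Densify(int length, double[] values, int[] indices)
    {
        var dense = new double[length]; // implicit entries become explicit zeros
        for (int i = 0; i < values.Length; i++)
            dense[indices[i]] = values[i];
        return dense;
    }

    public static void Main()
    {
        var dense = Densify(6, new[] { 1.0, 2.0 }, new[] { 1, 4 });
        Console.WriteLine(string.Join(" ", dense)); // 0 1 0 0 2 0
    }
}
```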
/// - public static void ApplyInto(ref VBuffer a, ref VBuffer b, ref VBuffer dst, Func func) + public static void ApplyInto(in VBuffer a, in VBuffer b, ref VBuffer dst, Func func) { Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality."); Contracts.CheckValue(func, nameof(func)); diff --git a/src/Microsoft.ML.Data/Data/BufferBuilder.cs b/src/Microsoft.ML.Data/Data/BufferBuilder.cs index b5f20eac5a..5020ae0418 100644 --- a/src/Microsoft.ML.Data/Data/BufferBuilder.cs +++ b/src/Microsoft.ML.Data/Data/BufferBuilder.cs @@ -427,7 +427,7 @@ public void Reset(int length, bool dense) SetActiveRangeImpl(0, length); } - public void AddFeatures(int index, ref VBuffer buffer) + public void AddFeatures(int index, in VBuffer buffer) { Contracts.Check(0 <= index && index <= _length - buffer.Length); diff --git a/src/Microsoft.ML.Data/DataLoadSave/Binary/BinarySaver.cs b/src/Microsoft.ML.Data/DataLoadSave/Binary/BinarySaver.cs index bed86561e4..b4854070e5 100644 --- a/src/Microsoft.ML.Data/DataLoadSave/Binary/BinarySaver.cs +++ b/src/Microsoft.ML.Data/DataLoadSave/Binary/BinarySaver.cs @@ -129,7 +129,7 @@ public override void FetchAndWrite() { Contracts.Assert(_writer != null); _getter(ref _value); - _writer.Write(ref _value); + _writer.Write(in _value); } public override MemoryStream EndBlock() @@ -362,7 +362,7 @@ private IValueCodec WriteMetadataCore(Stream stream, ISchema schema, int col, MemoryStream uncompressedMem = _memPool.Get(); using (IValueWriter writer = codec.OpenWriter(uncompressedMem)) { - writer.Write(ref value); + writer.Write(in value); writer.Commit(); } MemoryStream compressedMem = _memPool.Get(); @@ -791,7 +791,7 @@ private void EstimatorCore(IRowCursor cursor, ColumnCodec col, fetchWriteEstimator = () => { getter(ref val); - specificWriter.Write(ref val); + specificWriter.Write(in val); return specificWriter.GetCommitLengthEstimate(); }; } @@ -867,7 +867,7 @@ public bool TryWriteTypeAndValue(Stream stream, ColumnType type, ref T value, using (var writer = codecT.OpenWriter(stream)) { - writer.Write(ref value); + writer.Write(in value); bytesWritten += (int)writer.GetCommitLengthEstimate(); writer.Commit(); } diff --git a/src/Microsoft.ML.Data/DataLoadSave/Binary/Codecs.cs b/src/Microsoft.ML.Data/DataLoadSave/Binary/Codecs.cs index 3e4f997431..05a776cc7b 100644 --- a/src/Microsoft.ML.Data/DataLoadSave/Binary/Codecs.cs +++ b/src/Microsoft.ML.Data/DataLoadSave/Binary/Codecs.cs @@ -45,7 +45,7 @@ public virtual void Dispose() } } - public abstract void Write(ref T value); + public abstract void Write(in T value); public virtual void Write(T[] values, int index, int count) { @@ -53,7 +53,7 @@ public virtual void Write(T[] values, int index, int count) Contracts.Assert(0 <= count && count <= Utils.Size(values) - index); // Basic un-optimized reference implementation. 
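One reason the codec hunks come in matched pairs: `in` is part of the method signature, so the abstract `Write` and every override must switch together, and call sites such as `Write(in values[i + index])` then take a readonly reference directly to the array slot instead of copying the element. A toy sketch of the pattern (hypothetical `ValueWriter` stand-in, not the binary codec API):

```csharp
using System;

public abstract class ValueWriter<T>
{
    // The "in" modifier is part of the signature: overrides must repeat it.
    public abstract void Write(in T value);

    public virtual void Write(T[] values, int index, int count)
    {
        // Passing an array element with "in" takes a readonly reference to that slot.
        for (int i = 0; i < count; ++i)
            Write(in values[i + index]);
    }
}

public sealed class ConsoleWriter : ValueWriter<double>
{
    public override void Write(in double value) => Console.WriteLine(value);
}

public static class WriterDemo
{
    public static void Main()
    {
        ValueWriter<double> w = new ConsoleWriter();
        w.Write(new[] { 1.5, 2.5 }, 0, 2);
    }
}
```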
for (int i = 0; i < count; ++i) - Write(ref values[i + index]); + Write(in values[i + index]); } public abstract void Commit(); @@ -214,7 +214,7 @@ public Writer(UnsafeTypeCodec codec, Stream stream) _ops = codec._ops; } - public override void Write(ref T value) + public override void Write(in T value) { _ops.Write(value, Writer); _numWritten++; @@ -346,7 +346,7 @@ public Writer(TextCodec codec, Stream stream) _boundaries = new List(); } - public override void Write(ref ReadOnlyMemory value) + public override void Write(in ReadOnlyMemory value) { Contracts.Check(_builder != null, "writer was already committed"); _builder.AppendMemory(value); @@ -456,7 +456,7 @@ public Writer(BoolCodec codec, Stream stream) { } - public override void Write(ref bool value) + public override void Write(in bool value) { Contracts.Assert(0 <= _currentIndex && _currentIndex < 8); @@ -620,7 +620,7 @@ public Writer(DateTimeCodec codec, Stream stream) { } - public override void Write(ref DateTime value) + public override void Write(in DateTime value) { Writer.Write(value.Ticks); _numWritten++; @@ -698,7 +698,7 @@ public Writer(DateTimeOffsetCodec codec, Stream stream) _ticks = new List(); } - public override void Write(ref DateTimeOffset value) + public override void Write(in DateTimeOffset value) { Contracts.Assert(_offsets != null, "writer was already committed"); @@ -927,7 +927,7 @@ public override long GetCommitLengthEstimate() return structureLength + _valueWriter.GetCommitLengthEstimate(); } - public override void Write(ref VBuffer value) + public override void Write(in VBuffer value) { Contracts.Check(_valuesStream != null, "writer already committed"); if (FixedLength) diff --git a/src/Microsoft.ML.Data/DataLoadSave/Binary/IValueCodec.cs b/src/Microsoft.ML.Data/DataLoadSave/Binary/IValueCodec.cs index 9c6e607022..f78beec25c 100644 --- a/src/Microsoft.ML.Data/DataLoadSave/Binary/IValueCodec.cs +++ b/src/Microsoft.ML.Data/DataLoadSave/Binary/IValueCodec.cs @@ -100,7 +100,7 @@ internal interface IValueWriter : IValueWriter /// /// Writes a single value to the writer. /// - void Write(ref T value); + void Write(in T value); /// /// Writes an array of values. This should be equivalent to writing each element diff --git a/src/Microsoft.ML.Data/DataView/ArrayDataViewBuilder.cs b/src/Microsoft.ML.Data/DataView/ArrayDataViewBuilder.cs index 40b6b66497..b7f9b494e9 100644 --- a/src/Microsoft.ML.Data/DataView/ArrayDataViewBuilder.cs +++ b/src/Microsoft.ML.Data/DataView/ArrayDataViewBuilder.cs @@ -393,7 +393,7 @@ public Column(ColumnType type, TIn[] values) /// compromising this object's ownership of src. What that operation will be /// will depend on the types. /// - protected abstract void CopyOut(ref TIn src, ref TOut dst); + protected abstract void CopyOut(in TIn src, ref TOut dst); /// /// Produce the output value given the index. 
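The `BoolCodec` writer's assert (`0 <= _currentIndex && _currentIndex < 8`) reflects that it packs eight booleans per byte. A self-contained sketch of such a bit-packing writer; this is illustrative only, and the bit order is an assumption rather than the codec's actual on-disk layout:

```csharp
using System;
using System.Collections.Generic;

public sealed class BitPackingWriter
{
    private readonly List<byte> _output = new List<byte>();
    private byte _current;
    private int _currentIndex; // always in [0, 8)

    public void Write(in bool value)
    {
        if (value)
            _current |= (byte)(1 << _currentIndex);
        if (++_currentIndex == 8)
            Flush();
    }

    private void Flush()
    {
        _output.Add(_current);
        _current = 0;
        _currentIndex = 0;
    }

    public byte[] Commit()
    {
        if (_currentIndex > 0)
            Flush(); // pad the final partial byte with zeros
        return _output.ToArray();
    }

    public static void Main()
    {
        var w = new BitPackingWriter();
        foreach (var b in new[] { true, false, true })
            w.Write(in b);
        Console.WriteLine(Convert.ToString(w.Commit()[0], 2)); // 101 -> bits 0 and 2 set
    }
}
```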
This overload utilizes the CopyOut @@ -402,7 +402,7 @@ public Column(ColumnType type, TIn[] values) public override void CopyOut(int index, ref TOut value) { Contracts.Assert(0 <= index & index < _values.Length); - CopyOut(ref _values[index], ref value); + CopyOut(in _values[index], ref value); } } @@ -417,7 +417,7 @@ public AssignmentColumn(PrimitiveType type, T[] values) { } - protected override void CopyOut(ref T src, ref T dst) + protected override void CopyOut(in T src, ref T dst) { dst = src; } @@ -433,7 +433,7 @@ public StringToTextColumn(string[] values) { } - protected override void CopyOut(ref string src, ref ReadOnlyMemory dst) + protected override void CopyOut(in string src, ref ReadOnlyMemory dst) { dst = src.AsMemory(); } @@ -482,7 +482,7 @@ public VBufferColumn(PrimitiveType itemType, VBuffer[] values) { } - protected override void CopyOut(ref VBuffer src, ref VBuffer dst) + protected override void CopyOut(in VBuffer src, ref VBuffer dst) { src.CopyTo(ref dst); } @@ -495,7 +495,7 @@ public ArrayToVBufferColumn(PrimitiveType itemType, T[][] values) { } - protected override void CopyOut(ref T[] src, ref VBuffer dst) + protected override void CopyOut(in T[] src, ref VBuffer dst) { VBuffer.Copy(src, 0, ref dst, Utils.Size(src)); } @@ -511,7 +511,7 @@ public ArrayToSparseVBufferColumn(PrimitiveType itemType, Combiner combiner, _bldr = new BufferBuilder(combiner); } - protected override void CopyOut(ref T[] src, ref VBuffer dst) + protected override void CopyOut(in T[] src, ref VBuffer dst) { var length = Utils.Size(src); _bldr.Reset(length, false); diff --git a/src/Microsoft.ML.Data/Depricated/Instances/HeaderSchema.cs b/src/Microsoft.ML.Data/Depricated/Instances/HeaderSchema.cs index 205ed42117..dc0fd236c6 100644 --- a/src/Microsoft.ML.Data/Depricated/Instances/HeaderSchema.cs +++ b/src/Microsoft.ML.Data/Depricated/Instances/HeaderSchema.cs @@ -226,7 +226,7 @@ private static VersionInfo GetVersionInfo() loaderAssemblyName: typeof(FeatureNameCollection).Assembly.FullName); } - public static void Save(ModelSaveContext ctx, ref VBuffer> names) + public static void Save(ModelSaveContext ctx, in VBuffer> names) { Contracts.AssertValue(ctx); ctx.CheckAtModel(); diff --git a/src/Microsoft.ML.Data/Depricated/Vector/VBufferMathUtils.cs b/src/Microsoft.ML.Data/Depricated/Vector/VBufferMathUtils.cs index b8dba514df..1438cce601 100644 --- a/src/Microsoft.ML.Data/Depricated/Vector/VBufferMathUtils.cs +++ b/src/Microsoft.ML.Data/Depricated/Vector/VBufferMathUtils.cs @@ -46,7 +46,7 @@ public static Float Norm(in VBuffer a) /// Returns the L1 norm of the vector. /// /// L1 norm of the vector - public static Float L1Norm(ref VBuffer a) + public static Float L1Norm(in VBuffer a) { if (a.Count == 0) return 0; @@ -57,7 +57,7 @@ public static Float L1Norm(ref VBuffer a) /// Returns the L-infinity norm of the vector (i.e., the maximum absolute value). /// /// L-infinity norm of the vector - public static Float MaxNorm(ref VBuffer a) + public static Float MaxNorm(in VBuffer a) { if (a.Count == 0) return 0; @@ -67,7 +67,7 @@ public static Float MaxNorm(ref VBuffer a) /// /// Returns the sum of elements in the vector. /// - public static Float Sum(ref VBuffer a) + public static Float Sum(in VBuffer a) { if (a.Count == 0) return 0; @@ -132,7 +132,7 @@ public static void ScaleBy(in VBuffer src, ref VBuffer dst, Float /// /// Perform in-place vector addition += . 
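`L1Norm`, `MaxNorm`, and `Sum` iterate only over the `Count` explicitly stored values, which is sound because slots absent from a sparse vector are implicit zeros and contribute nothing to any of those three quantities. A sketch over a bare value array (standing in for `VBuffer.Values`):

```csharp
using System;

public static class SparseNorms
{
    // For a sparse vector, implicit entries are zero, so L1, L-infinity and Sum
    // depend only on the explicitly stored values.
    public static double L1Norm(double[] values)
    {
        double sum = 0;
        foreach (var v in values)
            sum += Math.Abs(v);
        return sum;
    }

    public static double MaxNorm(double[] values)
    {
        double max = 0; // never below zero: absent slots are 0
        foreach (var v in values)
            max = Math.Max(max, Math.Abs(v));
        return max;
    }

    public static void Main()
    {
        var explicitValues = new[] { -3.0, 1.0, 2.0 }; // e.g. a 1000-slot vector's 3 entries
        Console.WriteLine(L1Norm(explicitValues));  // 6
        Console.WriteLine(MaxNorm(explicitValues)); // 3
    }
}
```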
/// - public static void Add(ref VBuffer src, ref VBuffer dst) + public static void Add(in VBuffer src, ref VBuffer dst) { Contracts.Check(src.Length == dst.Length, "Vectors must have the same dimensionality."); @@ -148,7 +148,7 @@ public static void Add(ref VBuffer src, ref VBuffer dst) return; } // REVIEW: Should we use SSE for any of these possibilities? - VBufferUtils.ApplyWith(ref src, ref dst, (int i, Float v1, ref Float v2) => v2 += v1); + VBufferUtils.ApplyWith(in src, ref dst, (int i, Float v1, ref Float v2) => v2 += v1); } // REVIEW: Rename all instances of AddMult to AddScale, as soon as convesion concerns are no more. @@ -158,7 +158,7 @@ public static void Add(ref VBuffer src, ref VBuffer dst) /// If either vector is dense, will be dense, unless /// is 0 in which case this method does nothing. /// - public static void AddMult(ref VBuffer src, Float c, ref VBuffer dst) + public static void AddMult(in VBuffer src, Float c, ref VBuffer dst) { Contracts.Check(src.Length == dst.Length, "Vectors must have the same dimensionality."); @@ -174,14 +174,14 @@ public static void AddMult(ref VBuffer src, Float c, ref VBuffer d return; } // REVIEW: Should we use SSE for any of these possibilities? - VBufferUtils.ApplyWith(ref src, ref dst, (int i, Float v1, ref Float v2) => v2 += c * v1); + VBufferUtils.ApplyWith(in src, ref dst, (int i, Float v1, ref Float v2) => v2 += c * v1); } /// /// Perform scalar vector addition /// = * + /// - public static void AddMult(ref VBuffer src, Float c, ref VBuffer dst, ref VBuffer res) + public static void AddMult(in VBuffer src, Float c, ref VBuffer dst, ref VBuffer res) { Contracts.Check(src.Length == dst.Length, "Vectors must have the same dimensionality."); int length = src.Length; @@ -202,7 +202,7 @@ public static void AddMult(ref VBuffer src, Float c, ref VBuffer d return; } - VBufferUtils.ApplyWithCopy(ref src, ref dst, ref res, (int i, Float v1, Float v2, ref Float v3) => v3 = v2 + c * v1); + VBufferUtils.ApplyWithCopy(in src, ref dst, ref res, (int i, Float v1, Float v2, ref Float v3) => v3 = v2 + c * v1); } /// @@ -210,16 +210,16 @@ public static void AddMult(ref VBuffer src, Float c, ref VBuffer d /// + * /// and store the result in . /// - public static void AddMultInto(ref VBuffer a, Float c, ref VBuffer b, ref VBuffer dst) + public static void AddMultInto(in VBuffer a, Float c, in VBuffer b, ref VBuffer dst) { Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality."); if (c == 0 || b.Count == 0) a.CopyTo(ref dst); else if (a.Count == 0) - ScaleInto(ref b, c, ref dst); + ScaleInto(in b, c, ref dst); else - VBufferUtils.ApplyInto(ref a, ref b, ref dst, (ind, v1, v2) => v1 + c * v2); + VBufferUtils.ApplyInto(in a, in b, ref dst, (ind, v1, v2) => v1 + c * v2); } /// @@ -228,7 +228,7 @@ public static void AddMultInto(ref VBuffer a, Float c, ref VBuffer /// except that this takes place in the section of starting /// at slot . /// - public static void AddMultWithOffset(ref VBuffer src, Float c, ref VBuffer dst, int offset) + public static void AddMultWithOffset(in VBuffer src, Float c, ref VBuffer dst, int offset) { Contracts.CheckParam(0 <= offset && offset <= dst.Length, nameof(offset)); Contracts.CheckParam(src.Length <= dst.Length - offset, nameof(offset)); @@ -355,7 +355,7 @@ public static void AddMultWithOffset(ref VBuffer src, Float c, ref VBuffe /// is sparse, will have a count of zero, instead of the /// same count as . 
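`Add` and `AddMult` are sparse-aware axpy operations (`dst += c * src`); in the general case they defer to `ApplyWith` with a small lambda, as the hunks above show. A sketch of the delegate shape that lambda satisfies, run over dense arrays (the `PairManipulator` name follows VBufferUtils; the rest is toy code):

```csharp
using System;

public static class AxpyDemo
{
    // Mirrors VBufferUtils.PairManipulator: read v1, mutate v2 in place.
    public delegate void PairManipulator<T1, T2>(int slot, T1 v1, ref T2 v2);

    public static void ApplyWith(float[] src, float[] dst, PairManipulator<float, float> manip)
    {
        for (int i = 0; i < src.Length; i++)
            manip(i, src[i], ref dst[i]);
    }

    public static void Main()
    {
        float c = 2f;
        var src = new[] { 1f, 2f, 3f };
        var dst = new[] { 10f, 10f, 10f };

        // dst += c * src, expressed exactly like the lambda in AddMult.
        ApplyWith(src, dst, (int i, float v1, ref float v2) => v2 += c * v1);

        Console.WriteLine(string.Join(", ", dst)); // 12, 14, 16
    }
}
```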
/// - public static void ScaleInto(ref VBuffer src, Float c, ref VBuffer dst) + public static void ScaleInto(in VBuffer src, Float c, ref VBuffer dst) { // REVIEW: The analogous WritableVector method insisted on // equal lengths, but I assume I don't care here. @@ -376,12 +376,12 @@ public static void ScaleInto(ref VBuffer src, Float c, ref VBuffer dst = new VBuffer(src.Length, 0, dst.Values, dst.Indices); } else if (c == -1) - VBufferUtils.ApplyIntoEitherDefined(ref src, ref dst, (i, v) => -v); + VBufferUtils.ApplyIntoEitherDefined(in src, ref dst, (i, v) => -v); else - VBufferUtils.ApplyIntoEitherDefined(ref src, ref dst, (i, v) => c * v); + VBufferUtils.ApplyIntoEitherDefined(in src, ref dst, (i, v) => c * v); } - public static int ArgMax(ref VBuffer src) + public static int ArgMax(in VBuffer src) { if (src.Length == 0) return -1; @@ -415,7 +415,7 @@ public static int ArgMax(ref VBuffer src) return ind; } - public static int ArgMin(ref VBuffer src) + public static int ArgMin(in VBuffer src) { if (src.Length == 0) return -1; diff --git a/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs b/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs index 54eada4623..79af700bcc 100644 --- a/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs +++ b/src/Microsoft.ML.Data/Depricated/Vector/VectorUtils.cs @@ -27,7 +27,7 @@ public static Float DotProduct(Float[] a, Float[] b) return CpuMathUtils.DotProductDense(a, b, a.Length); } - public static Float DotProduct(Float[] a, ref VBuffer b) + public static Float DotProduct(Float[] a, in VBuffer b) { Contracts.Check(Utils.Size(a) == b.Length, "Vectors must have the same dimensionality."); if (b.Count == 0) @@ -37,7 +37,7 @@ public static Float DotProduct(Float[] a, ref VBuffer b) return CpuMathUtils.DotProductSparse(a, b.Values, b.Indices, b.Count); } - public static Float DotProduct(ref VBuffer a, ref VBuffer b) + public static Float DotProduct(in VBuffer a, in VBuffer b) { Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality."); @@ -154,14 +154,14 @@ public static void SparsifyNormalize(ref VBuffer a, int top, int bottom, /// /// Multiplies arrays Dst *= A element by element and returns the result in (Hadamard product). 
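`DotProduct(Float[] a, in VBuffer<Float> b)` branches on density: when `b` is sparse, only its explicit entries can contribute to the product, gathered from `a` by index. A sketch of that sparse path with plain arrays in place of the VBuffer and CpuMathUtils:

```csharp
using System;

public static class SparseDot
{
    // Dot product of a dense vector with a sparse one: only explicit slots matter.
    public static float DotProductSparse(float[] dense, float[] values, int[] indices)
    {
        float result = 0;
        for (int i = 0; i < indices.Length; i++)
            result += dense[indices[i]] * values[i];
        return result;
    }

    public static void Main()
    {
        var a = new[] { 1f, 2f, 3f, 4f };
        // Sparse b: b[1] = 10, b[3] = 100, all other slots implicitly 0.
        Console.WriteLine(DotProductSparse(a, new[] { 10f, 100f }, new[] { 1, 3 })); // 420
    }
}
```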
/// - public static void MulElementWise(ref VBuffer a, ref VBuffer dst) + public static void MulElementWise(in VBuffer a, ref VBuffer dst) { Contracts.Check(a.Length == dst.Length, "Vectors must have the same dimensionality."); if (a.IsDense && dst.IsDense) CpuMathUtils.MulElementWise(a.Values, dst.Values, dst.Values, a.Length); else - VBufferUtils.ApplyWithEitherDefined(ref a, ref dst, (int ind, Float v1, ref Float v2) => { v2 *= v1; }); + VBufferUtils.ApplyWithEitherDefined(in a, ref dst, (int ind, Float v1, ref Float v2) => { v2 *= v1; }); } private static Float L2DistSquaredSparse(Float[] valuesA, int[] indicesA, int countA, Float[] valuesB, int[] indicesB, int countB, int length) @@ -257,7 +257,7 @@ private static Float L2DiffSquaredDense(Float[] valuesA, Float[] valuesB, int le /// the second array (given as a VBuffer) /// offset in 'a' /// the dot product - public static Float DotProductWithOffset(ref VBuffer a, int offset, ref VBuffer b) + public static Float DotProductWithOffset(in VBuffer a, int offset, in VBuffer b) { Contracts.Check(0 <= offset && offset <= a.Length); Contracts.Check(b.Length <= a.Length - offset, "VBuffer b must be no longer than a.Length - offset."); @@ -305,7 +305,7 @@ public static Float DotProductWithOffset(ref VBuffer a, int offset, ref V /// the second array (given as a VBuffer) /// offset in 'a' /// the dot product - public static Float DotProductWithOffset(Float[] a, int offset, ref VBuffer b) + public static Float DotProductWithOffset(Float[] a, int offset, in VBuffer b) { Contracts.Check(0 <= offset && offset <= a.Length); Contracts.Check(b.Length <= a.Length - offset, "VBuffer b must be no longer than a.Length - offset."); @@ -370,10 +370,10 @@ private static Float DotProductSparse(Float[] aValues, int[] aIndices, int ia, i /// one VBuffer /// another VBuffer /// L1 Distance from a to b - public static Float L1Distance(ref VBuffer a, ref VBuffer b) + public static Float L1Distance(in VBuffer a, in VBuffer b) { Float res = 0; - VBufferUtils.ForEachEitherDefined(ref a, ref b, + VBufferUtils.ForEachEitherDefined(in a, in b, (slot, val1, val2) => res += Math.Abs(val1 - val2)); return res; } @@ -384,9 +384,9 @@ public static Float L1Distance(ref VBuffer a, ref VBuffer b) /// one VBuffer /// another VBuffer /// Distance from a to b - public static Float Distance(ref VBuffer a, ref VBuffer b) + public static Float Distance(in VBuffer a, in VBuffer b) { - return MathUtils.Sqrt(L2DistSquared(ref a, ref b)); + return MathUtils.Sqrt(L2DistSquared(in a, in b)); } /// @@ -395,7 +395,7 @@ public static Float Distance(ref VBuffer a, ref VBuffer b) /// one VBuffer /// another VBuffer /// Distance from a to b - public static Float L2DistSquared(ref VBuffer a, ref VBuffer b) + public static Float L2DistSquared(in VBuffer a, in VBuffer b) { Contracts.Check(a.Length == b.Length, "Vectors must have the same dimensionality."); if (a.IsDense) @@ -415,7 +415,7 @@ public static Float L2DistSquared(ref VBuffer a, ref VBuffer b) /// The first vector, given as an array /// The second vector, given as a VBuffer{Float} /// The squared L2 distance between a and b - public static Float L2DistSquared(Float[] a, ref VBuffer b) + public static Float L2DistSquared(Float[] a, in VBuffer b) { Contracts.CheckValue(a, nameof(a)); Contracts.Check(Utils.Size(a) == b.Length, "Vectors must have the same dimensionality."); @@ -443,7 +443,7 @@ public static void Add(Float[] src, Float[] dst) /// Buffer to add /// Array to add to /// Coefficient - public static void AddMult(ref VBuffer src, 
Float[] dst, Float c) + public static void AddMult(in VBuffer src, Float[] dst, Float c) { Contracts.CheckValue(dst, nameof(dst)); Contracts.CheckParam(src.Length == dst.Length, nameof(dst), "Arrays must have the same dimensionality."); @@ -468,7 +468,7 @@ public static void AddMult(ref VBuffer src, Float[] dst, Float c) /// The offset into at which to add /// Coefficient - public static void AddMultWithOffset(ref VBuffer src, Float[] dst, int offset, Float c) + public static void AddMultWithOffset(in VBuffer src, Float[] dst, int offset, Float c) { Contracts.CheckValue(dst, nameof(dst)); Contracts.Check(0 <= offset && offset <= dst.Length); diff --git a/src/Microsoft.ML.Data/Evaluators/ClusteringEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/ClusteringEvaluator.cs index 2c0645c2e4..34113e36fd 100644 --- a/src/Microsoft.ML.Data/Evaluators/ClusteringEvaluator.cs +++ b/src/Microsoft.ML.Data/Evaluators/ClusteringEvaluator.cs @@ -303,7 +303,7 @@ public Double Dbi continue; var centroidJ = _clusterCentroids[j]; Double num = _distancesToCentroids[i] + _distancesToCentroids[j]; - Single denom = VectorUtils.Distance(ref centroidI, ref centroidJ); + Single denom = VectorUtils.Distance(in centroidI, in centroidJ); maxi = Math.Max(maxi, num / denom); } @@ -364,11 +364,11 @@ public void InitializeSecondPass(VBuffer[] clusterCentroids) } } - public void UpdateSecondPass(ref VBuffer features, int[] indices) + public void UpdateSecondPass(in VBuffer features, int[] indices) { int assigned = indices[0]; - var distance = VectorUtils.Distance(ref _clusterCentroids[assigned], ref features); + var distance = VectorUtils.Distance(in _clusterCentroids[assigned], in features); _distancesToCentroids[assigned] += distance; } } @@ -431,7 +431,7 @@ private void ProcessRowFirstPass() _scoreGetter(ref _scores); Host.Check(_scores.Length == _scoresArr.Length); - if (VBufferUtils.HasNaNs(ref _scores) || VBufferUtils.HasNonFinite(ref _scores)) + if (VBufferUtils.HasNaNs(in _scores) || VBufferUtils.HasNonFinite(in _scores)) { NumBadScores++; return; @@ -459,7 +459,7 @@ private void ProcessRowFirstPass() if (_clusterCentroids != null) { _featGetter(ref _features); - VectorUtils.Add(ref _features, ref _clusterCentroids[_indicesArr[0]]); + VectorUtils.Add(in _features, ref _clusterCentroids[_indicesArr[0]]); } } @@ -471,16 +471,16 @@ private void ProcessRowSecondPass() _scoreGetter(ref _scores); Host.Check(_scores.Length == _scoresArr.Length); - if (VBufferUtils.HasNaNs(ref _scores) || VBufferUtils.HasNonFinite(ref _scores)) + if (VBufferUtils.HasNaNs(in _scores) || VBufferUtils.HasNonFinite(in _scores)) return; _scores.CopyTo(_scoresArr); int j = 0; foreach (var index in Enumerable.Range(0, _scoresArr.Length).OrderBy(i => _scoresArr[i])) _indicesArr[j++] = index; - UnweightedCounters.UpdateSecondPass(ref _features, _indicesArr); + UnweightedCounters.UpdateSecondPass(in _features, _indicesArr); if (WeightedCounters != null) - WeightedCounters.UpdateSecondPass(ref _features, _indicesArr); + WeightedCounters.UpdateSecondPass(in _features, _indicesArr); } public override void InitializeNextPass(IRow row, RoleMappedSchema schema) diff --git a/src/Microsoft.ML.Data/Evaluators/EvaluatorUtils.cs b/src/Microsoft.ML.Data/Evaluators/EvaluatorUtils.cs index 2f4607d849..c3374a61f5 100644 --- a/src/Microsoft.ML.Data/Evaluators/EvaluatorUtils.cs +++ b/src/Microsoft.ML.Data/Evaluators/EvaluatorUtils.cs @@ -855,7 +855,7 @@ private static IDataView AppendPerInstanceDataViews(IHostEnvironment env, string // In the event that no slot names 
were recorded here, then slotNames will be // the default, length 0 vector. firstDvSlotNames.TryGetValue(name, out slotNames); - if (!VerifyVectorColumnsMatch(cachedSize, i, dv, type, ref slotNames)) + if (!VerifyVectorColumnsMatch(cachedSize, i, dv, type, in slotNames)) variableSizeVectorColumnNamesList.Add(name); } else @@ -951,7 +951,7 @@ private static IEnumerable FindHiddenColumns(Schema schema, string colName) } private static bool VerifyVectorColumnsMatch(int cachedSize, int col, IDataView dv, - ColumnType type, ref VBuffer> firstDvSlotNames) + ColumnType type, in VBuffer> firstDvSlotNames) { if (cachedSize != type.VectorSize) return false; @@ -968,7 +968,7 @@ private static bool VerifyVectorColumnsMatch(int cachedSize, int col, IDataView else { var result = true; - VBufferUtils.ForEachEitherDefined(ref currSlotNames, ref firstDvSlotNames, + VBufferUtils.ForEachEitherDefined(in currSlotNames, in firstDvSlotNames, (slot, val1, val2) => result = result && val1.Span.SequenceEqual(val2.Span)); return result; } diff --git a/src/Microsoft.ML.Data/Evaluators/MultiOutputRegressionEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/MultiOutputRegressionEvaluator.cs index a507495056..bb9dd9f8f8 100644 --- a/src/Microsoft.ML.Data/Evaluators/MultiOutputRegressionEvaluator.cs +++ b/src/Microsoft.ML.Data/Evaluators/MultiOutputRegressionEvaluator.cs @@ -323,7 +323,7 @@ public override void ProcessRow() _scoreGetter(ref _score); Contracts.Check(_score.Length == _size); - if (VBufferUtils.HasNaNs(ref _score)) + if (VBufferUtils.HasNaNs(in _score)) { NumBadScores++; return; @@ -517,7 +517,7 @@ public override Delegate[] CreateGetters(IRow input, Func activeCols, (ref double dst) => { updateCacheIfNeeded(); - dst = VectorUtils.L1Distance(ref label, ref score); + dst = VectorUtils.L1Distance(in label, in score); }; getters[L1Output] = l1Fn; } @@ -527,7 +527,7 @@ public override Delegate[] CreateGetters(IRow input, Func activeCols, (ref double dst) => { updateCacheIfNeeded(); - dst = VectorUtils.L2DistSquared(ref label, ref score); + dst = VectorUtils.L2DistSquared(in label, in score); }; getters[L2Output] = l2Fn; } @@ -537,7 +537,7 @@ public override Delegate[] CreateGetters(IRow input, Func activeCols, (ref double dst) => { updateCacheIfNeeded(); - dst = MathUtils.Sqrt(VectorUtils.L2DistSquared(ref label, ref score)); + dst = MathUtils.Sqrt(VectorUtils.L2DistSquared(in label, in score)); }; getters[DistCol] = distFn; } diff --git a/src/Microsoft.ML.Data/Evaluators/MulticlassClassifierEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/MulticlassClassifierEvaluator.cs index eb2e52b8a0..fb27335e87 100644 --- a/src/Microsoft.ML.Data/Evaluators/MulticlassClassifierEvaluator.cs +++ b/src/Microsoft.ML.Data/Evaluators/MulticlassClassifierEvaluator.cs @@ -419,7 +419,7 @@ public override void ProcessRow() _scoreGetter(ref _scores); Host.Check(_scores.Length == _scoresArr.Length); - if (VBufferUtils.HasNaNs(ref _scores) || VBufferUtils.HasNonFinite(ref _scores)) + if (VBufferUtils.HasNaNs(in _scores) || VBufferUtils.HasNonFinite(in _scores)) { NumBadScores++; return; diff --git a/src/Microsoft.ML.Data/Evaluators/QuantileRegressionEvaluator.cs b/src/Microsoft.ML.Data/Evaluators/QuantileRegressionEvaluator.cs index 520fa00813..6b1b480e13 100644 --- a/src/Microsoft.ML.Data/Evaluators/QuantileRegressionEvaluator.cs +++ b/src/Microsoft.ML.Data/Evaluators/QuantileRegressionEvaluator.cs @@ -77,7 +77,7 @@ protected override Aggregator GetAggregatorCore(RoleMappedSchema schema, string t = 
schema.Schema.GetMetadataTypeOrNull(MetadataUtils.Kinds.SlotNames, scoreInfo.Index); if (t != null && t.VectorSize == scoreInfo.Type.VectorSize && t.ItemType.IsText) schema.Schema.GetMetadata(MetadataUtils.Kinds.SlotNames, scoreInfo.Index, ref slotNames); - return new Aggregator(Host, LossFunction, schema.Weight != null, scoreInfo.Type.VectorSize, ref slotNames, stratName); + return new Aggregator(Host, LossFunction, schema.Weight != null, scoreInfo.Type.VectorSize, in slotNames, stratName); } public override IEnumerable GetOverallMetricColumns() @@ -132,13 +132,13 @@ public Counters(int size) TotalLoss = VBufferUtils.CreateDense(size); } - protected override void UpdateCore(Float label, ref VBuffer score, ref VBuffer loss, Float weight) + protected override void UpdateCore(Float label, in VBuffer score, in VBuffer loss, Float weight) { - AddL1AndL2Loss(label, ref score, weight); - AddCustomLoss(weight, ref loss); + AddL1AndL2Loss(label, in score, weight); + AddCustomLoss(weight, in loss); } - private void AddL1AndL2Loss(Float label, ref VBuffer score, Float weight) + private void AddL1AndL2Loss(Float label, in VBuffer score, Float weight) { Contracts.Check(score.Length == TotalL1Loss.Length, "Vectors must have the same dimensionality."); @@ -165,7 +165,7 @@ private void AddL1AndL2Loss(Float label, ref VBuffer score, Float weight) } } - private void AddCustomLoss(Float weight, ref VBuffer loss) + private void AddCustomLoss(Float weight, in VBuffer loss) { Contracts.Check(loss.Length == TotalL1Loss.Length, "Vectors must have the same dimensionality."); @@ -182,7 +182,7 @@ private void AddCustomLoss(Float weight, ref VBuffer loss) TotalLoss.Values[loss.Indices[i]] += loss.Values[i] * weight; } - protected override void Normalize(ref VBuffer src, ref VBuffer dst) + protected override void Normalize(in VBuffer src, ref VBuffer dst) { Contracts.Assert(SumWeights > 0); Contracts.Assert(src.IsDense); @@ -212,7 +212,7 @@ protected override VBuffer Zero() public override CountersBase WeightedCounters { get { return _weightedCounters; } } public Aggregator(IHostEnvironment env, IRegressionLoss lossFunction, bool weighted, int size, - ref VBuffer> slotNames, string stratName) + in VBuffer> slotNames, string stratName) : base(env, lossFunction, weighted, stratName) { Host.Assert(size > 0); @@ -225,16 +225,16 @@ public Aggregator(IHostEnvironment env, IRegressionLoss lossFunction, bool weigh _slotNames = slotNames; } - protected override void ApplyLossFunction(ref VBuffer score, float label, ref VBuffer loss) + protected override void ApplyLossFunction(in VBuffer score, float label, ref VBuffer loss) { VBufferUtils.PairManipulator lossFn = (int slot, Float src, ref Double dst) => dst = LossFunction.Loss(src, label); - VBufferUtils.ApplyWith(ref score, ref loss, lossFn); + VBufferUtils.ApplyWith(in score, ref loss, lossFn); } - protected override bool IsNaN(ref VBuffer score) + protected override bool IsNaN(in VBuffer score) { - return VBufferUtils.HasNaNs(ref score); + return VBufferUtils.HasNaNs(in score); } public override void AddColumn(ArrayDataViewBuilder dvBldr, string metricName, params VBuffer[] metric) @@ -423,7 +423,7 @@ public override Delegate[] CreateGetters(IRow input, Func activeCols, { updateCacheIfNeeded(); dst = new VBuffer(_scoreSize, 0, dst.Values, dst.Indices); - VBufferUtils.ApplyWith(ref l1, ref dst, sqr); + VBufferUtils.ApplyWith(in l1, ref dst, sqr); }; getters[L2Col] = l2Fn; } diff --git a/src/Microsoft.ML.Data/Evaluators/RegressionEvaluator.cs 
b/src/Microsoft.ML.Data/Evaluators/RegressionEvaluator.cs index 3615db2959..4c8c36496e 100644 --- a/src/Microsoft.ML.Data/Evaluators/RegressionEvaluator.cs +++ b/src/Microsoft.ML.Data/Evaluators/RegressionEvaluator.cs @@ -107,7 +107,7 @@ public override double RSquared } } - protected override void UpdateCore(Float label, ref float score, ref double loss, Float weight) + protected override void UpdateCore(Float label, in float score, in double loss, Float weight) { Double currL1Loss = Math.Abs((Double)label - score); TotalL1Loss += currL1Loss * weight; @@ -115,7 +115,7 @@ protected override void UpdateCore(Float label, ref float score, ref double loss TotalLoss += loss * weight; // REVIEW: Fix this! += (Double)loss * wht; //Loss as reported by regressor, note it can result in NaN if loss is NaN } - protected override void Normalize(ref double src, ref double dst) + protected override void Normalize(in double src, ref double dst) { dst = src / SumWeights; } @@ -140,12 +140,12 @@ public Aggregator(IHostEnvironment env, IRegressionLoss lossFunction, bool weigh _weightedCounters = Weighted ? new Counters() : null; } - protected override void ApplyLossFunction(ref float score, float label, ref double loss) + protected override void ApplyLossFunction(in float score, float label, ref double loss) { loss = LossFunction.Loss(score, label); } - protected override bool IsNaN(ref Float score) + protected override bool IsNaN(in Float score) { return Float.IsNaN(score); } diff --git a/src/Microsoft.ML.Data/Evaluators/RegressionEvaluatorBase.cs b/src/Microsoft.ML.Data/Evaluators/RegressionEvaluatorBase.cs index c4f55ba2ba..dcd2ce618e 100644 --- a/src/Microsoft.ML.Data/Evaluators/RegressionEvaluatorBase.cs +++ b/src/Microsoft.ML.Data/Evaluators/RegressionEvaluatorBase.cs @@ -123,7 +123,7 @@ public TMetrics L1 { var res = Zero(); if (SumWeights > 0) - Normalize(ref TotalL1Loss, ref res); + Normalize(in TotalL1Loss, ref res); return res; } } @@ -134,7 +134,7 @@ public TMetrics L2 { var res = Zero(); if (SumWeights > 0) - Normalize(ref TotalL2Loss, ref res); + Normalize(in TotalL2Loss, ref res); return res; } } @@ -148,7 +148,7 @@ public TMetrics Loss { var res = Zero(); if (SumWeights > 0) - Normalize(ref TotalLoss, ref res); + Normalize(in TotalLoss, ref res); return res; } } @@ -160,12 +160,12 @@ public void Update(ref TScore score, float label, float weight, ref TMetrics los SumWeights += weight; TotalLabelW += label * weight; TotalLabelSquaredW += label * label * weight; - UpdateCore(label, ref score, ref loss, weight); + UpdateCore(label, in score, in loss, weight); } - protected abstract void UpdateCore(float label, ref TScore score, ref TMetrics loss, float weight); + protected abstract void UpdateCore(float label, in TScore score, in TMetrics loss, float weight); - protected abstract void Normalize(ref TMetrics src, ref TMetrics dst); + protected abstract void Normalize(in TMetrics src, ref TMetrics dst); protected abstract TMetrics Zero(); } @@ -219,7 +219,7 @@ public override void ProcessRow() return; } - if (IsNaN(ref Score)) + if (IsNaN(in Score)) { NumBadScores++; return; @@ -236,15 +236,15 @@ public override void ProcessRow() } } - ApplyLossFunction(ref Score, label, ref Loss); + ApplyLossFunction(in Score, label, ref Loss); UnweightedCounters.Update(ref Score, label, 1, ref Loss); if (WeightedCounters != null) WeightedCounters.Update(ref Score, label, weight, ref Loss); } - protected abstract void ApplyLossFunction(ref TScore score, float label, ref TMetrics loss); + protected abstract void 
ApplyLossFunction(in TScore score, float label, ref TMetrics loss); - protected abstract bool IsNaN(ref TScore score); + protected abstract bool IsNaN(in TScore score); public abstract void AddColumn(ArrayDataViewBuilder dvBldr, string metricName, params TMetrics[] metric); } diff --git a/src/Microsoft.ML.Data/Scorers/ClusteringScorer.cs b/src/Microsoft.ML.Data/Scorers/ClusteringScorer.cs index ca8ef35caa..40fd5b621f 100644 --- a/src/Microsoft.ML.Data/Scorers/ClusteringScorer.cs +++ b/src/Microsoft.ML.Data/Scorers/ClusteringScorer.cs @@ -108,7 +108,7 @@ protected override Delegate GetPredictedLabelGetter(IRow output, out Delegate sc { EnsureCachedPosition(ref cachedPosition, ref score, output, mapperScoreGetter); Contracts.Check(score.Length == scoreLength); - int index = VectorUtils.ArgMin(ref score); + int index = VectorUtils.ArgMin(in score); if (index < 0) dst = 0; else diff --git a/src/Microsoft.ML.Data/Scorers/MultiClassClassifierScorer.cs b/src/Microsoft.ML.Data/Scorers/MultiClassClassifierScorer.cs index 84898e3089..e4c2b80c83 100644 --- a/src/Microsoft.ML.Data/Scorers/MultiClassClassifierScorer.cs +++ b/src/Microsoft.ML.Data/Scorers/MultiClassClassifierScorer.cs @@ -569,7 +569,7 @@ protected override Delegate GetPredictedLabelGetter(IRow output, out Delegate sc { EnsureCachedPosition(ref cachedPosition, ref score, output, mapperScoreGetter); Host.Check(score.Length == scoreLength); - int index = VectorUtils.ArgMax(ref score); + int index = VectorUtils.ArgMax(in score); if (index < 0) dst = 0; else diff --git a/src/Microsoft.ML.Data/Scorers/ScoreMapperSchema.cs b/src/Microsoft.ML.Data/Scorers/ScoreMapperSchema.cs index 023a2f46f0..68cbd34333 100644 --- a/src/Microsoft.ML.Data/Scorers/ScoreMapperSchema.cs +++ b/src/Microsoft.ML.Data/Scorers/ScoreMapperSchema.cs @@ -242,7 +242,7 @@ public sealed class SequencePredictorSchema : ScoreMapperSchemaBase /// metadata. Note that we do not copy /// the input key names, but instead take a reference to it. 
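The counters in `RegressionEvaluatorBase` accumulate weighted totals, and `Normalize(in src, ref dst)` divides by the accumulated weight, so the reported L1/L2/loss metrics are weighted means. A condensed sketch of that bookkeeping (toy class, not the evaluator's full state):

```csharp
using System;

public sealed class WeightedLossCounter
{
    private double _totalLoss;
    private double _sumWeights;

    public void Update(double loss, double weight)
    {
        _totalLoss += loss * weight;
        _sumWeights += weight;
    }

    // Mirrors Normalize(in src, ref dst): dst = src / SumWeights, guarded for no data.
    public double Mean => _sumWeights > 0 ? _totalLoss / _sumWeights : 0;

    public static void Main()
    {
        var c = new WeightedLossCounter();
        c.Update(loss: 2.0, weight: 1.0);
        c.Update(loss: 4.0, weight: 3.0);
        Console.WriteLine(c.Mean); // (2 + 12) / 4 = 3.5
    }
}
```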
/// - public SequencePredictorSchema(ColumnType type, ref VBuffer> keyNames, string scoreColumnKind) + public SequencePredictorSchema(ColumnType type, in VBuffer> keyNames, string scoreColumnKind) : base(type, scoreColumnKind) { if (keyNames.Length > 0) diff --git a/src/Microsoft.ML.Data/Transforms/InvertHashUtils.cs b/src/Microsoft.ML.Data/Transforms/InvertHashUtils.cs index dc86b6c87b..c51fc07231 100644 --- a/src/Microsoft.ML.Data/Transforms/InvertHashUtils.cs +++ b/src/Microsoft.ML.Data/Transforms/InvertHashUtils.cs @@ -372,7 +372,7 @@ private static void Load(IChannel ch, ModelLoadContext ctx, CodecFactory factory } } - private static void Save(IChannel ch, ModelSaveContext ctx, CodecFactory factory, ref VBuffer> values) + private static void Save(IChannel ch, ModelSaveContext ctx, CodecFactory factory, in VBuffer> values) { Contracts.AssertValue(ch); ch.CheckValue(ctx, nameof(ctx)); @@ -398,7 +398,7 @@ private static void Save(IChannel ch, ModelSaveContext ctx, CodecFactory factory { using (var writer = textCodec.OpenWriter(mem)) { - writer.Write(ref values); + writer.Write(in values); writer.Commit(); } ctx.Writer.WriteByteArray(mem.ToArray()); @@ -491,7 +491,7 @@ public static void SaveAll(IHost host, ModelSaveContext ctx, int infoLim, VBuffe if (keyValues[iinfo].Length == 0) continue; ctx.SaveSubModel(string.Format(dirFormat, iinfo), - c => Save(ch, c, factory, ref keyValues[iinfo])); + c => Save(ch, c, factory, in keyValues[iinfo])); } } } diff --git a/src/Microsoft.ML.Data/Transforms/NormalizeColumn.cs b/src/Microsoft.ML.Data/Transforms/NormalizeColumn.cs index 95624c75b9..3daec40ebc 100644 --- a/src/Microsoft.ML.Data/Transforms/NormalizeColumn.cs +++ b/src/Microsoft.ML.Data/Transforms/NormalizeColumn.cs @@ -704,10 +704,10 @@ public bool ProcessValue() { TFloat tmp = default(TFloat); _getSrc(ref tmp); - return ProcessValue(ref tmp); + return ProcessValue(in tmp); } - protected virtual bool ProcessValue(ref TFloat val) + protected virtual bool ProcessValue(in TFloat val) { Host.Assert(Rem >= 0); if (Rem == 0) @@ -739,10 +739,10 @@ protected VecColumnFunctionBuilderBase(IHost host, long lim, ValueGetter buffer) + protected virtual bool ProcessValue(in VBuffer buffer) { Host.Assert(Rem >= 0); if (Rem == 0) @@ -859,13 +859,13 @@ protected override bool AcceptColumnValue() { TFloat colValue = default(TFloat); _colGetterSrc(ref colValue); - var result = AcceptColumnValue(ref colValue); + var result = AcceptColumnValue(in colValue); if (result) ColValues.Add(colValue); return result; } - protected abstract bool AcceptColumnValue(ref TFloat colValue); + protected abstract bool AcceptColumnValue(in TFloat colValue); } private abstract class VecColumnSupervisedBinFunctionBuilderBase : SupervisedBinFunctionBuilderBase @@ -894,7 +894,7 @@ protected VecColumnSupervisedBinFunctionBuilderBase(IHost host, long lim, int va protected override bool AcceptColumnValue() { _colValueGetter(ref _buffer); - bool result = AcceptColumnValue(ref _buffer); + bool result = AcceptColumnValue(in _buffer); if (result) { if (_buffer.IsDense) @@ -925,7 +925,7 @@ protected override bool AcceptColumnValue() return result; } - protected abstract bool AcceptColumnValue(ref VBuffer buffer); + protected abstract bool AcceptColumnValue(in VBuffer buffer); } internal static partial class MinMaxUtils diff --git a/src/Microsoft.ML.Data/Transforms/NormalizeColumnDbl.cs b/src/Microsoft.ML.Data/Transforms/NormalizeColumnDbl.cs index 994b583c50..6da6191951 100644 --- a/src/Microsoft.ML.Data/Transforms/NormalizeColumnDbl.cs +++ 
b/src/Microsoft.ML.Data/Transforms/NormalizeColumnDbl.cs @@ -351,7 +351,7 @@ public long[] Count get { return _vCount; } } - public void ProcessValue(ref VBuffer value) + public void ProcessValue(in VBuffer value) { var size = _min.Length; Contracts.Check(value.Length == size); @@ -455,7 +455,7 @@ public Double[] M2 get { return _m2; } } - public void ProcessValue(ref VBuffer value) + public void ProcessValue(in VBuffer value) { _trainCount++; var size = _mean.Length; @@ -673,7 +673,7 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Contracts.Check(dst.Length == Scale.Length); - FillValues(ref dst, bldr, Scale); + FillValues(in dst, bldr, Scale); bldr.GetResult(ref dst); }; } @@ -683,7 +683,7 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Contracts.Check(dst.Length == Scale.Length); - FillValues(ref dst, bldr, Scale, Offset); + FillValues(in dst, bldr, Scale, Offset); bldr.GetResult(ref dst); }; } @@ -693,7 +693,7 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Contracts.Check(dst.Length == Scale.Length); - FillValues(ref dst, bldr, Scale, Offset, IndicesNonZeroOffset); + FillValues(in dst, bldr, Scale, Offset, IndicesNonZeroOffset); bldr.GetResult(ref dst); }; } @@ -702,7 +702,7 @@ public override Delegate GetGetter(IRow input, int icol) } // REVIEW: Change to normalize in place. when there are no offsets. - private static void FillValues(ref VBuffer input, BufferBuilder bldr, TFloat[] scale) + private static void FillValues(in VBuffer input, BufferBuilder bldr, TFloat[] scale) { Contracts.Assert(input.Length == scale.Length); int size = scale.Length; @@ -732,7 +732,7 @@ private static void FillValues(ref VBuffer input, BufferBuilder } } - private static void FillValues(ref VBuffer input, BufferBuilder bldr, TFloat[] scale, + private static void FillValues(in VBuffer input, BufferBuilder bldr, TFloat[] scale, TFloat[] offset) { Contracts.Assert(input.Length == scale.Length); @@ -777,7 +777,7 @@ private static void FillValues(ref VBuffer input, BufferBuilder } } - private static void FillValues(ref VBuffer input, BufferBuilder bldr, TFloat[] scale, + private static void FillValues(in VBuffer input, BufferBuilder bldr, TFloat[] scale, TFloat[] offset, int[] nz) { Contracts.Assert(input.Length == scale.Length); @@ -971,14 +971,14 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Host.Check(dst.Length == Mean.Length); - FillValues(ref dst, bldr, Mean, Stddev, UseLog); + FillValues(in dst, bldr, Mean, Stddev, UseLog); bldr.GetResult(ref dst); }; return del; } - private static void FillValues(ref VBuffer input, BufferBuilder bldr, TFloat[] mean, + private static void FillValues(in VBuffer input, BufferBuilder bldr, TFloat[] mean, TFloat[] stddev, bool useLog) { Contracts.Assert(input.Length == mean.Length); @@ -1188,12 +1188,12 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Host.Check(dst.Length == _binUpperBounds.Length); - GetResult(ref dst, ref dst, bldr); + GetResult(in dst, ref dst, bldr); }; return del; } - private void GetResult(ref VBuffer input, ref VBuffer value, BufferBuilder bldr) + private void GetResult(in VBuffer input, ref VBuffer value, BufferBuilder bldr) { Contracts.Assert(input.Length == _binUpperBounds.Length); int size = _binUpperBounds.Length; @@ -1415,12 +1415,12 @@ protected MinMaxOneColumnFunctionBuilderBase(IHost host, long lim, bool fix, Val _buffer = new VBuffer(1, new TFloat[1]); } - protected override bool 
ProcessValue(ref TFloat val) + protected override bool ProcessValue(in TFloat val) { - if (!base.ProcessValue(ref val)) + if (!base.ProcessValue(in val)) return false; _buffer.Values[0] = val; - Aggregator.ProcessValue(ref _buffer); + Aggregator.ProcessValue(in _buffer); return true; } } @@ -1462,14 +1462,14 @@ protected MinMaxVecColumnFunctionBuilderBase(IHost host, int cv, long lim, bool Aggregator = new MinMaxDblAggregator(cv); } - protected override bool ProcessValue(ref VBuffer buffer) + protected override bool ProcessValue(in VBuffer buffer) { - if (!base.ProcessValue(ref buffer)) + if (!base.ProcessValue(in buffer)) return false; var size = Aggregator.Min.Length; if (buffer.Length != size) throw Host.Except("Normalizer expected {0} slots but got {1}", size, buffer.Length); - Aggregator.ProcessValue(ref buffer); + Aggregator.ProcessValue(in buffer); return true; } } @@ -1559,12 +1559,12 @@ public static IColumnFunctionBuilder Create(NormalizingEstimator.LogMeanVarColum return new MeanVarOneColumnFunctionBuilder(host, lim, false, getter, true, column.UseCdf); } - protected override bool ProcessValue(ref TFloat origVal) + protected override bool ProcessValue(in TFloat origVal) { - if (!base.ProcessValue(ref origVal)) + if (!base.ProcessValue(in origVal)) return false; _buffer.Values[0] = origVal; - _aggregator.ProcessValue(ref _buffer); + _aggregator.ProcessValue(in _buffer); return true; } @@ -1635,12 +1635,12 @@ public static IColumnFunctionBuilder Create(NormalizingEstimator.LogMeanVarColum return new MeanVarVecColumnFunctionBuilder(host, cv, lim, false, getter, true, column.UseCdf); } - protected override bool ProcessValue(ref VBuffer buffer) + protected override bool ProcessValue(in VBuffer buffer) { - if (!base.ProcessValue(ref buffer)) + if (!base.ProcessValue(in buffer)) return false; - _aggregator.ProcessValue(ref buffer); + _aggregator.ProcessValue(in buffer); return true; } @@ -1745,9 +1745,9 @@ public static IColumnFunctionBuilder Create(NormalizingEstimator.BinningColumn c return new BinOneColumnFunctionBuilder(host, lim, fix, numBins, getter); } - protected override bool ProcessValue(ref TFloat val) + protected override bool ProcessValue(in TFloat val) { - if (!base.ProcessValue(ref val)) + if (!base.ProcessValue(in val)) return false; if (val != 0) _values.Add(val); @@ -1795,9 +1795,9 @@ public static IColumnFunctionBuilder Create(NormalizingEstimator.BinningColumn c return new BinVecColumnFunctionBuilder(host, cv, lim, fix, numBins, getter); } - protected override bool ProcessValue(ref VBuffer buffer) + protected override bool ProcessValue(in VBuffer buffer) { - if (!base.ProcessValue(ref buffer)) + if (!base.ProcessValue(in buffer)) return false; int size = _values.Length; @@ -1857,7 +1857,7 @@ private SupervisedBinOneColumnFunctionBuilder(IHost host, long lim, bool fix, in _minBinSize = minBinSize; } - protected override bool AcceptColumnValue(ref TFloat colValue) + protected override bool AcceptColumnValue(in TFloat colValue) { return !TFloat.IsNaN(colValue); } @@ -1895,7 +1895,7 @@ private SupervisedBinVecColumnFunctionBuilder(IHost host, long lim, bool fix, in _minBinSize = minBinSize; } - protected override bool AcceptColumnValue(ref VBuffer colValuesBuffer) + protected override bool AcceptColumnValue(in VBuffer colValuesBuffer) { return !colValuesBuffer.Values.Any(TFloat.IsNaN); } diff --git a/src/Microsoft.ML.Data/Transforms/NormalizeColumnSng.cs b/src/Microsoft.ML.Data/Transforms/NormalizeColumnSng.cs index c466353cbb..7afba0b83b 100644 --- 
a/src/Microsoft.ML.Data/Transforms/NormalizeColumnSng.cs +++ b/src/Microsoft.ML.Data/Transforms/NormalizeColumnSng.cs @@ -351,7 +351,7 @@ public long[] Count get { return _vCount; } } - public void ProcessValue(ref VBuffer value) + public void ProcessValue(in VBuffer value) { var size = _min.Length; Contracts.Check(value.Length == size); @@ -455,7 +455,7 @@ public Double[] M2 get { return _m2; } } - public void ProcessValue(ref VBuffer value) + public void ProcessValue(in VBuffer value) { _trainCount++; var size = _mean.Length; @@ -673,7 +673,7 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Contracts.Check(dst.Length == Scale.Length); - FillValues(ref dst, bldr, Scale); + FillValues(in dst, bldr, Scale); bldr.GetResult(ref dst); }; } @@ -683,7 +683,7 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Contracts.Check(dst.Length == Scale.Length); - FillValues(ref dst, bldr, Scale, Offset); + FillValues(in dst, bldr, Scale, Offset); bldr.GetResult(ref dst); }; } @@ -693,7 +693,7 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Contracts.Check(dst.Length == Scale.Length); - FillValues(ref dst, bldr, Scale, Offset, IndicesNonZeroOffset); + FillValues(in dst, bldr, Scale, Offset, IndicesNonZeroOffset); bldr.GetResult(ref dst); }; } @@ -702,7 +702,7 @@ public override Delegate GetGetter(IRow input, int icol) } // REVIEW: Change to normalize in place. when there are no offsets. - private static void FillValues(ref VBuffer input, BufferBuilder bldr, TFloat[] scale) + private static void FillValues(in VBuffer input, BufferBuilder bldr, TFloat[] scale) { Contracts.Assert(input.Length == scale.Length); int size = scale.Length; @@ -732,7 +732,7 @@ private static void FillValues(ref VBuffer input, BufferBuilder } } - private static void FillValues(ref VBuffer input, BufferBuilder bldr, TFloat[] scale, + private static void FillValues(in VBuffer input, BufferBuilder bldr, TFloat[] scale, TFloat[] offset) { Contracts.Assert(input.Length == scale.Length); @@ -777,7 +777,7 @@ private static void FillValues(ref VBuffer input, BufferBuilder } } - private static void FillValues(ref VBuffer input, BufferBuilder bldr, TFloat[] scale, + private static void FillValues(in VBuffer input, BufferBuilder bldr, TFloat[] scale, TFloat[] offset, int[] nz) { Contracts.Assert(input.Length == scale.Length); @@ -971,14 +971,14 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Host.Check(dst.Length == Mean.Length); - FillValues(ref dst, bldr, Mean, Stddev, UseLog); + FillValues(in dst, bldr, Mean, Stddev, UseLog); bldr.GetResult(ref dst); }; return del; } - private static void FillValues(ref VBuffer input, BufferBuilder bldr, TFloat[] mean, + private static void FillValues(in VBuffer input, BufferBuilder bldr, TFloat[] mean, TFloat[] stddev, bool useLog) { Contracts.Assert(input.Length == mean.Length); @@ -1188,12 +1188,12 @@ public override Delegate GetGetter(IRow input, int icol) { getSrc(ref dst); Host.Check(dst.Length == _binUpperBounds.Length); - GetResult(ref dst, ref dst, bldr); + GetResult(in dst, ref dst, bldr); }; return del; } - private void GetResult(ref VBuffer input, ref VBuffer value, BufferBuilder bldr) + private void GetResult(in VBuffer input, ref VBuffer value, BufferBuilder bldr) { Contracts.Assert(input.Length == _binUpperBounds.Length); int size = _binUpperBounds.Length; @@ -1415,12 +1415,12 @@ protected MinMaxOneColumnFunctionBuilderBase(IHost host, long lim, bool fix, Val _buffer = new 
VBuffer(1, new TFloat[1]); } - protected override bool ProcessValue(ref TFloat val) + protected override bool ProcessValue(in TFloat val) { - if (!base.ProcessValue(ref val)) + if (!base.ProcessValue(in val)) return false; _buffer.Values[0] = val; - Aggregator.ProcessValue(ref _buffer); + Aggregator.ProcessValue(in _buffer); return true; } } @@ -1462,14 +1462,14 @@ protected MinMaxVecColumnFunctionBuilderBase(IHost host, int cv, long lim, bool Aggregator = new MinMaxSngAggregator(cv); } - protected override bool ProcessValue(ref VBuffer buffer) + protected override bool ProcessValue(in VBuffer buffer) { - if (!base.ProcessValue(ref buffer)) + if (!base.ProcessValue(in buffer)) return false; var size = Aggregator.Min.Length; if (buffer.Length != size) throw Host.Except("Normalizer expected {0} slots but got {1}", size, buffer.Length); - Aggregator.ProcessValue(ref buffer); + Aggregator.ProcessValue(in buffer); return true; } } @@ -1559,12 +1559,12 @@ public static IColumnFunctionBuilder Create(NormalizingEstimator.LogMeanVarColum return new MeanVarOneColumnFunctionBuilder(host, lim, false, getter, true, column.UseCdf); } - protected override bool ProcessValue(ref TFloat origVal) + protected override bool ProcessValue(in TFloat origVal) { - if (!base.ProcessValue(ref origVal)) + if (!base.ProcessValue(in origVal)) return false; _buffer.Values[0] = origVal; - _aggregator.ProcessValue(ref _buffer); + _aggregator.ProcessValue(in _buffer); return true; } @@ -1635,12 +1635,12 @@ public static IColumnFunctionBuilder Create(NormalizingEstimator.LogMeanVarColum return new MeanVarVecColumnFunctionBuilder(host, cv, lim, false, getter, true, column.UseCdf); } - protected override bool ProcessValue(ref VBuffer buffer) + protected override bool ProcessValue(in VBuffer buffer) { - if (!base.ProcessValue(ref buffer)) + if (!base.ProcessValue(in buffer)) return false; - _aggregator.ProcessValue(ref buffer); + _aggregator.ProcessValue(in buffer); return true; } @@ -1745,9 +1745,9 @@ public static IColumnFunctionBuilder Create(NormalizingEstimator.BinningColumn c return new BinOneColumnFunctionBuilder(host, lim, fix, numBins, getter); } - protected override bool ProcessValue(ref TFloat val) + protected override bool ProcessValue(in TFloat val) { - if (!base.ProcessValue(ref val)) + if (!base.ProcessValue(in val)) return false; if (val != 0) _values.Add(val); @@ -1795,9 +1795,9 @@ public static IColumnFunctionBuilder Create(NormalizingEstimator.BinningColumn c return new BinVecColumnFunctionBuilder(host, cv, lim, fix, numBins, getter); } - protected override bool ProcessValue(ref VBuffer buffer) + protected override bool ProcessValue(in VBuffer buffer) { - if (!base.ProcessValue(ref buffer)) + if (!base.ProcessValue(in buffer)) return false; int size = _values.Length; @@ -1857,7 +1857,7 @@ private SupervisedBinOneColumnFunctionBuilder(IHost host, long lim, bool fix, in _minBinSize = minBinSize; } - protected override bool AcceptColumnValue(ref TFloat colValue) + protected override bool AcceptColumnValue(in TFloat colValue) { return !TFloat.IsNaN(colValue); } @@ -1895,7 +1895,7 @@ private SupervisedBinVecColumnFunctionBuilder(IHost host, long lim, bool fix, in _minBinSize = minBinSize; } - protected override bool AcceptColumnValue(ref VBuffer colValuesBuffer) + protected override bool AcceptColumnValue(in VBuffer colValuesBuffer) { return !colValuesBuffer.Values.Any(TFloat.IsNaN); } diff --git a/src/Microsoft.ML.Data/Transforms/NormalizeUtils.cs b/src/Microsoft.ML.Data/Transforms/NormalizeUtils.cs index 
afa5dea59d..8c01d69c74 100644 --- a/src/Microsoft.ML.Data/Transforms/NormalizeUtils.cs +++ b/src/Microsoft.ML.Data/Transforms/NormalizeUtils.cs @@ -44,7 +44,7 @@ public interface IColumnAggregator /// /// Updates the aggregate function with a value /// - void ProcessValue(ref T val); + void ProcessValue(in T val); /// /// Finishes the aggregation diff --git a/src/Microsoft.ML.Data/Transforms/ShuffleTransform.cs b/src/Microsoft.ML.Data/Transforms/ShuffleTransform.cs index b674a7fe8b..e3ecc922cf 100644 --- a/src/Microsoft.ML.Data/Transforms/ShuffleTransform.cs +++ b/src/Microsoft.ML.Data/Transforms/ShuffleTransform.cs @@ -406,7 +406,7 @@ public ImplVec(int bufferSize, Delegate getter) { } - protected override void Copy(ref VBuffer src, ref VBuffer dst) + protected override void Copy(in VBuffer src, ref VBuffer dst) { src.CopyTo(ref dst); } @@ -419,7 +419,7 @@ public ImplOne(int bufferSize, Delegate getter) { } - protected override void Copy(ref T src, ref T dst) + protected override void Copy(in T src, ref T dst) { dst = src; } @@ -452,10 +452,10 @@ public override void Fill(int idx) public void Fetch(int idx, ref T value) { Contracts.Assert(0 <= idx && idx < Buffer.Length); - Copy(ref Buffer[idx], ref value); + Copy(in Buffer[idx], ref value); } - protected abstract void Copy(ref T src, ref T dst); + protected abstract void Copy(in T src, ref T dst); } // The number of examples to have in each synchronization block. This should be >= 1. diff --git a/src/Microsoft.ML.Data/Transforms/TermTransformImpl.cs b/src/Microsoft.ML.Data/Transforms/TermTransformImpl.cs index 7cc2f18c6a..1f7c5475b0 100644 --- a/src/Microsoft.ML.Data/Transforms/TermTransformImpl.cs +++ b/src/Microsoft.ML.Data/Transforms/TermTransformImpl.cs @@ -708,7 +708,7 @@ internal override void Save(ModelSaveContext ctx, IHostEnvironment host, CodecFa for (int i = 0; i < _values.Count; ++i) { T val = _values.GetItem(i); - writer.Write(ref val); + writer.Write(in val); } writer.Commit(); } @@ -771,7 +771,7 @@ protected TermMap(PrimitiveType type, int count) public abstract void GetTerms(ref VBuffer dst); } - private static void GetTextTerms(ref VBuffer src, ValueMapper stringMapper, ref VBuffer> dst) + private static void GetTextTerms(in VBuffer src, ValueMapper stringMapper, ref VBuffer> dst) { // REVIEW: This convenience function is not optimized. For non-string // types, creating a whole bunch of string objects on the heap is one that is @@ -1055,7 +1055,7 @@ public override void AddMetadata(Schema.Metadata.Builder builder) // No buffer sharing convenient here. 
VBuffer dstT = default; TypedMap.GetTerms(ref dstT); - GetTextTerms(ref dstT, stringMapper, ref dst); + GetTextTerms(in dstT, stringMapper, ref dst); }; builder.AddKeyValues(TypedMap.OutputType.KeyCount, TextType.Instance, getter); } @@ -1147,7 +1147,7 @@ private bool AddMetadataCore(ColumnType srcMetaType, Schema.Metadata.Buil var tempMeta = default(VBuffer); getter(ref tempMeta); Contracts.Assert(tempMeta.IsDense); - GetTextTerms(ref tempMeta, stringMapper, ref dst); + GetTextTerms(in tempMeta, stringMapper, ref dst); _host.Assert(dst.Length == TypedMap.OutputType.KeyCount); }; builder.AddKeyValues(TypedMap.OutputType.KeyCount, TextType.Instance, mgetter); diff --git a/src/Microsoft.ML.Ensemble/EnsembleUtils.cs b/src/Microsoft.ML.Ensemble/EnsembleUtils.cs index ae6c2adac6..6340c719eb 100644 --- a/src/Microsoft.ML.Ensemble/EnsembleUtils.cs +++ b/src/Microsoft.ML.Ensemble/EnsembleUtils.cs @@ -31,7 +31,7 @@ public static RoleMappedData SelectFeatures(IHost host, RoleMappedData data, Bit var name = data.Schema.Feature.Name; var view = LambdaColumnMapper.Create( host, "FeatureSelector", data.Data, name, name, type, type, - (ref VBuffer src, ref VBuffer dst) => SelectFeatures(ref src, features, card, ref dst)); + (ref VBuffer src, ref VBuffer dst) => SelectFeatures(in src, features, card, ref dst)); var res = new RoleMappedData(view, data.Schema.GetColumnRoleNames()); return res; @@ -41,7 +41,7 @@ public static RoleMappedData SelectFeatures(IHost host, RoleMappedData data, Bit /// Fill dst with values selected from src if the indices of the src values are set in includedIndices, /// otherwise assign default(T). The length of dst will be equal to src.Length. /// - public static void SelectFeatures(ref VBuffer src, BitArray includedIndices, int cardinality, ref VBuffer dst) + public static void SelectFeatures(in VBuffer src, BitArray includedIndices, int cardinality, ref VBuffer dst) { Contracts.Assert(Utils.Size(includedIndices) == src.Length); Contracts.Assert(cardinality == Utils.GetCardinality(includedIndices)); diff --git a/src/Microsoft.ML.Ensemble/OutputCombiners/BaseMultiAverager.cs b/src/Microsoft.ML.Ensemble/OutputCombiners/BaseMultiAverager.cs index 64ec41d613..15985316fc 100644 --- a/src/Microsoft.ML.Ensemble/OutputCombiners/BaseMultiAverager.cs +++ b/src/Microsoft.ML.Ensemble/OutputCombiners/BaseMultiAverager.cs @@ -49,7 +49,7 @@ protected void CombineCore(ref VBuffer dst, VBuffer[] src, Singl { weightTotal = src.Length; for (int i = 0; i < src.Length; i++) - VectorUtils.Add(ref src[i], ref dst); + VectorUtils.Add(in src[i], ref dst); } else { @@ -58,7 +58,7 @@ protected void CombineCore(ref VBuffer dst, VBuffer[] src, Singl { var w = weights[i]; weightTotal += w; - VectorUtils.AddMult(ref src[i], w, ref dst); + VectorUtils.AddMult(in src[i], w, ref dst); } } diff --git a/src/Microsoft.ML.Ensemble/OutputCombiners/BaseMultiCombiner.cs b/src/Microsoft.ML.Ensemble/OutputCombiners/BaseMultiCombiner.cs index 1258313df1..cea9c698ae 100644 --- a/src/Microsoft.ML.Ensemble/OutputCombiners/BaseMultiCombiner.cs +++ b/src/Microsoft.ML.Ensemble/OutputCombiners/BaseMultiCombiner.cs @@ -86,7 +86,7 @@ protected bool TryNormalize(VBuffer[] values) for (int i = 0; i < values.Length; i++) { // Leave a zero vector as all zeros. Otherwise, make the L1 norm equal to 1. 
- var sum = VectorUtils.L1Norm(ref values[i]); + var sum = VectorUtils.L1Norm(in values[i]); if (!FloatUtils.IsFinite(sum)) return false; if (sum > 0) diff --git a/src/Microsoft.ML.Ensemble/OutputCombiners/BaseStacking.cs b/src/Microsoft.ML.Ensemble/OutputCombiners/BaseStacking.cs index f1fb96f3eb..75d0989828 100644 --- a/src/Microsoft.ML.Ensemble/OutputCombiners/BaseStacking.cs +++ b/src/Microsoft.ML.Ensemble/OutputCombiners/BaseStacking.cs @@ -159,7 +159,7 @@ public void Train(List>> models, var model = models[i]; if (model.SelectedFeatures != null) { - EnsembleUtils.SelectFeatures(ref cursor.Features, model.SelectedFeatures, model.Cardinality, ref vBuffers[i]); + EnsembleUtils.SelectFeatures(in cursor.Features, model.SelectedFeatures, model.Cardinality, ref vBuffers[i]); maps[i](ref vBuffers[i], ref predictions[i]); } else diff --git a/src/Microsoft.ML.Ensemble/OutputCombiners/MultiVoting.cs b/src/Microsoft.ML.Ensemble/OutputCombiners/MultiVoting.cs index 0c68f70287..75fd4f5222 100644 --- a/src/Microsoft.ML.Ensemble/OutputCombiners/MultiVoting.cs +++ b/src/Microsoft.ML.Ensemble/OutputCombiners/MultiVoting.cs @@ -91,7 +91,7 @@ private void CombineCore(ref VBuffer dst, VBuffer[] src, Single[ int voteCount = 0; for (int i = 0; i < count; i++) { - int index = VectorUtils.ArgMax(ref src[i]); + int index = VectorUtils.ArgMax(in src[i]); if (index >= 0) { values[index]++; diff --git a/src/Microsoft.ML.Ensemble/PipelineEnsemble.cs b/src/Microsoft.ML.Ensemble/PipelineEnsemble.cs index bacc130874..b6207e1d45 100644 --- a/src/Microsoft.ML.Ensemble/PipelineEnsemble.cs +++ b/src/Microsoft.ML.Ensemble/PipelineEnsemble.cs @@ -659,13 +659,13 @@ private static int CheckKeyLabelColumnCore(IHostEnvironment env, IPredictorMo if (!mdType.Equals(keyValuesType)) throw env.Except("Label column of model {0} has different key value type than model 0", i); rmd.Schema.Schema.GetMetadata(MetadataUtils.Kinds.KeyValues, labelInfo.Index, ref curLabelNames); - if (!AreEqual(ref labelNames, ref curLabelNames)) + if (!AreEqual(in labelNames, in curLabelNames)) throw env.Except("Label of model {0} has different values than model 0", i); } return classCount; } - private static bool AreEqual(ref VBuffer v1, ref VBuffer v2) + private static bool AreEqual(in VBuffer v1, in VBuffer v2) where T : IEquatable { if (v1.Length != v2.Length) diff --git a/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/BaseDisagreementDiversityMeasure.cs b/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/BaseDisagreementDiversityMeasure.cs index 6a34da757a..52d5dd9a58 100644 --- a/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/BaseDisagreementDiversityMeasure.cs +++ b/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/BaseDisagreementDiversityMeasure.cs @@ -27,7 +27,7 @@ public List> CalculateDiversityMeasure(IList() { @@ -40,6 +40,6 @@ public List> CalculateDiversityMeasure(IList 0 && valueY < 0 || valueX < 0 && valueY > 0) ? 
1 : 0; } diff --git a/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/MultiDisagreementDiversityMeasure.cs b/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/MultiDisagreementDiversityMeasure.cs index fe4632791d..424e2a328d 100644 --- a/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/MultiDisagreementDiversityMeasure.cs +++ b/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/MultiDisagreementDiversityMeasure.cs @@ -18,9 +18,9 @@ public class MultiDisagreementDiversityMeasure : BaseDisagreementDiversityMeasur { public const string LoadName = "MultiDisagreementDiversityMeasure"; - protected override Single GetDifference(ref VBuffer valueX, ref VBuffer valueY) + protected override Single GetDifference(in VBuffer valueX, in VBuffer valueY) { - return (VectorUtils.ArgMax(ref valueX) != VectorUtils.ArgMax(ref valueY)) ? 1 : 0; + return (VectorUtils.ArgMax(in valueX) != VectorUtils.ArgMax(in valueY)) ? 1 : 0; } } } diff --git a/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/RegressionDisagreementDiversityMeasure.cs b/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/RegressionDisagreementDiversityMeasure.cs index 62724d387e..68f882d19c 100644 --- a/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/RegressionDisagreementDiversityMeasure.cs +++ b/src/Microsoft.ML.Ensemble/Selector/DiversityMeasure/RegressionDisagreementDiversityMeasure.cs @@ -16,7 +16,7 @@ public class RegressionDisagreementDiversityMeasure : BaseDisagreementDiversityM { public const string LoadName = "RegressionDisagreementDiversityMeasure"; - protected override Single GetDifference(ref Single valueX, ref Single valueY) + protected override Single GetDifference(in Single valueX, in Single valueY) { return Math.Abs(valueX - valueY); } diff --git a/src/Microsoft.ML.Ensemble/Trainer/EnsembleDistributionPredictor.cs b/src/Microsoft.ML.Ensemble/Trainer/EnsembleDistributionPredictor.cs index a15563a289..b236448870 100644 --- a/src/Microsoft.ML.Ensemble/Trainer/EnsembleDistributionPredictor.cs +++ b/src/Microsoft.ML.Ensemble/Trainer/EnsembleDistributionPredictor.cs @@ -141,7 +141,7 @@ public ValueMapper GetMapper() var model = Models[i]; if (model.SelectedFeatures != null) { - EnsembleUtils.SelectFeatures(ref tmp, model.SelectedFeatures, model.Cardinality, ref vBuffers[i]); + EnsembleUtils.SelectFeatures(in tmp, model.SelectedFeatures, model.Cardinality, ref vBuffers[i]); maps[i](ref vBuffers[i], ref predictions[i], ref probabilities[i]); } else @@ -179,7 +179,7 @@ public ValueMapper GetMapper() var model = Models[i]; if (model.SelectedFeatures != null) { - EnsembleUtils.SelectFeatures(ref tmp, model.SelectedFeatures, model.Cardinality, ref vBuffers[i]); + EnsembleUtils.SelectFeatures(in tmp, model.SelectedFeatures, model.Cardinality, ref vBuffers[i]); maps[i](ref vBuffers[i], ref predictions[i], ref probabilities[i]); } else diff --git a/src/Microsoft.ML.Ensemble/Trainer/EnsemblePredictor.cs b/src/Microsoft.ML.Ensemble/Trainer/EnsemblePredictor.cs index 1f2ed87bf6..3200b298e1 100644 --- a/src/Microsoft.ML.Ensemble/Trainer/EnsemblePredictor.cs +++ b/src/Microsoft.ML.Ensemble/Trainer/EnsemblePredictor.cs @@ -133,7 +133,7 @@ public ValueMapper GetMapper() var model = Models[i]; if (model.SelectedFeatures != null) { - EnsembleUtils.SelectFeatures(ref tmp, model.SelectedFeatures, model.Cardinality, ref buffers[i]); + EnsembleUtils.SelectFeatures(in tmp, model.SelectedFeatures, model.Cardinality, ref buffers[i]); maps[i](ref buffers[i], ref predictions[i]); } else diff --git 
a/src/Microsoft.ML.Ensemble/Trainer/Multiclass/EnsembleMultiClassPredictor.cs b/src/Microsoft.ML.Ensemble/Trainer/Multiclass/EnsembleMultiClassPredictor.cs index 4dfaf3983a..a73259da6c 100644 --- a/src/Microsoft.ML.Ensemble/Trainer/Multiclass/EnsembleMultiClassPredictor.cs +++ b/src/Microsoft.ML.Ensemble/Trainer/Multiclass/EnsembleMultiClassPredictor.cs @@ -132,7 +132,7 @@ public ValueMapper GetMapper() var model = Models[i]; if (model.SelectedFeatures != null) { - EnsembleUtils.SelectFeatures(ref tmp, model.SelectedFeatures, model.Cardinality, ref features[i]); + EnsembleUtils.SelectFeatures(in tmp, model.SelectedFeatures, model.Cardinality, ref features[i]); maps[i](ref features[i], ref predictions[i]); } else diff --git a/src/Microsoft.ML.FastTree/BinFile/BinFinder.cs b/src/Microsoft.ML.FastTree/BinFile/BinFinder.cs index c0eb97f47d..782d85f24a 100644 --- a/src/Microsoft.ML.FastTree/BinFile/BinFinder.cs +++ b/src/Microsoft.ML.FastTree/BinFile/BinFinder.cs @@ -49,7 +49,7 @@ public BinFinder() /// values detected within /// The logical length of both and /// - private int FindDistinctCounts(ref VBuffer values, double[] distinctValues, int[] counts) + private int FindDistinctCounts(in VBuffer values, double[] distinctValues, int[] counts) { if (values.Count == 0) { @@ -219,7 +219,7 @@ private bool IsTrivial(int[] distinctCounts, int numDistinct, int minPerLeaf) /// The calculated upper bound of each bin /// Whether finding the bins is successful. If there were NaN values in , /// this will return false and the output arrays will be null. Otherwise it will return true. - public bool FindBins(ref VBuffer values, int maxBins, int minPerLeaf, out double[] binUpperBounds) + public bool FindBins(in VBuffer values, int maxBins, int minPerLeaf, out double[] binUpperBounds) { Contracts.Assert(maxBins > 0); Contracts.Assert(minPerLeaf >= 0); @@ -234,7 +234,7 @@ public bool FindBins(ref VBuffer values, int maxBins, int minPerLeaf, ou Utils.EnsureSize(ref _distinctValues, arraySize, arraySize, keepOld: false); Utils.EnsureSize(ref _counts, arraySize, arraySize, keepOld: false); - int numValues = FindDistinctCounts(ref values, _distinctValues, _counts); + int numValues = FindDistinctCounts(in values, _distinctValues, _counts); if (numValues < 0) { binUpperBounds = null; diff --git a/src/Microsoft.ML.FastTree/FastTree.cs b/src/Microsoft.ML.FastTree/FastTree.cs index e86cca32de..8c5f121667 100644 --- a/src/Microsoft.ML.FastTree/FastTree.cs +++ b/src/Microsoft.ML.FastTree/FastTree.cs @@ -1028,17 +1028,13 @@ protected void GetFeatureIniContent(RoleMappedData data, ref VBufferThe values for one particular feature value across all examples /// The maximum number of bins to find /// - /// The working array of distinct values, a temporary buffer that should be called - /// to multiple invocations of this method, but not meant to be useful to the caller. This method will reallocate - /// the array to a new size if necessary. Passing in null at first is acceptable. - /// Similar working array, but for distinct counts /// The bin upper bounds, maximum length will be /// Whether finding the bins was successful or not. It will be unsuccessful iff /// has any missing values. In that event, the out parameters will be left as null. 
- protected static bool CalculateBins(BinFinder binFinder, ref VBuffer values, int maxBins, int minDocsPerLeaf, - ref double[] distinctValues, ref int[] distinctCounts, out double[] upperBounds) + protected static bool CalculateBins(BinFinder binFinder, in VBuffer values, int maxBins, int minDocsPerLeaf, + out double[] upperBounds) { - return binFinder.FindBins(ref values, maxBins, minDocsPerLeaf, out upperBounds); + return binFinder.FindBins(in values, maxBins, minDocsPerLeaf, out upperBounds); } private static IEnumerable> NonZeroBinnedValuesForSparse(VBuffer values, Double[] binUpperBounds) @@ -1067,7 +1063,7 @@ private FeatureFlockBase CreateOneHotFlock(IChannel ch, int fi = features[0]; var values = instanceList[fi]; values.CopyTo(NumExamples, ref temp); - return CreateSingletonFlock(ch, ref temp, binnedValues, BinUpperBounds[fi]); + return CreateSingletonFlock(ch, in temp, binnedValues, BinUpperBounds[fi]); } // Multiple, one hot. int[] hotFeatureStarts = new int[features.Count + 1]; @@ -1220,7 +1216,7 @@ private FeatureFlockBase CreateOneHotFlockCategorical(IChannel ch, /// A working array of length equal to the length of the input feature vector /// The upper bounds of the binning of this feature. /// A derived binned derived feature vector. - protected static SingletonFeatureFlock CreateSingletonFlock(IChannel ch, ref VBuffer values, int[] binnedValues, + protected static SingletonFeatureFlock CreateSingletonFlock(IChannel ch, in VBuffer values, int[] binnedValues, Double[] binUpperBounds) { Contracts.AssertValue(ch); @@ -1445,8 +1441,6 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB // Perhaps we should change the binning to just work over singles. VBuffer doubleTemp = default(VBuffer); - double[] distinctValues = null; - int[] distinctCounts = null; var copier = GetCopier(NumberType.Float, NumberType.R8); int iFeature = 0; pch.SetHeader(new ProgressHeader("features"), e => e.SetProgress(0, iFeature, features.Length)); @@ -1469,8 +1463,7 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB finder = finder ?? new BinFinder(); // Must copy over, as bin calculation is potentially destructive. copier(ref temp, ref doubleTemp); - hasMissing = !CalculateBins(finder, ref doubleTemp, maxBins, 0, - ref distinctValues, ref distinctCounts, + hasMissing = !CalculateBins(finder, in doubleTemp, maxBins, 0, out BinUpperBounds[iFeature]); } else @@ -1594,7 +1587,7 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB if (upperBounds.Length == 1) continue; //trivial feature, skip it. - flocks.Add(CreateSingletonFlock(ch, ref doubleTemp, binnedValues, upperBounds)); + flocks.Add(CreateSingletonFlock(ch, in doubleTemp, binnedValues, upperBounds)); } } } @@ -1608,7 +1601,7 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB if (upperBounds.Length == 1) continue; //trivial feature, skip it. 
- flocks.Add(CreateSingletonFlock(ch, ref doubleTemp, binnedValues, upperBounds)); + flocks.Add(CreateSingletonFlock(ch, in doubleTemp, binnedValues, upperBounds)); } } @@ -1650,7 +1643,7 @@ private Dataset Construct(RoleMappedData examples, ref int numExamples, int maxB trans.GetSingleSlotValue(groupIdx, ref groupIds); slotDropper?.DropSlots(ref groupIds, ref groupIds); - ConstructBoundariesAndQueryIds(ref groupIds, out boundaries, out qids); + ConstructBoundariesAndQueryIds(in groupIds, out boundaries, out qids); } else { @@ -1750,7 +1743,7 @@ private static SlotDropper ConstructDropSlotRanges(ISlotCursor cursor, return new SlotDropper(temp.Length, minSlots.ToArray(), maxSlots.ToArray()); } - private static void ConstructBoundariesAndQueryIds(ref VBuffer groupIds, out int[] boundariesArray, out ulong[] qidsArray) + private static void ConstructBoundariesAndQueryIds(in VBuffer groupIds, out int[] boundariesArray, out ulong[] qidsArray) { List qids = new List(); List boundaries = new List(); @@ -1942,8 +1935,6 @@ private void InitializeBins(int maxBins, IParallelTraining parallelTraining) BinFinder binFinder = new BinFinder(); VBuffer temp = default(VBuffer); int len = _numExamples; - double[] distinctValues = null; - int[] distinctCounts = null; bool[] localConstructBinFeatures = parallelTraining.GetLocalBinConstructionFeatures(NumFeatures); int iFeature = 0; pch.SetHeader(new ProgressHeader("features"), e => e.SetProgress(0, iFeature, NumFeatures)); @@ -1957,8 +1948,7 @@ private void InitializeBins(int maxBins, IParallelTraining parallelTraining) // REVIEW: In principle we could also put the min docs per leaf information // into here, and collapse bins somehow as we determine the bins, so that "trivial" // bins on the head or tail of the bin distribution are never actually considered. 
- CalculateBins(binFinder, ref temp, maxBins, _minDocsPerLeaf, - ref distinctValues, ref distinctCounts, + CalculateBins(binFinder, in temp, maxBins, _minDocsPerLeaf, out double[] binUpperBounds); BinUpperBounds[iFeature] = binUpperBounds; } @@ -2118,7 +2108,7 @@ private IEnumerable CreateFlocksCore(IChannel ch, IProgressCha var values = _instanceList[iFeature]; _instanceList[iFeature] = null; values.CopyTo(NumExamples, ref temp); - yield return CreateSingletonFlock(ch, ref temp, binnedValues, bup); + yield return CreateSingletonFlock(ch, in temp, binnedValues, bup); } yield break; } @@ -2936,7 +2926,7 @@ protected virtual void Map(ref VBuffer src, ref Float dst) else Host.Check(src.Length > MaxSplitFeatIdx); - dst = (Float)TrainedEnsemble.GetOutput(ref src); + dst = (Float)TrainedEnsemble.GetOutput(in src); } public ValueMapper> GetWhatTheFeatureMapper(int top, int bottom, bool normalize) @@ -2950,20 +2940,20 @@ public ValueMapper> GetWhatTheFeatureMapper(int ValueMapper, VBuffer> del = (ref VBuffer src, ref VBuffer dst) => { - WhatTheFeatureMap(ref src, ref dst, ref builder); + WhatTheFeatureMap(in src, ref dst, ref builder); Runtime.Numeric.VectorUtils.SparsifyNormalize(ref dst, top, bottom, normalize); }; return (ValueMapper>)(Delegate)del; } - private void WhatTheFeatureMap(ref VBuffer src, ref VBuffer dst, ref BufferBuilder builder) + private void WhatTheFeatureMap(in VBuffer src, ref VBuffer dst, ref BufferBuilder builder) { if (InputType.VectorSize > 0) Host.Check(src.Length == InputType.VectorSize); else Host.Check(src.Length > MaxSplitFeatIdx); - TrainedEnsemble.GetFeatureContributions(ref src, ref dst, ref builder); + TrainedEnsemble.GetFeatureContributions(in src, ref dst, ref builder); } /// @@ -3217,7 +3207,7 @@ private void SaveEnsembleAsCode(TextWriter writer, RoleMappedSchema schema) foreach (RegressionTree tree in TrainedEnsemble.Trees) { writer.Write("double treeOutput{0}=", i); - SaveTreeAsCode(tree, writer, ref names); + SaveTreeAsCode(tree, writer, in names); writer.Write(";\n"); i++; } @@ -3230,13 +3220,13 @@ private void SaveEnsembleAsCode(TextWriter writer, RoleMappedSchema schema) /// /// Convert a single tree to code, called recursively /// - private void SaveTreeAsCode(RegressionTree tree, TextWriter writer, ref VBuffer> names) + private void SaveTreeAsCode(RegressionTree tree, TextWriter writer, in VBuffer> names) { - ToCSharp(tree, writer, 0, ref names); + ToCSharp(tree, writer, 0, in names); } // converts a subtree into a C# expression - private void ToCSharp(RegressionTree tree, TextWriter writer, int node, ref VBuffer> names) + private void ToCSharp(RegressionTree tree, TextWriter writer, int node, in VBuffer> names) { if (node < 0) { @@ -3250,9 +3240,9 @@ private void ToCSharp(RegressionTree tree, TextWriter writer, int node, ref VBuf name = $"f{tree.SplitFeature(node)}"; writer.Write("(({0} > {1}) ? ", name, FloatUtils.ToRoundTripString(tree.RawThreshold(node))); - ToCSharp(tree, writer, tree.GetGtChildForNode(node), ref names); + ToCSharp(tree, writer, tree.GetGtChildForNode(node), in names); writer.Write(" : "); - ToCSharp(tree, writer, tree.GetLteChildForNode(node), ref names); + ToCSharp(tree, writer, tree.GetLteChildForNode(node), in names); writer.Write(")"); } } @@ -3310,9 +3300,9 @@ public Float GetLeafValue(int treeId, int leafId) /// internal nodes in the path from the root to that leaf. If 'path' is null a new list is initialized. All elements /// in 'path' are cleared before filling in the current path nodes. 
/// - public int GetLeaf(int treeId, ref VBuffer features, ref List path) + public int GetLeaf(int treeId, in VBuffer features, ref List path) { - return TrainedEnsemble.GetTreeAt(treeId).GetLeaf(ref features, ref path); + return TrainedEnsemble.GetTreeAt(treeId).GetLeaf(in features, ref path); } public IRow GetSummaryIRowOrNull(RoleMappedSchema schema) @@ -3350,9 +3340,9 @@ public Tree(RegressionTree regTree) public int NumLeaves => _regTree.NumLeaves; - public int GetLeaf(ref VBuffer feat) + public int GetLeaf(in VBuffer feat) { - return _regTree.GetLeaf(ref feat); + return _regTree.GetLeaf(in feat); } public INode GetNode(int nodeId, bool isLeaf, IEnumerable featuresNames = null) diff --git a/src/Microsoft.ML.FastTree/GamTrainer.cs b/src/Microsoft.ML.FastTree/GamTrainer.cs index e4318187d0..b7b2e0287f 100644 --- a/src/Microsoft.ML.FastTree/GamTrainer.cs +++ b/src/Microsoft.ML.FastTree/GamTrainer.cs @@ -832,7 +832,7 @@ private void Map(ref VBuffer features, ref float response) /// is used as a buffer to accumulate the contributions across trees. /// If is null, it will be created, otherwise it will be reused. /// - internal void GetFeatureContributions(ref VBuffer features, ref VBuffer contribs, ref BufferBuilder builder) + internal void GetFeatureContributions(in VBuffer features, ref VBuffer contribs, ref BufferBuilder builder) { if (builder == null) builder = new BufferBuilder(R4Adder.Instance); @@ -879,7 +879,7 @@ internal void GetFeatureContributions(ref VBuffer features, ref VBuffer features, int[] bins) + internal double GetFeatureBinsAndScore(in VBuffer features, int[] bins) { Host.CheckParam(features.Length == _inputLength, nameof(features)); Host.CheckParam(Utils.Size(bins) == _numFeatures, nameof(bins)); @@ -1086,7 +1086,7 @@ public Context(IChannel ch, GamPredictorBase pred, RoleMappedData data, IEvaluat while (cursor.MoveNext()) { labels.Add(cursor.Label); - var score = _pred.GetFeatureBinsAndScore(ref cursor.Features, bins); + var score = _pred.GetFeatureBinsAndScore(in cursor.Features, bins); scores.Add((float)score); for (int f = 0; f < numFeatures; f++) _binDocsList[f][bins[f]].Add(doc); diff --git a/src/Microsoft.ML.FastTree/RandomForestRegression.cs b/src/Microsoft.ML.FastTree/RandomForestRegression.cs index 23b110f072..ca9af38315 100644 --- a/src/Microsoft.ML.FastTree/RandomForestRegression.cs +++ b/src/Microsoft.ML.FastTree/RandomForestRegression.cs @@ -107,7 +107,7 @@ protected override void Map(ref VBuffer src, ref float dst) else Host.Check(src.Length > MaxSplitFeatIdx); - dst = (float)TrainedEnsemble.GetOutput(ref src) / TrainedEnsemble.NumTrees; + dst = (float)TrainedEnsemble.GetOutput(in src) / TrainedEnsemble.NumTrees; } public ValueMapper, VBuffer> GetMapper(float[] quantiles) @@ -117,7 +117,7 @@ public ValueMapper, VBuffer> GetMapper(float[] quantiles) { // REVIEW: Should make this more efficient - it repeatedly allocates too much stuff. 
float[] weights = null; - var distribution = TrainedEnsemble.GetDistribution(ref src, _quantileSampleCount, out weights); + var distribution = TrainedEnsemble.GetDistribution(in src, _quantileSampleCount, out weights); var qdist = new QuantileStatistics(distribution, weights); var values = dst.Values; diff --git a/src/Microsoft.ML.FastTree/TreeEnsemble/Ensemble.cs b/src/Microsoft.ML.FastTree/TreeEnsemble/Ensemble.cs index 912876b126..5c73b01c70 100644 --- a/src/Microsoft.ML.FastTree/TreeEnsemble/Ensemble.cs +++ b/src/Microsoft.ML.FastTree/TreeEnsemble/Ensemble.cs @@ -252,15 +252,15 @@ public double GetOutput(int[] binnedInstance) return output; } - public double GetOutput(ref VBuffer feat) + public double GetOutput(in VBuffer feat) { double output = 0.0; for (int h = 0; h < NumTrees; h++) - output += _trees[h].GetOutput(ref feat); + output += _trees[h].GetOutput(in feat); return output; } - public float[] GetDistribution(ref VBuffer feat, int sampleCount, out float[] weights) + public float[] GetDistribution(in VBuffer feat, int sampleCount, out float[] weights) { var distribution = new float[sampleCount * NumTrees]; @@ -271,7 +271,7 @@ public float[] GetDistribution(ref VBuffer feat, int sampleCount, out flo for (int h = 0; h < NumTrees; h++) { - ((QuantileRegressionTree)_trees[h]).LoadSampledLabels(ref feat, distribution, + ((QuantileRegressionTree)_trees[h]).LoadSampledLabels(in feat, distribution, weights, sampleCount, h * sampleCount); } return distribution; @@ -341,7 +341,7 @@ public string ToGainSummary(FeaturesToContentMap fmap, Dictionary feat /// is used as a buffer to accumulate the contributions across trees. /// If is null, it will be created, otherwise it will be reused. /// - internal void GetFeatureContributions(ref VBuffer features, ref VBuffer contribs, ref BufferBuilder builder) + internal void GetFeatureContributions(in VBuffer features, ref VBuffer contribs, ref BufferBuilder builder) { // The feature contributions are equal to the sum of per-tree contributions. @@ -351,7 +351,7 @@ internal void GetFeatureContributions(ref VBuffer features, ref VBuffer - public void LoadSampledLabels(ref VBuffer feat, Float[] distribution, Float[] weights, int sampleCount, int destinationIndex) + public void LoadSampledLabels(in VBuffer feat, Float[] distribution, Float[] weights, int sampleCount, int destinationIndex) { - int leaf = GetLeaf(ref feat); + int leaf = GetLeaf(in feat); LoadSampledLabels(distribution, weights, sampleCount, destinationIndex, leaf); } diff --git a/src/Microsoft.ML.FastTree/TreeEnsemble/RegressionTree.cs b/src/Microsoft.ML.FastTree/TreeEnsemble/RegressionTree.cs index d76340f7ad..c0b040521e 100644 --- a/src/Microsoft.ML.FastTree/TreeEnsemble/RegressionTree.cs +++ b/src/Microsoft.ML.FastTree/TreeEnsemble/RegressionTree.cs @@ -699,11 +699,11 @@ public virtual double GetOutput(int[] binnedInstance) return GetOutput(leaf); } - public virtual double GetOutput(ref VBuffer feat) + public virtual double GetOutput(in VBuffer feat) { if (LteChild[0] == 0) return 0; - int leaf = GetLeaf(ref feat); + int leaf = GetLeaf(in feat); return GetOutput(leaf); } @@ -758,7 +758,7 @@ public int GetLeaf(int[] binnedInstance) // Returns index to a leaf an instance/document belongs to. // Input are the raw feature values in dense format. // For empty tree returns 0. - public int GetLeaf(ref VBuffer feat) + public int GetLeaf(in VBuffer feat) { // REVIEW: This really should validate feat.Length! 
if (feat.IsDense) @@ -769,7 +769,7 @@ public int GetLeaf(ref VBuffer feat) /// /// Returns leaf index the instance falls into, if we start the search from the node. /// - private int GetLeafFrom(ref VBuffer feat, int root) + private int GetLeafFrom(in VBuffer feat, int root) { if (root < 0) { @@ -787,7 +787,7 @@ private int GetLeafFrom(ref VBuffer feat, int root) /// path from the root to that leaf. If 'path' is null a new list is initialized. All elements in 'path' are cleared /// before filling in the current path nodes. /// - public int GetLeaf(ref VBuffer feat, ref List path) + public int GetLeaf(in VBuffer feat, ref List path) { // REVIEW: This really should validate feat.Length! if (path == null) @@ -1482,7 +1482,7 @@ private bool PathToLeaf(int currentNodeIndex, int leafIndex, List path) return false; } - public void AppendFeatureContributions(ref VBuffer src, BufferBuilder contributions) + public void AppendFeatureContributions(in VBuffer src, BufferBuilder contributions) { if (LteChild[0] == 0) { @@ -1491,7 +1491,7 @@ public void AppendFeatureContributions(ref VBuffer src, BufferBuilder src, BufferBuilder, float>(); float yh = default; while (cursor.MoveNext()) @@ -317,7 +317,7 @@ private OlsLinearRegressionPredictor TrainCore(IChannel ch, FloatLabelCursor.Fac // Also we can't estimate it, unless we can estimate the variance, which requires more examples than // parameters. if (!_perParameterSignificance || m >= n) - return new OlsLinearRegressionPredictor(Host, ref weights, bias, null, null, null, rSquared, rSquaredAdjusted); + return new OlsLinearRegressionPredictor(Host, in weights, bias, null, null, null, rSquared, rSquaredAdjusted); ch.Assert(!Double.IsNaN(rSquaredAdjusted)); var standardErrors = new Double[m]; @@ -364,7 +364,7 @@ private OlsLinearRegressionPredictor TrainCore(IChannel ch, FloatLabelCursor.Fac ch.Check(0 <= pValues[i] && pValues[i] <= 1, "p-Value calculated outside expected [0,1] range"); } - return new OlsLinearRegressionPredictor(Host, ref weights, bias, standardErrors, tValues, pValues, rSquared, rSquaredAdjusted); + return new OlsLinearRegressionPredictor(Host, in weights, bias, standardErrors, tValues, pValues, rSquared, rSquaredAdjusted); } internal static class Mkl @@ -598,9 +598,9 @@ public IReadOnlyCollection TValues public IReadOnlyCollection PValues { get { return _pValues.AsReadOnly(); } } - internal OlsLinearRegressionPredictor(IHostEnvironment env, ref VBuffer weights, float bias, + internal OlsLinearRegressionPredictor(IHostEnvironment env, in VBuffer weights, float bias, Double[] standardErrors, Double[] tValues, Double[] pValues, Double rSquared, Double rSquaredAdjusted) - : base(env, RegistrationName, ref weights, bias) + : base(env, RegistrationName, in weights, bias) { Contracts.AssertValueOrNull(standardErrors); Contracts.AssertValueOrNull(tValues); diff --git a/src/Microsoft.ML.HalLearners/SymSgdClassificationTrainer.cs b/src/Microsoft.ML.HalLearners/SymSgdClassificationTrainer.cs index 76f23c3e8a..b0ec0608f1 100644 --- a/src/Microsoft.ML.HalLearners/SymSgdClassificationTrainer.cs +++ b/src/Microsoft.ML.HalLearners/SymSgdClassificationTrainer.cs @@ -189,9 +189,9 @@ private TPredictor CreatePredictor(VBuffer weights, float bias) Host.CheckParam(weights.Length > 0, nameof(weights)); VBuffer maybeSparseWeights = default; - VBufferUtils.CreateMaybeSparseCopy(ref weights, ref maybeSparseWeights, + VBufferUtils.CreateMaybeSparseCopy(in weights, ref maybeSparseWeights, Conversions.Instance.GetIsDefaultPredicate(NumberType.R4)); - var predictor 
= new LinearBinaryPredictor(Host, ref maybeSparseWeights, bias); + var predictor = new LinearBinaryPredictor(Host, in maybeSparseWeights, bias); return new ParameterMixingCalibratedPredictor(Host, predictor, new PlattCalibrator(Host, -1, 0)); } diff --git a/src/Microsoft.ML.KMeansClustering/KMeansPlusPlusTrainer.cs b/src/Microsoft.ML.KMeansClustering/KMeansPlusPlusTrainer.cs index f3b227419e..c325f87a4d 100644 --- a/src/Microsoft.ML.KMeansClustering/KMeansPlusPlusTrainer.cs +++ b/src/Microsoft.ML.KMeansClustering/KMeansPlusPlusTrainer.cs @@ -348,7 +348,7 @@ public static void Initialize( for (int j = 0; j < i; j++) { - var distance = -2 * VectorUtils.DotProduct(ref cursor.Features, ref centroids[j]) + var distance = -2 * VectorUtils.DotProduct(in cursor.Features, in centroids[j]) + l2 + centroidL2s[j]; probabilityWeight = Math.Min(probabilityWeight, distance); } @@ -581,8 +581,8 @@ public void SetInstanceCluster(int n, float bestWeight, int bestCluster) /// Computes and stores the distance of a new cluster to an old cluster /// must be between 0..numSamplesPerRound-1. /// - public void SetClusterDistance(int newClusterIdxWithinSample, ref VBuffer newClusterFeatures, float newClusterL2, - int oldClusterIdx, ref VBuffer oldClusterFeatures, float oldClusterL2) + public void SetClusterDistance(int newClusterIdxWithinSample, in VBuffer newClusterFeatures, float newClusterL2, + int oldClusterIdx, in VBuffer oldClusterFeatures, float oldClusterL2) { if (_clusterDistances != null) { @@ -591,7 +591,7 @@ public void SetClusterDistance(int newClusterIdxWithinSample, ref VBuffer Contracts.Assert(0 <= oldClusterIdx && oldClusterIdx < _clusterDistances.GetLength(1)); _clusterDistances[newClusterIdxWithinSample, oldClusterIdx] = - MathUtils.Sqrt(newClusterL2 - 2 * VectorUtils.DotProduct(ref newClusterFeatures, ref oldClusterFeatures) + oldClusterL2); + MathUtils.Sqrt(newClusterL2 - 2 * VectorUtils.DotProduct(in newClusterFeatures, in oldClusterFeatures) + oldClusterL2); } } @@ -633,7 +633,7 @@ public bool CanWeightComputationBeAvoided(float instanceDistanceToBestOldCluster /// Note that is used to avoid the storing the new cluster in /// final round. After the final round, best cluster information will be ignored. /// - private static void FindBestCluster(ref VBuffer point, int pointRowIndex, SharedState initializationState, + private static void FindBestCluster(in VBuffer point, int pointRowIndex, SharedState initializationState, int clusterCount, int clusterPrevCount, VBuffer[] clusters, float[] clustersL2s, bool needRealDistanceSquared, bool needToStoreWeight, out float minDistanceSquared, out int bestCluster) { @@ -669,11 +669,11 @@ private static void FindBestCluster(ref VBuffer point, int pointRowIndex, { #if DEBUG // Lets check if our invariant actually holds - Contracts.Assert(-2 * VectorUtils.DotProduct(ref point, ref clusters[j]) + clustersL2s[j] > bestWeight); + Contracts.Assert(-2 * VectorUtils.DotProduct(in point, in clusters[j]) + clustersL2s[j] > bestWeight); #endif continue; } - float weight = -2 * VectorUtils.DotProduct(ref point, ref clusters[j]) + clustersL2s[j]; + float weight = -2 * VectorUtils.DotProduct(in point, in clusters[j]) + clustersL2s[j]; if (bestWeight >= weight) { bestWeight = weight; @@ -694,7 +694,7 @@ private static void FindBestCluster(ref VBuffer point, int pointRowIndex, // So, we need to go over all clusters to find the best cluster. 
int discardSecondBestCluster; float discardSecondBestWeight; - KMeansUtils.FindBestCluster(ref point, clusters, clustersL2s, clusterCount, needRealDistanceSquared, + KMeansUtils.FindBestCluster(in point, clusters, clustersL2s, clusterCount, needRealDistanceSquared, out minDistanceSquared, out bestCluster, out discardSecondBestWeight, out discardSecondBestCluster); } @@ -793,7 +793,7 @@ public static void Initialize(IHost host, int numThreads, IChannel ch, FeatureFl // sample. Heap[] buffer = null; var rowStats = KMeansUtils.ParallelWeightedReservoirSample(host, numThreads, 1, cursorFactory, - (ref VBuffer point, int pointIndex) => (float)1.0, (FeatureFloatVectorCursor cur) => -1, + (in VBuffer point, int pointIndex) => (float)1.0, (FeatureFloatVectorCursor cur) => -1, ref clusters, ref buffer); totalTrainingInstances = rowStats.TotalTrainingInstances; missingFeatureCount = rowStats.MissingFeatureCount; @@ -822,11 +822,11 @@ public static void Initialize(IHost host, int numThreads, IChannel ch, FeatureFl // far from our current total running set of instances as possible. VBuffer[] roundSamples = new VBuffer[numSamplesPerRound]; - KMeansUtils.WeightFunc weightFn = (ref VBuffer point, int pointRowIndex) => + KMeansUtils.WeightFunc weightFn = (in VBuffer point, int pointRowIndex) => { float distanceSquared; int discardBestCluster; - FindBestCluster(ref point, pointRowIndex, initializationState, clusterCount, clusterPrevCount, clusters, + FindBestCluster(in point, pointRowIndex, initializationState, clusterCount, clusterPrevCount, clusters, clustersL2s, true, true, out distanceSquared, out discardBestCluster); return (distanceSquared >= 0.0f) ? distanceSquared : 0.0f; @@ -847,7 +847,7 @@ public static void Initialize(IHost host, int numThreads, IChannel ch, FeatureFl clustersL2s[clusterCount] = VectorUtils.NormSquared(clusters[clusterCount]); for (int j = 0; j < clusterPrevCount; j++) - initializationState.SetClusterDistance(i, ref clusters[clusterCount], clustersL2s[clusterCount], j, ref clusters[j], clustersL2s[j]); + initializationState.SetClusterDistance(i, in clusters[clusterCount], clustersL2s[clusterCount], j, in clusters[j], clustersL2s[j]); clusterCount++; } @@ -870,10 +870,10 @@ public static void Initialize(IHost host, int numThreads, IChannel ch, FeatureFl { int bestCluster; float discardBestWeight; - FindBestCluster(ref point, pointRowIndex, initializationState, clusterCount, clusterPrevCount, clusters, + FindBestCluster(in point, pointRowIndex, initializationState, clusterCount, clusterPrevCount, clusters, clustersL2s, false, false, out discardBestWeight, out bestCluster); #if DEBUG - int debugBestCluster = KMeansUtils.FindBestCluster(ref point, clusters, clustersL2s); + int debugBestCluster = KMeansUtils.FindBestCluster(in point, clusters, clustersL2s); ch.Assert(bestCluster == debugBestCluster); #endif weights[bestCluster]++; @@ -893,7 +893,7 @@ public static void Initialize(IHost host, int numThreads, IChannel ch, FeatureFl KMeansUtils.ParallelMapReduce( numThreads, host, cursorFactory, (FeatureFloatVectorCursor cur) => -1, (ref float[] weights) => weights = new float[totalSamples], - (ref VBuffer point, int discard, float[] weights, IRandom rand) => weights[KMeansUtils.FindBestCluster(ref point, clusters, clustersL2s)]++, + (ref VBuffer point, int discard, float[] weights, IRandom rand) => weights[KMeansUtils.FindBestCluster(in point, clusters, clustersL2s)]++, (float[][] workStateWeights, IRandom rand, ref float[] weights) => { weights = new float[totalSamples]; @@ -949,7 
+949,7 @@ public static void Initialize( Heap[] buffer = null; VBuffer[] outCentroids = null; var rowStats = KMeansUtils.ParallelWeightedReservoirSample(host, numThreads, k, cursorFactory, - (ref VBuffer point, int pointRowIndex) => 1f, (FeatureFloatVectorCursor cur) => -1, + (in VBuffer point, int pointRowIndex) => 1f, (FeatureFloatVectorCursor cur) => -1, ref outCentroids, ref buffer); missingFeatureCount = rowStats.MissingFeatureCount; totalTrainingInstances = rowStats.TotalTrainingInstances; @@ -1060,19 +1060,19 @@ public void KeepYinYangAssignment(int bestCluster) GloballyFiltered++; } - public void UpdateClusterAssignment(bool firstIteration, ref VBuffer features, int cluster, int previousCluster, float distance) + public void UpdateClusterAssignment(bool firstIteration, in VBuffer features, int cluster, int previousCluster, float distance) { if (firstIteration) { - VectorUtils.Add(ref features, ref CachedSum[cluster]); + VectorUtils.Add(in features, ref CachedSum[cluster]); NumChanged++; } else if (previousCluster != cluster) { // update the cachedSum as the instance moves from (previous) bestCluster[n] to cluster - VectorUtils.Add(ref features, ref CachedSum[cluster]); + VectorUtils.Add(in features, ref CachedSum[cluster]); // There doesnt seem to be a Subtract function that does a -= b, so doing a += (-1 * b) - VectorUtils.AddMult(ref features, -1, ref CachedSum[previousCluster]); + VectorUtils.AddMult(in features, -1, ref CachedSum[previousCluster]); NumChanged++; } else @@ -1081,9 +1081,9 @@ public void UpdateClusterAssignment(bool firstIteration, ref VBuffer feat UpdateClusterAssignmentMetrics(cluster, distance); } - public void UpdateClusterAssignment(ref VBuffer features, int cluster, float distance) + public void UpdateClusterAssignment(in VBuffer features, int cluster, float distance) { - VectorUtils.Add(ref features, ref Centroids[cluster]); + VectorUtils.Add(in features, ref Centroids[cluster]); UpdateClusterAssignmentMetrics(cluster, distance); } @@ -1112,8 +1112,8 @@ public static void Reduce(WorkChunkState[] workChunkArr, ReducedWorkChunkState r for (int j = 0; j < reducedState.ClusterSizes.Length; j++) { reducedState.ClusterSizes[j] += workChunkArr[i].ClusterSizes[j]; - VectorUtils.Add(ref workChunkArr[i].CachedSum[j], ref reducedState.CachedSum[j]); - VectorUtils.Add(ref workChunkArr[i].Centroids[j], ref reducedState.Centroids[j]); + VectorUtils.Add(in workChunkArr[i].CachedSum[j], ref reducedState.CachedSum[j]); + VectorUtils.Add(in workChunkArr[i].Centroids[j], ref reducedState.Centroids[j]); } workChunkArr[i].Clear(keepCachedSums: false); @@ -1159,14 +1159,14 @@ public void UpdateClusters(VBuffer[] centroids, float[] centroidL2s, floa for (int i = 0; i < K; i++) { if (isAccelerated) - VectorUtils.Add(ref CachedSum[i], ref Centroids[i]); + VectorUtils.Add(in CachedSum[i], ref Centroids[i]); if (ClusterSizes[i] > 1) VectorUtils.ScaleBy(ref Centroids[i], (float)(1.0 / ClusterSizes[i])); if (isAccelerated) { - float clusterDelta = MathUtils.Sqrt(VectorUtils.L2DistSquared(ref Centroids[i], ref centroids[i])); + float clusterDelta = MathUtils.Sqrt(VectorUtils.L2DistSquared(in Centroids[i], in centroids[i])); deltas[i] = clusterDelta; if (deltaMax < clusterDelta) @@ -1246,14 +1246,14 @@ public SharedState(FeatureFloatVectorCursor.Factory factory, IChannel ch, long b /// to its closer and second closed cluster, as well as the identity of the new /// closest cluster. This method returns the last known closest cluster. 
/// - public int SetYinYangCluster(int n, ref VBuffer features, float minDistance, int minCluster, float secMinDistance) + public int SetYinYangCluster(int n, in VBuffer features, float minDistance, int minCluster, float secMinDistance) { if (n == -1) return -1; // update upper and lower bound // updates have to be true distances to use triangular inequality - float instanceNormSquared = VectorUtils.NormSquared(features); + float instanceNormSquared = VectorUtils.NormSquared(in features); _upperBound[n] = MathUtils.Sqrt(instanceNormSquared + minDistance); _lowerBound[n] = MathUtils.Sqrt(instanceNormSquared + secMinDistance); int previousCluster = _bestCluster[n]; @@ -1282,16 +1282,16 @@ public bool IsYinYangGloballyBound(int n) } #if DEBUG - public void AssertValidYinYangBounds(int n, ref VBuffer features, VBuffer[] centroids) + public void AssertValidYinYangBounds(int n, in VBuffer features, VBuffer[] centroids) { // Assert that the global filter is indeed doing the right thing - float bestDistance = MathUtils.Sqrt(VectorUtils.L2DistSquared(ref features, ref centroids[_bestCluster[n]])); + float bestDistance = MathUtils.Sqrt(VectorUtils.L2DistSquared(in features, in centroids[_bestCluster[n]])); Contracts.Assert(KMeansLloydsYinYangTrain.AlmostLeq(bestDistance, _upperBound[n])); for (int j = 0; j < centroids.Length; j++) { if (j == _bestCluster[n]) continue; - float distance = MathUtils.Sqrt(VectorUtils.L2DistSquared(ref features, ref centroids[j])); + float distance = MathUtils.Sqrt(VectorUtils.L2DistSquared(in features, in centroids[j])); Contracts.Assert(AlmostLeq(_lowerBound[n], distance)); } @@ -1373,7 +1373,7 @@ public static void Train(IHost host, int numThreads, IChannel ch, FeatureFloatVe int id = state.RowIndexGetter(cursor); if (id != -1) { - VectorUtils.Add(ref cursor.Features, ref cachedSumCopy[state.GetBestCluster(id)]); + VectorUtils.Add(in cursor.Features, ref cachedSumCopy[state.GetBestCluster(id)]); numCounted++; } } @@ -1449,7 +1449,7 @@ private static void ProcessChunk(FeatureFloatVectorCursor cursor, SharedState st { chunkState.KeepYinYangAssignment(state.GetBestCluster(n)); #if DEBUG - state.AssertValidYinYangBounds(n, ref cursor.Features, centroids); + state.AssertValidYinYangBounds(n, in cursor.Features, centroids); #endif continue; } @@ -1459,14 +1459,14 @@ private static void ProcessChunk(FeatureFloatVectorCursor cursor, SharedState st float secMinDistance; int cluster; int secCluster; - KMeansUtils.FindBestCluster(ref cursor.Features, centroids, centroidL2s, k, false, out minDistance, out cluster, out secMinDistance, out secCluster); + KMeansUtils.FindBestCluster(in cursor.Features, centroids, centroidL2s, k, false, out minDistance, out cluster, out secMinDistance, out secCluster); if (n == -1) - chunkState.UpdateClusterAssignment(ref cursor.Features, cluster, minDistance); + chunkState.UpdateClusterAssignment(in cursor.Features, cluster, minDistance); else { - int prevCluster = state.SetYinYangCluster(n, ref cursor.Features, minDistance, cluster, secMinDistance); - chunkState.UpdateClusterAssignment(firstIteration, ref cursor.Features, cluster, prevCluster, minDistance); + int prevCluster = state.SetYinYangCluster(n, in cursor.Features, minDistance, cluster, secMinDistance); + chunkState.UpdateClusterAssignment(firstIteration, in cursor.Features, cluster, prevCluster, minDistance); } } } @@ -1522,7 +1522,7 @@ public struct RowStats public long TotalTrainingInstances; } - public delegate float WeightFunc(ref VBuffer point, int pointRowIndex); + public delegate 
float WeightFunc(in VBuffer point, int pointRowIndex); /// /// Performs a multithreaded version of weighted reservior sampling, returning @@ -1570,7 +1570,7 @@ public static RowStats ParallelWeightedReservoirSample( // more than once. float sameClusterEpsilon = (float)1e-15; - float weight = weightFn(ref point, pointRowIndex); + float weight = weightFn(in point, pointRowIndex); // If numeric instability has forced it to zero, then we bound it to epsilon to // keep the key valid and avoid NaN, (although the math does tend to work out regardless: @@ -1700,22 +1700,22 @@ public static RowStats ParallelMapReduce( }; } - public static int FindBestCluster(ref VBuffer features, VBuffer[] centroids, float[] centroidL2s) + public static int FindBestCluster(in VBuffer features, VBuffer[] centroids, float[] centroidL2s) { float discard1; float discard2; int discard3; int cluster; - FindBestCluster(ref features, centroids, centroidL2s, centroids.Length, false, out discard1, out cluster, out discard2, out discard3); + FindBestCluster(in features, centroids, centroidL2s, centroids.Length, false, out discard1, out cluster, out discard2, out discard3); return cluster; } - public static int FindBestCluster(ref VBuffer features, VBuffer[] centroids, float[] centroidL2s, int centroidCount, bool realWeight, out float minDistance) + public static int FindBestCluster(in VBuffer features, VBuffer[] centroids, float[] centroidL2s, int centroidCount, bool realWeight, out float minDistance) { float discard1; int discard2; int cluster; - FindBestCluster(ref features, centroids, centroidL2s, centroidCount, realWeight, out minDistance, out cluster, out discard1, out discard2); + FindBestCluster(in features, centroids, centroidL2s, centroidCount, realWeight, out minDistance, out cluster, out discard1, out discard2); return cluster; } @@ -1734,7 +1734,7 @@ public static int FindBestCluster(ref VBuffer features, VBuffer[] /// The second nearest distance, or PosInf if only contains a single point. /// The index of the second nearest centroid, or -1 if only contains a single point. 
public static void FindBestCluster( - ref VBuffer features, + in VBuffer features, VBuffer[] centroids, float[] centroidL2s, int centroidCount, bool needRealDistance, out float minDistance, out int cluster, out float secMinDistance, out int secCluster) { @@ -1750,7 +1750,7 @@ public static void FindBestCluster( { // this is not a real distance, since we don't add L2 norm of the instance // This won't affect minimum calculations, and total score will just be lowered by sum(L2 norms) - float distance = -2 * VectorUtils.DotProduct(ref features, ref centroids[j]) + centroidL2s[j]; + float distance = -2 * VectorUtils.DotProduct(in features, in centroids[j]) + centroidL2s[j]; if (distance <= minDistance) { @@ -1773,7 +1773,7 @@ public static void FindBestCluster( if (needRealDistance) { - float l2 = VectorUtils.NormSquared(features); + float l2 = VectorUtils.NormSquared(in features); minDistance += l2; if (secCluster != -1) secMinDistance += l2; diff --git a/src/Microsoft.ML.KMeansClustering/KMeansPredictor.cs b/src/Microsoft.ML.KMeansClustering/KMeansPredictor.cs index 6a102d0454..b0c55e9523 100644 --- a/src/Microsoft.ML.KMeansClustering/KMeansPredictor.cs +++ b/src/Microsoft.ML.KMeansClustering/KMeansPredictor.cs @@ -151,14 +151,14 @@ public ValueMapper GetMapper() var values = dst.Values; if (Utils.Size(values) < _k) values = new Float[_k]; - Map(ref src, values); + Map(in src, values); dst = new VBuffer(_k, values, dst.Indices); }; return (ValueMapper)(Delegate)del; } - private void Map(ref VBuffer src, Float[] distances) + private void Map(in VBuffer src, Float[] distances) { Host.Assert(Utils.Size(distances) >= _k); @@ -166,7 +166,7 @@ private void Map(ref VBuffer src, Float[] distances) for (int i = 0; i < _k; i++) { Float distance = Math.Max(0, - -2 * VectorUtils.DotProduct(ref _centroids[i], ref src) + _centroidL2s[i] + instanceL2); + -2 * VectorUtils.DotProduct(in _centroids[i], in src) + _centroidL2s[i] + instanceL2); distances[i] = distance; } } diff --git a/src/Microsoft.ML.PCA/PcaTrainer.cs b/src/Microsoft.ML.PCA/PcaTrainer.cs index 207472284e..c097538135 100644 --- a/src/Microsoft.ML.PCA/PcaTrainer.cs +++ b/src/Microsoft.ML.PCA/PcaTrainer.cs @@ -189,7 +189,7 @@ private PcaPredictor TrainCore(IChannel ch, RoleMappedData data, int dimension) // Make the next vectors in the queue orthogonal to the orthonormalized vectors. for (var j = i + 1; j < oversampledRank; ++j) //subtract the projection of y[j] on v. - VectorUtils.AddMult(ref v, -VectorUtils.DotProduct(ref v, ref y[j]), ref y[j]); + VectorUtils.AddMult(in v, -VectorUtils.DotProduct(in v, in y[j]), ref y[j]); } var q = y; // q in QR decomposition. @@ -201,7 +201,7 @@ private PcaPredictor TrainCore(IChannel ch, RoleMappedData data, int dimension) for (var i = 0; i < oversampledRank; ++i) { for (var j = i; j < oversampledRank; ++j) - b2[i * oversampledRank + j] = b2[j * oversampledRank + i] = VectorUtils.DotProduct(ref b[i], ref b[j]); + b2[i * oversampledRank + j] = b2[j * oversampledRank + i] = VectorUtils.DotProduct(in b[i], in b[j]); } float[] smallEigenvalues;// eigenvectors and eigenvalues of the small matrix B2. 
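The hunks in this region all apply one pattern: helpers such as VectorUtils.DotProduct and VectorUtils.AddMult that only read a vector argument move from ref to in, and every call site is updated to match. As a rough sketch of what that buys (using a hypothetical SmallVector stand-in rather than the real buffer type; none of these names are part of the change), an in parameter passes the struct by read-only reference:

    // Hypothetical stand-in for a large vector struct; illustrative only.
    public readonly struct SmallVector
    {
        public readonly float[] Values;
        public SmallVector(float[] values) { Values = values; }
    }

    public static class VectorSketch
    {
        // 'in' = by-reference and read-only: no struct copy on the call,
        // and the compiler rejects any assignment to 'a' or 'b'.
        public static float DotProduct(in SmallVector a, in SmallVector b)
        {
            float sum = 0;
            for (int i = 0; i < a.Values.Length; i++)
                sum += a.Values[i] * b.Values[i];
            return sum;
        }
    }

At the call site the modifier is optional, but writing it out everywhere, as these hunks do (DotProduct(in x, in y)), makes the by-reference pass visible and pins overload resolution to the in overload.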
@@ -209,7 +209,7 @@ private PcaPredictor TrainCore(IChannel ch, RoleMappedData data, int dimension) EigenUtils.EigenDecomposition(b2, out smallEigenvalues, out smallEigenvectors); PostProcess(b, smallEigenvalues, smallEigenvectors, dimension, oversampledRank); - return new PcaPredictor(Host, _rank, b, ref mean); + return new PcaPredictor(Host, _rank, b, in mean); } private static VBuffer[] Zeros(int k, int d) @@ -258,12 +258,12 @@ private static void Project(IHost host, FeatureFloatVectorCursor.Factory cursorF while (cursor.MoveNext()) { if (center) - VectorUtils.AddMult(ref cursor.Features, cursor.Weight, ref mean); + VectorUtils.AddMult(in cursor.Features, cursor.Weight, ref mean); for (int i = 0; i < numCols; i++) { VectorUtils.AddMult( - ref cursor.Features, - cursor.Weight * VectorUtils.DotProduct(ref omega[i], ref cursor.Features), + in cursor.Features, + cursor.Weight * VectorUtils.DotProduct(in omega[i], in cursor.Features), ref y[i]); } n += cursor.Weight; @@ -283,7 +283,7 @@ private static void Project(IHost host, FeatureFloatVectorCursor.Factory cursorF { VectorUtils.ScaleBy(ref mean, invn); for (int i = 0; i < numCols; i++) - VectorUtils.AddMult(ref mean, -VectorUtils.DotProduct(ref omega[i], ref mean), ref y[i]); + VectorUtils.AddMult(in mean, -VectorUtils.DotProduct(in omega[i], in mean), ref y[i]); } } @@ -393,7 +393,7 @@ public override PredictionKind PredictionKind get { return PredictionKind.AnomalyDetection; } } - internal PcaPredictor(IHostEnvironment env, int rank, VBuffer[] eigenVectors, ref VBuffer mean) + internal PcaPredictor(IHostEnvironment env, int rank, VBuffer[] eigenVectors, in VBuffer mean) : base(env, RegistrationName) { _dimension = eigenVectors[0].Length; @@ -404,7 +404,7 @@ internal PcaPredictor(IHostEnvironment env, int rank, VBuffer[] eigenVect for (var i = 0; i < rank; ++i) // Only want first k { _eigenVectors[i] = eigenVectors[i]; - _meanProjected[i] = VectorUtils.DotProduct(ref eigenVectors[i], ref mean); + _meanProjected[i] = VectorUtils.DotProduct(in eigenVectors[i], in mean); } _mean = mean; @@ -450,7 +450,7 @@ private PcaPredictor(IHostEnvironment env, ModelLoadContext ctx) var vi = ctx.Reader.ReadFloatArray(_dimension); Host.CheckDecode(vi.All(FloatUtils.IsFinite)); _eigenVectors[i] = new VBuffer(_dimension, vi); - _meanProjected[i] = VectorUtils.DotProduct(ref _eigenVectors[i], ref _mean); + _meanProjected[i] = VectorUtils.DotProduct(in _eigenVectors[i], in _mean); } WarnOnOldNormalizer(ctx, GetType(), Host); @@ -519,7 +519,7 @@ public void SaveAsText(TextWriter writer, RoleMappedSchema schema) writer.WriteLine("# V"); for (var i = 0; i < _rank; ++i) { - VBufferUtils.ForEachDefined(ref _eigenVectors[i], + VBufferUtils.ForEachDefined(in _eigenVectors[i], (ind, val) => { if (val != 0) writer.Write(" {0}:{1}", ind, val); }); writer.WriteLine(); } @@ -564,19 +564,19 @@ public ValueMapper GetMapper() (ref VBuffer src, ref float dst) => { Host.Check(src.Length == _dimension); - dst = Score(ref src); + dst = Score(in src); }; return (ValueMapper)(Delegate)del; } - private float Score(ref VBuffer src) + private float Score(in VBuffer src) { Host.Assert(src.Length == _dimension); // REVIEW: Can this be done faster in a single pass over src and _mean? 
var mean = _mean; - float norm2X = VectorUtils.NormSquared(src) - - 2 * VectorUtils.DotProduct(ref mean, ref src) + _norm2Mean; + float norm2X = VectorUtils.NormSquared(in src) - + 2 * VectorUtils.DotProduct(in mean, in src) + _norm2Mean; // Because the distance between src and _mean is computed using the above expression, the result // may be negative due to round off error. If this happens, we let the distance be 0. if (norm2X < 0) @@ -585,7 +585,7 @@ private float Score(ref VBuffer src) float norm2U = 0; for (int i = 0; i < _rank; i++) { - float component = VectorUtils.DotProduct(ref _eigenVectors[i], ref src) - _meanProjected[i]; + float component = VectorUtils.DotProduct(in _eigenVectors[i], in src) - _meanProjected[i]; norm2U += component * component; } diff --git a/src/Microsoft.ML.PCA/PcaTransform.cs b/src/Microsoft.ML.PCA/PcaTransform.cs index aa20f7e033..7fce5f1e63 100644 --- a/src/Microsoft.ML.PCA/PcaTransform.cs +++ b/src/Microsoft.ML.PCA/PcaTransform.cs @@ -482,10 +482,10 @@ private void Project(IDataView trainingData, float[][] mean, float[][][] omega, totalColWeight[iinfo] += weight; if (center[iinfo]) - VectorUtils.AddMult(ref features, mean[iinfo], weight); + VectorUtils.AddMult(in features, mean[iinfo], weight); for (int i = 0; i < omega[iinfo].Length; i++) - VectorUtils.AddMult(ref features, y[iinfo][i], weight * VectorUtils.DotProductWithOffset(omega[iinfo][i], 0, ref features)); + VectorUtils.AddMult(in features, y[iinfo][i], weight * VectorUtils.DotProductWithOffset(omega[iinfo][i], 0, in features)); } } } @@ -619,13 +619,13 @@ protected override Delegate MakeGetter(IRow input, int iinfo, out Action dispose ValueGetter> dstGetter = (ref VBuffer dst) => { srcGetter(ref src); - TransformFeatures(Host, ref src, ref dst, _parent._transformInfos[iinfo]); + TransformFeatures(Host, in src, ref dst, _parent._transformInfos[iinfo]); }; return dstGetter; } - private static void TransformFeatures(IExceptionContext ectx, ref VBuffer src, ref VBuffer dst, TransformInfo transformInfo) + private static void TransformFeatures(IExceptionContext ectx, in VBuffer src, ref VBuffer dst, TransformInfo transformInfo) { ectx.Check(src.Length == transformInfo.Dimension); @@ -635,7 +635,7 @@ private static void TransformFeatures(IExceptionContext ectx, ref VBuffer for (int i = 0; i < transformInfo.Rank; i++) { - values[i] = VectorUtils.DotProductWithOffset(transformInfo.Eigenvectors[i], 0, ref src) - + values[i] = VectorUtils.DotProductWithOffset(transformInfo.Eigenvectors[i], 0, in src) - (transformInfo.MeanProjected == null ? 0 : transformInfo.MeanProjected[i]); } diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs b/src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs index 735df2a50f..ee928e5cda 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs @@ -19,7 +19,7 @@ namespace Microsoft.ML.Runtime.Numeric /// The gradient vector, which must be filled in (its initial contents are undefined) /// The progress channel provider that can be used to report calculation progress. Can be null. /// The value of the function - public delegate Float DifferentiableFunction(ref VBuffer input, ref VBuffer gradient, IProgressChannelProvider progress); + public delegate Float DifferentiableFunction(in VBuffer input, ref VBuffer gradient, IProgressChannelProvider progress); /// /// A delegate for indexed sets of functions with gradients. 
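Every signature change in these hunks follows one pattern: VBuffer arguments that are only read move from ref to in, that is, pass-by-readonly-reference, while arguments the callee mutates (dst, gradient, and similar) stay ref. Paired with VBuffer being a readonly struct, in lets the compiler pass a reference without copying the struct at the call site and without inserting defensive copies around member accesses. A self-contained illustration; TinyVector is a hypothetical stand-in for this sketch, not the real VBuffer:

```csharp
using System;

// Because the struct is readonly, calling its members through an `in`
// parameter needs no defensive copy.
public readonly struct TinyVector
{
    private readonly float[] _values;
    public TinyVector(float[] values) { _values = values; }
    public int Length => _values.Length;
    public float this[int i] => _values[i];
}

public static class InParamDemo
{
    // `in` passes the struct by readonly reference: no copy is made, and
    // the compiler rejects any attempt to assign to `v` in this body.
    public static float Sum(in TinyVector v)
    {
        float s = 0;
        for (int i = 0; i < v.Length; i++)
            s += v[i];
        return s;
    }

    public static void Main()
    {
        var v = new TinyVector(new float[] { 1f, 2f, 3f });
        Console.WriteLine(Sum(in v)); // `in` at the call site is optional
    }
}
```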
@@ -31,7 +31,7 @@ namespace Microsoft.ML.Runtime.Numeric /// The point at which to evaluate the function /// The gradient vector, which must be filled in (its initial contents are undefined) /// The value of the function - public delegate Float IndexedDifferentiableFunction(int index, ref VBuffer input, ref VBuffer gradient); + public delegate Float IndexedDifferentiableFunction(int index, in VBuffer input, ref VBuffer gradient); /// /// Class to aggregate an indexed differentiable function into a single function, in parallel @@ -102,11 +102,11 @@ private void Eval(object chunkIndexObj) for (int i = from; i < to; ++i) { tempGrad = new VBuffer(0, 0, tempGrad.Values, tempGrad.Indices); - _tempVals[chunkIndex] += _func(i, ref _input, ref tempGrad); + _tempVals[chunkIndex] += _func(i, in _input, ref tempGrad); if (_tempGrads[chunkIndex].Length == 0) tempGrad.CopyTo(ref _tempGrads[chunkIndex]); else - VectorUtils.Add(ref tempGrad, ref _tempGrads[chunkIndex]); + VectorUtils.Add(in tempGrad, ref _tempGrads[chunkIndex]); } _threadFinished[chunkIndex].Set(); @@ -118,7 +118,7 @@ private void Eval(object chunkIndexObj) /// The point at which to evaluate the function /// The gradient vector, which must be filled in (its initial contents are undefined) /// Function value - public Float Eval(ref VBuffer input, ref VBuffer gradient) + public Float Eval(in VBuffer input, ref VBuffer gradient) { _input = input; @@ -136,7 +136,7 @@ public Float Eval(ref VBuffer input, ref VBuffer gradient) if (gradient.Length == 0) _tempGrads[c].CopyTo(ref gradient); else - VectorUtils.Add(ref _tempGrads[c], ref gradient); + VectorUtils.Add(in _tempGrads[c], ref gradient); value += _tempVals[c]; } @@ -169,10 +169,10 @@ public static class GradientTester /// function to test /// point at which to test /// maximum normalized difference between analytic and numeric directional derivative over multiple tests - public static Float Test(DifferentiableFunction f, ref VBuffer x) + public static Float Test(DifferentiableFunction f, in VBuffer x) { // REVIEW: Delete this method? - return Test(f, ref x, false); + return Test(f, in x, false); } /// @@ -182,14 +182,14 @@ public static Float Test(DifferentiableFunction f, ref VBuffer x) /// point at which to test /// If false, outputs detailed info. /// maximum normalized difference between analytic and numeric directional derivative over multiple tests - public static Float Test(DifferentiableFunction f, ref VBuffer x, bool quiet) + public static Float Test(DifferentiableFunction f, in VBuffer x, bool quiet) { // REVIEW: Delete this method? 
VBuffer grad = default(VBuffer); VBuffer newGrad = default(VBuffer); VBuffer newX = default(VBuffer); Float normX = VectorUtils.Norm(x); - f(ref x, ref grad, null); + f(in x, ref grad, null); if (!quiet) Console.WriteLine(Header); @@ -217,13 +217,13 @@ public static Float Test(DifferentiableFunction f, ref VBuffer x, bool qu Float norm = VectorUtils.Norm(dir); VectorUtils.ScaleBy(ref dir, 1 / norm); - VectorUtils.AddMultInto(ref x, Eps, ref dir, ref newX); - Float rVal = f(ref newX, ref newGrad, null); + VectorUtils.AddMultInto(in x, Eps, in dir, ref newX); + Float rVal = f(in newX, ref newGrad, null); - VectorUtils.AddMultInto(ref x, -Eps, ref dir, ref newX); - Float lVal = f(ref newX, ref newGrad, null); + VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX); + Float lVal = f(in newX, ref newGrad, null); - Float dirDeriv = VectorUtils.DotProduct(ref grad, ref dir); + Float dirDeriv = VectorUtils.DotProduct(in grad, in dir); Float numDeriv = (rVal - lVal) / (2 * Eps); Float normDiff = Math.Abs(1 - numDeriv / dirDeriv); @@ -253,7 +253,7 @@ public static void TestAllCoords(DifferentiableFunction f, ref VBuffer x) VBuffer grad = default(VBuffer); VBuffer newGrad = default(VBuffer); VBuffer newX = default(VBuffer); - Float val = f(ref x, ref grad, null); + Float val = f(in x, ref grad, null); Float normX = VectorUtils.Norm(x); Console.WriteLine(Header); @@ -264,13 +264,13 @@ public static void TestAllCoords(DifferentiableFunction f, ref VBuffer x) for (int n = 0; n < x.Length; n++) { dir.Values[0] = n; - VectorUtils.AddMultInto(ref x, Eps, ref dir, ref newX); - Float rVal = f(ref newX, ref newGrad, null); + VectorUtils.AddMultInto(in x, Eps, in dir, ref newX); + Float rVal = f(in newX, ref newGrad, null); - VectorUtils.AddMultInto(ref x, -Eps, ref dir, ref newX); - Float lVal = f(ref newX, ref newGrad, null); + VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX); + Float lVal = f(in newX, ref newGrad, null); - Float dirDeriv = VectorUtils.DotProduct(ref grad, ref dir); + Float dirDeriv = VectorUtils.DotProduct(in grad, in dir); Float numDeriv = (rVal - lVal) / (2 * Eps); Float normDiff = Math.Abs(1 - numDeriv / dirDeriv); @@ -292,7 +292,7 @@ public static void TestCoords(DifferentiableFunction f, ref VBuffer x, IL VBuffer grad = default(VBuffer); VBuffer newGrad = default(VBuffer); VBuffer newX = default(VBuffer); - Float val = f(ref x, ref grad, null); + Float val = f(in x, ref grad, null); Float normX = VectorUtils.Norm(x); Console.WriteLine(Header); @@ -303,13 +303,13 @@ public static void TestCoords(DifferentiableFunction f, ref VBuffer x, IL foreach (int n in coords) { dir.Values[0] = n; - VectorUtils.AddMultInto(ref x, Eps, ref dir, ref newX); - Float rVal = f(ref newX, ref newGrad, null); + VectorUtils.AddMultInto(in x, Eps, in dir, ref newX); + Float rVal = f(in newX, ref newGrad, null); - VectorUtils.AddMultInto(ref x, -Eps, ref dir, ref newX); - Float lVal = f(ref newX, ref newGrad, null); + VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX); + Float lVal = f(in newX, ref newGrad, null); - Float dirDeriv = VectorUtils.DotProduct(ref grad, ref dir); + Float dirDeriv = VectorUtils.DotProduct(in grad, in dir); Float numDeriv = (rVal - lVal) / (2 * Eps); Float normDiff = Math.Abs(1 - numDeriv / dirDeriv); @@ -328,21 +328,21 @@ public static void TestCoords(DifferentiableFunction f, ref VBuffer x, IL /// This is a reusable working buffer for intermediate calculations /// This is a reusable working buffer for intermediate calculations /// Normalized difference between analytic and 
numeric directional derivative - public static Float Test(DifferentiableFunction f, ref VBuffer x, ref VBuffer dir, bool quiet, + public static Float Test(DifferentiableFunction f, in VBuffer x, ref VBuffer dir, bool quiet, ref VBuffer newGrad, ref VBuffer newX) { Float normDir = VectorUtils.Norm(dir); - Float val = f(ref x, ref newGrad, null); - Float dirDeriv = VectorUtils.DotProduct(ref newGrad, ref dir); + Float val = f(in x, ref newGrad, null); + Float dirDeriv = VectorUtils.DotProduct(in newGrad, in dir); Float scaledEps = Eps / normDir; - VectorUtils.AddMultInto(ref x, scaledEps, ref dir, ref newX); - Float rVal = f(ref newX, ref newGrad, null); + VectorUtils.AddMultInto(in x, scaledEps, in dir, ref newX); + Float rVal = f(in newX, ref newGrad, null); - VectorUtils.AddMultInto(ref x, -scaledEps, ref dir, ref newX); - Float lVal = f(ref newX, ref newGrad, null); + VectorUtils.AddMultInto(in x, -scaledEps, in dir, ref newX); + Float lVal = f(in newX, ref newGrad, null); Float numDeriv = (rVal - lVal) / (2 * scaledEps); diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs b/src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs index 3c47db0333..1f9174a601 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs @@ -57,8 +57,8 @@ internal override OptimizerState MakeState(IChannel ch, IProgressChannelProvider } if (_l1weight > 0 && _biasCount < initial.Length) - return new L1OptimizerState(ch, progress, function, ref initial, M, TotalMemoryLimit, _biasCount, _l1weight, KeepDense, EnforceNonNegativity); - return new FunctionOptimizerState(ch, progress, function, ref initial, M, TotalMemoryLimit, KeepDense, EnforceNonNegativity); + return new L1OptimizerState(ch, progress, function, in initial, M, TotalMemoryLimit, _biasCount, _l1weight, KeepDense, EnforceNonNegativity); + return new FunctionOptimizerState(ch, progress, function, in initial, M, TotalMemoryLimit, KeepDense, EnforceNonNegativity); } /// @@ -73,9 +73,9 @@ public sealed class L1OptimizerState : OptimizerState private readonly int _biasCount; private readonly Float _l1weight; - internal L1OptimizerState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, ref VBuffer initial, int m, long totalMemLimit, + internal L1OptimizerState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, in VBuffer initial, int m, long totalMemLimit, int biasCount, Float l1Weight, bool keepDense, bool enforceNonNegativity) - : base(ch, progress, ref initial, m, totalMemLimit, keepDense, enforceNonNegativity) + : base(ch, progress, in initial, m, totalMemLimit, keepDense, enforceNonNegativity) { Contracts.AssertValue(ch); ch.Assert(0 <= biasCount && biasCount < initial.Length); @@ -96,7 +96,7 @@ public override DifferentiableFunction Function /// /// This is the original differentiable function with the injected L1 term. /// - private Float EvalCore(ref VBuffer input, ref VBuffer gradient, IProgressChannelProvider progress) + private Float EvalCore(in VBuffer input, ref VBuffer gradient, IProgressChannelProvider progress) { // REVIEW: Leverage Vector methods that use SSE. 
Float res = 0; @@ -104,33 +104,33 @@ private Float EvalCore(ref VBuffer input, ref VBuffer gradient, IP if (!EnforceNonNegativity) { if (_biasCount > 0) - VBufferUtils.ForEachDefined(ref input, + VBufferUtils.ForEachDefined(in input, (ind, value) => { if (ind >= _biasCount) res += Math.Abs(value); }); else - VBufferUtils.ForEachDefined(ref input, (ind, value) => res += Math.Abs(value)); + VBufferUtils.ForEachDefined(in input, (ind, value) => res += Math.Abs(value)); } else { if (_biasCount > 0) - VBufferUtils.ForEachDefined(ref input, + VBufferUtils.ForEachDefined(in input, (ind, value) => { if (ind >= _biasCount) res += value; }); else - VBufferUtils.ForEachDefined(ref input, (ind, value) => res += value); + VBufferUtils.ForEachDefined(in input, (ind, value) => res += value); } - res = _l1weight * res + _function(ref input, ref gradient, progress); + res = _l1weight * res + _function(in input, ref gradient, progress); return res; } - public override Float Eval(ref VBuffer input, ref VBuffer gradient) + public override Float Eval(in VBuffer input, ref VBuffer gradient) { - return EvalCore(ref input, ref gradient, ProgressProvider); + return EvalCore(in input, ref gradient, ProgressProvider); } private void MakeSteepestDescDir() { if (!EnforceNonNegativity) { - VBufferUtils.ApplyInto(ref _x, ref _grad, ref _steepestDescDir, + VBufferUtils.ApplyInto(in _x, in _grad, ref _steepestDescDir, (ind, xVal, gradVal) => { if (ind < _biasCount) @@ -148,7 +148,7 @@ private void MakeSteepestDescDir() } else { - VBufferUtils.ApplyInto(ref _x, ref _grad, ref _steepestDescDir, + VBufferUtils.ApplyInto(in _x, in _grad, ref _steepestDescDir, (ind, xVal, gradVal) => { if (ind < _biasCount) @@ -164,11 +164,11 @@ private void MakeSteepestDescDir() private void GetNextPoint(Float alpha) { - VectorUtils.AddMultInto(ref _x, alpha, ref _dir, ref _newX); + VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX); if (!EnforceNonNegativity) { - VBufferUtils.ApplyWith(ref _x, ref _newX, + VBufferUtils.ApplyWith(in _x, ref _newX, delegate(int ind, Float xVal, ref Float newXval) { if (xVal*newXval < 0.0 && ind >= _biasCount) @@ -197,7 +197,7 @@ internal override void UpdateDir() /// internal override bool LineSearch(IChannel ch, bool force) { - Float dirDeriv = -VectorUtils.DotProduct(ref _dir, ref _steepestDescDir); + Float dirDeriv = -VectorUtils.DotProduct(in _dir, in _steepestDescDir); if (dirDeriv == 0) throw ch.Process(new PrematureConvergenceException(this, "Directional derivative is zero. You may be sitting on the optimum.")); @@ -209,10 +209,10 @@ internal override bool LineSearch(IChannel ch, bool force) Float alpha = (Iter == 1 ? 
(1 / VectorUtils.Norm(_dir)) : 1); GetNextPoint(alpha); - Float unnormCos = VectorUtils.DotProduct(ref _steepestDescDir, ref _newX) - VectorUtils.DotProduct(ref _steepestDescDir, ref _x); + Float unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x); if (unnormCos < 0) { - VBufferUtils.ApplyWith(ref _steepestDescDir, ref _dir, + VBufferUtils.ApplyWith(in _steepestDescDir, ref _dir, (int ind, Float sdVal, ref Float dirVal) => { if (sdVal * dirVal < 0 && ind >= _biasCount) @@ -220,13 +220,13 @@ internal override bool LineSearch(IChannel ch, bool force) }); GetNextPoint(alpha); - unnormCos = VectorUtils.DotProduct(ref _steepestDescDir, ref _newX) - VectorUtils.DotProduct(ref _steepestDescDir, ref _x); + unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x); } int i = 0; while (true) { - Value = Eval(ref _newX, ref _newGrad); + Value = Eval(in _newX, ref _newGrad); GradientCalculations++; if (Value <= LastValue - Gamma * unnormCos) @@ -240,7 +240,7 @@ internal override bool LineSearch(IChannel ch, bool force) alpha *= (Float)0.25; GetNextPoint(alpha); - unnormCos = VectorUtils.DotProduct(ref _steepestDescDir, ref _newX) - VectorUtils.DotProduct(ref _steepestDescDir, ref _x); + unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x); } } } diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs b/src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs index 612c52c615..fb8e2a6520 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs @@ -457,20 +457,20 @@ private static Float LogTest(Float x, out Float deriv) return (Float)(Math.Log(1 + 1.0 / e) + Math.Log(1 + e) - 0.5 * x); } - private static Float QuadTest2D(ref VBuffer x, ref VBuffer grad, IProgressChannelProvider progress = null) + private static Float QuadTest2D(in VBuffer x, ref VBuffer grad, IProgressChannelProvider progress = null) { - Float d1 = VectorUtils.DotProduct(ref x, ref _c1); - Float d2 = VectorUtils.DotProduct(ref x, ref _c2); - Float d3 = VectorUtils.DotProduct(ref x, ref _c3); + Float d1 = VectorUtils.DotProduct(in x, in _c1); + Float d2 = VectorUtils.DotProduct(in x, in _c2); + Float d3 = VectorUtils.DotProduct(in x, in _c3); _c3.CopyTo(ref grad); - VectorUtils.AddMult(ref _c1, d1, ref grad); - VectorUtils.AddMult(ref _c2, d2, ref grad); + VectorUtils.AddMult(in _c1, d1, ref grad); + VectorUtils.AddMult(in _c2, d2, ref grad); return (Float)0.5 * (d1 * d1 + d2 * d2) + d3 + 55; } - private static void StochasticQuadTest2D(ref VBuffer x, ref VBuffer grad) + private static void StochasticQuadTest2D(in VBuffer x, ref VBuffer grad) { - QuadTest2D(ref x, ref grad); + QuadTest2D(in x, ref grad); } private static void CreateWrapped(out VBuffer vec, params Float[] values) @@ -509,9 +509,9 @@ public static void Main(string[] argv) int n = 0; bool print = false; DTerminate term = - (ref VBuffer x) => + (in VBuffer x) => { - QuadTest2D(ref x, ref grad); + QuadTest2D(in x, ref grad); Float norm = VectorUtils.Norm(grad); if (++n % 1000 == 0 || print) Console.WriteLine("{0}\t{1}", n, norm); @@ -522,7 +522,7 @@ public static void Main(string[] argv) CreateWrapped(out init, 0, 0); VBuffer ans = default(VBuffer); sgdo.Minimize(StochasticQuadTest2D, ref init, ref ans); - QuadTest2D(ref ans, ref grad); + QuadTest2D(in ans, ref grad); 
Console.WriteLine(VectorUtils.Norm(grad)); Console.WriteLine(); Console.WriteLine(); @@ -531,7 +531,7 @@ public static void Main(string[] argv) print = true; CreateWrapped(out init, 0, 0); gdo.Minimize(QuadTest2D, ref init, ref ans); - QuadTest2D(ref ans, ref grad); + QuadTest2D(in ans, ref grad); Console.WriteLine(VectorUtils.Norm(grad)); } } diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs b/src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs index 2f4d7974f4..7b231bb027 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs @@ -87,7 +87,7 @@ private Float Check(Optimizer.OptimizerState state) Console.Error.Flush(); var x = state.X; var lastDir = state.LastDir; - Float checkResult = GradientTester.Test(state.Function, ref x, ref lastDir, true, ref _newGrad, ref _newX); + Float checkResult = GradientTester.Test(state.Function, in x, ref lastDir, true, ref _newGrad, ref _newX); for (int i = 0; i < _checkingMessage.Length; i++) Console.Error.Write('\b'); return checkResult; diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs b/src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs index 12ffc81c91..4b8c1e2cc4 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs @@ -93,24 +93,24 @@ internal OptimizerException(OptimizerState state, string message) internal virtual OptimizerState MakeState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, ref VBuffer initial) { - return new FunctionOptimizerState(ch, progress, function, ref initial, M, TotalMemoryLimit, KeepDense, EnforceNonNegativity); + return new FunctionOptimizerState(ch, progress, function, in initial, M, TotalMemoryLimit, KeepDense, EnforceNonNegativity); } internal sealed class FunctionOptimizerState : OptimizerState { public override DifferentiableFunction Function { get; } - internal FunctionOptimizerState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, ref VBuffer initial, int m, + internal FunctionOptimizerState(IChannel ch, IProgressChannelProvider progress, DifferentiableFunction function, in VBuffer initial, int m, long totalMemLimit, bool keepDense, bool enforceNonNegativity) - : base(ch, progress, ref initial, m, totalMemLimit, keepDense, enforceNonNegativity) + : base(ch, progress, in initial, m, totalMemLimit, keepDense, enforceNonNegativity) { Function = function; Init(); } - public override Float Eval(ref VBuffer input, ref VBuffer gradient) + public override Float Eval(in VBuffer input, ref VBuffer gradient) { - return Function(ref input, ref gradient, ProgressProvider); + return Function(in input, ref gradient, ProgressProvider); } } @@ -141,7 +141,7 @@ public abstract class OptimizerState /// The function being optimized /// public abstract DifferentiableFunction Function { get; } - public abstract Float Eval(ref VBuffer input, ref VBuffer gradient); + public abstract Float Eval(in VBuffer input, ref VBuffer gradient); /// /// The current point being explored @@ -194,7 +194,7 @@ public abstract class OptimizerState private int _m; private readonly long _totalMemLimit; - protected internal OptimizerState(IChannel ch, IProgressChannelProvider progress, ref VBuffer initial, + protected internal OptimizerState(IChannel ch, IProgressChannelProvider progress, in VBuffer initial, int m, long totalMemLimit, bool keepDense, 
bool enforceNonNegativity) { Contracts.AssertValue(ch); @@ -239,7 +239,7 @@ protected VBuffer CreateWorkingVector() // Leaf constructors must call this once they are fully initialized. protected virtual void Init() { - Value = LastValue = Eval(ref _x, ref _grad); + Value = LastValue = Eval(in _x, ref _grad); GradientCalculations++; if (!FloatUtils.IsFinite(LastValue)) throw Ch.Except("Optimizer unable to proceed with loss function yielding {0}", LastValue); @@ -259,8 +259,8 @@ internal void MapDirByInverseHessian() { if (_roList[i] > 0) { - alphas[i] = -VectorUtils.DotProduct(ref _sList[i], ref _dir) / _roList[i]; - VectorUtils.AddMult(ref _yList[i], alphas[i], ref _dir); + alphas[i] = -VectorUtils.DotProduct(in _sList[i], in _dir) / _roList[i]; + VectorUtils.AddMult(in _yList[i], alphas[i], ref _dir); if (lastGoodRo == -1) lastGoodRo = i; } @@ -270,15 +270,15 @@ internal void MapDirByInverseHessian() if (lastGoodRo == -1) return; - Float yDotY = VectorUtils.DotProduct(ref _yList[lastGoodRo], ref _yList[lastGoodRo]); + Float yDotY = VectorUtils.DotProduct(in _yList[lastGoodRo], in _yList[lastGoodRo]); VectorUtils.ScaleBy(ref _dir, _roList[lastGoodRo] / yDotY); for (int i = 0; i <= lastGoodRo; i++) { if (_roList[i] > 0) { - Float beta = VectorUtils.DotProduct(ref _yList[i], ref _dir) / _roList[i]; - VectorUtils.AddMult(ref _sList[i], -alphas[i] - beta, ref _dir); + Float beta = VectorUtils.DotProduct(in _yList[i], in _dir) / _roList[i]; + VectorUtils.AddMult(in _sList[i], -alphas[i] - beta, ref _dir); } } } @@ -293,7 +293,7 @@ internal void DiscardOldVectors() protected void FixDirZeros() { - VBufferUtils.ApplyWithEitherDefined(ref _steepestDescDir, ref _dir, + VBufferUtils.ApplyWithEitherDefined(in _steepestDescDir, ref _dir, (int i, Float sdVal, ref Float dirVal) => { if (sdVal == 0) @@ -305,7 +305,7 @@ internal virtual void UpdateDir() { if (EnforceNonNegativity) { - VBufferUtils.ApplyInto(ref _x, ref _grad, ref _steepestDescDir, + VBufferUtils.ApplyInto(in _x, in _grad, ref _steepestDescDir, (ind, xVal, gradVal) => { if (xVal > 0) @@ -316,7 +316,7 @@ internal virtual void UpdateDir() _steepestDescDir.CopyTo(ref _dir); } else - VectorUtils.ScaleInto(ref _grad, -1, ref _dir); + VectorUtils.ScaleInto(in _grad, -1, ref _dir); MapDirByInverseHessian(); @@ -355,9 +355,9 @@ internal void Shift() nextY = CreateWorkingVector(); } - VectorUtils.AddMultInto(ref _newX, -1, ref _x, ref nextS); - VectorUtils.AddMultInto(ref _newGrad, -1, ref _grad, ref nextY); - Float ro = VectorUtils.DotProduct(ref nextS, ref nextY); + VectorUtils.AddMultInto(in _newX, -1, in _x, ref nextS); + VectorUtils.AddMultInto(in _newGrad, -1, in _grad, ref nextY); + Float ro = VectorUtils.DotProduct(in nextS, in nextY); if (ro == 0) throw Ch.Process(new PrematureConvergenceException(this, "ro equals zero. Is your function linear?")); @@ -381,7 +381,7 @@ internal void Shift() internal virtual bool LineSearch(IChannel ch, bool force) { Contracts.AssertValue(ch); - Float dirDeriv = VectorUtils.DotProduct(ref _dir, ref _grad); + Float dirDeriv = VectorUtils.DotProduct(in _dir, in _grad); if (dirDeriv == 0) throw ch.Process(new PrematureConvergenceException(this, "Directional derivative is zero. 
You may be sitting on the optimum.")); @@ -402,7 +402,7 @@ internal virtual bool LineSearch(IChannel ch, bool force) // initial bracketing phase while (true) { - VectorUtils.AddMultInto(ref _x, alpha, ref _dir, ref _newX); + VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX); if (EnforceNonNegativity) { VBufferUtils.Apply(ref _newX, delegate(int ind, ref Float newXval) @@ -412,7 +412,7 @@ internal virtual bool LineSearch(IChannel ch, bool force) }); } - Value = Eval(ref _newX, ref _newGrad); + Value = Eval(in _newX, ref _newGrad); GradientCalculations++; if (Float.IsPositiveInfinity(Value)) { @@ -423,7 +423,7 @@ internal virtual bool LineSearch(IChannel ch, bool force) if (!FloatUtils.IsFinite(Value)) throw ch.Except("Optimizer unable to proceed with loss function yielding {0}", Value); - dirDeriv = VectorUtils.DotProduct(ref _dir, ref _newGrad); + dirDeriv = VectorUtils.DotProduct(in _dir, in _newGrad); PointValueDeriv curr = new PointValueDeriv(alpha, Value, dirDeriv); if ((curr.V > LastValue + c1 * alpha) || (last.A > 0 && curr.V >= last.V)) @@ -483,7 +483,7 @@ internal virtual bool LineSearch(IChannel ch, bool force) if (alpha < lb) alpha = lb; - VectorUtils.AddMultInto(ref _x, alpha, ref _dir, ref _newX); + VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX); if (EnforceNonNegativity) { VBufferUtils.Apply(ref _newX, delegate(int ind, ref Float newXval) @@ -493,11 +493,11 @@ internal virtual bool LineSearch(IChannel ch, bool force) }); } - Value = Eval(ref _newX, ref _newGrad); + Value = Eval(in _newX, ref _newGrad); GradientCalculations++; if (!FloatUtils.IsFinite(Value)) throw ch.Except("Optimizer unable to proceed with loss function yielding {0}", Value); - dirDeriv = VectorUtils.DotProduct(ref _dir, ref _newGrad); + dirDeriv = VectorUtils.DotProduct(in _dir, in _newGrad); PointValueDeriv curr = new PointValueDeriv(alpha, Value, dirDeriv); diff --git a/src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs b/src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs index 5a6def6b98..a49800a895 100644 --- a/src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs +++ b/src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs @@ -15,7 +15,7 @@ namespace Microsoft.ML.Runtime.Numeric /// /// Current iterate /// True if search should terminate - public delegate bool DTerminate(ref VBuffer x); + public delegate bool DTerminate(in VBuffer x); /// /// Stochastic gradient descent with variations (minibatch, momentum, averaging). @@ -146,7 +146,7 @@ public SgdOptimizer(DTerminate terminate, RateScheduleType rateSchedule = RateSc /// /// Point at which to evaluate /// Vector to be filled in with gradient - public delegate void DStochasticGradient(ref VBuffer x, ref VBuffer grad); + public delegate void DStochasticGradient(in VBuffer x, ref VBuffer grad); /// /// Minimize the function represented by . 
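The two delegates just introduced divide the optimizer's inputs: DStochasticGradient produces a (possibly noisy) gradient at the current point, and DTerminate decides when iteration should stop. A toy version of that contract, with double[] standing in for VBuffer<Float> (the in modifier pays off for structs like VBuffer but does nothing for an array reference, so it is omitted here); everything in this sketch is illustrative, not the SgdOptimizer implementation:

```csharp
using System;

public static class SgdSketch
{
    // Shapes analogous to DTerminate / DStochasticGradient above.
    public delegate bool Terminate(double[] x);
    public delegate void StochasticGradient(double[] x, double[] grad);

    public static void Minimize(StochasticGradient f, Terminate terminate,
        double[] x, double stepSize)
    {
        var grad = new double[x.Length];
        while (!terminate(x))
        {
            f(x, grad);                      // ask the delegate for a gradient
            for (int i = 0; i < x.Length; i++)
                x[i] -= stepSize * grad[i];  // take a descent step
        }
    }

    public static void Main()
    {
        // Minimize f(x) = ||x - t||^2 for t = (3, -1); gradient is 2(x - t).
        double[] t = { 3, -1 };
        StochasticGradient g = (x, grad) =>
        {
            for (int i = 0; i < x.Length; i++)
                grad[i] = 2 * (x[i] - t[i]);
        };
        Terminate done = x =>
        {
            double d = 0;
            for (int i = 0; i < x.Length; i++)
                d += (x[i] - t[i]) * (x[i] - t[i]);
            return d < 1e-12;                // stop once close to the optimum
        };

        var point = new double[2];
        Minimize(g, done, point, stepSize: 0.1);
        Console.WriteLine($"{point[0]:F4}, {point[1]:F4}"); // ~3.0000, ~-1.0000
    }
}
```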
@@ -192,18 +192,18 @@ public void Minimize(DStochasticGradient f, ref VBuffer initial, ref VBuf Float scale = (1 - _momentum) / _batchSize; for (int i = 0; i < _batchSize; ++i) { - f(ref x, ref grad); - VectorUtils.AddMult(ref grad, scale, ref step); + f(in x, ref grad); + VectorUtils.AddMult(in grad, scale, ref step); } if (_averaging) { Utils.Swap(ref avg, ref prev); VectorUtils.ScaleBy(prev, ref avg, (Float)n / (n + 1)); - VectorUtils.AddMult(ref step, -stepSize, ref x); - VectorUtils.AddMult(ref x, (Float)1 / (n + 1), ref avg); + VectorUtils.AddMult(in step, -stepSize, ref x); + VectorUtils.AddMult(in x, (Float)1 / (n + 1), ref avg); - if ((n > 0 && TerminateTester.ShouldTerminate(ref avg, ref prev)) || _terminate(ref avg)) + if ((n > 0 && TerminateTester.ShouldTerminate(in avg, in prev)) || _terminate(in avg)) { result = avg; return; @@ -212,8 +212,8 @@ public void Minimize(DStochasticGradient f, ref VBuffer initial, ref VBuf else { Utils.Swap(ref x, ref prev); - VectorUtils.AddMult(ref step, -stepSize, ref prev, ref x); - if ((n > 0 && TerminateTester.ShouldTerminate(ref x, ref prev)) || _terminate(ref x)) + VectorUtils.AddMult(in step, -stepSize, ref prev, ref x); + if ((n > 0 && TerminateTester.ShouldTerminate(in x, in prev)) || _terminate(in x)) { result = x; return; @@ -300,26 +300,26 @@ private class LineFunc private DifferentiableFunction _func; - public Float Deriv => VectorUtils.DotProduct(ref _dir, ref _grad); + public Float Deriv => VectorUtils.DotProduct(in _dir, in _grad); - public LineFunc(DifferentiableFunction function, ref VBuffer initial, bool useCG = false) + public LineFunc(DifferentiableFunction function, in VBuffer initial, bool useCG = false) { int dim = initial.Length; initial.CopyTo(ref _point); _func = function; // REVIEW: plumb the IProgressChannelProvider through. 
- _value = _func(ref _point, ref _grad, null); - VectorUtils.ScaleInto(ref _grad, -1, ref _dir); + _value = _func(in _point, ref _grad, null); + VectorUtils.ScaleInto(in _grad, -1, ref _dir); _useCG = useCG; } public Float Eval(Float step, out Float deriv) { - VectorUtils.AddMultInto(ref _point, step, ref _dir, ref _newPoint); - _newValue = _func(ref _newPoint, ref _newGrad, null); - deriv = VectorUtils.DotProduct(ref _dir, ref _newGrad); + VectorUtils.AddMultInto(in _point, step, in _dir, ref _newPoint); + _newValue = _func(in _newPoint, ref _newGrad, null); + deriv = VectorUtils.DotProduct(in _dir, in _newGrad); return _newValue; } @@ -328,15 +328,15 @@ public void ChangeDir() if (_useCG) { Float newByNew = VectorUtils.NormSquared(_newGrad); - Float newByOld = VectorUtils.DotProduct(ref _newGrad, ref _grad); + Float newByOld = VectorUtils.DotProduct(in _newGrad, in _grad); Float oldByOld = VectorUtils.NormSquared(_grad); Float betaPR = (newByNew - newByOld) / oldByOld; Float beta = Math.Max(0, betaPR); VectorUtils.ScaleBy(ref _dir, beta); - VectorUtils.AddMult(ref _newGrad, -1, ref _dir); + VectorUtils.AddMult(in _newGrad, -1, ref _dir); } else - VectorUtils.ScaleInto(ref _newGrad, -1, ref _dir); + VectorUtils.ScaleInto(in _newGrad, -1, ref _dir); _newPoint.CopyTo(ref _point); _newGrad.CopyTo(ref _grad); _value = _newValue; @@ -352,7 +352,7 @@ public void ChangeDir() public void Minimize(DifferentiableFunction function, ref VBuffer initial, ref VBuffer result) { Contracts.Check(FloatUtils.IsFinite(initial.Values, initial.Count), "The initial vector contains NaNs or infinite values."); - LineFunc lineFunc = new LineFunc(function, ref initial, UseCG); + LineFunc lineFunc = new LineFunc(function, in initial, UseCG); VBuffer prev = default(VBuffer); initial.CopyTo(ref prev); @@ -360,8 +360,8 @@ public void Minimize(DifferentiableFunction function, ref VBuffer initial { Float step = LineSearch.Minimize(lineFunc.Eval, lineFunc.Value, lineFunc.Deriv); var newPoint = lineFunc.NewPoint; - bool terminateNow = n > 0 && TerminateTester.ShouldTerminate(ref newPoint, ref prev); - if (terminateNow || Terminate(ref newPoint)) + bool terminateNow = n > 0 && TerminateTester.ShouldTerminate(in newPoint, in prev); + if (terminateNow || Terminate(in newPoint)) break; newPoint.CopyTo(ref prev); lineFunc.ChangeDir(); @@ -382,7 +382,7 @@ internal static class TerminateTester /// The current value. /// The value from the previous iteration. /// True if the optimization routine should terminate at this iteration. 
- internal static bool ShouldTerminate(ref VBuffer x, ref VBuffer xprev) + internal static bool ShouldTerminate(in VBuffer x, in VBuffer xprev) { Contracts.Assert(x.Length == xprev.Length, "Vectors must have the same dimensionality."); Contracts.Assert(FloatUtils.IsFinite(xprev.Values, xprev.Count)); diff --git a/src/Microsoft.ML.StandardLearners/Standard/LinearClassificationTrainer.cs b/src/Microsoft.ML.StandardLearners/Standard/LinearClassificationTrainer.cs index dec0a6e38d..5805629230 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LinearClassificationTrainer.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LinearClassificationTrainer.cs @@ -130,14 +130,14 @@ protected RoleMappedData PrepareDataFromTrainingExamples(IChannel ch, RoleMapped protected abstract void CheckLabel(RoleMappedData examples, out int weightSetCount); - protected float WDot(ref VBuffer features, ref VBuffer weights, float bias) + protected float WDot(in VBuffer features, in VBuffer weights, float bias) { - return VectorUtils.DotProduct(ref weights, ref features) + bias; + return VectorUtils.DotProduct(in weights, in features) + bias; } - protected float WScaledDot(ref VBuffer features, Double scaling, ref VBuffer weights, float bias) + protected float WScaledDot(in VBuffer features, Double scaling, in VBuffer weights, float bias) { - return VectorUtils.DotProduct(ref weights, ref features) * (float)scaling + bias; + return VectorUtils.DotProduct(in weights, in features) * (float)scaling + bias; } protected virtual int ComputeNumThreads(FloatLabelCursor.Factory cursorFactory) @@ -275,9 +275,9 @@ internal SdcaTrainerBase(IHostEnvironment env, TArgs args, SchemaShape.Column la Args.Check(env); } - protected float WDot(ref VBuffer features, ref VBuffer weights, float bias) + protected float WDot(in VBuffer features, in VBuffer weights, float bias) { - return VectorUtils.DotProduct(ref weights, ref features) + bias; + return VectorUtils.DotProduct(in weights, in features) + bias; } protected sealed override TModel TrainCore(IChannel ch, RoleMappedData data, LinearPredictor predictor, int weightSetCount) @@ -790,7 +790,7 @@ protected virtual void TrainWithoutLock(IProgressChannelProvider progress, Float for (int numTrials = 0; numTrials < maxUpdateTrials; numTrials++) { var dual = duals[idx]; - var output = WDot(ref features, ref weights[0], biasReg[0] + biasUnreg[0]); + var output = WDot(in features, in weights[0], biasReg[0] + biasUnreg[0]); var dualUpdate = Loss.DualUpdate(output, label, dual, invariant, numThreads); // The successive over-relaxation apporach to adjust the sum of dual variables (biasReg) to zero. 
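WDot and WScaledDot in the hunk above reduce scoring to DotProduct(in weights, in features) + bias, where the features may arrive as a sparse VBuffer. A sketch of the sparse-against-dense inner product such a call must perform; the indices/values layout here is illustrative and not the actual VectorUtils code:

```csharp
using System;

public static class SparseDotDemo
{
    // Only the explicitly stored slots of the sparse side contribute, so the
    // loop is O(#nonzeros) rather than O(logical length).
    public static float DotSparseDense(int[] indices, float[] values, float[] dense)
    {
        float sum = 0;
        for (int i = 0; i < indices.Length; i++)
            sum += values[i] * dense[indices[i]];
        return sum;
    }

    public static void Main()
    {
        // Sparse feature vector of logical length 6 with two stored slots.
        int[] idx = { 1, 4 };
        float[] val = { 2f, -1f };
        float[] weights = { 0.5f, 1f, 1f, 1f, 3f, 1f };
        float bias = 0.25f;

        // The WDot pattern: DotProduct(weights, features) + bias.
        Console.WriteLine(DotSparseDense(idx, val, weights) + bias); // -0.75
    }
}
```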
@@ -812,7 +812,7 @@ protected virtual void TrainWithoutLock(IProgressChannelProvider progress, Float if (l1ThresholdZero) { - VectorUtils.AddMult(ref features, weights[0].Values, primalUpdate); + VectorUtils.AddMult(in features, weights[0].Values, primalUpdate); biasReg[0] += primalUpdate; } else @@ -929,7 +929,7 @@ protected virtual bool CheckConvergence( { var instanceWeight = GetInstanceWeight(cursor); var features = cursor.Features; - var output = WDot(ref features, ref weights[0], biasTotal); + var output = WDot(in features, in weights[0], biasTotal); Double subLoss = Loss.Loss(output, cursor.Label); long idx = getIndexFromIdAndRow(cursor.Id, row); Double subDualLoss = Loss.DualLoss(cursor.Label, duals[idx]); @@ -944,7 +944,7 @@ protected virtual bool CheckConvergence( Contracts.Assert(Args.L1Threshold.HasValue); Double l2Const = Args.L2Const.Value; Double l1Threshold = Args.L1Threshold.Value; - Double l1Regularizer = l1Threshold * l2Const * (VectorUtils.L1Norm(ref weights[0]) + Math.Abs(biasReg[0])); + Double l1Regularizer = l1Threshold * l2Const * (VectorUtils.L1Norm(in weights[0]) + Math.Abs(biasReg[0])); var l2Regularizer = l2Const * (VectorUtils.NormSquared(weights[0]) + biasReg[0] * biasReg[0]) * 0.5; var newLoss = lossSum.Sum / count + l2Regularizer + l1Regularizer; var newDualLoss = dualLossSum.Sum / count - l2Regularizer - l2Const * biasUnreg[0] * biasReg[0]; @@ -1548,10 +1548,11 @@ protected override TScalarPredictor CreatePredictor(VBuffer[] weights, fl Host.CheckParam(weights[0].Length > 0, nameof(weights)); VBuffer maybeSparseWeights = default; - VBufferUtils.CreateMaybeSparseCopy(ref weights[0], ref maybeSparseWeights, + // below should be `in weights[0]`, but can't because of https://github.com/dotnet/roslyn/issues/29371 + VBufferUtils.CreateMaybeSparseCopy(weights[0], ref maybeSparseWeights, Conversions.Instance.GetIsDefaultPredicate(NumberType.Float)); - var predictor = new LinearBinaryPredictor(Host, ref maybeSparseWeights, bias[0]); + var predictor = new LinearBinaryPredictor(Host, in maybeSparseWeights, bias[0]); if (!(_loss is LogLoss)) return predictor; return new ParameterMixingCalibratedPredictor(Host, predictor, new PlattCalibrator(Host, -1, 0)); @@ -1795,7 +1796,7 @@ protected override TScalarPredictor TrainCore(IChannel ch, RoleMappedData data, count++; var instanceWeight = cursor.Weight; var features = cursor.Features; - Double subLoss = lossFunc.Loss(WScaledDot(ref features, weightScaling, ref weights, bias), cursor.Label); + Double subLoss = lossFunc.Loss(WScaledDot(in features, weightScaling, in weights, bias), cursor.Label); if (cursor.Label > 0) lossSum.Add(subLoss * instanceWeight * positiveInstanceWeight); @@ -1834,7 +1835,7 @@ protected override TScalarPredictor TrainCore(IChannel ch, RoleMappedData data, { VBuffer features = cursor.Features; float label = cursor.Label; - float derivative = cursor.Weight * lossFunc.Derivative(WScaledDot(ref features, weightScaling, ref weights, bias), label); // complexity: O(k) + float derivative = cursor.Weight * lossFunc.Derivative(WScaledDot(in features, weightScaling, in weights, bias), label); // complexity: O(k) //Note that multiplying the gradient by a weight h is not equivalent to doing h updates //on the same instance. 
A potentially better way to do weighted update is described in @@ -1845,7 +1846,7 @@ protected override TScalarPredictor TrainCore(IChannel ch, RoleMappedData data, Double rate = ilr / (1 + ilr * l2Weight * (t++)); Double step = -derivative * rate; weightScaling *= 1 - rate * l2Weight; - VectorUtils.AddMult(ref features, weights.Values, (float)(step / weightScaling)); + VectorUtils.AddMult(in features, weights.Values, (float)(step / weightScaling)); bias += (float)step; } if (e == 1) @@ -1910,8 +1911,8 @@ protected override TScalarPredictor TrainCore(IChannel ch, RoleMappedData data, VectorUtils.ScaleBy(ref weights, (float)weightScaling); // restore the true weights VBuffer maybeSparseWeights = default; - VBufferUtils.CreateMaybeSparseCopy(ref weights, ref maybeSparseWeights, Conversions.Instance.GetIsDefaultPredicate(NumberType.Float)); - var pred = new LinearBinaryPredictor(Host, ref maybeSparseWeights, bias); + VBufferUtils.CreateMaybeSparseCopy(in weights, ref maybeSparseWeights, Conversions.Instance.GetIsDefaultPredicate(NumberType.Float)); + var pred = new LinearBinaryPredictor(Host, in maybeSparseWeights, bias); if (!(_loss is LogLoss)) return pred; return new ParameterMixingCalibratedPredictor(Host, pred, new PlattCalibrator(Host, -1, 0)); diff --git a/src/Microsoft.ML.StandardLearners/Standard/LinearPredictor.cs b/src/Microsoft.ML.StandardLearners/Standard/LinearPredictor.cs index db1075fc2d..3d0569644b 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LinearPredictor.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LinearPredictor.cs @@ -111,7 +111,7 @@ IEnumerator IEnumerable.GetEnumerator() /// The weights for the linear predictor. Note that this /// will take ownership of the . /// The bias added to every output score. - internal LinearPredictor(IHostEnvironment env, string name, ref VBuffer weights, Float bias) + internal LinearPredictor(IHostEnvironment env, string name, in VBuffer weights, Float bias) : base(env, name) { Host.CheckParam(FloatUtils.IsFinite(weights.Values, weights.Count), nameof(weights), "Cannot initialize linear predictor with non-finite weights"); @@ -246,25 +246,25 @@ public bool SaveAsOnnx(OnnxContext ctx, string[] outputs, string featureColumn) } // Generate the score from the given values, assuming they have already been normalized. 
- protected virtual Float Score(ref VBuffer src) + protected virtual Float Score(in VBuffer src) { if (src.IsDense) { var weights = Weight; - return Bias + VectorUtils.DotProduct(ref weights, ref src); + return Bias + VectorUtils.DotProduct(in weights, in src); } EnsureWeightsDense(); - return Bias + VectorUtils.DotProduct(ref _weightsDense, ref src); + return Bias + VectorUtils.DotProduct(in _weightsDense, in src); } - protected virtual void GetFeatureContributions(ref VBuffer features, ref VBuffer contributions, int top, int bottom, bool normalize) + protected virtual void GetFeatureContributions(in VBuffer features, ref VBuffer contributions, int top, int bottom, bool normalize) { if (features.Length != Weight.Length) throw Contracts.Except("Input is of length {0} does not match expected length of weights {1}", features.Length, Weight.Length); var weights = Weight; - VBuffer.Copy(ref features, ref contributions); - VectorUtils.MulElementWise(ref weights, ref contributions); + features.CopyTo(ref contributions); + VectorUtils.MulElementWise(in weights, ref contributions); VectorUtils.SparsifyNormalize(ref contributions, top, bottom, normalize); } @@ -291,7 +291,7 @@ public ValueMapper GetMapper() { if (src.Length != Weight.Length) throw Contracts.Except("Input is of length {0}, but predictor expected length {1}", src.Length, Weight.Length); - dst = Score(ref src); + dst = Score(in src); }; return (ValueMapper)(Delegate)del; } @@ -317,7 +317,7 @@ protected void CombineParameters(IList> models, out VBuff var sub = (LinearPredictor)m; var subweights = sub.Weight; - VectorUtils.Add(ref subweights, ref weights); + VectorUtils.Add(in subweights, ref weights); bias += sub.Bias; } VectorUtils.ScaleBy(ref weights, (Float)1 / models.Count); @@ -338,7 +338,7 @@ public void SaveAsCode(TextWriter writer, RoleMappedSchema schema) Host.CheckValue(schema, nameof(schema)); var weights = Weight; - LinearPredictorUtils.SaveAsCode(writer, ref weights, Bias, schema); + LinearPredictorUtils.SaveAsCode(writer, in weights, Bias, schema); } public abstract void SaveSummary(TextWriter writer, RoleMappedSchema schema); @@ -382,7 +382,7 @@ public ValueMapper> GetWhatTheFeatureMapper, VBuffer> del = (ref VBuffer src, ref VBuffer dstContributions) => { - GetFeatureContributions(ref src, ref dstContributions, top, bottom, normalize); + GetFeatureContributions(in src, ref dstContributions, top, bottom, normalize); }; return (ValueMapper>)(Delegate)del; } @@ -421,8 +421,8 @@ private static VersionInfo GetVersionInfo() /// will take ownership of the . /// The bias added to every output score. /// - public LinearBinaryPredictor(IHostEnvironment env, ref VBuffer weights, Float bias, LinearModelStatistics stats = null) - : base(env, RegistrationName, ref weights, bias) + public LinearBinaryPredictor(IHostEnvironment env, in VBuffer weights, Float bias, LinearModelStatistics stats = null) + : base(env, RegistrationName, in weights, bias) { Contracts.AssertValueOrNull(stats); _stats = stats; @@ -498,7 +498,7 @@ public IParameterMixer CombineParameters(IList> mo VBuffer weights; Float bias; CombineParameters(models, out weights, out bias); - return new LinearBinaryPredictor(Host, ref weights, bias); + return new LinearBinaryPredictor(Host, in weights, bias); } public override void SaveSummary(TextWriter writer, RoleMappedSchema schema) @@ -508,7 +508,7 @@ public override void SaveSummary(TextWriter writer, RoleMappedSchema schema) // REVIEW: Would be nice to have the settings! 
var weights = Weight; writer.WriteLine(LinearPredictorUtils.LinearModelAsText("Linear Binary Classification Predictor", null, null, - ref weights, Bias, schema)); + in weights, Bias, schema)); _stats?.SaveText(writer, this, schema, 20); } @@ -520,7 +520,7 @@ public IList> GetSummaryInKeyValuePairs(RoleMappedS var weights = Weight; List> results = new List>(); - LinearPredictorUtils.SaveLinearModelWeightsInKeyValuePairs(ref weights, Bias, schema, results); + LinearPredictorUtils.SaveLinearModelWeightsInKeyValuePairs(in weights, Bias, schema, results); _stats?.SaveSummaryInKeyValuePairs(this, schema, int.MaxValue, results); return results; } @@ -534,7 +534,7 @@ public override IRow GetStatsIRowOrNull(RoleMappedSchema schema) MetadataUtils.GetSlotNames(schema, RoleMappedSchema.ColumnRole.Feature, Weight.Length, ref names); // Add the stat columns. - _stats.AddStatsColumns(cols, this, schema, ref names); + _stats.AddStatsColumns(cols, this, schema, in names); return RowColumnUtils.GetRow(null, cols.ToArray()); } @@ -545,15 +545,15 @@ public override void SaveAsIni(TextWriter writer, RoleMappedSchema schema, ICali Host.CheckValueOrNull(calibrator); var weights = Weight; - writer.Write(LinearPredictorUtils.LinearModelAsIni(ref weights, Bias, this, + writer.Write(LinearPredictorUtils.LinearModelAsIni(in weights, Bias, this, schema, calibrator as PlattCalibrator)); } } public abstract class RegressionPredictor : LinearPredictor { - protected RegressionPredictor(IHostEnvironment env, string name, ref VBuffer weights, Float bias) - : base(env, name, ref weights, bias) + protected RegressionPredictor(IHostEnvironment env, string name, in VBuffer weights, Float bias) + : base(env, name, in weights, bias) { } @@ -579,7 +579,7 @@ public override void SaveAsIni(TextWriter writer, RoleMappedSchema schema, ICali // REVIEW: For Poisson should encode the exp operation in the ini as well, bug 2433. var weights = Weight; - writer.Write(LinearPredictorUtils.LinearModelAsIni(ref weights, Bias, this, schema, null)); + writer.Write(LinearPredictorUtils.LinearModelAsIni(in weights, Bias, this, schema, null)); } } @@ -609,8 +609,8 @@ private static VersionInfo GetVersionInfo() /// The weights for the linear predictor. Note that this /// will take ownership of the . /// The bias added to every output score. - public LinearRegressionPredictor(IHostEnvironment env, ref VBuffer weights, Float bias) - : base(env, RegistrationName, ref weights, bias) + public LinearRegressionPredictor(IHostEnvironment env, in VBuffer weights, Float bias) + : base(env, RegistrationName, in weights, bias) { } @@ -641,7 +641,7 @@ public override void SaveSummary(TextWriter writer, RoleMappedSchema schema) // REVIEW: Would be nice to have the settings! 
var weights = Weight; writer.WriteLine(LinearPredictorUtils.LinearModelAsText("Linear Regression Predictor", null, null, - ref weights, Bias, schema, null)); + in weights, Bias, schema, null)); } /// @@ -652,7 +652,7 @@ public IParameterMixer CombineParameters(IList> mo VBuffer weights; Float bias; CombineParameters(models, out weights, out bias); - return new LinearRegressionPredictor(Host, ref weights, bias); + return new LinearRegressionPredictor(Host, in weights, bias); } /// @@ -662,7 +662,7 @@ public IList> GetSummaryInKeyValuePairs(RoleMappedS var weights = Weight; List> results = new List>(); - LinearPredictorUtils.SaveLinearModelWeightsInKeyValuePairs(ref weights, Bias, schema, results); + LinearPredictorUtils.SaveLinearModelWeightsInKeyValuePairs(in weights, Bias, schema, results); return results; } @@ -685,8 +685,8 @@ private static VersionInfo GetVersionInfo() loaderAssemblyName: typeof(PoissonRegressionPredictor).Assembly.FullName); } - internal PoissonRegressionPredictor(IHostEnvironment env, ref VBuffer weights, Float bias) - : base(env, RegistrationName, ref weights, bias) + internal PoissonRegressionPredictor(IHostEnvironment env, in VBuffer weights, Float bias) + : base(env, RegistrationName, in weights, bias) { } @@ -709,9 +709,9 @@ protected override void SaveCore(ModelSaveContext ctx) ctx.SetVersionInfo(GetVersionInfo()); } - protected override Float Score(ref VBuffer src) + protected override Float Score(in VBuffer src) { - return MathUtils.ExpSlow(base.Score(ref src)); + return MathUtils.ExpSlow(base.Score(in src)); } public override void SaveSummary(TextWriter writer, RoleMappedSchema schema) @@ -722,7 +722,7 @@ public override void SaveSummary(TextWriter writer, RoleMappedSchema schema) // REVIEW: Would be nice to have the settings! 
var weights = Weight; writer.WriteLine(LinearPredictorUtils.LinearModelAsText("Poisson Regression Predictor", null, null, - ref weights, Bias, schema, null)); + in weights, Bias, schema, null)); } /// @@ -733,7 +733,7 @@ public IParameterMixer CombineParameters(IList> mo VBuffer weights; Float bias; CombineParameters(models, out weights, out bias); - return new PoissonRegressionPredictor(Host, ref weights, bias); + return new PoissonRegressionPredictor(Host, in weights, bias); } } } \ No newline at end of file diff --git a/src/Microsoft.ML.StandardLearners/Standard/LinearPredictorUtils.cs b/src/Microsoft.ML.StandardLearners/Standard/LinearPredictorUtils.cs index 505d3363e3..244019bcdd 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LinearPredictorUtils.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LinearPredictorUtils.cs @@ -29,7 +29,7 @@ internal static class LinearPredictorUtils /// /// print the linear model as code /// - public static void SaveAsCode(TextWriter writer, ref VBuffer weights, Float bias, + public static void SaveAsCode(TextWriter writer, in VBuffer weights, Float bias, RoleMappedSchema schema, string codeVariable = "output") { Contracts.CheckValue(writer, nameof(writer)); @@ -41,7 +41,7 @@ public static void SaveAsCode(TextWriter writer, ref VBuffer weights, Flo int numNonZeroWeights = 0; writer.Write(codeVariable); writer.Write(" = "); - VBufferUtils.ForEachDefined(ref weights, + VBufferUtils.ForEachDefined(in weights, (idx, value) => { if (Math.Abs(value - 0) >= Epsilon) @@ -94,7 +94,7 @@ private static string FeatureNameAsCode(string featureName, int idx) /// /// Build a Bing TreeEnsemble .ini representation of the given predictor /// - public static string LinearModelAsIni(ref VBuffer weights, Float bias, IPredictor predictor = null, + public static string LinearModelAsIni(in VBuffer weights, Float bias, IPredictor predictor = null, RoleMappedSchema schema = null, PlattCalibrator calibrator = null) { // TODO: Might need to consider a max line length for the Weights list, requiring us to split it up into @@ -108,7 +108,7 @@ public static string LinearModelAsIni(ref VBuffer weights, Float bias, IP int numNonZeroWeights = 0; const string weightsSep = "\t"; - VBufferUtils.ForEachDefined(ref weights, + VBufferUtils.ForEachDefined(in weights, (idx, value) => { if (Math.Abs(value - 0) >= Epsilon) @@ -176,7 +176,7 @@ public static string LinearModelAsIni(ref VBuffer weights, Float bias, IP /// Output the weights of a linear model to a given writer /// public static string LinearModelAsText( - string userName, string loadName, string settings, ref VBuffer weights, Float bias, + string userName, string loadName, string settings, in VBuffer weights, Float bias, RoleMappedSchema schema = null, PlattCalibrator calibrator = null) { // Review: added a text description for each calibrator (not only Platt), would be nice to add to this method. 
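The LinearPredictorUtils hunks render one weight vector several ways (C# code, Bing-style ini, plain text), each time walking only the defined entries and ignoring values whose magnitude falls below Epsilon; GetSortedLinearModelFeatureNamesAndWeights additionally orders the survivors. A compact sketch of that filter-and-order step; the names, weights, threshold value, and descending-magnitude ordering are this sketch's assumptions:

```csharp
using System;
using System.Linq;

public static class WeightReportDemo
{
    const float Epsilon = 1e-7f; // illustrative threshold, not the library's

    public static void Main()
    {
        // Made-up feature names and trained weights.
        string[] names = { "age", "height", "bmi", "unused" };
        float[] weights = { 0.8f, -1.5f, 0f, 1e-9f };
        float bias = 0.3f;

        // Drop (near-)zero weights, then list by descending magnitude,
        // assumed to be the ordering behind "orderedWeights" above.
        var report = weights
            .Select((w, i) => (Name: names[i], Weight: w))
            .Where(p => Math.Abs(p.Weight) >= Epsilon)
            .OrderByDescending(p => Math.Abs(p.Weight));

        Console.WriteLine($"(Bias)\t{bias}");
        foreach (var (name, w) in report)
            Console.WriteLine($"{name}\t{w}");
    }
}
```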
@@ -195,7 +195,7 @@ public static string LinearModelAsText( b.AppendLine(); List> weightValues = new List>(); - SaveLinearModelWeightsInKeyValuePairs(ref weights, bias, schema, weightValues); + SaveLinearModelWeightsInKeyValuePairs(in weights, bias, schema, weightValues); foreach (var weightValue in weightValues) { Contracts.Assert(weightValue.Value is Float); @@ -206,7 +206,7 @@ public static string LinearModelAsText( } public static IEnumerable> GetSortedLinearModelFeatureNamesAndWeights(Single bias, - ref VBuffer weights, ref VBuffer> names) + in VBuffer weights, in VBuffer> names) { var orderedWeights = weights.Items() .Where(weight => Math.Abs(weight.Value) >= Epsilon) @@ -227,12 +227,12 @@ public static IEnumerable> GetSortedLinearModelFeat /// Output the weights of a linear model to key value pairs. /// public static void SaveLinearModelWeightsInKeyValuePairs( - ref VBuffer weights, Float bias, RoleMappedSchema schema, List> results) + in VBuffer weights, Float bias, RoleMappedSchema schema, List> results) { var names = default(VBuffer>); MetadataUtils.GetSlotNames(schema, RoleMappedSchema.ColumnRole.Feature, weights.Length, ref names); - var pairs = GetSortedLinearModelFeatureNamesAndWeights(bias, ref weights, ref names); + var pairs = GetSortedLinearModelFeatureNamesAndWeights(bias, in weights, in names); foreach (var kvp in pairs) results.Add(new KeyValuePair(kvp.Key, kvp.Value)); diff --git a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs index 048dc11d34..f17e617e29 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs @@ -301,11 +301,11 @@ protected virtual VBuffer InitializeWeightsSgd(IChannel ch, FloatLabelCur int numExamples = 0; var oldWeights = VBufferUtils.CreateEmpty(BiasCount + WeightCount); DTerminate terminateSgd = - (ref VBuffer x) => + (in VBuffer x) => { if (++numExamples % 1000 != 0) return false; - VectorUtils.AddMult(ref x, -1, ref oldWeights); + VectorUtils.AddMult(in x, -1, ref oldWeights); float normDiff = VectorUtils.Norm(oldWeights); x.CopyTo(ref oldWeights); // #if OLD_TRACING // REVIEW: How should this be ported? @@ -326,7 +326,7 @@ protected virtual VBuffer InitializeWeightsSgd(IChannel ch, FloatLabelCur float[] scratch = null; SgdOptimizer.DStochasticGradient lossSgd = - (ref VBuffer x, ref VBuffer grad) => + (in VBuffer x, ref VBuffer grad) => { // Zero out the gradient by sparsifying. 
grad = new VBuffer(grad.Length, 0, grad.Values, grad.Indices); @@ -340,7 +340,7 @@ protected virtual VBuffer InitializeWeightsSgd(IChannel ch, FloatLabelCur if (!cursor.MoveNext()) return; } - AccumulateOneGradient(ref cursor.Features, cursor.Label, cursor.Weight, ref x, ref grad, ref scratch); + AccumulateOneGradient(in cursor.Features, cursor.Label, cursor.Weight, in x, ref grad, ref scratch); }; VBuffer sgdWeights; @@ -369,7 +369,7 @@ protected virtual VBuffer InitializeWeightsSgd(IChannel ch, FloatLabelCur protected abstract void CheckLabel(RoleMappedData data); - protected virtual void PreTrainingProcessInstance(float label, ref VBuffer feat, float weight) + protected virtual void PreTrainingProcessInstance(float label, in VBuffer feat, float weight) { } @@ -458,7 +458,7 @@ protected virtual void TrainCore(IChannel ch, RoleMappedData data) if (ShowTrainingStats) ProcessPriorDistribution(cursor.Label, cursor.Weight); - PreTrainingProcessInstance(cursor.Label, ref cursor.Features, cursor.Weight); + PreTrainingProcessInstance(cursor.Label, in cursor.Features, cursor.Weight); exCount++; if (_features != null) { @@ -556,7 +556,7 @@ protected virtual void TrainCore(IChannel ch, RoleMappedData data) int numParams = BiasCount; if ((L1Weight > 0 && !Quiet) || ShowTrainingStats) { - VBufferUtils.ForEachDefined(ref CurrentWeights, (index, value) => { if (index >= BiasCount && value != 0) numParams++; }); + VBufferUtils.ForEachDefined(in CurrentWeights, (index, value) => { if (index >= BiasCount && value != 0) numParams++; }); if (L1Weight > 0 && !Quiet) ch.Info("L1 regularization selected {0} of {1} weights.", numParams, BiasCount + WeightCount); } @@ -575,8 +575,8 @@ protected void EnsureBiases(ref VBuffer vec) VBufferUtils.DensifyFirst(ref vec, BiasCount); } - protected abstract float AccumulateOneGradient(ref VBuffer feat, float label, float weight, - ref VBuffer xDense, ref VBuffer grad, ref float[] scratch); + protected abstract float AccumulateOneGradient(in VBuffer feat, float label, float weight, + in VBuffer xDense, ref VBuffer grad, ref float[] scratch); protected abstract void ComputeTrainingStatistics(IChannel ch, FloatLabelCursor.Factory cursorFactory, float loss, int numParams); @@ -584,7 +584,7 @@ protected abstract float AccumulateOneGradient(ref VBuffer feat, float la /// /// The gradient being used by the optimizer /// - protected virtual float DifferentiableFunction(ref VBuffer x, ref VBuffer gradient, + protected virtual float DifferentiableFunction(in VBuffer x, ref VBuffer gradient, IProgressChannelProvider progress) { Contracts.Assert((_numChunks == 0) != (_data == null)); @@ -606,8 +606,8 @@ protected virtual float DifferentiableFunction(ref VBuffer x, ref VBuffer using (pch) { loss = _data == null - ? DifferentiableFunctionMultithreaded(ref xDense, ref gradient, pch) - : DifferentiableFunctionStream(_cursorFactory, ref xDense, ref gradient, pch); + ? 
DifferentiableFunctionMultithreaded(in xDense, ref gradient, pch) + : DifferentiableFunctionStream(_cursorFactory, in xDense, ref gradient, pch); } float regLoss = 0; if (L2Weight > 0) @@ -623,7 +623,7 @@ protected virtual float DifferentiableFunction(ref VBuffer x, ref VBuffer regLoss = (float)(r * L2Weight * 0.5); // Here we probably want to use sparse x - VBufferUtils.ApplyWithEitherDefined(ref x, ref gradient, + VBufferUtils.ApplyWithEitherDefined(in x, ref gradient, (int ind, float v1, ref float v2) => { if (ind >= BiasCount) v2 += L2Weight * v1; }); } VectorUtils.ScaleBy(ref gradient, scaleFactor); @@ -640,7 +640,7 @@ protected virtual float DifferentiableFunction(ref VBuffer x, ref VBuffer /// REVIEW: consider getting rid of multithread-targeted members /// Using TPL, the distinction between Multithreaded and Sequential implementations is unnecessary /// - protected virtual float DifferentiableFunctionMultithreaded(ref VBuffer xDense, ref VBuffer gradient, IProgressChannel pch) + protected virtual float DifferentiableFunctionMultithreaded(in VBuffer xDense, ref VBuffer gradient, IProgressChannel pch) { Contracts.Assert(_data == null); Contracts.Assert(_cursorFactory == null); @@ -658,21 +658,21 @@ protected virtual float DifferentiableFunctionMultithreaded(ref VBuffer x ichk => { if (ichk == 0) - _localLosses[ichk] = DifferentiableFunctionComputeChunk(ichk, ref xx, ref gg, pch); + _localLosses[ichk] = DifferentiableFunctionComputeChunk(ichk, in xx, ref gg, pch); else - _localLosses[ichk] = DifferentiableFunctionComputeChunk(ichk, ref xx, ref _localGradients[ichk - 1], null); + _localLosses[ichk] = DifferentiableFunctionComputeChunk(ichk, in xx, ref _localGradients[ichk - 1], null); }); gradient = gg; float loss = _localLosses[0]; for (int i = 1; i < _numChunks; i++) { - VectorUtils.Add(ref _localGradients[i - 1], ref gradient); + VectorUtils.Add(in _localGradients[i - 1], ref gradient); loss += _localLosses[i]; } return loss; } - protected float DifferentiableFunctionComputeChunk(int ichk, ref VBuffer xDense, ref VBuffer grad, IProgressChannel pch) + protected float DifferentiableFunctionComputeChunk(int ichk, in VBuffer xDense, ref VBuffer grad, IProgressChannel pch) { Contracts.Assert(0 <= ichk && ichk < _numChunks); Contracts.AssertValueOrNull(pch); @@ -690,7 +690,7 @@ protected float DifferentiableFunctionComputeChunk(int ichk, ref VBuffer for (iv = ivMin; iv < ivLim; iv++) { float weight = _weights != null ? 
_weights[iv] : 1; - loss += AccumulateOneGradient(ref _features[iv], _labels[iv], weight, ref xDense, ref grad, ref scratch); + loss += AccumulateOneGradient(in _features[iv], _labels[iv], weight, in xDense, ref grad, ref scratch); } // we need to use double type to accumulate loss to avoid roundoff error // please see http://mathworld.wolfram.com/RoundoffError.html for roundoff error definition @@ -698,7 +698,7 @@ protected float DifferentiableFunctionComputeChunk(int ichk, ref VBuffer return (float)loss; } - protected float DifferentiableFunctionStream(FloatLabelCursor.Factory cursorFactory, ref VBuffer xDense, ref VBuffer grad, IProgressChannel pch) + protected float DifferentiableFunctionStream(FloatLabelCursor.Factory cursorFactory, in VBuffer xDense, ref VBuffer grad, IProgressChannel pch) { Contracts.AssertValue(cursorFactory); @@ -714,8 +714,8 @@ protected float DifferentiableFunctionStream(FloatLabelCursor.Factory cursorFact { while (cursor.MoveNext()) { - loss += AccumulateOneGradient(ref cursor.Features, cursor.Label, cursor.Weight, - ref xDense, ref grad, ref scratch); + loss += AccumulateOneGradient(in cursor.Features, cursor.Label, cursor.Weight, + in xDense, ref grad, ref scratch); count++; } } diff --git a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LogisticRegression.cs b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LogisticRegression.cs index 48757b347a..2db21d6320 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LogisticRegression.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LogisticRegression.cs @@ -111,12 +111,12 @@ protected override SchemaShape.Column[] GetOutputColumnsCore(SchemaShape inputSc protected override BinaryPredictionTransformer MakeTransformer(ParameterMixingCalibratedPredictor model, Schema trainSchema) => new BinaryPredictionTransformer(Host, model, trainSchema, FeatureColumn.Name); - protected override float AccumulateOneGradient(ref VBuffer feat, float label, float weight, - ref VBuffer x, ref VBuffer grad, ref float[] scratch) + protected override float AccumulateOneGradient(in VBuffer feat, float label, float weight, + in VBuffer x, ref VBuffer grad, ref float[] scratch) { float bias = 0; x.GetItemOrDefault(0, ref bias); - float score = bias + VectorUtils.DotProductWithOffset(ref x, 1, ref feat); + float score = bias + VectorUtils.DotProductWithOffset(in x, 1, in feat); float s = score / 2; @@ -131,7 +131,7 @@ protected override float AccumulateOneGradient(ref VBuffer feat, float la Contracts.Check(!float.IsNaN(datumLoss), "Unexpected NaN"); float mult = weight * (modelProb1 - label01); - VectorUtils.AddMultWithOffset(ref feat, mult, ref grad, 1); // Note that 0th L-BFGS weight is for bias. + VectorUtils.AddMultWithOffset(in feat, mult, ref grad, 1); // Note that 0th L-BFGS weight is for bias. // Add bias using this strange trick that has the advantage of working well for dense and sparse arrays. // Due to the call to EnsureBiases, we know this region is dense. Contracts.Assert(grad.Count >= BiasCount && (grad.IsDense || grad.Indices[BiasCount - 1] == BiasCount - 1)); @@ -169,7 +169,7 @@ protected override void ComputeTrainingStatistics(IChannel ch, FloatLabelCursor. // Need to subtract L1 regularization loss. // The bias term is not regularized.
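// (Bookkeeping note: deviance is accumulated as twice the optimizer's loss, and that loss includes the L1 penalty L1Weight * sum(|w_i|) over the non-bias weights, hence the factor of 2 when the penalty is backed out below.)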
Double regLoss = 0; - VBufferUtils.ForEachDefined(ref CurrentWeights, (ind, value) => { if (ind >= BiasCount) regLoss += Math.Abs(value); }); + VBufferUtils.ForEachDefined(in CurrentWeights, (ind, value) => { if (ind >= BiasCount) regLoss += Math.Abs(value); }); deviance -= (float)regLoss * L1Weight * 2; } @@ -268,7 +268,7 @@ protected override void ComputeTrainingStatistics(IChannel ch, FloatLabelCursor. { var label = cursor.Label; var weight = cursor.Weight; - var score = bias + VectorUtils.DotProductWithOffset(ref CurrentWeights, 1, ref cursor.Features); + var score = bias + VectorUtils.DotProductWithOffset(in CurrentWeights, 1, in cursor.Features); // Compute Bernoulli variance n_i * p_i * (1 - p_i) for the i-th training example. var variance = weight / (2 + 2 * Math.Cosh(score)); @@ -373,7 +373,7 @@ protected override ParameterMixingCalibratedPredictor CreatePredictor() CurrentWeights.GetItemOrDefault(0, ref bias); CurrentWeights.CopyTo(ref weights, 1, CurrentWeights.Length - 1); return new ParameterMixingCalibratedPredictor(Host, - new LinearBinaryPredictor(Host, ref weights, bias, _stats), + new LinearBinaryPredictor(Host, in weights, bias, _stats), new PlattCalibrator(Host, -1, 0)); } diff --git a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/MulticlassLogisticRegression.cs b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/MulticlassLogisticRegression.cs index 70e896feef..99bc373498 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/MulticlassLogisticRegression.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/MulticlassLogisticRegression.cs @@ -189,8 +189,8 @@ protected override Optimizer InitializeOptimizer(IChannel ch, FloatLabelCursor.F return opt; } - protected override float AccumulateOneGradient(ref VBuffer feat, float label, float weight, - ref VBuffer x, ref VBuffer grad, ref float[] scores) + protected override float AccumulateOneGradient(in VBuffer feat, float label, float weight, + in VBuffer x, ref VBuffer grad, ref float[] scores) { if (Utils.Size(scores) < _numClasses) scores = new float[_numClasses]; @@ -199,7 +199,7 @@ protected override float AccumulateOneGradient(ref VBuffer feat, float la for (int c = 0, start = _numClasses; c < _numClasses; c++, start += NumFeatures) { x.GetItemOrDefault(c, ref bias); - scores[c] = bias + VectorUtils.DotProductWithOffset(ref x, start, ref feat); + scores[c] = bias + VectorUtils.DotProductWithOffset(in x, start, in feat); } float logZ = MathUtils.SoftMax(scores, _numClasses); @@ -214,7 +214,7 @@ protected override float AccumulateOneGradient(ref VBuffer feat, float la float modelProb = MathUtils.ExpSlow(scores[c] - logZ); float mult = weight * (modelProb - probLabel); - VectorUtils.AddMultWithOffset(ref feat, mult, ref grad, start); + VectorUtils.AddMultWithOffset(in feat, mult, ref grad, start); // Due to the call to EnsureBiases, we know this region is dense. 
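// (EnsureBiases densifies the first BiasCount slots up front, so Values[c] is guaranteed to be the bias slot of class c and can be written directly whether or not the rest of the gradient is sparse.)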
Contracts.Assert(grad.Count >= BiasCount && (grad.IsDense || grad.Indices[BiasCount - 1] == BiasCount - 1)); grad.Values[c] += mult; @@ -248,7 +248,7 @@ protected override MulticlassLogisticRegressionPredictor CreatePredictor() } } - return new MulticlassLogisticRegressionPredictor(Host, ref CurrentWeights, _numClasses, NumFeatures, _labelNames, _stats); + return new MulticlassLogisticRegressionPredictor(Host, in CurrentWeights, _numClasses, NumFeatures, _labelNames, _stats); } protected override void ComputeTrainingStatistics(IChannel ch, FloatLabelCursor.Factory cursorFactory, float loss, int numParams) @@ -279,7 +279,7 @@ protected override void ComputeTrainingStatistics(IChannel ch, FloatLabelCursor. // Need to subtract L1 regularization loss. // The bias term is not regularized. Double regLoss = 0; - VBufferUtils.ForEachDefined(ref CurrentWeights, (ind, value) => { if (ind >= BiasCount) regLoss += Math.Abs(value); }); + VBufferUtils.ForEachDefined(in CurrentWeights, (ind, value) => { if (ind >= BiasCount) regLoss += Math.Abs(value); }); deviance -= (float)regLoss * L1Weight * 2; } @@ -395,7 +395,7 @@ private static VersionInfo GetVersionInfo() public bool CanSavePfa => true; public bool CanSaveOnnx(OnnxContext ctx) => true; - internal MulticlassLogisticRegressionPredictor(IHostEnvironment env, ref VBuffer weights, int numClasses, int numFeatures, string[] labelNames, LinearModelStatistics stats = null) + internal MulticlassLogisticRegressionPredictor(IHostEnvironment env, in VBuffer weights, int numClasses, int numFeatures, string[] labelNames, LinearModelStatistics stats = null) : base(env, RegistrationName) { Contracts.Assert(weights.Length == numClasses + numClasses * numFeatures); @@ -634,7 +634,7 @@ protected override void SaveCore(ModelSaveContext ctx) // This is actually a bug waiting to happen: sparse/dense vectors // can have different dot products even if they are logically the // same vector. - numIndices += NonZeroCount(ref _weights[i]); + numIndices += NonZeroCount(in _weights[i]); ctx.Writer.Write(numIndices); } @@ -708,7 +708,7 @@ protected override void SaveCore(ModelSaveContext ctx) } // REVIEW: Destroy. 
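// (On the sparse/dense caveat above: a sparse dot product skips implicit zeros entirely, while a dense one still multiplies them, and in IEEE arithmetic 0 * NaN and 0 * Infinity are NaN, so logically equal vectors can yield different results.)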
- private static int NonZeroCount(ref VBuffer vector) + private static int NonZeroCount(in VBuffer vector) { int count = 0; if (!vector.IsDense) @@ -741,13 +741,13 @@ public ValueMapper GetMapper() Host.Check(src.Length == _numFeatures); var values = dst.Values; - PredictCore(ref src, ref values); + PredictCore(in src, ref values); dst = new VBuffer(_numClasses, values, dst.Indices); }; return (ValueMapper)(Delegate)del; } - private void PredictCore(ref VBuffer src, ref float[] dst) + private void PredictCore(in VBuffer src, ref float[] dst) { Host.Check(src.Length == _numFeatures, "src length should equal the number of features"); var weights = _weights; @@ -758,7 +758,7 @@ private void PredictCore(ref VBuffer src, ref float[] dst) dst = new float[_numClasses]; for (int i = 0; i < _biases.Length; i++) - dst[i] = _biases[i] + VectorUtils.DotProduct(ref weights[i], ref src); + dst[i] = _biases[i] + VectorUtils.DotProduct(in weights[i], in src); Calibrate(dst); } @@ -867,7 +867,7 @@ public void SaveAsCode(TextWriter writer, RoleMappedSchema schema) for (int i = 0; i < _biases.Length; i++) { LinearPredictorUtils.SaveAsCode(writer, - ref _weights[i], + in _weights[i], _biases[i], schema, "score[" + i.ToString() + "]"); @@ -1024,7 +1024,7 @@ public IRow GetStatsIRowOrNull(RoleMappedSchema schema) var cols = new List(); var names = default(VBuffer>); - _stats.AddStatsColumns(cols, null, schema, ref names); + _stats.AddStatsColumns(cols, null, schema, in names); return RowColumnUtils.GetRow(null, cols.ToArray()); } } diff --git a/src/Microsoft.ML.StandardLearners/Standard/ModelStatistics.cs b/src/Microsoft.ML.StandardLearners/Standard/ModelStatistics.cs index 8e12b04c3f..5f8d096c81 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/ModelStatistics.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/ModelStatistics.cs @@ -104,7 +104,7 @@ internal LinearModelStatistics(IHostEnvironment env, long trainingExampleCount, _nullDeviance = nullDeviance; } - internal LinearModelStatistics(IHostEnvironment env, long trainingExampleCount, int paramCount, Single deviance, Single nullDeviance, ref VBuffer coeffStdError) + internal LinearModelStatistics(IHostEnvironment env, long trainingExampleCount, int paramCount, Single deviance, Single nullDeviance, in VBuffer coeffStdError) : this(env, trainingExampleCount, paramCount, deviance, nullDeviance) { _env.Assert(coeffStdError.Count == _paramCount); @@ -226,7 +226,7 @@ public static bool TryGetBiasStatistics(LinearModelStatistics stats, Single bias return true; } - private static void GetUnorderedCoefficientStatistics(LinearModelStatistics stats, ref VBuffer weights, ref VBuffer> names, + private static void GetUnorderedCoefficientStatistics(LinearModelStatistics stats, in VBuffer weights, in VBuffer> names, ref VBuffer estimate, ref VBuffer stdErr, ref VBuffer zScore, ref VBuffer pValue, out ValueGetter>> getSlotNames) { if (!stats._coeffStdError.HasValue) @@ -409,7 +409,7 @@ public void SaveSummaryInKeyValuePairs(LinearBinaryPredictor parent, } } - public void AddStatsColumns(List list, LinearBinaryPredictor parent, RoleMappedSchema schema, ref VBuffer> names) + public void AddStatsColumns(List list, LinearBinaryPredictor parent, RoleMappedSchema schema, in VBuffer> names) { _env.AssertValue(list); _env.AssertValueOrNull(parent); @@ -446,7 +446,7 @@ public void AddStatsColumns(List list, LinearBinaryPredictor parent, Ro var zScore = default(VBuffer); var pValue = default(VBuffer); ValueGetter>> getSlotNames; - 
GetUnorderedCoefficientStatistics(parent.Statistics, ref weights, ref names, ref estimate, ref stdErr, ref zScore, ref pValue, out getSlotNames); + GetUnorderedCoefficientStatistics(parent.Statistics, in weights, in names, ref estimate, ref stdErr, ref zScore, ref pValue, out getSlotNames); var slotNamesCol = RowColumnUtils.GetColumn(MetadataUtils.Kinds.SlotNames, new VectorType(TextType.Instance, stdErr.Length), getSlotNames); diff --git a/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedLinear.cs b/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedLinear.cs index 2175cb0417..a5f0f60092 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedLinear.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedLinear.cs @@ -121,14 +121,14 @@ private protected AveragedTrainStateBase(IChannel ch, int numFeatures, LinearPre /// /// Return the raw margin from the decision hyperplane /// - public Float AveragedMargin(ref VBuffer feat) + public Float AveragedMargin(in VBuffer feat) { Contracts.Assert(Averaged); - return (TotalBias + VectorUtils.DotProduct(ref feat, ref TotalWeights)) / (Float)NumWeightUpdates; + return (TotalBias + VectorUtils.DotProduct(in feat, in TotalWeights)) / (Float)NumWeightUpdates; } - public override Float Margin(ref VBuffer feat) - => Averaged ? AveragedMargin(ref feat) : CurrentMargin(ref feat); + public override Float Margin(in VBuffer feat) + => Averaged ? AveragedMargin(in feat) : CurrentMargin(in feat); public override void FinishIteration(IChannel ch) { @@ -138,7 +138,7 @@ public override void FinishIteration(IChannel ch) if (_args.DoLazyUpdates && NumNoUpdates > 0) { // Update the total weights to include the final loss=0 updates - VectorUtils.AddMult(ref Weights, NumNoUpdates * WeightsScale, ref TotalWeights); + VectorUtils.AddMult(in Weights, NumNoUpdates * WeightsScale, ref TotalWeights); TotalBias += Bias * NumNoUpdates; NumWeightUpdates += NumNoUpdates; NumNoUpdates = 0; @@ -150,7 +150,7 @@ public override void FinishIteration(IChannel ch) if (_args.ResetWeightsAfterXExamples == 0) { ch.Info("Resetting weights to average weights"); - VectorUtils.ScaleInto(ref TotalWeights, 1 / (Float)NumWeightUpdates, ref Weights); + VectorUtils.ScaleInto(in TotalWeights, 1 / (Float)NumWeightUpdates, ref Weights); WeightsScale = 1; Bias = TotalBias / (Float)NumWeightUpdates; } @@ -159,12 +159,12 @@ public override void FinishIteration(IChannel ch) base.FinishIteration(ch); } - public override void ProcessDataInstance(IChannel ch, ref VBuffer feat, Float label, Float weight) + public override void ProcessDataInstance(IChannel ch, in VBuffer feat, Float label, Float weight) { - base.ProcessDataInstance(ch, ref feat, label, weight); + base.ProcessDataInstance(ch, in feat, label, weight); // compute the update and update if needed - Float output = CurrentMargin(ref feat); + Float output = CurrentMargin(in feat); Double loss = _loss.Loss(output, label); // REVIEW: Should this be biasUpdate != 0? 
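// (Context for the lazy-averaging paths above and below: while the loss stays zero the weights are frozen, so instead of adding Weights into TotalWeights on every example the trainer counts the skipped rounds in NumNoUpdates and folds them in at once with AddMult(in Weights, NumNoUpdates * WeightsScale, ref TotalWeights).)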
@@ -175,7 +175,7 @@ public override void ProcessDataInstance(IChannel ch, ref VBuffer feat, F // If doing lazy weights, we need to update the totalWeights and totalBias before updating weights/bias if (_args.DoLazyUpdates && _args.Averaged && NumNoUpdates > 0 && TotalMultipliers * _args.AveragedTolerance <= PendingMultipliers) { - VectorUtils.AddMult(ref Weights, NumNoUpdates * WeightsScale, ref TotalWeights); + VectorUtils.AddMult(in Weights, NumNoUpdates * WeightsScale, ref TotalWeights); TotalBias += Bias * NumNoUpdates * WeightsScale; NumWeightUpdates += NumNoUpdates; NumNoUpdates = 0; @@ -190,7 +190,7 @@ public override void ProcessDataInstance(IChannel ch, ref VBuffer feat, F Float biasUpdate = -rate * _loss.Derivative(output, label); // Perform the update to weights and bias. - VectorUtils.AddMult(ref feat, biasUpdate / WeightsScale, ref Weights); + VectorUtils.AddMult(in feat, biasUpdate / WeightsScale, ref Weights); WeightsScale *= 1 - 2 * _args.L2RegularizerWeight; // L2 regularization. ScaleWeightsIfNeeded(); Bias += biasUpdate; @@ -209,7 +209,7 @@ public override void ProcessDataInstance(IChannel ch, ref VBuffer feat, F if (_resetWeightsAfterXExamples > 0 && NumIterExamples % _resetWeightsAfterXExamples == 0) { ch.Info("Resetting weights to average weights"); - VectorUtils.ScaleInto(ref TotalWeights, 1 / (Float)NumWeightUpdates, ref Weights); + VectorUtils.ScaleInto(in TotalWeights, 1 / (Float)NumWeightUpdates, ref Weights); WeightsScale = 1; Bias = TotalBias / (Float)NumWeightUpdates; } @@ -223,12 +223,12 @@ private void IncrementAverageNonLazy() { if (_args.RecencyGain == 0) { - VectorUtils.AddMult(ref Weights, WeightsScale, ref TotalWeights); + VectorUtils.AddMult(in Weights, WeightsScale, ref TotalWeights); TotalBias += Bias; NumWeightUpdates++; return; } - VectorUtils.AddMult(ref Weights, Gain * WeightsScale, ref TotalWeights); + VectorUtils.AddMult(in Weights, Gain * WeightsScale, ref TotalWeights); TotalBias += Gain * Bias; NumWeightUpdates += Gain; Gain = (_args.RecencyGainMulti ? Gain * _args.RecencyGain : Gain + _args.RecencyGain); diff --git a/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedPerceptron.cs b/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedPerceptron.cs index 3cbb553015..0e86d91c47 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedPerceptron.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/Online/AveragedPerceptron.cs @@ -79,7 +79,7 @@ public override LinearBinaryPredictor CreatePredictor() bias = TotalBias / (float)NumWeightUpdates; } - return new LinearBinaryPredictor(ParentHost, ref weights, bias); + return new LinearBinaryPredictor(ParentHost, in weights, bias); } } diff --git a/src/Microsoft.ML.StandardLearners/Standard/Online/LinearSvm.cs b/src/Microsoft.ML.StandardLearners/Standard/Online/LinearSvm.cs index a190b90f19..716bd1c4fc 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/Online/LinearSvm.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/Online/LinearSvm.cs @@ -122,22 +122,22 @@ private void BeginBatch() _weightsUpdate = new VBuffer(_weightsUpdate.Length, 0, _weightsUpdate.Values, _weightsUpdate.Indices); } - private void FinishBatch(ref VBuffer weightsUpdate, Float weightsUpdateScale) + private void FinishBatch(in VBuffer weightsUpdate, Float weightsUpdateScale) { if (_numBatchExamples > 0) - UpdateWeights(ref weightsUpdate, weightsUpdateScale); + UpdateWeights(in weightsUpdate, weightsUpdateScale); _numBatchExamples = 0; } /// /// Observe an example and update weights if necessary.
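/// (Roughly: on a margin violation, i.e. label * margin < 1, the example's scaled features are folded into the batch update, and UpdateWeights then applies the Pegasos-style step w = (1 - eta * lambda) * w + eta/k * totalUpdate noted below.)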
/// - public override void ProcessDataInstance(IChannel ch, ref VBuffer feat, Float label, Float weight) + public override void ProcessDataInstance(IChannel ch, in VBuffer feat, Float label, Float weight) { - base.ProcessDataInstance(ch, ref feat, label, weight); + base.ProcessDataInstance(ch, in feat, label, weight); // compute the update and update if needed - Float output = Margin(ref feat); + Float output = Margin(in feat); Float trueOutput = (label > 0 ? 1 : -1); Float loss = output * trueOutput - 1; @@ -149,11 +149,11 @@ public override void ProcessDataInstance(IChannel ch, ref VBuffer feat, F // Only aggregate in the case where we're handling multiple instances. if (_weightsUpdate.Count == 0) { - VectorUtils.ScaleInto(ref feat, currentBiasUpdate, ref _weightsUpdate); + VectorUtils.ScaleInto(in feat, currentBiasUpdate, ref _weightsUpdate); _weightsUpdateScale = 1; } else - VectorUtils.AddMult(ref feat, currentBiasUpdate, ref _weightsUpdate); + VectorUtils.AddMult(in feat, currentBiasUpdate, ref _weightsUpdate); } if (++_numBatchExamples >= _batchSize) @@ -165,10 +165,10 @@ public override void ProcessDataInstance(IChannel ch, ref VBuffer feat, F // vector directly. Float currentBiasUpdate = trueOutput * weight; _biasUpdate += currentBiasUpdate; - FinishBatch(ref feat, currentBiasUpdate); + FinishBatch(in feat, currentBiasUpdate); } else - FinishBatch(ref _weightsUpdate, _weightsUpdateScale); + FinishBatch(in _weightsUpdate, _weightsUpdateScale); BeginBatch(); } } @@ -177,7 +177,7 @@ public override void ProcessDataInstance(IChannel ch, ref VBuffer feat, F /// Updates the weights at the end of the batch. Since weightsUpdate can be an instance /// feature vector, this function should not change the contents of weightsUpdate. /// - private void UpdateWeights(ref VBuffer weightsUpdate, Float weightsUpdateScale) + private void UpdateWeights(in VBuffer weightsUpdate, Float weightsUpdateScale) { Contracts.Assert(_batch > 0); @@ -188,7 +188,7 @@ private void UpdateWeights(ref VBuffer weightsUpdate, Float weightsUpdate // w_{t+1/2} = (1 - eta*lambda) w_t + eta/k * totalUpdate WeightsScale *= 1 - rate * _lambda; ScaleWeightsIfNeeded(); - VectorUtils.AddMult(ref weightsUpdate, rate * weightsUpdateScale / (_numBatchExamples * WeightsScale), ref Weights); + VectorUtils.AddMult(in weightsUpdate, rate * weightsUpdateScale / (_numBatchExamples * WeightsScale), ref Weights); Contracts.Assert(!_noBias || Bias == 0); if (!_noBias) @@ -211,13 +211,14 @@ private void UpdateWeights(ref VBuffer weightsUpdate, Float weightsUpdate /// /// Return the raw margin from the decision hyperplane. 
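/// (As the body below shows, this is Bias + DotProduct(feat, Weights) * WeightsScale; the shrinkage from regularization is folded into the scalar WeightsScale rather than rescaling the weight vector on every update.)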
/// - public override Float Margin(ref VBuffer feat) - => Bias + VectorUtils.DotProduct(ref feat, ref Weights) * WeightsScale; + public override Float Margin(in VBuffer feat) + => Bias + VectorUtils.DotProduct(in feat, in Weights) * WeightsScale; public override TPredictor CreatePredictor() { Contracts.Assert(WeightsScale == 1); - return new LinearBinaryPredictor(ParentHost, ref Weights, Bias); + // below should be `in Weights`, but can't because of https://github.com/dotnet/roslyn/issues/29371 + return new LinearBinaryPredictor(ParentHost, Weights, Bias); } } diff --git a/src/Microsoft.ML.StandardLearners/Standard/Online/OnlineGradientDescent.cs b/src/Microsoft.ML.StandardLearners/Standard/Online/OnlineGradientDescent.cs index b3080212df..7bc7b7d96d 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/Online/OnlineGradientDescent.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/Online/OnlineGradientDescent.cs @@ -83,7 +83,7 @@ public override LinearRegressionPredictor CreatePredictor() VectorUtils.ScaleBy(ref weights, 1 / (float)NumWeightUpdates); bias = TotalBias / (float)NumWeightUpdates; } - return new LinearRegressionPredictor(ParentHost, ref weights, bias); + return new LinearRegressionPredictor(ParentHost, in weights, bias); } } diff --git a/src/Microsoft.ML.StandardLearners/Standard/Online/OnlineLinear.cs b/src/Microsoft.ML.StandardLearners/Standard/Online/OnlineLinear.cs index 8779609a36..e355f9b0e6 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/Online/OnlineLinear.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/Online/OnlineLinear.cs @@ -70,7 +70,7 @@ private protected abstract class TrainStateBase public int Iteration; /// - /// The number of examples in the current iteration. Incremented by ProcessDataInstance, + /// The number of examples in the current iteration. Incremented by ProcessDataInstance, /// and reset by FinishIteration. /// public long NumIterExamples; @@ -200,7 +200,7 @@ public virtual void FinishIteration(IChannel ch) /// /// This should be overridden by derived classes. This implementation simply increments NumIterExamples. /// - public virtual void ProcessDataInstance(IChannel ch, ref VBuffer feat, Float label, Float weight) + public virtual void ProcessDataInstance(IChannel ch, in VBuffer feat, Float label, Float weight) { ch.Assert(FloatUtils.IsFinite(feat.Values, feat.Count)); ++NumIterExamples; @@ -209,16 +209,16 @@ public virtual void ProcessDataInstance(IChannel ch, ref VBuffer feat, Fl /// /// Return the raw margin from the decision hyperplane /// - public Float CurrentMargin(ref VBuffer feat) - => Bias + VectorUtils.DotProduct(ref feat, ref Weights) * WeightsScale; + public Float CurrentMargin(in VBuffer feat) + => Bias + VectorUtils.DotProduct(in feat, in Weights) * WeightsScale; /// - /// The default implementation just calls CurrentMargin. + /// The default implementation just calls CurrentMargin. /// /// /// - public virtual Float Margin(ref VBuffer feat) - => CurrentMargin(ref feat); + public virtual Float Margin(in VBuffer feat) + => CurrentMargin(in feat); public abstract TModel CreatePredictor(); } @@ -271,7 +271,7 @@ protected sealed override TModel TrainModelCore(TrainContext context) TrainCore(ch, data, state); ch.Assert(state.WeightsScale == 1); - Float maxNorm = Math.Max(VectorUtils.MaxNorm(ref state.Weights), Math.Abs(state.Bias)); + Float maxNorm = Math.Max(VectorUtils.MaxNorm(in state.Weights), Math.Abs(state.Bias)); ch.Check(FloatUtils.IsFinite(maxNorm), "The weights/bias contain invalid values (NaN or Infinite).
Potential causes: high learning rates, no normalization, high initial weights, etc."); return state.CreatePredictor(); @@ -299,7 +299,7 @@ private void TrainCore(IChannel ch, RoleMappedData data, TrainStateBase state) using (var cursor = cursorFactory.Create(rand)) { while (cursor.MoveNext()) - state.ProcessDataInstance(ch, ref cursor.Features, cursor.Label, cursor.Weight); + state.ProcessDataInstance(ch, in cursor.Features, cursor.Label, cursor.Weight); numBad += cursor.BadFeaturesRowCount; } diff --git a/src/Microsoft.ML.StandardLearners/Standard/PoissonRegression/PoissonRegression.cs b/src/Microsoft.ML.StandardLearners/Standard/PoissonRegression/PoissonRegression.cs index 4de37a209e..34d9a743cc 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/PoissonRegression/PoissonRegression.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/PoissonRegression/PoissonRegression.cs @@ -101,7 +101,7 @@ protected override VBuffer InitializeWeightsFromPredictor(PoissonRegressi return InitializeWeights(srcPredictor.Weights2, new[] { srcPredictor.Bias }); } - protected override void PreTrainingProcessInstance(float label, ref VBuffer feat, float weight) + protected override void PreTrainingProcessInstance(float label, in VBuffer feat, float weight) { if (!(label >= 0)) throw Contracts.Except("Poisson regression must regress to a non-negative label, but label {0} encountered", label); @@ -109,9 +109,9 @@ protected override void PreTrainingProcessInstance(float label, ref VBuffer - protected override float DifferentiableFunction(ref VBuffer x, ref VBuffer gradient, IProgressChannelProvider progress) + protected override float DifferentiableFunction(in VBuffer x, ref VBuffer gradient, IProgressChannelProvider progress) { - return base.DifferentiableFunction(ref x, ref gradient, progress) + (float)(_lossNormalizer / NumGoodRows); + return base.DifferentiableFunction(in x, ref gradient, progress) + (float)(_lossNormalizer / NumGoodRows); } // Poisson: p(y;lambda) = lambda^y * exp(-lambda) / y! @@ -124,17 +124,17 @@ protected override float DifferentiableFunction(ref VBuffer x, ref VBuffe // Goal is to find w that maximizes // Note: We negate the above in order to minimize - protected override float AccumulateOneGradient(ref VBuffer feat, float label, float weight, - ref VBuffer x, ref VBuffer grad, ref float[] scratch) + protected override float AccumulateOneGradient(in VBuffer feat, float label, float weight, + in VBuffer x, ref VBuffer grad, ref float[] scratch) { float bias = 0; x.GetItemOrDefault(0, ref bias); - float dot = VectorUtils.DotProductWithOffset(ref x, 1, ref feat) + bias; + float dot = VectorUtils.DotProductWithOffset(in x, 1, in feat) + bias; float lambda = MathUtils.ExpSlow(dot); float y = label; float mult = -(y - lambda) * weight; - VectorUtils.AddMultWithOffset(ref feat, mult, ref grad, 1); + VectorUtils.AddMultWithOffset(in feat, mult, ref grad, 1); // Due to the call to EnsureBiases, we know this region is dense.
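// (For reference: with lambda = exp(w.x + b), the negated Poisson log-likelihood has per-example gradient (lambda - y) * x, which is exactly the mult * feat term accumulated above; the bias slot is then updated directly since the first BiasCount entries are dense.)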
Contracts.Assert(grad.Count >= BiasCount && (grad.IsDense || grad.Indices[BiasCount - 1] == BiasCount - 1)); grad.Values[0] += mult; @@ -152,7 +152,7 @@ protected override PoissonRegressionPredictor CreatePredictor() CurrentWeights.CopyTo(ref weights, 1, CurrentWeights.Length - 1); float bias = 0; CurrentWeights.GetItemOrDefault(0, ref bias); - return new PoissonRegressionPredictor(Host, ref weights, bias); + return new PoissonRegressionPredictor(Host, in weights, bias); } protected override void ComputeTrainingStatistics(IChannel ch, FloatLabelCursor.Factory factory, float loss, int numParams) diff --git a/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs b/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs index 0ec4ccd878..deaa204b03 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/SdcaMultiClass.cs @@ -175,7 +175,7 @@ protected override void TrainWithoutLock(IProgressChannelProvider progress, Floa } // The output for the label class using current weights and bias. - var labelOutput = WDot(ref features, ref weights[label], biasReg[label] + biasUnreg[label]); + var labelOutput = WDot(in features, in weights[label], biasReg[label] + biasUnreg[label]); var instanceWeight = GetInstanceWeight(cursor); // This will be the new dual variable corresponding to the label class. @@ -201,7 +201,7 @@ protected override void TrainWithoutLock(IProgressChannelProvider progress, Floa { long dualIndex = iClass + dualIndexInitPos; var dual = duals[dualIndex]; - var output = labelOutput + labelPrimalUpdate * normSquared - WDot(ref features, ref weights[iClass], biasReg[iClass] + biasUnreg[iClass]); + var output = labelOutput + labelPrimalUpdate * normSquared - WDot(in features, in weights[iClass], biasReg[iClass] + biasUnreg[iClass]); var dualUpdate = _loss.DualUpdate(output, 1, dual, invariant, numThreads); // The successive over-relaxation approach to adjust the sum of dual variables (biasReg) to zero.
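// (Reference sketch, not part of this change: WDot as called in these SDCA hunks behaves like an ordinary affine score. WDotSketch below is a hypothetical stand-in, assuming the third argument is the class's total bias.)
private static float WDotSketch(in VBuffer<float> features, in VBuffer<float> classWeights, float classBias)
    => classBias + VectorUtils.DotProduct(in features, in classWeights);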
@@ -223,7 +223,7 @@ protected override void TrainWithoutLock(IProgressChannelProvider progress, Floa if (l1ThresholdZero) { - VectorUtils.AddMult(ref features, weights[iClass].Values, -primalUpdate); + VectorUtils.AddMult(in features, weights[iClass].Values, -primalUpdate); biasReg[iClass] -= primalUpdate; } else @@ -256,7 +256,7 @@ protected override void TrainWithoutLock(IProgressChannelProvider progress, Floa biasUnreg[label] += labelAdjustment * lambdaNInv * instanceWeight; if (l1ThresholdZero) { - VectorUtils.AddMult(ref features, weights[label].Values, labelPrimalUpdate); + VectorUtils.AddMult(in features, weights[label].Values, labelPrimalUpdate); biasReg[label] += labelPrimalUpdate; } else @@ -321,7 +321,7 @@ protected override bool CheckConvergence( var instanceWeight = GetInstanceWeight(cursor); var features = cursor.Features; var label = (int)cursor.Label; - var labelOutput = WDot(ref features, ref weights[label], biasReg[label] + biasUnreg[label]); + var labelOutput = WDot(in features, in weights[label], biasReg[label] + biasUnreg[label]); Double subLoss = 0; Double subDualLoss = 0; long idx = getIndexFromIdAndRow(cursor.Id, row); @@ -334,7 +334,7 @@ protected override bool CheckConvergence( continue; } - var currentClassOutput = WDot(ref features, ref weights[iClass], biasReg[iClass] + biasUnreg[iClass]); + var currentClassOutput = WDot(in features, in weights[iClass], biasReg[iClass] + biasUnreg[iClass]); subLoss += _loss.Loss(labelOutput - currentClassOutput, 1); Contracts.Assert(dualIndex == iClass + idx * numClasses); var dual = duals[dualIndex++]; @@ -359,7 +359,7 @@ protected override bool CheckConvergence( Double biasRegularizationAdjustment = 0; for (int iClass = 0; iClass < numClasses; iClass++) { - weightsL1Norm += VectorUtils.L1Norm(ref weights[iClass]) + Math.Abs(biasReg[iClass]); + weightsL1Norm += VectorUtils.L1Norm(in weights[iClass]) + Math.Abs(biasReg[iClass]); weightsL2NormSquared += VectorUtils.NormSquared(weights[iClass]) + biasReg[iClass] * biasReg[iClass]; biasRegularizationAdjustment += biasReg[iClass] * biasUnreg[iClass]; } diff --git a/src/Microsoft.ML.StandardLearners/Standard/SdcaRegression.cs b/src/Microsoft.ML.StandardLearners/Standard/SdcaRegression.cs index 9d3e1205cc..f55ba18fe1 100644 --- a/src/Microsoft.ML.StandardLearners/Standard/SdcaRegression.cs +++ b/src/Microsoft.ML.StandardLearners/Standard/SdcaRegression.cs @@ -107,9 +107,10 @@ protected override LinearRegressionPredictor CreatePredictor(VBuffer[] we Host.CheckParam(weights[0].Length > 0, nameof(weights)); VBuffer maybeSparseWeights = default; - VBufferUtils.CreateMaybeSparseCopy(ref weights[0], ref maybeSparseWeights, + // below should be `in weights[0]`, but can't because of https://github.com/dotnet/roslyn/issues/29371 + VBufferUtils.CreateMaybeSparseCopy(weights[0], ref maybeSparseWeights, Conversions.Instance.GetIsDefaultPredicate(NumberType.Float)); - return new LinearRegressionPredictor(Host, ref maybeSparseWeights, bias[0]); + return new LinearRegressionPredictor(Host, in maybeSparseWeights, bias[0]); } protected override Float GetInstanceWeight(FloatLabelCursor cursor) diff --git a/src/Microsoft.ML.Sweeper/Algorithms/SmacSweeper.cs b/src/Microsoft.ML.Sweeper/Algorithms/SmacSweeper.cs index 4d2e7d6616..d78b6eaa7d 100644 --- a/src/Microsoft.ML.Sweeper/Algorithms/SmacSweeper.cs +++ b/src/Microsoft.ML.Sweeper/Algorithms/SmacSweeper.cs @@ -344,7 +344,7 @@ private double[][] GetForestRegressionLeafValues(FastForestRegressionPredictor f { Float[] transformedParams = 
SweeperProbabilityUtils.ParameterSetAsFloatArray(_host, _sweepParameters, config, true); VBuffer features = new VBuffer(transformedParams.Length, transformedParams); - leafValues.Add((Float)t.LeafValues[t.GetLeaf(ref features)]); + leafValues.Add((Float)t.LeafValues[t.GetLeaf(in features)]); } datasetLeafValues.Add(leafValues.ToArray()); } diff --git a/src/Microsoft.ML.Transforms/CountFeatureSelection.cs b/src/Microsoft.ML.Transforms/CountFeatureSelection.cs index 8ace98e7c0..b4b876138d 100644 --- a/src/Microsoft.ML.Transforms/CountFeatureSelection.cs +++ b/src/Microsoft.ML.Transforms/CountFeatureSelection.cs @@ -278,10 +278,10 @@ public override long[] Count public override void ProcessValue() { _fillBuffer(); - ProcessValue(ref _buffer); + ProcessValue(in _buffer); } - public void ProcessValue(ref VBuffer value) + public void ProcessValue(in VBuffer value) { var size = _count.Length; Contracts.Check(value.Length == size); diff --git a/src/Microsoft.ML.Transforms/GcnTransform.cs b/src/Microsoft.ML.Transforms/GcnTransform.cs index 0f42621d72..2364704c14 100644 --- a/src/Microsoft.ML.Transforms/GcnTransform.cs +++ b/src/Microsoft.ML.Transforms/GcnTransform.cs @@ -446,7 +446,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou getSrc(ref src); Float mean = Mean(src.Values, src.Count, src.Length); Float divisor = StdDev(src.Values, src.Count, src.Length, mean); - FillValues(Host, ref src, ref dst, divisor, scale, mean); + FillValues(Host, in src, ref dst, divisor, scale, mean); }; return del; case NormalizerKind.L2Norm: @@ -456,7 +456,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou getSrc(ref src); Float mean = Mean(src.Values, src.Count, src.Length); Float divisor = L2Norm(src.Values, src.Count, mean); - FillValues(Host, ref src, ref dst, divisor, scale, mean); + FillValues(Host, in src, ref dst, divisor, scale, mean); }; return del; case NormalizerKind.L1Norm: @@ -466,7 +466,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou getSrc(ref src); Float mean = Mean(src.Values, src.Count, src.Length); Float divisor = L1Norm(src.Values, src.Count, mean); - FillValues(Host, ref src, ref dst, divisor, scale, mean); + FillValues(Host, in src, ref dst, divisor, scale, mean); }; return del; case NormalizerKind.LInf: @@ -476,7 +476,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou getSrc(ref src); Float mean = Mean(src.Values, src.Count, src.Length); Float divisor = LInfNorm(src.Values, src.Count, mean); - FillValues(Host, ref src, ref dst, divisor, scale, mean); + FillValues(Host, in src, ref dst, divisor, scale, mean); }; return del; default: @@ -493,7 +493,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou { getSrc(ref src); Float divisor = StdDev(src.Values, src.Count, src.Length); - FillValues(Host, ref src, ref dst, divisor, scale); + FillValues(Host, in src, ref dst, divisor, scale); }; return del; case NormalizerKind.L2Norm: @@ -502,7 +502,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou { getSrc(ref src); Float divisor = L2Norm(src.Values, src.Count); - FillValues(Host, ref src, ref dst, divisor, scale); + FillValues(Host, in src, ref dst, divisor, scale); }; return del; case NormalizerKind.L1Norm: @@ -511,7 +511,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou { getSrc(ref src); Float divisor = L1Norm(src.Values, src.Count); - FillValues(Host, ref 
src, ref dst, divisor, scale); + FillValues(Host, in src, ref dst, divisor, scale); }; return del; case NormalizerKind.LInf: @@ -520,7 +520,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou { getSrc(ref src); Float divisor = LInfNorm(src.Values, src.Count); - FillValues(Host, ref src, ref dst, divisor, scale); + FillValues(Host, in src, ref dst, divisor, scale); }; return del; default: @@ -529,7 +529,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou } } - private static void FillValues(IExceptionContext ectx, ref VBuffer src, ref VBuffer dst, Float divisor, Float scale, Float offset = 0) + private static void FillValues(IExceptionContext ectx, in VBuffer src, ref VBuffer dst, Float divisor, Float scale, Float offset = 0) { int count = src.Count; int length = src.Length; diff --git a/src/Microsoft.ML.Transforms/LearnerFeatureSelection.cs b/src/Microsoft.ML.Transforms/LearnerFeatureSelection.cs index a75cec4a19..a67816f348 100644 --- a/src/Microsoft.ML.Transforms/LearnerFeatureSelection.cs +++ b/src/Microsoft.ML.Transforms/LearnerFeatureSelection.cs @@ -94,7 +94,7 @@ public static IDataTransform Create(IHostEnvironment env, Arguments args, IDataV using (var ch = host.Start("Dropping Slots")) { int selectedCount; - var column = CreateDropSlotsColumn(args, ref scores, out selectedCount); + var column = CreateDropSlotsColumn(args, in scores, out selectedCount); if (column == null) { @@ -110,7 +110,7 @@ public static IDataTransform Create(IHostEnvironment env, Arguments args, IDataV } } - private static DropSlotsTransform.Column CreateDropSlotsColumn(Arguments args, ref VBuffer scores, out int selectedCount) + private static DropSlotsTransform.Column CreateDropSlotsColumn(Arguments args, in VBuffer scores, out int selectedCount) { // Not checking the scores.Length, because: // 1. If it's the same as the features column length, we should be constructing the right DropSlots arguments. diff --git a/src/Microsoft.ML.Transforms/MutualInformationFeatureSelection.cs b/src/Microsoft.ML.Transforms/MutualInformationFeatureSelection.cs index 635ed63f38..15f7884e28 100644 --- a/src/Microsoft.ML.Transforms/MutualInformationFeatureSelection.cs +++ b/src/Microsoft.ML.Transforms/MutualInformationFeatureSelection.cs @@ -557,7 +557,7 @@ private Single[] ComputeMutualInformation(Transposer trans, int col, Mapper(Transposer trans, int col, Mapper /// Computes the mutual information for one slot. /// - private Single ComputeMutualInformation(ref VBuffer features, int numFeatures, int offset) + private Single ComputeMutualInformation(in VBuffer features, int numFeatures, int offset) { Contracts.Assert(_labels.Length == features.Length); if (Utils.Size(_contingencyTable[0]) < numFeatures) @@ -580,7 +580,7 @@ private Single ComputeMutualInformation(ref VBuffer features, int numFeatur Array.Clear(_labelSums, 0, _numLabels); Array.Clear(_featureSums, 0, numFeatures); - FillTable(ref features, offset, numFeatures); + FillTable(in features, offset, numFeatures); for (int i = 0; i < _numLabels; i++) { for (int j = 0; j < numFeatures; j++) @@ -607,7 +607,7 @@ private Single ComputeMutualInformation(ref VBuffer features, int numFeatur /// /// Fills the contingency table. 
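/// Cell [label, feature value] accumulates co-occurrence counts, with _labelSums and _featureSums as the marginals; the mutual information is then the usual sum of p(l,f) * log(p(l,f) / (p(l) * p(f))) over cells.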
/// - private void FillTable(ref VBuffer features, int offset, int numFeatures) + private void FillTable(in VBuffer features, int offset, int numFeatures) { Contracts.Assert(_labels.Length == features.Length); if (features.IsDense) diff --git a/src/Microsoft.ML.Transforms/NAIndicatorTransform.cs b/src/Microsoft.ML.Transforms/NAIndicatorTransform.cs index 9a64306896..25f7d65fb9 100644 --- a/src/Microsoft.ML.Transforms/NAIndicatorTransform.cs +++ b/src/Microsoft.ML.Transforms/NAIndicatorTransform.cs @@ -280,7 +280,7 @@ private ValueGetter> ComposeGetterVec(IRow input, int iinfo) // Sense indicates if the values added to the indices list represent NAs or non-NAs. bool sense; getSrc(ref src); - FindNAs(ref src, isNA, defaultIsNA, indices, out sense); + FindNAs(in src, isNA, defaultIsNA, indices, out sense); FillValues(src.Length, ref dst, indices, sense); }; } @@ -288,7 +288,7 @@ private ValueGetter> ComposeGetterVec(IRow input, int iinfo) /// /// Adds all NAs (or non-NAs) to the indices List. Whether NAs or non-NAs have been added is indicated by the bool sense. /// - private void FindNAs(ref VBuffer src, InPredicate isNA, bool defaultIsNA, List indices, out bool sense) + private void FindNAs(in VBuffer src, InPredicate isNA, bool defaultIsNA, List indices, out bool sense) { Host.AssertValue(isNA); Host.AssertValue(indices); diff --git a/src/Microsoft.ML.Transforms/NAReplaceTransform.cs b/src/Microsoft.ML.Transforms/NAReplaceTransform.cs index fdff24bc03..61250b801f 100644 --- a/src/Microsoft.ML.Transforms/NAReplaceTransform.cs +++ b/src/Microsoft.ML.Transforms/NAReplaceTransform.cs @@ -710,7 +710,7 @@ private Delegate ComposeGetterVec(IRow input, int iinfo) (ref VBuffer dst) => { getSrc(ref src); - FillValues(ref src, ref dst, isNA, rep, repIsDefault); + FillValues(in src, ref dst, isNA, rep, repIsDefault); }; } @@ -724,14 +724,14 @@ private Delegate ComposeGetterVec(IRow input, int iinfo) { getSrc(ref src); Host.Check(src.Length == repArray.Length); - FillValues(ref src, ref dst, isNA, repArray, _parent._repIsDefault[iinfo]); + FillValues(in src, ref dst, isNA, repArray, _parent._repIsDefault[iinfo]); }; } /// /// Fills values for vectors where there is one replacement value. /// - private void FillValues(ref VBuffer src, ref VBuffer dst, InPredicate isNA, T rep, bool repIsDefault) + private void FillValues(in VBuffer src, ref VBuffer dst, InPredicate isNA, T rep, bool repIsDefault) { Host.AssertValue(isNA); @@ -811,7 +811,7 @@ private void FillValues(ref VBuffer src, ref VBuffer dst, InPredicate /// Fills values for vectors where there are slot-wise replacement values.
/// - private void FillValues(ref VBuffer src, ref VBuffer dst, InPredicate isNA, T[] rep, BitArray repIsDefault) + private void FillValues(in VBuffer src, ref VBuffer dst, InPredicate isNA, T[] rep, BitArray repIsDefault) { Host.AssertValue(rep); Host.Assert(rep.Length == src.Length); diff --git a/src/Microsoft.ML.Transforms/NAReplaceUtils.cs b/src/Microsoft.ML.Transforms/NAReplaceUtils.cs index c83f7b3388..82ca10a966 100644 --- a/src/Microsoft.ML.Transforms/NAReplaceUtils.cs +++ b/src/Microsoft.ML.Transforms/NAReplaceUtils.cs @@ -162,10 +162,10 @@ public sealed override void ProcessRow() { _rowCount++; _getter(ref _val); - ProcessRow(ref _val); + ProcessRow(in _val); } - protected abstract void ProcessRow(ref TValue val); + protected abstract void ProcessRow(in TValue val); } private abstract class StatAggregatorAcrossSlots : StatAggregator, TStat> @@ -183,19 +183,19 @@ protected StatAggregatorAcrossSlots(IChannel ch, IRowCursor cursor, int col) { } - protected sealed override void ProcessRow(ref VBuffer src) + protected sealed override void ProcessRow(in VBuffer src) { var srcCount = src.Count; var srcValues = src.Values; Ch.Assert(Utils.Size(srcValues) >= srcCount); for (int slot = 0; slot < srcCount; slot++) - ProcessValue(ref srcValues[slot]); + ProcessValue(in srcValues[slot]); _valueCount = _valueCount + (ulong)src.Length; } - protected abstract void ProcessValue(ref TItem val); + protected abstract void ProcessValue(in TItem val); } private abstract class StatAggregatorBySlot : StatAggregator, TStatItem[]> @@ -208,7 +208,7 @@ protected StatAggregatorBySlot(IChannel ch, ColumnType type, IRowCursor cursor, Stat = new TStatItem[type.VectorSize]; } - protected sealed override void ProcessRow(ref VBuffer src) + protected sealed override void ProcessRow(in VBuffer src) { var srcCount = src.Count; var srcValues = src.Values; @@ -217,7 +217,7 @@ protected sealed override void ProcessRow(ref VBuffer src) { // The src vector is dense. 
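// (Dense case: a value's position in Values is its logical slot index, so slot serves as both the loop counter and the slot id; the sparse branch below maps through Indices instead.)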
for (int slot = 0; slot < srcCount; slot++) - ProcessValue(ref srcValues[slot], slot); + ProcessValue(in srcValues[slot], slot); } else { @@ -225,17 +225,17 @@ protected sealed override void ProcessRow(ref VBuffer src) var srcIndices = src.Indices; Ch.Assert(Utils.Size(srcIndices) >= srcCount); for (int islot = 0; islot < srcCount; islot++) - ProcessValue(ref srcValues[islot], srcIndices[islot]); + ProcessValue(in srcValues[islot], srcIndices[islot]); } } - protected abstract void ProcessValue(ref TItem val, int slot); + protected abstract void ProcessValue(in TItem val, int slot); } private abstract class MinMaxAggregatorOne : StatAggregator { protected readonly bool ReturnMax; - private delegate void ProcessValueDelegate(ref TValue val); + private delegate void ProcessValueDelegate(in TValue val); private readonly ProcessValueDelegate _processValueDelegate; protected MinMaxAggregatorOne(IChannel ch, IRowCursor cursor, int col, bool returnMax) @@ -248,9 +248,9 @@ protected MinMaxAggregatorOne(IChannel ch, IRowCursor cursor, int col, bool retu _processValueDelegate = ProcessValueMin; } - protected override void ProcessRow(ref TValue val) + protected override void ProcessRow(in TValue val) { - _processValueDelegate(ref val); + _processValueDelegate(in val); } public override object GetStat() @@ -258,14 +258,14 @@ public override object GetStat() return Stat; } - protected abstract void ProcessValueMin(ref TValue val); - protected abstract void ProcessValueMax(ref TValue val); + protected abstract void ProcessValueMin(in TValue val); + protected abstract void ProcessValueMax(in TValue val); } private abstract class MinMaxAggregatorAcrossSlots : StatAggregatorAcrossSlots { protected readonly bool ReturnMax; - protected delegate void ProcessValueDelegate(ref TItem val); + protected delegate void ProcessValueDelegate(in TItem val); protected readonly ProcessValueDelegate ProcValueDelegate; // The count of the number of times ProcessValue has been called (used for tracking sparsity). private long _valuesProcessed; @@ -285,20 +285,20 @@ protected MinMaxAggregatorAcrossSlots(IChannel ch, IRowCursor cursor, int col, b ProcValueDelegate = ProcessValueMin; } - protected override void ProcessValue(ref TItem val) + protected override void ProcessValue(in TItem val) { _valuesProcessed = _valuesProcessed + 1; - ProcValueDelegate(ref val); + ProcValueDelegate(in val); } - protected abstract void ProcessValueMin(ref TItem val); - protected abstract void ProcessValueMax(ref TItem val); + protected abstract void ProcessValueMin(in TItem val); + protected abstract void ProcessValueMax(in TItem val); } private abstract class MinMaxAggregatorBySlot : StatAggregatorBySlot { protected readonly bool ReturnMax; - protected delegate void ProcessValueDelegate(ref TItem val, int slot); + protected delegate void ProcessValueDelegate(in TItem val, int slot); protected readonly ProcessValueDelegate ProcValueDelegate; // The count of the number of times ProcessValue has been called on a specific slot (used for tracking sparsity). 
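// (Implicit zeros in a sparse input never reach ProcessValue; GetStat later compares this count with the number of values each slot should have seen and replays a default of 0 for the shortfall, as the overrides further down show.)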
private readonly long[] _valuesProcessed; @@ -317,11 +317,11 @@ protected MinMaxAggregatorBySlot(IChannel ch, ColumnType type, IRowCursor cursor _valuesProcessed = new long[type.VectorSize]; } - protected override void ProcessValue(ref TItem val, int slot) + protected override void ProcessValue(in TItem val, int slot) { Ch.Assert(0 <= slot && slot < Stat.Length); _valuesProcessed[slot]++; - ProcValueDelegate(ref val, slot); + ProcValueDelegate(in val, slot); } protected long GetValuesProcessed(int slot) @@ -329,8 +329,8 @@ protected long GetValuesProcessed(int slot) return _valuesProcessed[slot]; } - protected abstract void ProcessValueMin(ref TItem val, int slot); - protected abstract void ProcessValueMax(ref TItem val, int slot); + protected abstract void ProcessValueMin(in TItem val, int slot); + protected abstract void ProcessValueMax(in TItem val, int slot); } /// @@ -548,7 +548,7 @@ public MeanAggregatorOne(IChannel ch, IRowCursor cursor, int col) { } - protected override void ProcessRow(ref Single val) + protected override void ProcessRow(in Single val) { Stat.Update(val); } @@ -568,7 +568,7 @@ public MeanAggregatorAcrossSlots(IChannel ch, IRowCursor cursor, int col) { } - protected override void ProcessValue(ref Single val) + protected override void ProcessValue(in Single val) { Stat.Update(val); } @@ -588,7 +588,7 @@ public MeanAggregatorBySlot(IChannel ch, ColumnType type, IRowCursor cursor, int { } - protected override void ProcessValue(ref Single val, int slot) + protected override void ProcessValue(in Single val, int slot) { Ch.Assert(0 <= slot && slot < Stat.Length); Stat[slot].Update(val); @@ -615,13 +615,13 @@ public MinMaxAggregatorOne(IChannel ch, IRowCursor cursor, int col, bool returnM Stat = ReturnMax ? Single.NegativeInfinity : Single.PositiveInfinity; } - protected override void ProcessValueMin(ref Single val) + protected override void ProcessValueMin(in Single val) { if (val < Stat) Stat = val; } - protected override void ProcessValueMax(ref Single val) + protected override void ProcessValueMax(in Single val) { if (val > Stat) Stat = val; @@ -636,13 +636,13 @@ public MinMaxAggregatorAcrossSlots(IChannel ch, IRowCursor cursor, int col, bool Stat = ReturnMax ? 
Single.NegativeInfinity : Single.PositiveInfinity; } - protected override void ProcessValueMin(ref Single val) + protected override void ProcessValueMin(in Single val) { if (val < Stat) Stat = val; } - protected override void ProcessValueMax(ref Single val) + protected override void ProcessValueMax(in Single val) { if (val > Stat) Stat = val; @@ -654,7 +654,7 @@ public override object GetStat() if (ValueCount > (ulong)ValuesProcessed) { Single def = 0; - ProcValueDelegate(ref def); + ProcValueDelegate(in def); } return (Single)Stat; } @@ -670,14 +670,14 @@ public MinMaxAggregatorBySlot(IChannel ch, ColumnType type, IRowCursor cursor, i Stat[i] = bound; } - protected override void ProcessValueMin(ref Single val, int slot) + protected override void ProcessValueMin(in Single val, int slot) { Ch.Assert(0 <= slot && slot < Stat.Length); if (val < Stat[slot]) Stat[slot] = val; } - protected override void ProcessValueMax(ref Single val, int slot) + protected override void ProcessValueMax(in Single val, int slot) { Ch.Assert(0 <= slot && slot < Stat.Length); if (val > Stat[slot]) @@ -692,7 +692,7 @@ public override object GetStat() if (GetValuesProcessed(slot) < RowCount) { Single def = 0; - ProcValueDelegate(ref def, slot); + ProcValueDelegate(in def, slot); } } return Stat; @@ -709,7 +709,7 @@ public MeanAggregatorOne(IChannel ch, IRowCursor cursor, int col) { } - protected override void ProcessRow(ref Double val) + protected override void ProcessRow(in Double val) { Stat.Update(val); } @@ -727,7 +727,7 @@ public MeanAggregatorAcrossSlots(IChannel ch, IRowCursor cursor, int col) { } - protected override void ProcessValue(ref Double val) + protected override void ProcessValue(in Double val) { Stat.Update(val); } @@ -745,7 +745,7 @@ public MeanAggregatorBySlot(IChannel ch, ColumnType type, IRowCursor cursor, int { } - protected override void ProcessValue(ref Double val, int slot) + protected override void ProcessValue(in Double val, int slot) { Ch.Assert(0 <= slot && slot < Stat.Length); Stat[slot].Update(val); @@ -768,13 +768,13 @@ public MinMaxAggregatorOne(IChannel ch, IRowCursor cursor, int col, bool returnM Stat = ReturnMax ? Double.NegativeInfinity : Double.PositiveInfinity; } - protected override void ProcessValueMin(ref Double val) + protected override void ProcessValueMin(in Double val) { if (val < Stat) Stat = val; } - protected override void ProcessValueMax(ref Double val) + protected override void ProcessValueMax(in Double val) { if (val > Stat) Stat = val; @@ -789,13 +789,13 @@ public MinMaxAggregatorAcrossSlots(IChannel ch, IRowCursor cursor, int col, bool Stat = ReturnMax ? 
Double.NegativeInfinity : Double.PositiveInfinity; } - protected override void ProcessValueMin(ref Double val) + protected override void ProcessValueMin(in Double val) { if (val < Stat) Stat = val; } - protected override void ProcessValueMax(ref Double val) + protected override void ProcessValueMax(in Double val) { if (val > Stat) Stat = val; @@ -807,7 +807,7 @@ public override object GetStat() if (ValueCount > (ulong)ValuesProcessed) { Double def = 0; - ProcValueDelegate(ref def); + ProcValueDelegate(in def); } return Stat; } @@ -823,7 +823,7 @@ public MinMaxAggregatorBySlot(IChannel ch, ColumnType type, IRowCursor cursor, i Stat[i] = bound; } - protected override void ProcessValueMin(ref Double val, int slot) + protected override void ProcessValueMin(in Double val, int slot) { Ch.Assert(0 <= slot && slot < Stat.Length); if (FloatUtils.IsFinite(val)) @@ -833,7 +833,7 @@ protected override void ProcessValueMin(ref Double val, int slot) } } - protected override void ProcessValueMax(ref Double val, int slot) + protected override void ProcessValueMax(in Double val, int slot) { Ch.Assert(0 <= slot && slot < Stat.Length); if (FloatUtils.IsFinite(val)) @@ -851,7 +851,7 @@ public override object GetStat() if (GetValuesProcessed(slot) < RowCount) { Double def = 0; - ProcValueDelegate(ref def, slot); + ProcValueDelegate(in def, slot); } } return Stat; diff --git a/src/Microsoft.ML.Transforms/RffTransform.cs b/src/Microsoft.ML.Transforms/RffTransform.cs index 204f88b3ca..53463b7a2b 100644 --- a/src/Microsoft.ML.Transforms/RffTransform.cs +++ b/src/Microsoft.ML.Transforms/RffTransform.cs @@ -397,8 +397,8 @@ private float[] GetAvgDistances(ColumnInfo[] columns, IDataView input) { for (int j = i + 1; j < instanceCount; j++) { - distances[count++] = gaussian ? VectorUtils.L2DistSquared(ref res[i], ref res[j]) - : VectorUtils.L1Distance(ref res[i], ref res[j]); + distances[count++] = gaussian ? VectorUtils.L2DistSquared(in res[i], in res[j]) + : VectorUtils.L1Distance(in res[i], in res[j]); } } Host.Assert(count == distances.Length); @@ -410,8 +410,8 @@ private float[] GetAvgDistances(ColumnInfo[] columns, IDataView input) { // For Gaussian kernels, we scale by the L2 distance squared, since the kernel function is exp(-gamma ||x-y||^2). // For Laplacian kernels, we scale by the L1 distance, since the kernel function is exp(-gamma ||x-y||_1). - distances[i / 2] = gaussian ? VectorUtils.L2DistSquared(ref res[i], ref res[i + 1]) : - VectorUtils.L1Distance(ref res[i], ref res[i + 1]); + distances[i / 2] = gaussian ? 
diff --git a/src/Microsoft.ML.Transforms/RffTransform.cs b/src/Microsoft.ML.Transforms/RffTransform.cs
index 204f88b3ca..53463b7a2b 100644
--- a/src/Microsoft.ML.Transforms/RffTransform.cs
+++ b/src/Microsoft.ML.Transforms/RffTransform.cs
@@ -397,8 +397,8 @@ private float[] GetAvgDistances(ColumnInfo[] columns, IDataView input)
                 {
                     for (int j = i + 1; j < instanceCount; j++)
                     {
-                        distances[count++] = gaussian ? VectorUtils.L2DistSquared(ref res[i], ref res[j])
-                            : VectorUtils.L1Distance(ref res[i], ref res[j]);
+                        distances[count++] = gaussian ? VectorUtils.L2DistSquared(in res[i], in res[j])
+                            : VectorUtils.L1Distance(in res[i], in res[j]);
                     }
                 }
                 Host.Assert(count == distances.Length);
@@ -410,8 +410,8 @@ private float[] GetAvgDistances(ColumnInfo[] columns, IDataView input)
                 {
                     // For Gaussian kernels, we scale by the L2 distance squared, since the kernel function is exp(-gamma ||x-y||^2).
                     // For Laplacian kernels, we scale by the L1 distance, since the kernel function is exp(-gamma ||x-y||_1).
-                    distances[i / 2] = gaussian ? VectorUtils.L2DistSquared(ref res[i], ref res[i + 1]) :
-                        VectorUtils.L1Distance(ref res[i], ref res[i + 1]);
+                    distances[i / 2] = gaussian ? VectorUtils.L2DistSquared(in res[i], in res[i + 1]) :
+                        VectorUtils.L1Distance(in res[i], in res[i + 1]);
                 }
             }
@@ -561,7 +561,7 @@ private ValueGetter<VBuffer<float>> GetterFromVectorType(IRow input, int iinfo)
                 (ref VBuffer<float> dst) =>
                 {
                     getSrc(ref src);
-                    TransformFeatures(ref src, ref dst, _parent._transformInfos[iinfo], featuresAligned, productAligned);
+                    TransformFeatures(in src, ref dst, _parent._transformInfos[iinfo], featuresAligned, productAligned);
                 };
         }
@@ -580,11 +580,11 @@ private ValueGetter<VBuffer<float>> GetterFromFloatType(IRow input, int iinfo)
                 {
                     getSrc(ref src);
                     oneDimensionalVector.Values[0] = src;
-                    TransformFeatures(ref oneDimensionalVector, ref dst, _parent._transformInfos[iinfo], featuresAligned, productAligned);
+                    TransformFeatures(in oneDimensionalVector, ref dst, _parent._transformInfos[iinfo], featuresAligned, productAligned);
                 };
         }
 
-        private void TransformFeatures(ref VBuffer<float> src, ref VBuffer<float> dst, TransformInfo transformInfo,
+        private void TransformFeatures(in VBuffer<float> src, ref VBuffer<float> dst, TransformInfo transformInfo,
             AlignedArray featuresAligned, AlignedArray productAligned)
         {
             Host.Check(src.Length == transformInfo.SrcDim, "column does not have the expected dimensionality.");
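The two hunks above only change how the distance helpers are called, but the comments carry the reasoning: for the Gaussian kernel exp(-gamma ||x-y||^2) the relevant statistic is the squared L2 distance, and for the Laplacian kernel exp(-gamma ||x-y||_1) it is the L1 distance; averaging these over sampled pairs sets the scale for gamma. A hedged sketch of the two distance computations over dense float arrays (VectorUtils also handles sparse VBuffers; this stand-alone version assumes equal-length dense inputs):

using System;

// Sketch of the distance statistics described in the comments above; not the
// VectorUtils implementation, which additionally handles sparse vectors.
internal static class KernelDistanceSketch
{
    // For exp(-gamma ||x-y||^2): squared L2 distance.
    public static float L2DistSquared(float[] a, float[] b)
    {
        float sum = 0;
        for (int i = 0; i < a.Length; i++)
        {
            float d = a[i] - b[i];
            sum += d * d;
        }
        return sum;
    }

    // For exp(-gamma ||x-y||_1): L1 distance.
    public static float L1Distance(float[] a, float[] b)
    {
        float sum = 0;
        for (int i = 0; i < a.Length; i++)
            sum += Math.Abs(a[i] - b[i]);
        return sum;
    }
}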
diff --git a/src/Microsoft.ML.Transforms/TermLookupTransform.cs b/src/Microsoft.ML.Transforms/TermLookupTransform.cs
index 917aa1e3b1..2fd46f4c63 100644
--- a/src/Microsoft.ML.Transforms/TermLookupTransform.cs
+++ b/src/Microsoft.ML.Transforms/TermLookupTransform.cs
@@ -199,14 +199,14 @@ private ValueGetter<TRes> GetGetterCore(ValueGetter<ReadOnlyMemory<char>> getTer
                 else
                 {
                     Contracts.Assert(0 <= nstr.Id && nstr.Id < _values.Length);
-                    CopyValue(ref _values[nstr.Id], ref dst);
+                    CopyValue(in _values[nstr.Id], ref dst);
                 }
             };
         }
 
         protected abstract void GetMissing(ref TRes dst);
 
-        protected abstract void CopyValue(ref TRes src, ref TRes dst);
+        protected abstract void CopyValue(in TRes src, ref TRes dst);
     }
 
     ///
@@ -238,7 +238,7 @@ protected override void GetMissing(ref TRes dst)
             dst = _badValue;
         }
 
-        protected override void CopyValue(ref TRes src, ref TRes dst)
+        protected override void CopyValue(in TRes src, ref TRes dst)
         {
             dst = src;
         }
@@ -260,7 +260,7 @@ protected override void GetMissing(ref VBuffer<TItem> dst)
             dst = new VBuffer<TItem>(Type.VectorSize, 0, dst.Values, dst.Indices);
         }
 
-        protected override void CopyValue(ref VBuffer<TItem> src, ref VBuffer<TItem> dst)
+        protected override void CopyValue(in VBuffer<TItem> src, ref VBuffer<TItem> dst)
         {
             src.CopyTo(ref dst);
         }
diff --git a/src/Microsoft.ML.Transforms/Text/LdaTransform.cs b/src/Microsoft.ML.Transforms/Text/LdaTransform.cs
index bb6f64a3e7..818fbe7302 100644
--- a/src/Microsoft.ML.Transforms/Text/LdaTransform.cs
+++ b/src/Microsoft.ML.Transforms/Text/LdaTransform.cs
@@ -574,7 +574,7 @@ private void Train(IChannel ch, IDataView trainingData, LdaState[] states)
                     for (int i = 0; i < Infos.Length; i++)
                     {
                         getters[i](ref src);
-                        docSizeCheck[i] += states[i].FeedTrain(Host, ref src);
+                        docSizeCheck[i] += states[i].FeedTrain(Host, in src);
                     }
                 }
                 for (int i = 0; i < Infos.Length; i++)
@@ -781,7 +781,7 @@ public void AllocateDataMemory(int docNum, long corpusSize)
                 _ldaTrainer.AllocateDataMemory(docNum, corpusSize);
             }
 
-            public int FeedTrain(IExceptionContext ectx, ref VBuffer<Double> input)
+            public int FeedTrain(IExceptionContext ectx, in VBuffer<Double> input)
             {
                 Contracts.AssertValue(ectx);
@@ -830,7 +830,7 @@ public void CompleteTrain()
                 _ldaTrainer.Train(""); /* Need to pass in an empty string */
             }
 
-            public void Output(ref VBuffer<Double> src, ref VBuffer<Float> dst, int numBurninIter, bool reset)
+            public void Output(in VBuffer<Double> src, ref VBuffer<Float> dst, int numBurninIter, bool reset)
             {
                 // Prediction for a single document.
                 // LdaSingleBox.InitializeBeforeTest() is NOT thread-safe.
@@ -967,7 +967,7 @@ private ValueGetter<VBuffer<Float>> GetTopic(IRow input, int iinfo)
                 // REVIEW: This will work, but there are opportunities for caching
                 // based on input.Counter that are probably worthwhile given how long inference takes.
                 getSrc(ref src);
-                lda.Output(ref src, ref dst, numBurninIter, reset);
+                lda.Output(in src, ref dst, numBurninIter, reset);
             };
         }
     }
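The comment in Output is the load-bearing one: LdaSingleBox.InitializeBeforeTest() is not thread-safe, so concurrent getters must serialize that call. A minimal sketch of one way to guard a non-thread-safe, one-time initialization; the guard class and its fields are hypothetical and are not the transform's actual synchronization:

using System;

// Hypothetical guard around a non-thread-safe, one-time initialization, in
// the spirit of the comment above.
internal sealed class SingleInitGuard
{
    private readonly object _sync = new object();
    private volatile bool _initialized;

    public void EnsureInitialized(Action initializeBeforeTest)
    {
        if (_initialized)
            return;
        lock (_sync)
        {
            if (!_initialized)
            {
                // e.g. the native InitializeBeforeTest(); runs exactly once.
                initializeBeforeTest();
                _initialized = true;
            }
        }
    }
}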
diff --git a/src/Microsoft.ML.Transforms/Text/NgramHashTransform.cs b/src/Microsoft.ML.Transforms/Text/NgramHashTransform.cs
index efe097726c..5f2678c546 100644
--- a/src/Microsoft.ML.Transforms/Text/NgramHashTransform.cs
+++ b/src/Microsoft.ML.Transforms/Text/NgramHashTransform.cs
@@ -705,7 +705,7 @@ private Delegate MakeGetter(IChannel ch, IRow input, int iinfo, FinderDecorator
                     for (int i = 0; i < srcCount; i++)
                     {
                         getSrc[i](ref src);
-                        bldr.AddNgrams(ref src, i, keyCounts[i]);
+                        bldr.AddNgrams(in src, i, keyCounts[i]);
                     }
                     bldr.GetResult(ref dst);
                 };
diff --git a/src/Microsoft.ML.Transforms/Text/NgramTransform.cs b/src/Microsoft.ML.Transforms/Text/NgramTransform.cs
index 3f24f290f9..870a552f6b 100644
--- a/src/Microsoft.ML.Transforms/Text/NgramTransform.cs
+++ b/src/Microsoft.ML.Transforms/Text/NgramTransform.cs
@@ -512,7 +512,7 @@ private SequencePool[] Train(Arguments args, IDataView trainingData, out double[
                         if (_exes[iinfo].RequireIdf())
                             helpers[iinfo].Reset();
 
-                        helpers[iinfo].AddNgrams(ref src[iinfo], 0, keyCount);
+                        helpers[iinfo].AddNgrams(in src[iinfo], 0, keyCount);
                         if (_exes[iinfo].RequireIdf())
                         {
                             int totalNgrams = counts[iinfo].Sum();
@@ -649,7 +649,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou
                         if (!bldr.IsEmpty)
                         {
                             bldr.Reset();
-                            bldr.AddNgrams(ref src, 0, keyCount);
+                            bldr.AddNgrams(in src, 0, keyCount);
                             bldr.GetResult(ref dst);
                             VBufferUtils.Apply(ref dst, (int i, ref Float v) => v = (Float)(v * _invDocFreqs[iinfo][i]));
                         }
@@ -666,7 +666,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou
                         if (!bldr.IsEmpty)
                         {
                             bldr.Reset();
-                            bldr.AddNgrams(ref src, 0, keyCount);
+                            bldr.AddNgrams(in src, 0, keyCount);
                             bldr.GetResult(ref dst);
                             VBufferUtils.Apply(ref dst, (int i, ref Float v) => v = v >= 1 ? (Float)_invDocFreqs[iinfo][i] : 0);
                         }
@@ -682,7 +682,7 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou
                         if (!bldr.IsEmpty)
                         {
                             bldr.Reset();
-                            bldr.AddNgrams(ref src, 0, keyCount);
+                            bldr.AddNgrams(in src, 0, keyCount);
                             bldr.GetResult(ref dst);
                         }
                         else
diff --git a/src/Microsoft.ML.Transforms/Text/NgramUtils.cs b/src/Microsoft.ML.Transforms/Text/NgramUtils.cs
index 8d76a20041..7a0db6d8bd 100644
--- a/src/Microsoft.ML.Transforms/Text/NgramUtils.cs
+++ b/src/Microsoft.ML.Transforms/Text/NgramUtils.cs
@@ -68,7 +68,7 @@ public void Reset()
             _queue.Clear();
         }
 
-        public bool AddNgrams(ref VBuffer<uint> src, int icol, uint keyMax)
+        public bool AddNgrams(in VBuffer<uint> src, int icol, uint keyMax)
         {
             Contracts.Assert(icol >= 0);
             Contracts.Assert(keyMax > 0);
diff --git a/src/Microsoft.ML.Transforms/WhiteningTransform.cs b/src/Microsoft.ML.Transforms/WhiteningTransform.cs
index b4ec9990f6..beac54dcbf 100644
--- a/src/Microsoft.ML.Transforms/WhiteningTransform.cs
+++ b/src/Microsoft.ML.Transforms/WhiteningTransform.cs
@@ -542,12 +542,12 @@ protected override Delegate GetGetterCore(IChannel ch, IRow input, int iinfo, ou
                 {
                     getSrc(ref src);
                     Host.Check(src.Length == cslotSrc, "Invalid column size.");
-                    FillValues(model, ref src, ref dst, cslotDst);
+                    FillValues(model, in src, ref dst, cslotDst);
                 };
             return del;
         }
 
-        private static void FillValues(Float[] model, ref VBuffer<Float> src, ref VBuffer<Float> dst, int cdst)
+        private static void FillValues(Float[] model, in VBuffer<Float> src, ref VBuffer<Float> dst, int cdst)
         {
             int count = src.Count;
             int length = src.Length;
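FillValues above multiplies the learned whitening model matrix into the input vector. A hedged sketch of the dense case, assuming a row-major cdst-by-length float matrix (the production code also handles sparse src and dispatches to optimized native math):

// Sketch only: dense row-major matrix (cdst x src.Length) times a dense
// vector, the core computation a whitening FillValues must perform. Assumes
// model.Length >= cdst * src.Length; names are illustrative.
internal static class WhiteningSketch
{
    public static float[] Multiply(float[] model, float[] src, int cdst)
    {
        int length = src.Length;
        var dst = new float[cdst];
        for (int row = 0; row < cdst; row++)
        {
            float sum = 0;
            for (int col = 0; col < length; col++)
                sum += model[row * length + col] * src[col];
            dst[row] = sum;
        }
        return dst;
    }
}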
diff --git a/test/Microsoft.ML.Core.Tests/UnitTests/CoreBaseTestClass.cs b/test/Microsoft.ML.Core.Tests/UnitTests/CoreBaseTestClass.cs
index 114d8c48b9..34eb063b5b 100644
--- a/test/Microsoft.ML.Core.Tests/UnitTests/CoreBaseTestClass.cs
+++ b/test/Microsoft.ML.Core.Tests/UnitTests/CoreBaseTestClass.cs
@@ -82,16 +82,16 @@ protected Func<bool> GetComparerVec<T>(IRow r1, IRow r2, int col, int size, Func
             {
                 g1(ref v1);
                 g2(ref v2);
-                return CompareVec(ref v1, ref v2, size, fn);
+                return CompareVec(in v1, in v2, size, fn);
             };
         }
 
-        protected bool CompareVec<T>(ref VBuffer<T> v1, ref VBuffer<T> v2, int size, Func<T, T, bool> fn)
+        protected bool CompareVec<T>(in VBuffer<T> v1, in VBuffer<T> v2, int size, Func<T, T, bool> fn)
         {
-            return CompareVec(ref v1, ref v2, size, (i, x, y) => fn(x, y));
+            return CompareVec(in v1, in v2, size, (i, x, y) => fn(x, y));
         }
 
-        protected bool CompareVec<T>(ref VBuffer<T> v1, ref VBuffer<T> v2, int size, Func<int, T, T, bool> fn)
+        protected bool CompareVec<T>(in VBuffer<T> v1, in VBuffer<T> v2, int size, Func<int, T, T, bool> fn)
         {
             Contracts.Assert(size == 0 || v1.Length == size);
             Contracts.Assert(size == 0 || v2.Length == size);
diff --git a/test/Microsoft.ML.Core.Tests/UnitTests/TestCSharpApi.cs b/test/Microsoft.ML.Core.Tests/UnitTests/TestCSharpApi.cs
index 7b2864fe36..af9fedfde3 100644
--- a/test/Microsoft.ML.Core.Tests/UnitTests/TestCSharpApi.cs
+++ b/test/Microsoft.ML.Core.Tests/UnitTests/TestCSharpApi.cs
@@ -813,7 +813,7 @@ public void TestCrossValidationMacroWithNonDefaultNames()
                     Assert.True(b);
                     getter(ref val);
                     foldGetter(ref fold);
-                    sumBldr.AddFeatures(0, ref val);
+                    sumBldr.AddFeatures(0, in val);
                     Assert.True(ReadOnlyMemoryUtils.EqualsStr("Fold " + f, fold));
                 }
                 var sum = default(VBuffer<double>);
diff --git a/test/Microsoft.ML.Core.Tests/UnitTests/TestEntryPoints.cs b/test/Microsoft.ML.Core.Tests/UnitTests/TestEntryPoints.cs
index 5c2e2898e2..9666f2136b 100644
--- a/test/Microsoft.ML.Core.Tests/UnitTests/TestEntryPoints.cs
+++ b/test/Microsoft.ML.Core.Tests/UnitTests/TestEntryPoints.cs
@@ -1328,9 +1328,9 @@ public void EntryPointMulticlassPipelineEnsemble()
                     getter3(ref score0[3]);
                     getter4(ref score0[4]);
                     getterSaved(ref scoreSaved);
-                    Assert.True(CompareVBuffers(ref scoreSaved, ref score, ref dense1, ref dense2));
+                    Assert.True(CompareVBuffers(in scoreSaved, in score, ref dense1, ref dense2));
                     c(ref avg, score0, null);
-                    Assert.True(CompareVBuffers(ref avg, ref score, ref dense1, ref dense2));
+                    Assert.True(CompareVBuffers(in avg, in score, ref dense1, ref dense2));
                 }
                 Assert.False(curs0.MoveNext());
                 Assert.False(curs1.MoveNext());
@@ -1476,7 +1476,7 @@ public void EntryPointPipelineEnsembleGetSummary()
             Done();
         }
 
-        private static bool CompareVBuffers(ref VBuffer<Single> v1, ref VBuffer<Single> v2, ref VBuffer<Single> dense1, ref VBuffer<Single> dense2)
+        private static bool CompareVBuffers(in VBuffer<Single> v1, in VBuffer<Single> v2, ref VBuffer<Single> dense1, ref VBuffer<Single> dense2)
         {
             if (v1.Length != v2.Length)
                 return false;
diff --git a/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipe.cs b/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipe.cs
index 9edb7bd312..7e0d334a68 100644
--- a/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipe.cs
+++ b/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipe.cs
@@ -233,7 +233,7 @@ public void SavePipeKeyToVec()
                 {
                     getters[i](ref v1);
                     getters[i + 1](ref v2);
-                    Check(CompareVec(ref v1, ref v2, v1.Length, fn), "Mismatch");
+                    Check(CompareVec(in v1, in v2, v1.Length, fn), "Mismatch");
                 }
             }
         }
diff --git a/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipeBase.cs b/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipeBase.cs
index 36750e6810..b684596ec7 100644
--- a/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipeBase.cs
+++ b/test/Microsoft.ML.TestFramework/DataPipe/TestDataPipeBase.cs
@@ -666,7 +666,7 @@ protected bool CheckMetadataNames(string kind, int size, ISchema sch1, ISchema s
             sch1.GetMetadata(kind, col, ref names1);
             sch2.GetMetadata(kind, col, ref names2);
 
-            if (!CompareVec(ref names1, ref names2, size, (a, b) => a.Span.SequenceEqual(b.Span)))
+            if (!CompareVec(in names1, in names2, size, (a, b) => a.Span.SequenceEqual(b.Span)))
             {
                 Fail("Different {0} metadata values", kind);
                 return Failed();
@@ -1221,16 +1221,16 @@ protected Func<bool> GetComparerVec<T>(IRow r1, IRow r2, int col, int size, Func
             {
                 g1(ref v1);
                 g2(ref v2);
-                return CompareVec(ref v1, ref v2, size, fn);
+                return CompareVec(in v1, in v2, size, fn);
             };
         }
 
-        protected bool CompareVec<T>(ref VBuffer<T> v1, ref VBuffer<T> v2, int size, Func<T, T, bool> fn)
+        protected bool CompareVec<T>(in VBuffer<T> v1, in VBuffer<T> v2, int size, Func<T, T, bool> fn)
        {
-            return CompareVec(ref v1, ref v2, size, (i, x, y) => fn(x, y));
+            return CompareVec(in v1, in v2, size, (i, x, y) => fn(x, y));
         }
 
-        protected bool CompareVec<T>(ref VBuffer<T> v1, ref VBuffer<T> v2, int size, Func<int, T, T, bool> fn)
+        protected bool CompareVec<T>(in VBuffer<T> v1, in VBuffer<T> v2, int size, Func<int, T, T, bool> fn)
         {
             Contracts.Assert(size == 0 || v1.Length == size);
             Contracts.Assert(size == 0 || v2.Length == size);
@@ -1303,7 +1303,7 @@ protected void VerifyVecEquality<T>(ValueGetter<VBuffer<T>> vecGetter, ValueGett
             VBuffer<T> fvn = default(VBuffer<T>);
             vecGetter(ref fv);
             vecNGetter(ref fvn);
-            Assert.True(CompareVec(ref fv, ref fvn, size, compare));
+            Assert.True(CompareVec(in fv, in fvn, size, compare));
         }
 
 #if !CORECLR
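The CompareVec helpers changed above all reduce to the same loop: walk both buffers slot by slot and apply a per-index predicate. A self-contained sketch of that comparison over plain index/value pairs standing in for VBuffer (names hypothetical; assumes both vectors share the same logical length):

using System;

// Sketch of slot-wise comparison of two sparse vectors: densify both, then
// apply the per-index predicate fn to each pair of slot values.
internal static class CompareSketch
{
    public static bool CompareVec<T>(
        int length,
        int[] indices1, T[] values1,
        int[] indices2, T[] values2,
        Func<int, T, T, bool> fn)
    {
        var dense1 = new T[length];
        for (int i = 0; i < indices1.Length; i++)
            dense1[indices1[i]] = values1[i];

        var dense2 = new T[length];
        for (int i = 0; i < indices2.Length; i++)
            dense2[indices2[i]] = values2[i];

        for (int slot = 0; slot < length; slot++)
        {
            if (!fn(slot, dense1[slot], dense2[slot]))
                return false;
        }
        return true;
    }
}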
diff --git a/test/Microsoft.ML.Tests/Transformers/CopyColumnEstimatorTests.cs b/test/Microsoft.ML.Tests/Transformers/CopyColumnEstimatorTests.cs
index ff86db9180..019103ff44 100644
--- a/test/Microsoft.ML.Tests/Transformers/CopyColumnEstimatorTests.cs
+++ b/test/Microsoft.ML.Tests/Transformers/CopyColumnEstimatorTests.cs
@@ -156,7 +156,7 @@ void TestMetadataCopy()
                 var type2 = result.Schema.GetColumnType(copyIndex);
                 result.Schema.GetMetadata(MetadataUtils.Kinds.KeyValues, termIndex, ref names1);
                 result.Schema.GetMetadata(MetadataUtils.Kinds.KeyValues, copyIndex, ref names2);
-                Assert.True(CompareVec(ref names1, ref names2, size, (a, b) => a.Span.SequenceEqual(b.Span)));
+                Assert.True(CompareVec(in names1, in names2, size, (a, b) => a.Span.SequenceEqual(b.Span)));
             }
         }
 
@@ -196,12 +196,12 @@ private void ValidateCopyColumnTransformer(IDataView result)
                 }
             }
         }
-        private bool CompareVec<T>(ref VBuffer<T> v1, ref VBuffer<T> v2, int size, Func<T, T, bool> fn)
+        private bool CompareVec<T>(in VBuffer<T> v1, in VBuffer<T> v2, int size, Func<T, T, bool> fn)
         {
-            return CompareVec(ref v1, ref v2, size, (i, x, y) => fn(x, y));
+            return CompareVec(in v1, in v2, size, (i, x, y) => fn(x, y));
         }
-        private bool CompareVec<T>(ref VBuffer<T> v1, ref VBuffer<T> v2, int size, Func<int, T, T, bool> fn)
+        private bool CompareVec<T>(in VBuffer<T> v1, in VBuffer<T> v2, int size, Func<int, T, T, bool> fn)
         {
             Contracts.Assert(size == 0 || v1.Length == size);
             Contracts.Assert(size == 0 || v2.Length == size);
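Across every file in this diff the mechanical change is the same: ref becomes in on parameters the callee only reads. As a self-contained illustration of the semantics being relied on (plain C# 7.2 behavior, not ML.NET code): an in parameter is passed by readonly reference, so no copy of the struct is made, the callee cannot reassign it, and callers may spell out in at the call site, as this change does, or omit it.

using System;

// Illustration of 'in' parameter semantics: a struct is passed by readonly
// reference (no copy), and the callee cannot mutate or reassign the argument.
public readonly struct BigVector
{
    public readonly double X, Y, Z;
    public BigVector(double x, double y, double z) { X = x; Y = y; Z = z; }
}

internal static class InParamDemo
{
    // 'in' = by-reference, read-only; 'v = default;' would not compile here.
    public static double SumComponents(in BigVector v) => v.X + v.Y + v.Z;

    public static void Demo()
    {
        var v = new BigVector(1, 2, 3);
        Console.WriteLine(SumComponents(in v)); // explicit 'in' at the call site
        Console.WriteLine(SumComponents(v));    // 'in' may also be omitted
    }
}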