From 84e56ca21afc53a8256c185be8bc2be8726550fe Mon Sep 17 00:00:00 2001 From: Shahab Moradi Date: Tue, 16 Apr 2019 12:15:02 -0700 Subject: [PATCH 1/4] Removed tmpurl_lr --- .../Standard/Online/AveragedLinear.cs | 2 +- src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs | 2 +- .../StandardTrainersCatalog.cs | 8 ++++---- src/Microsoft.ML.StaticPipe/SgdStatic.cs | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs index 0bcd1b3c6e..e02d4bcdbe 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs @@ -21,7 +21,7 @@ namespace Microsoft.ML.Trainers public abstract class AveragedLinearOptions : OnlineLinearOptions { /// - /// Learning rate. + /// Learning rate. /// [Argument(ArgumentType.AtMostOnce, HelpText = "Learning rate", ShortName = "lr", SortOrder = 50)] [TGUI(Label = "Learning rate", SuggestedSweeps = "0.01,0.1,0.5,1.0")] diff --git a/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs b/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs index 5c763998c2..50cede6265 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs @@ -1814,7 +1814,7 @@ public class OptionsBase : TrainerInputBaseWithWeight public int NumberOfIterations = Defaults.NumberOfIterations; /// - /// The initial learning rate used by SGD. + /// The initial learning rate used by SGD. /// [Argument(ArgumentType.AtMostOnce, HelpText = "Initial learning rate (only used by SGD)", Name = "InitialLearningRate", ShortName = "ilr,lr,InitLearningRate")] [TGUI(Label = "Initial Learning Rate (for SGD)")] diff --git a/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs b/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs index 4eaeb9dc53..a11b1c34dc 100644 --- a/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs +++ b/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs @@ -25,7 +25,7 @@ public static class StandardTrainersCatalog /// The features, or independent variables. /// The name of the example weight column (optional). /// The maximum number of passes through the training dataset; set to 1 to simulate online learning. - /// The initial learning rate used by SGD. + /// The initial learning rate used by SGD. /// The L2 weight for regularization. /// /// @@ -81,7 +81,7 @@ public static SgdCalibratedTrainer SgdCalibrated(this BinaryClassificationCatalo /// The name of the example weight column (optional). /// The loss function minimized in the training process. Using, for example, leads to a support vector machine trainer. /// The maximum number of passes through the training dataset; set to 1 to simulate online learning. - /// The initial learning rate used by SGD. + /// The initial learning rate used by SGD. /// The L2 weight for regularization. /// /// @@ -389,7 +389,7 @@ public static SdcaNonCalibratedMulticlassTrainer SdcaNonCalibrated(this Multicla /// The name of the label column. The column data must be . /// The name of the feature column. The column data must be a known-sized vector of . /// The loss function minimized in the training process. If , would be used and lead to a max-margin averaged perceptron trainer. - /// The initial learning rate used by SGD. + /// The initial learning rate used by SGD. 
/// /// to decrease the as iterations progress; otherwise, . /// Default is . @@ -463,7 +463,7 @@ public IClassificationLoss CreateComponent(IHostEnvironment env) /// The name of the label column. /// The name of the feature column. /// The loss function minimized in the training process. Using, for example, leads to a least square trainer. - /// The initial learning rate used by SGD. + /// The initial learning rate used by SGD. /// Decrease learning rate as iterations progress. /// The L2 weight for regularization. /// The number of passes through the training dataset. diff --git a/src/Microsoft.ML.StaticPipe/SgdStatic.cs b/src/Microsoft.ML.StaticPipe/SgdStatic.cs index e228a064ef..e905d835e9 100644 --- a/src/Microsoft.ML.StaticPipe/SgdStatic.cs +++ b/src/Microsoft.ML.StaticPipe/SgdStatic.cs @@ -21,7 +21,7 @@ public static class SgdStaticExtensions /// The name of the feature column. /// The name for the example weight column. /// The maximum number of iterations; set to 1 to simulate online learning. - /// The initial learning rate used by SGD. + /// The initial learning rate used by SGD. /// The L2 weight for regularization. /// A delegate that is called every time the /// method is called on the @@ -101,7 +101,7 @@ public static (Scalar score, Scalar probability, Scalar pred /// The name of the feature column. /// The name for the example weight column. /// The maximum number of iterations; set to 1 to simulate online learning. - /// The initial learning rate used by SGD. + /// The initial learning rate used by SGD. /// The L2 weight for regularization. /// The loss function to use. /// A delegate that is called every time the From 9526f568b30a0d5ffc3ac193ade3ef4f1863ed43 Mon Sep 17 00:00:00 2001 From: Shahab Moradi Date: Tue, 16 Apr 2019 12:18:38 -0700 Subject: [PATCH 2/4] Replaced tmpurl_regularization --- .../Standard/Online/AveragedLinear.cs | 2 +- .../Standard/SdcaBinary.cs | 6 +-- .../StandardTrainersCatalog.cs | 40 +++++++++---------- src/Microsoft.ML.StaticPipe/SgdStatic.cs | 4 +- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs index e02d4bcdbe..8d8ef61f97 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs @@ -61,7 +61,7 @@ public abstract class AveragedLinearOptions : OnlineLinearOptions public bool LazyUpdate = true; /// - /// The L2 weight for regularization. + /// The L2 weight for regularization. /// [Argument(ArgumentType.AtMostOnce, HelpText = "L2 Regularization Weight", ShortName = "reg,L2RegularizerWeight", SortOrder = 50)] [TGUI(Label = "L2 Regularization Weight")] diff --git a/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs b/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs index 50cede6265..b67118e2b8 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs @@ -157,7 +157,7 @@ public abstract class SdcaTrainerBase : Stochast public abstract class OptionsBase : TrainerInputBaseWithWeight { /// - /// The L2 regularization hyperparameter. + /// The L2 regularization hyperparameter. /// [Argument(ArgumentType.AtMostOnce, HelpText = "L2 regularizer constant. 
By default the l2 constant is automatically inferred based on data set.", NullName = "", ShortName = "l2, L2Const", SortOrder = 1)] [TGUI(Label = "L2 Regularizer Constant", SuggestedSweeps = ",1e-7,1e-6,1e-5,1e-4,1e-3,1e-2")] @@ -166,7 +166,7 @@ public abstract class OptionsBase : TrainerInputBaseWithWeight // REVIEW: make the default positive when we know how to consume a sparse model /// - /// The L1 regularization hyperparameter. + /// The L1 regularization hyperparameter. /// [Argument(ArgumentType.AtMostOnce, HelpText = "L1 soft threshold (L1/L2). Note that it is easier to control and sweep using the threshold parameter than the raw L1-regularizer constant. By default the l1 threshold is automatically inferred based on data set.", NullName = "", Name = "L1Threshold", ShortName = "l1", SortOrder = 2)] @@ -1776,7 +1776,7 @@ public abstract class SgdBinaryTrainerBase : public class OptionsBase : TrainerInputBaseWithWeight { /// - /// The L2 weight for regularization. + /// The L2 weight for regularization. /// [Argument(ArgumentType.AtMostOnce, HelpText = "L2 Regularization constant", ShortName = "l2, L2Weight", SortOrder = 50)] [TGUI(Label = "L2 Regularization Constant", SuggestedSweeps = "1e-7,5e-7,1e-6,5e-6,1e-5")] diff --git a/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs b/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs index a11b1c34dc..7addf35132 100644 --- a/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs +++ b/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs @@ -26,7 +26,7 @@ public static class StandardTrainersCatalog /// The name of the example weight column (optional). /// The maximum number of passes through the training dataset; set to 1 to simulate online learning. /// The initial learning rate used by SGD. - /// The L2 weight for regularization. + /// The L2 weight for regularization. /// /// /// The loss function minimized in the training process. Using, for example, leads to a support vector machine trainer. /// The maximum number of passes through the training dataset; set to 1 to simulate online learning. /// The initial learning rate used by SGD. - /// The L2 weight for regularization. + /// The L2 weight for regularization. /// /// /// The name of the feature column. /// The name of the example weight column (optional). /// The loss function minimized in the training process. Using, for example, its default leads to a least square trainer. - /// The L2 weight for regularization. - /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. + /// The L2 weight for regularization. + /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. /// The maximum number of passes to perform over the data. /// /// @@ -187,8 +187,8 @@ public static SdcaRegressionTrainer Sdca(this RegressionCatalog.RegressionTraine /// The name of the label column. /// The name of the feature column. /// The name of the example weight column (optional). - /// The L2 weight for regularization. - /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. + /// The L2 weight for regularization. + /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. /// The maximum number of passes to perform over the data. /// /// @@ -240,8 +240,8 @@ public static SdcaLogisticRegressionBinaryTrainer SdcaLogisticRegression( /// The name of the feature column. /// The name of the example weight column (optional). 
/// The loss function minimized in the training process. Defaults to if not specified. - /// The L2 weight for regularization. - /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. + /// The L2 weight for regularization. + /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. /// The maximum number of passes to perform over the data. /// /// @@ -287,8 +287,8 @@ public static SdcaNonCalibratedBinaryTrainer SdcaNonCalibrated( /// The name of the label column. /// The name of the feature column. /// The name of the example weight column (optional). - /// The L2 weight for regularization. - /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. + /// The L2 weight for regularization. + /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. /// The maximum number of passes to perform over the data. /// /// @@ -338,8 +338,8 @@ public static SdcaMaximumEntropyMulticlassTrainer SdcaMaximumEntropy(this Multic /// The name of the feature column. /// The name of the example weight column (optional). /// The loss function to be minimized. Defaults to if not specified. - /// The L2 weight for regularization. - /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. + /// The L2 weight for regularization. + /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. /// The maximum number of passes to perform over the data. /// /// @@ -394,7 +394,7 @@ public static SdcaNonCalibratedMulticlassTrainer SdcaNonCalibrated(this Multicla /// to decrease the as iterations progress; otherwise, . /// Default is . /// - /// The L2 weight for regularization. + /// The L2 weight for regularization. /// Number of passes through the training dataset. /// /// @@ -465,7 +465,7 @@ public IClassificationLoss CreateComponent(IHostEnvironment env) /// The loss function minimized in the training process. Using, for example, leads to a least square trainer. /// The initial learning rate used by SGD. /// Decrease learning rate as iterations progress. - /// The L2 weight for regularization. + /// The L2 weight for regularization. /// The number of passes through the training dataset. public static OnlineGradientDescentTrainer OnlineGradientDescent(this RegressionCatalog.RegressionTrainers catalog, string labelColumnName = DefaultColumnNames.Label, @@ -505,8 +505,8 @@ public static OnlineGradientDescentTrainer OnlineGradientDescent(this Regression /// The name of the feature column. /// The name of the example weight column (optional). /// Enforce non-negative weights. - /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. - /// The L2 weight for regularization. + /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. + /// The L2 weight for regularization. /// Memory size for . Low=faster, less accurate. /// Threshold for optimizer convergence. /// @@ -559,8 +559,8 @@ public static LbfgsLogisticRegressionBinaryTrainer LbfgsLogisticRegression(this /// The name of the label column. /// The name of the feature column. /// The name of the example weight column (optional). - /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. - /// The L2 weight for regularization. + /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. 
+ /// The L2 weight for regularization. /// Threshold for optimizer convergence. /// Number of previous iterations to remember for estimating the Hessian. Lower values mean faster but less accurate estimates. /// Enforce non-negative weights. @@ -613,8 +613,8 @@ public static LbfgsPoissonRegressionTrainer LbfgsPoissonRegression(this Regressi /// The name of the feature column. /// The name of the example weight column (optional). /// Enforce non-negative weights. - /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. - /// The L2 weight for regularization. + /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. + /// The L2 weight for regularization. /// Memory size for . Low=faster, less accurate. /// Threshold for optimizer convergence. public static LbfgsMaximumEntropyMulticlassTrainer LbfgsMaximumEntropy(this MulticlassClassificationCatalog.MulticlassClassificationTrainers catalog, diff --git a/src/Microsoft.ML.StaticPipe/SgdStatic.cs b/src/Microsoft.ML.StaticPipe/SgdStatic.cs index e905d835e9..c0c02cb7e9 100644 --- a/src/Microsoft.ML.StaticPipe/SgdStatic.cs +++ b/src/Microsoft.ML.StaticPipe/SgdStatic.cs @@ -22,7 +22,7 @@ public static class SgdStaticExtensions /// The name for the example weight column. /// The maximum number of iterations; set to 1 to simulate online learning. /// The initial learning rate used by SGD. - /// The L2 weight for regularization. + /// The L2 weight for regularization. /// A delegate that is called every time the /// method is called on the /// instance created out of this. This delegate will receive @@ -102,7 +102,7 @@ public static (Scalar score, Scalar probability, Scalar pred /// The name for the example weight column. /// The maximum number of iterations; set to 1 to simulate online learning. /// The initial learning rate used by SGD. - /// The L2 weight for regularization. + /// The L2 weight for regularization. /// The loss function to use. /// A delegate that is called every time the /// method is called on the From e8e83a8d6ff01590e24fbcf459703ea90edc33e2 Mon Sep 17 00:00:00 2001 From: Shahab Moradi Date: Tue, 16 Apr 2019 12:19:33 -0700 Subject: [PATCH 3/4] Replaced tmpurl_loss --- .../Standard/Online/AveragedPerceptron.cs | 4 ++-- .../Standard/Online/OnlineGradientDescent.cs | 2 +- .../Standard/SdcaBinary.cs | 4 ++-- .../Standard/SdcaMulticlass.cs | 2 +- .../Standard/SdcaRegression.cs | 4 ++-- .../StandardTrainersCatalog.cs | 12 ++++++------ 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs index 50a4b1c61c..8b65c3de22 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs @@ -79,13 +79,13 @@ public sealed class AveragedPerceptronTrainer : AveragedLinearTrainer - /// A custom loss. + /// A custom loss. /// [Argument(ArgumentType.Multiple, Name = "LossFunction", HelpText = "Loss Function", ShortName = "loss", SortOrder = 50)] internal ISupportClassificationLossFactory ClassificationLossFunctionFactory = new HingeLoss.Options(); /// - /// A custom loss. + /// A custom loss. 
/// public IClassificationLoss LossFunction { get; set; } diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineGradientDescent.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineGradientDescent.cs index a0f678ae95..53e6745243 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineGradientDescent.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineGradientDescent.cs @@ -52,7 +52,7 @@ public sealed class Options : AveragedLinearOptions internal ISupportRegressionLossFactory RegressionLossFunctionFactory = new SquaredLossFactory(); /// - /// A custom loss. + /// A custom loss. /// public IRegressionLoss LossFunction { get; set; } diff --git a/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs b/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs index b67118e2b8..606eb54cc0 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs @@ -1623,7 +1623,7 @@ public sealed class SdcaNonCalibratedBinaryTrainer : SdcaBinaryTrainerBase - /// The custom loss. + /// The custom loss. /// /// /// If unspecified, will be used. @@ -1632,7 +1632,7 @@ public sealed class Options : BinaryOptionsBase internal ISupportSdcaClassificationLossFactory LossFunctionFactory = new LogLossFactory(); /// - /// The custom loss. + /// The custom loss. /// /// /// If unspecified, will be used. diff --git a/src/Microsoft.ML.StandardTrainers/Standard/SdcaMulticlass.cs b/src/Microsoft.ML.StandardTrainers/Standard/SdcaMulticlass.cs index 2344651d59..bc4a578c3a 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/SdcaMulticlass.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/SdcaMulticlass.cs @@ -42,7 +42,7 @@ public abstract class SdcaMulticlassTrainerBase : SdcaTrainerBase - /// The custom loss. + /// The custom loss. /// /// /// If unspecified, will be used. diff --git a/src/Microsoft.ML.StandardTrainers/Standard/SdcaRegression.cs b/src/Microsoft.ML.StandardTrainers/Standard/SdcaRegression.cs index 3b242f907c..1f27c19bcc 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/SdcaRegression.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/SdcaRegression.cs @@ -38,7 +38,7 @@ public sealed class SdcaRegressionTrainer : SdcaTrainerBase - /// A custom loss. + /// A custom loss. /// /// /// Defaults to @@ -47,7 +47,7 @@ public sealed class Options : OptionsBase internal ISupportSdcaRegressionLossFactory LossFunctionFactory = new SquaredLossFactory(); /// - /// A custom loss. + /// A custom loss. /// /// /// Defaults to diff --git a/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs b/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs index 7addf35132..fd06f33389 100644 --- a/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs +++ b/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs @@ -79,7 +79,7 @@ public static SgdCalibratedTrainer SgdCalibrated(this BinaryClassificationCatalo /// The name of the label column, or dependent variable. /// The features, or independent variables. /// The name of the example weight column (optional). - /// The loss function minimized in the training process. Using, for example, leads to a support vector machine trainer. + /// The loss function minimized in the training process. Using, for example, leads to a support vector machine trainer. /// The maximum number of passes through the training dataset; set to 1 to simulate online learning. /// The initial learning rate used by SGD. /// The L2 weight for regularization. 
@@ -135,7 +135,7 @@ public static SgdNonCalibratedTrainer SgdNonCalibrated(this BinaryClassification /// The name of the label column. /// The name of the feature column. /// The name of the example weight column (optional). - /// The loss function minimized in the training process. Using, for example, its default leads to a least square trainer. + /// The loss function minimized in the training process. Using, for example, its default leads to a least square trainer. /// The L2 weight for regularization. /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. /// The maximum number of passes to perform over the data. @@ -239,7 +239,7 @@ public static SdcaLogisticRegressionBinaryTrainer SdcaLogisticRegression( /// The name of the label column. /// The name of the feature column. /// The name of the example weight column (optional). - /// The loss function minimized in the training process. Defaults to if not specified. + /// The loss function minimized in the training process. Defaults to if not specified. /// The L2 weight for regularization. /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. /// The maximum number of passes to perform over the data. @@ -337,7 +337,7 @@ public static SdcaMaximumEntropyMulticlassTrainer SdcaMaximumEntropy(this Multic /// The name of the label column. /// The name of the feature column. /// The name of the example weight column (optional). - /// The loss function to be minimized. Defaults to if not specified. + /// The loss function to be minimized. Defaults to if not specified. /// The L2 weight for regularization. /// The L1 regularization hyperparameter. Higher values will tend to lead to more sparse model. /// The maximum number of passes to perform over the data. @@ -388,7 +388,7 @@ public static SdcaNonCalibratedMulticlassTrainer SdcaNonCalibrated(this Multicla /// The binary classification catalog trainer object. /// The name of the label column. The column data must be . /// The name of the feature column. The column data must be a known-sized vector of . - /// The loss function minimized in the training process. If , would be used and lead to a max-margin averaged perceptron trainer. + /// The loss function minimized in the training process. If , would be used and lead to a max-margin averaged perceptron trainer. /// The initial learning rate used by SGD. /// /// to decrease the as iterations progress; otherwise, . @@ -462,7 +462,7 @@ public IClassificationLoss CreateComponent(IHostEnvironment env) /// The regression catalog trainer object. /// The name of the label column. /// The name of the feature column. - /// The loss function minimized in the training process. Using, for example, leads to a least square trainer. + /// The loss function minimized in the training process. Using, for example, leads to a least square trainer. /// The initial learning rate used by SGD. /// Decrease learning rate as iterations progress. /// The L2 weight for regularization. 
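For reference, the parameters whose doc comments are touched in patches 1-3 (initial learning rate, learning-rate decay, L1/L2 regularization, number of passes, and the pluggable loss) surface on the catalog extension methods roughly as in the following minimal C# sketch. It is illustrative only: it assumes the ML.NET catalog signatures these doc comments describe, the sample hyperparameter values are arbitrary, and the MLContext/data-loading boilerplate is omitted.

using Microsoft.ML;
using Microsoft.ML.Trainers;

public static class TrainerOptionsSketch
{
    public static void Main()
    {
        var mlContext = new MLContext(seed: 0);

        // Averaged perceptron: initial learning rate, learning-rate decay, L2 regularization,
        // number of passes, and a custom loss (HingeLoss is the documented default when none is given).
        var averagedPerceptron = mlContext.BinaryClassification.Trainers.AveragedPerceptron(
            labelColumnName: "Label",
            featureColumnName: "Features",
            lossFunction: new HingeLoss(),
            learningRate: 0.1f,
            decreaseLearningRate: false,
            l2Regularization: 0f,
            numberOfIterations: 10);

        // SDCA logistic regression: L1/L2 regularization and the maximum number of passes over
        // the data; passing null lets the trainer infer these from the data set, as the help text notes.
        var sdca = mlContext.BinaryClassification.Trainers.SdcaLogisticRegression(
            labelColumnName: "Label",
            featureColumnName: "Features",
            l2Regularization: null,
            l1Regularization: null,
            maximumNumberOfIterations: null);

        // Either estimator would be appended to a data-processing pipeline and fitted on an IDataView.
    }
}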
From d557c7be2079388afd6884af9271ba048f6651d6 Mon Sep 17 00:00:00 2001 From: Shahab Moradi Date: Tue, 16 Apr 2019 12:20:07 -0700 Subject: [PATCH 4/4] Replaced tmpurl_calib --- .../Standard/Online/AveragedPerceptron.cs | 2 +- src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs index 8b65c3de22..97f66ecdf8 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs @@ -90,7 +90,7 @@ public sealed class Options : AveragedLinearOptions public IClassificationLoss LossFunction { get; set; } /// - /// The calibrator for producing probabilities. Default is exponential (aka Platt) calibration. + /// The calibrator for producing probabilities. Default is exponential (aka Platt) calibration. /// [Argument(ArgumentType.AtMostOnce, HelpText = "The calibrator kind to apply to the predictor. Specify null for no calibration", Visibility = ArgumentAttribute.VisibilityType.EntryPointsOnly)] internal ICalibratorTrainerFactory Calibrator = new PlattCalibratorTrainerFactory(); diff --git a/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs b/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs index 606eb54cc0..bbe982b6cb 100644 --- a/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs +++ b/src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs @@ -1547,7 +1547,7 @@ private protected override BinaryPredictionTransformer MakeTra /// /// The for training a binary logistic regression classification model using the stochastic dual coordinate ascent method. - /// The trained model is calibrated and can produce probability by feeding the output value of the + /// The trained model is calibrated and can produce probability by feeding the output value of the /// linear function to a . /// /// @@ -2171,7 +2171,7 @@ private protected override void CheckLabel(RoleMappedData examples, out int weig /// /// The for training logistic regression using a parallel stochastic gradient method. - /// The trained model is calibrated and can produce probability by feeding the output value of the + /// The trained model is calibrated and can produce probability by feeding the output value of the /// linear function to a . /// ///
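As a usage note for the calibration wording above: SdcaLogisticRegression already yields a calibrated model (its scored output includes a Probability column produced by a Platt-style calibrator), whereas SdcaNonCalibrated yields only a raw score, and a calibrator can be appended when probabilities are needed. The following is a minimal C# sketch under those assumptions, using the Calibrators.Platt catalog entry with its default column names.

using Microsoft.ML;

public static class CalibrationSketch
{
    public static void Main()
    {
        var mlContext = new MLContext(seed: 0);

        // Calibrated by construction: the scored output includes a Probability column.
        var calibrated = mlContext.BinaryClassification.Trainers.SdcaLogisticRegression();

        // Not calibrated: produces only a raw Score. Appending a Platt calibrator maps that
        // score to a probability, mirroring what the calibrated trainer does internally.
        var withCalibrator = mlContext.BinaryClassification.Trainers.SdcaNonCalibrated()
            .Append(mlContext.BinaryClassification.Calibrators.Platt());
    }
}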