diff --git a/test/Microsoft.ML.Functional.Tests/Common.cs b/test/Microsoft.ML.Functional.Tests/Common.cs
index 5756893966..cd7f6757fc 100644
--- a/test/Microsoft.ML.Functional.Tests/Common.cs
+++ b/test/Microsoft.ML.Functional.Tests/Common.cs
@@ -9,6 +9,7 @@
 using Microsoft.ML.Data;
 using Microsoft.ML.Functional.Tests.Datasets;
 using Xunit;
+using Xunit.Sdk;
 
 namespace Microsoft.ML.Functional.Tests
 {
@@ -268,6 +269,33 @@ public static void AssertMetricsStatistics(RegressionMetricsStatistics metrics)
             AssertMetricStatistics(metrics.LossFunction);
         }
 
+        /// <summary>
+        /// Assert that two float arrays are not equal.
+        /// </summary>
+        /// <param name="array1">An array of floats.</param>
+        /// <param name="array2">An array of floats.</param>
+        public static void AssertNotEqual(float[] array1, float[] array2)
+        {
+            Assert.NotNull(array1);
+            Assert.NotNull(array2);
+            Assert.Equal(array1.Length, array2.Length);
+
+            bool mismatch = false;
+            for (int i = 0; i < array1.Length; i++)
+                try
+                {
+                    // Use Assert to test for equality rather than
+                    // to roll our own float equality checker.
+                    Assert.Equal(array1[i], array2[i]);
+                }
+                catch (EqualException)
+                {
+                    mismatch = true;
+                    break;
+                }
+            Assert.True(mismatch);
+        }
+
         /// <summary>
         /// Verify that a float array has no NaNs or infinities.
         /// </summary>
diff --git a/test/Microsoft.ML.Functional.Tests/Training.cs b/test/Microsoft.ML.Functional.Tests/Training.cs
new file mode 100644
index 0000000000..c1162e7978
--- /dev/null
+++ b/test/Microsoft.ML.Functional.Tests/Training.cs
@@ -0,0 +1,529 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Linq;
+using Microsoft.ML.Data;
+using Microsoft.ML.Functional.Tests.Datasets;
+using Microsoft.ML.RunTests;
+using Microsoft.ML.TestFramework;
+using Microsoft.ML.Trainers;
+using Microsoft.ML.Trainers.FastTree;
+using Xunit;
+using Xunit.Abstractions;
+
+namespace Microsoft.ML.Functional.Tests
+{
+    public class Training : BaseTestClass
+    {
+        public Training(ITestOutputHelper output) : base(output)
+        {
+        }
+
+        /// <summary>
+        /// Training: It is easy to compare trainer evaluations on the same dataset.
+        /// </summary>
+        [Fact]
+        public void CompareTrainerEvaluations()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            // Get the dataset.
+            var data = mlContext.Data.LoadFromTextFile<TweetSentiment>(GetDataPath(TestDatasets.Sentiment.trainFilename),
+                separatorChar: TestDatasets.Sentiment.fileSeparator,
+                hasHeader: TestDatasets.Sentiment.fileHasHeader,
+                allowQuoting: TestDatasets.Sentiment.allowQuoting);
+            var trainTestSplit = mlContext.Data.TrainTestSplit(data);
+            var trainData = trainTestSplit.TrainSet;
+            var testData = trainTestSplit.TestSet;
+
+            // Create a transformation pipeline.
+            var featurizationPipeline = mlContext.Transforms.Text.FeaturizeText("Features", "SentimentText")
+                .AppendCacheCheckpoint(mlContext);
+
+            // Create a selection of learners.
+            var sdcaTrainer = mlContext.BinaryClassification.Trainers.SdcaCalibrated(
+                    new SdcaCalibratedBinaryClassificationTrainer.Options { NumberOfThreads = 1 });
+
+            var fastTreeTrainer = mlContext.BinaryClassification.Trainers.FastTree(
+                    new FastTreeBinaryClassificationTrainer.Options { NumberOfThreads = 1 });
+
+            var ffmTrainer = mlContext.BinaryClassification.Trainers.FieldAwareFactorizationMachine();
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(trainData);
+            var featurizedTrain = featurization.Transform(trainData);
+            var featurizedTest = featurization.Transform(testData);
+
+            // Fit the trainers.
+            var sdca = sdcaTrainer.Fit(featurizedTrain);
+            var fastTree = fastTreeTrainer.Fit(featurizedTrain);
+            var ffm = ffmTrainer.Fit(featurizedTrain);
+
+            // Evaluate the trainers.
+            var sdcaPredictions = sdca.Transform(featurizedTest);
+            var sdcaMetrics = mlContext.BinaryClassification.EvaluateNonCalibrated(sdcaPredictions);
+            var fastTreePredictions = fastTree.Transform(featurizedTest);
+            var fastTreeMetrics = mlContext.BinaryClassification.EvaluateNonCalibrated(fastTreePredictions);
+            var ffmPredictions = ffm.Transform(featurizedTest);
+            var ffmMetrics = mlContext.BinaryClassification.EvaluateNonCalibrated(ffmPredictions);
+
+            // Validate the results.
+            Common.AssertMetrics(sdcaMetrics);
+            Common.AssertMetrics(fastTreeMetrics);
+            Common.AssertMetrics(ffmMetrics);
+        }
+
+        /// <summary>
+        /// Training: Models can be trained starting from an existing model.
+        /// </summary>
+        [Fact]
+        public void ContinueTrainingAveragePerceptron()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            // Get the dataset.
+            var data = mlContext.Data.LoadFromTextFile<TweetSentiment>(GetDataPath(TestDatasets.Sentiment.trainFilename),
+                separatorChar: TestDatasets.Sentiment.fileSeparator,
+                hasHeader: TestDatasets.Sentiment.fileHasHeader,
+                allowQuoting: TestDatasets.Sentiment.allowQuoting);
+
+            // Create a transformation pipeline.
+            var featurizationPipeline = mlContext.Transforms.Text.FeaturizeText("Features", "SentimentText")
+                .AppendCacheCheckpoint(mlContext);
+
+            var trainer = mlContext.BinaryClassification.Trainers.AveragedPerceptron(
+                new AveragedPerceptronTrainer.Options { NumberOfIterations = 1 });
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(data);
+            var featurizedData = featurization.Transform(data);
+
+            // Fit the first trainer.
+            var firstModel = trainer.Fit(featurizedData);
+            var firstModelWeights = firstModel.Model.Weights;
+
+            // Fit the first trainer again.
+            var firstModelPrime = trainer.Fit(featurizedData);
+            var firstModelWeightsPrime = firstModelPrime.Model.Weights;
+
+            // Fit the second trainer.
+            var secondModel = trainer.Fit(featurizedData, firstModel.Model);
+            var secondModelWeights = secondModel.Model.Weights;
+
+            // Validate that continued training occurred.
+            // Training from the same initial condition, same seed should create the same model.
+            Common.AssertEqual(firstModelWeights.ToArray(), firstModelWeightsPrime.ToArray());
+            // Continued training should create a different model.
+            Common.AssertNotEqual(firstModelWeights.ToArray(), secondModelWeights.ToArray());
+        }
+
+        /// <summary>
+        /// Training: Models can be trained starting from an existing model.
+        /// </summary>
+        [Fact]
+        public void ContinueTrainingFieldAwareFactorizationMachine()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            // Get the dataset.
+            var data = mlContext.Data.LoadFromTextFile<TweetSentiment>(GetDataPath(TestDatasets.Sentiment.trainFilename),
+                separatorChar: TestDatasets.Sentiment.fileSeparator,
+                hasHeader: TestDatasets.Sentiment.fileHasHeader,
+                allowQuoting: TestDatasets.Sentiment.allowQuoting);
+
+            // Create a transformation pipeline.
+            var featurizationPipeline = mlContext.Transforms.Text.FeaturizeText("Features", "SentimentText")
+                .AppendCacheCheckpoint(mlContext);
+
+            var trainer = mlContext.BinaryClassification.Trainers.FieldAwareFactorizationMachine(
+                new FieldAwareFactorizationMachineTrainer.Options { NumberOfIterations = 100 });
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(data);
+            var featurizedData = featurization.Transform(data);
+
+            // Fit the first trainer.
+            var firstModel = trainer.Fit(featurizedData);
+            var firstModelWeights = firstModel.Model.GetLinearWeights();
+
+            // Fit the first trainer again.
+            var firstModelPrime = trainer.Fit(featurizedData);
+            var firstModelWeightsPrime = firstModelPrime.Model.GetLinearWeights();
+
+            // Fit the second trainer.
+            var secondModel = trainer.Fit(featurizedData, modelParameters: firstModel.Model);
+            var secondModelWeights = secondModel.Model.GetLinearWeights();
+
+            // Validate that continued training occurred.
+            // Training from the same initial condition, same seed should create the same model.
+            Assert.Equal(firstModelWeights, firstModelWeightsPrime);
+            // Continued training should create a different model.
+            Assert.NotEqual(firstModelWeights, secondModelWeights);
+        }
+
+        /// <summary>
+        /// Training: Models can be trained starting from an existing model.
+        /// </summary>
+        [Fact]
+        public void ContinueTrainingLinearSupportVectorMachine()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            // Get the dataset.
+            var data = mlContext.Data.LoadFromTextFile<TweetSentiment>(GetDataPath(TestDatasets.Sentiment.trainFilename),
+                separatorChar: TestDatasets.Sentiment.fileSeparator,
+                hasHeader: TestDatasets.Sentiment.fileHasHeader,
+                allowQuoting: TestDatasets.Sentiment.allowQuoting);
+
+            // Create a transformation pipeline.
+            var featurizationPipeline = mlContext.Transforms.Text.FeaturizeText("Features", "SentimentText")
+                .AppendCacheCheckpoint(mlContext);
+
+            var trainer = mlContext.BinaryClassification.Trainers.LinearSvm(
+                new LinearSvmTrainer.Options { NumberOfIterations = 1 });
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(data);
+            var featurizedData = featurization.Transform(data);
+
+            // Fit the first trainer.
+            var firstModel = trainer.Fit(featurizedData);
+            var firstModelWeights = firstModel.Model.Weights;
+
+            // Fit the first trainer again.
+            var firstModelPrime = trainer.Fit(featurizedData);
+            var firstModelWeightsPrime = firstModelPrime.Model.Weights;
+
+            // Fit the second trainer.
+            var secondModel = trainer.Fit(featurizedData, firstModel.Model);
+            var secondModelWeights = secondModel.Model.Weights;
+
+            // Validate that continued training occurred.
+            // Training from the same initial condition, same seed should create the same model.
+            Common.AssertEqual(firstModelWeights.ToArray(), firstModelWeightsPrime.ToArray());
+            // Continued training should create a different model.
+            Common.AssertNotEqual(firstModelWeights.ToArray(), secondModelWeights.ToArray());
+        }
+
+        /// <summary>
+        /// Training: Models can be trained starting from an existing model.
+        /// </summary>
+        [Fact]
+        public void ContinueTrainingLogisticRegression()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            // Get the dataset.
+            var data = mlContext.Data.LoadFromTextFile<TweetSentiment>(GetDataPath(TestDatasets.Sentiment.trainFilename),
+                separatorChar: TestDatasets.Sentiment.fileSeparator,
+                hasHeader: TestDatasets.Sentiment.fileHasHeader,
+                allowQuoting: TestDatasets.Sentiment.allowQuoting);
+
+            // Create a transformation pipeline.
+            var featurizationPipeline = mlContext.Transforms.Text.FeaturizeText("Features", "SentimentText")
+                .AppendCacheCheckpoint(mlContext);
+
+            var trainer = mlContext.BinaryClassification.Trainers.LogisticRegression(
+                new LogisticRegressionBinaryClassificationTrainer.Options { NumberOfThreads = 1, MaximumNumberOfIterations = 10 });
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(data);
+            var featurizedData = featurization.Transform(data);
+
+            // Fit the first trainer.
+            var firstModel = trainer.Fit(featurizedData);
+            var firstModelWeights = firstModel.Model.SubModel.Weights;
+
+            // Fit the first trainer again.
+            var firstModelPrime = trainer.Fit(featurizedData);
+            var firstModelWeightsPrime = firstModelPrime.Model.SubModel.Weights;
+
+            // Fit the second trainer.
+            var secondModel = trainer.Fit(featurizedData, firstModel.Model.SubModel);
+            var secondModelWeights = secondModel.Model.SubModel.Weights;
+
+            // Validate that continued training occurred.
+            // Training from the same initial condition, same seed should create the same model.
+            Common.AssertEqual(firstModelWeights.ToArray(), firstModelWeightsPrime.ToArray());
+            // Continued training should create a different model.
+            Common.AssertNotEqual(firstModelWeights.ToArray(), secondModelWeights.ToArray());
+        }
+
+        /// <summary>
+        /// Training: Models can be trained starting from an existing model.
+        /// </summary>
+        [Fact]
+        public void ContinueTrainingLogisticRegressionMulticlass()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            var data = mlContext.Data.LoadFromTextFile<Iris>(GetDataPath(TestDatasets.iris.trainFilename),
+                hasHeader: TestDatasets.iris.fileHasHeader,
+                separatorChar: TestDatasets.iris.fileSeparator);
+
+            // Create a training pipeline.
+            var featurizationPipeline = mlContext.Transforms.Concatenate("Features", Iris.Features)
+                .Append(mlContext.Transforms.Conversion.MapValueToKey("Label"))
+                .AppendCacheCheckpoint(mlContext);
+
+            var trainer = mlContext.MulticlassClassification.Trainers.LogisticRegression(
+                new LogisticRegressionMulticlassClassificationTrainer.Options { NumberOfThreads = 1, MaximumNumberOfIterations = 10 });
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(data);
+            var featurizedData = featurization.Transform(data);
+
+            // Fit the first trainer.
+            var firstModel = trainer.Fit(featurizedData);
+            VBuffer<float>[] firstModelWeights = null;
+            firstModel.Model.GetWeights(ref firstModelWeights, out int firstModelNumClasses);
+
+            // Fit the first trainer again.
+            var firstModelPrime = trainer.Fit(featurizedData);
+            VBuffer<float>[] firstModelWeightsPrime = null;
+            firstModelPrime.Model.GetWeights(ref firstModelWeightsPrime, out int firstModelNumClassesPrime);
+
+            // Fit the second trainer.
+            var secondModel = trainer.Fit(featurizedData, firstModel.Model);
+            VBuffer<float>[] secondModelWeights = null;
+            secondModel.Model.GetWeights(ref secondModelWeights, out int secondModelNumClasses);
+
+            // Validate that continued training occurred.
+            // Training from the same initial condition, same seed should create the same model.
+            Assert.Equal(firstModelNumClasses, firstModelNumClassesPrime);
+            for (int i = 0; i < firstModelNumClasses; i++)
+                Common.AssertEqual(firstModelWeights[i].DenseValues().ToArray(), firstModelWeightsPrime[i].DenseValues().ToArray());
+            // Continued training should create a different model.
+            Assert.Equal(firstModelNumClasses, secondModelNumClasses);
+            for (int i = 0; i < firstModelNumClasses; i++)
+                Common.AssertNotEqual(firstModelWeights[i].DenseValues().ToArray(), secondModelWeights[i].DenseValues().ToArray());
+        }
+
+        /// <summary>
+        /// Training: Models can be trained starting from an existing model.
+        /// </summary>
+        [Fact]
+        public void ContinueTrainingOnlineGradientDescent()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            // Get the dataset.
+            var data = mlContext.Data.LoadFromTextFile<HousingRegression>(GetDataPath(TestDatasets.housing.trainFilename),
+                separatorChar: TestDatasets.housing.fileSeparator,
+                hasHeader: TestDatasets.housing.fileHasHeader);
+
+            // Create a transformation pipeline.
+            var featurizationPipeline = mlContext.Transforms.Concatenate("Features", HousingRegression.Features)
+                .Append(mlContext.Transforms.Normalize("Features"))
+                .AppendCacheCheckpoint(mlContext);
+
+            var trainer = mlContext.Regression.Trainers.OnlineGradientDescent(
+                new OnlineGradientDescentTrainer.Options { NumberOfIterations = 10 });
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(data);
+            var featurizedData = featurization.Transform(data);
+
+            // Fit the first trainer.
+            var firstModel = trainer.Fit(featurizedData);
+            var firstModelWeights = firstModel.Model.Weights;
+
+            // Fit the first trainer again.
+            var firstModelPrime = trainer.Fit(featurizedData);
+            var firstModelWeightsPrime = firstModelPrime.Model.Weights;
+
+            // Fit the second trainer.
+            var secondModel = trainer.Fit(featurizedData, firstModel.Model);
+            var secondModelWeights = secondModel.Model.Weights;
+
+            // Validate that continued training occurred.
+            // Training from the same initial condition, same seed should create the same model.
+            Common.AssertEqual(firstModelWeights.ToArray(), firstModelWeightsPrime.ToArray());
+            // Continued training should create a different model.
+            Common.AssertNotEqual(firstModelWeights.ToArray(), secondModelWeights.ToArray());
+        }
+
+        /// <summary>
+        /// Training: Models can be trained starting from an existing model.
+        /// </summary>
+        [Fact]
+        public void ContinueTrainingPoissonRegression()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            // Get the dataset.
+            var data = mlContext.Data.LoadFromTextFile<HousingRegression>(GetDataPath(TestDatasets.housing.trainFilename),
+                separatorChar: TestDatasets.housing.fileSeparator,
+                hasHeader: TestDatasets.housing.fileHasHeader);
+
+            // Create a transformation pipeline.
+            var featurizationPipeline = mlContext.Transforms.Concatenate("Features", HousingRegression.Features)
+                .Append(mlContext.Transforms.Normalize("Features"))
+                .AppendCacheCheckpoint(mlContext);
+
+            var trainer = mlContext.Regression.Trainers.PoissonRegression(
+                new PoissonRegressionTrainer.Options { NumberOfThreads = 1, MaximumNumberOfIterations = 100 });
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(data);
+            var featurizedData = featurization.Transform(data);
+
+            // Fit the first trainer.
+            var firstModel = trainer.Fit(featurizedData);
+            var firstModelWeights = firstModel.Model.Weights;
+
+            // Fit the first trainer again.
+            var firstModelPrime = trainer.Fit(featurizedData);
+            var firstModelWeightsPrime = firstModelPrime.Model.Weights;
+
+            // Fit the second trainer.
+            var secondModel = trainer.Fit(featurizedData, firstModel.Model);
+            var secondModelWeights = secondModel.Model.Weights;
+
+            // Validate that continued training occurred.
+            // Training from the same initial condition, same seed should create the same model.
+            Common.AssertEqual(firstModelWeights.ToArray(), firstModelWeightsPrime.ToArray());
+            // Continued training should create a different model.
+            Common.AssertNotEqual(firstModelWeights.ToArray(), secondModelWeights.ToArray());
+        }
+
+        /// <summary>
+        /// Training: Models can be trained starting from an existing model.
+        /// </summary>
+        [Fact]
+        public void ContinueTrainingSymbolicStochasticGradientDescent()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            // Get the dataset.
+            var data = mlContext.Data.LoadFromTextFile<TweetSentiment>(GetDataPath(TestDatasets.Sentiment.trainFilename),
+                separatorChar: TestDatasets.Sentiment.fileSeparator,
+                hasHeader: TestDatasets.Sentiment.fileHasHeader,
+                allowQuoting: TestDatasets.Sentiment.allowQuoting);
+
+            // Create a transformation pipeline.
+            var featurizationPipeline = mlContext.Transforms.Text.FeaturizeText("Features", "SentimentText")
+                .AppendCacheCheckpoint(mlContext);
+
+            var trainer = mlContext.BinaryClassification.Trainers.SymbolicSgd(
+                new SymbolicSgdTrainer.Options
+                {
+                    NumberOfThreads = 1,
+                    NumberOfIterations = 10
+                });
+
+            // Fit the data transformation pipeline.
+            var featurization = featurizationPipeline.Fit(data);
+            var featurizedData = featurization.Transform(data);
+
+            // Fit the first trainer.
+            var firstModel = trainer.Fit(featurizedData);
+            var firstModelWeights = firstModel.Model.SubModel.Weights;
+
+            // Fit the first trainer again.
+            var firstModelPrime = trainer.Fit(featurizedData);
+            var firstModelWeightsPrime = firstModelPrime.Model.SubModel.Weights;
+
+            // Fit the second trainer.
+            var secondModel = trainer.Fit(featurizedData, firstModel.Model.SubModel);
+            var secondModelWeights = secondModel.Model.SubModel.Weights;
+
+            // Validate that continued training occurred.
+            // Training from the same initial condition, same seed should create the same model.
+            Common.AssertEqual(firstModelWeights.ToArray(), firstModelWeightsPrime.ToArray());
+            // Continued training should create a different model.
+            Common.AssertNotEqual(firstModelWeights.ToArray(), secondModelWeights.ToArray());
+        }
+
+        /// <summary>
+        /// Training: Meta-components function as expected. For OVA (one-versus-all), a user will be able to specify only
+        /// binary classifier trainers. If they specify a different model class there should be a compile error.
+        /// </summary>
+        [Fact]
+        public void MetacomponentsFunctionAsExpectedOva()
+        {
+            var mlContext = new MLContext(seed: 1);
+
+            var data = mlContext.Data.LoadFromTextFile<Iris>(GetDataPath(TestDatasets.iris.trainFilename),
+                hasHeader: TestDatasets.iris.fileHasHeader,
+                separatorChar: TestDatasets.iris.fileSeparator);
+
+            // Create a model training an OVA trainer with a binary classifier.
+            var anomalyDetectionTrainer = mlContext.AnomalyDetection.Trainers.RandomizedPca();
+            var anomalyDetectionPipeline = mlContext.Transforms.Concatenate("Features", Iris.Features)
+                .AppendCacheCheckpoint(mlContext)
+                .Append(mlContext.Transforms.Conversion.MapValueToKey("Label"))
+                .Append(mlContext.MulticlassClassification.Trainers.OneVersusAll(anomalyDetectionTrainer))
+                .Append(mlContext.Transforms.Conversion.MapKeyToValue("PredictedLabel"));
+
+            // Fit the binary classification pipeline.
+            Assert.Throws<InvalidOperationException>(() => anomalyDetectionPipeline.Fit(data));
+
+            // Create a model training an OVA trainer with a binary classifier.
+            var binaryclassificationTrainer = mlContext.BinaryClassification.Trainers.LogisticRegression(
+                new LogisticRegressionBinaryClassificationTrainer.Options { MaximumNumberOfIterations = 10, NumberOfThreads = 1, });
+            var binaryClassificationPipeline = mlContext.Transforms.Concatenate("Features", Iris.Features)
+                .AppendCacheCheckpoint(mlContext)
+                .Append(mlContext.Transforms.Conversion.MapValueToKey("Label"))
+                .Append(mlContext.MulticlassClassification.Trainers.OneVersusAll(binaryclassificationTrainer));
+
+            // Fit the binary classification pipeline.
+            var binaryClassificationModel = binaryClassificationPipeline.Fit(data);
+
+            // Transform the data
+            var binaryClassificationPredictions = binaryClassificationModel.Transform(data);
+
+            // Evaluate the model.
+            var binaryClassificationMetrics = mlContext.MulticlassClassification.Evaluate(binaryClassificationPredictions);
+
+            // Create a model training an OVA trainer with a clustering trainer.
+            var kmeansTrainer = mlContext.Clustering.Trainers.KMeans(
+                new KMeansTrainer.Options { MaximumNumberOfIterations = 10, NumberOfThreads = 1, });
+
+            Assert.Throws<ArgumentOutOfRangeException>(() =>
+                mlContext.Transforms.Concatenate("Features", Iris.Features)
+                    .AppendCacheCheckpoint(mlContext)
+                    .Append(mlContext.Transforms.Conversion.MapValueToKey("Label"))
+                    .Append(mlContext.MulticlassClassification.Trainers.OneVersusAll(kmeansTrainer))
+                    .Append(mlContext.Transforms.Conversion.MapKeyToValue("PredictedLabel")));
+
+            // Create a model training an OVA trainer with a multiclass classification trainer.
+            var multiclassTrainer = mlContext.MulticlassClassification.Trainers.LogisticRegression(
+                new LogisticRegressionMulticlassClassificationTrainer.Options { MaximumNumberOfIterations = 10, NumberOfThreads = 1, });
+            Assert.Throws<ArgumentOutOfRangeException>(() => 
+                mlContext.Transforms.Concatenate("Features", Iris.Features)
+                    .AppendCacheCheckpoint(mlContext)
+                    .Append(mlContext.Transforms.Conversion.MapValueToKey("Label"))
+                    .Append(mlContext.MulticlassClassification.Trainers.OneVersusAll(multiclassTrainer))
+                    .Append(mlContext.Transforms.Conversion.MapKeyToValue("PredictedLabel")));
+
+            // Create a model training an OVA trainer with a ranking trainer.
+            var rankingTrainer = mlContext.Ranking.Trainers.FastTree(
+                new FastTreeRankingTrainer.Options { NumberOfTrees = 2, NumberOfThreads = 1, });
+            // Todo #2920: Make this fail somehow.
+            var rankingPipeline = mlContext.Transforms.Concatenate("Features", Iris.Features)
+                .AppendCacheCheckpoint(mlContext)
+                .Append(mlContext.Transforms.Conversion.MapValueToKey("Label"))
+                .Append(mlContext.MulticlassClassification.Trainers.OneVersusAll(rankingTrainer))
+                .Append(mlContext.Transforms.Conversion.MapKeyToValue("PredictedLabel"));
+
+            // Fit the invalid pipeline.
+            Assert.Throws<ArgumentOutOfRangeException>(() => rankingPipeline.Fit(data));
+
+            // Create a model training an OVA trainer with a regressor.
+            var regressionTrainer = mlContext.Regression.Trainers.PoissonRegression(
+                new PoissonRegressionTrainer.Options { MaximumNumberOfIterations = 10, NumberOfThreads = 1, });
+            // Todo #2920: Make this fail somehow.
+            var regressionPipeline = mlContext.Transforms.Concatenate("Features", Iris.Features)
+                .AppendCacheCheckpoint(mlContext)
+                .Append(mlContext.Transforms.Conversion.MapValueToKey("Label"))
+                .Append(mlContext.MulticlassClassification.Trainers.OneVersusAll(regressionTrainer))
+                .Append(mlContext.Transforms.Conversion.MapKeyToValue("PredictedLabel"));
+
+            // Fit the invalid pipeline.
+            Assert.Throws<ArgumentOutOfRangeException>(() => regressionPipeline.Fit(data));
+        }
+    }
+}
\ No newline at end of file
diff --git a/test/Microsoft.ML.Tests/Scenarios/Api/Estimators/Metacomponents.cs b/test/Microsoft.ML.Tests/Scenarios/Api/Estimators/Metacomponents.cs
deleted file mode 100644
index 9e9411a2f9..0000000000
--- a/test/Microsoft.ML.Tests/Scenarios/Api/Estimators/Metacomponents.cs
+++ /dev/null
@@ -1,37 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-using Microsoft.ML.Data;
-using Microsoft.ML.RunTests;
-using Microsoft.ML.Trainers;
-using Microsoft.ML.Transforms;
-using Xunit;
-
-namespace Microsoft.ML.Tests.Scenarios.Api
-{
-    public partial class ApiScenariosTests
-    {
-        /// <summary>
-        /// Meta-components: Meta-components (for example, components that themselves instantiate components) should not be booby-trapped.
-        /// When specifying what trainer OVA should use, a user will be able to specify any binary classifier.
-        /// If they specify a regression or multi-class classifier ideally that should be a compile error.
-        /// </summary>
-        [Fact]
-        public void Metacomponents()
-        {
-            var ml = new MLContext();
-            var data = ml.Data.LoadFromTextFile<IrisData>(GetDataPath(TestDatasets.irisData.trainFilename), separatorChar: ',');
-
-            var sdcaTrainer = ml.BinaryClassification.Trainers.SdcaNonCalibrated(
-                new SdcaNonCalibratedBinaryClassificationTrainer.Options { MaximumNumberOfIterations = 100, Shuffle = true, NumberOfThreads = 1, });
-
-            var pipeline = new ColumnConcatenatingEstimator (ml, "Features", "SepalLength", "SepalWidth", "PetalLength", "PetalWidth")
-                .Append(ml.Transforms.Conversion.MapValueToKey("Label"), TransformerScope.TrainTest)
-                .Append(ml.MulticlassClassification.Trainers.OneVersusAll(sdcaTrainer))
-                .Append(ml.Transforms.Conversion.MapKeyToValue(("PredictedLabel")));
-
-            var model = pipeline.Fit(data);
-        }
-    }
-}
\ No newline at end of file