Remove the System.Drawing dependency #6363

Merged · 7 commits · Oct 20, 2022
2 changes: 1 addition & 1 deletion THIRD-PARTY-NOTICES.TXT
@@ -84,4 +84,4 @@ Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
limitations under the License.
4 changes: 2 additions & 2 deletions build/ci/job-template.yml
@@ -68,11 +68,11 @@ jobs:
steps:
# Extra MacOS step required to install OS-specific dependencies
- ${{ if and(contains(parameters.pool.vmImage, 'macOS'), not(contains(parameters.name, 'cross'))) }}:
- script: export HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=TRUE && brew update && brew install mono-libgdiplus && brew unlink libomp && brew install $(Build.SourcesDirectory)/build/libomp.rb --build-from-source --formula
- script: export HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=TRUE && brew update && brew unlink libomp && brew install $(Build.SourcesDirectory)/build/libomp.rb --build-from-source --formula
displayName: Install MacOS build dependencies
# Extra Apple MacOS step required to install OS-specific dependencies
- ${{ if and(contains(parameters.pool.vmImage, 'macOS'), contains(parameters.name, 'cross')) }}:
- script: brew update && brew install mono-libgdiplus && brew install libomp && brew link libomp --force
- script: brew update && brew install libomp && brew link libomp --force
displayName: Install MacOS ARM build dependencies
- ${{ if and( eq(parameters.nightlyBuild, 'true'), eq(parameters.pool.vmImage, 'ubuntu-18.04')) }}:
- bash: echo "##vso[task.setvariable variable=LD_LIBRARY_PATH]$(nightlyBuildRunPath):$LD_LIBRARY_PATH"
4 changes: 2 additions & 2 deletions build/vsts-ci.yml
@@ -121,7 +121,7 @@ jobs:
pool:
vmImage: macOS-12
steps:
- script: brew update && brew unlink [email protected] && brew install mono-libgdiplus && brew install $(Build.SourcesDirectory)/build/libomp.rb --build-from-source --formula && brew link libomp --force
- script: brew update && brew unlink [email protected] && brew install $(Build.SourcesDirectory)/build/libomp.rb --build-from-source --formula && brew link libomp --force
displayName: Install build dependencies
- script: ./restore.sh
displayName: restore all projects
@@ -157,7 +157,7 @@ jobs:
rm -rf /usr/local/bin/2to3
displayName: MacOS Homebrew bug Workaround
continueOnError: true
- script: brew update && brew unlink [email protected] && brew install mono-libgdiplus && brew install libomp && brew link libomp --force
- script: brew update && brew unlink [email protected] && brew install libomp && brew link libomp --force
displayName: Install build dependencies
- script: ./restore.sh
displayName: restore all projects
3 changes: 1 addition & 2 deletions docs/building/unix-instructions.md
@@ -65,13 +65,12 @@ macOS 10.13 (High Sierra) or higher is needed to build dotnet/machinelearning. W
On macOS a few components are needed which are not provided by a default developer setup:
* cmake 3.10.3
* libomp 7
* libgdiplus
* gettext
* All the requirements necessary to run .NET Core 3.1 applications. To view macOS prerequisites click [here](https://docs.microsoft.com/en-us/dotnet/core/install/macos?tabs=netcore31#dependencies).

One way of obtaining CMake and other required libraries is via [Homebrew](https://brew.sh):
```sh
$ brew update && brew install cmake https://github.com/raw/dotnet/machinelearning/main/build/libomp.rb mono-libgdiplus gettext && brew link gettext --force && brew link libomp --force
$ brew update && brew install cmake https://github.com/raw/dotnet/machinelearning/main/build/libomp.rb gettext && brew link gettext --force && brew link libomp --force
```

Please note that newer versions of Homebrew [don't allow installing directly from a URL](https://github.com/Homebrew/brew/issues/8791). If you run into this issue, you may need to download libomp.rb first and install it with the local file instead.
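
A minimal sketch of that local-file workaround (assuming `curl` is available and the `libomp.rb` path in this repository is unchanged) could look like:
```sh
$ curl -LO https://github.com/raw/dotnet/machinelearning/main/build/libomp.rb
$ brew install ./libomp.rb --build-from-source --formula && brew link libomp --force
```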
@@ -78,10 +78,10 @@ public static void Example()
{
FeatureColumnName = "Image",
LabelColumnName = "Label",
// Just by changing/selecting InceptionV3/MobilenetV2
// here instead of
// Just by changing/selecting InceptionV3/MobilenetV2
// here instead of
// ResnetV2101 you can try a different architecture/
// pre-trained model.
// pre-trained model.
Arch = ImageClassificationTrainer.Architecture.ResnetV2101,
Epoch = 182,
BatchSize = 128,
@@ -92,8 +92,8 @@ public static void Example()
ReuseTrainSetBottleneckCachedValues = false,
// Use linear scaling rule and Learning rate decay as an option
// This is known to do well for Cifar dataset and Resnet models
// You can also try other types of Learning rate scheduling
// methods available in LearningRateScheduler.cs
// You can also try other types of Learning rate scheduling
// methods available in LearningRateScheduler.cs
LearningRateScheduler = new LsrDecay()
};

@@ -111,7 +111,7 @@ public static void Example()

// Train the model.
// This involves calculating the bottleneck values, and then
// training the final layer. Sample output is:
// training the final layer. Sample output is:
// Phase: Bottleneck Computation, Dataset used: Train, Image Index: 1
// Phase: Bottleneck Computation, Dataset used: Train, Image Index: 2
// ...
@@ -271,8 +271,9 @@ public static string DownloadImageSet(string imagesDownloadFolder)
// get a set of images to teach the network about the new classes
// CIFAR dataset ( 50000 train images and 10000 test images )
string fileName = "cifar10.zip";
string url = $"https://aka.ms/mlnet-resources/" +
"datasets/cifar10.zip";

// https://github.com/YoongiKim/CIFAR-10-images
string url = $"https://github.com/YoongiKim/CIFAR-10-images/archive/refs/heads/master.zip";

Download(url, imagesDownloadFolder, fileName);
UnZip(Path.Combine(imagesDownloadFolder, fileName),
@@ -1,5 +1,4 @@
using System;
using System.Drawing;
using System.Linq;
using Microsoft.ML;
using Microsoft.ML.Data;
@@ -26,8 +25,8 @@ public static void Example()
// input/output of the used ONNX model.
var dataPoints = new ImageDataPoint[]
{
new ImageDataPoint(Color.Red),
new ImageDataPoint(Color.Green)
new ImageDataPoint(red: 255, green: 0, blue: 0), // Red color
new ImageDataPoint(red: 0, green: 128, blue: 0) // Green color
};

// Convert training data to IDataView, the general data type used in
@@ -91,7 +90,7 @@ private class ImageDataPoint

// Image will be consumed by ONNX image multiclass classification model.
[ImageType(height, width)]
public Bitmap Image { get; set; }
public MLImage Image { get; set; }

// Expected output of ONNX model. It contains probabilities of all
// classes. Note that the ColumnName below should match the output name
@@ -104,12 +103,19 @@ public ImageDataPoint()
Image = null;
}

public ImageDataPoint(Color color)
public ImageDataPoint(byte red, byte green, byte blue)
{
Image = new Bitmap(width, height);
for (int i = 0; i < width; ++i)
for (int j = 0; j < height; ++j)
Image.SetPixel(i, j, color);
byte[] imageData = new byte[width * height * 4]; // 4 for the red, green, blue and alpha colors
for (int i = 0; i < imageData.Length; i += 4)
{
// Fill the buffer with the Bgra32 format
imageData[i] = blue;
imageData[i + 1] = green;
imageData[i + 2] = red;
imageData[i + 3] = 255;
}

Image = MLImage.CreateFromPixels(width, height, MLPixelFormat.Bgra32, imageData);
}
}
}
@@ -1,4 +1,5 @@
using System;
using System.IO;
using System.Linq;
using Microsoft.ML;
using Microsoft.ML.Data;
@@ -10,8 +11,9 @@ public static class ApplyOnnxModel
public static void Example()
{
// Download the squeezenet image model from ONNX model zoo, version 1.2
// https://github.com/onnx/models/tree/master/squeezenet or use
// Microsoft.ML.Onnx.TestModels nuget.
// https://github.com/onnx/models/tree/master/squeezenet or
// https://s3.amazonaws.com/download.onnx/models/opset_8/squeezenet.tar.gz
// or use Microsoft.ML.Onnx.TestModels nuget.
var modelPath = @"squeezenet\00000001\model.onnx";

// Create ML pipeline to score the data using OnnxScoringEstimator
@@ -56,7 +58,7 @@ public static void Example()
// inputSize is the overall dimensions of the model input tensor.
private const int inputSize = 224 * 224 * 3;

// A class to hold sample tensor data. Member name should match
// A class to hold sample tensor data. Member name should match
// the inputs that the model expects (in this case, data_0)
public class TensorData
{
@@ -1,5 +1,4 @@
using System;
using System.Drawing;
using System.IO;
using Microsoft.ML;
using Microsoft.ML.Data;
@@ -9,7 +8,7 @@ namespace Samples.Dynamic
public static class ConvertToGrayscale
{
// Sample that loads images from the file system, and converts them to
// grayscale.
// grayscale.
public static void Example()
{
// Create a new ML context, for ML.NET operations. It can be used for
@@ -20,7 +19,7 @@ public static void Example()
// list of the files from the dotnet/machinelearning/test/data/images/.
// If you inspect the fileSystem, after running this line, an "images"
// folder will be created, containing 4 images, and a .tsv file
// enumerating the images.
// enumerating the images.
var imagesDataFile = Microsoft.ML.SamplesUtils.DatasetUtils
.GetSampleImages();

@@ -42,7 +41,7 @@ public static void Example()
}).Load(imagesDataFile);

var imagesFolder = Path.GetDirectoryName(imagesDataFile);
// Image loading pipeline.
// Image loading pipeline.
var pipeline = mlContext.Transforms.LoadImages("ImageObject",
imagesFolder, "ImagePath")
.Append(mlContext.Transforms.ConvertToGrayscale("Grayscale",
@@ -67,23 +66,23 @@ private static void PrintColumns(IDataView transformedData)
.Schema))
{
// Note that it is best to get the getters and values *before*
// iteration, so as to faciliate buffer sharing (if applicable), and
// iteration, so as to facilitate buffer sharing (if applicable), and
// column -type validation once, rather than many times.
ReadOnlyMemory<char> imagePath = default;
ReadOnlyMemory<char> name = default;
Bitmap imageObject = null;
Bitmap grayscaleImageObject = null;
MLImage imageObject = null;
MLImage grayscaleImageObject = null;

var imagePathGetter = cursor.GetGetter<ReadOnlyMemory<char>>(cursor
.Schema["ImagePath"]);

var nameGetter = cursor.GetGetter<ReadOnlyMemory<char>>(cursor
.Schema["Name"]);

var imageObjectGetter = cursor.GetGetter<Bitmap>(cursor.Schema[
var imageObjectGetter = cursor.GetGetter<MLImage>(cursor.Schema[
"ImageObject"]);

var grayscaleGetter = cursor.GetGetter<Bitmap>(cursor.Schema[
var grayscaleGetter = cursor.GetGetter<MLImage>(cursor.Schema[
"Grayscale"]);

while (cursor.MoveNext())
@@ -94,8 +93,8 @@ private static void PrintColumns(IDataView transformedData)
grayscaleGetter(ref grayscaleImageObject);

Console.WriteLine("{0, -25} {1, -25} {2, -25} {3, -25}",
imagePath, name, imageObject.PhysicalDimension,
grayscaleImageObject.PhysicalDimension);
imagePath, name, $"Width={imageObject.Width}, Height={imageObject.Height}",
$"Width={grayscaleImageObject.Width}, Height={grayscaleImageObject.Height}");
}

// Dispose the image.
@@ -1,6 +1,6 @@
using System;
using System.Drawing;
using Microsoft.ML;
using Microsoft.ML.Data;
using Microsoft.ML.Transforms.Image;

namespace Samples.Dynamic
@@ -11,16 +11,17 @@ public static void Example()
{
var mlContext = new MLContext();
// Create an image list.
var images = new[] { new ImageDataPoint(2, 3, Color.Blue), new
ImageDataPoint(2, 3, Color.Red) };
var images = new[]
{
new ImageDataPoint(2, 3, red: 0, green: 0, blue: 255), // Blue color
new ImageDataPoint(2, 3, red: 255, green: 0, blue: 0) }; // Red color

// Convert the list of data points to an IDataView object, which is
// consumable by ML.NET API.
var data = mlContext.Data.LoadFromEnumerable(images);

// Convert image to gray scale.
var pipeline = mlContext.Transforms.ConvertToGrayscale("GrayImage",
"Image");
var pipeline = mlContext.Transforms.ConvertToGrayscale("GrayImage", "Image");

// Fit the model.
var model = pipeline.Fit(data);
@@ -37,15 +38,31 @@ public static void Example()
{
var image = dataPoint.Image;
var grayImage = dataPoint.GrayImage;
for (int x = 0; x < grayImage.Width; ++x)

ReadOnlySpan<byte> imageData = image.Pixels;
(int alphaIndex, int redIndex, int greenIndex, int blueIndex) = image.PixelFormat switch
{
MLPixelFormat.Bgra32 => (3, 2, 1, 0),
MLPixelFormat.Rgba32 => (3, 0, 1, 2),
_ => throw new InvalidOperationException($"Image pixel format is not supported")
};

ReadOnlySpan<byte> grayImageData = grayImage.Pixels;
(int alphaIndex1, int redIndex1, int greenIndex1, int blueIndex1) = grayImage.PixelFormat switch
{
for (int y = 0; y < grayImage.Height; ++y)
{
var pixel = image.GetPixel(x, y);
var grayPixel = grayImage.GetPixel(x, y);
Console.WriteLine($"The original pixel is {pixel} and its" +
$"pixel in gray is {grayPixel}");
}
MLPixelFormat.Bgra32 => (3, 2, 1, 0),
MLPixelFormat.Rgba32 => (3, 0, 1, 2),
_ => throw new InvalidOperationException($"Image pixel format is not supported")
};

int pixelSize = image.BitsPerPixel / 8;

for (int i = 0; i < imageData.Length; i += pixelSize)
{
string pixelString = $"[A = {imageData[i + alphaIndex]}, R = {imageData[i + redIndex]}, G = {imageData[i + greenIndex]}, B = {imageData[i + blueIndex]}]";
string grayPixelString = $"[A = {grayImageData[i + alphaIndex1]}, R = {grayImageData[i + redIndex1]}, G = {grayImageData[i + greenIndex1]}, B = {grayImageData[i + blueIndex1]}]";

Console.WriteLine($"The original pixel is {pixelString} and its pixel in gray is {grayPixelString}");
}
}

@@ -67,23 +84,30 @@ public static void Example()
private class ImageDataPoint
{
[ImageType(3, 4)]
public Bitmap Image { get; set; }
public MLImage Image { get; set; }

[ImageType(3, 4)]
public Bitmap GrayImage { get; set; }
public MLImage GrayImage { get; set; }

public ImageDataPoint()
{
Image = null;
GrayImage = null;
}

public ImageDataPoint(int width, int height, Color color)
public ImageDataPoint(int width, int height, byte red, byte green, byte blue)
{
Image = new Bitmap(width, height);
for (int i = 0; i < width; ++i)
for (int j = 0; j < height; ++j)
Image.SetPixel(i, j, color);
byte[] imageData = new byte[width * height * 4]; // 4 for the red, green, blue and alpha colors
for (int i = 0; i < imageData.Length; i += 4)
{
// Fill the buffer with the Bgra32 format
imageData[i] = blue;
imageData[i + 1] = green;
imageData[i + 2] = red;
imageData[i + 3] = 255;
}

Image = MLImage.CreateFromPixels(width, height, MLPixelFormat.Bgra32, imageData);
}
}
}