Skip to content

Update to Onnxruntime 1.5.1 #5406

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Sep 30, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion build/Dependencies.props
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
<GoogleProtobufPackageVersion>3.10.1</GoogleProtobufPackageVersion>
<LightGBMPackageVersion>2.2.3</LightGBMPackageVersion>
<MicrosoftExtensionsPackageVersion>2.1.0</MicrosoftExtensionsPackageVersion>
<MicrosoftMLOnnxRuntimePackageVersion>1.3.0</MicrosoftMLOnnxRuntimePackageVersion>
<MicrosoftMLOnnxRuntimePackageVersion>1.5.1</MicrosoftMLOnnxRuntimePackageVersion>
<MlNetMklDepsPackageVersion>0.0.0.9</MlNetMklDepsPackageVersion>
<ParquetDotNetPackageVersion>2.1.3</ParquetDotNetPackageVersion>
<SystemDrawingCommonPackageVersion>4.5.0</SystemDrawingCommonPackageVersion>
Expand Down
11 changes: 8 additions & 3 deletions test/Microsoft.ML.Functional.Tests/ONNX.cs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,11 @@ namespace Microsoft.ML.Functional.Tests
{
public class ONNX : FunctionalTestBaseClass
{
// These two members are meant to be changed
// only when manually testing the Onnx GPU NuGet packages
private const bool _fallbackToCpu = true;
private static int? _gpuDeviceId = null;

public ONNX(ITestOutputHelper output) : base(output)
{
}
Expand Down Expand Up @@ -52,7 +57,7 @@ public void SaveOnnxModelLoadAndScoreFastTree()
// Therefore the VectorScoreColumn class (which contains a float [] field called Score) is used for the return
// type on the Prediction engine.
// See #2980 and #2981 for more information.
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(modelPath);
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(modelPath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxModel = onnxEstimator.Fit(data);

// Create prediction engine and test predictions.
Expand Down Expand Up @@ -98,7 +103,7 @@ public void SaveOnnxModelLoadAndScoreKMeans()
mlContext.Model.ConvertToOnnx(model, data, file);

// Load the model as a transform.
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(modelPath);
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(modelPath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxModel = onnxEstimator.Fit(data);

// TODO #2980: ONNX outputs don't match the outputs of the model, so we must hand-correct this for now.
Expand Down Expand Up @@ -150,7 +155,7 @@ public void SaveOnnxModelLoadAndScoreSDCA()
mlContext.Model.ConvertToOnnx(model, data, file);

// Load the model as a transform.
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(modelPath);
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(modelPath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxModel = onnxEstimator.Fit(data);

// Create prediction engine and test predictions.
Expand Down
37 changes: 21 additions & 16 deletions test/Microsoft.ML.OnnxTransformerTest/OnnxTransformTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,11 @@ namespace Microsoft.ML.Tests
{
public class OnnxTransformTests : TestDataPipeBase
{
// These two members are meant to be changed
// only when manually testing the Onnx GPU NuGet packages
private const bool _fallbackToCpu = true;
private static int? _gpuDeviceId = null;

private const int InputSize = 150528;

private class TestData
Expand Down Expand Up @@ -134,7 +139,7 @@ public void TestSimpleCase()
var xyData = new List<TestDataXY> { new TestDataXY() { A = new float[InputSize] } };
var stringData = new List<TestDataDifferntType> { new TestDataDifferntType() { data_0 = new string[InputSize] } };
var sizeData = new List<TestDataSize> { new TestDataSize() { data_0 = new float[2] } };
var pipe = ML.Transforms.ApplyOnnxModel(new[] { "softmaxout_1" }, new[] { "data_0" }, modelFile);
var pipe = ML.Transforms.ApplyOnnxModel(new[] { "softmaxout_1" }, new[] { "data_0" }, modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);

var invalidDataWrongNames = ML.Data.LoadFromEnumerable(xyData);
var invalidDataWrongTypes = ML.Data.LoadFromEnumerable(stringData);
Expand Down Expand Up @@ -231,7 +236,7 @@ public void OnnxWorkout()
var pipe = ML.Transforms.LoadImages("data_0", imageFolder, "imagePath")
.Append(ML.Transforms.ResizeImages("data_0", imageHeight, imageWidth))
.Append(ML.Transforms.ExtractPixels("data_0", interleavePixelColors: true))
.Append(ML.Transforms.ApplyOnnxModel("softmaxout_1", "data_0", modelFile));
.Append(ML.Transforms.ApplyOnnxModel("softmaxout_1", "data_0", modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu));

TestEstimatorCore(pipe, data);

Expand Down Expand Up @@ -292,7 +297,7 @@ public void OnnxModelScenario()
}
});

var pipeline = ML.Transforms.ApplyOnnxModel("softmaxout_1", "data_0", modelFile);
var pipeline = ML.Transforms.ApplyOnnxModel("softmaxout_1", "data_0", modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = pipeline.Fit(dataView);
var onnx = onnxTransformer.Transform(dataView);
var scoreCol = onnx.Schema["softmaxout_1"];
Expand Down Expand Up @@ -325,7 +330,7 @@ public void OnnxModelMultiInput()
inb = new float[] {1,2,3,4,5}
}
});
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "outa", "outb" }, new[] { "ina", "inb" }, modelFile);
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "outa", "outb" }, new[] { "ina", "inb" }, modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = pipeline.Fit(dataView);
var onnx = onnxTransformer.Transform(dataView);

Expand Down Expand Up @@ -365,7 +370,7 @@ public void OnnxModelOutputDifferentOrder()
}
});
// The model returns the output columns in the order outa, outb. We are doing the opposite here, making sure the name mapping is correct.
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "outb", "outa" }, new[] { "ina", "inb" }, modelFile);
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "outb", "outa" }, new[] { "ina", "inb" }, modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = pipeline.Fit(dataView);
var onnx = onnxTransformer.Transform(dataView);

Expand All @@ -391,7 +396,7 @@ public void OnnxModelOutputDifferentOrder()
(onnxTransformer as IDisposable)?.Dispose();

// The model returns the output columns in the order outa, outb. We are doing only a subset, outb, to make sure the mapping works.
pipeline = ML.Transforms.ApplyOnnxModel(new[] { "outb" }, new[] { "ina", "inb" }, modelFile);
pipeline = ML.Transforms.ApplyOnnxModel(new[] { "outb" }, new[] { "ina", "inb" }, modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
onnxTransformer = pipeline.Fit(dataView);
onnx = onnxTransformer.Transform(dataView);

Expand Down Expand Up @@ -425,7 +430,7 @@ public void TestUnknownDimensions()
new TestDataUnknownDimensions(){input = new float[] {-1.1f, -1.3f, 1.2f }},
};
var idv = mlContext.Data.LoadFromEnumerable(data);
var pipeline = ML.Transforms.ApplyOnnxModel(modelFile);
var pipeline = ML.Transforms.ApplyOnnxModel(modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = pipeline.Fit(idv);
var transformedValues = onnxTransformer.Transform(idv);
var predictions = mlContext.Data.CreateEnumerable<PredictionUnknownDimensions>(transformedValues, reuseRowObject: false).ToArray();
Expand All @@ -451,7 +456,7 @@ public void TestOnnxNoneDimValue()
new TestDataNoneDimension(){features = new float[] { 6.3f, 3.3f, 6.0f, 2.5f }},
};
var idv = mlContext.Data.LoadFromEnumerable(data);
var pipeline = ML.Transforms.ApplyOnnxModel(modelFile);
var pipeline = ML.Transforms.ApplyOnnxModel(modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = pipeline.Fit(idv);
var transformedValues = onnxTransformer.Transform(idv);
var predictions = mlContext.Data.CreateEnumerable<PredictionNoneDimension>(transformedValues, reuseRowObject: false).ToArray();
Expand Down Expand Up @@ -526,7 +531,7 @@ public void OnnxModelInMemoryImage()
// "softmaxout_1" are model input and output names stored in the used ONNX model file. Users may need to inspect their own models to
// get the right input and output column names.
var pipeline = ML.Transforms.ExtractPixels("data_0", "Image") // Map column "Image" to column "data_0"
.Append(ML.Transforms.ApplyOnnxModel("softmaxout_1", "data_0", modelFile)); // Map column "data_0" to column "softmaxout_1"
.Append(ML.Transforms.ApplyOnnxModel("softmaxout_1", "data_0", modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu)); // Map column "data_0" to column "softmaxout_1"
var model = pipeline.Fit(dataView);
var onnx = model.Transform(dataView);

Expand Down Expand Up @@ -576,7 +581,7 @@ public void TestOnnxZipMapWithInt64Keys()
};

var dataView = ML.Data.LoadFromEnumerable(dataPoints);
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "output" }, new[] { "input" }, modelFile);
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "output" }, new[] { "input" }, modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = pipeline.Fit(dataView);
var transformedDataView = onnxTransformer.Transform(dataView);

Expand Down Expand Up @@ -629,7 +634,7 @@ public void TestOnnxZipMapWithStringKeys()
};

var dataView = ML.Data.LoadFromEnumerable(dataPoints);
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "output" }, new[] { "input" }, modelFile);
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "output" }, new[] { "input" }, modelFile, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = pipeline.Fit(dataView);
var transformedDataView = onnxTransformer.Transform(dataView);

Expand Down Expand Up @@ -794,19 +799,19 @@ public void TestOnnxTransformWithCustomShapes()
// Test 1.
pipeline[0] = ML.Transforms.ApplyOnnxModel(
new[] { nameof(PredictionWithCustomShape.argmax) }, new[] { nameof(InputWithCustomShape.input) },
modelFile, shapeDictionary);
modelFile, shapeDictionary, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
onnxTransformer[0] = pipeline[0].Fit(dataView);
transformedDataViews[0] = onnxTransformer[0].Transform(dataView);

// Test 2.
pipeline[1] = ML.Transforms.ApplyOnnxModel(
nameof(PredictionWithCustomShape.argmax), nameof(InputWithCustomShape.input),
modelFile, shapeDictionary);
modelFile, shapeDictionary, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
onnxTransformer[1] = pipeline[1].Fit(dataView);
transformedDataViews[1] = onnxTransformer[1].Transform(dataView);

// Test 3.
pipeline[2] = ML.Transforms.ApplyOnnxModel(modelFile, shapeDictionary);
pipeline[2] = ML.Transforms.ApplyOnnxModel(modelFile, shapeDictionary, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
onnxTransformer[2] = pipeline[2].Fit(dataView);
transformedDataViews[2] = onnxTransformer[2].Transform(dataView);

Expand Down Expand Up @@ -856,7 +861,7 @@ private void TryModelWithCustomShapesHelper(IDictionary<string, int[]> shapeDict

// Define a ONNX transform, trains it, and apply it to the input data.
var pipeline = ML.Transforms.ApplyOnnxModel(new[] { "outa", "outb" }, new[] { "ina", "inb" },
modelFile, shapeDictionary);
modelFile, shapeDictionary, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
}

/// <summary>
Expand Down Expand Up @@ -956,7 +961,7 @@ public void TestOnnxTransformSaveAndLoadWithCustomShapes()
var dataView = ML.Data.LoadFromEnumerable(dataPoints);

var pipeline = ML.Transforms.ApplyOnnxModel(nameof(PredictionWithCustomShape.argmax),
nameof(InputWithCustomShape.input), modelFile, shapeDictionary);
nameof(InputWithCustomShape.input), modelFile, shapeDictionary, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);

var model = pipeline.Fit(dataView);

Expand Down
19 changes: 12 additions & 7 deletions test/Microsoft.ML.Tests/OnnxConversionTest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,11 @@ namespace Microsoft.ML.Tests
{
public class OnnxConversionTest : BaseTestBaseline
{
// These two members are meant to be changed
// only when manually testing the Onnx GPU NuGet packages
private const bool _fallbackToCpu = true;
private static int? _gpuDeviceId = null;

private class AdultData
{
[LoadColumn(0, 10), ColumnName("FeatureVector")]
Expand Down Expand Up @@ -811,7 +816,7 @@ public void RemoveVariablesInPipelineTest()
if (IsOnnxRuntimeSupported())
{
// Evaluate the saved ONNX model using the data used to train the ML.NET pipeline.
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(onnxModelPath);
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(onnxModelPath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = onnxEstimator.Fit(data);
var onnxResult = onnxTransformer.Transform(data);
CompareResults("Score", "Score", transformedData, onnxResult, isRightColumnOnnxScalar: true);
Expand Down Expand Up @@ -973,7 +978,7 @@ public void PcaOnnxConversionTest(int customOpSetVersion)
if (IsOnnxRuntimeSupported())
{
// Evaluate the saved ONNX model using the data used to train the ML.NET pipeline.
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(onnxModelPath);
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(onnxModelPath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = onnxEstimator.Fit(dataView);
var onnxResult = onnxTransformer.Transform(dataView);
CompareResults("pca", "pca", transformedData, onnxResult);
Expand Down Expand Up @@ -1338,7 +1343,7 @@ public void NgramOnnxConversionTest(

if (IsOnnxRuntimeSupported())
{
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(onnxFilePath);
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(onnxFilePath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = onnxEstimator.Fit(dataView);
var onnxResult = onnxTransformer.Transform(dataView);
var columnName = i == pipelines.Length - 1 ? "Tokens" : "NGrams";
Expand Down Expand Up @@ -1455,7 +1460,7 @@ public void OptionalColumnOnnxTest(DataKind dataKind)
{
string[] inputNames = onnxModel.Graph.Input.Select(valueInfoProto => valueInfoProto.Name).ToArray();
string[] outputNames = onnxModel.Graph.Output.Select(valueInfoProto => valueInfoProto.Name).ToArray();
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(outputNames, inputNames, onnxModelPath);
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(outputNames, inputNames, onnxModelPath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = onnxEstimator.Fit(dataView);
var onnxResult = onnxTransformer.Transform(dataView);
CompareResults("Label", "Label", outputData, onnxResult, isRightColumnOnnxScalar: true);
Expand Down Expand Up @@ -1589,7 +1594,7 @@ public void UseKeyDataViewTypeAsUInt32InOnnxInput()
if (IsOnnxRuntimeSupported())
{
// Step 5: Apply Onnx Model
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(outputNames, inputNames, onnxModelPath);
var onnxEstimator = mlContext.Transforms.ApplyOnnxModel(outputNames, inputNames, onnxModelPath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = onnxEstimator.Fit(reloadedData);
var onnxResult = onnxTransformer.Transform(reloadedData);

Expand All @@ -1602,7 +1607,7 @@ public void UseKeyDataViewTypeAsUInt32InOnnxInput()
string onnxModelPath2 = GetOutputPath("onnxmodel2-kdvt-as-uint32.onnx");
using (FileStream stream = new FileStream(onnxModelPath2, FileMode.Create))
mlContext.Model.ConvertToOnnx(model, mappedData, stream);
var onnxEstimator2 = mlContext.Transforms.ApplyOnnxModel(outputNames, inputNames, onnxModelPath2);
var onnxEstimator2 = mlContext.Transforms.ApplyOnnxModel(outputNames, inputNames, onnxModelPath2, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer2 = onnxEstimator2.Fit(originalData);
var onnxResult2 = onnxTransformer2.Transform(originalData);

Expand Down Expand Up @@ -2034,7 +2039,7 @@ private void TestPipeline<TLastTransformer>(EstimatorChain<TLastTransformer> pip
if (IsOnnxRuntimeSupported() && columnsToCompare != null)
{
// Evaluate the saved ONNX model using the data used to train the ML.NET pipeline.
var onnxEstimator = ML.Transforms.ApplyOnnxModel(onnxModelPath);
var onnxEstimator = ML.Transforms.ApplyOnnxModel(onnxModelPath, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu);
var onnxTransformer = onnxEstimator.Fit(dataView);
var onnxResult = onnxTransformer.Transform(dataView);

Expand Down
11 changes: 9 additions & 2 deletions test/Microsoft.ML.Tests/OnnxSequenceTypeWithAttributesTest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@ namespace Microsoft.ML.Tests
{
public class OnnxSequenceTypeWithAttributesTest : BaseTestBaseline
{
private const bool _fallbackToCpu = true;
private static int? _gpuDeviceId = null;

public class OutputObj
{
[ColumnName("output")]
Expand All @@ -42,7 +45,9 @@ public static PredictionEngine<FloatInput, OutputObj> LoadModel(string onnxModel

var pipeline = ctx.Transforms.ApplyOnnxModel(
modelFile: onnxModelFilePath,
outputColumnNames: new[] { "output" }, inputColumnNames: new[] { "input" });
outputColumnNames: new[] { "output" }, inputColumnNames: new[] { "input" },
gpuDeviceId: _gpuDeviceId,
fallbackToCpu: _fallbackToCpu);

var model = pipeline.Fit(dataView);
return ctx.Model.CreatePredictionEngine<FloatInput, OutputObj>(model);
Expand Down Expand Up @@ -79,7 +84,9 @@ public static PredictionEngine<FloatInput, WrongOutputObj> LoadModelWithWrongCus

var pipeline = ctx.Transforms.ApplyOnnxModel(
modelFile: onnxModelFilePath,
outputColumnNames: new[] { "output" }, inputColumnNames: new[] { "input" });
outputColumnNames: new[] { "output" }, inputColumnNames: new[] { "input" },
gpuDeviceId: _gpuDeviceId,
fallbackToCpu: _fallbackToCpu);

var model = pipeline.Fit(dataView);
return ctx.Model.CreatePredictionEngine<FloatInput, WrongOutputObj>(model);
Expand Down