
Commit cf7e3d9

Lockdown Microsoft.ML.StandardLearners public surface (#2541)

* Internalize Microsoft.ML.Numeric.
* Internalization.
* Merge and more internalization.

Parent: 1f90f50

21 files changed: +125 −124 lines
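
The pattern across the diff is uniform: types and members that were part of the public surface become internal, and protected members that only same-assembly trainers should override become private protected. A minimal sketch of the idea — Tolerance and StepOnce are hypothetical names, and [BestFriend] is ML.NET's attribute for internal members that designated friend assemblies may still use:

    // Before: part of the public API surface.
    public class Optimizer
    {
        protected float Tolerance;            // visible to any subclass, in any assembly
    }

    // After: locked down.
    internal class Optimizer
    {
        // private protected (C# 7.2): only subclasses declared
        // in this same assembly can touch it.
        private protected float Tolerance;

        // [BestFriend] marks an internal member that designated friend
        // assemblies (wired up via InternalsVisibleTo) may still use.
        [BestFriend]
        internal void StepOnce() { }
    }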

src/Microsoft.ML.StandardLearners/FactorizationMachine/FieldAwareFactorizationMachineModelParameters.cs (+2 −2)

@@ -290,12 +290,12 @@ public sealed class FieldAwareFactorizationMachinePredictionTransformer : Predic
     /// <summary>
     /// The name of the feature column used by the prediction transformer.
     /// </summary>
-    internal IReadOnlyList<string> FeatureColumns { get; }
+    private IReadOnlyList<string> FeatureColumns { get; }

     /// <summary>
     /// The type of the feature columns.
     /// </summary>
-    internal IReadOnlyList<DataViewType> FeatureColumnTypes { get; }
+    private IReadOnlyList<DataViewType> FeatureColumnTypes { get; }

     private readonly string _thresholdColumn;
     private readonly float _threshold;

src/Microsoft.ML.StandardLearners/Optimizer/DifferentiableFunction.cs (+4 −4)

@@ -18,7 +18,7 @@ namespace Microsoft.ML.Numeric
     /// <param name="gradient">The gradient vector, which must be filled in (its initial contents are undefined)</param>
     /// <param name="progress">The progress channel provider that can be used to report calculation progress. Can be null.</param>
     /// <returns>The value of the function</returns>
-    public delegate Float DifferentiableFunction(in VBuffer<Float> input, ref VBuffer<Float> gradient, IProgressChannelProvider progress);
+    internal delegate Float DifferentiableFunction(in VBuffer<Float> input, ref VBuffer<Float> gradient, IProgressChannelProvider progress);

     /// <summary>
     /// A delegate for indexed sets of functions with gradients.

@@ -30,12 +30,12 @@ namespace Microsoft.ML.Numeric
     /// <param name="input">The point at which to evaluate the function</param>
     /// <param name="gradient">The gradient vector, which must be filled in (its initial contents are undefined)</param>
     /// <returns>The value of the function</returns>
-    public delegate Float IndexedDifferentiableFunction(int index, in VBuffer<Float> input, ref VBuffer<Float> gradient);
+    internal delegate Float IndexedDifferentiableFunction(int index, in VBuffer<Float> input, ref VBuffer<Float> gradient);

     /// <summary>
     /// Class to aggregate an indexed differentiable function into a single function, in parallel
     /// </summary>
-    public class DifferentiableFunctionAggregator
+    internal class DifferentiableFunctionAggregator
     {
         private readonly IndexedDifferentiableFunction _func;
         private readonly int _maxIndex;

@@ -154,7 +154,7 @@ public Float Eval(in VBuffer<Float> input, ref VBuffer<Float> gradient)
     /// May have false negatives if extreme values cause the numeric gradient to be off,
     /// for example, if the norm of x is very large, or if the gradient is changing rapidly at x.
     /// </remarks>
-    public static class GradientTester
+    internal static class GradientTester
     {
         // approximately u^(1/3), where u is the unit roundoff ~ 1.1e-16.
         // the optimal value of eps for the central difference approximation, Nocedal & Wright

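For orientation: a DifferentiableFunction returns the objective value and fills in the gradient at a point, and GradientTester validates such an implementation against a central-difference estimate. A self-contained sketch of both ideas, using plain float[] instead of ML.NET's VBuffer<Float>, with a hypothetical Quadratic objective:

    using System;

    static class GradientCheckSketch
    {
        // Analogue of DifferentiableFunction: returns f(x) and fills in the gradient.
        delegate float ObjectiveFunction(float[] x, float[] gradient);

        // f(x) = 0.5 * ||x||^2, so grad f(x) = x.
        static float Quadratic(float[] x, float[] grad)
        {
            float value = 0;
            for (int i = 0; i < x.Length; i++)
            {
                value += 0.5f * x[i] * x[i];
                grad[i] = x[i];
            }
            return value;
        }

        static void Main()
        {
            ObjectiveFunction f = Quadratic;
            var x = new float[] { 1f, -2f, 3f };
            var grad = new float[3];
            f(x, grad);

            // Central-difference check along coordinate 0, in the spirit of GradientTester:
            // (f(x + eps*e0) - f(x - eps*e0)) / (2*eps) should match grad[0].
            const float eps = 1e-3f;
            var scratch = new float[3];
            x[0] += eps; float fPlus = f(x, scratch);
            x[0] -= 2 * eps; float fMinus = f(x, scratch);
            x[0] += eps;
            float numeric = (fPlus - fMinus) / (2 * eps);
            Console.WriteLine($"analytic={grad[0]}, numeric={numeric}");
        }
    }
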
src/Microsoft.ML.StandardLearners/Optimizer/L1Optimizer.cs (+1 −1)

@@ -15,7 +15,7 @@ namespace Microsoft.ML.Numeric
     /// If you use this code for published research, please cite
     /// Galen Andrew and Jianfeng Gao, "Scalable Training of L1-Regularized Log-Linear Models", ICML 2007
     /// </summary>
-    public sealed class L1Optimizer : Optimizer
+    internal sealed class L1Optimizer : Optimizer
     {
         // Biases do not contribute to the L1 norm and are assumed to be at the beginning of the weights.
         private readonly int _biasCount;

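The cited Andrew & Gao paper is OWL-QN, which handles the non-differentiable L1 term through a pseudo-gradient: where a weight is zero, the penalty contributes whichever one-sided derivative lets the objective decrease, or nothing at all. A sketch of that per-coordinate computation (illustrative, not the class's actual code):

    // Pseudo-gradient of loss(w) + l1Weight * ||w||_1 at one coordinate,
    // following Andrew & Gao (ICML 2007). lossGrad is d(loss)/dw for that coordinate.
    static float PseudoGradient(float w, float lossGrad, float l1Weight)
    {
        if (w > 0)
            return lossGrad + l1Weight;   // right of zero: penalty slope is +l1
        if (w < 0)
            return lossGrad - l1Weight;   // left of zero: penalty slope is -l1

        // w == 0: take a one-sided derivative only if it points downhill.
        if (lossGrad + l1Weight < 0)
            return lossGrad + l1Weight;   // objective decreases by going positive
        if (lossGrad - l1Weight > 0)
            return lossGrad - l1Weight;   // objective decreases by going negative
        return 0;                         // zero is a coordinate-wise local minimum
    }
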
src/Microsoft.ML.StandardLearners/Optimizer/LineSearch.cs (+6 −6)

@@ -12,7 +12,7 @@ namespace Microsoft.ML.Numeric
     /// <summary>
     /// Line search that does not use derivatives
     /// </summary>
-    public interface ILineSearch : IDiffLineSearch
+    internal interface ILineSearch : IDiffLineSearch
     {
         /// <summary>
         /// Finds a local minimum of the function

@@ -28,12 +28,12 @@ public interface ILineSearch : IDiffLineSearch
     /// <param name="x">Point to evaluate</param>
     /// <param name="deriv">Derivative at that point</param>
     /// <returns></returns>
-    public delegate Float DiffFunc1D(Float x, out Float deriv);
+    internal delegate Float DiffFunc1D(Float x, out Float deriv);

     /// <summary>
     /// Line search that uses derivatives
     /// </summary>
-    public interface IDiffLineSearch
+    internal interface IDiffLineSearch
     {
         /// <summary>
         /// Finds a local minimum of the function

@@ -48,7 +48,7 @@ public interface IDiffLineSearch
     /// <summary>
     /// Cubic interpolation line search
     /// </summary>
-    public sealed class CubicInterpLineSearch : IDiffLineSearch
+    internal sealed class CubicInterpLineSearch : IDiffLineSearch
     {
         private Float _step;
         private const Float _minProgress = (Float)0.01;

@@ -217,7 +217,7 @@ private Float FindMinimum(DiffFunc1D func, Float initValue, Float initDeriv)
     /// <summary>
     /// Finds local minimum with golden section search.
     /// </summary>
-    public sealed class GoldenSectionSearch : ILineSearch
+    internal sealed class GoldenSectionSearch : ILineSearch
     {
         private Float _step;
         private static readonly Float _phi = (1 + MathUtils.Sqrt(5)) / 2;

@@ -396,7 +396,7 @@ private Float FindMinimum(Func<Float, Float> func)
     /// <summary>
     /// Backtracking line search with Armijo condition
     /// </summary>
-    public sealed class BacktrackingLineSearch : IDiffLineSearch
+    internal sealed class BacktrackingLineSearch : IDiffLineSearch
     {
         private Float _step;
         private Float _c1;

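Of the three searches, backtracking is the simplest to state: start from a trial step and shrink it geometrically until the Armijo sufficient-decrease condition f(t) <= f(0) + c1 * t * f'(0) holds. A sketch against the same DiffFunc1D shape (illustrative, not the class's actual code):

    static class LineSearchSketch
    {
        // 1-D slice of the objective along the search direction:
        // returns f(x) and its derivative at x.
        internal delegate float DiffFunc1D(float x, out float deriv);

        // Backtracking line search with the Armijo condition:
        // accept step t once f(t) <= f(0) + c1 * t * f'(0).
        internal static float Backtrack(DiffFunc1D func, float initialStep = 1f,
            float c1 = 1e-4f, float shrink = 0.5f, int maxTries = 50)
        {
            float f0 = func(0, out float d0);   // value and slope at the current point
            float t = initialStep;
            for (int i = 0; i < maxTries; i++)
            {
                float ft = func(t, out _);
                if (ft <= f0 + c1 * t * d0)     // sufficient decrease achieved
                    return t;
                t *= shrink;                    // otherwise shorten the step
            }
            return t;
        }
    }
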
src/Microsoft.ML.StandardLearners/Optimizer/OptimizationMonitor.cs (+7 −7)

@@ -13,7 +13,7 @@ namespace Microsoft.ML.Numeric
     /// <summary>
     /// An object which is used to decide whether to stop optimization.
     /// </summary>
-    public interface ITerminationCriterion
+    internal interface ITerminationCriterion
     {
         /// <summary>
         /// Name appropriate for display to the user.

@@ -37,7 +37,7 @@ public interface ITerminationCriterion
     /// <summary>
     /// A wrapper for a termination criterion that checks the gradient at a specified interval
     /// </summary>
-    public sealed class GradientCheckingMonitor : ITerminationCriterion
+    internal sealed class GradientCheckingMonitor : ITerminationCriterion
     {
         private const string _checkingMessage = " Checking gradient...";
         private readonly ITerminationCriterion _termCrit;

@@ -104,7 +104,7 @@ public void Reset()
     /// <summary>
     /// An abstract partial implementation of ITerminationCriterion for those which do not require resetting
     /// </summary>
-    public abstract class StaticTerminationCriterion : ITerminationCriterion
+    internal abstract class StaticTerminationCriterion : ITerminationCriterion
     {
         public abstract string FriendlyName { get; }

@@ -127,7 +127,7 @@ public void Reset() { }
     /// <summary>
     /// Terminates when the geometrically-weighted average improvement falls below the tolerance
     /// </summary>
-    public sealed class MeanImprovementCriterion : ITerminationCriterion
+    internal sealed class MeanImprovementCriterion : ITerminationCriterion
     {
         private readonly Float _tol;
         private readonly Float _lambda;

@@ -190,7 +190,7 @@ public void Reset()
     /// <remarks>
     /// Inappropriate for functions whose optimal value is non-positive, because of normalization
     /// </remarks>
-    public sealed class MeanRelativeImprovementCriterion : ITerminationCriterion
+    internal sealed class MeanRelativeImprovementCriterion : ITerminationCriterion
     {
         private readonly int _n;
         private readonly Float _tol;

@@ -280,7 +280,7 @@ public void Reset()
     /// that H > (1 / sigmaSq) * I at all points)
     /// Inappropriate for functions whose optimal value is non-positive, because of normalization
     /// </remarks>
-    public sealed class UpperBoundOnDistanceWithL2 : StaticTerminationCriterion
+    internal sealed class UpperBoundOnDistanceWithL2 : StaticTerminationCriterion
    {
         private readonly Float _sigmaSq;
         private readonly Float _tol;

@@ -345,7 +345,7 @@ public override string ToString()
     /// <remarks>
     /// Inappropriate for functions whose optimal value is non-positive, because of normalization
     /// </remarks>
-    public sealed class RelativeNormGradient : StaticTerminationCriterion
+    internal sealed class RelativeNormGradient : StaticTerminationCriterion
     {
         private readonly Float _tol;

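MeanImprovementCriterion's rule is easy to restate: keep an exponentially-weighted average of the per-iteration improvement and stop once it drops below the tolerance. A sketch under that reading (illustrative names and defaults, not the class's actual fields):

    // Terminates when the geometrically-weighted average improvement
    // falls below the tolerance:
    //   avg <- lambda * (previous - current) + (1 - lambda) * avg
    sealed class MeanImprovementSketch
    {
        private readonly float _tol;     // termination threshold
        private readonly float _lambda;  // weight on the newest improvement
        private float _average;
        private float? _last;

        public MeanImprovementSketch(float tol = 1e-4f, float lambda = 0.5f)
        {
            _tol = tol;
            _lambda = lambda;
        }

        // Call once per iteration with the current objective value;
        // returns true once optimization should stop.
        public bool Terminate(float current)
        {
            bool stop = false;
            if (_last is float previous)
            {
                float improvement = previous - current;  // positive while minimizing succeeds
                _average = _lambda * improvement + (1 - _lambda) * _average;
                stop = _average < _tol;
            }
            _last = current;
            return stop;
        }
    }
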
src/Microsoft.ML.StandardLearners/Optimizer/Optimizer.cs (+1 −1)

@@ -13,7 +13,7 @@ namespace Microsoft.ML.Numeric
     /// <summary>
     /// Limited-memory BFGS quasi-Newton optimization routine
     /// </summary>
-    public class Optimizer
+    internal class Optimizer
     {
         /// Based on Nocedal and Wright, "Numerical Optimization, Second Edition"

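The heart of any L-BFGS routine, including the Nocedal & Wright formulation this class cites, is the two-loop recursion: it multiplies the gradient by an implicit inverse-Hessian approximation built from the last m (s, y) displacement/gradient-difference pairs. A compact sketch (illustrative, not this class's code):

    // L-BFGS two-loop recursion (Nocedal & Wright, Algorithm 7.4).
    // s[i] = x_{k+1} - x_k, y[i] = grad_{k+1} - grad_k, newest pair last.
    // Returns r ~= H_k * grad; negate it to get the descent direction.
    static double[] TwoLoop(double[] grad, double[][] s, double[][] y)
    {
        int m = s.Length, n = grad.Length;
        var q = (double[])grad.Clone();
        var alpha = new double[m];

        for (int i = m - 1; i >= 0; i--)          // newest to oldest
        {
            double rho = 1.0 / Dot(y[i], s[i]);
            alpha[i] = rho * Dot(s[i], q);
            for (int j = 0; j < n; j++)
                q[j] -= alpha[i] * y[i][j];
        }

        // Initial Hessian scaling: gamma = (s.y) / (y.y) for the newest pair.
        double gamma = m > 0 ? Dot(s[m - 1], y[m - 1]) / Dot(y[m - 1], y[m - 1]) : 1.0;
        var r = new double[n];
        for (int j = 0; j < n; j++)
            r[j] = gamma * q[j];

        for (int i = 0; i < m; i++)               // oldest to newest
        {
            double rho = 1.0 / Dot(y[i], s[i]);
            double beta = rho * Dot(y[i], r);
            for (int j = 0; j < n; j++)
                r[j] += (alpha[i] - beta) * s[i][j];
        }
        return r;
    }

    static double Dot(double[] a, double[] b)
    {
        double sum = 0;
        for (int i = 0; i < a.Length; i++)
            sum += a[i] * b[i];
        return sum;
    }
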
src/Microsoft.ML.StandardLearners/Optimizer/SgdOptimizer.cs (+3 −3)

@@ -14,12 +14,12 @@ namespace Microsoft.ML.Numeric
     /// </summary>
     /// <param name="x">Current iterate</param>
     /// <returns>True if search should terminate</returns>
-    public delegate bool DTerminate(in VBuffer<Float> x);
+    internal delegate bool DTerminate(in VBuffer<Float> x);

     /// <summary>
     /// Stochastic gradient descent with variations (minibatch, momentum, averaging).
     /// </summary>
-    public sealed class SgdOptimizer
+    internal sealed class SgdOptimizer
     {
         private int _batchSize;

@@ -227,7 +227,7 @@ public void Minimize(DStochasticGradient f, ref VBuffer<Float> initial, ref VBuf
     /// <summary>
     /// Deterministic gradient descent with line search
     /// </summary>
-    public class GDOptimizer
+    internal class GDOptimizer
     {
         /// <summary>
         /// Line search to use.

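The variations SgdOptimizer's summary lists compose naturally: a momentum buffer smooths the stochastic step, and iterate averaging returns the running mean of the visited points rather than the final one. A toy sketch of one such update loop (illustrative; the real class works over VBuffer<Float> with DTerminate callbacks):

    // SGD with momentum and iterate averaging, on dense double[] weights.
    // gradAt(w, seed) is assumed to return a stochastic gradient sample at w.
    static double[] SgdSketch(Func<double[], int, double[]> gradAt,
        double[] w0, int steps, double lr = 0.1, double momentum = 0.9)
    {
        int n = w0.Length;
        var w = (double[])w0.Clone();
        var velocity = new double[n];
        var average = new double[n];
        var rng = new Random(42);

        for (int t = 1; t <= steps; t++)
        {
            double[] g = gradAt(w, rng.Next());   // stochastic gradient sample
            for (int j = 0; j < n; j++)
            {
                velocity[j] = momentum * velocity[j] - lr * g[j];
                w[j] += velocity[j];
                // Running mean of iterates: average += (w - average) / t.
                average[j] += (w[j] - average[j]) / t;
            }
        }
        return average;   // averaged iterate, often more stable than w itself
    }
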
src/Microsoft.ML.StandardLearners/Standard/LinearModelParameters.cs (+4 −3)

@@ -51,7 +51,8 @@ public abstract class LinearModelParameters : ModelParametersBase<float>,
         ISingleCanSavePfa,
         ISingleCanSaveOnnx
     {
-        protected readonly VBuffer<float> Weight;
+        [BestFriend]
+        private protected readonly VBuffer<float> Weight;

         // _weightsDense is not persisted and is used for performance when the input instance is sparse.
         private VBuffer<float> _weightsDense;

@@ -250,7 +251,7 @@ bool ISingleCanSaveOnnx.SaveAsOnnx(OnnxContext ctx, string[] outputs, string fea
         }

         // Generate the score from the given values, assuming they have already been normalized.
-        protected virtual float Score(in VBuffer<float> src)
+        private protected virtual float Score(in VBuffer<float> src)
         {
             if (src.IsDense)
             {

@@ -711,7 +712,7 @@ private protected override void SaveCore(ModelSaveContext ctx)
             ctx.SetVersionInfo(GetVersionInfo());
         }

-        protected override float Score(in VBuffer<float> src)
+        private protected override float Score(in VBuffer<float> src)
         {
             return MathUtils.ExpSlow(base.Score(in src));
         }

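This file shows the two narrowing tools together. private protected (C# 7.2) is the intersection of protected and internal: only derived classes inside the same assembly see the member, so external subclasses of LinearModelParameters lose access without the member becoming fully private; [BestFriend] then re-opens it to specifically blessed ML.NET assemblies. A minimal demonstration of the accessibility rule alone, with hypothetical types:

    // Assembly A
    public class ModelBase
    {
        private protected float Bias;           // derived AND same-assembly only
    }

    public class SameAssemblyModel : ModelBase
    {
        public float PeekBias() => Bias;        // OK: derived, same assembly
    }

    // Assembly B (a separate compilation)
    public class ExternalModel : ModelBase
    {
        // public float PeekBias() => Bias;     // compile error: not accessible from
                                                // another assembly, despite deriving
    }
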
src/Microsoft.ML.StandardLearners/Standard/LogisticRegression/LbfgsPredictorBase.cs (+32 −32)

@@ -105,28 +105,28 @@ internal static class Defaults

         private const string RegisterName = nameof(LbfgsTrainerBase<TArgs, TTransformer, TModel>);

-        protected int NumFeatures;
-        protected VBuffer<float> CurrentWeights;
-        protected long NumGoodRows;
-        protected Double WeightSum;
-        protected bool ShowTrainingStats;
+        private protected int NumFeatures;
+        private protected VBuffer<float> CurrentWeights;
+        private protected long NumGoodRows;
+        private protected Double WeightSum;
+        private protected bool ShowTrainingStats;

         private TModel _srcPredictor;

-        protected readonly TArgs Args;
-        protected readonly float L2Weight;
-        protected readonly float L1Weight;
-        protected readonly float OptTol;
-        protected readonly int MemorySize;
-        protected readonly int MaxIterations;
-        protected readonly float SgdInitializationTolerance;
-        protected readonly bool Quiet;
-        protected readonly float InitWtsDiameter;
-        protected readonly bool UseThreads;
-        protected readonly int? NumThreads;
-        protected readonly bool DenseOptimizer;
-        protected readonly long MaxNormalizationExamples;
-        protected readonly bool EnforceNonNegativity;
+        private protected readonly TArgs Args;
+        private protected readonly float L2Weight;
+        private protected readonly float L1Weight;
+        private protected readonly float OptTol;
+        private protected readonly int MemorySize;
+        private protected readonly int MaxIterations;
+        private protected readonly float SgdInitializationTolerance;
+        private protected readonly bool Quiet;
+        private protected readonly float InitWtsDiameter;
+        private protected readonly bool UseThreads;
+        private protected readonly int? NumThreads;
+        private protected readonly bool DenseOptimizer;
+        private protected readonly long MaxNormalizationExamples;
+        private protected readonly bool EnforceNonNegativity;

         // The training data, when NOT using multiple threads.
         private RoleMappedData _data;

@@ -256,9 +256,9 @@ private static TArgs ArgsInit(string featureColumn, SchemaShape.Column labelColu
             return args;
         }

-        protected virtual int ClassCount => 1;
-        protected int BiasCount => ClassCount;
-        protected int WeightCount => ClassCount * NumFeatures;
+        private protected virtual int ClassCount => 1;
+        private protected int BiasCount => ClassCount;
+        private protected int WeightCount => ClassCount * NumFeatures;
         private protected virtual Optimizer InitializeOptimizer(IChannel ch, FloatLabelCursor.Factory cursorFactory,
             out VBuffer<float> init, out ITerminationCriterion terminationCriterion)
         {

@@ -364,15 +364,15 @@ private protected virtual VBuffer<float> InitializeWeightsSgd(IChannel ch, Float
             return result;
         }

-        protected abstract VBuffer<float> InitializeWeightsFromPredictor(TModel srcPredictor);
+        private protected abstract VBuffer<float> InitializeWeightsFromPredictor(TModel srcPredictor);

         private protected abstract void CheckLabel(RoleMappedData data);

-        protected virtual void PreTrainingProcessInstance(float label, in VBuffer<float> feat, float weight)
+        private protected virtual void PreTrainingProcessInstance(float label, in VBuffer<float> feat, float weight)
         {
         }

-        protected abstract TModel CreatePredictor();
+        private protected abstract TModel CreatePredictor();

         /// <summary>
         /// The basic training calls the optimizer

@@ -570,24 +570,24 @@ private protected virtual void TrainCore(IChannel ch, RoleMappedData data)

         // Ensure that the bias portion of vec is represented in vec.
         // REVIEW: Is this really necessary?
-        protected void EnsureBiases(ref VBuffer<float> vec)
+        private protected void EnsureBiases(ref VBuffer<float> vec)
         {
             // REVIEW: Consider promoting this "densify first n entries" to a general purpose utility,
             // if we ever encounter other situations where this becomes useful.
             Contracts.Assert(vec.Length == BiasCount + WeightCount);
             VBufferUtils.DensifyFirst(ref vec, BiasCount);
         }

-        protected abstract float AccumulateOneGradient(in VBuffer<float> feat, float label, float weight,
+        private protected abstract float AccumulateOneGradient(in VBuffer<float> feat, float label, float weight,
             in VBuffer<float> xDense, ref VBuffer<float> grad, ref float[] scratch);

         private protected abstract void ComputeTrainingStatistics(IChannel ch, FloatLabelCursor.Factory cursorFactory, float loss, int numParams);

-        protected abstract void ProcessPriorDistribution(float label, float weight);
+        private protected abstract void ProcessPriorDistribution(float label, float weight);
         /// <summary>
         /// The gradient being used by the optimizer
         /// </summary>
-        protected virtual float DifferentiableFunction(in VBuffer<float> x, ref VBuffer<float> gradient,
+        private protected virtual float DifferentiableFunction(in VBuffer<float> x, ref VBuffer<float> gradient,
             IProgressChannelProvider progress)
         {
             Contracts.Assert((_numChunks == 0) != (_data == null));

@@ -647,7 +647,7 @@ protected virtual float DifferentiableFunction(in VBuffer<float> x, ref VBuffer<
         /// REVIEW: consider getting rid of multithread-targeted members
         /// Using TPL, the distinction between Multithreaded and Sequential implementations is unnecessary
         /// </remarks>
-        protected virtual float DifferentiableFunctionMultithreaded(in VBuffer<float> xDense, ref VBuffer<float> gradient, IProgressChannel pch)
+        private protected virtual float DifferentiableFunctionMultithreaded(in VBuffer<float> xDense, ref VBuffer<float> gradient, IProgressChannel pch)
         {
             Contracts.Assert(_data == null);
             Contracts.Assert(_cursorFactory == null);

@@ -679,7 +679,7 @@ protected virtual float DifferentiableFunctionMultithreaded(in VBuffer<float> xD
             return loss;
         }

-        protected float DifferentiableFunctionComputeChunk(int ichk, in VBuffer<float> xDense, ref VBuffer<float> grad, IProgressChannel pch)
+        private protected float DifferentiableFunctionComputeChunk(int ichk, in VBuffer<float> xDense, ref VBuffer<float> grad, IProgressChannel pch)
         {
             Contracts.Assert(0 <= ichk && ichk < _numChunks);
             Contracts.AssertValueOrNull(pch);

@@ -733,7 +733,7 @@ private protected float DifferentiableFunctionStream(FloatLabelCursor.Factory cu
             return (float)loss;
         }

-        protected VBuffer<float> InitializeWeights(IEnumerable<float> weights, IEnumerable<float> biases)
+        private protected VBuffer<float> InitializeWeights(IEnumerable<float> weights, IEnumerable<float> biases)
        {
             Contracts.AssertValue(biases);
             Contracts.AssertValue(weights);
