@@ -105,28 +105,28 @@ internal static class Defaults

        private const string RegisterName = nameof(LbfgsTrainerBase<TArgs, TTransformer, TModel>);

-        protected int NumFeatures;
-        protected VBuffer<float> CurrentWeights;
-        protected long NumGoodRows;
-        protected Double WeightSum;
-        protected bool ShowTrainingStats;
+        private protected int NumFeatures;
+        private protected VBuffer<float> CurrentWeights;
+        private protected long NumGoodRows;
+        private protected Double WeightSum;
+        private protected bool ShowTrainingStats;

        private TModel _srcPredictor;

-        protected readonly TArgs Args;
-        protected readonly float L2Weight;
-        protected readonly float L1Weight;
-        protected readonly float OptTol;
-        protected readonly int MemorySize;
-        protected readonly int MaxIterations;
-        protected readonly float SgdInitializationTolerance;
-        protected readonly bool Quiet;
-        protected readonly float InitWtsDiameter;
-        protected readonly bool UseThreads;
-        protected readonly int? NumThreads;
-        protected readonly bool DenseOptimizer;
-        protected readonly long MaxNormalizationExamples;
-        protected readonly bool EnforceNonNegativity;
+        private protected readonly TArgs Args;
+        private protected readonly float L2Weight;
+        private protected readonly float L1Weight;
+        private protected readonly float OptTol;
+        private protected readonly int MemorySize;
+        private protected readonly int MaxIterations;
+        private protected readonly float SgdInitializationTolerance;
+        private protected readonly bool Quiet;
+        private protected readonly float InitWtsDiameter;
+        private protected readonly bool UseThreads;
+        private protected readonly int? NumThreads;
+        private protected readonly bool DenseOptimizer;
+        private protected readonly long MaxNormalizationExamples;
+        private protected readonly bool EnforceNonNegativity;

        // The training data, when NOT using multiple threads.
        private RoleMappedData _data;
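This hunk tightens the shared trainer state from `protected` to `private protected` (C# 7.2): the members stay visible to subclasses declared in the same assembly but become inaccessible to externally derived types. A minimal sketch of the difference, using hypothetical types rather than the actual trainers:

```csharp
public abstract class TrainerBase
{
    // Visible to derived classes in any assembly.
    protected int ProtectedState;

    // Visible only to derived classes declared in the same assembly.
    private protected int AssemblyLocalState;
}

// Compiled into a *different* assembly:
public class ExternalTrainer : TrainerBase
{
    private void Touch()
    {
        ProtectedState = 1;        // OK before and after this change.
        // AssemblyLocalState = 1; // Compile error: inaccessible due to its
                                   // protection level, which is the point of
                                   // the protected -> private protected move.
    }
}
```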
@@ -256,9 +256,9 @@ private static TArgs ArgsInit(string featureColumn, SchemaShape.Column labelColu
            return args;
        }

-        protected virtual int ClassCount => 1;
-        protected int BiasCount => ClassCount;
-        protected int WeightCount => ClassCount * NumFeatures;
+        private protected virtual int ClassCount => 1;
+        private protected int BiasCount => ClassCount;
+        private protected int WeightCount => ClassCount * NumFeatures;

        private protected virtual Optimizer InitializeOptimizer(IChannel ch, FloatLabelCursor.Factory cursorFactory,
            out VBuffer<float> init, out ITerminationCriterion terminationCriterion)
        {
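`BiasCount` and `WeightCount` describe the flat layout of the parameter vector: `ClassCount` biases first, then the weights, for a total length of `BiasCount + WeightCount` (the assertion in `EnsureBiases` later in this diff checks exactly that). A sketch of one plausible row-major indexing over such a vector; the helper name and ordering are illustrative, not taken from this PR:

```csharp
// Flat layout implied by BiasCount/WeightCount: [biases | per-class weight rows].
static float GetParameter(float[] vec, int classCount, int numFeatures, int iClass, int iFeature)
{
    int biasCount = classCount;   // BiasCount => ClassCount
    // Skip the bias block, then index class iClass's weight row (assumed row-major).
    return vec[biasCount + iClass * numFeatures + iFeature];
}
```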
@@ -364,15 +364,15 @@ private protected virtual VBuffer<float> InitializeWeightsSgd(IChannel ch, Float
            return result;
        }

-        protected abstract VBuffer<float> InitializeWeightsFromPredictor(TModel srcPredictor);
+        private protected abstract VBuffer<float> InitializeWeightsFromPredictor(TModel srcPredictor);

        private protected abstract void CheckLabel(RoleMappedData data);

-        protected virtual void PreTrainingProcessInstance(float label, in VBuffer<float> feat, float weight)
+        private protected virtual void PreTrainingProcessInstance(float label, in VBuffer<float> feat, float weight)
        {
        }

-        protected abstract TModel CreatePredictor();
+        private protected abstract TModel CreatePredictor();

        /// <summary>
        /// The basic training calls the optimizer
@@ -570,24 +570,24 @@ private protected virtual void TrainCore(IChannel ch, RoleMappedData data)

        // Ensure that the bias portion of vec is represented in vec.
        // REVIEW: Is this really necessary?
-        protected void EnsureBiases(ref VBuffer<float> vec)
+        private protected void EnsureBiases(ref VBuffer<float> vec)
        {
            // REVIEW: Consider promoting this "densify first n entries" to a general purpose utility,
            // if we ever encounter other situations where this becomes useful.
            Contracts.Assert(vec.Length == BiasCount + WeightCount);
            VBufferUtils.DensifyFirst(ref vec, BiasCount);
        }

-        protected abstract float AccumulateOneGradient(in VBuffer<float> feat, float label, float weight,
+        private protected abstract float AccumulateOneGradient(in VBuffer<float> feat, float label, float weight,
            in VBuffer<float> xDense, ref VBuffer<float> grad, ref float[] scratch);

        private protected abstract void ComputeTrainingStatistics(IChannel ch, FloatLabelCursor.Factory cursorFactory, float loss, int numParams);

-        protected abstract void ProcessPriorDistribution(float label, float weight);
+        private protected abstract void ProcessPriorDistribution(float label, float weight);
        /// <summary>
        /// The gradient being used by the optimizer
        /// </summary>
-        protected virtual float DifferentiableFunction(in VBuffer<float> x, ref VBuffer<float> gradient,
+        private protected virtual float DifferentiableFunction(in VBuffer<float> x, ref VBuffer<float> gradient,
            IProgressChannelProvider progress)
        {
            Contracts.Assert((_numChunks == 0) != (_data == null));
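`EnsureBiases` relies on `VBufferUtils.DensifyFirst` to make the first `BiasCount` slots explicitly represented, so the bias entries can be read and written even when the buffer is sparse. A rough standalone sketch of that "densify first n entries" idea, using plain arrays instead of the actual `VBuffer<T>` API:

```csharp
using System.Collections.Generic;

// Rough sketch of "densify first n entries" on a sparse (indices, values) pair,
// assuming indices are sorted ascending; the real VBufferUtils.DensifyFirst
// works on a VBuffer<T> in place.
static (int[] Indices, float[] Values) DensifyFirst(int[] indices, float[] values, int n)
{
    var outIdx = new List<int>();
    var outVal = new List<float>();
    for (int i = 0; i < n; i++) { outIdx.Add(i); outVal.Add(0f); } // explicit [0, n) block
    for (int k = 0; k < indices.Length; k++)
    {
        if (indices[k] < n)
            outVal[indices[k]] = values[k];   // fold an existing sparse entry into the block
        else
        {
            outIdx.Add(indices[k]);           // entries at or beyond n stay sparse
            outVal.Add(values[k]);
        }
    }
    return (outIdx.ToArray(), outVal.ToArray());
}
```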
@@ -647,7 +647,7 @@ protected virtual float DifferentiableFunction(in VBuffer<float> x, ref VBuffer<
        /// REVIEW: consider getting rid of multithread-targeted members
        /// Using TPL, the distinction between Multithreaded and Sequential implementations is unnecessary
        /// </remarks>
-        protected virtual float DifferentiableFunctionMultithreaded(in VBuffer<float> xDense, ref VBuffer<float> gradient, IProgressChannel pch)
+        private protected virtual float DifferentiableFunctionMultithreaded(in VBuffer<float> xDense, ref VBuffer<float> gradient, IProgressChannel pch)
        {
            Contracts.Assert(_data == null);
            Contracts.Assert(_cursorFactory == null);
@@ -679,7 +679,7 @@ protected virtual float DifferentiableFunctionMultithreaded(in VBuffer<float> xD
            return loss;
        }

-        protected float DifferentiableFunctionComputeChunk(int ichk, in VBuffer<float> xDense, ref VBuffer<float> grad, IProgressChannel pch)
+        private protected float DifferentiableFunctionComputeChunk(int ichk, in VBuffer<float> xDense, ref VBuffer<float> grad, IProgressChannel pch)
        {
            Contracts.Assert(0 <= ichk && ichk < _numChunks);
            Contracts.AssertValueOrNull(pch);
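`DifferentiableFunctionMultithreaded` and `DifferentiableFunctionComputeChunk` suggest the usual map-reduce shape: each chunk accumulates a partial loss and gradient over its rows, and the partials are merged into the full result. A self-contained sketch of that pattern with TPL, using a toy per-chunk loss since the real `AccumulateOneGradient` is abstract here; all names below are illustrative:

```csharp
using System;
using System.Threading.Tasks;

static class ChunkedGradientSketch
{
    // Toy per-chunk accumulator standing in for the abstract AccumulateOneGradient:
    // squared error of x[0] against each value in the chunk.
    static float AccumulateChunk(float[] x, float[] chunk, float[] grad)
    {
        float loss = 0;
        foreach (float v in chunk)
        {
            float err = x[0] - v;
            grad[0] += 2 * err;   // gradient of err^2 w.r.t. x[0]
            loss += err * err;
        }
        return loss;
    }

    // One chunk per task; merge partial gradients under a lock, mirroring the
    // per-chunk members in the diff above.
    static float ComputeLossAndGradient(float[] x, float[][] chunks, float[] gradient)
    {
        Array.Clear(gradient, 0, gradient.Length);
        float loss = 0;
        object sync = new object();
        Parallel.For(0, chunks.Length, ichk =>
        {
            var localGrad = new float[gradient.Length];
            float localLoss = AccumulateChunk(x, chunks[ichk], localGrad);
            lock (sync)
            {
                loss += localLoss;
                for (int i = 0; i < gradient.Length; i++)
                    gradient[i] += localGrad[i];
            }
        });
        return loss;
    }
}
```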
@@ -733,7 +733,7 @@ private protected float DifferentiableFunctionStream(FloatLabelCursor.Factory cu
            return (float)loss;
        }

-        protected VBuffer<float> InitializeWeights(IEnumerable<float> weights, IEnumerable<float> biases)
+        private protected VBuffer<float> InitializeWeights(IEnumerable<float> weights, IEnumerable<float> biases)
        {
            Contracts.AssertValue(biases);
            Contracts.AssertValue(weights);