Skip to content

Commit 8d0e86e

Browse files
committed
PR feedback.
1 parent 95fcf1a commit 8d0e86e

File tree

2 files changed

+20
-20
lines changed

2 files changed

+20
-20
lines changed

src/Microsoft.ML.StandardTrainers/Standard/SdcaBinary.cs

+14-14
Original file line numberDiff line numberDiff line change
@@ -2170,7 +2170,7 @@ private protected override void CheckLabel(RoleMappedData examples, out int weig
21702170
}
21712171

21722172
/// <summary>
2173-
/// The<see cref="IEstimator{TTransformer}"/> for training logistic regression using a parallel stochastic gradient method.
2173+
/// The <see cref="IEstimator{TTransformer}"/> for training logistic regression using a parallel stochastic gradient method.
21742174
/// The trained model is <a href='https://en.wikipedia.org/wiki/Calibration_(statistics)'>calibrated</a> and can produce probability by feeding the output value of the
21752175
/// linear function to a <see cref="PlattCalibrator"/>.
21762176
/// </summary>
@@ -2179,7 +2179,7 @@ private protected override void CheckLabel(RoleMappedData examples, out int weig
21792179
/// To create this trainer, use [SgdCalibrated](xref:Microsoft.ML.StandardTrainersCatalog.SgdCalibrated(Microsoft.ML.BinaryClassificationCatalog.BinaryClassificationTrainers,System.String,System.String,System.String,System.Int32,System.Double,System.Single))
21802180
/// or [SgdCalibrated(Options)](xref:Microsoft.ML.StandardTrainersCatalog.SgdCalibrated(Microsoft.ML.BinaryClassificationCatalog.BinaryClassificationTrainers,Microsoft.ML.Trainers.SgdCalibratedTrainer.Options)).
21812181
///
2182-
/// [!include[io](~/../docs/samples/docs/api-reference/io-columns-binary-classification.md)]
2182+
/// [!include[io](~/../docs/samples/docs/api-reference/io-columns-regression.md)]
21832183
///
21842184
/// ### Trainer Characteristics
21852185
/// | | |
@@ -2191,14 +2191,14 @@ private protected override void CheckLabel(RoleMappedData examples, out int weig
21912191
///
21922192
/// ### Training Algorithm Details
21932193
/// The Stochastic Gradient Descent (SGD) is one of the popular stochastic optimization procedures that can be integrated
2194-
/// into several machine learning tasks to achieve state-of-the-art performance. This trainer implements the Hogwild SGD for binary classification
2195-
/// that supports multi-threading without any locking. If the associated optimization problem is sparse, Hogwild SGD achieves a nearly optimal
2196-
/// rate of convergence. For more details about Hogwild SGD can be found [here](http://arxiv.org/pdf/1106.5730v2.pdf).
2194+
/// into several machine learning tasks to achieve state-of-the-art performance. This trainer implements the Hogwild Stochastic Gradient Descent for binary classification
2195+
/// that supports multi-threading without any locking. If the associated optimization problem is sparse, Hogwild Stochastic Gradient Descent achieves a nearly optimal
2196+
/// rate of convergence. More details about Hogwild Stochastic Gradient Descent can be found [here](http://arxiv.org/pdf/1106.5730v2.pdf).
21972197
/// ]]>
21982198
/// </format>
21992199
/// </remarks>
2200-
/// <seealso cref="Microsoft.ML.StandardTrainersCatalog.SgdCalibrated(Microsoft.ML.BinaryClassificationCatalog.BinaryClassificationTrainers,System.String,System.String,System.String,System.Int32,System.Double,System.Single)"/>
2201-
/// <seealso cref="Microsoft.ML.StandardTrainersCatalog.SgdCalibrated(Microsoft.ML.BinaryClassificationCatalog.BinaryClassificationTrainers,Microsoft.ML.Trainers.SgdCalibratedTrainer.Options)"/>
2200+
/// <seealso cref="StandardTrainersCatalog.SgdCalibrated(BinaryClassificationCatalog.BinaryClassificationTrainers, string, string, string, int, double, float)"/>
2201+
/// <seealso cref="StandardTrainersCatalog.SgdCalibrated(BinaryClassificationCatalog.BinaryClassificationTrainers, SgdCalibratedTrainer.Options)"/>
22022202
/// <seealso cref="Options"/>
22032203
public sealed class SgdCalibratedTrainer :
22042204
SgdBinaryTrainerBase<CalibratedModelParametersBase<LinearBinaryModelParameters, PlattCalibrator>>
@@ -2269,7 +2269,7 @@ private protected override CalibratedModelParametersBase<LinearBinaryModelParame
22692269
/// To create this trainer, use [SgdNonCalibrated](xref:Microsoft.ML.StandardTrainersCatalog.SgdNonCalibrated(Microsoft.ML.BinaryClassificationCatalog.BinaryClassificationTrainers,System.String,System.String,System.String,Microsoft.ML.Trainers.IClassificationLoss,System.Int32,System.Double,System.Single))
22702270
/// or [SgdNonCalibrated(Options)](xref:Microsoft.ML.StandardTrainersCatalog.SgdNonCalibrated(Microsoft.ML.BinaryClassificationCatalog.BinaryClassificationTrainers,Microsoft.ML.Trainers.SgdNonCalibratedTrainer.Options)).
22712271
///
2272-
/// [!include[io](~/../docs/samples/docs/api-reference/io-columns-binary-classification.md)]
2272+
/// [!include[io](~/../docs/samples/docs/api-reference/io-columns-regression.md)]
22732273
///
22742274
/// ### Trainer Characteristics
22752275
/// | | |
@@ -2280,15 +2280,15 @@ private protected override CalibratedModelParametersBase<LinearBinaryModelParame
22802280
/// | Required NuGet in addition to Microsoft.ML | None |
22812281
///
22822282
/// ### Training Algorithm Details
2283-
/// The Stochastic Gradient Descent (SGD) is one of the popular stochastic optimization procedures that can be integrated
2284-
/// into several machine learning tasks to achieve state-of-the-art performance. This trainer implements the Hogwild SGD for binary classification
2285-
/// that supports multi-threading without any locking. If the associated optimization problem is sparse, Hogwild SGD achieves a nearly optimal
2286-
/// rate of convergence. For more details about Hogwild SGD can be found [here](http://arxiv.org/pdf/1106.5730v2.pdf).
2283+
/// Stochastic Gradient Descent is one of the popular stochastic optimization procedures that can be integrated
2284+
/// into several machine learning tasks to achieve state-of-the-art performance. This trainer implements the Hogwild Stochastic Gradient Descent for binary classification
2285+
/// that supports multi-threading without any locking. If the associated optimization problem is sparse, Hogwild Stochastic Gradient Descent achieves a nearly optimal
2286+
/// rate of convergence. More details about Hogwild Stochastic Gradient Descent can be found [here](http://arxiv.org/pdf/1106.5730v2.pdf).
22872287
/// ]]>
22882288
/// </format>
22892289
/// </remarks>
2290-
/// <seealso cref="Microsoft.ML.StandardTrainersCatalog.SgdNonCalibrated(Microsoft.ML.BinaryClassificationCatalog.BinaryClassificationTrainers,System.String,System.String,System.String,Microsoft.ML.Trainers.IClassificationLoss,System.Int32,System.Double,System.Single)"/>
2291-
/// <seealso cref="Microsoft.ML.StandardTrainersCatalog.SgdNonCalibrated(Microsoft.ML.BinaryClassificationCatalog.BinaryClassificationTrainers,Microsoft.ML.Trainers.SgdNonCalibratedTrainer.Options)"/>
2290+
/// <seealso cref="StandardTrainersCatalog.SgdNonCalibrated(BinaryClassificationCatalog.BinaryClassificationTrainers, string, string, string, IClassificationLoss, int, double, float)"/>
2291+
/// <seealso cref="StandardTrainersCatalog.SgdNonCalibrated(BinaryClassificationCatalog.BinaryClassificationTrainers, SgdNonCalibratedTrainer.Options)"/>
22922292
/// <seealso cref="Options"/>
22932293
public sealed class SgdNonCalibratedTrainer :
22942294
SgdBinaryTrainerBase<LinearBinaryModelParameters>

src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs

+6-6
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,12 @@ namespace Microsoft.ML
1717
public static class StandardTrainersCatalog
1818
{
1919
/// <summary>
20-
/// Creates a <see cref="Trainers.SgdCalibratedTrainer"/> that predicts a target using a linear classification model.
20+
/// Create <see cref="SgdCalibratedTrainer"/>, which predicts a target using a linear classification model.
2121
/// Stochastic gradient descent (SGD) is an iterative algorithm that optimizes a differentiable objective function.
2222
/// </summary>
2323
/// <param name="catalog">The binary classification catalog trainer object.</param>
2424
/// <param name="labelColumnName">The name of the label column, or dependent variable. The column data must be <see cref="System.Boolean"/>.</param>
25-
/// <param name="featureColumnName">The features, or independent variables. The column data must be a known-sized vector of <see cref="System.Single"/></param>
25+
/// <param name="featureColumnName">The features, or independent variables. The column data must be a known-sized vector of <see cref="System.Single"/>.</param>
2626
/// <param name="exampleWeightColumnName">The name of the example weight column (optional).</param>
2727
/// <param name="numberOfIterations">The maximum number of passes through the training dataset; set to 1 to simulate online learning.</param>
2828
/// <param name="learningRate">The initial learning rate used by SGD.</param>
@@ -49,7 +49,7 @@ public static SgdCalibratedTrainer SgdCalibrated(this BinaryClassificationCatalo
4949
}
5050

5151
/// <summary>
52-
/// Creates a <see cref="Trainers.SgdCalibratedTrainer"/> that predicts a target using a linear classification model and advanced options.
52+
/// Create <see cref="Trainers.SgdCalibratedTrainer"/> with advanced options, which predicts a target using a linear classification model.
5353
/// Stochastic gradient descent (SGD) is an iterative algorithm that optimizes a differentiable objective function.
5454
/// </summary>
5555
/// <param name="catalog">The binary classification catalog trainer object.</param>
@@ -72,12 +72,12 @@ public static SgdCalibratedTrainer SgdCalibrated(this BinaryClassificationCatalo
7272
}
7373

7474
/// <summary>
75-
/// Creates a <see cref="Trainers.SgdNonCalibratedTrainer"/> that predicts a target using a linear classification model.
75+
/// Create <see cref="Trainers.SgdNonCalibratedTrainer"/>, which predicts a target using a linear classification model.
7676
/// Stochastic gradient descent (SGD) is an iterative algorithm that optimizes a differentiable objective function.
7777
/// </summary>
7878
/// <param name="catalog">The binary classification catalog trainer object.</param>
7979
/// <param name="labelColumnName">The name of the label column, or dependent variable. The column data must be <see cref="System.Boolean"/>.</param>
80-
/// <param name="featureColumnName">The features, or independent variables. The column data must be a known-sized vector of <see cref="System.Single"/></param>
80+
/// <param name="featureColumnName">The features, or independent variables. The column data must be a known-sized vector of <see cref="System.Single"/>.</param>
8181
/// <param name="exampleWeightColumnName">The name of the example weight column (optional).</param>
8282
/// <param name="lossFunction">The <a href="https://en.wikipedia.org/wiki/Loss_function">loss</a> function minimized in the training process. Using, for example, <see cref="HingeLoss"/> leads to a support vector machine trainer.</param>
8383
/// <param name="numberOfIterations">The maximum number of passes through the training dataset; set to 1 to simulate online learning.</param>
@@ -106,7 +106,7 @@ public static SgdNonCalibratedTrainer SgdNonCalibrated(this BinaryClassification
106106
}
107107

108108
/// <summary>
109-
/// Creates a <see cref="Trainers.SgdNonCalibratedTrainer"/> that predicts a target using a linear classification model and advanced options.
109+
/// Create <see cref="Trainers.SgdNonCalibratedTrainer"/> with advanced options, which predicts a target using a linear classification model.
110110
/// Stochastic gradient descent (SGD) is an iterative algorithm that optimizes a differentiable objective function.
111111
/// </summary>
112112
/// <param name="catalog">The binary classification catalog trainer object.</param>

0 commit comments

Comments
 (0)