
[ML] Report the "actual" memory usage of the autodetect process #2846


Open · wants to merge 12 commits into main

6 changes: 5 additions & 1 deletion bin/autodetect/Main.cc
@@ -24,6 +24,7 @@
#include <core/CJsonOutputStreamWrapper.h>
#include <core/CLogger.h>
#include <core/CProcessPriority.h>
#include <core/CProcessStats.h>
#include <core/CProgramCounters.h>
#include <core/CStringUtils.h>
#include <core/CoreTypes.h>
@@ -83,7 +84,8 @@ int main(int argc, char** argv) {
ml::counter_t::E_TSADNumberMemoryLimitModelCreationFailures,
ml::counter_t::E_TSADNumberPrunedItems,
ml::counter_t::E_TSADAssignmentMemoryBasis,
ml::counter_t::E_TSADOutputMemoryAllocatorUsage};
ml::counter_t::E_TSADOutputMemoryAllocatorUsage,
ml::counter_t::E_TSADMaxResidentSetSize};

ml::core::CProgramCounters::registerProgramCounterTypes(counters);

@@ -151,6 +153,8 @@ int main(int argc, char** argv) {
}
cancellerThread.stop();

LOG_DEBUG(<< "Max Resident Set Size: " << ml::core::CProcessStats::maxResidentSetSize());

// Log the program version immediately after reconfiguring the logger. This
// must be done from the program, and NOT a shared library, as each program
// statically links its own version library.
1 change: 1 addition & 0 deletions docs/CHANGELOG.asciidoc
@@ -33,6 +33,7 @@
=== Enhancements

* Track memory used in the hierarchical results normalizer. (See {ml-pull}2831[#2831].)
* Report the actual memory usage of the autodetect process. (See {ml-pull}2846[#2846].)

=== Bug Fixes

7 changes: 6 additions & 1 deletion include/core/CProgramCounters.h
@@ -112,6 +112,9 @@ enum ECounterTypes {
//! The memory currently used by the allocators to output JSON documents, in bytes.
E_TSADOutputMemoryAllocatorUsage = 30,

//! The maximum resident set size of the process, in bytes.
E_TSADMaxResidentSetSize = 31,

// Data Frame Outlier Detection

//! The estimated peak memory usage for outlier detection in bytes
@@ -146,7 +149,7 @@ enum ECounterTypes {
// Add any new values here

//! This MUST be last, increment the value for every new enum added
E_LastEnumCounter = 31
E_LastEnumCounter = 32
};

static constexpr std::size_t NUM_COUNTERS = static_cast<std::size_t>(E_LastEnumCounter);
@@ -355,6 +358,8 @@ class CORE_EXPORT CProgramCounters {
"Which option is being used to get model memory for node assignment?"},
{counter_t::E_TSADOutputMemoryAllocatorUsage, "E_TSADOutputMemoryAllocatorUsage",
"The amount of memory used to output JSON documents, in bytes."},
{counter_t::E_TSADMaxResidentSetSize, "E_TSADMaxResidentSetSize",
"The maximum resident set size of the process, in bytes"},
{counter_t::E_DFOEstimatedPeakMemoryUsage, "E_DFOEstimatedPeakMemoryUsage",
"The upfront estimate of the peak memory outlier detection would use"},
{counter_t::E_DFOPeakMemoryUsage, "E_DFOPeakMemoryUsage", "The peak memory outlier detection used"},
3 changes: 3 additions & 0 deletions include/model/CResourceMonitor.h
@@ -54,6 +54,7 @@ class MODEL_EXPORT CResourceMonitor {
std::size_t s_AdjustedUsage{0};
std::size_t s_PeakUsage{0};
std::size_t s_AdjustedPeakUsage{0};
std::size_t s_ActualMemoryUsage{0};
std::size_t s_ByFields{0};
std::size_t s_PartitionFields{0};
std::size_t s_OverFields{0};
@@ -180,6 +181,8 @@ class MODEL_EXPORT CResourceMonitor {
//! Returns the sum of used memory plus any extra memory
std::size_t totalMemory() const;

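//! Returns the actual memory usage of the process, as reported by the OS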
std::size_t actualMemoryUsage() const;
Review comment from a Contributor:
I think we can come up with something better than actualMemoryUsage. Maybe: systemMemoryUsage?


private:
using TMonitoredResourcePtrSizeUMap =
boost::unordered_map<CMonitoredResource*, std::size_t>;
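The matching change to lib/model/CResourceMonitor.cc is not shown in this view. A minimal sketch of what the new accessor plausibly looks like, assuming it simply surfaces the OS-level statistic added in core (the function body here is an assumption, not the PR's actual implementation):

```cpp
#include <core/CProcessStats.h>

#include <model/CResourceMonitor.h>

namespace ml {
namespace model {

// Sketch only (assumed implementation): report the process's maximum resident
// set size as the "actual" memory usage that ends up in s_ActualMemoryUsage.
std::size_t CResourceMonitor::actualMemoryUsage() const {
    return core::CProcessStats::maxResidentSetSize();
}
}
}
```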
4 changes: 3 additions & 1 deletion include/model/ModelTypes.h
@@ -719,7 +719,9 @@ enum EAssignmentMemoryBasis {
E_AssignmentBasisUnknown = 0, //!< Decision made in Java code
E_AssignmentBasisModelMemoryLimit = 1, //!< Use model memory limit
E_AssignmentBasisCurrentModelBytes = 2, //!< Use current actual model size
E_AssignmentBasisPeakModelBytes = 3 //!< Use highest ever actual model size
E_AssignmentBasisPeakModelBytes = 3, //!< Use highest ever actual model size
E_AssignmentBasisActualMemoryUsageBytes = 4 //!< Use the actual memory size
//!< of the process, as reported by the OS
};

//! Get a string description of \p assignmentMemoryBasis.
4 changes: 4 additions & 0 deletions lib/api/CModelSizeStatsJsonWriter.cc
@@ -25,6 +25,7 @@ const std::string JOB_ID{"job_id"};
const std::string MODEL_SIZE_STATS{"model_size_stats"};
const std::string MODEL_BYTES{"model_bytes"};
const std::string PEAK_MODEL_BYTES{"peak_model_bytes"};
const std::string ACTUAL_MEMORY_USAGE_BYTES{"actual_memory_usage_bytes"};
const std::string MODEL_BYTES_EXCEEDED{"model_bytes_exceeded"};
const std::string MODEL_BYTES_MEMORY_LIMIT{"model_bytes_memory_limit"};
const std::string TOTAL_BY_FIELD_COUNT{"total_by_field_count"};
@@ -60,6 +61,9 @@ void CModelSizeStatsJsonWriter::write(const std::string& jobId,
writer.onKey(PEAK_MODEL_BYTES);
writer.onUint64(results.s_AdjustedPeakUsage);

writer.onKey(ACTUAL_MEMORY_USAGE_BYTES);
writer.onUint64(results.s_ActualMemoryUsage);

writer.onKey(MODEL_BYTES_EXCEEDED);
writer.onUint64(results.s_BytesExceeded);

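For context, the new key can be checked in the emitted `model_size_stats` object with the same Boost.JSON accessors the updated `CJsonOutputWriterTest` uses below. A sketch (not part of the PR) of that check, mirroring the `s_Usage <= s_ActualMemoryUsage` assertions added to the tests:

```cpp
#include <boost/json.hpp>

#include <cassert>
#include <cstdint>

// Sketch: given a parsed "model_size_stats" object, verify the new field is
// present and is at least as large as the modelled memory it accompanies.
void checkModelSizeStats(const boost::json::object& sizeStats) {
    assert(sizeStats.contains("model_bytes"));
    assert(sizeStats.contains("actual_memory_usage_bytes"));
    auto modelBytes = sizeStats.at("model_bytes").to_number<std::int64_t>();
    auto actualBytes =
        sizeStats.at("actual_memory_usage_bytes").to_number<std::int64_t>();
    assert(modelBytes <= actualBytes);
}
```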
19 changes: 17 additions & 2 deletions lib/api/unittest/CAnomalyJobLimitTest.cc
@@ -9,6 +9,7 @@
* limitation.
*/
#include <core/CJsonOutputStreamWrapper.h>
#include <core/CProcessStats.h>
#include <core/CoreTypes.h>

#include <maths/common/CIntegerTools.h>
@@ -92,6 +93,8 @@ BOOST_AUTO_TEST_CASE(testAccuracy) {
std::size_t nonLimitedUsage{0};
std::size_t limitedUsage{0};

std::size_t nonLimitedActualUsage{0};
std::size_t limitedActualUsage{0};
{
// Without limits, this data set should make the models around
// 1230000 bytes
@@ -105,8 +108,6 @@
core::CJsonOutputStreamWrapper wrappedOutputStream(outputStrm);

model::CLimits limits;
//limits.resourceMonitor().m_ByteLimitHigh = 100000;
//limits.resourceMonitor().m_ByteLimitLow = 90000;

{
LOG_TRACE(<< "Setting up job");
@@ -127,8 +128,12 @@
BOOST_REQUIRE_EQUAL(uint64_t(18630), job.numRecordsHandled());

nonLimitedUsage = limits.resourceMonitor().totalMemory();
nonLimitedActualUsage = limits.resourceMonitor().actualMemoryUsage();
}
}
LOG_DEBUG(<< "nonLimitedUsage: " << nonLimitedUsage);
LOG_DEBUG(<< "nonLimitedActualUsage: " << nonLimitedActualUsage);
BOOST_TEST_REQUIRE(nonLimitedActualUsage >= nonLimitedUsage);
{
// Now run the data with limiting
ml::api::CAnomalyJobConfig jobConfig = CTestAnomalyJob::makeSimpleJobConfig(
@@ -166,11 +171,15 @@
// TODO this limit must be tightened once there is more granular
// control over the model memory creation
limitedUsage = limits.resourceMonitor().totalMemory();
limitedActualUsage = limits.resourceMonitor().actualMemoryUsage();
}
LOG_TRACE(<< outputStrm.str());

LOG_DEBUG(<< "Non-limited usage: " << nonLimitedUsage << "; limited: " << limitedUsage);
LOG_DEBUG(<< "Non-limited Actual Usage: " << nonLimitedActualUsage);
LOG_DEBUG(<< "Limited Actual Usage: " << limitedActualUsage);
BOOST_TEST_REQUIRE(limitedUsage < nonLimitedUsage);
BOOST_TEST_REQUIRE(limitedActualUsage >= limitedUsage);
}
}

@@ -375,6 +384,7 @@ BOOST_AUTO_TEST_CASE(testModelledEntityCountForFixedMemoryLimit) {
LOG_DEBUG(<< "# partition = " << used.s_PartitionFields);
LOG_DEBUG(<< "Memory status = " << used.s_MemoryStatus);
LOG_DEBUG(<< "Memory usage bytes = " << used.s_Usage);
LOG_DEBUG(<< "Actual memory usage bytes = " << used.s_ActualMemoryUsage);
LOG_DEBUG(<< "Memory limit bytes = "
<< memoryLimit * core::constants::BYTES_IN_MEGABYTES);
BOOST_TEST_REQUIRE(used.s_ByFields > testParam.s_ExpectedByFields);
@@ -384,6 +394,7 @@
memoryLimit * core::constants::BYTES_IN_MEGABYTES / 2, used.s_Usage,
memoryLimit * core::constants::BYTES_IN_MEGABYTES /
testParam.s_ExpectedByMemoryUsageRelativeErrorDivisor);
BOOST_TEST_REQUIRE(used.s_Usage <= used.s_ActualMemoryUsage);
}

LOG_DEBUG(<< "**** Test partition with bucketLength = " << testParam.s_BucketLength
@@ -428,6 +439,7 @@ BOOST_AUTO_TEST_CASE(testModelledEntityCountForFixedMemoryLimit) {
LOG_DEBUG(<< "# partition = " << used.s_PartitionFields);
LOG_DEBUG(<< "Memory status = " << used.s_MemoryStatus);
LOG_DEBUG(<< "Memory usage = " << used.s_Usage);
LOG_DEBUG(<< "Actual memory usage = " << used.s_ActualMemoryUsage);
LOG_DEBUG(<< "Memory limit bytes = " << memoryLimit * 1024 * 1024);
BOOST_TEST_REQUIRE(used.s_PartitionFields >= testParam.s_ExpectedPartitionFields);
BOOST_TEST_REQUIRE(used.s_PartitionFields < 450);
@@ -437,6 +449,7 @@
memoryLimit * core::constants::BYTES_IN_MEGABYTES / 2, used.s_Usage,
memoryLimit * core::constants::BYTES_IN_MEGABYTES /
testParam.s_ExpectedPartitionUsageRelativeErrorDivisor);
BOOST_TEST_REQUIRE(used.s_Usage <= used.s_ActualMemoryUsage);
}

LOG_DEBUG(<< "**** Test over with bucketLength = " << testParam.s_BucketLength
@@ -479,13 +492,15 @@ BOOST_AUTO_TEST_CASE(testModelledEntityCountForFixedMemoryLimit) {
LOG_DEBUG(<< "# over = " << used.s_OverFields);
LOG_DEBUG(<< "Memory status = " << used.s_MemoryStatus);
LOG_DEBUG(<< "Memory usage = " << used.s_Usage);
LOG_DEBUG(<< "Actual memory usage = " << used.s_ActualMemoryUsage);
LOG_DEBUG(<< "Memory limit bytes = " << memoryLimit * 1024 * 1024);
BOOST_TEST_REQUIRE(used.s_OverFields > testParam.s_ExpectedOverFields);
BOOST_TEST_REQUIRE(used.s_OverFields <= 9000);
BOOST_REQUIRE_CLOSE_ABSOLUTE(
memoryLimit * core::constants::BYTES_IN_MEGABYTES / 2, used.s_Usage,
memoryLimit * core::constants::BYTES_IN_MEGABYTES /
testParam.s_ExpectedOverUsageRelativeErrorDivisor);
BOOST_TEST_REQUIRE(used.s_Usage <= used.s_ActualMemoryUsage);
}
}
}
60 changes: 32 additions & 28 deletions lib/api/unittest/CJsonOutputWriterTest.cc
@@ -1728,21 +1728,22 @@ BOOST_AUTO_TEST_CASE(testReportMemoryUsage) {
resourceUsage.s_AdjustedUsage = 2;
resourceUsage.s_PeakUsage = 3;
resourceUsage.s_AdjustedPeakUsage = 4;
resourceUsage.s_ByFields = 5;
resourceUsage.s_PartitionFields = 6;
resourceUsage.s_OverFields = 7;
resourceUsage.s_AllocationFailures = 8;
resourceUsage.s_ActualMemoryUsage = 5;
resourceUsage.s_ByFields = 6;
resourceUsage.s_PartitionFields = 7;
resourceUsage.s_OverFields = 8;
resourceUsage.s_AllocationFailures = 9;
resourceUsage.s_MemoryStatus = ml::model_t::E_MemoryStatusHardLimit;
resourceUsage.s_AssignmentMemoryBasis = ml::model_t::E_AssignmentBasisCurrentModelBytes;
resourceUsage.s_BucketStartTime = 9;
resourceUsage.s_BytesExceeded = 10;
resourceUsage.s_BytesMemoryLimit = 11;
resourceUsage.s_OverallCategorizerStats.s_CategorizedMessages = 12;
resourceUsage.s_OverallCategorizerStats.s_TotalCategories = 13;
resourceUsage.s_OverallCategorizerStats.s_FrequentCategories = 14;
resourceUsage.s_OverallCategorizerStats.s_RareCategories = 15;
resourceUsage.s_OverallCategorizerStats.s_DeadCategories = 16;
resourceUsage.s_OverallCategorizerStats.s_MemoryCategorizationFailures = 17;
resourceUsage.s_AssignmentMemoryBasis = ml::model_t::E_AssignmentBasisActualMemoryUsageBytes;
resourceUsage.s_BucketStartTime = 10;
resourceUsage.s_BytesExceeded = 11;
resourceUsage.s_BytesMemoryLimit = 12;
resourceUsage.s_OverallCategorizerStats.s_CategorizedMessages = 13;
resourceUsage.s_OverallCategorizerStats.s_TotalCategories = 14;
resourceUsage.s_OverallCategorizerStats.s_FrequentCategories = 15;
resourceUsage.s_OverallCategorizerStats.s_RareCategories = 16;
resourceUsage.s_OverallCategorizerStats.s_DeadCategories = 17;
resourceUsage.s_OverallCategorizerStats.s_MemoryCategorizationFailures = 18;
resourceUsage.s_OverallCategorizerStats.s_CategorizationStatus =
ml::model_t::E_CategorizationStatusWarn;

@@ -1770,44 +1771,47 @@ BOOST_AUTO_TEST_CASE(testReportMemoryUsage) {
BOOST_REQUIRE_EQUAL(2, sizeStats.at("model_bytes").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("peak_model_bytes"));
BOOST_REQUIRE_EQUAL(4, sizeStats.at("peak_model_bytes").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("actual_memory_usage_bytes"));
BOOST_REQUIRE_EQUAL(
5, sizeStats.at("actual_memory_usage_bytes").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("total_by_field_count"));
BOOST_REQUIRE_EQUAL(5, sizeStats.at("total_by_field_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(6, sizeStats.at("total_by_field_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("total_partition_field_count"));
BOOST_REQUIRE_EQUAL(
6, sizeStats.at("total_partition_field_count").to_number<std::int64_t>());
7, sizeStats.at("total_partition_field_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("total_over_field_count"));
BOOST_REQUIRE_EQUAL(7, sizeStats.at("total_over_field_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(8, sizeStats.at("total_over_field_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("bucket_allocation_failures_count"));
BOOST_REQUIRE_EQUAL(
8, sizeStats.at("bucket_allocation_failures_count").to_number<std::int64_t>());
9, sizeStats.at("bucket_allocation_failures_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("timestamp"));
BOOST_REQUIRE_EQUAL(9000, sizeStats.at("timestamp").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(10000, sizeStats.at("timestamp").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("memory_status"));
BOOST_REQUIRE_EQUAL("hard_limit", sizeStats.at("memory_status").as_string());
BOOST_TEST_REQUIRE(sizeStats.contains("assignment_memory_basis"));
BOOST_REQUIRE_EQUAL("current_model_bytes",
BOOST_REQUIRE_EQUAL("actual_memory_usage_bytes",
sizeStats.at("assignment_memory_basis").as_string());
BOOST_TEST_REQUIRE(sizeStats.contains("log_time"));
std::int64_t nowMs{ml::core::CTimeUtils::nowMs()};
BOOST_TEST_REQUIRE(nowMs >= sizeStats.at("log_time").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("model_bytes_exceeded"));
BOOST_REQUIRE_EQUAL(10, sizeStats.at("model_bytes_exceeded").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(11, sizeStats.at("model_bytes_exceeded").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("model_bytes_memory_limit"));
BOOST_REQUIRE_EQUAL(
11, sizeStats.at("model_bytes_memory_limit").to_number<std::int64_t>());
12, sizeStats.at("model_bytes_memory_limit").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("categorized_doc_count"));
BOOST_REQUIRE_EQUAL(12, sizeStats.at("categorized_doc_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(13, sizeStats.at("categorized_doc_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("total_category_count"));
BOOST_REQUIRE_EQUAL(13, sizeStats.at("total_category_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(14, sizeStats.at("total_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("frequent_category_count"));
BOOST_REQUIRE_EQUAL(
14, sizeStats.at("frequent_category_count").to_number<std::int64_t>());
15, sizeStats.at("frequent_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("rare_category_count"));
BOOST_REQUIRE_EQUAL(15, sizeStats.at("rare_category_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(16, sizeStats.at("rare_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("dead_category_count"));
BOOST_REQUIRE_EQUAL(16, sizeStats.at("dead_category_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(17, sizeStats.at("dead_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("failed_category_count"));
BOOST_REQUIRE_EQUAL(17, sizeStats.at("failed_category_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(18, sizeStats.at("failed_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("categorization_status"));
BOOST_REQUIRE_EQUAL("warn", sizeStats.at("categorization_status").as_string());
}
1 change: 1 addition & 0 deletions lib/api/unittest/CModelSnapshotJsonWriterTest.cc
@@ -36,6 +36,7 @@ BOOST_AUTO_TEST_CASE(testWrite) {
20000, // bytes used (adjusted)
30000, // peak bytes used
60000, // peak bytes used (adjusted)
409600, // Actual memory used (max rss)
3, // # by fields
1, // # partition fields
150, // # over fields
7 changes: 6 additions & 1 deletion lib/core/CProcessStats_Linux.cc
@@ -11,6 +11,7 @@
#include <core/CProcessStats.h>

#include <core/CLogger.h>
#include <core/CProgramCounters.h>
#include <core/CStringUtils.h>

#include <errno.h>
@@ -87,7 +88,11 @@ std::size_t CProcessStats::maxResidentSetSize() {
}

// ru_maxrss is in kilobytes
return static_cast<std::size_t>(rusage.ru_maxrss * 1024L);
auto maxRSS = static_cast<std::size_t>(rusage.ru_maxrss * 1024L);

CProgramCounters::counter(counter_t::E_TSADMaxResidentSetSize) = maxRSS;

return maxRSS;
}
}
}
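As a standalone illustration of the mechanism the Linux and macOS implementations rely on: `getrusage` reports the peak resident set size, but in different units per platform (kilobytes on Linux, bytes on macOS). A self-contained sketch, independent of the ml-cpp classes:

```cpp
#include <sys/resource.h>

#include <cerrno>
#include <cstddef>
#include <cstring>
#include <iostream>

// Sketch: ask the kernel for this process's peak resident set size, in bytes.
std::size_t maxResidentSetSizeBytes() {
    struct rusage usage;
    std::memset(&usage, 0, sizeof(usage));
    if (::getrusage(RUSAGE_SELF, &usage) != 0) {
        std::cerr << "getrusage failed: " << std::strerror(errno) << '\n';
        return 0;
    }
#ifdef __APPLE__
    return static_cast<std::size_t>(usage.ru_maxrss); // already bytes on macOS
#else
    return static_cast<std::size_t>(usage.ru_maxrss) * 1024; // kilobytes on Linux
#endif
}
```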
9 changes: 6 additions & 3 deletions lib/core/CProcessStats_MacOSX.cc
@@ -8,9 +8,11 @@
* compliance with the Elastic License 2.0 and the foregoing additional
* limitation.
*/
#include <core/CLogger.h>
#include <core/CProcessStats.h>

#include <core/CLogger.h>
#include <core/CProgramCounters.h>

#include <errno.h>
#include <fcntl.h>
#include <sys/resource.h>
@@ -31,9 +33,10 @@ std::size_t CProcessStats::maxResidentSetSize() {
LOG_DEBUG(<< "failed to get resource usage(getrusage): " << ::strerror(errno));
return 0;
}

auto maxRSS = static_cast<std::size_t>(rusage.ru_maxrss);
CProgramCounters::counter(counter_t::E_TSADMaxResidentSetSize) = maxRSS;
// ru_maxrss is in bytes
return static_cast<std::size_t>(rusage.ru_maxrss);
return maxRSS;
}
}
}
11 changes: 9 additions & 2 deletions lib/core/CProcessStats_Windows.cc
@@ -8,8 +8,10 @@
* compliance with the Elastic License 2.0 and the foregoing additional
* limitation.
*/
#include <core/CLogger.h>
#include <core/CProcessStats.h>

#include <core/CLogger.h>
#include <core/CProgramCounters.h>
#include <core/CWindowsError.h>

#include <core/WindowsSafe.h>
@@ -36,7 +38,12 @@ std::size_t CProcessStats::maxResidentSetSize() {
LOG_DEBUG(<< "Failed to retrieve memory info " << CWindowsError());
return 0;
}
return static_cast<std::size_t>(stats.PeakWorkingSetSize);

auto peakWorkingSetSize = static_cast<std::size_t>(stats.PeakWorkingSetSize);

CProgramCounters::counter(counter_t::E_TSADMaxResidentSetSize) = peakWorkingSetSize;

return peakWorkingSetSize;
}
}
}