@@ -43,7 +43,7 @@ void TKafkaProduceActor::LogEvent(IEventHandle& ev) {
 void TKafkaProduceActor::SendMetrics(const TString& topicName, size_t delta, const TString& name, const TActorContext& ctx) {
     auto topicWithoutDb = GetTopicNameWithoutDb(Context->DatabasePath, topicName);
     ctx.Send(MakeKafkaMetricsServiceID(), new TEvKafka::TEvUpdateCounter(delta, BuildLabels(Context, "", topicWithoutDb, TStringBuilder() << "api.kafka.produce." << name, "")));
-    ctx.Send(MakeKafkaMetricsServiceID(), new TEvKafka::TEvUpdateCounter(delta, BuildLabels(Context, "", topicWithoutDb, "api.kafka.produce.total_messages", "")));
+    ctx.Send(MakeKafkaMetricsServiceID(), new TEvKafka::TEvUpdateCounter(delta, BuildLabels(Context, "", topicWithoutDb, "api.kafka.produce.total_messages", "")));
 }

 void TKafkaProduceActor::Bootstrap(const NActors::TActorContext& /*ctx*/) {
@@ -82,7 +82,7 @@ void TKafkaProduceActor::PassAway() {
 void TKafkaProduceActor::CleanTopics(const TActorContext& ctx) {
     const auto now = ctx.Now();

-    std::map<TString, TTopicInfo> newTopics;
+    std::map<TString, TTopicInfo> newTopics;
     for (auto& [topicPath, topicInfo] : Topics) {
         if (topicInfo.ExpirationTime > now) {
             newTopics[topicPath] = std::move(topicInfo);
@@ -242,7 +242,8 @@ size_t TKafkaProduceActor::EnqueueInitialization() {
 THolder<TEvPartitionWriter::TEvWriteRequest> Convert(const TProduceRequestData::TTopicProduceData::TPartitionProduceData& data,
                                                      const TString& topicName,
                                                      ui64 cookie,
-                                                     const TString& clientDC) {
+                                                     const TString& clientDC,
+                                                     bool ruPerRequest) {
     auto ev = MakeHolder<TEvPartitionWriter::TEvWriteRequest>();
     auto& request = ev->Record;

@@ -254,6 +255,9 @@ THolder<TEvPartitionWriter::TEvWriteRequest> Convert(const TProduceRequestData::
     partitionRequest->SetPartition(data.Index);
     // partitionRequest->SetCmdWriteOffset();
     partitionRequest->SetCookie(cookie);
+    if (ruPerRequest) {
+        partitionRequest->SetMeteringV2Enabled(true);
+    }

     ui64 totalSize = 0;

@@ -317,11 +321,11 @@ void TKafkaProduceActor::ProcessRequest(TPendingRequest::TPtr pendingRequest, co
     pendingRequest->StartTime = ctx.Now();

     size_t position = 0;
+    bool ruPerRequest = Context->Config.GetMeteringV2Enabled();
     for (const auto& topicData : r->TopicData) {
         const TString& topicPath = NormalizePath(Context->DatabasePath, *topicData.Name);
         for (const auto& partitionData : topicData.PartitionData) {
             const auto partitionId = partitionData.Index;
-
             auto writer = PartitionWriter(topicPath, partitionId, ctx);
             if (OK == writer.first) {
                 auto ownCookie = ++Cookie;
@@ -334,7 +338,8 @@ void TKafkaProduceActor::ProcessRequest(TPendingRequest::TPtr pendingRequest, co
                 pendingRequest->WaitAcceptingCookies.insert(ownCookie);
                 pendingRequest->WaitResultCookies.insert(ownCookie);

-                auto ev = Convert(partitionData, *topicData.Name, ownCookie, ClientDC);
+                auto ev = Convert(partitionData, *topicData.Name, ownCookie, ClientDC, ruPerRequest);
+                ruPerRequest = false;

                 Send(writer.second, std::move(ev));
             } else {
@@ -441,7 +446,7 @@ void TKafkaProduceActor::SendResults(const TActorContext& ctx) {
     // We send the results in the order of receipt of the request
     while (!PendingRequests.empty()) {
        auto pendingRequest = PendingRequests.front();
-
+
        // We send the response by timeout. This is possible, for example, if the event was lost or the PartitionWrite died.
        bool expired = expireTime > pendingRequest->StartTime;

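The diff threads a ruPerRequest flag through ProcessRequest so that only the first partition write built for a Kafka produce request is marked with SetMeteringV2Enabled(true); the flag is cleared immediately after the first Convert call, so every subsequent write request in the same produce call goes out unflagged. Below is a minimal, self-contained C++ sketch of that once-per-request pattern. The TPartitionWriteRequest struct and BuildWriteRequests helper are illustrative stand-ins, not types from the YDB codebase.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for the per-partition write request built by Convert().
struct TPartitionWriteRequest {
    std::string Topic;
    int Partition = 0;
    bool MeteringV2Enabled = false;
};

// Build one write request per partition; only the first request of the whole
// produce call carries the metering flag, mirroring how ruPerRequest is
// consumed once and then reset to false in the diff above.
std::vector<TPartitionWriteRequest> BuildWriteRequests(
        const std::vector<std::pair<std::string, int>>& partitions,
        bool meteringV2FromConfig) {
    std::vector<TPartitionWriteRequest> requests;
    bool ruPerRequest = meteringV2FromConfig;
    for (const auto& [topic, partition] : partitions) {
        requests.push_back({topic, partition, ruPerRequest});
        ruPerRequest = false; // charge request units only once per produce request
    }
    return requests;
}

int main() {
    auto requests = BuildWriteRequests({{"topic-a", 0}, {"topic-a", 1}, {"topic-b", 0}}, true);
    for (const auto& req : requests) {
        std::cout << req.Topic << "/" << req.Partition
                  << " meteringV2=" << std::boolalpha << req.MeteringV2Enabled << "\n";
    }
    return 0;
}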