@@ -26,6 +26,9 @@ public struct GenerateContentResponse: Sendable {
     /// The total number of tokens across the generated response candidates.
     public let candidatesTokenCount: Int
 
+    /// The number of tokens present in tool-use prompt(s).
+    public let toolUsePromptTokenCount: Int
+
     /// The number of tokens used by the model's internal "thinking" process.
     ///
     /// For models that support thinking (like Gemini 2.5 Pro and Flash), this represents the actual
@@ -44,6 +47,9 @@ public struct GenerateContentResponse: Sendable {
 
     /// The breakdown, by modality, of how many tokens are consumed by the candidates
     public let candidatesTokensDetails: [ModalityTokenCount]
+
+    /// List of modalities that were processed for tool-use request inputs.
+    public let toolUsePromptTokensDetails: [ModalityTokenCount]
   }
 
   /// A list of candidate response content, ordered from best to worst.
@@ -474,17 +480,21 @@ extension GenerateContentResponse.UsageMetadata: Decodable {
   enum CodingKeys: CodingKey {
     case promptTokenCount
     case candidatesTokenCount
+    case toolUsePromptTokenCount
     case thoughtsTokenCount
     case totalTokenCount
     case promptTokensDetails
     case candidatesTokensDetails
+    case toolUsePromptTokensDetails
   }
 
   public init(from decoder: any Decoder) throws {
     let container = try decoder.container(keyedBy: CodingKeys.self)
     promptTokenCount = try container.decodeIfPresent(Int.self, forKey: .promptTokenCount) ?? 0
     candidatesTokenCount =
       try container.decodeIfPresent(Int.self, forKey: .candidatesTokenCount) ?? 0
+    toolUsePromptTokenCount =
+      try container.decodeIfPresent(Int.self, forKey: .toolUsePromptTokenCount) ?? 0
     thoughtsTokenCount = try container.decodeIfPresent(Int.self, forKey: .thoughtsTokenCount) ?? 0
     totalTokenCount = try container.decodeIfPresent(Int.self, forKey: .totalTokenCount) ?? 0
     promptTokensDetails =
@@ -493,6 +503,9 @@ extension GenerateContentResponse.UsageMetadata: Decodable {
       [ModalityTokenCount].self,
       forKey: .candidatesTokensDetails
     ) ?? []
+    toolUsePromptTokensDetails = try container.decodeIfPresent(
+      [ModalityTokenCount].self, forKey: .toolUsePromptTokensDetails
+    ) ?? []
   }
 }
 
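
For context, a minimal sketch of how a caller might read the two new fields after this change. This is not part of the commit; the `FirebaseAI` module name, the `usageMetadata` property on `GenerateContentResponse`, the `logToolUseTokens` helper, and the `modality` / `tokenCount` accessors on `ModalityTokenCount` are assumed from the surrounding SDK.

```swift
import FirebaseAI // assumption: these types ship in the FirebaseAI module

/// Sketch: logging the tool-use token counts introduced by this change.
/// `logToolUseTokens` is a hypothetical helper, not part of the SDK.
func logToolUseTokens(for response: GenerateContentResponse) {
  guard let usage = response.usageMetadata else { return }

  // Defaults to 0 when the service omits the field (see `decodeIfPresent ?? 0` above).
  print("Tool-use prompt tokens: \(usage.toolUsePromptTokenCount)")

  // Defaults to [] when the service omits the field (see `decodeIfPresent ?? []` above).
  for detail in usage.toolUsePromptTokensDetails {
    print("\(detail.modality): \(detail.tokenCount) tokens")
  }
}
```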