
Commit b86db56

andrewheard authored and paulb777 committed
Add toolUsePromptTokenCount and toolUsePromptTokensDetails
1 parent d5fcfa7 commit b86db56

File tree

1 file changed: +13 -0 lines changed


FirebaseAI/Sources/GenerateContentResponse.swift

Lines changed: 13 additions & 0 deletions
@@ -26,6 +26,9 @@ public struct GenerateContentResponse: Sendable {
     /// The total number of tokens across the generated response candidates.
     public let candidatesTokenCount: Int
 
+    /// The number of tokens present in tool-use prompt(s).
+    public let toolUsePromptTokenCount: Int
+
     /// The number of tokens used by the model's internal "thinking" process.
     ///
     /// For models that support thinking (like Gemini 2.5 Pro and Flash), this represents the actual
@@ -44,6 +47,9 @@ public struct GenerateContentResponse: Sendable {
 
     /// The breakdown, by modality, of how many tokens are consumed by the candidates
     public let candidatesTokensDetails: [ModalityTokenCount]
+
+    /// List of modalities that were processed for tool-use request inputs.
+    public let toolUsePromptTokensDetails: [ModalityTokenCount]
   }
 
   /// A list of candidate response content, ordered from best to worst.
@@ -474,17 +480,21 @@ extension GenerateContentResponse.UsageMetadata: Decodable {
   enum CodingKeys: CodingKey {
     case promptTokenCount
     case candidatesTokenCount
+    case toolUsePromptTokenCount
     case thoughtsTokenCount
     case totalTokenCount
     case promptTokensDetails
     case candidatesTokensDetails
+    case toolUsePromptTokensDetails
   }
 
   public init(from decoder: any Decoder) throws {
     let container = try decoder.container(keyedBy: CodingKeys.self)
     promptTokenCount = try container.decodeIfPresent(Int.self, forKey: .promptTokenCount) ?? 0
     candidatesTokenCount =
       try container.decodeIfPresent(Int.self, forKey: .candidatesTokenCount) ?? 0
+    toolUsePromptTokenCount =
+      try container.decodeIfPresent(Int.self, forKey: .toolUsePromptTokenCount) ?? 0
     thoughtsTokenCount = try container.decodeIfPresent(Int.self, forKey: .thoughtsTokenCount) ?? 0
     totalTokenCount = try container.decodeIfPresent(Int.self, forKey: .totalTokenCount) ?? 0
     promptTokensDetails =
@@ -493,6 +503,9 @@ extension GenerateContentResponse.UsageMetadata: Decodable {
       [ModalityTokenCount].self,
       forKey: .candidatesTokensDetails
     ) ?? []
+    toolUsePromptTokensDetails = try container.decodeIfPresent(
+      [ModalityTokenCount].self, forKey: .toolUsePromptTokensDetails
+    ) ?? []
   }
 }
 