diff --git a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md
index 8758d25bdce..05977b66bd9 100644
--- a/common/api-review/vertexai.api.md
+++ b/common/api-review/vertexai.api.md
@@ -553,8 +553,7 @@ export enum HarmSeverity {
export interface HybridParams {
inCloudParams?: ModelParams;
mode: InferenceMode;
- // Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts
- onDeviceParams?: LanguageModelCreateOptions;
+ onDeviceParams?: OnDeviceParams;
}
// @beta
@@ -718,6 +717,18 @@ export interface ObjectSchemaInterface extends SchemaInterface {
type: SchemaType.OBJECT;
}
+// @public
+export interface OnDeviceParams {
+ // Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts
+ //
+ // (undocumented)
+ createOptions?: LanguageModelCreateOptions;
+ // Warning: (ae-forgotten-export) The symbol "LanguageModelPromptOptions" needs to be exported by the entry point index.d.ts
+ //
+ // (undocumented)
+ promptOptions?: LanguageModelPromptOptions;
+}
+
// @public
export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart;
diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml
index c1a10429ad7..405d11bfc01 100644
--- a/docs-devsite/_toc.yaml
+++ b/docs-devsite/_toc.yaml
@@ -562,6 +562,8 @@ toc:
path: /docs/reference/js/vertexai.objectschema.md
- title: ObjectSchemaInterface
path: /docs/reference/js/vertexai.objectschemainterface.md
+ - title: OnDeviceParams
+ path: /docs/reference/js/vertexai.ondeviceparams.md
- title: PromptFeedback
path: /docs/reference/js/vertexai.promptfeedback.md
- title: RequestOptions
diff --git a/docs-devsite/vertexai.hybridparams.md b/docs-devsite/vertexai.hybridparams.md
index cf847b40fa7..9e1e4f9be15 100644
--- a/docs-devsite/vertexai.hybridparams.md
+++ b/docs-devsite/vertexai.hybridparams.md
@@ -24,7 +24,7 @@ export interface HybridParams
| --- | --- | --- |
| [inCloudParams](./vertexai.hybridparams.md#hybridparamsincloudparams) | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Optional. Specifies advanced params for in-cloud inference. |
| [mode](./vertexai.hybridparams.md#hybridparamsmode) | [InferenceMode](./vertexai.md#inferencemode) | Specifies on-device or in-cloud inference. Defaults to prefer on-device. |
-| [onDeviceParams](./vertexai.hybridparams.md#hybridparamsondeviceparams) | LanguageModelCreateOptions | Optional. Specifies advanced params for on-device inference. |
+| [onDeviceParams](./vertexai.hybridparams.md#hybridparamsondeviceparams) | [OnDeviceParams](./vertexai.ondeviceparams.md#ondeviceparams_interface) | Optional. Specifies advanced params for on-device inference. |
## HybridParams.inCloudParams
@@ -53,5 +53,5 @@ Optional. Specifies advanced params for on-device inference.
Signature:
```typescript
-onDeviceParams?: LanguageModelCreateOptions;
+onDeviceParams?: OnDeviceParams;
```
diff --git a/docs-devsite/vertexai.md b/docs-devsite/vertexai.md
index 46eafd41e80..034af9bae90 100644
--- a/docs-devsite/vertexai.md
+++ b/docs-devsite/vertexai.md
@@ -108,6 +108,7 @@ The Firebase AI Web SDK.
| [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). |
| [ObjectSchemaInterface](./vertexai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./vertexai.objectschema.md#objectschema_class) class. |
+| [OnDeviceParams](./vertexai.ondeviceparams.md#ondeviceparams_interface) | Encapsulates configuration for on-device inference. |
| [PromptFeedback](./vertexai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason
and the relevant safetyRatings
. |
| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). |
| [RetrievedContextAttribution](./vertexai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | |
diff --git a/docs-devsite/vertexai.ondeviceparams.md b/docs-devsite/vertexai.ondeviceparams.md
new file mode 100644
index 00000000000..3dae308f5e1
--- /dev/null
+++ b/docs-devsite/vertexai.ondeviceparams.md
@@ -0,0 +1,42 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# OnDeviceParams interface
+Encapsulates configuration for on-device inference.
+
+Signature:
+
+```typescript
+export interface OnDeviceParams
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [createOptions](./vertexai.ondeviceparams.md#ondeviceparamscreateoptions) | LanguageModelCreateOptions | |
+| [promptOptions](./vertexai.ondeviceparams.md#ondeviceparamspromptoptions) | LanguageModelPromptOptions | |
+
+## OnDeviceParams.createOptions
+
+Signature:
+
+```typescript
+createOptions?: LanguageModelCreateOptions;
+```
+
+## OnDeviceParams.promptOptions
+
+Signature:
+
+```typescript
+promptOptions?: LanguageModelPromptOptions;
+```
diff --git a/e2e/sample-apps/modular.js b/e2e/sample-apps/modular.js
index abcf829856b..1617b2aab60 100644
--- a/e2e/sample-apps/modular.js
+++ b/e2e/sample-apps/modular.js
@@ -58,7 +58,7 @@ import {
onValue,
off
} from 'firebase/database';
-import { getGenerativeModel, getVertexAI } from 'firebase/vertexai';
+import { getGenerativeModel, getVertexAI, Schema } from 'firebase/vertexai';
import { getDataConnect, DataConnect } from 'firebase/data-connect';
/**
@@ -313,23 +313,43 @@ function callPerformance(app) {
async function callVertexAI(app) {
console.log('[VERTEXAI] start');
const vertexAI = getVertexAI(app);
- const model = getGenerativeModel(vertexAI, {
- mode: 'prefer_on_device'
+
+ const jsonSchema = Schema.object({
+ properties: {
+ characters: Schema.array({
+ items: Schema.object({
+ properties: {
+ name: Schema.string(),
+ accessory: Schema.string(),
+ age: Schema.number(),
+ species: Schema.string()
+ },
+ optionalProperties: ['accessory']
+ })
+ })
+ }
});
- const singleResult = await model.generateContent([
- { text: 'describe this 20 x 20 px image in two words' },
- {
- inlineData: {
- mimeType: 'image/heic',
- data: 'AAAAGGZ0eXBoZWljAAAAAGhlaWNtaWYxAAAB7G1ldGEAAAAAAAAAIWhkbHIAAAAAAAAAAHBpY3QAAAAAAAAAAAAAAAAAAAAAJGRpbmYAAAAcZHJlZgAAAAAAAAABAAAADHVybCAAAAABAAAADnBpdG0AAAAAAAEAAAA4aWluZgAAAAAAAgAAABVpbmZlAgAAAAABAABodmMxAAAAABVpbmZlAgAAAQACAABFeGlmAAAAABppcmVmAAAAAAAAAA5jZHNjAAIAAQABAAABD2lwcnAAAADtaXBjbwAAABNjb2xybmNseAACAAIABoAAAAAMY2xsaQDLAEAAAAAUaXNwZQAAAAAAAAAUAAAADgAAAChjbGFwAAAAFAAAAAEAAAANAAAAAQAAAAAAAAAB/8AAAACAAAAAAAAJaXJvdAAAAAAQcGl4aQAAAAADCAgIAAAAcWh2Y0MBA3AAAACwAAAAAAAe8AD8/fj4AAALA6AAAQAXQAEMAf//A3AAAAMAsAAAAwAAAwAecCShAAEAI0IBAQNwAAADALAAAAMAAAMAHqAUIEHAjw1iHuRZVNwICBgCogABAAlEAcBhcshAUyQAAAAaaXBtYQAAAAAAAAABAAEHgQIDhIUGhwAAACxpbG9jAAAAAEQAAAIAAQAAAAEAAAJsAAABDAACAAAAAQAAAhQAAABYAAAAAW1kYXQAAAAAAAABdAAAAAZFeGlmAABNTQAqAAAACAAEARIAAwAAAAEAAQAAARoABQAAAAEAAAA+ARsABQAAAAEAAABGASgAAwAAAAEAAgAAAAAAAAAAAEgAAAABAAAASAAAAAEAAAEIKAGvoR8wDimTiRYUbALiHkU3ZdZ8DXAcSrRB9GARtVQHvnCE0LEyBGAyb5P4eYr6JAK5UxNX10WNlARq3ZpcGeVD+Xom6LodYasuZKKtDHCz/xnswOtC/ksZzVKhtWQqGvkXcsJnLYqWevNkacnccQ95jbHJBg9nXub69jAAN3xhNOXxjGSxaG9QvES5R7sYICEojRjLF5OB5K3v+okQAwfgWpz/u21ayideOgOZQLAyBkKOv7ymLNCagiPWTlHAuy/3qR1Q7m2ERFaxKIAbLSkIVO/P8m8+anKxhzhC//L8NMAUoF+Sf3aEH9O41fwLc+PlcbrDrjgY2EboD3cn9DyN32Rum2Ym'
+
+ const model = getGenerativeModel(vertexAI, {
+ // mode: 'prefer_on_device',
+ mode: 'only_in_cloud',
+ inCloudParams: {
+ generationConfig: {
+ responseMimeType: 'application/json',
+ responseSchema: jsonSchema
+ }
+ },
+ onDeviceParams: {
+ promptOptions: {
+ responseConstraint: jsonSchema
}
}
- ]);
- console.log(`Generated text: ${singleResult.response.text()}`);
- const chat = model.startChat();
- let chatResult = await chat.sendMessage('describe red in two words');
- chatResult = await chat.sendMessage('describe blue');
- console.log('Chat history:', await chat.getHistory());
+ });
+
+ const singleResult = await model.generateContent(
+ "For use in a children's card game, generate 10 animal-based characters."
+ );
+ console.log(`Generated text:`, JSON.parse(singleResult.response.text()));
console.log(`[VERTEXAI] end`);
}
diff --git a/e2e/webpack.config.js b/e2e/webpack.config.js
index b2e4c25f62e..b2c6a64f17e 100644
--- a/e2e/webpack.config.js
+++ b/e2e/webpack.config.js
@@ -88,7 +88,7 @@ module.exports = [
stats: {
colors: true
},
- devtool: 'source-map',
+ devtool: 'eval-source-map',
devServer: {
static: './build'
}
diff --git a/packages/vertexai/src/methods/chrome-adapter.test.ts b/packages/vertexai/src/methods/chrome-adapter.test.ts
index 550b87c9e0b..fbe7ec1a5c5 100644
--- a/packages/vertexai/src/methods/chrome-adapter.test.ts
+++ b/packages/vertexai/src/methods/chrome-adapter.test.ts
@@ -28,6 +28,7 @@ import {
} from '../types/language-model';
import { match, stub } from 'sinon';
import { GenerateContentRequest, AIErrorCode } from '../types';
+import { Schema } from '../api';
use(sinonChai);
use(chaiAsPromised);
@@ -85,14 +86,16 @@ describe('ChromeAdapter', () => {
languageModelProvider,
'availability'
).resolves(Availability.available);
- const onDeviceParams = {
+ const createOptions = {
// Explicitly sets expected inputs.
expectedInputs: [{ type: 'text' }]
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapter(
languageModelProvider,
'prefer_on_device',
- onDeviceParams
+ {
+ createOptions
+ }
);
await adapter.isAvailable({
contents: [
@@ -102,7 +105,7 @@ describe('ChromeAdapter', () => {
}
]
});
- expect(availabilityStub).to.have.been.calledWith(onDeviceParams);
+ expect(availabilityStub).to.have.been.calledWith(createOptions);
});
});
describe('isAvailable', () => {
@@ -210,20 +213,20 @@ describe('ChromeAdapter', () => {
const createStub = stub(languageModelProvider, 'create').resolves(
{} as LanguageModel
);
- const expectedOnDeviceParams = {
+ const createOptions = {
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapter(
languageModelProvider,
'prefer_on_device',
- expectedOnDeviceParams
+ { createOptions }
);
expect(
await adapter.isAvailable({
contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
})
).to.be.false;
- expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams);
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
});
it('avoids redundant downloads', async () => {
const languageModelProvider = {
@@ -310,21 +313,21 @@ describe('ChromeAdapter', () => {
);
const promptOutput = 'hi';
const promptStub = stub(languageModel, 'prompt').resolves(promptOutput);
- const expectedOnDeviceParams = {
+ const createOptions = {
systemPrompt: 'be yourself',
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapter(
languageModelProvider,
'prefer_on_device',
- expectedOnDeviceParams
+ { createOptions }
);
const request = {
contents: [{ role: 'user', parts: [{ text: 'anything' }] }]
} as GenerateContentRequest;
const response = await adapter.generateContent(request);
// Asserts initialization params are proxied.
- expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams);
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
// Asserts Vertex input type is mapped to Chrome type.
expect(promptStub).to.have.been.calledOnceWith([
{
@@ -356,14 +359,14 @@ describe('ChromeAdapter', () => {
);
const promptOutput = 'hi';
const promptStub = stub(languageModel, 'prompt').resolves(promptOutput);
- const expectedOnDeviceParams = {
+ const createOptions = {
systemPrompt: 'be yourself',
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapter(
languageModelProvider,
'prefer_on_device',
- expectedOnDeviceParams
+ { createOptions }
);
const request = {
contents: [
@@ -383,7 +386,7 @@ describe('ChromeAdapter', () => {
} as GenerateContentRequest;
const response = await adapter.generateContent(request);
// Asserts initialization params are proxied.
- expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams);
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
// Asserts Vertex input type is mapped to Chrome type.
expect(promptStub).to.have.been.calledOnceWith([
{
@@ -406,6 +409,40 @@ describe('ChromeAdapter', () => {
]
});
});
+ it('honors prompt options', async () => {
+ const languageModel = {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('')
+ } as LanguageModel;
+ const languageModelProvider = {
+ create: () => Promise.resolve(languageModel)
+ } as LanguageModel;
+ const promptOutput = '{}';
+ const promptStub = stub(languageModel, 'prompt').resolves(promptOutput);
+ const promptOptions = {
+ responseConstraint: Schema.object({
+ properties: {}
+ })
+ };
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { promptOptions }
+ );
+ const request = {
+ contents: [{ role: 'user', parts: [{ text: 'anything' }] }]
+ } as GenerateContentRequest;
+ await adapter.generateContent(request);
+ expect(promptStub).to.have.been.calledOnceWith(
+ [
+ {
+ type: 'text',
+ content: request.contents[0].parts[0].text
+ }
+ ],
+ promptOptions
+ );
+ });
});
describe('countTokens', () => {
it('counts tokens is not yet available', async () => {
@@ -462,19 +499,19 @@ describe('ChromeAdapter', () => {
}
})
);
- const expectedOnDeviceParams = {
+ const createOptions = {
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapter(
languageModelProvider,
'prefer_on_device',
- expectedOnDeviceParams
+ { createOptions }
);
const request = {
contents: [{ role: 'user', parts: [{ text: 'anything' }] }]
} as GenerateContentRequest;
const response = await adapter.generateContentStream(request);
- expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams);
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
expect(promptStub).to.have.been.calledOnceWith([
{
type: 'text',
@@ -505,13 +542,13 @@ describe('ChromeAdapter', () => {
}
})
);
- const expectedOnDeviceParams = {
+ const createOptions = {
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapter(
languageModelProvider,
'prefer_on_device',
- expectedOnDeviceParams
+ { createOptions }
);
const request = {
contents: [
@@ -530,7 +567,7 @@ describe('ChromeAdapter', () => {
]
} as GenerateContentRequest;
const response = await adapter.generateContentStream(request);
- expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams);
+ expect(createStub).to.have.been.calledOnceWith(createOptions);
expect(promptStub).to.have.been.calledOnceWith([
{
type: 'text',
@@ -546,6 +583,41 @@ describe('ChromeAdapter', () => {
`data: {"candidates":[{"content":{"role":"model","parts":[{"text":["${part}"]}]}}]}\n\n`
]);
});
+ it('honors prompt options', async () => {
+ const languageModel = {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ promptStreaming: p => new ReadableStream()
+ } as LanguageModel;
+ const languageModelProvider = {
+ create: () => Promise.resolve(languageModel)
+ } as LanguageModel;
+ const promptStub = stub(languageModel, 'promptStreaming').returns(
+ new ReadableStream()
+ );
+ const promptOptions = {
+ responseConstraint: Schema.object({
+ properties: {}
+ })
+ };
+ const adapter = new ChromeAdapter(
+ languageModelProvider,
+ 'prefer_on_device',
+ { promptOptions }
+ );
+ const request = {
+ contents: [{ role: 'user', parts: [{ text: 'anything' }] }]
+ } as GenerateContentRequest;
+ await adapter.generateContentStream(request);
+ expect(promptStub).to.have.been.calledOnceWith(
+ [
+ {
+ type: 'text',
+ content: request.contents[0].parts[0].text
+ }
+ ],
+ promptOptions
+ );
+ });
});
});
diff --git a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts
index 9ba674937a8..aa3709048a2 100644
--- a/packages/vertexai/src/methods/chrome-adapter.ts
+++ b/packages/vertexai/src/methods/chrome-adapter.ts
@@ -22,12 +22,12 @@ import {
GenerateContentRequest,
InferenceMode,
Part,
- AIErrorCode
+ AIErrorCode,
+ OnDeviceParams
} from '../types';
import {
Availability,
LanguageModel,
- LanguageModelCreateOptions,
LanguageModelMessageContent
} from '../types/language-model';
@@ -44,10 +44,13 @@ export class ChromeAdapter {
constructor(
private languageModelProvider?: LanguageModel,
private mode?: InferenceMode,
- private onDeviceParams: LanguageModelCreateOptions = {}
- ) {
- this.addImageTypeAsExpectedInput();
- }
+ private onDeviceParams: OnDeviceParams = {
+ createOptions: {
+ // Defaults to support image inputs for convenience.
+ expectedInputs: [{ type: 'image' }]
+ }
+ }
+ ) {}
/**
* Checks if a given request can be made on-device.
@@ -111,7 +114,10 @@ export class ChromeAdapter {
const contents = await Promise.all(
request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent)
);
- const text = await session.prompt(contents);
+ const text = await session.prompt(
+ contents,
+ this.onDeviceParams.promptOptions
+ );
return ChromeAdapter.toResponse(text);
}
@@ -132,7 +138,10 @@ export class ChromeAdapter {
const contents = await Promise.all(
request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent)
);
- const stream = await session.promptStreaming(contents);
+ const stream = await session.promptStreaming(
+ contents,
+ this.onDeviceParams.promptOptions
+ );
return ChromeAdapter.toStreamResponse(stream);
}
@@ -187,7 +196,7 @@ export class ChromeAdapter {
*/
  private async downloadIfAvailable(): Promise<Availability | undefined> {
const availability = await this.languageModelProvider?.availability(
- this.onDeviceParams
+ this.onDeviceParams.createOptions
);
if (availability === Availability.downloadable) {
@@ -212,7 +221,7 @@ export class ChromeAdapter {
}
this.isDownloading = true;
this.downloadPromise = this.languageModelProvider
- ?.create(this.onDeviceParams)
+ ?.create(this.onDeviceParams.createOptions)
.then(() => {
this.isDownloading = false;
});
@@ -263,7 +272,7 @@ export class ChromeAdapter {
);
}
const newSession = await this.languageModelProvider.create(
- this.onDeviceParams
+ this.onDeviceParams.createOptions
);
if (this.oldSession) {
this.oldSession.destroy();
@@ -273,11 +282,6 @@ export class ChromeAdapter {
return newSession;
}
- private addImageTypeAsExpectedInput(): void {
- // Defaults to support image inputs for convenience.
- this.onDeviceParams.expectedInputs ??= [{ type: 'image' }];
- }
-
/**
* Formats string returned by Chrome as a {@link Response} returned by Vertex.
*/
diff --git a/packages/vertexai/src/types/language-model.ts b/packages/vertexai/src/types/language-model.ts
index cd84f22dbdb..22916e7ff96 100644
--- a/packages/vertexai/src/types/language-model.ts
+++ b/packages/vertexai/src/types/language-model.ts
@@ -49,8 +49,9 @@ export interface LanguageModelCreateOptions
systemPrompt?: string;
initialPrompts?: LanguageModelInitialPrompts;
}
-interface LanguageModelPromptOptions {
- signal?: AbortSignal;
+export interface LanguageModelPromptOptions {
+ responseConstraint?: object;
+ // TODO: Restore AbortSignal once the API is defined.
}
interface LanguageModelExpectedInput {
type: LanguageModelMessageType;
diff --git a/packages/vertexai/src/types/requests.ts b/packages/vertexai/src/types/requests.ts
index 36700b5a936..e9d5716e3b4 100644
--- a/packages/vertexai/src/types/requests.ts
+++ b/packages/vertexai/src/types/requests.ts
@@ -17,7 +17,10 @@
import { TypedSchema } from '../requests/schema-builder';
import { Content, Part } from './content';
-import { LanguageModelCreateOptions } from './language-model';
+import {
+ LanguageModelCreateOptions,
+ LanguageModelPromptOptions
+} from './language-model';
import {
FunctionCallingMode,
HarmBlockMethod,
@@ -220,6 +223,14 @@ export interface FunctionCallingConfig {
allowedFunctionNames?: string[];
}
+/**
+ * Encapsulates configuration for on-device inference.
+ */
+export interface OnDeviceParams {
+ createOptions?: LanguageModelCreateOptions;
+ promptOptions?: LanguageModelPromptOptions;
+}
+
/**
* Toggles hybrid inference.
*/
@@ -231,7 +242,7 @@ export interface HybridParams {
/**
* Optional. Specifies advanced params for on-device inference.
*/
- onDeviceParams?: LanguageModelCreateOptions;
+ onDeviceParams?: OnDeviceParams;
/**
* Optional. Specifies advanced params for in-cloud inference.
*/