summaryrefslogtreecommitdiff
path: root/frontend-old/node_modules/@firebase/ai/dist/src/types
diff options
context:
space:
mode:
Diffstat (limited to 'frontend-old/node_modules/@firebase/ai/dist/src/types')
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/chrome-adapter.d.ts56
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/content.d.ts265
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/enums.d.ts398
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/error.d.ts89
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/googleai.d.ts57
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/index.d.ts18
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/internal.d.ts134
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/requests.d.ts245
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/responses.d.ts79
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/index.d.ts26
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/internal.d.ts33
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/language-model.d.ts107
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/live-responses.d.ts79
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/requests.d.ts464
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/responses.d.ts582
-rw-r--r--frontend-old/node_modules/@firebase/ai/dist/src/types/schema.d.ts139
16 files changed, 2771 insertions, 0 deletions
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/chrome-adapter.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/chrome-adapter.d.ts
new file mode 100644
index 0000000..6092353
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/chrome-adapter.d.ts
@@ -0,0 +1,56 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { CountTokensRequest, GenerateContentRequest } from './requests';
+/**
+ * Defines an inference "backend" that uses Chrome's on-device model,
+ * and encapsulates logic for detecting when on-device inference is
+ * possible.
+ *
+ * These methods should not be called directly by the user.
+ *
+ * @beta
+ */
+export interface ChromeAdapter {
+ /**
+ * Checks if the on-device model is capable of handling a given
+ * request.
+ * @param request - A potential request to be passed to the model.
+ */
+ isAvailable(request: GenerateContentRequest): Promise<boolean>;
+ /**
+ * Generates content using on-device inference.
+ *
+ * @remarks
+ * This is comparable to {@link GenerativeModel.generateContent} for generating
+ * content using in-cloud inference.
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
+ */
+ generateContent(request: GenerateContentRequest): Promise<Response>;
+ /**
+ * Generates a content stream using on-device inference.
+ *
+ * @remarks
+ * This is comparable to {@link GenerativeModel.generateContentStream} for generating
+ * a content stream using in-cloud inference.
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
+ */
+ generateContentStream(request: GenerateContentRequest): Promise<Response>;
+ /**
+ * @internal
+ */
+ countTokens(request: CountTokensRequest): Promise<Response>;
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/content.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/content.d.ts
new file mode 100644
index 0000000..a760547
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/content.d.ts
@@ -0,0 +1,265 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { Language, Outcome, Role } from './enums';
+/**
+ * Content type for both prompts and response candidates.
+ * @public
+ */
+export interface Content {
+ role: Role;
+ parts: Part[];
+}
+/**
+ * Content part - includes text, image/video, or function call/response
+ * part types.
+ * @public
+ */
+export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart;
+/**
+ * Content part interface if the part represents a text string.
+ * @public
+ */
+export interface TextPart {
+ text: string;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: string;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+/**
+ * Content part interface if the part represents an image.
+ * @public
+ */
+export interface InlineDataPart {
+ text?: never;
+ inlineData: GenerativeContentBlob;
+ functionCall?: never;
+ functionResponse?: never;
+ /**
+ * Applicable if `inlineData` is a video.
+ */
+ videoMetadata?: VideoMetadata;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+/**
+ * Describes the input video content.
+ * @public
+ */
+export interface VideoMetadata {
+ /**
+ * The start offset of the video in
+ * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
+ */
+ startOffset: string;
+ /**
+ * The end offset of the video in
+ * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
+ */
+ endOffset: string;
+}
+/**
+ * Content part interface if the part represents a {@link FunctionCall}.
+ * @public
+ */
+export interface FunctionCallPart {
+ text?: never;
+ inlineData?: never;
+ functionCall: FunctionCall;
+ functionResponse?: never;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+/**
+ * Content part interface if the part represents {@link FunctionResponse}.
+ * @public
+ */
+export interface FunctionResponsePart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse: FunctionResponse;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+/**
+ * Content part interface if the part represents {@link FileData}
+ * @public
+ */
+export interface FileDataPart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ fileData: FileData;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+/**
+ * Represents the code that is executed by the model.
+ *
+ * @beta
+ */
+export interface ExecutableCodePart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ fileData: never;
+ thought?: never;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: ExecutableCode;
+ codeExecutionResult?: never;
+}
+/**
+ * Represents the code execution result from the model.
+ *
+ * @beta
+ */
+export interface CodeExecutionResultPart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ fileData: never;
+ thought?: never;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: CodeExecutionResult;
+}
+/**
+ * An interface for executable code returned by the model.
+ *
+ * @beta
+ */
+export interface ExecutableCode {
+ /**
+ * The programming language of the code.
+ */
+ language?: Language;
+ /**
+ * The source code to be executed.
+ */
+ code?: string;
+}
+/**
+ * The results of code execution run by the model.
+ *
+ * @beta
+ */
+export interface CodeExecutionResult {
+ /**
+ * The result of the code execution.
+ */
+ outcome?: Outcome;
+ /**
+ * The output from the code execution, or an error message
+ * if it failed.
+ */
+ output?: string;
+}
+/**
+ * A predicted {@link FunctionCall} returned from the model
+ * that contains a string representing the {@link FunctionDeclaration.name}
+ * and a structured JSON object containing the parameters and their values.
+ * @public
+ */
+export interface FunctionCall {
+ /**
+ * The id of the function call. This must be sent back in the associated {@link FunctionResponse}.
+ *
+ *
+ * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
+     * When using the Vertex AI Gemini API ({@link VertexAIBackend}), this property will be
+ * `undefined`.
+ */
+ id?: string;
+ name: string;
+ args: object;
+}
+/**
+ * The result output from a {@link FunctionCall} that contains a string
+ * representing the {@link FunctionDeclaration.name}
+ * and a structured JSON object containing any output
+ * from the function is used as context to the model.
+ * This should contain the result of a {@link FunctionCall}
+ * made based on model prediction.
+ * @public
+ */
+export interface FunctionResponse {
+ /**
+ * The id of the {@link FunctionCall}.
+ *
+ * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
+     * When using the Vertex AI Gemini API ({@link VertexAIBackend}), this property will be
+ * `undefined`.
+ */
+ id?: string;
+ name: string;
+ response: object;
+}
+/**
+ * Interface for sending an image.
+ * @public
+ */
+export interface GenerativeContentBlob {
+ mimeType: string;
+ /**
+ * Image as a base64 string.
+ */
+ data: string;
+}
+/**
+ * Data pointing to a file uploaded on Google Cloud Storage.
+ * @public
+ */
+export interface FileData {
+ mimeType: string;
+ fileUri: string;
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/enums.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/enums.d.ts
new file mode 100644
index 0000000..170a299
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/enums.d.ts
@@ -0,0 +1,398 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Role is the producer of the content.
+ * @public
+ */
+export type Role = (typeof POSSIBLE_ROLES)[number];
+/**
+ * Possible roles.
+ * @public
+ */
+export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"];
+/**
+ * Harm categories that would cause prompts or candidates to be blocked.
+ * @public
+ */
+export declare const HarmCategory: {
+ readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
+ readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
+ readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT";
+ readonly HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT";
+};
+/**
+ * Harm categories that would cause prompts or candidates to be blocked.
+ * @public
+ */
+export type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];
+/**
+ * Threshold above which a prompt or candidate will be blocked.
+ * @public
+ */
+export declare const HarmBlockThreshold: {
+ /**
+ * Content with `NEGLIGIBLE` will be allowed.
+ */
+ readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
+ /**
+ * Content with `NEGLIGIBLE` and `LOW` will be allowed.
+ */
+ readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
+ /**
+ * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.
+ */
+ readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
+ /**
+ * All content will be allowed.
+ */
+ readonly BLOCK_NONE: "BLOCK_NONE";
+ /**
+ * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
+ * to the {@link (HarmCategory:type)} will not be present in the response.
+ */
+ readonly OFF: "OFF";
+};
+/**
+ * Threshold above which a prompt or candidate will be blocked.
+ * @public
+ */
+export type HarmBlockThreshold = (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];
+/**
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
+ *
+ * @public
+ */
+export declare const HarmBlockMethod: {
+ /**
+ * The harm block method uses both probability and severity scores.
+ */
+ readonly SEVERITY: "SEVERITY";
+ /**
+ * The harm block method uses the probability score.
+ */
+ readonly PROBABILITY: "PROBABILITY";
+};
+/**
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
+ *
+ * @public
+ */
+export type HarmBlockMethod = (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];
+/**
+ * Probability that a prompt or candidate matches a harm category.
+ * @public
+ */
+export declare const HarmProbability: {
+ /**
+ * Content has a negligible chance of being unsafe.
+ */
+ readonly NEGLIGIBLE: "NEGLIGIBLE";
+ /**
+ * Content has a low chance of being unsafe.
+ */
+ readonly LOW: "LOW";
+ /**
+ * Content has a medium chance of being unsafe.
+ */
+ readonly MEDIUM: "MEDIUM";
+ /**
+ * Content has a high chance of being unsafe.
+ */
+ readonly HIGH: "HIGH";
+};
+/**
+ * Probability that a prompt or candidate matches a harm category.
+ * @public
+ */
+export type HarmProbability = (typeof HarmProbability)[keyof typeof HarmProbability];
+/**
+ * Harm severity levels.
+ * @public
+ */
+export declare const HarmSeverity: {
+ /**
+ * Negligible level of harm severity.
+ */
+ readonly HARM_SEVERITY_NEGLIGIBLE: "HARM_SEVERITY_NEGLIGIBLE";
+ /**
+ * Low level of harm severity.
+ */
+ readonly HARM_SEVERITY_LOW: "HARM_SEVERITY_LOW";
+ /**
+ * Medium level of harm severity.
+ */
+ readonly HARM_SEVERITY_MEDIUM: "HARM_SEVERITY_MEDIUM";
+ /**
+ * High level of harm severity.
+ */
+ readonly HARM_SEVERITY_HIGH: "HARM_SEVERITY_HIGH";
+ /**
+ * Harm severity is not supported.
+ *
+ * @remarks
+ * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.
+ */
+ readonly HARM_SEVERITY_UNSUPPORTED: "HARM_SEVERITY_UNSUPPORTED";
+};
+/**
+ * Harm severity levels.
+ * @public
+ */
+export type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];
+/**
+ * Reason that a prompt was blocked.
+ * @public
+ */
+export declare const BlockReason: {
+ /**
+ * Content was blocked by safety settings.
+ */
+ readonly SAFETY: "SAFETY";
+ /**
+ * Content was blocked, but the reason is uncategorized.
+ */
+ readonly OTHER: "OTHER";
+ /**
+ * Content was blocked because it contained terms from the terminology blocklist.
+ */
+ readonly BLOCKLIST: "BLOCKLIST";
+ /**
+ * Content was blocked due to prohibited content.
+ */
+ readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
+};
+/**
+ * Reason that a prompt was blocked.
+ * @public
+ */
+export type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];
+/**
+ * Reason that a candidate finished.
+ * @public
+ */
+export declare const FinishReason: {
+ /**
+ * Natural stop point of the model or provided stop sequence.
+ */
+ readonly STOP: "STOP";
+ /**
+ * The maximum number of tokens as specified in the request was reached.
+ */
+ readonly MAX_TOKENS: "MAX_TOKENS";
+ /**
+ * The candidate content was flagged for safety reasons.
+ */
+ readonly SAFETY: "SAFETY";
+ /**
+ * The candidate content was flagged for recitation reasons.
+ */
+ readonly RECITATION: "RECITATION";
+ /**
+ * Unknown reason.
+ */
+ readonly OTHER: "OTHER";
+ /**
+ * The candidate content contained forbidden terms.
+ */
+ readonly BLOCKLIST: "BLOCKLIST";
+ /**
+ * The candidate content potentially contained prohibited content.
+ */
+ readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
+ /**
+ * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).
+ */
+ readonly SPII: "SPII";
+ /**
+ * The function call generated by the model was invalid.
+ */
+ readonly MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL";
+};
+/**
+ * Reason that a candidate finished.
+ * @public
+ */
+export type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];
+/**
+ * @public
+ */
+export declare const FunctionCallingMode: {
+ /**
+ * Default model behavior; model decides to predict either a function call
+ * or a natural language response.
+ */
+ readonly AUTO: "AUTO";
+ /**
+ * Model is constrained to always predicting a function call only.
+ * If `allowed_function_names` is set, the predicted function call will be
+ * limited to any one of `allowed_function_names`, else the predicted
+ * function call will be any one of the provided `function_declarations`.
+ */
+ readonly ANY: "ANY";
+ /**
+ * Model will not predict any function call. Model behavior is same as when
+ * not passing any function declarations.
+ */
+ readonly NONE: "NONE";
+};
+/**
+ * @public
+ */
+export type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];
+/**
+ * Content part modality.
+ * @public
+ */
+export declare const Modality: {
+ /**
+ * Unspecified modality.
+ */
+ readonly MODALITY_UNSPECIFIED: "MODALITY_UNSPECIFIED";
+ /**
+ * Plain text.
+ */
+ readonly TEXT: "TEXT";
+ /**
+ * Image.
+ */
+ readonly IMAGE: "IMAGE";
+ /**
+ * Video.
+ */
+ readonly VIDEO: "VIDEO";
+ /**
+ * Audio.
+ */
+ readonly AUDIO: "AUDIO";
+ /**
+ * Document (for example, PDF).
+ */
+ readonly DOCUMENT: "DOCUMENT";
+};
+/**
+ * Content part modality.
+ * @public
+ */
+export type Modality = (typeof Modality)[keyof typeof Modality];
+/**
+ * Generation modalities to be returned in generation responses.
+ *
+ * @beta
+ */
+export declare const ResponseModality: {
+ /**
+ * Text.
+ * @beta
+ */
+ readonly TEXT: "TEXT";
+ /**
+ * Image.
+ * @beta
+ */
+ readonly IMAGE: "IMAGE";
+ /**
+ * Audio.
+ * @beta
+ */
+ readonly AUDIO: "AUDIO";
+};
+/**
+ * Generation modalities to be returned in generation responses.
+ *
+ * @beta
+ */
+export type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality];
+/**
+ * Determines whether inference happens on-device or in-cloud.
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
+ * @beta
+ */
+export declare const InferenceMode: {
+ readonly PREFER_ON_DEVICE: "prefer_on_device";
+ readonly ONLY_ON_DEVICE: "only_on_device";
+ readonly ONLY_IN_CLOUD: "only_in_cloud";
+ readonly PREFER_IN_CLOUD: "prefer_in_cloud";
+};
+/**
+ * Determines whether inference happens on-device or in-cloud.
+ *
+ * @beta
+ */
+export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
+/**
+ * Indicates whether inference happened on-device or in-cloud.
+ *
+ * @beta
+ */
+export declare const InferenceSource: {
+ readonly ON_DEVICE: "on_device";
+ readonly IN_CLOUD: "in_cloud";
+};
+/**
+ * Indicates whether inference happened on-device or in-cloud.
+ *
+ * @beta
+ */
+export type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource];
+/**
+ * Represents the result of the code execution.
+ *
+ * @beta
+ */
+export declare const Outcome: {
+ UNSPECIFIED: string;
+ OK: string;
+ FAILED: string;
+ DEADLINE_EXCEEDED: string;
+};
+/**
+ * Represents the result of the code execution.
+ *
+ * @beta
+ */
+export type Outcome = (typeof Outcome)[keyof typeof Outcome];
+/**
+ * The programming language of the code.
+ *
+ * @beta
+ */
+export declare const Language: {
+ UNSPECIFIED: string;
+ PYTHON: string;
+};
+/**
+ * The programming language of the code.
+ *
+ * @beta
+ */
+export type Language = (typeof Language)[keyof typeof Language];
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/error.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/error.d.ts
new file mode 100644
index 0000000..82e6bb4
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/error.d.ts
@@ -0,0 +1,89 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { GenerateContentResponse } from './responses';
+/**
+ * Details object that may be included in an error response.
+ *
+ * @public
+ */
+export interface ErrorDetails {
+ '@type'?: string;
+ /** The reason for the error. */
+ reason?: string;
+ /** The domain where the error occurred. */
+ domain?: string;
+ /** Additional metadata about the error. */
+ metadata?: Record<string, unknown>;
+ /** Any other relevant information about the error. */
+ [key: string]: unknown;
+}
+/**
+ * Details object that contains data originating from a bad HTTP response.
+ *
+ * @public
+ */
+export interface CustomErrorData {
+ /** HTTP status code of the error response. */
+ status?: number;
+ /** HTTP status text of the error response. */
+ statusText?: string;
+ /** Response from a {@link GenerateContentRequest} */
+ response?: GenerateContentResponse;
+ /** Optional additional details about the error. */
+ errorDetails?: ErrorDetails[];
+}
+/**
+ * Standardized error codes that {@link AIError} can have.
+ *
+ * @public
+ */
+export declare const AIErrorCode: {
+ /** A generic error occurred. */
+ readonly ERROR: "error";
+ /** An error occurred in a request. */
+ readonly REQUEST_ERROR: "request-error";
+ /** An error occurred in a response. */
+ readonly RESPONSE_ERROR: "response-error";
+ /** An error occurred while performing a fetch. */
+ readonly FETCH_ERROR: "fetch-error";
+ /** An error occurred because an operation was attempted on a closed session. */
+ readonly SESSION_CLOSED: "session-closed";
+ /** An error associated with a Content object. */
+ readonly INVALID_CONTENT: "invalid-content";
+ /** An error due to the Firebase API not being enabled in the Console. */
+ readonly API_NOT_ENABLED: "api-not-enabled";
+ /** An error due to invalid Schema input. */
+ readonly INVALID_SCHEMA: "invalid-schema";
+ /** An error occurred due to a missing Firebase API key. */
+ readonly NO_API_KEY: "no-api-key";
+ /** An error occurred due to a missing Firebase app ID. */
+ readonly NO_APP_ID: "no-app-id";
+ /** An error occurred due to a model name not being specified during initialization. */
+ readonly NO_MODEL: "no-model";
+ /** An error occurred due to a missing project ID. */
+ readonly NO_PROJECT_ID: "no-project-id";
+ /** An error occurred while parsing. */
+ readonly PARSE_FAILED: "parse-failed";
+    /** An error occurred due to an attempt to use an unsupported feature. */
+ readonly UNSUPPORTED: "unsupported";
+};
+/**
+ * Standardized error codes that {@link AIError} can have.
+ *
+ * @public
+ */
+export type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/googleai.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/googleai.d.ts
new file mode 100644
index 0000000..7060f48
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/googleai.d.ts
@@ -0,0 +1,57 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { Tool, GenerationConfig, Citation, FinishReason, GroundingMetadata, PromptFeedback, SafetyRating, UsageMetadata, URLContextMetadata } from '../public-types';
+import { Content, Part } from './content';
+/**
+ * @internal
+ */
+export interface GoogleAICountTokensRequest {
+ generateContentRequest: {
+ model: string;
+ contents: Content[];
+ systemInstruction?: string | Part | Content;
+ tools?: Tool[];
+ generationConfig?: GenerationConfig;
+ };
+}
+/**
+ * @internal
+ */
+export interface GoogleAIGenerateContentResponse {
+ candidates?: GoogleAIGenerateContentCandidate[];
+ promptFeedback?: PromptFeedback;
+ usageMetadata?: UsageMetadata;
+}
+/**
+ * @internal
+ */
+export interface GoogleAIGenerateContentCandidate {
+ index: number;
+ content: Content;
+ finishReason?: FinishReason;
+ finishMessage?: string;
+ safetyRatings?: SafetyRating[];
+ citationMetadata?: GoogleAICitationMetadata;
+ groundingMetadata?: GroundingMetadata;
+ urlContextMetadata?: URLContextMetadata;
+}
+/**
+ * @internal
+ */
+export interface GoogleAICitationMetadata {
+ citationSources: Citation[];
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/index.d.ts
new file mode 100644
index 0000000..c56c5bc
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/index.d.ts
@@ -0,0 +1,18 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+export * from './requests';
+export * from './responses';
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/internal.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/internal.d.ts
new file mode 100644
index 0000000..7d5824d
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/internal.d.ts
@@ -0,0 +1,134 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { ImagenGenerationConfig, ImagenSafetySettings } from './requests';
+/**
+ * A response from the REST API is expected to look like this in the success case:
+ * {
+ * "predictions": [
+ * {
+ * "mimeType": "image/png",
+ * "bytesBase64Encoded": "iVBORw0KG..."
+ * },
+ * {
+ * "mimeType": "image/png",
+ * "bytesBase64Encoded": "i4BOtw0KG..."
+ * }
+ * ]
+ * }
+ *
+ * And like this in the failure case:
+ * {
+ * "predictions": [
+ * {
+ * "raiFilteredReason": "..."
+ * }
+ * ]
+ * }
+ *
+ * @internal
+ */
+export interface ImagenResponseInternal {
+ predictions?: Array<{
+ /**
+ * The MIME type of the generated image.
+ */
+ mimeType?: string;
+ /**
+ * The image data encoded as a base64 string.
+ */
+ bytesBase64Encoded?: string;
+ /**
+ * The GCS URI where the image was stored.
+ */
+ gcsUri?: string;
+ /**
+ * The reason why the image was filtered.
+ */
+ raiFilteredReason?: string;
+ /**
+ * The safety attributes.
+ *
+ * This type is currently unused in the SDK. It is sent back because our requests set
+ * `includeSafetyAttributes`. This property is currently only used to avoid throwing an error
+ * when encountering this unsupported prediction type.
+ */
+ safetyAttributes?: unknown;
+ }>;
+}
+/**
+ * The parameters to be sent in the request body of the HTTP call
+ * to the Vertex AI backend.
+ *
+ * We need a separate internal-only interface for this because the REST
+ * API expects different parameter names than what we show to our users.
+ *
+ * Sample request body JSON:
+ * {
+ * "instances": [
+ * {
+ * "prompt": "Portrait of a golden retriever on a beach."
+ * }
+ * ],
+ * "parameters": {
+ * "mimeType": "image/png",
+ * "safetyFilterLevel": "block_low_and_above",
+ * "personGeneration": "allow_all",
+ * "sampleCount": 2,
+ * "includeRaiReason": true,
+ * "includeSafetyAttributes": true,
+ * "aspectRatio": "9:16"
+ * }
+ * }
+ *
+ * See the Google Cloud docs: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#-drest
+ *
+ * @internal
+ */
+export interface PredictRequestBody {
+ instances: [
+ {
+ prompt: string;
+ }
+ ];
+ parameters: {
+ sampleCount: number;
+ aspectRatio?: string;
+ outputOptions?: {
+ mimeType: string;
+ compressionQuality?: number;
+ };
+ negativePrompt?: string;
+ storageUri?: string;
+ addWatermark?: boolean;
+ safetyFilterLevel?: string;
+ personGeneration?: string;
+ includeRaiReason: boolean;
+ includeSafetyAttributes: boolean;
+ };
+}
+/**
+ * Contains all possible REST API parameters that are provided by the caller.
+ *
+ * @internal
+ */
+export type ImagenGenerationParams = {
+ /**
+ * The Cloud Storage for Firebase bucket URI where the images should be stored
+ * (for GCS requests only).
+ */
+ gcsURI?: string;
+} & ImagenGenerationConfig & ImagenSafetySettings;
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/requests.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/requests.d.ts
new file mode 100644
index 0000000..31083fa
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/requests.d.ts
@@ -0,0 +1,245 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { ImagenImageFormat } from '../../requests/imagen-image-format';
+/**
+ * Parameters for configuring an {@link ImagenModel}.
+ *
+ * @public
+ */
+export interface ImagenModelParams {
+ /**
+ * The Imagen model to use for generating images.
+ * For example: `imagen-3.0-generate-002`.
+ *
+ * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
+ *
+ * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}
+ * for a full list of supported Imagen 3 models.
+ */
+ model: string;
+ /**
+ * Configuration options for generating images with Imagen.
+ */
+ generationConfig?: ImagenGenerationConfig;
+ /**
+ * Safety settings for filtering potentially inappropriate content.
+ */
+ safetySettings?: ImagenSafetySettings;
+}
+/**
+ * Configuration options for generating images with Imagen.
+ *
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for
+ * more details.
+ *
+ * @public
+ */
+export interface ImagenGenerationConfig {
+ /**
+ * A description of what should be omitted from the generated images.
+ *
+ * Support for negative prompts depends on the Imagen model.
+ *
+ * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.
+ *
+ * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions
+ * greater than `imagen-3.0-generate-002`.
+ */
+ negativePrompt?: string;
+ /**
+ * The number of images to generate. The default value is 1.
+ *
+ * The number of sample images that may be generated in each request depends on the model
+ * (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a>
+ * documentation for more details.
+ */
+ numberOfImages?: number;
+ /**
+ * The aspect ratio of the generated images. The default value is square 1:1.
+ * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}
+ * for more details.
+ */
+ aspectRatio?: ImagenAspectRatio;
+ /**
+ * The image format of the generated images. The default is PNG.
+ *
+ * See {@link ImagenImageFormat} for more details.
+ */
+ imageFormat?: ImagenImageFormat;
+ /**
+ * Whether to add an invisible watermark to generated images.
+ *
+ * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate
+ * that they are AI generated. If set to `false`, watermarking will be disabled.
+ *
+ * For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a>
+ * documentation for more details.
+ *
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,
+ * and cannot be turned off.
+ */
+ addWatermark?: boolean;
+}
+/**
+ * A filter level controlling how aggressively to filter sensitive content.
+ *
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
+ * for more details.
+ *
+ * @public
+ */
+export declare const ImagenSafetyFilterLevel: {
+ /**
+ * The most aggressive filtering level; most strict blocking.
+ */
+ readonly BLOCK_LOW_AND_ABOVE: "block_low_and_above";
+ /**
+ * Blocks some sensitive prompts and responses.
+ */
+ readonly BLOCK_MEDIUM_AND_ABOVE: "block_medium_and_above";
+ /**
+ * Blocks few sensitive prompts and responses.
+ */
+ readonly BLOCK_ONLY_HIGH: "block_only_high";
+ /**
+ * The least aggressive filtering level; blocks very few sensitive prompts and responses.
+ *
+ * Access to this feature is restricted and may require your case to be reviewed and approved by
+ * Cloud support.
+ */
+ readonly BLOCK_NONE: "block_none";
+};
+/**
+ * A filter level controlling how aggressively to filter sensitive content.
+ *
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
+ * for more details.
+ *
+ * @public
+ */
+export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];
+/**
+ * A filter level controlling whether generation of images containing people or faces is allowed.
+ *
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
+ * documentation for more details.
+ *
+ * @public
+ */
+export declare const ImagenPersonFilterLevel: {
+ /**
+ * Disallow generation of images containing people or faces; images of people are filtered out.
+ */
+ readonly BLOCK_ALL: "dont_allow";
+ /**
+ * Allow generation of images containing adults only; images of children are filtered out.
+ *
+ * Generation of images containing people or faces may require your use case to be
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+ * for more details.
+ */
+ readonly ALLOW_ADULT: "allow_adult";
+ /**
+ * Allow generation of images containing people of all ages.
+ *
+ * Generation of images containing people or faces may require your use case to be
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+ * for more details.
+ */
+ readonly ALLOW_ALL: "allow_all";
+};
+/**
+ * A filter level controlling whether generation of images containing people or faces is allowed.
+ *
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
+ * documentation for more details.
+ *
+ * @public
+ */
+export type ImagenPersonFilterLevel = (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];
+/**
+ * Settings for controlling the aggressiveness of filtering out sensitive content.
+ *
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * for more details.
+ *
+ * @public
+ */
+export interface ImagenSafetySettings {
+ /**
+ * A filter level controlling how aggressive to filter out sensitive content from generated
+ * images.
+ */
+ safetyFilterLevel?: ImagenSafetyFilterLevel;
+ /**
+ * A filter level controlling whether generation of images containing people or faces is allowed.
+ */
+ personFilterLevel?: ImagenPersonFilterLevel;
+}
+/**
+ * Aspect ratios for Imagen images.
+ *
+ * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
+ * {@link ImagenGenerationConfig}.
+ *
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * for more details and examples of the supported aspect ratios.
+ *
+ * @public
+ */
+export declare const ImagenAspectRatio: {
+ /**
+ * Square (1:1) aspect ratio.
+ */
+ readonly SQUARE: "1:1";
+ /**
+ * Landscape (3:4) aspect ratio.
+ */
+ readonly LANDSCAPE_3x4: "3:4";
+ /**
+ * Portrait (4:3) aspect ratio.
+ */
+ readonly PORTRAIT_4x3: "4:3";
+ /**
+ * Landscape (16:9) aspect ratio.
+ */
+ readonly LANDSCAPE_16x9: "16:9";
+ /**
+ * Portrait (9:16) aspect ratio.
+ */
+ readonly PORTRAIT_9x16: "9:16";
+};
+/**
+ * Aspect ratios for Imagen images.
+ *
+ * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
+ * {@link ImagenGenerationConfig}.
+ *
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * for more details and examples of the supported aspect ratios.
+ *
+ * @public
+ */
+export type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/responses.d.ts
new file mode 100644
index 0000000..f5dfc0f
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/responses.d.ts
@@ -0,0 +1,79 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * An image generated by Imagen, represented as inline data.
+ *
+ * @public
+ */
+export interface ImagenInlineImage {
+ /**
+ * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
+ *
+ * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
+ */
+ mimeType: string;
+ /**
+ * The base64-encoded image data.
+ */
+ bytesBase64Encoded: string;
+}
+/**
+ * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.
+ *
+ * This feature is not available yet.
+ * @public
+ */
+export interface ImagenGCSImage {
+ /**
+ * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
+ *
+ * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
+ */
+ mimeType: string;
+ /**
+ * The URI of the file stored in a Cloud Storage for Firebase bucket.
+ *
+ * @example `"gs://bucket-name/path/sample_0.jpg"`.
+ */
+ gcsURI: string;
+}
+/**
+ * The response from a request to generate images with Imagen.
+ *
+ * @public
+ */
+export interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> {
+ /**
+ * The images generated by Imagen.
+ *
+ * The number of images generated may be fewer than the number requested if one or more were
+ * filtered out; see `filteredReason`.
+ */
+ images: T[];
+ /**
+ * The reason that images were filtered out. This property will only be defined if one
+ * or more images were filtered.
+ *
+ * Images may be filtered out due to the {@link (ImagenSafetyFilterLevel:type)},
+ * {@link (ImagenPersonFilterLevel:type)}, or filtering included in the model.
+ * The filter levels may be adjusted in your {@link ImagenSafetySettings}.
+ *
+ * See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen}
+ * for more details.
+ */
+ filteredReason?: string;
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/index.d.ts
new file mode 100644
index 0000000..a8508d4
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/index.d.ts
@@ -0,0 +1,26 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+export * from './content';
+export * from './enums';
+export * from './requests';
+export * from './responses';
+export * from './error';
+export * from './schema';
+export * from './imagen';
+export * from './googleai';
+export { LanguageModelCreateOptions, LanguageModelCreateCoreOptions, LanguageModelExpected, LanguageModelMessage, LanguageModelMessageContent, LanguageModelMessageContentValue, LanguageModelMessageRole, LanguageModelMessageType, LanguageModelPromptOptions } from './language-model';
+export * from './chrome-adapter';
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/internal.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/internal.d.ts
new file mode 100644
index 0000000..3c16979
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/internal.d.ts
@@ -0,0 +1,33 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
+import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
+import { Backend } from '../backend';
+export * from './imagen/internal';
+export interface ApiSettings {
+ apiKey: string;
+ project: string;
+ appId: string;
+ automaticDataCollectionEnabled?: boolean;
+ /**
+ * @deprecated Use `backend.location` instead.
+ */
+ location: string;
+ backend: Backend;
+ getAuthToken?: () => Promise<FirebaseAuthTokenData | null>;
+ getAppCheckToken?: () => Promise<AppCheckTokenResult>;
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/language-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/language-model.d.ts
new file mode 100644
index 0000000..9361a1f
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/language-model.d.ts
@@ -0,0 +1,107 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * The subset of the Prompt API
+ * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl })
+ * required for hybrid functionality.
+ *
+ * @internal
+ */
+export interface LanguageModel extends EventTarget {
+ create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
+ availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
+ prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>;
+ promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream;
+ measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>;
+ destroy(): undefined;
+}
+/**
+ * @internal
+ */
+export declare enum Availability {
+ 'UNAVAILABLE' = "unavailable",
+ 'DOWNLOADABLE' = "downloadable",
+ 'DOWNLOADING' = "downloading",
+ 'AVAILABLE' = "available"
+}
+/**
+ * Configures the creation of an on-device language model session.
+ * @beta
+ */
+export interface LanguageModelCreateCoreOptions {
+ topK?: number;
+ temperature?: number;
+ expectedInputs?: LanguageModelExpected[];
+}
+/**
+ * Configures the creation of an on-device language model session.
+ * @beta
+ */
+export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+ signal?: AbortSignal;
+ initialPrompts?: LanguageModelMessage[];
+}
+/**
+ * Options for an on-device language model prompt.
+ * @beta
+ */
+export interface LanguageModelPromptOptions {
+ responseConstraint?: object;
+}
+/**
+ * Options for the expected inputs for an on-device language model.
+ * @beta
+ */ export interface LanguageModelExpected {
+ type: LanguageModelMessageType;
+ languages?: string[];
+}
+/**
+ * An on-device language model prompt.
+ * @beta
+ */
+export type LanguageModelPrompt = LanguageModelMessage[];
+/**
+ * An on-device language model message.
+ * @beta
+ */
+export interface LanguageModelMessage {
+ role: LanguageModelMessageRole;
+ content: LanguageModelMessageContent[];
+}
+/**
+ * An on-device language model content object.
+ * @beta
+ */
+export interface LanguageModelMessageContent {
+ type: LanguageModelMessageType;
+ value: LanguageModelMessageContentValue;
+}
+/**
+ * Allowable roles for on-device language model usage.
+ * @beta
+ */
+export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+/**
+ * Allowable types for on-device language model messages.
+ * @beta
+ */
+export type LanguageModelMessageType = 'text' | 'image' | 'audio';
+/**
+ * Content formats that can be provided as on-device message content.
+ * @beta
+ */
+export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/live-responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/live-responses.d.ts
new file mode 100644
index 0000000..8270db9
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/live-responses.d.ts
@@ -0,0 +1,79 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { Content, FunctionResponse, GenerativeContentBlob, Part } from './content';
+import { AudioTranscriptionConfig, LiveGenerationConfig, Tool, ToolConfig } from './requests';
+import { Transcription } from './responses';
+/**
+ * User input that is sent to the model.
+ *
+ * @internal
+ */
+export interface _LiveClientContent {
+ clientContent: {
+ turns: [Content];
+ turnComplete: boolean;
+ inputTranscription?: Transcription;
+ outputTranscription?: Transcription;
+ };
+}
+/**
+ * User input that is sent to the model in real time.
+ *
+ * @internal
+ */
+export interface _LiveClientRealtimeInput {
+ realtimeInput: {
+ text?: string;
+ audio?: GenerativeContentBlob;
+ video?: GenerativeContentBlob;
+ /**
+ * @deprecated Use `text`, `audio`, and `video` instead.
+ */
+ mediaChunks?: GenerativeContentBlob[];
+ };
+}
+/**
+ * Function responses that are sent to the model in real time.
+ */
+export interface _LiveClientToolResponse {
+ toolResponse: {
+ functionResponses: FunctionResponse[];
+ };
+}
+/**
+ * The first message in a Live session, used to configure generation options.
+ *
+ * @internal
+ */
+export interface _LiveClientSetup {
+ setup: {
+ model: string;
+ generationConfig?: _LiveGenerationConfig;
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: string | Part | Content;
+ inputAudioTranscription?: AudioTranscriptionConfig;
+ outputAudioTranscription?: AudioTranscriptionConfig;
+ };
+}
+/**
+ * The Live Generation Config.
+ *
+ * The public API ({@link LiveGenerationConfig}) has `inputAudioTranscription` and `outputAudioTranscription`,
+ * but the server expects these fields to be in the top-level `setup` message. This was a conscious API decision.
+ */
+export type _LiveGenerationConfig = Omit<LiveGenerationConfig, 'inputAudioTranscription' | 'outputAudioTranscription'>;
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/requests.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/requests.d.ts
new file mode 100644
index 0000000..6df8be1
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/requests.d.ts
@@ -0,0 +1,464 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { ObjectSchema, TypedSchema } from '../requests/schema-builder';
+import { Content, Part } from './content';
+import { LanguageModelCreateOptions, LanguageModelPromptOptions } from './language-model';
+import { FunctionCallingMode, HarmBlockMethod, HarmBlockThreshold, HarmCategory, InferenceMode, ResponseModality } from './enums';
+import { ObjectSchemaRequest, SchemaRequest } from './schema';
+/**
+ * Base parameters for a number of methods.
+ * @public
+ */
+export interface BaseParams {
+ safetySettings?: SafetySetting[];
+ generationConfig?: GenerationConfig;
+}
+/**
+ * Params passed to {@link getGenerativeModel}.
+ * @public
+ */
+export interface ModelParams extends BaseParams {
+ model: string;
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: string | Part | Content;
+}
+/**
+ * Params passed to {@link getLiveGenerativeModel}.
+ * @beta
+ */
+export interface LiveModelParams {
+ model: string;
+ generationConfig?: LiveGenerationConfig;
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: string | Part | Content;
+}
+/**
+ * Request sent through {@link GenerativeModel.generateContent}
+ * @public
+ */
+export interface GenerateContentRequest extends BaseParams {
+ contents: Content[];
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: string | Part | Content;
+}
+/**
+ * Safety setting that can be sent as part of request parameters.
+ * @public
+ */
+export interface SafetySetting {
+ category: HarmCategory;
+ threshold: HarmBlockThreshold;
+ /**
+ * The harm block method.
+ *
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be
+ * thrown if this property is defined.
+ */
+ method?: HarmBlockMethod;
+}
+/**
+ * Config options for content-related requests
+ * @public
+ */
+export interface GenerationConfig {
+ candidateCount?: number;
+ stopSequences?: string[];
+ maxOutputTokens?: number;
+ temperature?: number;
+ topP?: number;
+ topK?: number;
+ presencePenalty?: number;
+ frequencyPenalty?: number;
+ /**
+ * Output response MIME type of the generated candidate text.
+ * Supported MIME types are `text/plain` (default, text output),
+ * `application/json` (JSON response in the candidates), and
+ * `text/x.enum`.
+ */
+ responseMimeType?: string;
+ /**
+ * Output response schema of the generated candidate text. This
+ * value can be a class generated with a {@link Schema} static method
+ * like `Schema.string()` or `Schema.object()` or it can be a plain
+ * JS object matching the {@link SchemaRequest} interface.
+ * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently
+ * this is limited to `application/json` and `text/x.enum`.
+ */
+ responseSchema?: TypedSchema | SchemaRequest;
+ /**
+ * Generation modalities to be returned in generation responses.
+ *
+ * @remarks
+ * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}.
+ * - Only image generation (`ResponseModality.IMAGE`) is supported.
+ *
+ * @beta
+ */
+ responseModalities?: ResponseModality[];
+ /**
+ * Configuration for "thinking" behavior of compatible Gemini models.
+ */
+ thinkingConfig?: ThinkingConfig;
+}
+/**
+ * Configuration parameters used by {@link LiveGenerativeModel} to control live content generation.
+ *
+ * @beta
+ */
+export interface LiveGenerationConfig {
+ /**
+ * Configuration for speech synthesis.
+ */
+ speechConfig?: SpeechConfig;
+ /**
+ * Specifies the maximum number of tokens that can be generated in the response. The number of
+ * tokens per word varies depending on the language outputted. Is unbounded by default.
+ */
+ maxOutputTokens?: number;
+ /**
+ * Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest
+ * probability tokens are always selected. In this case, responses for a given prompt are mostly
+ * deterministic, but a small amount of variation is still possible.
+ */
+ temperature?: number;
+ /**
+ * Changes how the model selects tokens for output. Tokens are
+ * selected from the most to least probable until the sum of their probabilities equals the `topP`
+ * value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively
+ * and the `topP` value is 0.5, then the model will select either A or B as the next token by using
+ * the `temperature` and exclude C as a candidate. Defaults to 0.95 if unset.
+ */
+ topP?: number;
+ /**
+ * Changes how the model selects tokens for output. A `topK` value of 1 means the selected token is
+ * the most probable among all tokens in the model's vocabulary, while a `topK` value of 3 means that
+ * the next token is selected from among the 3 most probable by using sampled probabilities. Tokens
+ * are then further filtered with the highest selected `temperature` sampling. Defaults to 40
+ * if unspecified.
+ */
+ topK?: number;
+ /**
+ * Positive penalties.
+ */
+ presencePenalty?: number;
+ /**
+ * Frequency penalties.
+ */
+ frequencyPenalty?: number;
+ /**
+ * The modalities of the response.
+ */
+ responseModalities?: ResponseModality[];
+ /**
+ * Enables transcription of audio input.
+ *
+ * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscription` property
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
+ * messages, so you may only receive small amounts of text per message. For example, if you ask the model
+ * "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?".
+ */
+ inputAudioTranscription?: AudioTranscriptionConfig;
+ /**
+ * Enables transcription of audio output.
+ *
+ * When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
+ * messages, so you may only receive small amounts of text per message. For example, if the model says
+ * "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?".
+ */
+ outputAudioTranscription?: AudioTranscriptionConfig;
+}
+/**
+ * Params for {@link GenerativeModel.startChat}.
+ * @public
+ */
+export interface StartChatParams extends BaseParams {
+ history?: Content[];
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: string | Part | Content;
+}
+/**
+ * Params for calling {@link GenerativeModel.countTokens}
+ * @public
+ */
+export interface CountTokensRequest {
+ contents: Content[];
+ /**
+ * Instructions that direct the model to behave a certain way.
+ */
+ systemInstruction?: string | Part | Content;
+ /**
+ * {@link Tool} configuration.
+ */
+ tools?: Tool[];
+ /**
+ * Configuration options that control how the model generates a response.
+ */
+ generationConfig?: GenerationConfig;
+}
+/**
+ * Params passed to {@link getGenerativeModel}.
+ * @public
+ */
+export interface RequestOptions {
+ /**
+ * Request timeout in milliseconds. Defaults to 180 seconds (180000ms).
+ */
+ timeout?: number;
+ /**
+ * Base url for endpoint. Defaults to
+ * https://firebasevertexai.googleapis.com, which is the
+ * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API}
+ * (used regardless of your chosen Gemini API provider).
+ */
+ baseUrl?: string;
+}
+/**
+ * Defines a tool that model can call to access external knowledge.
+ * @public
+ */
+export type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool;
+/**
+ * Structured representation of a function declaration as defined by the
+ * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}.
+ * Included
+ * in this declaration are the function name and parameters. This
+ * `FunctionDeclaration` is a representation of a block of code that can be used
+ * as a Tool by the model and executed by the client.
+ * @public
+ */
+export interface FunctionDeclaration {
+ /**
+ * The name of the function to call. Must start with a letter or an
+ * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with
+ * a max length of 64.
+ */
+ name: string;
+ /**
+ * Description and purpose of the function. Model uses it to decide
+ * how and whether to call the function.
+ */
+ description: string;
+ /**
+ * Optional. Describes the parameters to this function in JSON Schema Object
+ * format. Reflects the Open API 3.03 Parameter Object. Parameter names are
+ * case-sensitive. For a function with no parameters, this can be left unset.
+ */
+ parameters?: ObjectSchema | ObjectSchemaRequest;
+}
+/**
+ * A tool that allows a Gemini model to connect to Google Search to access and incorporate
+ * up-to-date information from the web into its responses.
+ *
+ * Important: If using Grounding with Google Search, you are required to comply with the
+ * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
+ * section within the Service Specific Terms).
+ *
+ * @public
+ */
+export interface GoogleSearchTool {
+ /**
+ * Specifies the Google Search configuration.
+ * Currently, this is an empty object, but it's reserved for future configuration options.
+ *
+ * When using this feature, you are required to comply with the "Grounding with Google Search"
+ * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
+ * section within the Service Specific Terms).
+ */
+ googleSearch: GoogleSearch;
+}
+/**
+ * A tool that enables the model to use code execution.
+ *
+ * @beta
+ */
+export interface CodeExecutionTool {
+ /**
+ * Specifies the Google Search configuration.
+ * Currently, this is an empty object, but it's reserved for future configuration options.
+ */
+ codeExecution: {};
+}
+/**
+ * Specifies the Google Search configuration.
+ *
+ * @remarks Currently, this is an empty object, but it's reserved for future configuration options.
+ *
+ * @public
+ */
+export interface GoogleSearch {
+}
+/**
+ * A tool that allows you to provide additional context to the models in the form of public web
+ * URLs. By including URLs in your request, the Gemini model will access the content from those
+ * pages to inform and enhance its response.
+ *
+ * @beta
+ */
+export interface URLContextTool {
+ /**
+ * Specifies the URL Context configuration.
+ */
+ urlContext: URLContext;
+}
+/**
+ * Specifies the URL Context configuration.
+ *
+ * @beta
+ */
+export interface URLContext {
+}
+/**
+ * A `FunctionDeclarationsTool` is a piece of code that enables the system to
+ * interact with external systems to perform an action, or set of actions,
+ * outside of knowledge and scope of the model.
+ * @public
+ */
+export interface FunctionDeclarationsTool {
+ /**
+ * Optional. One or more function declarations
+ * to be passed to the model along with the current user query. Model may
+ * decide to call a subset of these functions by populating
+ * {@link FunctionCall} in the response. User should
+ * provide a {@link FunctionResponse} for each
+ * function call in the next turn. Based on the function responses, the model will
+ * generate the final response back to the user. Maximum 64 function
+ * declarations can be provided.
+ */
+ functionDeclarations?: FunctionDeclaration[];
+}
+/**
+ * Tool config. This config is shared for all tools provided in the request.
+ * @public
+ */
+export interface ToolConfig {
+ functionCallingConfig?: FunctionCallingConfig;
+}
+/**
+ * @public
+ */
+export interface FunctionCallingConfig {
+ mode?: FunctionCallingMode;
+ allowedFunctionNames?: string[];
+}
+/**
+ * Encapsulates configuration for on-device inference.
+ *
+ * @beta
+ */
+export interface OnDeviceParams {
+ createOptions?: LanguageModelCreateOptions;
+ promptOptions?: LanguageModelPromptOptions;
+}
+/**
+ * Configures hybrid inference.
+ * @beta
+ */
+export interface HybridParams {
+ /**
+ * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+ */
+ mode: InferenceMode;
+ /**
+ * Optional. Specifies advanced params for on-device inference.
+ */
+ onDeviceParams?: OnDeviceParams;
+ /**
+ * Optional. Specifies advanced params for in-cloud inference.
+ */
+ inCloudParams?: ModelParams;
+}
+/**
+ * Configuration for "thinking" behavior of compatible Gemini models.
+ *
+ * Certain models utilize a thinking process before generating a response. This allows them to
+ * reason through complex problems and plan a more coherent and accurate answer.
+ *
+ * @public
+ */
+export interface ThinkingConfig {
+ /**
+ * The thinking budget, in tokens.
+ *
+ * This parameter sets an upper limit on the number of tokens the model can use for its internal
+ * "thinking" process. A higher budget may result in higher quality responses for complex tasks
+ * but can also increase latency and cost.
+ *
+ * If you don't specify a budget, the model will determine the appropriate amount
+ * of thinking based on the complexity of the prompt.
+ *
+ * An error will be thrown if you set a thinking budget for a model that does not support this
+ * feature or if the specified budget is not within the model's supported range.
+ */
+ thinkingBudget?: number;
+ /**
+ * Whether to include "thought summaries" in the model's response.
+ *
+ * @remarks
+ * Thought summaries provide a brief overview of the model's internal thinking process,
+ * offering insight into how it arrived at the final answer. This can be useful for
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
+ */
+ includeThoughts?: boolean;
+}
+/**
+ * Configuration for a pre-built voice.
+ *
+ * @beta
+ */
+export interface PrebuiltVoiceConfig {
+ /**
+ * The voice name to use for speech synthesis.
+ *
+ * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}.
+ */
+ voiceName?: string;
+}
+/**
+ * Configuration for the voice to used in speech synthesis.
+ *
+ * @beta
+ */
+export interface VoiceConfig {
+ /**
+ * Configures the voice using a pre-built voice configuration.
+ */
+ prebuiltVoiceConfig?: PrebuiltVoiceConfig;
+}
+/**
+ * Configures speech synthesis.
+ *
+ * @beta
+ */
+export interface SpeechConfig {
+ /**
+ * Configures the voice to be used in speech synthesis.
+ */
+ voiceConfig?: VoiceConfig;
+}
+/**
+ * The audio transcription configuration.
+ */
+export interface AudioTranscriptionConfig {
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/responses.d.ts
new file mode 100644
index 0000000..8896455
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/responses.d.ts
@@ -0,0 +1,582 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { Content, FunctionCall, InlineDataPart } from './content';
+import { BlockReason, FinishReason, HarmCategory, HarmProbability, HarmSeverity, InferenceSource, Modality } from './enums';
+/**
+ * Result object returned from {@link GenerativeModel.generateContent} call.
+ *
+ * @public
+ */
+export interface GenerateContentResult {
+ response: EnhancedGenerateContentResponse;
+}
+/**
+ * Result object returned from {@link GenerativeModel.generateContentStream} call.
+ * Iterate over `stream` to get chunks as they come in and/or
+ * use the `response` promise to get the aggregated response when
+ * the stream is done.
+ *
+ * @public
+ */
+export interface GenerateContentStreamResult {
+ stream: AsyncGenerator<EnhancedGenerateContentResponse>;
+ response: Promise<EnhancedGenerateContentResponse>;
+}
+/**
+ * Response object wrapped with helper methods.
+ *
+ * @public
+ */
+export interface EnhancedGenerateContentResponse extends GenerateContentResponse {
+ /**
+ * Returns the text string from the response, if available.
+ * Throws if the prompt or candidate was blocked.
+ */
+ text: () => string;
+ /**
+ * Aggregates and returns every {@link InlineDataPart} from the first candidate of
+ * {@link GenerateContentResponse}.
+ *
+ * @throws If the prompt or candidate was blocked.
+ */
+ inlineDataParts: () => InlineDataPart[] | undefined;
+ /**
+ * Aggregates and returns every {@link FunctionCall} from the first candidate of
+ * {@link GenerateContentResponse}.
+ *
+ * @throws If the prompt or candidate was blocked.
+ */
+ functionCalls: () => FunctionCall[] | undefined;
+ /**
+ * Aggregates and returns every {@link TextPart} with their `thought` property set
+ * to `true` from the first candidate of {@link GenerateContentResponse}.
+ *
+ * @throws If the prompt or candidate was blocked.
+ *
+ * @remarks
+ * Thought summaries provide a brief overview of the model's internal thinking process,
+ * offering insight into how it arrived at the final answer. This can be useful for
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
+ *
+ * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is
+ * set to `true`.
+ */
+ thoughtSummary: () => string | undefined;
+ /**
+ * Indicates whether inference happened on-device or in-cloud.
+ *
+ * @beta
+ */
+ inferenceSource?: InferenceSource;
+}
+/**
+ * Individual response from {@link GenerativeModel.generateContent} and
+ * {@link GenerativeModel.generateContentStream}.
+ * `generateContentStream()` will return one in each chunk until
+ * the stream is done.
+ * @public
+ */
+export interface GenerateContentResponse {
+ candidates?: GenerateContentCandidate[];
+ promptFeedback?: PromptFeedback;
+ usageMetadata?: UsageMetadata;
+}
+/**
+ * Usage metadata about a {@link GenerateContentResponse}.
+ *
+ * @public
+ */
+export interface UsageMetadata {
+ promptTokenCount: number;
+ candidatesTokenCount: number;
+ /**
+ * The number of tokens used by the model's internal "thinking" process.
+ */
+ thoughtsTokenCount?: number;
+ totalTokenCount: number;
+ /**
+ * The number of tokens used by tools.
+ */
+ toolUsePromptTokenCount?: number;
+ promptTokensDetails?: ModalityTokenCount[];
+ candidatesTokensDetails?: ModalityTokenCount[];
+ /**
+ * A list of tokens used by tools, broken down by modality.
+ */
+ toolUsePromptTokensDetails?: ModalityTokenCount[];
+}
+/**
+ * Represents token counting info for a single modality.
+ *
+ * @public
+ */
+export interface ModalityTokenCount {
+ /** The modality associated with this token count. */
+ modality: Modality;
+ /** The number of tokens counted. */
+ tokenCount: number;
+}
+/**
+ * If the prompt was blocked, this will be populated with `blockReason` and
+ * the relevant `safetyRatings`.
+ * @public
+ */
+export interface PromptFeedback {
+ blockReason?: BlockReason;
+ safetyRatings: SafetyRating[];
+ /**
+ * A human-readable description of the `blockReason`.
+ *
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
+ */
+ blockReasonMessage?: string;
+}
+/**
+ * A candidate returned as part of a {@link GenerateContentResponse}.
+ * @public
+ */
+export interface GenerateContentCandidate {
+ index: number;
+ content: Content;
+ finishReason?: FinishReason;
+ finishMessage?: string;
+ safetyRatings?: SafetyRating[];
+ citationMetadata?: CitationMetadata;
+ groundingMetadata?: GroundingMetadata;
+ urlContextMetadata?: URLContextMetadata;
+}
+/**
+ * Citation metadata that may be found on a {@link GenerateContentCandidate}.
+ * @public
+ */
+export interface CitationMetadata {
+ citations: Citation[];
+}
+/**
+ * A single citation.
+ * @public
+ */
+export interface Citation {
+ startIndex?: number;
+ endIndex?: number;
+ uri?: string;
+ license?: string;
+ /**
+ * The title of the cited source, if available.
+ *
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
+ */
+ title?: string;
+ /**
+ * The publication date of the cited source, if available.
+ *
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
+ */
+ publicationDate?: Date;
+}
+/**
+ * Metadata returned when grounding is enabled.
+ *
+ * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).
+ *
+ * Important: If using Grounding with Google Search, you are required to comply with the
+ * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
+ * section within the Service Specific Terms).
+ *
+ * @public
+ */
+export interface GroundingMetadata {
+ /**
+ * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be
+ * embedded in an app to display a Google Search entry point for follow-up web searches related to
+ * a model's "Grounded Response".
+ */
+ searchEntryPoint?: SearchEntrypoint;
+ /**
+ * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content
+ * (for example, from a web page). that the model used to ground its response.
+ */
+ groundingChunks?: GroundingChunk[];
+ /**
+ * A list of {@link GroundingSupport} objects. Each object details how specific segments of the
+ * model's response are supported by the `groundingChunks`.
+ */
+ groundingSupports?: GroundingSupport[];
+ /**
+ * A list of web search queries that the model performed to gather the grounding information.
+ * These can be used to allow users to explore the search results themselves.
+ */
+ webSearchQueries?: string[];
+ /**
+ * @deprecated Use {@link GroundingSupport} instead.
+ */
+ retrievalQueries?: string[];
+}
+/**
+ * Google search entry point.
+ *
+ * @public
+ */
+export interface SearchEntrypoint {
+ /**
+ * HTML/CSS snippet that must be embedded in a web page. The snippet is designed to avoid
+ * undesired interaction with the rest of the page's CSS.
+ *
+ * To ensure proper rendering and prevent CSS conflicts, it is recommended
+ * to encapsulate this `renderedContent` within a shadow DOM when embedding it
+ * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.
+ *
+ * @example
+ * ```javascript
+ * const container = document.createElement('div');
+ * document.body.appendChild(container);
+ * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;
+ * ```
+ */
+ renderedContent?: string;
+}
+/**
+ * Represents a chunk of retrieved data that supports a claim in the model's response. This is part
+ * of the grounding information provided when grounding is enabled.
+ *
+ * @public
+ */
+export interface GroundingChunk {
+ /**
+ * Contains details if the grounding chunk is from a web source.
+ */
+ web?: WebGroundingChunk;
+}
+/**
+ * A grounding chunk from the web.
+ *
+ * Important: If using Grounding with Google Search, you are required to comply with the
+ * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for "Grounding with Google Search".
+ *
+ * @public
+ */
+export interface WebGroundingChunk {
+ /**
+ * The URI of the retrieved web page.
+ */
+ uri?: string;
+ /**
+ * The title of the retrieved web page.
+ */
+ title?: string;
+ /**
+ * The domain of the original URI from which the content was retrieved.
+ *
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be
+ * `undefined`.
+ */
+ domain?: string;
+}
+/**
+ * Provides information about how a specific segment of the model's response is supported by the
+ * retrieved grounding chunks.
+ *
+ * @public
+ */
+export interface GroundingSupport {
+ /**
+ * Specifies the segment of the model's response content that this grounding support pertains to.
+ */
+ segment?: Segment;
+ /**
+ * A list of indices that refer to specific {@link GroundingChunk} objects within the
+ * {@link GroundingMetadata.groundingChunks} array. These referenced chunks
+ * are the sources that support the claim made in the associated `segment` of the response.
+ * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,
+ * and `groundingChunks[4]` are the retrieved content supporting this part of the response.
+ */
+ groundingChunkIndices?: number[];
+}
+/**
+ * Represents a specific segment within a {@link Content} object, often used to
+ * pinpoint the exact location of text or data that grounding information refers to.
+ *
+ * @public
+ */
+export interface Segment {
+ /**
+ * The zero-based index of the {@link Part} object within the `parts` array
+ * of its parent {@link Content} object. This identifies which part of the
+ * content the segment belongs to.
+ */
+ partIndex: number;
+ /**
+ * The zero-based start index of the segment within the specified `Part`,
+ * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the
+ * beginning of the part's content (e.g., `Part.text`).
+ */
+ startIndex: number;
+ /**
+ * The zero-based end index of the segment within the specified `Part`,
+ * measured in UTF-8 bytes. This offset is exclusive, meaning the character
+ * at this index is not included in the segment.
+ */
+ endIndex: number;
+ /**
+ * The text corresponding to the segment from the response.
+ */
+ text: string;
+}
+/**
+ * Metadata related to {@link URLContextTool}.
+ *
+ * @beta
+ */
+export interface URLContextMetadata {
+ /**
+ * List of URL metadata used to provide context to the Gemini model.
+ */
+ urlMetadata: URLMetadata[];
+}
+/**
+ * Metadata for a single URL retrieved by the {@link URLContextTool} tool.
+ *
+ * @beta
+ */
+export interface URLMetadata {
+ /**
+ * The retrieved URL.
+ */
+ retrievedUrl?: string;
+ /**
+ * The status of the URL retrieval.
+ */
+ urlRetrievalStatus?: URLRetrievalStatus;
+}
+/**
+ * The status of a URL retrieval.
+ *
+ * @remarks
+ * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
+ * <br/>
+ *
+ * @beta
+ */
+export declare const URLRetrievalStatus: {
+ /**
+ * Unspecified retrieval status.
+ */
+ URL_RETRIEVAL_STATUS_UNSPECIFIED: string;
+ /**
+ * The URL retrieval was successful.
+ */
+ URL_RETRIEVAL_STATUS_SUCCESS: string;
+ /**
+ * The URL retrieval failed.
+ */
+ URL_RETRIEVAL_STATUS_ERROR: string;
+ /**
+ * The URL retrieval failed because the content is behind a paywall.
+ */
+ URL_RETRIEVAL_STATUS_PAYWALL: string;
+ /**
+ * The URL retrieval failed because the content is unsafe.
+ */
+ URL_RETRIEVAL_STATUS_UNSAFE: string;
+};
+/**
+ * The status of a URL retrieval.
+ *
+ * @remarks
+ * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
+ * <br/>
+ *
+ * @beta
+ */
+export type URLRetrievalStatus = (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];
+/**
+ * @public
+ */
+export interface WebAttribution {
+ uri: string;
+ title: string;
+}
+/**
+ * @public
+ */
+export interface RetrievedContextAttribution {
+ uri: string;
+ title: string;
+}
+/**
+ * Protobuf google.type.Date
+ * @public
+ */
+export interface Date {
+ year: number;
+ month: number;
+ day: number;
+}
+/**
+ * A safety rating associated with a {@link GenerateContentCandidate}
+ * @public
+ */
+export interface SafetyRating {
+ category: HarmCategory;
+ probability: HarmProbability;
+ /**
+ * The harm severity level.
+ *
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.
+ */
+ severity: HarmSeverity;
+ /**
+ * The probability score of the harm category.
+ *
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
+ */
+ probabilityScore: number;
+ /**
+ * The severity score of the harm category.
+ *
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
+ */
+ severityScore: number;
+ blocked: boolean;
+}
+/**
+ * Response from calling {@link GenerativeModel.countTokens}.
+ * @public
+ */
+export interface CountTokensResponse {
+ /**
+ * The total number of tokens counted across all instances from the request.
+ */
+ totalTokens: number;
+ /**
+ * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.
+ *
+ * The total number of billable characters counted across all instances
+ * from the request.
+ */
+ totalBillableCharacters?: number;
+ /**
+ * The breakdown, by modality, of how many tokens are consumed by the prompt.
+ */
+ promptTokensDetails?: ModalityTokenCount[];
+}
+/**
+ * An incremental content update from the model.
+ *
+ * @beta
+ */
+export interface LiveServerContent {
+ type: 'serverContent';
+ /**
+ * The content that the model has generated as part of the current conversation with the user.
+ */
+ modelTurn?: Content;
+ /**
+ * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.
+ */
+ turnComplete?: boolean;
+ /**
+ * Indicates whether the model was interrupted by the client. An interruption occurs when
+ * the client sends a message before the model finishes it's turn. This is `undefined` if the
+ * model was not interrupted.
+ */
+ interrupted?: boolean;
+ /**
+ * Transcription of the audio that was input to the model.
+ */
+ inputTranscription?: Transcription;
+ /**
+ * Transcription of the audio output from the model.
+ */
+ outputTranscription?: Transcription;
+}
+/**
+ * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription
+ * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on
+ * the {@link LiveGenerationConfig}.
+ *
+ * @beta
+ */
+export interface Transcription {
+ /**
+ * The text transcription of the audio.
+ */
+ text?: string;
+}
+/**
+ * A request from the model for the client to execute one or more functions.
+ *
+ * @beta
+ */
+export interface LiveServerToolCall {
+ type: 'toolCall';
+ /**
+ * An array of function calls to run.
+ */
+ functionCalls: FunctionCall[];
+}
+/**
+ * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.
+ *
+ * @beta
+ */
+export interface LiveServerToolCallCancellation {
+ type: 'toolCallCancellation';
+ /**
+ * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.
+ */
+ functionIds: string[];
+}
+/**
+ * The types of responses that can be returned by {@link LiveSession.receive}.
+ *
+ * @beta
+ */
+export declare const LiveResponseType: {
+ SERVER_CONTENT: string;
+ TOOL_CALL: string;
+ TOOL_CALL_CANCELLATION: string;
+};
+/**
+ * The types of responses that can be returned by {@link LiveSession.receive}.
+ * This is a property on all messages that can be used for type narrowing. This property is not
+ * returned by the server, it is assigned to a server message object once it's parsed.
+ *
+ * @beta
+ */
+export type LiveResponseType = (typeof LiveResponseType)[keyof typeof LiveResponseType];
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/schema.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/schema.d.ts
new file mode 100644
index 0000000..7abb2d1
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/schema.d.ts
@@ -0,0 +1,139 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Contains the list of OpenAPI data types
+ * as defined by the
+ * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
+ * @public
+ */
+export declare const SchemaType: {
+ /** String type. */
+ readonly STRING: "string";
+ /** Number type. */
+ readonly NUMBER: "number";
+ /** Integer type. */
+ readonly INTEGER: "integer";
+ /** Boolean type. */
+ readonly BOOLEAN: "boolean";
+ /** Array type. */
+ readonly ARRAY: "array";
+ /** Object type. */
+ readonly OBJECT: "object";
+};
+/**
+ * Contains the list of OpenAPI data types
+ * as defined by the
+ * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
+ * @public
+ */
+export type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];
+/**
+ * Basic {@link Schema} properties shared across several Schema-related
+ * types.
+ * @public
+ */
+export interface SchemaShared<T> {
+ /**
+ * An array of {@link Schema}. The generated data must be valid against any of the schemas
+ * listed in this array. This allows specifying multiple possible structures or types for a
+ * single field.
+ */
+ anyOf?: T[];
+ /** Optional. The format of the property.
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or
+ * `'date-time'`, otherwise requests will fail.
+ */
+ format?: string;
+ /** Optional. The description of the property. */
+ description?: string;
+ /**
+ * The title of the property. This helps document the schema's purpose but does not typically
+ * constrain the generated value. It can subtly guide the model by clarifying the intent of a
+ * field.
+ */
+ title?: string;
+ /** Optional. The items of the property. */
+ items?: T;
+ /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
+ minItems?: number;
+ /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
+ maxItems?: number;
+ /** Optional. Map of `Schema` objects. */
+ properties?: {
+ [k: string]: T;
+ };
+ /** A hint suggesting the order in which the keys should appear in the generated JSON string. */
+ propertyOrdering?: string[];
+ /** Optional. The enum of the property. */
+ enum?: string[];
+ /** Optional. The example of the property. */
+ example?: unknown;
+ /** Optional. Whether the property is nullable. */
+ nullable?: boolean;
+ /** The minimum value of a numeric type. */
+ minimum?: number;
+ /** The maximum value of a numeric type. */
+ maximum?: number;
+ [key: string]: unknown;
+}
+/**
+ * Params passed to {@link Schema} static methods to create specific
+ * {@link Schema} classes.
+ * @public
+ */
+export interface SchemaParams extends SchemaShared<SchemaInterface> {
+}
+/**
+ * Final format for {@link Schema} params passed to backend requests.
+ * @public
+ */
+export interface SchemaRequest extends SchemaShared<SchemaRequest> {
+ /**
+ * The type of the property. this can only be undefined when using `anyOf` schemas,
+ * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }.
+ */
+ type?: SchemaType;
+ /** Optional. Array of required property. */
+ required?: string[];
+}
+/**
+ * Interface for {@link Schema} class.
+ * @public
+ */
+export interface SchemaInterface extends SchemaShared<SchemaInterface> {
+ /**
+ * The type of the property. this can only be undefined when using `anyof` schemas,
+ * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.
+ */
+ type?: SchemaType;
+}
+/**
+ * Interface for JSON parameters in a schema of {@link (SchemaType:type)}
+ * "object" when not using the `Schema.object()` helper.
+ * @public
+ */
+export interface ObjectSchemaRequest extends SchemaRequest {
+ type: 'object';
+ /**
+ * This is not a property accepted in the final request to the backend, but is
+ * a client-side convenience property that is only usable by constructing
+ * a schema through the `Schema.object()` helper method. Populating this
+ * property will cause response errors if the object is not wrapped with
+ * `Schema.object()`.
+ */
+ optionalProperties?: never;
+}