Diffstat (limited to 'frontend-old/node_modules/@firebase/ai/dist')
108 files changed, 32415 insertions, 0 deletions
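The diff below adds the generated type declarations for the `@firebase/ai` package. As a quick orientation to the API surface those declarations describe, here is a minimal usage sketch in TypeScript. The entry-point import paths (`firebase/app`, `firebase/ai`), the placeholder Firebase config values, the model name `gemini-2.0-flash`, and the prompts are assumptions for illustration only; everything else (`getAI`, `GoogleAIBackend`, `getGenerativeModel`, `generateContent`, `generateContentStream`, `response.text()`) comes from the declarations in the diff.

```typescript
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, GoogleAIBackend } from 'firebase/ai';

// Placeholder config; substitute your project's values.
const app = initializeApp({ apiKey: '...', appId: '...', projectId: '...' });

// Target the Gemini Developer API backend, per the AIOptions/GoogleAIBackend declarations.
const ai = getAI(app, { backend: new GoogleAIBackend() });

// The model name here is illustrative, not taken from the diff.
const model = getGenerativeModel(ai, { model: 'gemini-2.0-flash' });

async function run(): Promise<void> {
  // Non-streaming call: result.response is an EnhancedGenerateContentResponse;
  // text() returns the response text and throws if the prompt or candidate was blocked.
  const result = await model.generateContent('Write a haiku about TypeScript.');
  console.log(result.response.text());

  // Streaming call: iterate chunks as they arrive, then await the aggregated response.
  const { stream, response } = await model.generateContentStream('Tell a short story.');
  for await (const chunk of stream) {
    console.log(chunk.text());
  }
  console.log('Aggregated:', (await response).text());
}

run().catch(console.error);
```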
diff --git a/frontend-old/node_modules/@firebase/ai/dist/ai-public.d.ts b/frontend-old/node_modules/@firebase/ai/dist/ai-public.d.ts new file mode 100644 index 0000000..0868452 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/ai-public.d.ts @@ -0,0 +1,3232 @@ +/** + * The Firebase AI Web SDK. + * + * @packageDocumentation + */ + +import { AppCheckTokenResult } from '@firebase/app-check-interop-types'; +import { FirebaseApp } from '@firebase/app'; +import { FirebaseAuthTokenData } from '@firebase/auth-interop-types'; +import { FirebaseError } from '@firebase/util'; + +/** + * An instance of the Firebase AI SDK. + * + * Do not create this instance directly. Instead, use {@link getAI | getAI()}. + * + * @public + */ +export declare interface AI { + /** + * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with. + */ + app: FirebaseApp; + /** + * A {@link Backend} instance that specifies the configuration for the target backend, + * either the Gemini Developer API (using {@link GoogleAIBackend}) or the + * Vertex AI Gemini API (using {@link VertexAIBackend}). + */ + backend: Backend; + /** + * Options applied to this {@link AI} instance. + */ + options?: AIOptions; + /** + * @deprecated use `AI.backend.location` instead. + * + * The location configured for this AI service instance, relevant for Vertex AI backends. + */ + location: string; +} + +/** + * Error class for the Firebase AI SDK. + * + * @public + */ +export declare class AIError extends FirebaseError { + readonly code: AIErrorCode; + readonly customErrorData?: CustomErrorData | undefined; + /** + * Constructs a new instance of the `AIError` class. + * + * @param code - The error code from {@link (AIErrorCode:type)}. + * @param message - A human-readable message describing the error. + * @param customErrorData - Optional error data. + */ + constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); +} + +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +export declare const AIErrorCode: { + /** A generic error occurred. */ + readonly ERROR: "error"; + /** An error occurred in a request. */ + readonly REQUEST_ERROR: "request-error"; + /** An error occurred in a response. */ + readonly RESPONSE_ERROR: "response-error"; + /** An error occurred while performing a fetch. */ + readonly FETCH_ERROR: "fetch-error"; + /** An error occurred because an operation was attempted on a closed session. */ + readonly SESSION_CLOSED: "session-closed"; + /** An error associated with a Content object. */ + readonly INVALID_CONTENT: "invalid-content"; + /** An error due to the Firebase API not being enabled in the Console. */ + readonly API_NOT_ENABLED: "api-not-enabled"; + /** An error due to invalid Schema input. */ + readonly INVALID_SCHEMA: "invalid-schema"; + /** An error occurred due to a missing Firebase API key. */ + readonly NO_API_KEY: "no-api-key"; + /** An error occurred due to a missing Firebase app ID. */ + readonly NO_APP_ID: "no-app-id"; + /** An error occurred due to a model name not being specified during initialization. */ + readonly NO_MODEL: "no-model"; + /** An error occurred due to a missing project ID. */ + readonly NO_PROJECT_ID: "no-project-id"; + /** An error occurred while parsing. */ + readonly PARSE_FAILED: "parse-failed"; + /** An error occurred due an attempt to use an unsupported feature. */ + readonly UNSUPPORTED: "unsupported"; +}; + +/** + * Standardized error codes that {@link AIError} can have. 
+ * + * @public + */ +export declare type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode]; + +/** + * Base class for Firebase AI model APIs. + * + * Instances of this class are associated with a specific Firebase AI {@link Backend} + * and provide methods for interacting with the configured generative model. + * + * @public + */ +export declare abstract class AIModel { + /** + * The fully qualified model resource name to use for generating images + * (for example, `publishers/google/models/imagen-3.0-generate-002`). + */ + readonly model: string; + /* Excluded from this release type: _apiSettings */ + /* Excluded from this release type: __constructor */ + /* Excluded from this release type: normalizeModelName */ + /* Excluded from this release type: normalizeGoogleAIModelName */ + /* Excluded from this release type: normalizeVertexAIModelName */ +} + +/** + * Options for initializing the AI service using {@link getAI | getAI()}. + * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API) + * and configuring its specific options (like location for Vertex AI). + * + * @public + */ +export declare interface AIOptions { + /** + * The backend configuration to use for the AI service instance. + * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}). + */ + backend?: Backend; + /** + * Whether to use App Check limited use tokens. Defaults to false. + */ + useLimitedUseAppCheckTokens?: boolean; +} + +/** + * Schema class representing a value that can conform to any of the provided sub-schemas. This is + * useful when a field can accept multiple distinct types or structures. + * @public + */ +export declare class AnyOfSchema extends Schema { + anyOf: TypedSchema[]; + constructor(schemaParams: SchemaParams & { + anyOf: TypedSchema[]; + }); + /* Excluded from this release type: toJSON */ +} + +declare interface ApiSettings { + apiKey: string; + project: string; + appId: string; + automaticDataCollectionEnabled?: boolean; + /** + * @deprecated Use `backend.location` instead. + */ + location: string; + backend: Backend; + getAuthToken?: () => Promise<FirebaseAuthTokenData | null>; + getAppCheckToken?: () => Promise<AppCheckTokenResult>; +} + +/** + * Schema class for "array" types. + * The `items` param should refer to the type of item that can be a member + * of the array. + * @public + */ +export declare class ArraySchema extends Schema { + items: TypedSchema; + constructor(schemaParams: SchemaParams, items: TypedSchema); + /* Excluded from this release type: toJSON */ +} + +/** + * A controller for managing an active audio conversation. + * + * @beta + */ +export declare interface AudioConversationController { + /** + * Stops the audio conversation, closes the microphone connection, and + * cleans up resources. Returns a promise that resolves when cleanup is complete. + */ + stop: () => Promise<void>; +} + +/** + * The audio transcription configuration. + */ +export declare interface AudioTranscriptionConfig { +} + +/** + * Abstract base class representing the configuration for an AI service backend. + * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for + * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and + * {@link VertexAIBackend} for the Vertex AI Gemini API. + * + * @public + */ +export declare abstract class Backend { + /** + * Specifies the backend type. + */ + readonly backendType: BackendType; + /** + * Protected constructor for use by subclasses. 
+ * @param type - The backend type. + */ + protected constructor(type: BackendType); +} + +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase AI SDK. + * This determines which backend service (Vertex AI Gemini API or Gemini Developer API) + * the SDK will communicate with. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +export declare const BackendType: { + /** + * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + readonly VERTEX_AI: "VERTEX_AI"; + /** + * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}). + * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + readonly GOOGLE_AI: "GOOGLE_AI"; +}; + +/** + * Type alias representing valid backend types. + * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`. + * + * @public + */ +export declare type BackendType = (typeof BackendType)[keyof typeof BackendType]; + +/** + * Base parameters for a number of methods. + * @public + */ +export declare interface BaseParams { + safetySettings?: SafetySetting[]; + generationConfig?: GenerationConfig; +} + +/** + * Reason that a prompt was blocked. + * @public + */ +export declare const BlockReason: { + /** + * Content was blocked by safety settings. + */ + readonly SAFETY: "SAFETY"; + /** + * Content was blocked, but the reason is uncategorized. + */ + readonly OTHER: "OTHER"; + /** + * Content was blocked because it contained terms from the terminology blocklist. + */ + readonly BLOCKLIST: "BLOCKLIST"; + /** + * Content was blocked due to prohibited content. + */ + readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT"; +}; + +/** + * Reason that a prompt was blocked. + * @public + */ +export declare type BlockReason = (typeof BlockReason)[keyof typeof BlockReason]; + +/** + * Schema class for "boolean" types. + * @public + */ +export declare class BooleanSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} + +/** + * ChatSession class that enables sending chat messages and stores + * history of sent and received messages so far. + * + * @public + */ +export declare class ChatSession { + model: string; + private chromeAdapter?; + params?: StartChatParams | undefined; + requestOptions?: RequestOptions | undefined; + private _apiSettings; + private _history; + private _sendPromise; + constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); + /** + * Gets the chat history so far. Blocked prompts are not added to history. + * Neither blocked candidates nor the prompts that generated them are added + * to history. + */ + getHistory(): Promise<Content[]>; + /** + * Sends a chat message and receives a non-streaming + * {@link GenerateContentResult} + */ + sendMessage(request: string | Array<string | Part>): Promise<GenerateContentResult>; + /** + * Sends a chat message and receives the response as a + * {@link GenerateContentStreamResult} containing an iterable stream + * and a response promise. 
+ */ + sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>; +} + +/** + * Defines an inference "backend" that uses Chrome's on-device model, + * and encapsulates logic for detecting when on-device inference is + * possible. + * + * These methods should not be called directly by the user. + * + * @beta + */ +export declare interface ChromeAdapter { + /** + * Checks if the on-device model is capable of handling a given + * request. + * @param request - A potential request to be passed to the model. + */ + isAvailable(request: GenerateContentRequest): Promise<boolean>; + /** + * Generates content using on-device inference. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContent} for generating + * content using in-cloud inference. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + */ + generateContent(request: GenerateContentRequest): Promise<Response>; + /** + * Generates a content stream using on-device inference. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContentStream} for generating + * a content stream using in-cloud inference. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + */ + generateContentStream(request: GenerateContentRequest): Promise<Response>; + /* Excluded from this release type: countTokens */ +} + +/** + * A single citation. + * @public + */ +export declare interface Citation { + startIndex?: number; + endIndex?: number; + uri?: string; + license?: string; + /** + * The title of the cited source, if available. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + title?: string; + /** + * The publication date of the cited source, if available. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + publicationDate?: Date_2; +} + +/** + * Citation metadata that may be found on a {@link GenerateContentCandidate}. + * @public + */ +export declare interface CitationMetadata { + citations: Citation[]; +} + +/** + * The results of code execution run by the model. + * + * @beta + */ +export declare interface CodeExecutionResult { + /** + * The result of the code execution. + */ + outcome?: Outcome; + /** + * The output from the code execution, or an error message + * if it failed. + */ + output?: string; +} + +/** + * Represents the code execution result from the model. + * + * @beta + */ +export declare interface CodeExecutionResultPart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: never; + thought?: never; + /* Excluded from this release type: thoughtSignature */ + executableCode?: never; + codeExecutionResult?: CodeExecutionResult; +} + +/** + * A tool that enables the model to use code execution. + * + * @beta + */ +export declare interface CodeExecutionTool { + /** + * Specifies the Google Search configuration. + * Currently, this is an empty object, but it's reserved for future configuration options. + */ + codeExecution: {}; +} + +/** + * Content type for both prompts and response candidates. + * @public + */ +export declare interface Content { + role: Role; + parts: Part[]; +} + +/** + * Params for calling {@link GenerativeModel.countTokens} + * @public + */ +export declare interface CountTokensRequest { + contents: Content[]; + /** + * Instructions that direct the model to behave a certain way. 
+ */ + systemInstruction?: string | Part | Content; + /** + * {@link Tool} configuration. + */ + tools?: Tool[]; + /** + * Configuration options that control how the model generates a response. + */ + generationConfig?: GenerationConfig; +} + +/** + * Response from calling {@link GenerativeModel.countTokens}. + * @public + */ +export declare interface CountTokensResponse { + /** + * The total number of tokens counted across all instances from the request. + */ + totalTokens: number; + /** + * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`. + * + * The total number of billable characters counted across all instances + * from the request. + */ + totalBillableCharacters?: number; + /** + * The breakdown, by modality, of how many tokens are consumed by the prompt. + */ + promptTokensDetails?: ModalityTokenCount[]; +} + +/** + * Details object that contains data originating from a bad HTTP response. + * + * @public + */ +export declare interface CustomErrorData { + /** HTTP status code of the error response. */ + status?: number; + /** HTTP status text of the error response. */ + statusText?: string; + /** Response from a {@link GenerateContentRequest} */ + response?: GenerateContentResponse; + /** Optional additional details about the error. */ + errorDetails?: ErrorDetails[]; +} + +/** + * Protobuf google.type.Date + * @public + */ +declare interface Date_2 { + year: number; + month: number; + day: number; +} +export { Date_2 as Date } + +/** + * Response object wrapped with helper methods. + * + * @public + */ +export declare interface EnhancedGenerateContentResponse extends GenerateContentResponse { + /** + * Returns the text string from the response, if available. + * Throws if the prompt or candidate was blocked. + */ + text: () => string; + /** + * Aggregates and returns every {@link InlineDataPart} from the first candidate of + * {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + */ + inlineDataParts: () => InlineDataPart[] | undefined; + /** + * Aggregates and returns every {@link FunctionCall} from the first candidate of + * {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + */ + functionCalls: () => FunctionCall[] | undefined; + /** + * Aggregates and returns every {@link TextPart} with their `thought` property set + * to `true` from the first candidate of {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + * + * @remarks + * Thought summaries provide a brief overview of the model's internal thinking process, + * offering insight into how it arrived at the final answer. This can be useful for + * debugging, understanding the model's reasoning, and verifying its accuracy. + * + * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is + * set to `true`. + */ + thoughtSummary: () => string | undefined; + /** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ + inferenceSource?: InferenceSource; +} + +/** + * Details object that may be included in an error response. + * + * @public + */ +export declare interface ErrorDetails { + '@type'?: string; + /** The reason for the error. */ + reason?: string; + /** The domain where the error occurred. */ + domain?: string; + /** Additional metadata about the error. */ + metadata?: Record<string, unknown>; + /** Any other relevant information about the error. 
*/ + [key: string]: unknown; +} + +/** + * An interface for executable code returned by the model. + * + * @beta + */ +export declare interface ExecutableCode { + /** + * The programming language of the code. + */ + language?: Language; + /** + * The source code to be executed. + */ + code?: string; +} + +/** + * Represents the code that is executed by the model. + * + * @beta + */ +export declare interface ExecutableCodePart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: never; + thought?: never; + /* Excluded from this release type: thoughtSignature */ + executableCode?: ExecutableCode; + codeExecutionResult?: never; +} + +/** + * Data pointing to a file uploaded on Google Cloud Storage. + * @public + */ +export declare interface FileData { + mimeType: string; + fileUri: string; +} + +/** + * Content part interface if the part represents {@link FileData} + * @public + */ +export declare interface FileDataPart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: FileData; + thought?: boolean; + /* Excluded from this release type: thoughtSignature */ + executableCode?: never; + codeExecutionResult?: never; +} + +/** + * Reason that a candidate finished. + * @public + */ +export declare const FinishReason: { + /** + * Natural stop point of the model or provided stop sequence. + */ + readonly STOP: "STOP"; + /** + * The maximum number of tokens as specified in the request was reached. + */ + readonly MAX_TOKENS: "MAX_TOKENS"; + /** + * The candidate content was flagged for safety reasons. + */ + readonly SAFETY: "SAFETY"; + /** + * The candidate content was flagged for recitation reasons. + */ + readonly RECITATION: "RECITATION"; + /** + * Unknown reason. + */ + readonly OTHER: "OTHER"; + /** + * The candidate content contained forbidden terms. + */ + readonly BLOCKLIST: "BLOCKLIST"; + /** + * The candidate content potentially contained prohibited content. + */ + readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT"; + /** + * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII). + */ + readonly SPII: "SPII"; + /** + * The function call generated by the model was invalid. + */ + readonly MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL"; +}; + +/** + * Reason that a candidate finished. + * @public + */ +export declare type FinishReason = (typeof FinishReason)[keyof typeof FinishReason]; + +/** + * A predicted {@link FunctionCall} returned from the model + * that contains a string representing the {@link FunctionDeclaration.name} + * and a structured JSON object containing the parameters and their values. + * @public + */ +export declare interface FunctionCall { + /** + * The id of the function call. This must be sent back in the associated {@link FunctionResponse}. + * + * + * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + id?: string; + name: string; + args: object; +} + +/** + * @public + */ +export declare interface FunctionCallingConfig { + mode?: FunctionCallingMode; + allowedFunctionNames?: string[]; +} + +/** + * @public + */ +export declare const FunctionCallingMode: { + /** + * Default model behavior; model decides to predict either a function call + * or a natural language response. 
+ */ + readonly AUTO: "AUTO"; + /** + * Model is constrained to always predicting a function call only. + * If `allowed_function_names` is set, the predicted function call will be + * limited to any one of `allowed_function_names`, else the predicted + * function call will be any one of the provided `function_declarations`. + */ + readonly ANY: "ANY"; + /** + * Model will not predict any function call. Model behavior is same as when + * not passing any function declarations. + */ + readonly NONE: "NONE"; +}; + +/** + * @public + */ +export declare type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode]; + +/** + * Content part interface if the part represents a {@link FunctionCall}. + * @public + */ +export declare interface FunctionCallPart { + text?: never; + inlineData?: never; + functionCall: FunctionCall; + functionResponse?: never; + thought?: boolean; + /* Excluded from this release type: thoughtSignature */ + executableCode?: never; + codeExecutionResult?: never; +} + +/** + * Structured representation of a function declaration as defined by the + * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}. + * Included + * in this declaration are the function name and parameters. This + * `FunctionDeclaration` is a representation of a block of code that can be used + * as a Tool by the model and executed by the client. + * @public + */ +export declare interface FunctionDeclaration { + /** + * The name of the function to call. Must start with a letter or an + * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with + * a max length of 64. + */ + name: string; + /** + * Description and purpose of the function. Model uses it to decide + * how and whether to call the function. + */ + description: string; + /** + * Optional. Describes the parameters to this function in JSON Schema Object + * format. Reflects the Open API 3.03 Parameter Object. Parameter names are + * case-sensitive. For a function with no parameters, this can be left unset. + */ + parameters?: ObjectSchema | ObjectSchemaRequest; +} + +/** + * A `FunctionDeclarationsTool` is a piece of code that enables the system to + * interact with external systems to perform an action, or set of actions, + * outside of knowledge and scope of the model. + * @public + */ +export declare interface FunctionDeclarationsTool { + /** + * Optional. One or more function declarations + * to be passed to the model along with the current user query. Model may + * decide to call a subset of these functions by populating + * {@link FunctionCall} in the response. User should + * provide a {@link FunctionResponse} for each + * function call in the next turn. Based on the function responses, the model will + * generate the final response back to the user. Maximum 64 function + * declarations can be provided. + */ + functionDeclarations?: FunctionDeclaration[]; +} + +/** + * The result output from a {@link FunctionCall} that contains a string + * representing the {@link FunctionDeclaration.name} + * and a structured JSON object containing any output + * from the function is used as context to the model. + * This should contain the result of a {@link FunctionCall} + * made based on model prediction. + * @public + */ +export declare interface FunctionResponse { + /** + * The id of the {@link FunctionCall}. + * + * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}). 
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + id?: string; + name: string; + response: object; +} + +/** + * Content part interface if the part represents {@link FunctionResponse}. + * @public + */ +export declare interface FunctionResponsePart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse: FunctionResponse; + thought?: boolean; + /* Excluded from this release type: thoughtSignature */ + executableCode?: never; + codeExecutionResult?: never; +} + +/** + * A candidate returned as part of a {@link GenerateContentResponse}. + * @public + */ +export declare interface GenerateContentCandidate { + index: number; + content: Content; + finishReason?: FinishReason; + finishMessage?: string; + safetyRatings?: SafetyRating[]; + citationMetadata?: CitationMetadata; + groundingMetadata?: GroundingMetadata; + urlContextMetadata?: URLContextMetadata; +} + +/** + * Request sent through {@link GenerativeModel.generateContent} + * @public + */ +export declare interface GenerateContentRequest extends BaseParams { + contents: Content[]; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} + +/** + * Individual response from {@link GenerativeModel.generateContent} and + * {@link GenerativeModel.generateContentStream}. + * `generateContentStream()` will return one in each chunk until + * the stream is done. + * @public + */ +export declare interface GenerateContentResponse { + candidates?: GenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} + +/** + * Result object returned from {@link GenerativeModel.generateContent} call. + * + * @public + */ +export declare interface GenerateContentResult { + response: EnhancedGenerateContentResponse; +} + +/** + * Result object returned from {@link GenerativeModel.generateContentStream} call. + * Iterate over `stream` to get chunks as they come in and/or + * use the `response` promise to get the aggregated response when + * the stream is done. + * + * @public + */ +export declare interface GenerateContentStreamResult { + stream: AsyncGenerator<EnhancedGenerateContentResponse>; + response: Promise<EnhancedGenerateContentResponse>; +} + +/** + * Config options for content-related requests + * @public + */ +export declare interface GenerationConfig { + candidateCount?: number; + stopSequences?: string[]; + maxOutputTokens?: number; + temperature?: number; + topP?: number; + topK?: number; + presencePenalty?: number; + frequencyPenalty?: number; + /** + * Output response MIME type of the generated candidate text. + * Supported MIME types are `text/plain` (default, text output), + * `application/json` (JSON response in the candidates), and + * `text/x.enum`. + */ + responseMimeType?: string; + /** + * Output response schema of the generated candidate text. This + * value can be a class generated with a {@link Schema} static method + * like `Schema.string()` or `Schema.object()` or it can be a plain + * JS object matching the {@link SchemaRequest} interface. + * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently + * this is limited to `application/json` and `text/x.enum`. + */ + responseSchema?: TypedSchema | SchemaRequest; + /** + * Generation modalities to be returned in generation responses. 
+ * + * @remarks + * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}. + * - Only image generation (`ResponseModality.IMAGE`) is supported. + * + * @beta + */ + responseModalities?: ResponseModality[]; + /** + * Configuration for "thinking" behavior of compatible Gemini models. + */ + thinkingConfig?: ThinkingConfig; +} + +/** + * Interface for sending an image. + * @public + */ +export declare interface GenerativeContentBlob { + mimeType: string; + /** + * Image as a base64 string. + */ + data: string; +} + +/** + * Class for generative model APIs. + * @public + */ +export declare class GenerativeModel extends AIModel { + private chromeAdapter?; + generationConfig: GenerationConfig; + safetySettings: SafetySetting[]; + requestOptions?: RequestOptions; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: Content; + constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined); + /** + * Makes a single non-streaming call to the model + * and returns an object containing a single {@link GenerateContentResponse}. + */ + generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>; + /** + * Makes a single streaming call to the model + * and returns an object containing an iterable stream that iterates + * over all chunks in the streaming response as well as + * a promise that returns the final aggregated response. + */ + generateContentStream(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentStreamResult>; + /** + * Gets a new {@link ChatSession} instance which can be used for + * multi-turn chats. + */ + startChat(startChatParams?: StartChatParams): ChatSession; + /** + * Counts the tokens in the provided request. + */ + countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>; +} + +/** + * Returns the default {@link AI} instance that is associated with the provided + * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the + * default settings. + * + * @example + * ```javascript + * const ai = getAI(app); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Gemini Developer API (via Google AI). + * const ai = getAI(app, { backend: new GoogleAIBackend() }); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Vertex AI Gemini API. + * const ai = getAI(app, { backend: new VertexAIBackend() }); + * ``` + * + * @param app - The {@link @firebase/app#FirebaseApp} to use. + * @param options - {@link AIOptions} that configure the AI instance. + * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}. + * + * @public + */ +export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI; + +/** + * Returns a {@link GenerativeModel} class with methods for inference + * and other functionality. + * + * @public + */ +export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel; + +/** + * Returns an {@link ImagenModel} class with methods for using Imagen. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when making Imagen requests. 
+ * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @public + */ +export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; + +/** + * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication. + * + * The Live API is only supported in modern browser windows and Node >= 22. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when setting up a {@link LiveSession}. + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @beta + */ +export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel; + +/** + * Configuration class for the Gemini Developer API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Gemini Developer API as the backend. + * + * @public + */ +export declare class GoogleAIBackend extends Backend { + /** + * Creates a configuration object for the Gemini Developer API backend. + */ + constructor(); +} + +/* Excluded from this release type: GoogleAICitationMetadata */ + +/* Excluded from this release type: GoogleAICountTokensRequest */ + +/* Excluded from this release type: GoogleAIGenerateContentCandidate */ + +/* Excluded from this release type: GoogleAIGenerateContentResponse */ + +/** + * Specifies the Google Search configuration. + * + * @remarks Currently, this is an empty object, but it's reserved for future configuration options. + * + * @public + */ +export declare interface GoogleSearch { +} + +/** + * A tool that allows a Gemini model to connect to Google Search to access and incorporate + * up-to-date information from the web into its responses. + * + * Important: If using Grounding with Google Search, you are required to comply with the + * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + * + * @public + */ +export declare interface GoogleSearchTool { + /** + * Specifies the Google Search configuration. + * Currently, this is an empty object, but it's reserved for future configuration options. + * + * When using this feature, you are required to comply with the "Grounding with Google Search" + * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + */ + googleSearch: GoogleSearch; +} + +/** + * Represents a chunk of retrieved data that supports a claim in the model's response. This is part + * of the grounding information provided when grounding is enabled. + * + * @public + */ +export declare interface GroundingChunk { + /** + * Contains details if the grounding chunk is from a web source. + */ + web?: WebGroundingChunk; +} + +/** + * Metadata returned when grounding is enabled. + * + * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}). 
+ * + * Important: If using Grounding with Google Search, you are required to comply with the + * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + * + * @public + */ +export declare interface GroundingMetadata { + /** + * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be + * embedded in an app to display a Google Search entry point for follow-up web searches related to + * a model's "Grounded Response". + */ + searchEntryPoint?: SearchEntrypoint; + /** + * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content + * (for example, from a web page). that the model used to ground its response. + */ + groundingChunks?: GroundingChunk[]; + /** + * A list of {@link GroundingSupport} objects. Each object details how specific segments of the + * model's response are supported by the `groundingChunks`. + */ + groundingSupports?: GroundingSupport[]; + /** + * A list of web search queries that the model performed to gather the grounding information. + * These can be used to allow users to explore the search results themselves. + */ + webSearchQueries?: string[]; + /** + * @deprecated Use {@link GroundingSupport} instead. + */ + retrievalQueries?: string[]; +} + +/** + * Provides information about how a specific segment of the model's response is supported by the + * retrieved grounding chunks. + * + * @public + */ +export declare interface GroundingSupport { + /** + * Specifies the segment of the model's response content that this grounding support pertains to. + */ + segment?: Segment; + /** + * A list of indices that refer to specific {@link GroundingChunk} objects within the + * {@link GroundingMetadata.groundingChunks} array. These referenced chunks + * are the sources that support the claim made in the associated `segment` of the response. + * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`, + * and `groundingChunks[4]` are the retrieved content supporting this part of the response. + */ + groundingChunkIndices?: number[]; +} + +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). + * + * @public + */ +export declare const HarmBlockMethod: { + /** + * The harm block method uses both probability and severity scores. + */ + readonly SEVERITY: "SEVERITY"; + /** + * The harm block method uses the probability score. + */ + readonly PROBABILITY: "PROBABILITY"; +}; + +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). + * + * @public + */ +export declare type HarmBlockMethod = (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod]; + +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +export declare const HarmBlockThreshold: { + /** + * Content with `NEGLIGIBLE` will be allowed. + */ + readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE"; + /** + * Content with `NEGLIGIBLE` and `LOW` will be allowed. + */ + readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE"; + /** + * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed. + */ + readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH"; + /** + * All content will be allowed. 
+ */ + readonly BLOCK_NONE: "BLOCK_NONE"; + /** + * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding + * to the {@link (HarmCategory:type)} will not be present in the response. + */ + readonly OFF: "OFF"; +}; + +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +export declare type HarmBlockThreshold = (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold]; + +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +export declare const HarmCategory: { + readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH"; + readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT"; + readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT"; + readonly HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT"; +}; + +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +export declare type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory]; + +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +export declare const HarmProbability: { + /** + * Content has a negligible chance of being unsafe. + */ + readonly NEGLIGIBLE: "NEGLIGIBLE"; + /** + * Content has a low chance of being unsafe. + */ + readonly LOW: "LOW"; + /** + * Content has a medium chance of being unsafe. + */ + readonly MEDIUM: "MEDIUM"; + /** + * Content has a high chance of being unsafe. + */ + readonly HIGH: "HIGH"; +}; + +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +export declare type HarmProbability = (typeof HarmProbability)[keyof typeof HarmProbability]; + +/** + * Harm severity levels. + * @public + */ +export declare const HarmSeverity: { + /** + * Negligible level of harm severity. + */ + readonly HARM_SEVERITY_NEGLIGIBLE: "HARM_SEVERITY_NEGLIGIBLE"; + /** + * Low level of harm severity. + */ + readonly HARM_SEVERITY_LOW: "HARM_SEVERITY_LOW"; + /** + * Medium level of harm severity. + */ + readonly HARM_SEVERITY_MEDIUM: "HARM_SEVERITY_MEDIUM"; + /** + * High level of harm severity. + */ + readonly HARM_SEVERITY_HIGH: "HARM_SEVERITY_HIGH"; + /** + * Harm severity is not supported. + * + * @remarks + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + readonly HARM_SEVERITY_UNSUPPORTED: "HARM_SEVERITY_UNSUPPORTED"; +}; + +/** + * Harm severity levels. + * @public + */ +export declare type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity]; + +/** + * Configures hybrid inference. + * @beta + */ +export declare interface HybridParams { + /** + * Specifies on-device or in-cloud inference. Defaults to prefer on-device. + */ + mode: InferenceMode; + /** + * Optional. Specifies advanced params for on-device inference. + */ + onDeviceParams?: OnDeviceParams; + /** + * Optional. Specifies advanced params for in-cloud inference. + */ + inCloudParams?: ModelParams; +} + +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +export declare const ImagenAspectRatio: { + /** + * Square (1:1) aspect ratio. + */ + readonly SQUARE: "1:1"; + /** + * Landscape (3:4) aspect ratio. 
+ */ + readonly LANDSCAPE_3x4: "3:4"; + /** + * Portrait (4:3) aspect ratio. + */ + readonly PORTRAIT_4x3: "4:3"; + /** + * Landscape (16:9) aspect ratio. + */ + readonly LANDSCAPE_16x9: "16:9"; + /** + * Portrait (9:16) aspect ratio. + */ + readonly PORTRAIT_9x16: "9:16"; +}; + +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +export declare type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio]; + +/** + * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket. + * + * This feature is not available yet. + * @public + */ +export declare interface ImagenGCSImage { + /** + * The MIME type of the image; either `"image/png"` or `"image/jpeg"`. + * + * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}. + */ + mimeType: string; + /** + * The URI of the file stored in a Cloud Storage for Firebase bucket. + * + * @example `"gs://bucket-name/path/sample_0.jpg"`. + */ + gcsURI: string; +} + +/** + * Configuration options for generating images with Imagen. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for + * more details. + * + * @public + */ +export declare interface ImagenGenerationConfig { + /** + * A description of what should be omitted from the generated images. + * + * Support for negative prompts depends on the Imagen model. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details. + * + * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions + * greater than `imagen-3.0-generate-002`. + */ + negativePrompt?: string; + /** + * The number of images to generate. The default value is 1. + * + * The number of sample images that may be generated in each request depends on the model + * (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a> + * documentation for more details. + */ + numberOfImages?: number; + /** + * The aspect ratio of the generated images. The default value is square 1:1. + * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)} + * for more details. + */ + aspectRatio?: ImagenAspectRatio; + /** + * The image format of the generated images. The default is PNG. + * + * See {@link ImagenImageFormat} for more details. + */ + imageFormat?: ImagenImageFormat; + /** + * Whether to add an invisible watermark to generated images. + * + * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate + * that they are AI generated. If set to `false`, watermarking will be disabled. + * + * For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a> + * documentation for more details. + * + * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true, + * and cannot be turned off. + */ + addWatermark?: boolean; +} + +/** + * The response from a request to generate images with Imagen. 
+ * + * @public + */ +export declare interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> { + /** + * The images generated by Imagen. + * + * The number of images generated may be fewer than the number requested if one or more were + * filtered out; see `filteredReason`. + */ + images: T[]; + /** + * The reason that images were filtered out. This property will only be defined if one + * or more images were filtered. + * + * Images may be filtered out due to the {@link (ImagenSafetyFilterLevel:type)}, + * {@link (ImagenPersonFilterLevel:type)}, or filtering included in the model. + * The filter levels may be adjusted in your {@link ImagenSafetySettings}. + * + * See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen} + * for more details. + */ + filteredReason?: string; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Defines the image format for images generated by Imagen. + * + * Use this class to specify the desired format (JPEG or PNG) and compression quality + * for images generated by Imagen. This is typically included as part of + * {@link ImagenModelParams}. + * + * @example + * ```javascript + * const imagenModelParams = { + * // ... other ImagenModelParams + * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75. + * } + * ``` + * + * @public + */ +export declare class ImagenImageFormat { + /** + * The MIME type. + */ + mimeType: string; + /** + * The level of compression (a number between 0 and 100). + */ + compressionQuality?: number; + private constructor(); + /** + * Creates an {@link ImagenImageFormat} for a JPEG image. + * + * @param compressionQuality - The level of compression (a number between 0 and 100). + * @returns An {@link ImagenImageFormat} object for a JPEG image. + * + * @public + */ + static jpeg(compressionQuality?: number): ImagenImageFormat; + /** + * Creates an {@link ImagenImageFormat} for a PNG image. + * + * @returns An {@link ImagenImageFormat} object for a PNG image. + * + * @public + */ + static png(): ImagenImageFormat; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * An image generated by Imagen, represented as inline data. 
+ * + * @public + */ +export declare interface ImagenInlineImage { + /** + * The MIME type of the image; either `"image/png"` or `"image/jpeg"`. + * + * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}. + */ + mimeType: string; + /** + * The base64-encoded image data. + */ + bytesBase64Encoded: string; +} + +/** + * Class for Imagen model APIs. + * + * This class provides methods for generating images using the Imagen model. + * + * @example + * ```javascript + * const imagen = new ImagenModel( + * ai, + * { + * model: 'imagen-3.0-generate-002' + * } + * ); + * + * const response = await imagen.generateImages('A photo of a cat'); + * if (response.images.length > 0) { + * console.log(response.images[0].bytesBase64Encoded); + * } + * ``` + * + * @public + */ +export declare class ImagenModel extends AIModel { + requestOptions?: RequestOptions | undefined; + /** + * The Imagen generation configuration. + */ + generationConfig?: ImagenGenerationConfig; + /** + * Safety settings for filtering inappropriate content. + */ + safetySettings?: ImagenSafetySettings; + /** + * Constructs a new instance of the {@link ImagenModel} class. + * + * @param ai - an {@link AI} instance. + * @param modelParams - Parameters to use when making requests to Imagen. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + */ + constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); + /** + * Generates images using the Imagen model and returns them as + * base64-encoded strings. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the generated images. + * + * @throws If the request to generate images fails. This happens if the + * prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + * + * @public + */ + generateImages(prompt: string): Promise<ImagenGenerationResponse<ImagenInlineImage>>; + /* Excluded from this release type: generateImagesGCS */ +} + +/** + * Parameters for configuring an {@link ImagenModel}. + * + * @public + */ +export declare interface ImagenModelParams { + /** + * The Imagen model to use for generating images. + * For example: `imagen-3.0-generate-002`. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions} + * for a full list of supported Imagen 3 models. + */ + model: string; + /** + * Configuration options for generating images with Imagen. + */ + generationConfig?: ImagenGenerationConfig; + /** + * Safety settings for filtering potentially inappropriate content. + */ + safetySettings?: ImagenSafetySettings; +} + +/** + * A filter level controlling whether generation of images containing people or faces is allowed. + * + * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a> + * documentation for more details. + * + * @public + */ +export declare const ImagenPersonFilterLevel: { + /** + * Disallow generation of images containing people or faces; images of people are filtered out. 
+ */ + readonly BLOCK_ALL: "dont_allow"; + /** + * Allow generation of images containing adults only; images of children are filtered out. + * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + readonly ALLOW_ADULT: "allow_adult"; + /** + * Allow generation of images containing adults only; images of children are filtered out. + * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + readonly ALLOW_ALL: "allow_all"; +}; + +/** + * A filter level controlling whether generation of images containing people or faces is allowed. + * + * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a> + * documentation for more details. + * + * @public + */ +export declare type ImagenPersonFilterLevel = (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel]; + +/** + * A filter level controlling how aggressively to filter sensitive content. + * + * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI + * are assessed against a list of safety filters, which include 'harmful categories' (for example, + * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to + * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines} + * for more details. + * + * @public + */ +export declare const ImagenSafetyFilterLevel: { + /** + * The most aggressive filtering level; most strict blocking. + */ + readonly BLOCK_LOW_AND_ABOVE: "block_low_and_above"; + /** + * Blocks some sensitive prompts and responses. + */ + readonly BLOCK_MEDIUM_AND_ABOVE: "block_medium_and_above"; + /** + * Blocks few sensitive prompts and responses. + */ + readonly BLOCK_ONLY_HIGH: "block_only_high"; + /** + * The least aggressive filtering level; blocks very few sensitive prompts and responses. + * + * Access to this feature is restricted and may require your case to be reviewed and approved by + * Cloud support. + */ + readonly BLOCK_NONE: "block_none"; +}; + +/** + * A filter level controlling how aggressively to filter sensitive content. + * + * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI + * are assessed against a list of safety filters, which include 'harmful categories' (for example, + * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to + * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines} + * for more details. 
+ * + * @public + */ +export declare type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel]; + +/** + * Settings for controlling the aggressiveness of filtering out sensitive content. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details. + * + * @public + */ +export declare interface ImagenSafetySettings { + /** + * A filter level controlling how aggressive to filter out sensitive content from generated + * images. + */ + safetyFilterLevel?: ImagenSafetyFilterLevel; + /** + * A filter level controlling whether generation of images containing people or faces is allowed. + */ + personFilterLevel?: ImagenPersonFilterLevel; +} + +/** + * Determines whether inference happens on-device or in-cloud. + * + * @remarks + * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an + * on-device model. If on-device inference is not available, the SDK + * will fall back to using a cloud-hosted model. + * <br/> + * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an + * on-device model. The SDK will not fall back to a cloud-hosted model. + * If on-device inference is not available, inference methods will throw. + * <br/> + * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a + * cloud-hosted model. The SDK will not fall back to an on-device model. + * <br/> + * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a + * cloud-hosted model. If not available, the SDK will fall back to an + * on-device model. + * + * @beta + */ +export declare const InferenceMode: { + readonly PREFER_ON_DEVICE: "prefer_on_device"; + readonly ONLY_ON_DEVICE: "only_on_device"; + readonly ONLY_IN_CLOUD: "only_in_cloud"; + readonly PREFER_IN_CLOUD: "prefer_in_cloud"; +}; + +/** + * Determines whether inference happens on-device or in-cloud. + * + * @beta + */ +export declare type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode]; + +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +export declare const InferenceSource: { + readonly ON_DEVICE: "on_device"; + readonly IN_CLOUD: "in_cloud"; +}; + +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +export declare type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource]; + +/** + * Content part interface if the part represents an image. + * @public + */ +export declare interface InlineDataPart { + text?: never; + inlineData: GenerativeContentBlob; + functionCall?: never; + functionResponse?: never; + /** + * Applicable if `inlineData` is a video. + */ + videoMetadata?: VideoMetadata; + thought?: boolean; + /* Excluded from this release type: thoughtSignature */ + executableCode?: never; + codeExecutionResult?: never; +} + +/** + * Schema class for "integer" types. + * @public + */ +export declare class IntegerSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} + +/** + * The programming language of the code. + * + * @beta + */ +export declare const Language: { + UNSPECIFIED: string; + PYTHON: string; +}; + +/** + * The programming language of the code. + * + * @beta + */ +export declare type Language = (typeof Language)[keyof typeof Language]; + +/** + * Configures the creation of an on-device language model session. 
+ * @beta
+ */
+export declare interface LanguageModelCreateCoreOptions {
+ topK?: number;
+ temperature?: number;
+ expectedInputs?: LanguageModelExpected[];
+}
+
+/**
+ * Configures the creation of an on-device language model session.
+ * @beta
+ */
+export declare interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+ signal?: AbortSignal;
+ initialPrompts?: LanguageModelMessage[];
+}
+
+/**
+ * Options for the expected inputs for an on-device language model.
+ * @beta
+ */
+export declare interface LanguageModelExpected {
+ type: LanguageModelMessageType;
+ languages?: string[];
+}
+
+/**
+ * An on-device language model message.
+ * @beta
+ */
+export declare interface LanguageModelMessage {
+ role: LanguageModelMessageRole;
+ content: LanguageModelMessageContent[];
+}
+
+/**
+ * An on-device language model content object.
+ * @beta
+ */
+export declare interface LanguageModelMessageContent {
+ type: LanguageModelMessageType;
+ value: LanguageModelMessageContentValue;
+}
+
+/**
+ * Content formats that can be provided as on-device message content.
+ * @beta
+ */
+export declare type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+
+/**
+ * Allowable roles for on-device language model usage.
+ * @beta
+ */
+export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+
+/**
+ * Allowable types for on-device language model messages.
+ * @beta
+ */
+export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
+
+/**
+ * Options for an on-device language model prompt.
+ * @beta
+ */
+export declare interface LanguageModelPromptOptions {
+ responseConstraint?: object;
+}
+
+/**
+ * Configuration parameters used by {@link LiveGenerativeModel} to control live content generation.
+ *
+ * @beta
+ */
+export declare interface LiveGenerationConfig {
+ /**
+ * Configuration for speech synthesis.
+ */
+ speechConfig?: SpeechConfig;
+ /**
+ * Specifies the maximum number of tokens that can be generated in the response. The number of
+ * tokens per word varies depending on the language outputted. It is unbounded by default.
+ */
+ maxOutputTokens?: number;
+ /**
+ * Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest
+ * probability tokens are always selected. In this case, responses for a given prompt are mostly
+ * deterministic, but a small amount of variation is still possible.
+ */
+ temperature?: number;
+ /**
+ * Changes how the model selects tokens for output. Tokens are
+ * selected from the most to least probable until the sum of their probabilities equals the `topP`
+ * value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively
+ * and the `topP` value is 0.5, then the model will select either A or B as the next token by using
+ * the `temperature` and exclude C as a candidate. Defaults to 0.95 if unset.
+ */
+ topP?: number;
+ /**
+ * Changes how the model selects tokens for output. A `topK` value of 1 means the selected token is
+ * the most probable among all tokens in the model's vocabulary, while a `topK` value of 3 means that
+ * the next token is selected from among the 3 most probable tokens, sampled using their probabilities.
+ * Tokens are then further filtered, with the final token selected using `temperature` sampling.
+ * Defaults to 40 if unspecified.
+ */
+ topK?: number;
+ /**
+ * Presence penalty, which penalizes tokens that have already appeared in the generated output.
+ */
+ presencePenalty?: number;
+ /**
+ * Frequency penalty, which penalizes tokens in proportion to how often they have already appeared
+ * in the generated output.
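+ *
+ * For illustration only, a sketch of a {@link LiveGenerationConfig} combining these sampling and
+ * penalty settings (the values are arbitrary examples):
+ * ```javascript
+ * // Illustrative values only.
+ * const liveGenerationConfig = {
+ *   temperature: 0.9,
+ *   topP: 0.95,
+ *   topK: 40,
+ *   presencePenalty: 0.5,
+ *   frequencyPenalty: 0.5,
+ *   responseModalities: [ResponseModality.AUDIO]
+ * };
+ * ```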
+ */
+ frequencyPenalty?: number;
+ /**
+ * The modalities of the response.
+ */
+ responseModalities?: ResponseModality[];
+ /**
+ * Enables transcription of audio input.
+ *
+ * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscription` property
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
+ * messages, so you may only receive small amounts of text per message. For example, if you ask the model
+ * "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?".
+ */
+ inputAudioTranscription?: AudioTranscriptionConfig;
+ /**
+ * Enables transcription of audio output.
+ *
+ * When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
+ * messages, so you may only receive small amounts of text per message. For example, if the model says
+ * "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?".
+ */
+ outputAudioTranscription?: AudioTranscriptionConfig;
+}
+
+/**
+ * Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal
+ * interactions with Gemini.
+ *
+ * This class should only be instantiated with {@link getLiveGenerativeModel}.
+ *
+ * @beta
+ */
+export declare class LiveGenerativeModel extends AIModel {
+ /* Excluded from this release type: _webSocketHandler */
+ generationConfig: LiveGenerationConfig;
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: Content;
+ /* Excluded from this release type: __constructor */
+ /**
+ * Starts a {@link LiveSession}.
+ *
+ * @returns A {@link LiveSession}.
+ * @throws If the connection failed to be established with the server.
+ *
+ * @beta
+ */
+ connect(): Promise<LiveSession>;
+}
+
+/**
+ * Params passed to {@link getLiveGenerativeModel}.
+ * @beta
+ */
+export declare interface LiveModelParams {
+ model: string;
+ generationConfig?: LiveGenerationConfig;
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: string | Part | Content;
+}
+
+/**
+ * The types of responses that can be returned by {@link LiveSession.receive}.
+ *
+ * @beta
+ */
+export declare const LiveResponseType: {
+ SERVER_CONTENT: string;
+ TOOL_CALL: string;
+ TOOL_CALL_CANCELLATION: string;
+};
+
+/**
+ * The types of responses that can be returned by {@link LiveSession.receive}.
+ * This is a property on all messages that can be used for type narrowing. This property is not
+ * returned by the server; it is assigned to a server message object once it's parsed.
+ *
+ * @beta
+ */
+export declare type LiveResponseType = (typeof LiveResponseType)[keyof typeof LiveResponseType];
+
+/**
+ * An incremental content update from the model.
+ *
+ * @beta
+ */
+export declare interface LiveServerContent {
+ type: 'serverContent';
+ /**
+ * The content that the model has generated as part of the current conversation with the user.
+ */
+ modelTurn?: Content;
+ /**
+ * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.
+ */
+ turnComplete?: boolean;
+ /**
+ * Indicates whether the model was interrupted by the client. An interruption occurs when
+ * the client sends a message before the model finishes its turn. This is `undefined` if the
+ * model was not interrupted.
+ */ + interrupted?: boolean; + /** + * Transcription of the audio that was input to the model. + */ + inputTranscription?: Transcription; + /** + * Transcription of the audio output from the model. + */ + outputTranscription?: Transcription; +} + +/** + * A request from the model for the client to execute one or more functions. + * + * @beta + */ +export declare interface LiveServerToolCall { + type: 'toolCall'; + /** + * An array of function calls to run. + */ + functionCalls: FunctionCall[]; +} + +/** + * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}. + * + * @beta + */ +export declare interface LiveServerToolCallCancellation { + type: 'toolCallCancellation'; + /** + * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}. + */ + functionIds: string[]; +} + +/** + * Represents an active, real-time, bidirectional conversation with the model. + * + * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}. + * + * @beta + */ +export declare class LiveSession { + private webSocketHandler; + private serverMessages; + /** + * Indicates whether this Live session is closed. + * + * @beta + */ + isClosed: boolean; + /** + * Indicates whether this Live session is being controlled by an `AudioConversationController`. + * + * @beta + */ + inConversation: boolean; + /* Excluded from this release type: __constructor */ + /** + * Sends content to the server. + * + * @param request - The message to send to the model. + * @param turnComplete - Indicates if the turn is complete. Defaults to false. + * @throws If this session has been closed. + * + * @beta + */ + send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>; + /** + * Sends text to the server in realtime. + * + * @example + * ```javascript + * liveSession.sendTextRealtime("Hello, how are you?"); + * ``` + * + * @param text - The text data to send. + * @throws If this session has been closed. + * + * @beta + */ + sendTextRealtime(text: string): Promise<void>; + /** + * Sends audio data to the server in realtime. + * + * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz + * little-endian. + * + * @example + * ```javascript + * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian. + * const blob = { mimeType: "audio/pcm", data: pcmData }; + * liveSession.sendAudioRealtime(blob); + * ``` + * + * @param blob - The base64-encoded PCM data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + sendAudioRealtime(blob: GenerativeContentBlob): Promise<void>; + /** + * Sends video data to the server in realtime. + * + * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It + * is recommended to set `mimeType` to `image/jpeg`. + * + * @example + * ```javascript + * // const videoFrame = ... base64-encoded JPEG data + * const blob = { mimeType: "image/jpeg", data: videoFrame }; + * liveSession.sendVideoRealtime(blob); + * ``` + * @param blob - The base64-encoded video data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + sendVideoRealtime(blob: GenerativeContentBlob): Promise<void>; + /** + * Sends function responses to the server. + * + * @param functionResponses - The function responses to send. + * @throws If this session has been closed. 
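+ *
+ * @example
+ * A sketch for illustration only (the function name and response payload are arbitrary examples):
+ * ```javascript
+ * // Illustrative function name and payload only.
+ * liveSession.sendFunctionResponses([
+ *   { name: "getWeather", response: { temperatureCelsius: 21 } }
+ * ]);
+ * ```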
+ * + * @beta + */ + sendFunctionResponses(functionResponses: FunctionResponse[]): Promise<void>; + /** + * Yields messages received from the server. + * This can only be used by one consumer at a time. + * + * @returns An `AsyncGenerator` that yields server messages as they arrive. + * @throws If the session is already closed, or if we receive a response that we don't support. + * + * @beta + */ + receive(): AsyncGenerator<LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation>; + /** + * Closes this session. + * All methods on this session will throw an error once this resolves. + * + * @beta + */ + close(): Promise<void>; + /** + * Sends realtime input to the server. + * + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * @param mediaChunks - The media chunks to send. + * @throws If this session has been closed. + * + * @beta + */ + sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>; + /** + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * Sends a stream of {@link GenerativeContentBlob}. + * + * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send. + * @throws If this session has been closed. + * + * @beta + */ + sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>; +} + +/** + * Content part modality. + * @public + */ +export declare const Modality: { + /** + * Unspecified modality. + */ + readonly MODALITY_UNSPECIFIED: "MODALITY_UNSPECIFIED"; + /** + * Plain text. + */ + readonly TEXT: "TEXT"; + /** + * Image. + */ + readonly IMAGE: "IMAGE"; + /** + * Video. + */ + readonly VIDEO: "VIDEO"; + /** + * Audio. + */ + readonly AUDIO: "AUDIO"; + /** + * Document (for example, PDF). + */ + readonly DOCUMENT: "DOCUMENT"; +}; + +/** + * Content part modality. + * @public + */ +export declare type Modality = (typeof Modality)[keyof typeof Modality]; + +/** + * Represents token counting info for a single modality. + * + * @public + */ +export declare interface ModalityTokenCount { + /** The modality associated with this token count. */ + modality: Modality; + /** The number of tokens counted. */ + tokenCount: number; +} + +/** + * Params passed to {@link getGenerativeModel}. + * @public + */ +export declare interface ModelParams extends BaseParams { + model: string; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} + +/** + * Schema class for "number" types. + * @public + */ +export declare class NumberSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} + +/** + * Schema class for "object" types. + * The `properties` param must be a map of `Schema` objects. + * @public + */ +export declare class ObjectSchema extends Schema { + properties: { + [k: string]: TypedSchema; + }; + optionalProperties: string[]; + constructor(schemaParams: SchemaParams, properties: { + [k: string]: TypedSchema; + }, optionalProperties?: string[]); + /* Excluded from this release type: toJSON */ +} + +/** + * Interface for JSON parameters in a schema of {@link (SchemaType:type)} + * "object" when not using the `Schema.object()` helper. + * @public + */ +export declare interface ObjectSchemaRequest extends SchemaRequest { + type: 'object'; + /** + * This is not a property accepted in the final request to the backend, but is + * a client-side convenience property that is only usable by constructing + * a schema through the `Schema.object()` helper method. 
Populating this + * property will cause response errors if the object is not wrapped with + * `Schema.object()`. + */ + optionalProperties?: never; +} + +/** + * Encapsulates configuration for on-device inference. + * + * @beta + */ +export declare interface OnDeviceParams { + createOptions?: LanguageModelCreateOptions; + promptOptions?: LanguageModelPromptOptions; +} + +/** + * Represents the result of the code execution. + * + * @beta + */ +export declare const Outcome: { + UNSPECIFIED: string; + OK: string; + FAILED: string; + DEADLINE_EXCEEDED: string; +}; + +/** + * Represents the result of the code execution. + * + * @beta + */ +export declare type Outcome = (typeof Outcome)[keyof typeof Outcome]; + +/** + * Content part - includes text, image/video, or function call/response + * part types. + * @public + */ +export declare type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart; + +/** + * Possible roles. + * @public + */ +export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"]; + +/** + * Configuration for a pre-built voice. + * + * @beta + */ +export declare interface PrebuiltVoiceConfig { + /** + * The voice name to use for speech synthesis. + * + * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}. + */ + voiceName?: string; +} + +/** + * If the prompt was blocked, this will be populated with `blockReason` and + * the relevant `safetyRatings`. + * @public + */ +export declare interface PromptFeedback { + blockReason?: BlockReason; + safetyRatings: SafetyRating[]; + /** + * A human-readable description of the `blockReason`. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + blockReasonMessage?: string; +} + +/** + * Params passed to {@link getGenerativeModel}. + * @public + */ +export declare interface RequestOptions { + /** + * Request timeout in milliseconds. Defaults to 180 seconds (180000ms). + */ + timeout?: number; + /** + * Base url for endpoint. Defaults to + * https://firebasevertexai.googleapis.com, which is the + * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API} + * (used regardless of your chosen Gemini API provider). + */ + baseUrl?: string; +} + +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +export declare const ResponseModality: { + /** + * Text. + * @beta + */ + readonly TEXT: "TEXT"; + /** + * Image. + * @beta + */ + readonly IMAGE: "IMAGE"; + /** + * Audio. + * @beta + */ + readonly AUDIO: "AUDIO"; +}; + +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +export declare type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality]; + +/** + * @public + */ +export declare interface RetrievedContextAttribution { + uri: string; + title: string; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Role is the producer of the content. + * @public + */ +export declare type Role = (typeof POSSIBLE_ROLES)[number]; + +/** + * A safety rating associated with a {@link GenerateContentCandidate} + * @public + */ +export declare interface SafetyRating { + category: HarmCategory; + probability: HarmProbability; + /** + * The harm severity level. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`. + */ + severity: HarmSeverity; + /** + * The probability score of the harm category. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0. + */ + probabilityScore: number; + /** + * The severity score of the harm category. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0. + */ + severityScore: number; + blocked: boolean; +} + +/** + * Safety setting that can be sent as part of request parameters. + * @public + */ +export declare interface SafetySetting { + category: HarmCategory; + threshold: HarmBlockThreshold; + /** + * The harm block method. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be + * thrown if this property is defined. + */ + method?: HarmBlockMethod; +} + +/** + * Parent class encompassing all Schema types, with static methods that + * allow building specific Schema types. This class can be converted with + * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints. + * (This string conversion is automatically done when calling SDK methods.) + * @public + */ +export declare abstract class Schema implements SchemaInterface { + /** + * Optional. The type of the property. + * This can only be undefined when using `anyOf` schemas, which do not have an + * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}. + */ + type?: SchemaType; + /** Optional. The format of the property. + * Supported formats:<br/> + * <ul> + * <li>for NUMBER type: "float", "double"</li> + * <li>for INTEGER type: "int32", "int64"</li> + * <li>for STRING type: "email", "byte", etc</li> + * </ul> + */ + format?: string; + /** Optional. The description of the property. */ + description?: string; + /** Optional. The items of the property. */ + items?: SchemaInterface; + /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + minItems?: number; + /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + maxItems?: number; + /** Optional. 
Whether the property is nullable. Defaults to false. */ + nullable: boolean; + /** Optional. The example of the property. */ + example?: unknown; + /** + * Allows user to add other schema properties that have not yet + * been officially added to the SDK. + */ + [key: string]: unknown; + constructor(schemaParams: SchemaInterface); + /* Excluded from this release type: toJSON */ + static array(arrayParams: SchemaParams & { + items: Schema; + }): ArraySchema; + static object(objectParams: SchemaParams & { + properties: { + [k: string]: Schema; + }; + optionalProperties?: string[]; + }): ObjectSchema; + static string(stringParams?: SchemaParams): StringSchema; + static enumString(stringParams: SchemaParams & { + enum: string[]; + }): StringSchema; + static integer(integerParams?: SchemaParams): IntegerSchema; + static number(numberParams?: SchemaParams): NumberSchema; + static boolean(booleanParams?: SchemaParams): BooleanSchema; + static anyOf(anyOfParams: SchemaParams & { + anyOf: TypedSchema[]; + }): AnyOfSchema; +} + +/** + * Interface for {@link Schema} class. + * @public + */ +export declare interface SchemaInterface extends SchemaShared<SchemaInterface> { + /** + * The type of the property. this can only be undefined when using `anyof` schemas, + * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}. + */ + type?: SchemaType; +} + +/** + * Params passed to {@link Schema} static methods to create specific + * {@link Schema} classes. + * @public + */ +export declare interface SchemaParams extends SchemaShared<SchemaInterface> { +} + +/** + * Final format for {@link Schema} params passed to backend requests. + * @public + */ +export declare interface SchemaRequest extends SchemaShared<SchemaRequest> { + /** + * The type of the property. this can only be undefined when using `anyOf` schemas, + * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }. + */ + type?: SchemaType; + /** Optional. Array of required property. */ + required?: string[]; +} + +/** + * Basic {@link Schema} properties shared across several Schema-related + * types. + * @public + */ +export declare interface SchemaShared<T> { + /** + * An array of {@link Schema}. The generated data must be valid against any of the schemas + * listed in this array. This allows specifying multiple possible structures or types for a + * single field. + */ + anyOf?: T[]; + /** Optional. The format of the property. + * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or + * `'date-time'`, otherwise requests will fail. + */ + format?: string; + /** Optional. The description of the property. */ + description?: string; + /** + * The title of the property. This helps document the schema's purpose but does not typically + * constrain the generated value. It can subtly guide the model by clarifying the intent of a + * field. + */ + title?: string; + /** Optional. The items of the property. */ + items?: T; + /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + minItems?: number; + /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + maxItems?: number; + /** Optional. Map of `Schema` objects. */ + properties?: { + [k: string]: T; + }; + /** A hint suggesting the order in which the keys should appear in the generated JSON string. 
*/ + propertyOrdering?: string[]; + /** Optional. The enum of the property. */ + enum?: string[]; + /** Optional. The example of the property. */ + example?: unknown; + /** Optional. Whether the property is nullable. */ + nullable?: boolean; + /** The minimum value of a numeric type. */ + minimum?: number; + /** The maximum value of a numeric type. */ + maximum?: number; + [key: string]: unknown; +} + +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +export declare const SchemaType: { + /** String type. */ + readonly STRING: "string"; + /** Number type. */ + readonly NUMBER: "number"; + /** Integer type. */ + readonly INTEGER: "integer"; + /** Boolean type. */ + readonly BOOLEAN: "boolean"; + /** Array type. */ + readonly ARRAY: "array"; + /** Object type. */ + readonly OBJECT: "object"; +}; + +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +export declare type SchemaType = (typeof SchemaType)[keyof typeof SchemaType]; + +/** + * Google search entry point. + * + * @public + */ +export declare interface SearchEntrypoint { + /** + * HTML/CSS snippet that must be embedded in a web page. The snippet is designed to avoid + * undesired interaction with the rest of the page's CSS. + * + * To ensure proper rendering and prevent CSS conflicts, it is recommended + * to encapsulate this `renderedContent` within a shadow DOM when embedding it + * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}. + * + * @example + * ```javascript + * const container = document.createElement('div'); + * document.body.appendChild(container); + * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent; + * ``` + */ + renderedContent?: string; +} + +/** + * Represents a specific segment within a {@link Content} object, often used to + * pinpoint the exact location of text or data that grounding information refers to. + * + * @public + */ +export declare interface Segment { + /** + * The zero-based index of the {@link Part} object within the `parts` array + * of its parent {@link Content} object. This identifies which part of the + * content the segment belongs to. + */ + partIndex: number; + /** + * The zero-based start index of the segment within the specified `Part`, + * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the + * beginning of the part's content (e.g., `Part.text`). + */ + startIndex: number; + /** + * The zero-based end index of the segment within the specified `Part`, + * measured in UTF-8 bytes. This offset is exclusive, meaning the character + * at this index is not included in the segment. + */ + endIndex: number; + /** + * The text corresponding to the segment from the response. + */ + text: string; +} + +/** + * Configures speech synthesis. + * + * @beta + */ +export declare interface SpeechConfig { + /** + * Configures the voice to be used in speech synthesis. + */ + voiceConfig?: VoiceConfig; +} + +/** + * Starts a real-time, bidirectional audio conversation with the model. This helper function manages + * the complexities of microphone access, audio recording, playback, and interruptions. 
+ * + * @remarks Important: This function must be called in response to a user gesture + * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}. + * + * @example + * ```javascript + * const liveSession = await model.connect(); + * let conversationController; + * + * // This function must be called from within a click handler. + * async function startConversation() { + * try { + * conversationController = await startAudioConversation(liveSession); + * } catch (e) { + * // Handle AI-specific errors + * if (e instanceof AIError) { + * console.error("AI Error:", e.message); + * } + * // Handle microphone permission and hardware errors + * else if (e instanceof DOMException) { + * console.error("Microphone Error:", e.message); + * } + * // Handle other unexpected errors + * else { + * console.error("An unexpected error occurred:", e); + * } + * } + * } + * + * // Later, to stop the conversation: + * // if (conversationController) { + * // await conversationController.stop(); + * // } + * ``` + * + * @param liveSession - An active {@link LiveSession} instance. + * @param options - Configuration options for the audio conversation. + * @returns A `Promise` that resolves with an {@link AudioConversationController}. + * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`). + * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions. + * + * @beta + */ +export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>; + +/** + * Options for {@link startAudioConversation}. + * + * @beta + */ +export declare interface StartAudioConversationOptions { + /** + * An async handler that is called when the model requests a function to be executed. + * The handler should perform the function call and return the result as a `Part`, + * which will then be sent back to the model. + */ + functionCallingHandler?: (functionCalls: FunctionCall[]) => Promise<FunctionResponse>; +} + +/** + * Params for {@link GenerativeModel.startChat}. + * @public + */ +export declare interface StartChatParams extends BaseParams { + history?: Content[]; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} + +/** + * Schema class for "string" types. Can be used with or without + * enum values. + * @public + */ +export declare class StringSchema extends Schema { + enum?: string[]; + constructor(schemaParams?: SchemaParams, enumValues?: string[]); + /* Excluded from this release type: toJSON */ +} + +/** + * Content part interface if the part represents a text string. 
+ * @public + */ +export declare interface TextPart { + text: string; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + thought?: boolean; + /* Excluded from this release type: thoughtSignature */ + executableCode?: never; + codeExecutionResult?: never; +} + +/** + * Configuration for "thinking" behavior of compatible Gemini models. + * + * Certain models utilize a thinking process before generating a response. This allows them to + * reason through complex problems and plan a more coherent and accurate answer. + * + * @public + */ +export declare interface ThinkingConfig { + /** + * The thinking budget, in tokens. + * + * This parameter sets an upper limit on the number of tokens the model can use for its internal + * "thinking" process. A higher budget may result in higher quality responses for complex tasks + * but can also increase latency and cost. + * + * If you don't specify a budget, the model will determine the appropriate amount + * of thinking based on the complexity of the prompt. + * + * An error will be thrown if you set a thinking budget for a model that does not support this + * feature or if the specified budget is not within the model's supported range. + */ + thinkingBudget?: number; + /** + * Whether to include "thought summaries" in the model's response. + * + * @remarks + * Thought summaries provide a brief overview of the model's internal thinking process, + * offering insight into how it arrived at the final answer. This can be useful for + * debugging, understanding the model's reasoning, and verifying its accuracy. + */ + includeThoughts?: boolean; +} + +/** + * Defines a tool that model can call to access external knowledge. + * @public + */ +export declare type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool; + +/** + * Tool config. This config is shared for all tools provided in the request. + * @public + */ +export declare interface ToolConfig { + functionCallingConfig?: FunctionCallingConfig; +} + +/** + * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription + * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on + * the {@link LiveGenerationConfig}. + * + * @beta + */ +export declare interface Transcription { + /** + * The text transcription of the audio. + */ + text?: string; +} + +/** + * A type that includes all specific Schema types. + * @public + */ +export declare type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema | AnyOfSchema; + +/** + * Specifies the URL Context configuration. + * + * @beta + */ +export declare interface URLContext { +} + +/** + * Metadata related to {@link URLContextTool}. + * + * @beta + */ +export declare interface URLContextMetadata { + /** + * List of URL metadata used to provide context to the Gemini model. + */ + urlMetadata: URLMetadata[]; +} + +/** + * A tool that allows you to provide additional context to the models in the form of public web + * URLs. By including URLs in your request, the Gemini model will access the content from those + * pages to inform and enhance its response. + * + * @beta + */ +export declare interface URLContextTool { + /** + * Specifies the URL Context configuration. + */ + urlContext: URLContext; +} + +/** + * Metadata for a single URL retrieved by the {@link URLContextTool} tool. + * + * @beta + */ +export declare interface URLMetadata { + /** + * The retrieved URL. 
+ */ + retrievedUrl?: string; + /** + * The status of the URL retrieval. + */ + urlRetrievalStatus?: URLRetrievalStatus; +} + +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +export declare const URLRetrievalStatus: { + /** + * Unspecified retrieval status. + */ + URL_RETRIEVAL_STATUS_UNSPECIFIED: string; + /** + * The URL retrieval was successful. + */ + URL_RETRIEVAL_STATUS_SUCCESS: string; + /** + * The URL retrieval failed. + */ + URL_RETRIEVAL_STATUS_ERROR: string; + /** + * The URL retrieval failed because the content is behind a paywall. + */ + URL_RETRIEVAL_STATUS_PAYWALL: string; + /** + * The URL retrieval failed because the content is unsafe. + */ + URL_RETRIEVAL_STATUS_UNSAFE: string; +}; + +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +export declare type URLRetrievalStatus = (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus]; + +/** + * Usage metadata about a {@link GenerateContentResponse}. + * + * @public + */ +export declare interface UsageMetadata { + promptTokenCount: number; + candidatesTokenCount: number; + /** + * The number of tokens used by the model's internal "thinking" process. + */ + thoughtsTokenCount?: number; + totalTokenCount: number; + /** + * The number of tokens used by tools. + */ + toolUsePromptTokenCount?: number; + promptTokensDetails?: ModalityTokenCount[]; + candidatesTokensDetails?: ModalityTokenCount[]; + /** + * A list of tokens used by tools, broken down by modality. + */ + toolUsePromptTokensDetails?: ModalityTokenCount[]; +} + +/** + * Configuration class for the Vertex AI Gemini API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend. + * + * @public + */ +export declare class VertexAIBackend extends Backend { + /** + * The region identifier. + * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + readonly location: string; + /** + * Creates a configuration object for the Vertex AI backend. + * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + constructor(location?: string); +} + +/** + * Describes the input video content. 
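+ *
+ * For illustration only, a sketch using the protobuf `Duration` string format (the offsets are
+ * arbitrary examples):
+ * ```javascript
+ * // Illustrative offsets only.
+ * const videoMetadata = { startOffset: "0s", endOffset: "30s" };
+ * ```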
+ * @public + */ +export declare interface VideoMetadata { + /** + * The start offset of the video in + * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format. + */ + startOffset: string; + /** + * The end offset of the video in + * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format. + */ + endOffset: string; +} + +/** + * Configuration for the voice to used in speech synthesis. + * + * @beta + */ +export declare interface VoiceConfig { + /** + * Configures the voice using a pre-built voice configuration. + */ + prebuiltVoiceConfig?: PrebuiltVoiceConfig; +} + +/** + * @public + */ +export declare interface WebAttribution { + uri: string; + title: string; +} + +/** + * A grounding chunk from the web. + * + * Important: If using Grounding with Google Search, you are required to comply with the + * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for "Grounding with Google Search". + * + * @public + */ +export declare interface WebGroundingChunk { + /** + * The URI of the retrieved web page. + */ + uri?: string; + /** + * The title of the retrieved web page. + */ + title?: string; + /** + * The domain of the original URI from which the content was retrieved. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + domain?: string; +} + +/* Excluded from this release type: WebSocketHandler */ + +export { } diff --git a/frontend-old/node_modules/@firebase/ai/dist/ai.d.ts b/frontend-old/node_modules/@firebase/ai/dist/ai.d.ts new file mode 100644 index 0000000..9772835 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/ai.d.ts @@ -0,0 +1,3427 @@ +/** + * The Firebase AI Web SDK. + * + * @packageDocumentation + */ + +import { AppCheckTokenResult } from '@firebase/app-check-interop-types'; +import { FirebaseApp } from '@firebase/app'; +import { FirebaseAuthTokenData } from '@firebase/auth-interop-types'; +import { FirebaseError } from '@firebase/util'; + +/** + * An instance of the Firebase AI SDK. + * + * Do not create this instance directly. Instead, use {@link getAI | getAI()}. + * + * @public + */ +export declare interface AI { + /** + * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with. + */ + app: FirebaseApp; + /** + * A {@link Backend} instance that specifies the configuration for the target backend, + * either the Gemini Developer API (using {@link GoogleAIBackend}) or the + * Vertex AI Gemini API (using {@link VertexAIBackend}). + */ + backend: Backend; + /** + * Options applied to this {@link AI} instance. + */ + options?: AIOptions; + /** + * @deprecated use `AI.backend.location` instead. + * + * The location configured for this AI service instance, relevant for Vertex AI backends. + */ + location: string; +} + +/** + * Error class for the Firebase AI SDK. + * + * @public + */ +export declare class AIError extends FirebaseError { + readonly code: AIErrorCode; + readonly customErrorData?: CustomErrorData | undefined; + /** + * Constructs a new instance of the `AIError` class. + * + * @param code - The error code from {@link (AIErrorCode:type)}. + * @param message - A human-readable message describing the error. + * @param customErrorData - Optional error data. 
+ */ + constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); +} + +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +export declare const AIErrorCode: { + /** A generic error occurred. */ + readonly ERROR: "error"; + /** An error occurred in a request. */ + readonly REQUEST_ERROR: "request-error"; + /** An error occurred in a response. */ + readonly RESPONSE_ERROR: "response-error"; + /** An error occurred while performing a fetch. */ + readonly FETCH_ERROR: "fetch-error"; + /** An error occurred because an operation was attempted on a closed session. */ + readonly SESSION_CLOSED: "session-closed"; + /** An error associated with a Content object. */ + readonly INVALID_CONTENT: "invalid-content"; + /** An error due to the Firebase API not being enabled in the Console. */ + readonly API_NOT_ENABLED: "api-not-enabled"; + /** An error due to invalid Schema input. */ + readonly INVALID_SCHEMA: "invalid-schema"; + /** An error occurred due to a missing Firebase API key. */ + readonly NO_API_KEY: "no-api-key"; + /** An error occurred due to a missing Firebase app ID. */ + readonly NO_APP_ID: "no-app-id"; + /** An error occurred due to a model name not being specified during initialization. */ + readonly NO_MODEL: "no-model"; + /** An error occurred due to a missing project ID. */ + readonly NO_PROJECT_ID: "no-project-id"; + /** An error occurred while parsing. */ + readonly PARSE_FAILED: "parse-failed"; + /** An error occurred due an attempt to use an unsupported feature. */ + readonly UNSUPPORTED: "unsupported"; +}; + +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +export declare type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode]; + +/** + * Base class for Firebase AI model APIs. + * + * Instances of this class are associated with a specific Firebase AI {@link Backend} + * and provide methods for interacting with the configured generative model. + * + * @public + */ +export declare abstract class AIModel { + /** + * The fully qualified model resource name to use for generating images + * (for example, `publishers/google/models/imagen-3.0-generate-002`). + */ + readonly model: string; + /** + * @internal + */ + _apiSettings: ApiSettings; + /** + * Constructs a new instance of the {@link AIModel} class. + * + * This constructor should only be called from subclasses that provide + * a model API. + * + * @param ai - an {@link AI} instance. + * @param modelName - The name of the model being used. It can be in one of the following formats: + * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) + * - `models/my-model` (will resolve to `publishers/google/models/my-model`) + * - `publishers/my-publisher/models/my-model` (fully qualified model name) + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @internal + */ + protected constructor(ai: AI, modelName: string); + /** + * Normalizes the given model name to a fully qualified model resource name. + * + * @param modelName - The model name to normalize. + * @returns The fully qualified model resource name. + * + * @internal + */ + static normalizeModelName(modelName: string, backendType: BackendType): string; + /** + * @internal + */ + private static normalizeGoogleAIModelName; + /** + * @internal + */ + private static normalizeVertexAIModelName; +} + +/** + * Options for initializing the AI service using {@link getAI | getAI()}. 
+ * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API) + * and configuring its specific options (like location for Vertex AI). + * + * @public + */ +export declare interface AIOptions { + /** + * The backend configuration to use for the AI service instance. + * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}). + */ + backend?: Backend; + /** + * Whether to use App Check limited use tokens. Defaults to false. + */ + useLimitedUseAppCheckTokens?: boolean; +} + +/** + * Schema class representing a value that can conform to any of the provided sub-schemas. This is + * useful when a field can accept multiple distinct types or structures. + * @public + */ +export declare class AnyOfSchema extends Schema { + anyOf: TypedSchema[]; + constructor(schemaParams: SchemaParams & { + anyOf: TypedSchema[]; + }); + /** + * @internal + */ + toJSON(): SchemaRequest; +} + +declare interface ApiSettings { + apiKey: string; + project: string; + appId: string; + automaticDataCollectionEnabled?: boolean; + /** + * @deprecated Use `backend.location` instead. + */ + location: string; + backend: Backend; + getAuthToken?: () => Promise<FirebaseAuthTokenData | null>; + getAppCheckToken?: () => Promise<AppCheckTokenResult>; +} + +/** + * Schema class for "array" types. + * The `items` param should refer to the type of item that can be a member + * of the array. + * @public + */ +export declare class ArraySchema extends Schema { + items: TypedSchema; + constructor(schemaParams: SchemaParams, items: TypedSchema); + /** + * @internal + */ + toJSON(): SchemaRequest; +} + +/** + * A controller for managing an active audio conversation. + * + * @beta + */ +export declare interface AudioConversationController { + /** + * Stops the audio conversation, closes the microphone connection, and + * cleans up resources. Returns a promise that resolves when cleanup is complete. + */ + stop: () => Promise<void>; +} + +/** + * The audio transcription configuration. + */ +export declare interface AudioTranscriptionConfig { +} + +/** + * Abstract base class representing the configuration for an AI service backend. + * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for + * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and + * {@link VertexAIBackend} for the Vertex AI Gemini API. + * + * @public + */ +export declare abstract class Backend { + /** + * Specifies the backend type. + */ + readonly backendType: BackendType; + /** + * Protected constructor for use by subclasses. + * @param type - The backend type. + */ + protected constructor(type: BackendType); +} + +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase AI SDK. + * This determines which backend service (Vertex AI Gemini API or Gemini Developer API) + * the SDK will communicate with. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +export declare const BackendType: { + /** + * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + readonly VERTEX_AI: "VERTEX_AI"; + /** + * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}). 
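+ * For illustration only, a sketch of branching on the configured backend (assuming `ai` is an
+ * {@link AI} instance):
+ * ```javascript
+ * // Assumes `ai` is an existing AI instance.
+ * if (ai.backend.backendType === BackendType.GOOGLE_AI) {
+ *   // The instance targets the Gemini Developer API.
+ * }
+ * ```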
+ * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + readonly GOOGLE_AI: "GOOGLE_AI"; +}; + +/** + * Type alias representing valid backend types. + * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`. + * + * @public + */ +export declare type BackendType = (typeof BackendType)[keyof typeof BackendType]; + +/** + * Base parameters for a number of methods. + * @public + */ +export declare interface BaseParams { + safetySettings?: SafetySetting[]; + generationConfig?: GenerationConfig; +} + +/** + * Reason that a prompt was blocked. + * @public + */ +export declare const BlockReason: { + /** + * Content was blocked by safety settings. + */ + readonly SAFETY: "SAFETY"; + /** + * Content was blocked, but the reason is uncategorized. + */ + readonly OTHER: "OTHER"; + /** + * Content was blocked because it contained terms from the terminology blocklist. + */ + readonly BLOCKLIST: "BLOCKLIST"; + /** + * Content was blocked due to prohibited content. + */ + readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT"; +}; + +/** + * Reason that a prompt was blocked. + * @public + */ +export declare type BlockReason = (typeof BlockReason)[keyof typeof BlockReason]; + +/** + * Schema class for "boolean" types. + * @public + */ +export declare class BooleanSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} + +/** + * ChatSession class that enables sending chat messages and stores + * history of sent and received messages so far. + * + * @public + */ +export declare class ChatSession { + model: string; + private chromeAdapter?; + params?: StartChatParams | undefined; + requestOptions?: RequestOptions | undefined; + private _apiSettings; + private _history; + private _sendPromise; + constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); + /** + * Gets the chat history so far. Blocked prompts are not added to history. + * Neither blocked candidates nor the prompts that generated them are added + * to history. + */ + getHistory(): Promise<Content[]>; + /** + * Sends a chat message and receives a non-streaming + * {@link GenerateContentResult} + */ + sendMessage(request: string | Array<string | Part>): Promise<GenerateContentResult>; + /** + * Sends a chat message and receives the response as a + * {@link GenerateContentStreamResult} containing an iterable stream + * and a response promise. + */ + sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>; +} + +/** + * Defines an inference "backend" that uses Chrome's on-device model, + * and encapsulates logic for detecting when on-device inference is + * possible. + * + * These methods should not be called directly by the user. + * + * @beta + */ +export declare interface ChromeAdapter { + /** + * Checks if the on-device model is capable of handling a given + * request. + * @param request - A potential request to be passed to the model. + */ + isAvailable(request: GenerateContentRequest): Promise<boolean>; + /** + * Generates content using on-device inference. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContent} for generating + * content using in-cloud inference. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + */ + generateContent(request: GenerateContentRequest): Promise<Response>; + /** + * Generates a content stream using on-device inference. 
+ * + * @remarks + * This is comparable to {@link GenerativeModel.generateContentStream} for generating + * a content stream using in-cloud inference. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + */ + generateContentStream(request: GenerateContentRequest): Promise<Response>; + /** + * @internal + */ + countTokens(request: CountTokensRequest): Promise<Response>; +} + +/** + * A single citation. + * @public + */ +export declare interface Citation { + startIndex?: number; + endIndex?: number; + uri?: string; + license?: string; + /** + * The title of the cited source, if available. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + title?: string; + /** + * The publication date of the cited source, if available. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + publicationDate?: Date_2; +} + +/** + * Citation metadata that may be found on a {@link GenerateContentCandidate}. + * @public + */ +export declare interface CitationMetadata { + citations: Citation[]; +} + +/** + * The results of code execution run by the model. + * + * @beta + */ +export declare interface CodeExecutionResult { + /** + * The result of the code execution. + */ + outcome?: Outcome; + /** + * The output from the code execution, or an error message + * if it failed. + */ + output?: string; +} + +/** + * Represents the code execution result from the model. + * + * @beta + */ +export declare interface CodeExecutionResultPart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: never; + thought?: never; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: CodeExecutionResult; +} + +/** + * A tool that enables the model to use code execution. + * + * @beta + */ +export declare interface CodeExecutionTool { + /** + * Specifies the Google Search configuration. + * Currently, this is an empty object, but it's reserved for future configuration options. + */ + codeExecution: {}; +} + +/** + * Content type for both prompts and response candidates. + * @public + */ +export declare interface Content { + role: Role; + parts: Part[]; +} + +/** + * Params for calling {@link GenerativeModel.countTokens} + * @public + */ +export declare interface CountTokensRequest { + contents: Content[]; + /** + * Instructions that direct the model to behave a certain way. + */ + systemInstruction?: string | Part | Content; + /** + * {@link Tool} configuration. + */ + tools?: Tool[]; + /** + * Configuration options that control how the model generates a response. + */ + generationConfig?: GenerationConfig; +} + +/** + * Response from calling {@link GenerativeModel.countTokens}. + * @public + */ +export declare interface CountTokensResponse { + /** + * The total number of tokens counted across all instances from the request. + */ + totalTokens: number; + /** + * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`. + * + * The total number of billable characters counted across all instances + * from the request. + */ + totalBillableCharacters?: number; + /** + * The breakdown, by modality, of how many tokens are consumed by the prompt. + */ + promptTokensDetails?: ModalityTokenCount[]; +} + +/** + * Details object that contains data originating from a bad HTTP response. 
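+ *
+ * For illustration only, a sketch of inspecting this data when catching an {@link AIError}:
+ * ```javascript
+ * try {
+ *   // ... an SDK call ...
+ * } catch (e) {
+ *   // Log the HTTP status details if they were attached to the error.
+ *   if (e instanceof AIError && e.customErrorData) {
+ *     console.error(e.code, e.customErrorData.status, e.customErrorData.statusText);
+ *   }
+ * }
+ * ```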
+ * + * @public + */ +export declare interface CustomErrorData { + /** HTTP status code of the error response. */ + status?: number; + /** HTTP status text of the error response. */ + statusText?: string; + /** Response from a {@link GenerateContentRequest} */ + response?: GenerateContentResponse; + /** Optional additional details about the error. */ + errorDetails?: ErrorDetails[]; +} + +/** + * Protobuf google.type.Date + * @public + */ +declare interface Date_2 { + year: number; + month: number; + day: number; +} +export { Date_2 as Date } + +/** + * Response object wrapped with helper methods. + * + * @public + */ +export declare interface EnhancedGenerateContentResponse extends GenerateContentResponse { + /** + * Returns the text string from the response, if available. + * Throws if the prompt or candidate was blocked. + */ + text: () => string; + /** + * Aggregates and returns every {@link InlineDataPart} from the first candidate of + * {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + */ + inlineDataParts: () => InlineDataPart[] | undefined; + /** + * Aggregates and returns every {@link FunctionCall} from the first candidate of + * {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + */ + functionCalls: () => FunctionCall[] | undefined; + /** + * Aggregates and returns every {@link TextPart} with their `thought` property set + * to `true` from the first candidate of {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + * + * @remarks + * Thought summaries provide a brief overview of the model's internal thinking process, + * offering insight into how it arrived at the final answer. This can be useful for + * debugging, understanding the model's reasoning, and verifying its accuracy. + * + * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is + * set to `true`. + */ + thoughtSummary: () => string | undefined; + /** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ + inferenceSource?: InferenceSource; +} + +/** + * Details object that may be included in an error response. + * + * @public + */ +export declare interface ErrorDetails { + '@type'?: string; + /** The reason for the error. */ + reason?: string; + /** The domain where the error occurred. */ + domain?: string; + /** Additional metadata about the error. */ + metadata?: Record<string, unknown>; + /** Any other relevant information about the error. */ + [key: string]: unknown; +} + +/** + * An interface for executable code returned by the model. + * + * @beta + */ +export declare interface ExecutableCode { + /** + * The programming language of the code. + */ + language?: Language; + /** + * The source code to be executed. + */ + code?: string; +} + +/** + * Represents the code that is executed by the model. + * + * @beta + */ +export declare interface ExecutableCodePart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: never; + thought?: never; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: ExecutableCode; + codeExecutionResult?: never; +} + +/** + * Data pointing to a file uploaded on Google Cloud Storage. 
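+ *
+ * For illustration only, a sketch of a {@link FileDataPart} referencing such a file (the bucket
+ * and object names are arbitrary examples):
+ * ```javascript
+ * // Illustrative bucket and object names only.
+ * const part = {
+ *   fileData: { mimeType: "video/mp4", fileUri: "gs://my-bucket/videos/demo.mp4" }
+ * };
+ * ```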
+ * @public
+ */
+export declare interface FileData {
+ mimeType: string;
+ fileUri: string;
+}
+
+/**
+ * Content part interface if the part represents {@link FileData}
+ * @public
+ */
+export declare interface FileDataPart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ fileData: FileData;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+
+/**
+ * Reason that a candidate finished.
+ * @public
+ */
+export declare const FinishReason: {
+ /**
+ * Natural stop point of the model or provided stop sequence.
+ */
+ readonly STOP: "STOP";
+ /**
+ * The maximum number of tokens as specified in the request was reached.
+ */
+ readonly MAX_TOKENS: "MAX_TOKENS";
+ /**
+ * The candidate content was flagged for safety reasons.
+ */
+ readonly SAFETY: "SAFETY";
+ /**
+ * The candidate content was flagged for recitation reasons.
+ */
+ readonly RECITATION: "RECITATION";
+ /**
+ * Unknown reason.
+ */
+ readonly OTHER: "OTHER";
+ /**
+ * The candidate content contained forbidden terms.
+ */
+ readonly BLOCKLIST: "BLOCKLIST";
+ /**
+ * The candidate content potentially contained prohibited content.
+ */
+ readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
+ /**
+ * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).
+ */
+ readonly SPII: "SPII";
+ /**
+ * The function call generated by the model was invalid.
+ */
+ readonly MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL";
+};
+
+/**
+ * Reason that a candidate finished.
+ * @public
+ */
+export declare type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];
+
+/**
+ * A predicted {@link FunctionCall} returned from the model
+ * that contains a string representing the {@link FunctionDeclaration.name}
+ * and a structured JSON object containing the parameters and their values.
+ * @public
+ */
+export declare interface FunctionCall {
+ /**
+ * The id of the function call. This must be sent back in the associated {@link FunctionResponse}.
+ *
+ * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
+ * When using the Vertex AI Gemini API ({@link VertexAIBackend}), this property will be
+ * `undefined`.
+ */
+ id?: string;
+ name: string;
+ args: object;
+}
+
+/**
+ * @public
+ */
+export declare interface FunctionCallingConfig {
+ mode?: FunctionCallingMode;
+ allowedFunctionNames?: string[];
+}
+
+/**
+ * @public
+ */
+export declare const FunctionCallingMode: {
+ /**
+ * Default model behavior; model decides to predict either a function call
+ * or a natural language response.
+ */
+ readonly AUTO: "AUTO";
+ /**
+ * Model is constrained to always predicting a function call only.
+ * If `allowed_function_names` is set, the predicted function call will be
+ * limited to any one of `allowed_function_names`, else the predicted
+ * function call will be any one of the provided `function_declarations`.
+ */
+ readonly ANY: "ANY";
+ /**
+ * Model will not predict any function call. Model behavior is the same as when
+ * not passing any function declarations.
+ */
+ readonly NONE: "NONE";
+};
+
+/**
+ * @public
+ */
+export declare type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];
+
+/**
+ * Content part interface if the part represents a {@link FunctionCall}.
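+ *
+ * A minimal, illustrative round trip (assuming a {@link GenerativeModel} named `model` that was
+ * configured with matching {@link FunctionDeclarationsTool | function declarations}; `getWeather`
+ * is a hypothetical helper): read a predicted {@link FunctionCall}, run your own implementation,
+ * and return a {@link FunctionResponsePart} in the next request.
+ *
+ * @example
+ * ```javascript
+ * const prompt = "What's the weather in Boston?";
+ * const result = await model.generateContent(prompt);
+ * const [call] = result.response.functionCalls() ?? [];
+ * if (call) {
+ *   const weather = await getWeather(call.args); // your own function implementation
+ *   await model.generateContent({
+ *     contents: [
+ *       { role: "user", parts: [{ text: prompt }] },
+ *       { role: "model", parts: [{ functionCall: call }] },
+ *       { role: "function", parts: [{ functionResponse: { name: call.name, response: weather } }] }
+ *     ]
+ *   });
+ * }
+ * ```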
+ * @public
+ */
+export declare interface FunctionCallPart {
+ text?: never;
+ inlineData?: never;
+ functionCall: FunctionCall;
+ functionResponse?: never;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+
+/**
+ * Structured representation of a function declaration as defined by the
+ * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}. Included in this
+ * declaration are the function name and parameters. This
+ * `FunctionDeclaration` is a representation of a block of code that can be used
+ * as a Tool by the model and executed by the client.
+ * @public
+ */
+export declare interface FunctionDeclaration {
+ /**
+ * The name of the function to call. Must start with a letter or an
+ * underscore. Must contain only characters a-z, A-Z, 0-9, underscores, and dashes, with
+ * a maximum length of 64.
+ */
+ name: string;
+ /**
+ * Description and purpose of the function. The model uses it to decide
+ * how and whether to call the function.
+ */
+ description: string;
+ /**
+ * Optional. Describes the parameters to this function in JSON Schema Object
+ * format. Reflects the OpenAPI 3.0.3 Parameter Object. Parameter names are
+ * case-sensitive. For a function with no parameters, this can be left unset.
+ */
+ parameters?: ObjectSchema | ObjectSchemaRequest;
+}
+
+/**
+ * A `FunctionDeclarationsTool` is a piece of code that enables the system to
+ * interact with external systems to perform an action, or set of actions,
+ * outside of the knowledge and scope of the model.
+ * @public
+ */
+export declare interface FunctionDeclarationsTool {
+ /**
+ * Optional. One or more function declarations
+ * to be passed to the model along with the current user query. The model may
+ * decide to call a subset of these functions by populating
+ * {@link FunctionCall} in the response. The user should
+ * provide a {@link FunctionResponse} for each
+ * function call in the next turn. Based on the function responses, the model will
+ * generate the final response back to the user. A maximum of 64 function
+ * declarations can be provided.
+ */
+ functionDeclarations?: FunctionDeclaration[];
+}
+
+/**
+ * The result output from a {@link FunctionCall}, containing a string
+ * that represents the {@link FunctionDeclaration.name}
+ * and a structured JSON object with any output
+ * from the function. This is used as context for the model,
+ * and should contain the result of a {@link FunctionCall}
+ * made based on model prediction.
+ * @public
+ */
+export declare interface FunctionResponse {
+ /**
+ * The id of the {@link FunctionCall}.
+ *
+ * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
+ * When using the Vertex AI Gemini API ({@link VertexAIBackend}), this property will be
+ * `undefined`.
+ */
+ id?: string;
+ name: string;
+ response: object;
+}
+
+/**
+ * Content part interface if the part represents {@link FunctionResponse}.
+ * @public
+ */
+export declare interface FunctionResponsePart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse: FunctionResponse;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+
+/**
+ * A candidate returned as part of a {@link GenerateContentResponse}.
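+ *
+ * For illustration, a caller might check the first candidate's {@link (FinishReason:type)}
+ * before using the response (assuming a {@link GenerativeModel} named `model`; the prompt is a
+ * placeholder):
+ *
+ * @example
+ * ```javascript
+ * const { response } = await model.generateContent("Write a haiku about the sea.");
+ * const candidate = response.candidates?.[0];
+ * if (candidate?.finishReason === FinishReason.MAX_TOKENS) {
+ *   // The response was cut off; consider raising `maxOutputTokens`.
+ *   console.warn("Candidate was truncated:", candidate.finishMessage);
+ * }
+ * ```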
+ * @public + */ +export declare interface GenerateContentCandidate { + index: number; + content: Content; + finishReason?: FinishReason; + finishMessage?: string; + safetyRatings?: SafetyRating[]; + citationMetadata?: CitationMetadata; + groundingMetadata?: GroundingMetadata; + urlContextMetadata?: URLContextMetadata; +} + +/** + * Request sent through {@link GenerativeModel.generateContent} + * @public + */ +export declare interface GenerateContentRequest extends BaseParams { + contents: Content[]; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} + +/** + * Individual response from {@link GenerativeModel.generateContent} and + * {@link GenerativeModel.generateContentStream}. + * `generateContentStream()` will return one in each chunk until + * the stream is done. + * @public + */ +export declare interface GenerateContentResponse { + candidates?: GenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} + +/** + * Result object returned from {@link GenerativeModel.generateContent} call. + * + * @public + */ +export declare interface GenerateContentResult { + response: EnhancedGenerateContentResponse; +} + +/** + * Result object returned from {@link GenerativeModel.generateContentStream} call. + * Iterate over `stream` to get chunks as they come in and/or + * use the `response` promise to get the aggregated response when + * the stream is done. + * + * @public + */ +export declare interface GenerateContentStreamResult { + stream: AsyncGenerator<EnhancedGenerateContentResponse>; + response: Promise<EnhancedGenerateContentResponse>; +} + +/** + * Config options for content-related requests + * @public + */ +export declare interface GenerationConfig { + candidateCount?: number; + stopSequences?: string[]; + maxOutputTokens?: number; + temperature?: number; + topP?: number; + topK?: number; + presencePenalty?: number; + frequencyPenalty?: number; + /** + * Output response MIME type of the generated candidate text. + * Supported MIME types are `text/plain` (default, text output), + * `application/json` (JSON response in the candidates), and + * `text/x.enum`. + */ + responseMimeType?: string; + /** + * Output response schema of the generated candidate text. This + * value can be a class generated with a {@link Schema} static method + * like `Schema.string()` or `Schema.object()` or it can be a plain + * JS object matching the {@link SchemaRequest} interface. + * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently + * this is limited to `application/json` and `text/x.enum`. + */ + responseSchema?: TypedSchema | SchemaRequest; + /** + * Generation modalities to be returned in generation responses. + * + * @remarks + * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}. + * - Only image generation (`ResponseModality.IMAGE`) is supported. + * + * @beta + */ + responseModalities?: ResponseModality[]; + /** + * Configuration for "thinking" behavior of compatible Gemini models. + */ + thinkingConfig?: ThinkingConfig; +} + +/** + * Interface for sending an image. + * @public + */ +export declare interface GenerativeContentBlob { + mimeType: string; + /** + * Image as a base64 string. + */ + data: string; +} + +/** + * Class for generative model APIs. 
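+ *
+ * Instances are typically obtained with {@link getGenerativeModel}. A minimal usage sketch,
+ * assuming an initialized {@link @firebase/app#FirebaseApp} named `firebaseApp`; the model name
+ * is illustrative, so pick one that is available to your project:
+ *
+ * @example
+ * ```javascript
+ * const ai = getAI(firebaseApp, { backend: new GoogleAIBackend() });
+ * const model = getGenerativeModel(ai, { model: 'gemini-2.5-flash' });
+ * const result = await model.generateContent('Tell me a joke.');
+ * console.log(result.response.text());
+ * ```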
+ * @public + */ +export declare class GenerativeModel extends AIModel { + private chromeAdapter?; + generationConfig: GenerationConfig; + safetySettings: SafetySetting[]; + requestOptions?: RequestOptions; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: Content; + constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined); + /** + * Makes a single non-streaming call to the model + * and returns an object containing a single {@link GenerateContentResponse}. + */ + generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>; + /** + * Makes a single streaming call to the model + * and returns an object containing an iterable stream that iterates + * over all chunks in the streaming response as well as + * a promise that returns the final aggregated response. + */ + generateContentStream(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentStreamResult>; + /** + * Gets a new {@link ChatSession} instance which can be used for + * multi-turn chats. + */ + startChat(startChatParams?: StartChatParams): ChatSession; + /** + * Counts the tokens in the provided request. + */ + countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>; +} + +/** + * Returns the default {@link AI} instance that is associated with the provided + * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the + * default settings. + * + * @example + * ```javascript + * const ai = getAI(app); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Gemini Developer API (via Google AI). + * const ai = getAI(app, { backend: new GoogleAIBackend() }); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Vertex AI Gemini API. + * const ai = getAI(app, { backend: new VertexAIBackend() }); + * ``` + * + * @param app - The {@link @firebase/app#FirebaseApp} to use. + * @param options - {@link AIOptions} that configure the AI instance. + * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}. + * + * @public + */ +export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI; + +/** + * Returns a {@link GenerativeModel} class with methods for inference + * and other functionality. + * + * @public + */ +export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel; + +/** + * Returns an {@link ImagenModel} class with methods for using Imagen. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when making Imagen requests. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @public + */ +export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; + +/** + * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication. + * + * The Live API is only supported in modern browser windows and Node >= 22. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when setting up a {@link LiveSession}. 
+ * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @beta + */ +export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel; + +/** + * Configuration class for the Gemini Developer API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Gemini Developer API as the backend. + * + * @public + */ +export declare class GoogleAIBackend extends Backend { + /** + * Creates a configuration object for the Gemini Developer API backend. + */ + constructor(); +} + +/** + * @internal + */ +export declare interface GoogleAICitationMetadata { + citationSources: Citation[]; +} + +/** + * @internal + */ +export declare interface GoogleAICountTokensRequest { + generateContentRequest: { + model: string; + contents: Content[]; + systemInstruction?: string | Part | Content; + tools?: Tool[]; + generationConfig?: GenerationConfig; + }; +} + +/** + * @internal + */ +export declare interface GoogleAIGenerateContentCandidate { + index: number; + content: Content; + finishReason?: FinishReason; + finishMessage?: string; + safetyRatings?: SafetyRating[]; + citationMetadata?: GoogleAICitationMetadata; + groundingMetadata?: GroundingMetadata; + urlContextMetadata?: URLContextMetadata; +} + +/** + * @internal + */ +export declare interface GoogleAIGenerateContentResponse { + candidates?: GoogleAIGenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} + +/** + * Specifies the Google Search configuration. + * + * @remarks Currently, this is an empty object, but it's reserved for future configuration options. + * + * @public + */ +export declare interface GoogleSearch { +} + +/** + * A tool that allows a Gemini model to connect to Google Search to access and incorporate + * up-to-date information from the web into its responses. + * + * Important: If using Grounding with Google Search, you are required to comply with the + * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + * + * @public + */ +export declare interface GoogleSearchTool { + /** + * Specifies the Google Search configuration. + * Currently, this is an empty object, but it's reserved for future configuration options. + * + * When using this feature, you are required to comply with the "Grounding with Google Search" + * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + */ + googleSearch: GoogleSearch; +} + +/** + * Represents a chunk of retrieved data that supports a claim in the model's response. This is part + * of the grounding information provided when grounding is enabled. + * + * @public + */ +export declare interface GroundingChunk { + /** + * Contains details if the grounding chunk is from a web source. + */ + web?: WebGroundingChunk; +} + +/** + * Metadata returned when grounding is enabled. + * + * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}). 
+ *
+ * Important: If using Grounding with Google Search, you are required to comply with the
+ * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
+ * section within the Service Specific Terms).
+ *
+ * @public
+ */
+export declare interface GroundingMetadata {
+ /**
+ * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be
+ * embedded in an app to display a Google Search entry point for follow-up web searches related to
+ * a model's "Grounded Response".
+ */
+ searchEntryPoint?: SearchEntrypoint;
+ /**
+ * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content
+ * (for example, from a web page) that the model used to ground its response.
+ */
+ groundingChunks?: GroundingChunk[];
+ /**
+ * A list of {@link GroundingSupport} objects. Each object details how specific segments of the
+ * model's response are supported by the `groundingChunks`.
+ */
+ groundingSupports?: GroundingSupport[];
+ /**
+ * A list of web search queries that the model performed to gather the grounding information.
+ * These can be used to allow users to explore the search results themselves.
+ */
+ webSearchQueries?: string[];
+ /**
+ * @deprecated Use {@link GroundingSupport} instead.
+ */
+ retrievalQueries?: string[];
+}
+
+/**
+ * Provides information about how a specific segment of the model's response is supported by the
+ * retrieved grounding chunks.
+ *
+ * @public
+ */
+export declare interface GroundingSupport {
+ /**
+ * Specifies the segment of the model's response content that this grounding support pertains to.
+ */
+ segment?: Segment;
+ /**
+ * A list of indices that refer to specific {@link GroundingChunk} objects within the
+ * {@link GroundingMetadata.groundingChunks} array. These referenced chunks
+ * are the sources that support the claim made in the associated `segment` of the response.
+ * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,
+ * and `groundingChunks[4]` are the retrieved content supporting this part of the response.
+ */
+ groundingChunkIndices?: number[];
+}
+
+/**
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
+ *
+ * @public
+ */
+export declare const HarmBlockMethod: {
+ /**
+ * The harm block method uses both probability and severity scores.
+ */
+ readonly SEVERITY: "SEVERITY";
+ /**
+ * The harm block method uses the probability score.
+ */
+ readonly PROBABILITY: "PROBABILITY";
+};
+
+/**
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
+ *
+ * @public
+ */
+export declare type HarmBlockMethod = (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];
+
+/**
+ * Threshold above which a prompt or candidate will be blocked.
+ * @public
+ */
+export declare const HarmBlockThreshold: {
+ /**
+ * Content with `NEGLIGIBLE` will be allowed.
+ */
+ readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
+ /**
+ * Content with `NEGLIGIBLE` and `LOW` will be allowed.
+ */
+ readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
+ /**
+ * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.
+ */
+ readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
+ /**
+ * All content will be allowed.
+ */ + readonly BLOCK_NONE: "BLOCK_NONE"; + /** + * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding + * to the {@link (HarmCategory:type)} will not be present in the response. + */ + readonly OFF: "OFF"; +}; + +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +export declare type HarmBlockThreshold = (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold]; + +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +export declare const HarmCategory: { + readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH"; + readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT"; + readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT"; + readonly HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT"; +}; + +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +export declare type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory]; + +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +export declare const HarmProbability: { + /** + * Content has a negligible chance of being unsafe. + */ + readonly NEGLIGIBLE: "NEGLIGIBLE"; + /** + * Content has a low chance of being unsafe. + */ + readonly LOW: "LOW"; + /** + * Content has a medium chance of being unsafe. + */ + readonly MEDIUM: "MEDIUM"; + /** + * Content has a high chance of being unsafe. + */ + readonly HIGH: "HIGH"; +}; + +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +export declare type HarmProbability = (typeof HarmProbability)[keyof typeof HarmProbability]; + +/** + * Harm severity levels. + * @public + */ +export declare const HarmSeverity: { + /** + * Negligible level of harm severity. + */ + readonly HARM_SEVERITY_NEGLIGIBLE: "HARM_SEVERITY_NEGLIGIBLE"; + /** + * Low level of harm severity. + */ + readonly HARM_SEVERITY_LOW: "HARM_SEVERITY_LOW"; + /** + * Medium level of harm severity. + */ + readonly HARM_SEVERITY_MEDIUM: "HARM_SEVERITY_MEDIUM"; + /** + * High level of harm severity. + */ + readonly HARM_SEVERITY_HIGH: "HARM_SEVERITY_HIGH"; + /** + * Harm severity is not supported. + * + * @remarks + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + readonly HARM_SEVERITY_UNSUPPORTED: "HARM_SEVERITY_UNSUPPORTED"; +}; + +/** + * Harm severity levels. + * @public + */ +export declare type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity]; + +/** + * Configures hybrid inference. + * @beta + */ +export declare interface HybridParams { + /** + * Specifies on-device or in-cloud inference. Defaults to prefer on-device. + */ + mode: InferenceMode; + /** + * Optional. Specifies advanced params for on-device inference. + */ + onDeviceParams?: OnDeviceParams; + /** + * Optional. Specifies advanced params for in-cloud inference. + */ + inCloudParams?: ModelParams; +} + +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +export declare const ImagenAspectRatio: { + /** + * Square (1:1) aspect ratio. + */ + readonly SQUARE: "1:1"; + /** + * Landscape (3:4) aspect ratio. 
+ */ + readonly LANDSCAPE_3x4: "3:4"; + /** + * Portrait (4:3) aspect ratio. + */ + readonly PORTRAIT_4x3: "4:3"; + /** + * Landscape (16:9) aspect ratio. + */ + readonly LANDSCAPE_16x9: "16:9"; + /** + * Portrait (9:16) aspect ratio. + */ + readonly PORTRAIT_9x16: "9:16"; +}; + +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +export declare type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio]; + +/** + * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket. + * + * This feature is not available yet. + * @public + */ +export declare interface ImagenGCSImage { + /** + * The MIME type of the image; either `"image/png"` or `"image/jpeg"`. + * + * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}. + */ + mimeType: string; + /** + * The URI of the file stored in a Cloud Storage for Firebase bucket. + * + * @example `"gs://bucket-name/path/sample_0.jpg"`. + */ + gcsURI: string; +} + +/** + * Configuration options for generating images with Imagen. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for + * more details. + * + * @public + */ +export declare interface ImagenGenerationConfig { + /** + * A description of what should be omitted from the generated images. + * + * Support for negative prompts depends on the Imagen model. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details. + * + * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions + * greater than `imagen-3.0-generate-002`. + */ + negativePrompt?: string; + /** + * The number of images to generate. The default value is 1. + * + * The number of sample images that may be generated in each request depends on the model + * (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a> + * documentation for more details. + */ + numberOfImages?: number; + /** + * The aspect ratio of the generated images. The default value is square 1:1. + * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)} + * for more details. + */ + aspectRatio?: ImagenAspectRatio; + /** + * The image format of the generated images. The default is PNG. + * + * See {@link ImagenImageFormat} for more details. + */ + imageFormat?: ImagenImageFormat; + /** + * Whether to add an invisible watermark to generated images. + * + * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate + * that they are AI generated. If set to `false`, watermarking will be disabled. + * + * For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a> + * documentation for more details. + * + * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true, + * and cannot be turned off. + */ + addWatermark?: boolean; +} + +/** + * The response from a request to generate images with Imagen. 
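+ *
+ * A brief, illustrative check of the response shape, assuming an {@link ImagenModel} named
+ * `imagenModel`; the prompt is a placeholder:
+ *
+ * @example
+ * ```javascript
+ * const response = await imagenModel.generateImages('A watercolor of a lighthouse');
+ * if (response.filteredReason) {
+ *   console.log('Some images were filtered:', response.filteredReason);
+ * }
+ * console.log(`Received ${response.images.length} image(s).`);
+ * ```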
+ * + * @public + */ +export declare interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> { + /** + * The images generated by Imagen. + * + * The number of images generated may be fewer than the number requested if one or more were + * filtered out; see `filteredReason`. + */ + images: T[]; + /** + * The reason that images were filtered out. This property will only be defined if one + * or more images were filtered. + * + * Images may be filtered out due to the {@link (ImagenSafetyFilterLevel:type)}, + * {@link (ImagenPersonFilterLevel:type)}, or filtering included in the model. + * The filter levels may be adjusted in your {@link ImagenSafetySettings}. + * + * See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen} + * for more details. + */ + filteredReason?: string; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Defines the image format for images generated by Imagen. + * + * Use this class to specify the desired format (JPEG or PNG) and compression quality + * for images generated by Imagen. This is typically included as part of + * {@link ImagenModelParams}. + * + * @example + * ```javascript + * const imagenModelParams = { + * // ... other ImagenModelParams + * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75. + * } + * ``` + * + * @public + */ +export declare class ImagenImageFormat { + /** + * The MIME type. + */ + mimeType: string; + /** + * The level of compression (a number between 0 and 100). + */ + compressionQuality?: number; + private constructor(); + /** + * Creates an {@link ImagenImageFormat} for a JPEG image. + * + * @param compressionQuality - The level of compression (a number between 0 and 100). + * @returns An {@link ImagenImageFormat} object for a JPEG image. + * + * @public + */ + static jpeg(compressionQuality?: number): ImagenImageFormat; + /** + * Creates an {@link ImagenImageFormat} for a PNG image. + * + * @returns An {@link ImagenImageFormat} object for a PNG image. + * + * @public + */ + static png(): ImagenImageFormat; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * An image generated by Imagen, represented as inline data. 
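+ *
+ * For display in a browser, the inline data can be turned into a data URL. A small sketch,
+ * assuming `response` is an {@link ImagenGenerationResponse} and `imgElement` is an existing
+ * `<img>` element:
+ *
+ * @example
+ * ```javascript
+ * const [image] = response.images;
+ * if (image) {
+ *   imgElement.src = `data:${image.mimeType};base64,${image.bytesBase64Encoded}`;
+ * }
+ * ```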
+ *
+ * @public
+ */
+export declare interface ImagenInlineImage {
+ /**
+ * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
+ *
+ * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
+ */
+ mimeType: string;
+ /**
+ * The base64-encoded image data.
+ */
+ bytesBase64Encoded: string;
+}
+
+/**
+ * Class for Imagen model APIs.
+ *
+ * This class provides methods for generating images using the Imagen model.
+ *
+ * @example
+ * ```javascript
+ * const imagen = new ImagenModel(
+ *   ai,
+ *   {
+ *     model: 'imagen-3.0-generate-002'
+ *   }
+ * );
+ *
+ * const response = await imagen.generateImages('A photo of a cat');
+ * if (response.images.length > 0) {
+ *   console.log(response.images[0].bytesBase64Encoded);
+ * }
+ * ```
+ *
+ * @public
+ */
+export declare class ImagenModel extends AIModel {
+ requestOptions?: RequestOptions | undefined;
+ /**
+ * The Imagen generation configuration.
+ */
+ generationConfig?: ImagenGenerationConfig;
+ /**
+ * Safety settings for filtering inappropriate content.
+ */
+ safetySettings?: ImagenSafetySettings;
+ /**
+ * Constructs a new instance of the {@link ImagenModel} class.
+ *
+ * @param ai - an {@link AI} instance.
+ * @param modelParams - Parameters to use when making requests to Imagen.
+ * @param requestOptions - Additional options to use when making requests.
+ *
+ * @throws If the `apiKey` or `projectId` fields are missing in your
+ * Firebase config.
+ */
+ constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined);
+ /**
+ * Generates images using the Imagen model and returns them as
+ * base64-encoded strings.
+ *
+ * @param prompt - A text prompt describing the image(s) to generate.
+ * @returns A promise that resolves to an {@link ImagenGenerationResponse}
+ * object containing the generated images.
+ *
+ * @throws If the request to generate images fails. This happens if the
+ * prompt is blocked.
+ *
+ * @remarks
+ * If the prompt was not blocked, but one or more of the generated images were filtered, the
+ * returned object will have a `filteredReason` property.
+ * If all images are filtered, the `images` array will be empty.
+ *
+ * @public
+ */
+ generateImages(prompt: string): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
+ /**
+ * Generates images to Cloud Storage for Firebase using the Imagen model.
+ *
+ * @internal This method is temporarily internal.
+ *
+ * @param prompt - A text prompt describing the image(s) to generate.
+ * @param gcsURI - The URI of a file stored in a Cloud Storage for Firebase bucket.
+ * This should be a directory. For example, `gs://my-bucket/my-directory/`.
+ * @returns A promise that resolves to an {@link ImagenGenerationResponse}
+ * object containing the URLs of the generated images.
+ *
+ * @throws If the request to generate images fails. This happens if
+ * the prompt is blocked.
+ *
+ * @remarks
+ * If the prompt was not blocked, but one or more of the generated images were filtered, the
+ * returned object will have a `filteredReason` property.
+ * If all images are filtered, the `images` array will be empty.
+ */
+ generateImagesGCS(prompt: string, gcsURI: string): Promise<ImagenGenerationResponse<ImagenGCSImage>>;
+}
+
+/**
+ * Parameters for configuring an {@link ImagenModel}.
+ *
+ * @public
+ */
+export declare interface ImagenModelParams {
+ /**
+ * The Imagen model to use for generating images.
+ * For example: `imagen-3.0-generate-002`.
+ *
+ * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
+ *
+ * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}
+ * for a full list of supported Imagen 3 models.
+ */
+ model: string;
+ /**
+ * Configuration options for generating images with Imagen.
+ */
+ generationConfig?: ImagenGenerationConfig;
+ /**
+ * Safety settings for filtering potentially inappropriate content.
+ */
+ safetySettings?: ImagenSafetySettings;
+}
+
+/**
+ * A filter level controlling whether generation of images containing people or faces is allowed.
+ *
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
+ * documentation for more details.
+ *
+ * @public
+ */
+export declare const ImagenPersonFilterLevel: {
+ /**
+ * Disallow generation of images containing people or faces; images of people are filtered out.
+ */
+ readonly BLOCK_ALL: "dont_allow";
+ /**
+ * Allow generation of images containing adults only; images of children are filtered out.
+ *
+ * Generation of images containing people or faces may require your use case to be
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+ * for more details.
+ */
+ readonly ALLOW_ADULT: "allow_adult";
+ /**
+ * Allow generation of images containing people of all ages.
+ *
+ * Generation of images containing people or faces may require your use case to be
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+ * for more details.
+ */
+ readonly ALLOW_ALL: "allow_all";
+};
+
+/**
+ * A filter level controlling whether generation of images containing people or faces is allowed.
+ *
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
+ * documentation for more details.
+ *
+ * @public
+ */
+export declare type ImagenPersonFilterLevel = (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];
+
+/**
+ * A filter level controlling how aggressively to filter sensitive content.
+ *
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
+ * for more details.
+ *
+ * @public
+ */
+export declare const ImagenSafetyFilterLevel: {
+ /**
+ * The most aggressive filtering level; most strict blocking.
+ */
+ readonly BLOCK_LOW_AND_ABOVE: "block_low_and_above";
+ /**
+ * Blocks some sensitive prompts and responses.
+ */
+ readonly BLOCK_MEDIUM_AND_ABOVE: "block_medium_and_above";
+ /**
+ * Blocks few sensitive prompts and responses.
+ */
+ readonly BLOCK_ONLY_HIGH: "block_only_high";
+ /**
+ * The least aggressive filtering level; blocks very few sensitive prompts and responses.
+ * + * Access to this feature is restricted and may require your case to be reviewed and approved by + * Cloud support. + */ + readonly BLOCK_NONE: "block_none"; +}; + +/** + * A filter level controlling how aggressively to filter sensitive content. + * + * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI + * are assessed against a list of safety filters, which include 'harmful categories' (for example, + * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to + * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines} + * for more details. + * + * @public + */ +export declare type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel]; + +/** + * Settings for controlling the aggressiveness of filtering out sensitive content. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details. + * + * @public + */ +export declare interface ImagenSafetySettings { + /** + * A filter level controlling how aggressive to filter out sensitive content from generated + * images. + */ + safetyFilterLevel?: ImagenSafetyFilterLevel; + /** + * A filter level controlling whether generation of images containing people or faces is allowed. + */ + personFilterLevel?: ImagenPersonFilterLevel; +} + +/** + * Determines whether inference happens on-device or in-cloud. + * + * @remarks + * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an + * on-device model. If on-device inference is not available, the SDK + * will fall back to using a cloud-hosted model. + * <br/> + * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an + * on-device model. The SDK will not fall back to a cloud-hosted model. + * If on-device inference is not available, inference methods will throw. + * <br/> + * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a + * cloud-hosted model. The SDK will not fall back to an on-device model. + * <br/> + * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a + * cloud-hosted model. If not available, the SDK will fall back to an + * on-device model. + * + * @beta + */ +export declare const InferenceMode: { + readonly PREFER_ON_DEVICE: "prefer_on_device"; + readonly ONLY_ON_DEVICE: "only_on_device"; + readonly ONLY_IN_CLOUD: "only_in_cloud"; + readonly PREFER_IN_CLOUD: "prefer_in_cloud"; +}; + +/** + * Determines whether inference happens on-device or in-cloud. + * + * @beta + */ +export declare type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode]; + +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +export declare const InferenceSource: { + readonly ON_DEVICE: "on_device"; + readonly IN_CLOUD: "in_cloud"; +}; + +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +export declare type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource]; + +/** + * Content part interface if the part represents an image. + * @public + */ +export declare interface InlineDataPart { + text?: never; + inlineData: GenerativeContentBlob; + functionCall?: never; + functionResponse?: never; + /** + * Applicable if `inlineData` is a video. 
+ */
+ videoMetadata?: VideoMetadata;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+}
+
+/**
+ * Schema class for "integer" types.
+ * @public
+ */
+export declare class IntegerSchema extends Schema {
+ constructor(schemaParams?: SchemaParams);
+}
+
+/**
+ * The programming language of the code.
+ *
+ * @beta
+ */
+export declare const Language: {
+ UNSPECIFIED: string;
+ PYTHON: string;
+};
+
+/**
+ * The programming language of the code.
+ *
+ * @beta
+ */
+export declare type Language = (typeof Language)[keyof typeof Language];
+
+/**
+ * Configures the creation of an on-device language model session.
+ * @beta
+ */
+export declare interface LanguageModelCreateCoreOptions {
+ topK?: number;
+ temperature?: number;
+ expectedInputs?: LanguageModelExpected[];
+}
+
+/**
+ * Configures the creation of an on-device language model session.
+ * @beta
+ */
+export declare interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+ signal?: AbortSignal;
+ initialPrompts?: LanguageModelMessage[];
+}
+
+/**
+ * Options for the expected inputs for an on-device language model.
+ * @beta
+ */
+export declare interface LanguageModelExpected {
+ type: LanguageModelMessageType;
+ languages?: string[];
+}
+
+/**
+ * An on-device language model message.
+ * @beta
+ */
+export declare interface LanguageModelMessage {
+ role: LanguageModelMessageRole;
+ content: LanguageModelMessageContent[];
+}
+
+/**
+ * An on-device language model content object.
+ * @beta
+ */
+export declare interface LanguageModelMessageContent {
+ type: LanguageModelMessageType;
+ value: LanguageModelMessageContentValue;
+}
+
+/**
+ * Content formats that can be provided as on-device message content.
+ * @beta
+ */
+export declare type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+
+/**
+ * Allowable roles for on-device language model usage.
+ * @beta
+ */
+export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+
+/**
+ * Allowable types for on-device language model messages.
+ * @beta
+ */
+export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
+
+/**
+ * Options for an on-device language model prompt.
+ * @beta
+ */
+export declare interface LanguageModelPromptOptions {
+ responseConstraint?: object;
+}
+
+/**
+ * Configuration parameters used by {@link LiveGenerativeModel} to control live content generation.
+ *
+ * @beta
+ */
+export declare interface LiveGenerationConfig {
+ /**
+ * Configuration for speech synthesis.
+ */
+ speechConfig?: SpeechConfig;
+ /**
+ * Specifies the maximum number of tokens that can be generated in the response. The number of
+ * tokens per word varies depending on the language being generated. This is unbounded by default.
+ */
+ maxOutputTokens?: number;
+ /**
+ * Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest
+ * probability tokens are always selected. In this case, responses for a given prompt are mostly
+ * deterministic, but a small amount of variation is still possible.
+ */
+ temperature?: number;
+ /**
+ * Changes how the model selects tokens for output. Tokens are
+ * selected from the most to least probable until the sum of their probabilities equals the `topP`
+ * value.
+ * For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively
+ * and the `topP` value is 0.5, then the model will select either A or B as the next token by using
+ * the `temperature`, and excludes C as a candidate. Defaults to 0.95 if unset.
+ */
+ topP?: number;
+ /**
+ * Changes how the model selects tokens for output. A `topK` value of 1 means the selected token is
+ * the most probable among all tokens in the model's vocabulary, while a `topK` value of 3 means that
+ * the next token is selected from among the 3 most probable tokens. Tokens are then further
+ * filtered based on `topP`, with the final token selected using `temperature` sampling. Defaults
+ * to 40 if unspecified.
+ */
+ topK?: number;
+ /**
+ * Penalizes tokens that have already appeared in the generated output, encouraging the model to
+ * introduce new content. A positive value increases this penalty.
+ */
+ presencePenalty?: number;
+ /**
+ * Penalizes tokens in proportion to how often they appear in the generated output, reducing
+ * repetition. A positive value increases this penalty.
+ */
+ frequencyPenalty?: number;
+ /**
+ * The modalities of the response.
+ */
+ responseModalities?: ResponseModality[];
+ /**
+ * Enables transcription of audio input.
+ *
+ * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscription` property
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
+ * messages, so you may only receive small amounts of text per message. For example, if you ask the model
+ * "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?".
+ */
+ inputAudioTranscription?: AudioTranscriptionConfig;
+ /**
+ * Enables transcription of audio output.
+ *
+ * When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
+ * messages, so you may only receive small amounts of text per message. For example, if the model says
+ * "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?".
+ */
+ outputAudioTranscription?: AudioTranscriptionConfig;
+}
+
+/**
+ * Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal
+ * interactions with Gemini.
+ *
+ * This class should only be instantiated with {@link getLiveGenerativeModel}.
+ *
+ * @beta
+ */
+export declare class LiveGenerativeModel extends AIModel {
+ /**
+ * @internal
+ */
+ private _webSocketHandler;
+ generationConfig: LiveGenerationConfig;
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: Content;
+ /**
+ * @internal
+ */
+ constructor(ai: AI, modelParams: LiveModelParams,
+ /**
+ * @internal
+ */
+ _webSocketHandler: WebSocketHandler);
+ /**
+ * Starts a {@link LiveSession}.
+ *
+ * @returns A {@link LiveSession}.
+ * @throws If the connection failed to be established with the server.
+ *
+ * @beta
+ */
+ connect(): Promise<LiveSession>;
+}
+
+/**
+ * Params passed to {@link getLiveGenerativeModel}.
+ * @beta
+ */
+export declare interface LiveModelParams {
+ model: string;
+ generationConfig?: LiveGenerationConfig;
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: string | Part | Content;
+}
+
+/**
+ * The types of responses that can be returned by {@link LiveSession.receive}.
+ *
+ * @beta
+ */
+export declare const LiveResponseType: {
+ SERVER_CONTENT: string;
+ TOOL_CALL: string;
+ TOOL_CALL_CANCELLATION: string;
+};
+
+/**
+ * The types of responses that can be returned by {@link LiveSession.receive}.
+ * This is a property on all messages that can be used for type narrowing.
+ * This property is not returned by the server; it is assigned to a server message object once it's parsed.
+ *
+ * @beta
+ */
+export declare type LiveResponseType = (typeof LiveResponseType)[keyof typeof LiveResponseType];
+
+/**
+ * An incremental content update from the model.
+ *
+ * @beta
+ */
+export declare interface LiveServerContent {
+ type: 'serverContent';
+ /**
+ * The content that the model has generated as part of the current conversation with the user.
+ */
+ modelTurn?: Content;
+ /**
+ * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.
+ */
+ turnComplete?: boolean;
+ /**
+ * Indicates whether the model was interrupted by the client. An interruption occurs when
+ * the client sends a message before the model finishes its turn. This is `undefined` if the
+ * model was not interrupted.
+ */
+ interrupted?: boolean;
+ /**
+ * Transcription of the audio that was input to the model.
+ */
+ inputTranscription?: Transcription;
+ /**
+ * Transcription of the audio output from the model.
+ */
+ outputTranscription?: Transcription;
+}
+
+/**
+ * A request from the model for the client to execute one or more functions.
+ *
+ * @beta
+ */
+export declare interface LiveServerToolCall {
+ type: 'toolCall';
+ /**
+ * An array of function calls to run.
+ */
+ functionCalls: FunctionCall[];
+}
+
+/**
+ * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.
+ *
+ * @beta
+ */
+export declare interface LiveServerToolCallCancellation {
+ type: 'toolCallCancellation';
+ /**
+ * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.
+ */
+ functionIds: string[];
+}
+
+/**
+ * Represents an active, real-time, bidirectional conversation with the model.
+ *
+ * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.
+ *
+ * @beta
+ */
+export declare class LiveSession {
+ private webSocketHandler;
+ private serverMessages;
+ /**
+ * Indicates whether this Live session is closed.
+ *
+ * @beta
+ */
+ isClosed: boolean;
+ /**
+ * Indicates whether this Live session is being controlled by an `AudioConversationController`.
+ *
+ * @beta
+ */
+ inConversation: boolean;
+ /**
+ * @internal
+ */
+ constructor(webSocketHandler: WebSocketHandler, serverMessages: AsyncGenerator<unknown>);
+ /**
+ * Sends content to the server.
+ *
+ * @param request - The message to send to the model.
+ * @param turnComplete - Indicates if the turn is complete. Defaults to false.
+ * @throws If this session has been closed.
+ *
+ * @beta
+ */
+ send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>;
+ /**
+ * Sends text to the server in realtime.
+ *
+ * @example
+ * ```javascript
+ * liveSession.sendTextRealtime("Hello, how are you?");
+ * ```
+ *
+ * @param text - The text data to send.
+ * @throws If this session has been closed.
+ *
+ * @beta
+ */
+ sendTextRealtime(text: string): Promise<void>;
+ /**
+ * Sends audio data to the server in realtime.
+ *
+ * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz
+ * little-endian.
+ *
+ * @example
+ * ```javascript
+ * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.
+ * const blob = { mimeType: "audio/pcm", data: pcmData };
+ * liveSession.sendAudioRealtime(blob);
+ * ```
+ *
+ * @param blob - The base64-encoded PCM data to send to the server in realtime.
+ * @throws If this session has been closed.
+ * + * @beta + */ + sendAudioRealtime(blob: GenerativeContentBlob): Promise<void>; + /** + * Sends video data to the server in realtime. + * + * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It + * is recommended to set `mimeType` to `image/jpeg`. + * + * @example + * ```javascript + * // const videoFrame = ... base64-encoded JPEG data + * const blob = { mimeType: "image/jpeg", data: videoFrame }; + * liveSession.sendVideoRealtime(blob); + * ``` + * @param blob - The base64-encoded video data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + sendVideoRealtime(blob: GenerativeContentBlob): Promise<void>; + /** + * Sends function responses to the server. + * + * @param functionResponses - The function responses to send. + * @throws If this session has been closed. + * + * @beta + */ + sendFunctionResponses(functionResponses: FunctionResponse[]): Promise<void>; + /** + * Yields messages received from the server. + * This can only be used by one consumer at a time. + * + * @returns An `AsyncGenerator` that yields server messages as they arrive. + * @throws If the session is already closed, or if we receive a response that we don't support. + * + * @beta + */ + receive(): AsyncGenerator<LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation>; + /** + * Closes this session. + * All methods on this session will throw an error once this resolves. + * + * @beta + */ + close(): Promise<void>; + /** + * Sends realtime input to the server. + * + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * @param mediaChunks - The media chunks to send. + * @throws If this session has been closed. + * + * @beta + */ + sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>; + /** + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * Sends a stream of {@link GenerativeContentBlob}. + * + * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send. + * @throws If this session has been closed. + * + * @beta + */ + sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>; +} + +/** + * Content part modality. + * @public + */ +export declare const Modality: { + /** + * Unspecified modality. + */ + readonly MODALITY_UNSPECIFIED: "MODALITY_UNSPECIFIED"; + /** + * Plain text. + */ + readonly TEXT: "TEXT"; + /** + * Image. + */ + readonly IMAGE: "IMAGE"; + /** + * Video. + */ + readonly VIDEO: "VIDEO"; + /** + * Audio. + */ + readonly AUDIO: "AUDIO"; + /** + * Document (for example, PDF). + */ + readonly DOCUMENT: "DOCUMENT"; +}; + +/** + * Content part modality. + * @public + */ +export declare type Modality = (typeof Modality)[keyof typeof Modality]; + +/** + * Represents token counting info for a single modality. + * + * @public + */ +export declare interface ModalityTokenCount { + /** The modality associated with this token count. */ + modality: Modality; + /** The number of tokens counted. */ + tokenCount: number; +} + +/** + * Params passed to {@link getGenerativeModel}. + * @public + */ +export declare interface ModelParams extends BaseParams { + model: string; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} + +/** + * Schema class for "number" types. 
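+ *
+ * For illustration, a number schema might appear as a field of an object schema passed to
+ * {@link GenerationConfig.responseSchema}; the field names below are placeholders:
+ *
+ * @example
+ * ```javascript
+ * const schema = Schema.object({
+ *   properties: {
+ *     name: Schema.string(),
+ *     priceUsd: Schema.number({ description: 'Price in US dollars' })
+ *   }
+ * });
+ * ```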
+ * @public + */ +export declare class NumberSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} + +/** + * Schema class for "object" types. + * The `properties` param must be a map of `Schema` objects. + * @public + */ +export declare class ObjectSchema extends Schema { + properties: { + [k: string]: TypedSchema; + }; + optionalProperties: string[]; + constructor(schemaParams: SchemaParams, properties: { + [k: string]: TypedSchema; + }, optionalProperties?: string[]); + /** + * @internal + */ + toJSON(): SchemaRequest; +} + +/** + * Interface for JSON parameters in a schema of {@link (SchemaType:type)} + * "object" when not using the `Schema.object()` helper. + * @public + */ +export declare interface ObjectSchemaRequest extends SchemaRequest { + type: 'object'; + /** + * This is not a property accepted in the final request to the backend, but is + * a client-side convenience property that is only usable by constructing + * a schema through the `Schema.object()` helper method. Populating this + * property will cause response errors if the object is not wrapped with + * `Schema.object()`. + */ + optionalProperties?: never; +} + +/** + * Encapsulates configuration for on-device inference. + * + * @beta + */ +export declare interface OnDeviceParams { + createOptions?: LanguageModelCreateOptions; + promptOptions?: LanguageModelPromptOptions; +} + +/** + * Represents the result of the code execution. + * + * @beta + */ +export declare const Outcome: { + UNSPECIFIED: string; + OK: string; + FAILED: string; + DEADLINE_EXCEEDED: string; +}; + +/** + * Represents the result of the code execution. + * + * @beta + */ +export declare type Outcome = (typeof Outcome)[keyof typeof Outcome]; + +/** + * Content part - includes text, image/video, or function call/response + * part types. + * @public + */ +export declare type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart; + +/** + * Possible roles. + * @public + */ +export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"]; + +/** + * Configuration for a pre-built voice. + * + * @beta + */ +export declare interface PrebuiltVoiceConfig { + /** + * The voice name to use for speech synthesis. + * + * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}. + */ + voiceName?: string; +} + +/** + * If the prompt was blocked, this will be populated with `blockReason` and + * the relevant `safetyRatings`. + * @public + */ +export declare interface PromptFeedback { + blockReason?: BlockReason; + safetyRatings: SafetyRating[]; + /** + * A human-readable description of the `blockReason`. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + blockReasonMessage?: string; +} + +/** + * Params passed to {@link getGenerativeModel}. + * @public + */ +export declare interface RequestOptions { + /** + * Request timeout in milliseconds. Defaults to 180 seconds (180000ms). + */ + timeout?: number; + /** + * Base url for endpoint. Defaults to + * https://firebasevertexai.googleapis.com, which is the + * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API} + * (used regardless of your chosen Gemini API provider). + */ + baseUrl?: string; +} + +/** + * Generation modalities to be returned in generation responses. 
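+ *
+ * A sketch of requesting both text and image output, assuming an {@link AI} instance named `ai`;
+ * the model name is illustrative, and only some models support image output (see
+ * {@link GenerationConfig.responseModalities}):
+ *
+ * @example
+ * ```javascript
+ * const model = getGenerativeModel(ai, {
+ *   model: 'gemini-2.0-flash-exp', // illustrative model name
+ *   generationConfig: {
+ *     responseModalities: [ResponseModality.TEXT, ResponseModality.IMAGE]
+ *   }
+ * });
+ * const result = await model.generateContent('Generate an illustration of a fox.');
+ * const images = result.response.inlineDataParts();
+ * ```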
+ * + * @beta + */ +export declare const ResponseModality: { + /** + * Text. + * @beta + */ + readonly TEXT: "TEXT"; + /** + * Image. + * @beta + */ + readonly IMAGE: "IMAGE"; + /** + * Audio. + * @beta + */ + readonly AUDIO: "AUDIO"; +}; + +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +export declare type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality]; + +/** + * @public + */ +export declare interface RetrievedContextAttribution { + uri: string; + title: string; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Role is the producer of the content. + * @public + */ +export declare type Role = (typeof POSSIBLE_ROLES)[number]; + +/** + * A safety rating associated with a {@link GenerateContentCandidate} + * @public + */ +export declare interface SafetyRating { + category: HarmCategory; + probability: HarmProbability; + /** + * The harm severity level. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`. + */ + severity: HarmSeverity; + /** + * The probability score of the harm category. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0. + */ + probabilityScore: number; + /** + * The severity score of the harm category. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0. + */ + severityScore: number; + blocked: boolean; +} + +/** + * Safety setting that can be sent as part of request parameters. + * @public + */ +export declare interface SafetySetting { + category: HarmCategory; + threshold: HarmBlockThreshold; + /** + * The harm block method. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be + * thrown if this property is defined. + */ + method?: HarmBlockMethod; +} + +/** + * Parent class encompassing all Schema types, with static methods that + * allow building specific Schema types. This class can be converted with + * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints. + * (This string conversion is automatically done when calling SDK methods.) + * @public + */ +export declare abstract class Schema implements SchemaInterface { + /** + * Optional. The type of the property. 
+ * This can only be undefined when using `anyOf` schemas, which do not have an + * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}. + */ + type?: SchemaType; + /** Optional. The format of the property. + * Supported formats:<br/> + * <ul> + * <li>for NUMBER type: "float", "double"</li> + * <li>for INTEGER type: "int32", "int64"</li> + * <li>for STRING type: "email", "byte", etc</li> + * </ul> + */ + format?: string; + /** Optional. The description of the property. */ + description?: string; + /** Optional. The items of the property. */ + items?: SchemaInterface; + /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + minItems?: number; + /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + maxItems?: number; + /** Optional. Whether the property is nullable. Defaults to false. */ + nullable: boolean; + /** Optional. The example of the property. */ + example?: unknown; + /** + * Allows user to add other schema properties that have not yet + * been officially added to the SDK. + */ + [key: string]: unknown; + constructor(schemaParams: SchemaInterface); + /** + * Defines how this Schema should be serialized as JSON. + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior + * @internal + */ + toJSON(): SchemaRequest; + static array(arrayParams: SchemaParams & { + items: Schema; + }): ArraySchema; + static object(objectParams: SchemaParams & { + properties: { + [k: string]: Schema; + }; + optionalProperties?: string[]; + }): ObjectSchema; + static string(stringParams?: SchemaParams): StringSchema; + static enumString(stringParams: SchemaParams & { + enum: string[]; + }): StringSchema; + static integer(integerParams?: SchemaParams): IntegerSchema; + static number(numberParams?: SchemaParams): NumberSchema; + static boolean(booleanParams?: SchemaParams): BooleanSchema; + static anyOf(anyOfParams: SchemaParams & { + anyOf: TypedSchema[]; + }): AnyOfSchema; +} + +/** + * Interface for {@link Schema} class. + * @public + */ +export declare interface SchemaInterface extends SchemaShared<SchemaInterface> { + /** + * The type of the property. this can only be undefined when using `anyof` schemas, + * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}. + */ + type?: SchemaType; +} + +/** + * Params passed to {@link Schema} static methods to create specific + * {@link Schema} classes. + * @public + */ +export declare interface SchemaParams extends SchemaShared<SchemaInterface> { +} + +/** + * Final format for {@link Schema} params passed to backend requests. + * @public + */ +export declare interface SchemaRequest extends SchemaShared<SchemaRequest> { + /** + * The type of the property. this can only be undefined when using `anyOf` schemas, + * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }. + */ + type?: SchemaType; + /** Optional. Array of required property. */ + required?: string[]; +} + +/** + * Basic {@link Schema} properties shared across several Schema-related + * types. + * @public + */ +export declare interface SchemaShared<T> { + /** + * An array of {@link Schema}. The generated data must be valid against any of the schemas + * listed in this array. 
This allows specifying multiple possible structures or types for a + * single field. + */ + anyOf?: T[]; + /** Optional. The format of the property. + * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or + * `'date-time'`, otherwise requests will fail. + */ + format?: string; + /** Optional. The description of the property. */ + description?: string; + /** + * The title of the property. This helps document the schema's purpose but does not typically + * constrain the generated value. It can subtly guide the model by clarifying the intent of a + * field. + */ + title?: string; + /** Optional. The items of the property. */ + items?: T; + /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + minItems?: number; + /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + maxItems?: number; + /** Optional. Map of `Schema` objects. */ + properties?: { + [k: string]: T; + }; + /** A hint suggesting the order in which the keys should appear in the generated JSON string. */ + propertyOrdering?: string[]; + /** Optional. The enum of the property. */ + enum?: string[]; + /** Optional. The example of the property. */ + example?: unknown; + /** Optional. Whether the property is nullable. */ + nullable?: boolean; + /** The minimum value of a numeric type. */ + minimum?: number; + /** The maximum value of a numeric type. */ + maximum?: number; + [key: string]: unknown; +} + +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +export declare const SchemaType: { + /** String type. */ + readonly STRING: "string"; + /** Number type. */ + readonly NUMBER: "number"; + /** Integer type. */ + readonly INTEGER: "integer"; + /** Boolean type. */ + readonly BOOLEAN: "boolean"; + /** Array type. */ + readonly ARRAY: "array"; + /** Object type. */ + readonly OBJECT: "object"; +}; + +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +export declare type SchemaType = (typeof SchemaType)[keyof typeof SchemaType]; + +/** + * Google search entry point. + * + * @public + */ +export declare interface SearchEntrypoint { + /** + * HTML/CSS snippet that must be embedded in a web page. The snippet is designed to avoid + * undesired interaction with the rest of the page's CSS. + * + * To ensure proper rendering and prevent CSS conflicts, it is recommended + * to encapsulate this `renderedContent` within a shadow DOM when embedding it + * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}. + * + * @example + * ```javascript + * const container = document.createElement('div'); + * document.body.appendChild(container); + * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent; + * ``` + */ + renderedContent?: string; +} + +/** + * Represents a specific segment within a {@link Content} object, often used to + * pinpoint the exact location of text or data that grounding information refers to. + * + * @public + */ +export declare interface Segment { + /** + * The zero-based index of the {@link Part} object within the `parts` array + * of its parent {@link Content} object. This identifies which part of the + * content the segment belongs to. 
+ */ + partIndex: number; + /** + * The zero-based start index of the segment within the specified `Part`, + * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the + * beginning of the part's content (e.g., `Part.text`). + */ + startIndex: number; + /** + * The zero-based end index of the segment within the specified `Part`, + * measured in UTF-8 bytes. This offset is exclusive, meaning the character + * at this index is not included in the segment. + */ + endIndex: number; + /** + * The text corresponding to the segment from the response. + */ + text: string; +} + +/** + * Configures speech synthesis. + * + * @beta + */ +export declare interface SpeechConfig { + /** + * Configures the voice to be used in speech synthesis. + */ + voiceConfig?: VoiceConfig; +} + +/** + * Starts a real-time, bidirectional audio conversation with the model. This helper function manages + * the complexities of microphone access, audio recording, playback, and interruptions. + * + * @remarks Important: This function must be called in response to a user gesture + * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}. + * + * @example + * ```javascript + * const liveSession = await model.connect(); + * let conversationController; + * + * // This function must be called from within a click handler. + * async function startConversation() { + * try { + * conversationController = await startAudioConversation(liveSession); + * } catch (e) { + * // Handle AI-specific errors + * if (e instanceof AIError) { + * console.error("AI Error:", e.message); + * } + * // Handle microphone permission and hardware errors + * else if (e instanceof DOMException) { + * console.error("Microphone Error:", e.message); + * } + * // Handle other unexpected errors + * else { + * console.error("An unexpected error occurred:", e); + * } + * } + * } + * + * // Later, to stop the conversation: + * // if (conversationController) { + * // await conversationController.stop(); + * // } + * ``` + * + * @param liveSession - An active {@link LiveSession} instance. + * @param options - Configuration options for the audio conversation. + * @returns A `Promise` that resolves with an {@link AudioConversationController}. + * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`). + * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions. + * + * @beta + */ +export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>; + +/** + * Options for {@link startAudioConversation}. + * + * @beta + */ +export declare interface StartAudioConversationOptions { + /** + * An async handler that is called when the model requests a function to be executed. + * The handler should perform the function call and return the result as a `Part`, + * which will then be sent back to the model. 
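+     *
+     * @example
+     * An illustrative sketch of a handler; `getWeather` is a hypothetical app function, and the
+     * function name is expected to match one of the function declarations passed in `tools`:
+     * ```javascript
+     * const options = {
+     *   functionCallingHandler: async (functionCalls) => {
+     *     const call = functionCalls[0];
+     *     if (call.name === 'getWeather') {
+     *       const weather = await getWeather(call.args);
+     *       return { name: call.name, response: { weather } };
+     *     }
+     *     return { name: call.name, response: { error: `Unknown function: ${call.name}` } };
+     *   }
+     * };
+     * const controller = await startAudioConversation(liveSession, options);
+     * ```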
+ */ + functionCallingHandler?: (functionCalls: FunctionCall[]) => Promise<FunctionResponse>; +} + +/** + * Params for {@link GenerativeModel.startChat}. + * @public + */ +export declare interface StartChatParams extends BaseParams { + history?: Content[]; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} + +/** + * Schema class for "string" types. Can be used with or without + * enum values. + * @public + */ +export declare class StringSchema extends Schema { + enum?: string[]; + constructor(schemaParams?: SchemaParams, enumValues?: string[]); + /** + * @internal + */ + toJSON(): SchemaRequest; +} + +/** + * Content part interface if the part represents a text string. + * @public + */ +export declare interface TextPart { + text: string; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: string; + executableCode?: never; + codeExecutionResult?: never; +} + +/** + * Configuration for "thinking" behavior of compatible Gemini models. + * + * Certain models utilize a thinking process before generating a response. This allows them to + * reason through complex problems and plan a more coherent and accurate answer. + * + * @public + */ +export declare interface ThinkingConfig { + /** + * The thinking budget, in tokens. + * + * This parameter sets an upper limit on the number of tokens the model can use for its internal + * "thinking" process. A higher budget may result in higher quality responses for complex tasks + * but can also increase latency and cost. + * + * If you don't specify a budget, the model will determine the appropriate amount + * of thinking based on the complexity of the prompt. + * + * An error will be thrown if you set a thinking budget for a model that does not support this + * feature or if the specified budget is not within the model's supported range. + */ + thinkingBudget?: number; + /** + * Whether to include "thought summaries" in the model's response. + * + * @remarks + * Thought summaries provide a brief overview of the model's internal thinking process, + * offering insight into how it arrived at the final answer. This can be useful for + * debugging, understanding the model's reasoning, and verifying its accuracy. + */ + includeThoughts?: boolean; +} + +/** + * Defines a tool that model can call to access external knowledge. + * @public + */ +export declare type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool; + +/** + * Tool config. This config is shared for all tools provided in the request. + * @public + */ +export declare interface ToolConfig { + functionCallingConfig?: FunctionCallingConfig; +} + +/** + * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription + * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on + * the {@link LiveGenerationConfig}. + * + * @beta + */ +export declare interface Transcription { + /** + * The text transcription of the audio. + */ + text?: string; +} + +/** + * A type that includes all specific Schema types. + * @public + */ +export declare type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema | AnyOfSchema; + +/** + * Specifies the URL Context configuration. + * + * @beta + */ +export declare interface URLContext { +} + +/** + * Metadata related to {@link URLContextTool}. 
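+ *
+ * @example
+ * This metadata is typically returned when the URL context tool is enabled on a request; a
+ * minimal, illustrative sketch (the model name is only an example, and `ai` is assumed to come
+ * from an earlier `getAI()` call):
+ * ```javascript
+ * const model = getGenerativeModel(ai, {
+ *   model: 'gemini-2.5-flash',
+ *   tools: [{ urlContext: {} }]
+ * });
+ * ```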
+ * + * @beta + */ +export declare interface URLContextMetadata { + /** + * List of URL metadata used to provide context to the Gemini model. + */ + urlMetadata: URLMetadata[]; +} + +/** + * A tool that allows you to provide additional context to the models in the form of public web + * URLs. By including URLs in your request, the Gemini model will access the content from those + * pages to inform and enhance its response. + * + * @beta + */ +export declare interface URLContextTool { + /** + * Specifies the URL Context configuration. + */ + urlContext: URLContext; +} + +/** + * Metadata for a single URL retrieved by the {@link URLContextTool} tool. + * + * @beta + */ +export declare interface URLMetadata { + /** + * The retrieved URL. + */ + retrievedUrl?: string; + /** + * The status of the URL retrieval. + */ + urlRetrievalStatus?: URLRetrievalStatus; +} + +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +export declare const URLRetrievalStatus: { + /** + * Unspecified retrieval status. + */ + URL_RETRIEVAL_STATUS_UNSPECIFIED: string; + /** + * The URL retrieval was successful. + */ + URL_RETRIEVAL_STATUS_SUCCESS: string; + /** + * The URL retrieval failed. + */ + URL_RETRIEVAL_STATUS_ERROR: string; + /** + * The URL retrieval failed because the content is behind a paywall. + */ + URL_RETRIEVAL_STATUS_PAYWALL: string; + /** + * The URL retrieval failed because the content is unsafe. + */ + URL_RETRIEVAL_STATUS_UNSAFE: string; +}; + +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +export declare type URLRetrievalStatus = (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus]; + +/** + * Usage metadata about a {@link GenerateContentResponse}. + * + * @public + */ +export declare interface UsageMetadata { + promptTokenCount: number; + candidatesTokenCount: number; + /** + * The number of tokens used by the model's internal "thinking" process. + */ + thoughtsTokenCount?: number; + totalTokenCount: number; + /** + * The number of tokens used by tools. + */ + toolUsePromptTokenCount?: number; + promptTokensDetails?: ModalityTokenCount[]; + candidatesTokensDetails?: ModalityTokenCount[]; + /** + * A list of tokens used by tools, broken down by modality. + */ + toolUsePromptTokensDetails?: ModalityTokenCount[]; +} + +/** + * Configuration class for the Vertex AI Gemini API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend. 
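+ *
+ * @example
+ * A minimal, illustrative sketch; `firebaseApp` is assumed to be an initialized
+ * {@link @firebase/app#FirebaseApp}, and the model name is only an example:
+ * ```javascript
+ * const ai = getAI(firebaseApp, { backend: new VertexAIBackend('us-central1') });
+ * const model = getGenerativeModel(ai, { model: 'gemini-2.5-flash' });
+ * ```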
+ * + * @public + */ +export declare class VertexAIBackend extends Backend { + /** + * The region identifier. + * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + readonly location: string; + /** + * Creates a configuration object for the Vertex AI backend. + * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + constructor(location?: string); +} + +/** + * Describes the input video content. + * @public + */ +export declare interface VideoMetadata { + /** + * The start offset of the video in + * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format. + */ + startOffset: string; + /** + * The end offset of the video in + * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format. + */ + endOffset: string; +} + +/** + * Configuration for the voice to used in speech synthesis. + * + * @beta + */ +export declare interface VoiceConfig { + /** + * Configures the voice using a pre-built voice configuration. + */ + prebuiltVoiceConfig?: PrebuiltVoiceConfig; +} + +/** + * @public + */ +export declare interface WebAttribution { + uri: string; + title: string; +} + +/** + * A grounding chunk from the web. + * + * Important: If using Grounding with Google Search, you are required to comply with the + * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for "Grounding with Google Search". + * + * @public + */ +export declare interface WebGroundingChunk { + /** + * The URI of the retrieved web page. + */ + uri?: string; + /** + * The title of the retrieved web page. + */ + title?: string; + /** + * The domain of the original URI from which the content was retrieved. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + domain?: string; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * A standardized interface for interacting with a WebSocket connection. + * This abstraction allows the SDK to use the appropriate WebSocket implementation + * for the current JS environment (Browser vs. Node) without + * changing the core logic of the `LiveSession`. + * @internal + */ +declare interface WebSocketHandler { + /** + * Establishes a connection to the given URL. + * + * @param url The WebSocket URL (e.g., wss://...). + * @returns A promise that resolves on successful connection or rejects on failure. + */ + connect(url: string): Promise<void>; + /** + * Sends data over the WebSocket. 
+ * + * @param data The string or binary data to send. + */ + send(data: string | ArrayBuffer): void; + /** + * Returns an async generator that yields parsed JSON objects from the server. + * The yielded type is `unknown` because the handler cannot guarantee the shape of the data. + * The consumer is responsible for type validation. + * The generator terminates when the connection is closed. + * + * @returns A generator that allows consumers to pull messages using a `for await...of` loop. + */ + listen(): AsyncGenerator<unknown>; + /** + * Closes the WebSocket connection. + * + * @param code - A numeric status code explaining why the connection is closing. + * @param reason - A human-readable string explaining why the connection is closing. + */ + close(code?: number, reason?: string): Promise<void>; +} + +export { } diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/index.esm.js b/frontend-old/node_modules/@firebase/ai/dist/esm/index.esm.js new file mode 100644 index 0000000..d7144c0 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/index.esm.js @@ -0,0 +1,4247 @@ +import { _isFirebaseServerApp, _getProvider, getApp, _registerComponent, registerVersion } from '@firebase/app'; +import { Component } from '@firebase/component'; +import { FirebaseError, Deferred, getModularInstance } from '@firebase/util'; +import { Logger } from '@firebase/logger'; + +var name = "@firebase/ai"; +var version = "2.5.0"; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const AI_TYPE = 'AI'; +const DEFAULT_LOCATION = 'us-central1'; +const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com'; +const DEFAULT_API_VERSION = 'v1beta'; +const PACKAGE_VERSION = version; +const LANGUAGE_TAG = 'gl-js'; +const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000; +/** + * Defines the name of the default in-cloud model to use for hybrid inference. + */ +const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite'; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Error class for the Firebase AI SDK. + * + * @public + */ +class AIError extends FirebaseError { + /** + * Constructs a new instance of the `AIError` class. + * + * @param code - The error code from {@link (AIErrorCode:type)}. + * @param message - A human-readable message describing the error. + * @param customErrorData - Optional error data. 
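+     *
+     * @example
+     * Illustrative only: errors surfaced by SDK calls can be narrowed by their standardized
+     * `code` (here, `model` is assumed to be a `GenerativeModel` instance):
+     * ```javascript
+     * try {
+     *   await model.generateContent('Hello');
+     * } catch (e) {
+     *   if (e instanceof AIError && e.code === AIErrorCode.FETCH_ERROR) {
+     *     console.error('Network failure:', e.message);
+     *   }
+     * }
+     * ```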
+ */ + constructor(code, message, customErrorData) { + // Match error format used by FirebaseError from ErrorFactory + const service = AI_TYPE; + const fullCode = `${service}/${code}`; + const fullMessage = `${service}: ${message} (${fullCode})`; + super(code, fullMessage); + this.code = code; + this.customErrorData = customErrorData; + // FirebaseError initializes a stack trace, but it assumes the error is created from the error + // factory. Since we break this assumption, we set the stack trace to be originating from this + // constructor. + // This is only supported in V8. + if (Error.captureStackTrace) { + // Allows us to initialize the stack trace without including the constructor itself at the + // top level of the stack trace. + Error.captureStackTrace(this, AIError); + } + // Allows instanceof AIError in ES5/ES6 + // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work + // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget + // which we can now use since we no longer target ES5. + Object.setPrototypeOf(this, AIError.prototype); + // Since Error is an interface, we don't inherit toString and so we define it ourselves. + this.toString = () => fullMessage; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Possible roles. + * @public + */ +const POSSIBLE_ROLES = ['user', 'model', 'function', 'system']; +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +const HarmCategory = { + HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH', + HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', + HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT', + HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT' +}; +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +const HarmBlockThreshold = { + /** + * Content with `NEGLIGIBLE` will be allowed. + */ + BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE', + /** + * Content with `NEGLIGIBLE` and `LOW` will be allowed. + */ + BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE', + /** + * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed. + */ + BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH', + /** + * All content will be allowed. + */ + BLOCK_NONE: 'BLOCK_NONE', + /** + * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding + * to the {@link (HarmCategory:type)} will not be present in the response. + */ + OFF: 'OFF' +}; +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). + * + * @public + */ +const HarmBlockMethod = { + /** + * The harm block method uses both probability and severity scores. + */ + SEVERITY: 'SEVERITY', + /** + * The harm block method uses the probability score. 
+ */ + PROBABILITY: 'PROBABILITY' +}; +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +const HarmProbability = { + /** + * Content has a negligible chance of being unsafe. + */ + NEGLIGIBLE: 'NEGLIGIBLE', + /** + * Content has a low chance of being unsafe. + */ + LOW: 'LOW', + /** + * Content has a medium chance of being unsafe. + */ + MEDIUM: 'MEDIUM', + /** + * Content has a high chance of being unsafe. + */ + HIGH: 'HIGH' +}; +/** + * Harm severity levels. + * @public + */ +const HarmSeverity = { + /** + * Negligible level of harm severity. + */ + HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE', + /** + * Low level of harm severity. + */ + HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW', + /** + * Medium level of harm severity. + */ + HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM', + /** + * High level of harm severity. + */ + HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH', + /** + * Harm severity is not supported. + * + * @remarks + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED' +}; +/** + * Reason that a prompt was blocked. + * @public + */ +const BlockReason = { + /** + * Content was blocked by safety settings. + */ + SAFETY: 'SAFETY', + /** + * Content was blocked, but the reason is uncategorized. + */ + OTHER: 'OTHER', + /** + * Content was blocked because it contained terms from the terminology blocklist. + */ + BLOCKLIST: 'BLOCKLIST', + /** + * Content was blocked due to prohibited content. + */ + PROHIBITED_CONTENT: 'PROHIBITED_CONTENT' +}; +/** + * Reason that a candidate finished. + * @public + */ +const FinishReason = { + /** + * Natural stop point of the model or provided stop sequence. + */ + STOP: 'STOP', + /** + * The maximum number of tokens as specified in the request was reached. + */ + MAX_TOKENS: 'MAX_TOKENS', + /** + * The candidate content was flagged for safety reasons. + */ + SAFETY: 'SAFETY', + /** + * The candidate content was flagged for recitation reasons. + */ + RECITATION: 'RECITATION', + /** + * Unknown reason. + */ + OTHER: 'OTHER', + /** + * The candidate content contained forbidden terms. + */ + BLOCKLIST: 'BLOCKLIST', + /** + * The candidate content potentially contained prohibited content. + */ + PROHIBITED_CONTENT: 'PROHIBITED_CONTENT', + /** + * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII). + */ + SPII: 'SPII', + /** + * The function call generated by the model was invalid. + */ + MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL' +}; +/** + * @public + */ +const FunctionCallingMode = { + /** + * Default model behavior; model decides to predict either a function call + * or a natural language response. + */ + AUTO: 'AUTO', + /** + * Model is constrained to always predicting a function call only. + * If `allowed_function_names` is set, the predicted function call will be + * limited to any one of `allowed_function_names`, else the predicted + * function call will be any one of the provided `function_declarations`. + */ + ANY: 'ANY', + /** + * Model will not predict any function call. Model behavior is same as when + * not passing any function declarations. + */ + NONE: 'NONE' +}; +/** + * Content part modality. + * @public + */ +const Modality = { + /** + * Unspecified modality. + */ + MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED', + /** + * Plain text. + */ + TEXT: 'TEXT', + /** + * Image. + */ + IMAGE: 'IMAGE', + /** + * Video. 
+ */ + VIDEO: 'VIDEO', + /** + * Audio. + */ + AUDIO: 'AUDIO', + /** + * Document (for example, PDF). + */ + DOCUMENT: 'DOCUMENT' +}; +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +const ResponseModality = { + /** + * Text. + * @beta + */ + TEXT: 'TEXT', + /** + * Image. + * @beta + */ + IMAGE: 'IMAGE', + /** + * Audio. + * @beta + */ + AUDIO: 'AUDIO' +}; +/** + * Determines whether inference happens on-device or in-cloud. + * + * @remarks + * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an + * on-device model. If on-device inference is not available, the SDK + * will fall back to using a cloud-hosted model. + * <br/> + * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an + * on-device model. The SDK will not fall back to a cloud-hosted model. + * If on-device inference is not available, inference methods will throw. + * <br/> + * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a + * cloud-hosted model. The SDK will not fall back to an on-device model. + * <br/> + * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a + * cloud-hosted model. If not available, the SDK will fall back to an + * on-device model. + * + * @beta + */ +const InferenceMode = { + 'PREFER_ON_DEVICE': 'prefer_on_device', + 'ONLY_ON_DEVICE': 'only_on_device', + 'ONLY_IN_CLOUD': 'only_in_cloud', + 'PREFER_IN_CLOUD': 'prefer_in_cloud' +}; +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +const InferenceSource = { + 'ON_DEVICE': 'on_device', + 'IN_CLOUD': 'in_cloud' +}; +/** + * Represents the result of the code execution. + * + * @beta + */ +const Outcome = { + UNSPECIFIED: 'OUTCOME_UNSPECIFIED', + OK: 'OUTCOME_OK', + FAILED: 'OUTCOME_FAILED', + DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED' +}; +/** + * The programming language of the code. + * + * @beta + */ +const Language = { + UNSPECIFIED: 'LANGUAGE_UNSPECIFIED', + PYTHON: 'PYTHON' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +const URLRetrievalStatus = { + /** + * Unspecified retrieval status. + */ + URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED', + /** + * The URL retrieval was successful. + */ + URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS', + /** + * The URL retrieval failed. 
+ */ + URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR', + /** + * The URL retrieval failed because the content is behind a paywall. + */ + URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL', + /** + * The URL retrieval failed because the content is unsafe. + */ + URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE' +}; +/** + * The types of responses that can be returned by {@link LiveSession.receive}. + * + * @beta + */ +const LiveResponseType = { + SERVER_CONTENT: 'serverContent', + TOOL_CALL: 'toolCall', + TOOL_CALL_CANCELLATION: 'toolCallCancellation' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +const AIErrorCode = { + /** A generic error occurred. */ + ERROR: 'error', + /** An error occurred in a request. */ + REQUEST_ERROR: 'request-error', + /** An error occurred in a response. */ + RESPONSE_ERROR: 'response-error', + /** An error occurred while performing a fetch. */ + FETCH_ERROR: 'fetch-error', + /** An error occurred because an operation was attempted on a closed session. */ + SESSION_CLOSED: 'session-closed', + /** An error associated with a Content object. */ + INVALID_CONTENT: 'invalid-content', + /** An error due to the Firebase API not being enabled in the Console. */ + API_NOT_ENABLED: 'api-not-enabled', + /** An error due to invalid Schema input. */ + INVALID_SCHEMA: 'invalid-schema', + /** An error occurred due to a missing Firebase API key. */ + NO_API_KEY: 'no-api-key', + /** An error occurred due to a missing Firebase app ID. */ + NO_APP_ID: 'no-app-id', + /** An error occurred due to a model name not being specified during initialization. */ + NO_MODEL: 'no-model', + /** An error occurred due to a missing project ID. */ + NO_PROJECT_ID: 'no-project-id', + /** An error occurred while parsing. */ + PARSE_FAILED: 'parse-failed', + /** An error occurred due an attempt to use an unsupported feature. */ + UNSUPPORTED: 'unsupported' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +const SchemaType = { + /** String type. */ + STRING: 'string', + /** Number type. */ + NUMBER: 'number', + /** Integer type. */ + INTEGER: 'integer', + /** Boolean type. 
 */
+    BOOLEAN: 'boolean',
+    /** Array type. */
+    ARRAY: 'array',
+    /** Object type. */
+    OBJECT: 'object'
+};
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * A filter level controlling how aggressively to filter sensitive content.
+ *
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
+ * for more details.
+ *
+ * @public
+ */
+const ImagenSafetyFilterLevel = {
+    /**
+     * The most aggressive filtering level; most strict blocking.
+     */
+    BLOCK_LOW_AND_ABOVE: 'block_low_and_above',
+    /**
+     * Blocks some sensitive prompts and responses.
+     */
+    BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',
+    /**
+     * Blocks few sensitive prompts and responses.
+     */
+    BLOCK_ONLY_HIGH: 'block_only_high',
+    /**
+     * The least aggressive filtering level; blocks very few sensitive prompts and responses.
+     *
+     * Access to this feature is restricted and may require your case to be reviewed and approved by
+     * Cloud support.
+     */
+    BLOCK_NONE: 'block_none'
+};
+/**
+ * A filter level controlling whether generation of images containing people or faces is allowed.
+ *
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
+ * documentation for more details.
+ *
+ * @public
+ */
+const ImagenPersonFilterLevel = {
+    /**
+     * Disallow generation of images containing people or faces; images of people are filtered out.
+     */
+    BLOCK_ALL: 'dont_allow',
+    /**
+     * Allow generation of images containing adults only; images of children are filtered out.
+     *
+     * Generation of images containing people or faces may require your use case to be
+     * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+     * for more details.
+     */
+    ALLOW_ADULT: 'allow_adult',
+    /**
+     * Allow generation of images containing adults and children.
+     *
+     * Generation of images containing people or faces may require your use case to be
+     * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+     * for more details.
+     */
+    ALLOW_ALL: 'allow_all'
+};
+/**
+ * Aspect ratios for Imagen images.
+ * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +const ImagenAspectRatio = { + /** + * Square (1:1) aspect ratio. + */ + 'SQUARE': '1:1', + /** + * Landscape (3:4) aspect ratio. + */ + 'LANDSCAPE_3x4': '3:4', + /** + * Portrait (4:3) aspect ratio. + */ + 'PORTRAIT_4x3': '4:3', + /** + * Landscape (16:9) aspect ratio. + */ + 'LANDSCAPE_16x9': '16:9', + /** + * Portrait (9:16) aspect ratio. + */ + 'PORTRAIT_9x16': '9:16' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase AI SDK. + * This determines which backend service (Vertex AI Gemini API or Gemini Developer API) + * the SDK will communicate with. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +const BackendType = { + /** + * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + VERTEX_AI: 'VERTEX_AI', + /** + * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}). + * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + GOOGLE_AI: 'GOOGLE_AI' +}; // Using 'as const' makes the string values literal types + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Abstract base class representing the configuration for an AI service backend. + * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for + * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and + * {@link VertexAIBackend} for the Vertex AI Gemini API. + * + * @public + */ +class Backend { + /** + * Protected constructor for use by subclasses. + * @param type - The backend type. + */ + constructor(type) { + this.backendType = type; + } +} +/** + * Configuration class for the Gemini Developer API. 
+ * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Gemini Developer API as the backend. + * + * @public + */ +class GoogleAIBackend extends Backend { + /** + * Creates a configuration object for the Gemini Developer API backend. + */ + constructor() { + super(BackendType.GOOGLE_AI); + } +} +/** + * Configuration class for the Vertex AI Gemini API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend. + * + * @public + */ +class VertexAIBackend extends Backend { + /** + * Creates a configuration object for the Vertex AI backend. + * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + constructor(location = DEFAULT_LOCATION) { + super(BackendType.VERTEX_AI); + if (!location) { + this.location = DEFAULT_LOCATION; + } + else { + this.location = location; + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI} + * instances by backend type. + * + * @internal + */ +function encodeInstanceIdentifier(backend) { + if (backend instanceof GoogleAIBackend) { + return `${AI_TYPE}/googleai`; + } + else if (backend instanceof VertexAIBackend) { + return `${AI_TYPE}/vertexai/${backend.location}`; + } + else { + throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(backend.backendType)}`); + } +} +/** + * Decodes an instance identifier string into a {@link Backend}. + * + * @internal + */ +function decodeInstanceIdentifier(instanceIdentifier) { + const identifierParts = instanceIdentifier.split('/'); + if (identifierParts[0] !== AI_TYPE) { + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`); + } + const backendType = identifierParts[1]; + switch (backendType) { + case 'vertexai': + const location = identifierParts[2]; + if (!location) { + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown location '${instanceIdentifier}'`); + } + return new VertexAIBackend(location); + case 'googleai': + return new GoogleAIBackend(); + default: + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier string: '${instanceIdentifier}'`); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const logger = new Logger('@firebase/vertexai');
+
+/**
+ * @internal
+ */
+var Availability;
+(function (Availability) {
+    Availability["UNAVAILABLE"] = "unavailable";
+    Availability["DOWNLOADABLE"] = "downloadable";
+    Availability["DOWNLOADING"] = "downloading";
+    Availability["AVAILABLE"] = "available";
+})(Availability || (Availability = {}));
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Defaults to support image inputs for convenience.
+const defaultExpectedInputs = [{ type: 'image' }];
+/**
+ * Defines an inference "backend" that uses Chrome's on-device model,
+ * and encapsulates logic for detecting when on-device inference is
+ * possible.
+ */
+class ChromeAdapterImpl {
+    constructor(languageModelProvider, mode, onDeviceParams) {
+        this.languageModelProvider = languageModelProvider;
+        this.mode = mode;
+        this.isDownloading = false;
+        this.onDeviceParams = {
+            createOptions: {
+                expectedInputs: defaultExpectedInputs
+            }
+        };
+        if (onDeviceParams) {
+            this.onDeviceParams = onDeviceParams;
+            if (!this.onDeviceParams.createOptions) {
+                this.onDeviceParams.createOptions = {
+                    expectedInputs: defaultExpectedInputs
+                };
+            }
+            else if (!this.onDeviceParams.createOptions.expectedInputs) {
+                this.onDeviceParams.createOptions.expectedInputs =
+                    defaultExpectedInputs;
+            }
+        }
+    }
+    /**
+     * Checks if a given request can be made on-device.
+     *
+     * Encapsulates a few concerns:
+     * - the mode
+     * - API existence
+     * - prompt formatting
+     * - model availability, including triggering download if necessary
+     *
+     * Pros: callers needn't be concerned with details of on-device availability.
+     * Cons: this method spans a few concerns and splits request validation from usage.
+     * If instance variables weren't already part of the API, we could consider a better
+     * separation of concerns.
+     */
+    async isAvailable(request) {
+        if (!this.mode) {
+            logger.debug(`On-device inference unavailable because mode is undefined.`);
+            return false;
+        }
+        if (this.mode === InferenceMode.ONLY_IN_CLOUD) {
+            logger.debug(`On-device inference unavailable because mode is "only_in_cloud".`);
+            return false;
+        }
+        // Triggers out-of-band download so model will eventually become available.
+        const availability = await this.downloadIfAvailable();
+        if (this.mode === InferenceMode.ONLY_ON_DEVICE) {
+            // If it will never be available due to API unavailability, throw.
+ if (availability === Availability.UNAVAILABLE) { + throw new AIError(AIErrorCode.API_NOT_ENABLED, 'Local LanguageModel API not available in this environment.'); + } + else if (availability === Availability.DOWNLOADABLE || + availability === Availability.DOWNLOADING) { + // TODO(chholland): Better user experience during download - progress? + logger.debug(`Waiting for download of LanguageModel to complete.`); + await this.downloadPromise; + return true; + } + return true; + } + // Applies prefer_on_device logic. + if (availability !== Availability.AVAILABLE) { + logger.debug(`On-device inference unavailable because availability is "${availability}".`); + return false; + } + if (!ChromeAdapterImpl.isOnDeviceRequest(request)) { + logger.debug(`On-device inference unavailable because request is incompatible.`); + return false; + } + return true; + } + /** + * Generates content on device. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContent} for generating content in + * Cloud. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + async generateContent(request) { + const session = await this.createSession(); + const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)); + const text = await session.prompt(contents, this.onDeviceParams.promptOptions); + return ChromeAdapterImpl.toResponse(text); + } + /** + * Generates content stream on device. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in + * Cloud. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + async generateContentStream(request) { + const session = await this.createSession(); + const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)); + const stream = session.promptStreaming(contents, this.onDeviceParams.promptOptions); + return ChromeAdapterImpl.toStreamResponse(stream); + } + async countTokens(_request) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'Count Tokens is not yet available for on-device model.'); + } + /** + * Asserts inference for the given request can be performed by an on-device model. + */ + static isOnDeviceRequest(request) { + // Returns false if the prompt is empty. + if (request.contents.length === 0) { + logger.debug('Empty prompt rejected for on-device inference.'); + return false; + } + for (const content of request.contents) { + if (content.role === 'function') { + logger.debug(`"Function" role rejected for on-device inference.`); + return false; + } + // Returns false if request contains an image with an unsupported mime type. + for (const part of content.parts) { + if (part.inlineData && + ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(part.inlineData.mimeType) === -1) { + logger.debug(`Unsupported mime type "${part.inlineData.mimeType}" rejected for on-device inference.`); + return false; + } + } + } + return true; + } + /** + * Encapsulates logic to get availability and download a model if one is downloadable. + */ + async downloadIfAvailable() { + const availability = await this.languageModelProvider?.availability(this.onDeviceParams.createOptions); + if (availability === Availability.DOWNLOADABLE) { + this.download(); + } + return availability; + } + /** + * Triggers out-of-band download of an on-device model. 
+ * + * Chrome only downloads models as needed. Chrome knows a model is needed when code calls + * LanguageModel.create. + * + * Since Chrome manages the download, the SDK can only avoid redundant download requests by + * tracking if a download has previously been requested. + */ + download() { + if (this.isDownloading) { + return; + } + this.isDownloading = true; + this.downloadPromise = this.languageModelProvider + ?.create(this.onDeviceParams.createOptions) + .finally(() => { + this.isDownloading = false; + }); + } + /** + * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object. + */ + static async toLanguageModelMessage(content) { + const languageModelMessageContents = await Promise.all(content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent)); + return { + role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role), + content: languageModelMessageContents + }; + } + /** + * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object. + */ + static async toLanguageModelMessageContent(part) { + if (part.text) { + return { + type: 'text', + value: part.text + }; + } + else if (part.inlineData) { + const formattedImageContent = await fetch(`data:${part.inlineData.mimeType};base64,${part.inlineData.data}`); + const imageBlob = await formattedImageContent.blob(); + const imageBitmap = await createImageBitmap(imageBlob); + return { + type: 'image', + value: imageBitmap + }; + } + throw new AIError(AIErrorCode.REQUEST_ERROR, `Processing of this Part type is not currently supported.`); + } + /** + * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string. + */ + static toLanguageModelMessageRole(role) { + // Assumes 'function' rule has been filtered by isOnDeviceRequest + return role === 'model' ? 'assistant' : 'user'; + } + /** + * Abstracts Chrome session creation. + * + * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all + * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all + * inference. + * + * Chrome will remove a model from memory if it's no longer in use, so this method ensures a + * new session is created before an old session is destroyed. + */ + async createSession() { + if (!this.languageModelProvider) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Chrome AI requested for unsupported browser version.'); + } + const newSession = await this.languageModelProvider.create(this.onDeviceParams.createOptions); + if (this.oldSession) { + this.oldSession.destroy(); + } + // Holds session reference, so model isn't unloaded from memory. + this.oldSession = newSession; + return newSession; + } + /** + * Formats string returned by Chrome as a {@link Response} returned by Firebase AI. + */ + static toResponse(text) { + return { + json: async () => ({ + candidates: [ + { + content: { + parts: [{ text }] + } + } + ] + }) + }; + } + /** + * Formats string stream returned by Chrome as SSE returned by Firebase AI. 
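+ *
+ * @example
+ * ```javascript
+ * // A minimal sketch of the SSE framing produced below; the chunk text is illustrative.
+ * // Each string chunk emitted by Chrome is wrapped as:
+ * // data: {"candidates":[{"content":{"role":"model","parts":[{"text":"Hi"}]}}]}\n\n
+ * ```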
+ */ + static toStreamResponse(stream) { + const encoder = new TextEncoder(); + return { + body: stream.pipeThrough(new TransformStream({ + transform(chunk, controller) { + const json = JSON.stringify({ + candidates: [ + { + content: { + role: 'model', + parts: [{ text: chunk }] + } + } + ] + }); + controller.enqueue(encoder.encode(`data: ${json}\n\n`)); + } + })) + }; + } +} +// Visible for testing +ChromeAdapterImpl.SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png']; +/** + * Creates a ChromeAdapterImpl on demand. + */ +function chromeAdapterFactory(mode, window, params) { + // Do not initialize a ChromeAdapter if we are not in hybrid mode. + if (typeof window !== 'undefined' && mode) { + return new ChromeAdapterImpl(window.LanguageModel, mode, params); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class AIService { + constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) { + this.app = app; + this.backend = backend; + this.chromeAdapterFactory = chromeAdapterFactory; + const appCheck = appCheckProvider?.getImmediate({ optional: true }); + const auth = authProvider?.getImmediate({ optional: true }); + this.auth = auth || null; + this.appCheck = appCheck || null; + if (backend instanceof VertexAIBackend) { + this.location = backend.location; + } + else { + this.location = ''; + } + } + _delete() { + return Promise.resolve(); + } + set options(optionsToSet) { + this._options = optionsToSet; + } + get options() { + return this._options; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +function factory(container, { instanceIdentifier }) { + if (!instanceIdentifier) { + throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.'); + } + const backend = decodeInstanceIdentifier(instanceIdentifier); + // getImmediate for FirebaseApp will always succeed + const app = container.getProvider('app').getImmediate(); + const auth = container.getProvider('auth-internal'); + const appCheckProvider = container.getProvider('app-check-internal'); + return new AIService(app, backend, auth, appCheckProvider, chromeAdapterFactory); +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Base class for Firebase AI model APIs. + * + * Instances of this class are associated with a specific Firebase AI {@link Backend} + * and provide methods for interacting with the configured generative model. + * + * @public + */ +class AIModel { + /** + * Constructs a new instance of the {@link AIModel} class. + * + * This constructor should only be called from subclasses that provide + * a model API. + * + * @param ai - an {@link AI} instance. + * @param modelName - The name of the model being used. It can be in one of the following formats: + * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) + * - `models/my-model` (will resolve to `publishers/google/models/my-model`) + * - `publishers/my-publisher/models/my-model` (fully qualified model name) + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @internal + */ + constructor(ai, modelName) { + if (!ai.app?.options?.apiKey) { + throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`); + } + else if (!ai.app?.options?.projectId) { + throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`); + } + else if (!ai.app?.options?.appId) { + throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`); + } + else { + this._apiSettings = { + apiKey: ai.app.options.apiKey, + project: ai.app.options.projectId, + appId: ai.app.options.appId, + automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled, + location: ai.location, + backend: ai.backend + }; + if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) { + const token = ai.app.settings.appCheckToken; + this._apiSettings.getAppCheckToken = () => { + return Promise.resolve({ token }); + }; + } + else if (ai.appCheck) { + if (ai.options?.useLimitedUseAppCheckTokens) { + this._apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken(); + } + else { + this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken(); + } + } + if (ai.auth) { + this._apiSettings.getAuthToken = () => ai.auth.getToken(); + } + this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType); + } + } + /** + * Normalizes the given model name to a fully qualified model resource name. + * + * @param modelName - The model name to normalize. + * @returns The fully qualified model resource name. 
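+ *
+ * @example
+ * ```javascript
+ * // A hedged sketch; the model name is illustrative.
+ * // Gemini Developer API backend: 'my-model' resolves to 'models/my-model'.
+ * // Vertex AI backend: 'my-model' resolves to 'publishers/google/models/my-model'.
+ * ```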
+ * + * @internal + */ + static normalizeModelName(modelName, backendType) { + if (backendType === BackendType.GOOGLE_AI) { + return AIModel.normalizeGoogleAIModelName(modelName); + } + else { + return AIModel.normalizeVertexAIModelName(modelName); + } + } + /** + * @internal + */ + static normalizeGoogleAIModelName(modelName) { + return `models/${modelName}`; + } + /** + * @internal + */ + static normalizeVertexAIModelName(modelName) { + let model; + if (modelName.includes('/')) { + if (modelName.startsWith('models/')) { + // Add 'publishers/google' if the user is only passing in 'models/model-name'. + model = `publishers/google/${modelName}`; + } + else { + // Any other custom format (e.g. tuned models) must be passed in correctly. + model = modelName; + } + } + else { + // If path is not included, assume it's a non-tuned model. + model = `publishers/google/models/${modelName}`; + } + return model; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +var Task; +(function (Task) { + Task["GENERATE_CONTENT"] = "generateContent"; + Task["STREAM_GENERATE_CONTENT"] = "streamGenerateContent"; + Task["COUNT_TOKENS"] = "countTokens"; + Task["PREDICT"] = "predict"; +})(Task || (Task = {})); +class RequestUrl { + constructor(model, task, apiSettings, stream, requestOptions) { + this.model = model; + this.task = task; + this.apiSettings = apiSettings; + this.stream = stream; + this.requestOptions = requestOptions; + } + toString() { + const url = new URL(this.baseUrl); // Throws if the URL is invalid + url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`; + url.search = this.queryParams.toString(); + return url.toString(); + } + get baseUrl() { + return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`; + } + get apiVersion() { + return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available + } + get modelPath() { + if (this.apiSettings.backend instanceof GoogleAIBackend) { + return `projects/${this.apiSettings.project}/${this.model}`; + } + else if (this.apiSettings.backend instanceof VertexAIBackend) { + return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`; + } + else { + throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`); + } + } + get queryParams() { + const params = new URLSearchParams(); + if (this.stream) { + params.set('alt', 'sse'); + } + return params; + } +} +class WebSocketUrl { + constructor(apiSettings) { + this.apiSettings = apiSettings; + } + toString() { + const url = new URL(`wss://${DEFAULT_DOMAIN}`); + url.pathname = this.pathname; + const queryParams = new URLSearchParams(); + queryParams.set('key', this.apiSettings.apiKey); + url.search = queryParams.toString(); + return url.toString(); + } + get pathname() { + if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return 
'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent'; + } + else { + return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`; + } + } +} +/** + * Log language and "fire/version" to x-goog-api-client + */ +function getClientHeaders() { + const loggingTags = []; + loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`); + loggingTags.push(`fire/${PACKAGE_VERSION}`); + return loggingTags.join(' '); +} +async function getHeaders(url) { + const headers = new Headers(); + headers.append('Content-Type', 'application/json'); + headers.append('x-goog-api-client', getClientHeaders()); + headers.append('x-goog-api-key', url.apiSettings.apiKey); + if (url.apiSettings.automaticDataCollectionEnabled) { + headers.append('X-Firebase-Appid', url.apiSettings.appId); + } + if (url.apiSettings.getAppCheckToken) { + const appCheckToken = await url.apiSettings.getAppCheckToken(); + if (appCheckToken) { + headers.append('X-Firebase-AppCheck', appCheckToken.token); + if (appCheckToken.error) { + logger.warn(`Unable to obtain a valid App Check token: ${appCheckToken.error.message}`); + } + } + } + if (url.apiSettings.getAuthToken) { + const authToken = await url.apiSettings.getAuthToken(); + if (authToken) { + headers.append('Authorization', `Firebase ${authToken.accessToken}`); + } + } + return headers; +} +async function constructRequest(model, task, apiSettings, stream, body, requestOptions) { + const url = new RequestUrl(model, task, apiSettings, stream, requestOptions); + return { + url: url.toString(), + fetchOptions: { + method: 'POST', + headers: await getHeaders(url), + body + } + }; +} +async function makeRequest(model, task, apiSettings, stream, body, requestOptions) { + const url = new RequestUrl(model, task, apiSettings, stream, requestOptions); + let response; + let fetchTimeoutId; + try { + const request = await constructRequest(model, task, apiSettings, stream, body, requestOptions); + // Timeout is 180s by default + const timeoutMillis = requestOptions?.timeout != null && requestOptions.timeout >= 0 + ? requestOptions.timeout + : DEFAULT_FETCH_TIMEOUT_MS; + const abortController = new AbortController(); + fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis); + request.fetchOptions.signal = abortController.signal; + response = await fetch(request.url, request.fetchOptions); + if (!response.ok) { + let message = ''; + let errorDetails; + try { + const json = await response.json(); + message = json.error.message; + if (json.error.details) { + message += ` ${JSON.stringify(json.error.details)}`; + errorDetails = json.error.details; + } + } + catch (e) { + // ignored + } + if (response.status === 403 && + errorDetails && + errorDetails.some((detail) => detail.reason === 'SERVICE_DISABLED') && + errorDetails.some((detail) => detail.links?.[0]?.description.includes('Google developers console API activation'))) { + throw new AIError(AIErrorCode.API_NOT_ENABLED, `The Firebase AI SDK requires the Firebase AI ` + + `API ('firebasevertexai.googleapis.com') to be enabled in your ` + + `Firebase project. Enable this API by visiting the Firebase Console ` + + `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` + + `and clicking "Get started". 
If you enabled this API recently, ` + + `wait a few minutes for the action to propagate to our systems and ` + + `then retry.`, { + status: response.status, + statusText: response.statusText, + errorDetails + }); + } + throw new AIError(AIErrorCode.FETCH_ERROR, `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`, { + status: response.status, + statusText: response.statusText, + errorDetails + }); + } + } + catch (e) { + let err = e; + if (e.code !== AIErrorCode.FETCH_ERROR && + e.code !== AIErrorCode.API_NOT_ENABLED && + e instanceof Error) { + err = new AIError(AIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}`); + err.stack = e.stack; + } + throw err; + } + finally { + if (fetchTimeoutId) { + clearTimeout(fetchTimeoutId); + } + } + return response; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Check that at least one candidate exists and does not have a bad + * finish reason. Warns if multiple candidates exist. + */ +function hasValidCandidates(response) { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + logger.warn(`This response had ${response.candidates.length} ` + + `candidates. Returning text from the first candidate only. ` + + `Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, { + response + }); + } + return true; + } + else { + return false; + } +} +/** + * Creates an EnhancedGenerateContentResponse object that has helper functions and + * other modifications that improve usability. + */ +function createEnhancedContentResponse(response, inferenceSource = InferenceSource.IN_CLOUD) { + /** + * The Vertex AI backend omits default values. + * This causes the `index` property to be omitted from the first candidate in the + * response, since it has index 0, and 0 is a default value. + * See: https://github.com/firebase/firebase-js-sdk/issues/8566 + */ + if (response.candidates && !response.candidates[0].hasOwnProperty('index')) { + response.candidates[0].index = 0; + } + const responseWithHelpers = addHelpers(response); + responseWithHelpers.inferenceSource = inferenceSource; + return responseWithHelpers; +} +/** + * Adds convenience helper methods to a response object, including stream + * chunks (as long as each chunk is a complete GenerateContentResponse JSON). + */ +function addHelpers(response) { + response.text = () => { + if (hasValidCandidates(response)) { + return getText(response, part => !part.thought); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Text not available. 
${formatBlockErrorMessage(response)}`, { + response + }); + } + return ''; + }; + response.thoughtSummary = () => { + if (hasValidCandidates(response)) { + const result = getText(response, part => !!part.thought); + return result === '' ? undefined : result; + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Thought summary not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + response.inlineDataParts = () => { + if (hasValidCandidates(response)) { + return getInlineDataParts(response); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Data not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + response.functionCalls = () => { + if (hasValidCandidates(response)) { + return getFunctionCalls(response); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Function call not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + return response; +} +/** + * Returns all text from the first candidate's parts, filtering by whether + * `partFilter()` returns true. + * + * @param response - The `GenerateContentResponse` from which to extract text. + * @param partFilter - Only return `Part`s for which this returns true + */ +function getText(response, partFilter) { + const textStrings = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.text && partFilter(part)) { + textStrings.push(part.text); + } + } + } + if (textStrings.length > 0) { + return textStrings.join(''); + } + else { + return ''; + } +} +/** + * Returns every {@link FunctionCall} associated with first candidate. + */ +function getFunctionCalls(response) { + const functionCalls = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.functionCall) { + functionCalls.push(part.functionCall); + } + } + } + if (functionCalls.length > 0) { + return functionCalls; + } + else { + return undefined; + } +} +/** + * Returns every {@link InlineDataPart} in the first candidate if present. 
+ * + * @internal + */ +function getInlineDataParts(response) { + const data = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.inlineData) { + data.push(part); + } + } + } + if (data.length > 0) { + return data; + } + else { + return undefined; + } +} +const badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY]; +function hadBadFinishReason(candidate) { + return (!!candidate.finishReason && + badFinishReasons.some(reason => reason === candidate.finishReason)); +} +function formatBlockErrorMessage(response) { + let message = ''; + if ((!response.candidates || response.candidates.length === 0) && + response.promptFeedback) { + message += 'Response was blocked'; + if (response.promptFeedback?.blockReason) { + message += ` due to ${response.promptFeedback.blockReason}`; + } + if (response.promptFeedback?.blockReasonMessage) { + message += `: ${response.promptFeedback.blockReasonMessage}`; + } + } + else if (response.candidates?.[0]) { + const firstCandidate = response.candidates[0]; + if (hadBadFinishReason(firstCandidate)) { + message += `Candidate was blocked due to ${firstCandidate.finishReason}`; + if (firstCandidate.finishMessage) { + message += `: ${firstCandidate.finishMessage}`; + } + } + } + return message; +} +/** + * Convert a generic successful fetch response body to an Imagen response object + * that can be returned to the user. This converts the REST APIs response format to our + * APIs representation of a response. + * + * @internal + */ +async function handlePredictResponse(response) { + const responseJson = await response.json(); + const images = []; + let filteredReason = undefined; + // The backend should always send a non-empty array of predictions if the response was successful. + if (!responseJson.predictions || responseJson.predictions?.length === 0) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'); + } + for (const prediction of responseJson.predictions) { + if (prediction.raiFilteredReason) { + filteredReason = prediction.raiFilteredReason; + } + else if (prediction.mimeType && prediction.bytesBase64Encoded) { + images.push({ + mimeType: prediction.mimeType, + bytesBase64Encoded: prediction.bytesBase64Encoded + }); + } + else if (prediction.mimeType && prediction.gcsUri) { + images.push({ + mimeType: prediction.mimeType, + gcsURI: prediction.gcsUri + }); + } + else if (prediction.safetyAttributes) ; + else { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Unexpected element in 'predictions' array in response: '${JSON.stringify(prediction)}'`); + } + } + return { images, filteredReason }; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI). 
+ * The public API prioritizes the format used by the Vertex AI Gemini API. + * We avoid having two sets of types by translating requests and responses between the two API formats. + * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API + * with minimal code changes. + * + * In here are functions that map requests and responses between the two API formats. + * Requests in the Vertex AI format are mapped to the Google AI format before being sent. + * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user. + */ +/** + * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI. + * + * @param generateContentRequest The {@link GenerateContentRequest} to map. + * @returns A {@link GenerateContentResponse} that conforms to the Google AI format. + * + * @throws If the request contains properties that are unsupported by Google AI. + * + * @internal + */ +function mapGenerateContentRequest(generateContentRequest) { + generateContentRequest.safetySettings?.forEach(safetySetting => { + if (safetySetting.method) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'SafetySetting.method is not supported in the the Gemini Developer API. Please remove this property.'); + } + }); + if (generateContentRequest.generationConfig?.topK) { + const roundedTopK = Math.round(generateContentRequest.generationConfig.topK); + if (roundedTopK !== generateContentRequest.generationConfig.topK) { + logger.warn('topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.'); + generateContentRequest.generationConfig.topK = roundedTopK; + } + } + return generateContentRequest; +} +/** + * Maps a {@link GenerateContentResponse} from Google AI to the format of the + * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API. + * + * @param googleAIResponse The {@link GenerateContentResponse} from Google AI. + * @returns A {@link GenerateContentResponse} that conforms to the public API's format. + * + * @internal + */ +function mapGenerateContentResponse(googleAIResponse) { + const generateContentResponse = { + candidates: googleAIResponse.candidates + ? mapGenerateContentCandidates(googleAIResponse.candidates) + : undefined, + prompt: googleAIResponse.promptFeedback + ? mapPromptFeedback(googleAIResponse.promptFeedback) + : undefined, + usageMetadata: googleAIResponse.usageMetadata + }; + return generateContentResponse; +} +/** + * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI. + * + * @param countTokensRequest The {@link CountTokensRequest} to map. + * @param model The model to count tokens with. + * @returns A {@link CountTokensRequest} that conforms to the Google AI format. + * + * @internal + */ +function mapCountTokensRequest(countTokensRequest, model) { + const mappedCountTokensRequest = { + generateContentRequest: { + model, + ...countTokensRequest + } + }; + return mappedCountTokensRequest; +} +/** + * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms + * to the Vertex AI API format. + * + * @param candidates The {@link GoogleAIGenerateContentCandidate} to map. + * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format. + * + * @throws If any {@link Part} in the candidates has a `videoMetadata` property. 
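+ *
+ * @example
+ * ```javascript
+ * // A hedged illustration of the shape change performed below (fields abbreviated):
+ * // in:  { citationMetadata: { citationSources: [src] }, safetyRatings: [{ category }] }
+ * // out: { citationMetadata: { citations: [src] },
+ * //        safetyRatings: [{ category, severity: HarmSeverity.HARM_SEVERITY_UNSUPPORTED,
+ * //                          probabilityScore: 0, severityScore: 0 }] }
+ * ```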
+ * + * @internal + */ +function mapGenerateContentCandidates(candidates) { + const mappedCandidates = []; + let mappedSafetyRatings; + if (mappedCandidates) { + candidates.forEach(candidate => { + // Map citationSources to citations. + let citationMetadata; + if (candidate.citationMetadata) { + citationMetadata = { + citations: candidate.citationMetadata.citationSources + }; + } + // Assign missing candidate SafetyRatings properties to their defaults if undefined. + if (candidate.safetyRatings) { + mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => { + return { + ...safetyRating, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0 + }; + }); + } + // videoMetadata is not supported. + // Throw early since developers may send a long video as input and only expect to pay + // for inference on a small portion of the video. + if (candidate.content?.parts?.some(part => part?.videoMetadata)) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'); + } + const mappedCandidate = { + index: candidate.index, + content: candidate.content, + finishReason: candidate.finishReason, + finishMessage: candidate.finishMessage, + safetyRatings: mappedSafetyRatings, + citationMetadata, + groundingMetadata: candidate.groundingMetadata, + urlContextMetadata: candidate.urlContextMetadata + }; + mappedCandidates.push(mappedCandidate); + }); + } + return mappedCandidates; +} +function mapPromptFeedback(promptFeedback) { + // Assign missing SafetyRating properties to their defaults if undefined. + const mappedSafetyRatings = []; + promptFeedback.safetyRatings.forEach(safetyRating => { + mappedSafetyRatings.push({ + category: safetyRating.category, + probability: safetyRating.probability, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0, + blocked: safetyRating.blocked + }); + }); + const mappedPromptFeedback = { + blockReason: promptFeedback.blockReason, + safetyRatings: mappedSafetyRatings, + blockReasonMessage: promptFeedback.blockReasonMessage + }; + return mappedPromptFeedback; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/; +/** + * Process a response.body stream from the backend and return an + * iterator that provides one complete GenerateContentResponse at a time + * and a promise that resolves with a single aggregated + * GenerateContentResponse. 
+ * + * @param response - Response from a fetch call + */ +function processStream(response, apiSettings, inferenceSource) { + const inputStream = response.body.pipeThrough(new TextDecoderStream('utf8', { fatal: true })); + const responseStream = getResponseStream(inputStream); + const [stream1, stream2] = responseStream.tee(); + return { + stream: generateResponseSequence(stream1, apiSettings, inferenceSource), + response: getResponsePromise(stream2, apiSettings, inferenceSource) + }; +} +async function getResponsePromise(stream, apiSettings, inferenceSource) { + const allResponses = []; + const reader = stream.getReader(); + while (true) { + const { done, value } = await reader.read(); + if (done) { + let generateContentResponse = aggregateResponses(allResponses); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + generateContentResponse = mapGenerateContentResponse(generateContentResponse); + } + return createEnhancedContentResponse(generateContentResponse, inferenceSource); + } + allResponses.push(value); + } +} +async function* generateResponseSequence(stream, apiSettings, inferenceSource) { + const reader = stream.getReader(); + while (true) { + const { value, done } = await reader.read(); + if (done) { + break; + } + let enhancedResponse; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + enhancedResponse = createEnhancedContentResponse(mapGenerateContentResponse(value), inferenceSource); + } + else { + enhancedResponse = createEnhancedContentResponse(value, inferenceSource); + } + const firstCandidate = enhancedResponse.candidates?.[0]; + // Don't yield a response with no useful data for the developer. + if (!firstCandidate?.content?.parts && + !firstCandidate?.finishReason && + !firstCandidate?.citationMetadata && + !firstCandidate?.urlContextMetadata) { + continue; + } + yield enhancedResponse; + } +} +/** + * Reads a raw stream from the fetch response and join incomplete + * chunks, returning a new stream that provides a single complete + * GenerateContentResponse in each iteration. + */ +function getResponseStream(inputStream) { + const reader = inputStream.getReader(); + const stream = new ReadableStream({ + start(controller) { + let currentText = ''; + return pump(); + function pump() { + return reader.read().then(({ value, done }) => { + if (done) { + if (currentText.trim()) { + controller.error(new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')); + return; + } + controller.close(); + return; + } + currentText += value; + let match = currentText.match(responseLineRE); + let parsedResponse; + while (match) { + try { + parsedResponse = JSON.parse(match[1]); + } + catch (e) { + controller.error(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing JSON response: "${match[1]}`)); + return; + } + controller.enqueue(parsedResponse); + currentText = currentText.substring(match[0].length); + match = currentText.match(responseLineRE); + } + return pump(); + }); + } + } + }); + return stream; +} +/** + * Aggregates an array of `GenerateContentResponse`s into a single + * GenerateContentResponse. + */ +function aggregateResponses(responses) { + const lastResponse = responses[responses.length - 1]; + const aggregatedResponse = { + promptFeedback: lastResponse?.promptFeedback + }; + for (const response of responses) { + if (response.candidates) { + for (const candidate of response.candidates) { + // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined. 
+ // See: https://github.com/firebase/firebase-js-sdk/issues/8566 + const i = candidate.index || 0; + if (!aggregatedResponse.candidates) { + aggregatedResponse.candidates = []; + } + if (!aggregatedResponse.candidates[i]) { + aggregatedResponse.candidates[i] = { + index: candidate.index + }; + } + // Keep overwriting, the last one will be final + aggregatedResponse.candidates[i].citationMetadata = + candidate.citationMetadata; + aggregatedResponse.candidates[i].finishReason = candidate.finishReason; + aggregatedResponse.candidates[i].finishMessage = + candidate.finishMessage; + aggregatedResponse.candidates[i].safetyRatings = + candidate.safetyRatings; + aggregatedResponse.candidates[i].groundingMetadata = + candidate.groundingMetadata; + // The urlContextMetadata object is defined in the first chunk of the response stream. + // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to + // make sure that we don't overwrite the first value urlContextMetadata object with undefined. + // FIXME: What happens if we receive a second, valid urlContextMetadata object? + const urlContextMetadata = candidate.urlContextMetadata; + if (typeof urlContextMetadata === 'object' && + urlContextMetadata !== null && + Object.keys(urlContextMetadata).length > 0) { + aggregatedResponse.candidates[i].urlContextMetadata = + urlContextMetadata; + } + /** + * Candidates should always have content and parts, but this handles + * possible malformed responses. + */ + if (candidate.content) { + // Skip a candidate without parts. + if (!candidate.content.parts) { + continue; + } + if (!aggregatedResponse.candidates[i].content) { + aggregatedResponse.candidates[i].content = { + role: candidate.content.role || 'user', + parts: [] + }; + } + for (const part of candidate.content.parts) { + const newPart = { ...part }; + // The backend can send empty text parts. If these are sent back + // (e.g. in chat history), the backend will respond with an error. + // To prevent this, ignore empty text parts. + if (part.text === '') { + continue; + } + if (Object.keys(newPart).length > 0) { + aggregatedResponse.candidates[i].content.parts.push(newPart); + } + } + } + } + } + } + return aggregatedResponse; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const errorsCausingFallback = [ + // most network errors + AIErrorCode.FETCH_ERROR, + // fallback code for all other errors in makeRequest + AIErrorCode.ERROR, + // error due to API not being enabled in project + AIErrorCode.API_NOT_ENABLED +]; +/** + * Dispatches a request to the appropriate backend (on-device or in-cloud) + * based on the inference mode. + * + * @param request - The request to be sent. + * @param chromeAdapter - The on-device model adapter. + * @param onDeviceCall - The function to call for on-device inference. + * @param inCloudCall - The function to call for in-cloud inference. + * @returns The response from the backend. 
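+ *
+ * @example
+ * ```javascript
+ * // A hedged sketch mirroring how the generateContent helpers below invoke this dispatcher;
+ * // `request`, `chromeAdapter`, `apiSettings`, `model`, and `requestOptions` are assumed to be in scope.
+ * const { response, inferenceSource } = await callCloudOrDevice(
+ *   request,
+ *   chromeAdapter,
+ *   () => chromeAdapter.generateContent(request),
+ *   () => generateContentOnCloud(apiSettings, model, request, requestOptions)
+ * );
+ * ```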
+ */ +async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) { + if (!chromeAdapter) { + return { + response: await inCloudCall(), + inferenceSource: InferenceSource.IN_CLOUD + }; + } + switch (chromeAdapter.mode) { + case InferenceMode.ONLY_ON_DEVICE: + if (await chromeAdapter.isAvailable(request)) { + return { + response: await onDeviceCall(), + inferenceSource: InferenceSource.ON_DEVICE + }; + } + throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.'); + case InferenceMode.ONLY_IN_CLOUD: + return { + response: await inCloudCall(), + inferenceSource: InferenceSource.IN_CLOUD + }; + case InferenceMode.PREFER_IN_CLOUD: + try { + return { + response: await inCloudCall(), + inferenceSource: InferenceSource.IN_CLOUD + }; + } + catch (e) { + if (e instanceof AIError && errorsCausingFallback.includes(e.code)) { + return { + response: await onDeviceCall(), + inferenceSource: InferenceSource.ON_DEVICE + }; + } + throw e; + } + case InferenceMode.PREFER_ON_DEVICE: + if (await chromeAdapter.isAvailable(request)) { + return { + response: await onDeviceCall(), + inferenceSource: InferenceSource.ON_DEVICE + }; + } + return { + response: await inCloudCall(), + inferenceSource: InferenceSource.IN_CLOUD + }; + default: + throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = mapGenerateContentRequest(params); + } + return makeRequest(model, Task.STREAM_GENERATE_CONTENT, apiSettings, + /* stream */ true, JSON.stringify(params), requestOptions); +} +async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) { + const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions)); + return processStream(callResult.response, apiSettings); // TODO: Map streaming responses +} +async function generateContentOnCloud(apiSettings, model, params, requestOptions) { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = mapGenerateContentRequest(params); + } + return makeRequest(model, Task.GENERATE_CONTENT, apiSettings, + /* stream */ false, JSON.stringify(params), requestOptions); +} +async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) { + const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions)); + const generateContentResponse = await processGenerateContentResponse(callResult.response, apiSettings); + const enhancedResponse = createEnhancedContentResponse(generateContentResponse, callResult.inferenceSource); + return { + response: enhancedResponse + }; +} +async function processGenerateContentResponse(response, apiSettings) { + const responseJson = await response.json(); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return mapGenerateContentResponse(responseJson); + } + else { + return responseJson; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +function formatSystemInstruction(input) { + // null or undefined + if (input == null) { + return undefined; + } + else if (typeof input === 'string') { + return { role: 'system', parts: [{ text: input }] }; + } + else if (input.text) { + return { role: 'system', parts: [input] }; + } + else if (input.parts) { + if (!input.role) { + return { role: 'system', parts: input.parts }; + } + else { + return input; + } + } +} +function formatNewContent(request) { + let newParts = []; + if (typeof request === 'string') { + newParts = [{ text: request }]; + } + else { + for (const partOrString of request) { + if (typeof partOrString === 'string') { + newParts.push({ text: partOrString }); + } + else { + newParts.push(partOrString); + } + } + } + return assignRoleToPartsAndValidateSendMessageRequest(newParts); +} +/** + * When multiple Part types (i.e. FunctionResponsePart and TextPart) are + * passed in a single Part array, we may need to assign different roles to each + * part. 
Currently only FunctionResponsePart requires a role other than 'user'. + * @private + * @param parts Array of parts to pass to the model + * @returns Array of content items + */ +function assignRoleToPartsAndValidateSendMessageRequest(parts) { + const userContent = { role: 'user', parts: [] }; + const functionContent = { role: 'function', parts: [] }; + let hasUserContent = false; + let hasFunctionContent = false; + for (const part of parts) { + if ('functionResponse' in part) { + functionContent.parts.push(part); + hasFunctionContent = true; + } + else { + userContent.parts.push(part); + hasUserContent = true; + } + } + if (hasUserContent && hasFunctionContent) { + throw new AIError(AIErrorCode.INVALID_CONTENT, 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'); + } + if (!hasUserContent && !hasFunctionContent) { + throw new AIError(AIErrorCode.INVALID_CONTENT, 'No Content is provided for sending chat message.'); + } + if (hasUserContent) { + return userContent; + } + return functionContent; +} +function formatGenerateContentInput(params) { + let formattedRequest; + if (params.contents) { + formattedRequest = params; + } + else { + // Array or string + const content = formatNewContent(params); + formattedRequest = { contents: [content] }; + } + if (params.systemInstruction) { + formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction); + } + return formattedRequest; +} +/** + * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format + * that is expected from the REST API. + * + * @internal + */ +function createPredictRequestBody(prompt, { gcsURI, imageFormat, addWatermark, numberOfImages = 1, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }) { + // Properties that are undefined will be omitted from the JSON string that is sent in the request. + const body = { + instances: [ + { + prompt + } + ], + parameters: { + storageUri: gcsURI, + negativePrompt, + sampleCount: numberOfImages, + aspectRatio, + outputOptions: imageFormat, + addWatermark, + safetyFilterLevel, + personGeneration: personFilterLevel, + includeRaiReason: true, + includeSafetyAttributes: true + } + }; + return body; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// https://ai.google.dev/api/rest/v1beta/Content#part +const VALID_PART_FIELDS = [ + 'text', + 'inlineData', + 'functionCall', + 'functionResponse', + 'thought', + 'thoughtSignature' +]; +const VALID_PARTS_PER_ROLE = { + user: ['text', 'inlineData'], + function: ['functionResponse'], + model: ['text', 'functionCall', 'thought', 'thoughtSignature'], + // System instructions shouldn't be in history anyway. + system: ['text'] +}; +const VALID_PREVIOUS_CONTENT_ROLES = { + user: ['model'], + function: ['model'], + model: ['user', 'function'], + // System instructions shouldn't be in history. 
+ system: [] +}; +function validateChatHistory(history) { + let prevContent = null; + for (const currContent of history) { + const { role, parts } = currContent; + if (!prevContent && role !== 'user') { + throw new AIError(AIErrorCode.INVALID_CONTENT, `First Content should be with role 'user', got ${role}`); + } + if (!POSSIBLE_ROLES.includes(role)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`); + } + if (!Array.isArray(parts)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content should have 'parts' property with an array of Parts`); + } + if (parts.length === 0) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Each Content should have at least one part`); + } + const countFields = { + text: 0, + inlineData: 0, + functionCall: 0, + functionResponse: 0, + thought: 0, + thoughtSignature: 0, + executableCode: 0, + codeExecutionResult: 0 + }; + for (const part of parts) { + for (const key of VALID_PART_FIELDS) { + if (key in part) { + countFields[key] += 1; + } + } + } + const validParts = VALID_PARTS_PER_ROLE[role]; + for (const key of VALID_PART_FIELDS) { + if (!validParts.includes(key) && countFields[key] > 0) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't contain '${key}' part`); + } + } + if (prevContent) { + const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role]; + if (!validPreviousContentRoles.includes(prevContent.role)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't follow '${prevContent.role}'. Valid previous roles: ${JSON.stringify(VALID_PREVIOUS_CONTENT_ROLES)}`); + } + } + prevContent = currContent; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Do not log a message for this error. + */ +const SILENT_ERROR = 'SILENT_ERROR'; +/** + * ChatSession class that enables sending chat messages and stores + * history of sent and received messages so far. + * + * @public + */ +class ChatSession { + constructor(apiSettings, model, chromeAdapter, params, requestOptions) { + this.model = model; + this.chromeAdapter = chromeAdapter; + this.params = params; + this.requestOptions = requestOptions; + this._history = []; + this._sendPromise = Promise.resolve(); + this._apiSettings = apiSettings; + if (params?.history) { + validateChatHistory(params.history); + this._history = params.history; + } + } + /** + * Gets the chat history so far. Blocked prompts are not added to history. + * Neither blocked candidates nor the prompts that generated them are added + * to history. 
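+ *
+ * @example
+ * ```javascript
+ * // A minimal usage sketch; `model` is assumed to be a GenerativeModel instance
+ * // and the prompt is illustrative.
+ * const chat = model.startChat();
+ * await chat.sendMessage('Hello!');
+ * const history = await chat.getHistory();
+ * ```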
+ */ + async getHistory() { + await this._sendPromise; + return this._history; + } + /** + * Sends a chat message and receives a non-streaming + * {@link GenerateContentResult} + */ + async sendMessage(request) { + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: this.params?.safetySettings, + generationConfig: this.params?.generationConfig, + tools: this.params?.tools, + toolConfig: this.params?.toolConfig, + systemInstruction: this.params?.systemInstruction, + contents: [...this._history, newContent] + }; + let finalResult = {}; + // Add onto the chain. + this._sendPromise = this._sendPromise + .then(() => generateContent(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions)) + .then(result => { + if (result.response.candidates && + result.response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = { + parts: result.response.candidates?.[0].content.parts || [], + // Response seems to come back without a role set. + role: result.response.candidates?.[0].content.role || 'model' + }; + this._history.push(responseContent); + } + else { + const blockErrorMessage = formatBlockErrorMessage(result.response); + if (blockErrorMessage) { + logger.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + finalResult = result; + }); + await this._sendPromise; + return finalResult; + } + /** + * Sends a chat message and receives the response as a + * {@link GenerateContentStreamResult} containing an iterable stream + * and a response promise. + */ + async sendMessageStream(request) { + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: this.params?.safetySettings, + generationConfig: this.params?.generationConfig, + tools: this.params?.tools, + toolConfig: this.params?.toolConfig, + systemInstruction: this.params?.systemInstruction, + contents: [...this._history, newContent] + }; + const streamPromise = generateContentStream(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions); + // Add onto the chain. + this._sendPromise = this._sendPromise + .then(() => streamPromise) + // This must be handled to avoid unhandled rejection, but jump + // to the final catch block with a label to not log this error. + .catch(_ignored => { + throw new Error(SILENT_ERROR); + }) + .then(streamResult => streamResult.response) + .then(response => { + if (response.candidates && response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = { ...response.candidates[0].content }; + // Response seems to come back without a role set. + if (!responseContent.role) { + responseContent.role = 'model'; + } + this._history.push(responseContent); + } + else { + const blockErrorMessage = formatBlockErrorMessage(response); + if (blockErrorMessage) { + logger.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + }) + .catch(e => { + // Errors in streamPromise are already catchable by the user as + // streamPromise is returned. + // Avoid duplicating the error message in logs. + if (e.message !== SILENT_ERROR) { + // Users do not have access to _sendPromise to catch errors + // downstream from streamPromise, so they should not throw. 
+ logger.error(e); + } + }); + return streamPromise; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +async function countTokensOnCloud(apiSettings, model, params, requestOptions) { + let body = ''; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + const mappedParams = mapCountTokensRequest(params, model); + body = JSON.stringify(mappedParams); + } + else { + body = JSON.stringify(params); + } + const response = await makeRequest(model, Task.COUNT_TOKENS, apiSettings, false, body, requestOptions); + return response.json(); +} +async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) { + if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.'); + } + return countTokensOnCloud(apiSettings, model, params, requestOptions); +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for generative model APIs. + * @public + */ +class GenerativeModel extends AIModel { + constructor(ai, modelParams, requestOptions, chromeAdapter) { + super(ai, modelParams.model); + this.chromeAdapter = chromeAdapter; + this.generationConfig = modelParams.generationConfig || {}; + this.safetySettings = modelParams.safetySettings || []; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + this.requestOptions = requestOptions || {}; + } + /** + * Makes a single non-streaming call to the model + * and returns an object containing a single {@link GenerateContentResponse}. + */ + async generateContent(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContent(this._apiSettings, this.model, { + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + ...formattedParams + }, this.chromeAdapter, this.requestOptions); + } + /** + * Makes a single streaming call to the model + * and returns an object containing an iterable stream that iterates + * over all chunks in the streaming response as well as + * a promise that returns the final aggregated response. 
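+ *
+ * @example
+ * ```javascript
+ * // A minimal usage sketch; the prompt is illustrative.
+ * const result = await model.generateContentStream('Tell me a story.');
+ * for await (const chunk of result.stream) {
+ *   console.log(chunk.text());
+ * }
+ * const finalResponse = await result.response;
+ * ```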
+ */ + async generateContentStream(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContentStream(this._apiSettings, this.model, { + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + ...formattedParams + }, this.chromeAdapter, this.requestOptions); + } + /** + * Gets a new {@link ChatSession} instance which can be used for + * multi-turn chats. + */ + startChat(startChatParams) { + return new ChatSession(this._apiSettings, this.model, this.chromeAdapter, { + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + /** + * Overrides params inherited from GenerativeModel with those explicitly set in the + * StartChatParams. For example, if startChatParams.generationConfig is set, it'll override + * this.generationConfig. + */ + ...startChatParams + }, this.requestOptions); + } + /** + * Counts the tokens in the provided request. + */ + async countTokens(request) { + const formattedParams = formatGenerateContentInput(request); + return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Represents an active, real-time, bidirectional conversation with the model. + * + * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}. + * + * @beta + */ +class LiveSession { + /** + * @internal + */ + constructor(webSocketHandler, serverMessages) { + this.webSocketHandler = webSocketHandler; + this.serverMessages = serverMessages; + /** + * Indicates whether this Live session is closed. + * + * @beta + */ + this.isClosed = false; + /** + * Indicates whether this Live session is being controlled by an `AudioConversationController`. + * + * @beta + */ + this.inConversation = false; + } + /** + * Sends content to the server. + * + * @param request - The message to send to the model. + * @param turnComplete - Indicates if the turn is complete. Defaults to false. + * @throws If this session has been closed. + * + * @beta + */ + async send(request, turnComplete = true) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const newContent = formatNewContent(request); + const message = { + clientContent: { + turns: [newContent], + turnComplete + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends text to the server in realtime. + * + * @example + * ```javascript + * liveSession.sendTextRealtime("Hello, how are you?"); + * ``` + * + * @param text - The text data to send. + * @throws If this session has been closed. 
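// A sketch of consuming the streaming variants above; both generateContentStream() and
// ChatSession.sendMessageStream() resolve to an object with an async-iterable `stream` and an
// aggregated `response` promise (the prompt text is illustrative):
//
//   const { stream, response } = await model.generateContentStream('Write a haiku about the sea.');
//   for await (const chunk of stream) {
//     console.log(chunk.text());             // partial text as it arrives
//   }
//   const aggregated = await response;       // full response once the stream ends
//   console.log(aggregated.text());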
+ * + * @beta + */ + async sendTextRealtime(text) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + text + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends audio data to the server in realtime. + * + * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz + * little-endian. + * + * @example + * ```javascript + * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian. + * const blob = { mimeType: "audio/pcm", data: pcmData }; + * liveSession.sendAudioRealtime(blob); + * ``` + * + * @param blob - The base64-encoded PCM data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + async sendAudioRealtime(blob) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + audio: blob + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends video data to the server in realtime. + * + * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It + * is recommended to set `mimeType` to `image/jpeg`. + * + * @example + * ```javascript + * // const videoFrame = ... base64-encoded JPEG data + * const blob = { mimeType: "image/jpeg", data: videoFrame }; + * liveSession.sendVideoRealtime(blob); + * ``` + * @param blob - The base64-encoded video data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + async sendVideoRealtime(blob) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + video: blob + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends function responses to the server. + * + * @param functionResponses - The function responses to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendFunctionResponses(functionResponses) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + toolResponse: { + functionResponses + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Yields messages received from the server. + * This can only be used by one consumer at a time. + * + * @returns An `AsyncGenerator` that yields server messages as they arrive. + * @throws If the session is already closed, or if we receive a response that we don't support. + * + * @beta + */ + async *receive() { + if (this.isClosed) { + throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot read from a Live session that is closed. 
Try starting a new Live session.'); + } + for await (const message of this.serverMessages) { + if (message && typeof message === 'object') { + if (LiveResponseType.SERVER_CONTENT in message) { + yield { + type: 'serverContent', + ...message + .serverContent + }; + } + else if (LiveResponseType.TOOL_CALL in message) { + yield { + type: 'toolCall', + ...message + .toolCall + }; + } + else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) { + yield { + type: 'toolCallCancellation', + ...message.toolCallCancellation + }; + } + else { + logger.warn(`Received an unknown message type from the server: ${JSON.stringify(message)}`); + } + } + else { + logger.warn(`Received an invalid message from the server: ${JSON.stringify(message)}`); + } + } + } + /** + * Closes this session. + * All methods on this session will throw an error once this resolves. + * + * @beta + */ + async close() { + if (!this.isClosed) { + this.isClosed = true; + await this.webSocketHandler.close(1000, 'Client closed session.'); + } + } + /** + * Sends realtime input to the server. + * + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * @param mediaChunks - The media chunks to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendMediaChunks(mediaChunks) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + // The backend does not support sending more than one mediaChunk in one message. + // Work around this limitation by sending mediaChunks in separate messages. + mediaChunks.forEach(mediaChunk => { + const message = { + realtimeInput: { mediaChunks: [mediaChunk] } + }; + this.webSocketHandler.send(JSON.stringify(message)); + }); + } + /** + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * Sends a stream of {@link GenerativeContentBlob}. + * + * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendMediaStream(mediaChunkStream) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const reader = mediaChunkStream.getReader(); + while (true) { + try { + const { done, value } = await reader.read(); + if (done) { + break; + } + else if (!value) { + throw new Error('Missing chunk in reader, but reader is not done.'); + } + await this.sendMediaChunks([value]); + } + catch (e) { + // Re-throw any errors that occur during stream consumption or sending. + const message = e instanceof Error ? e.message : 'Error processing media stream.'; + throw new AIError(AIErrorCode.REQUEST_ERROR, message); + } + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for Live generative model APIs. 
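// A sketch of draining the receive() generator above; every yielded message carries a `type`
// discriminator of 'serverContent', 'toolCall', or 'toolCallCancellation' (assumes `liveSession`
// came from LiveGenerativeModel.connect(), shown next):
//
//   for await (const message of liveSession.receive()) {
//     if (message.type === 'serverContent') {
//       // model output: inspect message.modelTurn, message.turnComplete, message.interrupted, ...
//     } else if (message.type === 'toolCall') {
//       // run the requested functions, then reply with liveSession.sendFunctionResponses([...])
//     }
//   }
//   await liveSession.close();               // receive() supports only one consumer at a time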
The Live API enables low-latency, two-way multimodal + * interactions with Gemini. + * + * This class should only be instantiated with {@link getLiveGenerativeModel}. + * + * @beta + */ +class LiveGenerativeModel extends AIModel { + /** + * @internal + */ + constructor(ai, modelParams, + /** + * @internal + */ + _webSocketHandler) { + super(ai, modelParams.model); + this._webSocketHandler = _webSocketHandler; + this.generationConfig = modelParams.generationConfig || {}; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + } + /** + * Starts a {@link LiveSession}. + * + * @returns A {@link LiveSession}. + * @throws If the connection failed to be established with the server. + * + * @beta + */ + async connect() { + const url = new WebSocketUrl(this._apiSettings); + await this._webSocketHandler.connect(url.toString()); + let fullModelPath; + if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + fullModelPath = `projects/${this._apiSettings.project}/${this.model}`; + } + else { + fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`; + } + // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API, + // but the backend expects them to be in the `setup` message. + const { inputAudioTranscription, outputAudioTranscription, ...generationConfig } = this.generationConfig; + const setupMessage = { + setup: { + model: fullModelPath, + generationConfig, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + inputAudioTranscription, + outputAudioTranscription + } + }; + try { + // Begin listening for server messages, and begin the handshake by sending the 'setupMessage' + const serverMessages = this._webSocketHandler.listen(); + this._webSocketHandler.send(JSON.stringify(setupMessage)); + // Verify we received the handshake response 'setupComplete' + const firstMessage = (await serverMessages.next()).value; + if (!firstMessage || + !(typeof firstMessage === 'object') || + !('setupComplete' in firstMessage)) { + await this._webSocketHandler.close(1011, 'Handshake failure'); + throw new AIError(AIErrorCode.RESPONSE_ERROR, 'Server connection handshake failed. The server did not respond with a setupComplete message.'); + } + return new LiveSession(this._webSocketHandler, serverMessages); + } + catch (e) { + // Ensure connection is closed on any setup error + await this._webSocketHandler.close(); + throw e; + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for Imagen model APIs. + * + * This class provides methods for generating images using the Imagen model. 
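// A sketch of establishing a Live session with the connect() handshake above, assuming `ai`
// comes from getAI() and the model name is illustrative (getLiveGenerativeModel is defined
// later in this module):
//
//   const liveModel = getLiveGenerativeModel(ai, { model: 'gemini-live-flash' });
//   const session = await liveModel.connect();   // resolves after the 'setupComplete' handshake
//   await session.send('Hello!');
//   // ...consume session.receive(), then:
//   await session.close();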
+ * + * @example + * ```javascript + * const imagen = new ImagenModel( + * ai, + * { + * model: 'imagen-3.0-generate-002' + * } + * ); + * + * const response = await imagen.generateImages('A photo of a cat'); + * if (response.images.length > 0) { + * console.log(response.images[0].bytesBase64Encoded); + * } + * ``` + * + * @public + */ +class ImagenModel extends AIModel { + /** + * Constructs a new instance of the {@link ImagenModel} class. + * + * @param ai - an {@link AI} instance. + * @param modelParams - Parameters to use when making requests to Imagen. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + */ + constructor(ai, modelParams, requestOptions) { + const { model, generationConfig, safetySettings } = modelParams; + super(ai, model); + this.requestOptions = requestOptions; + this.generationConfig = generationConfig; + this.safetySettings = safetySettings; + } + /** + * Generates images using the Imagen model and returns them as + * base64-encoded strings. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the generated images. + * + * @throws If the request to generate images fails. This happens if the + * prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + * + * @public + */ + async generateImages(prompt) { + const body = createPredictRequestBody(prompt, { + ...this.generationConfig, + ...this.safetySettings + }); + const response = await makeRequest(this.model, Task.PREDICT, this._apiSettings, + /* stream */ false, JSON.stringify(body), this.requestOptions); + return handlePredictResponse(response); + } + /** + * Generates images to Cloud Storage for Firebase using the Imagen model. + * + * @internal This method is temporarily internal. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket. + * This should be a directory. For example, `gs://my-bucket/my-directory/`. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the URLs of the generated images. + * + * @throws If the request fails to generate images fails. This happens if + * the prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + */ + async generateImagesGCS(prompt, gcsURI) { + const body = createPredictRequestBody(prompt, { + gcsURI, + ...this.generationConfig, + ...this.safetySettings + }); + const response = await makeRequest(this.model, Task.PREDICT, this._apiSettings, + /* stream */ false, JSON.stringify(body), this.requestOptions); + return handlePredictResponse(response); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22. + * + * @internal + */ +class WebSocketHandlerImpl { + constructor() { + if (typeof WebSocket === 'undefined') { + throw new AIError(AIErrorCode.UNSUPPORTED, 'The WebSocket API is not available in this environment. ' + + 'The "Live" feature is not supported here. It is supported in ' + + 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'); + } + } + connect(url) { + return new Promise((resolve, reject) => { + this.ws = new WebSocket(url); + this.ws.binaryType = 'blob'; // Only important to set in Node + this.ws.addEventListener('open', () => resolve(), { once: true }); + this.ws.addEventListener('error', () => reject(new AIError(AIErrorCode.FETCH_ERROR, `Error event raised on WebSocket`)), { once: true }); + this.ws.addEventListener('close', (closeEvent) => { + if (closeEvent.reason) { + logger.warn(`WebSocket connection closed by server. Reason: '${closeEvent.reason}'`); + } + }); + }); + } + send(data) { + if (!this.ws || this.ws.readyState !== WebSocket.OPEN) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.'); + } + this.ws.send(data); + } + async *listen() { + if (!this.ws) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not connected.'); + } + const messageQueue = []; + const errorQueue = []; + let resolvePromise = null; + let isClosed = false; + const messageListener = async (event) => { + let data; + if (event.data instanceof Blob) { + data = await event.data.text(); + } + else if (typeof event.data === 'string') { + data = event.data; + } + else { + errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`)); + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + return; + } + try { + const obj = JSON.parse(data); + messageQueue.push(obj); + } + catch (e) { + const err = e; + errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing WebSocket message to JSON: ${err.message}`)); + } + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + }; + const errorListener = () => { + errorQueue.push(new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')); + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + }; + const closeListener = (event) => { + if (event.reason) { + logger.warn(`WebSocket connection closed by the server with reason: ${event.reason}`); + } + isClosed = true; + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + // Clean up listeners to prevent memory leaks + this.ws?.removeEventListener('message', messageListener); + this.ws?.removeEventListener('close', closeListener); + this.ws?.removeEventListener('error', errorListener); + }; + this.ws.addEventListener('message', messageListener); + this.ws.addEventListener('close', closeListener); + this.ws.addEventListener('error', errorListener); + while (!isClosed) { + if (errorQueue.length > 0) { + const error = errorQueue.shift(); + throw error; + } + if (messageQueue.length > 0) { + yield messageQueue.shift(); + } + else { + await new Promise(resolve => { + resolvePromise = resolve; + }); + } + } + // If the loop terminated because isClosed is true, check for any final errors + if (errorQueue.length > 0) { + const error = errorQueue.shift(); + throw error; + } + } + close(code, reason) { + return new Promise(resolve => { + if (!this.ws) { + return resolve(); + } + this.ws.addEventListener('close', () => resolve(), { once: true }); + // Calling 'close' during these states results in an error. + if (this.ws.readyState === WebSocket.CLOSED || + this.ws.readyState === WebSocket.CONNECTING) { + return resolve(); + } + if (this.ws.readyState !== WebSocket.CLOSING) { + this.ws.close(code, reason); + } + }); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Parent class encompassing all Schema types, with static methods that + * allow building specific Schema types. This class can be converted with + * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints. + * (This string conversion is automatically done when calling SDK methods.) 
+ * @public + */ +class Schema { + constructor(schemaParams) { + // TODO(dlarocque): Enforce this with union types + if (!schemaParams.type && !schemaParams.anyOf) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, "A schema must have either a 'type' or an 'anyOf' array of sub-schemas."); + } + // eslint-disable-next-line guard-for-in + for (const paramKey in schemaParams) { + this[paramKey] = schemaParams[paramKey]; + } + // Ensure these are explicitly set to avoid TS errors. + this.type = schemaParams.type; + this.format = schemaParams.hasOwnProperty('format') + ? schemaParams.format + : undefined; + this.nullable = schemaParams.hasOwnProperty('nullable') + ? !!schemaParams.nullable + : false; + } + /** + * Defines how this Schema should be serialized as JSON. + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior + * @internal + */ + toJSON() { + const obj = { + type: this.type + }; + for (const prop in this) { + if (this.hasOwnProperty(prop) && this[prop] !== undefined) { + if (prop !== 'required' || this.type === SchemaType.OBJECT) { + obj[prop] = this[prop]; + } + } + } + return obj; + } + static array(arrayParams) { + return new ArraySchema(arrayParams, arrayParams.items); + } + static object(objectParams) { + return new ObjectSchema(objectParams, objectParams.properties, objectParams.optionalProperties); + } + // eslint-disable-next-line id-blacklist + static string(stringParams) { + return new StringSchema(stringParams); + } + static enumString(stringParams) { + return new StringSchema(stringParams, stringParams.enum); + } + static integer(integerParams) { + return new IntegerSchema(integerParams); + } + // eslint-disable-next-line id-blacklist + static number(numberParams) { + return new NumberSchema(numberParams); + } + // eslint-disable-next-line id-blacklist + static boolean(booleanParams) { + return new BooleanSchema(booleanParams); + } + static anyOf(anyOfParams) { + return new AnyOfSchema(anyOfParams); + } +} +/** + * Schema class for "integer" types. + * @public + */ +class IntegerSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.INTEGER, + ...schemaParams + }); + } +} +/** + * Schema class for "number" types. + * @public + */ +class NumberSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.NUMBER, + ...schemaParams + }); + } +} +/** + * Schema class for "boolean" types. + * @public + */ +class BooleanSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.BOOLEAN, + ...schemaParams + }); + } +} +/** + * Schema class for "string" types. Can be used with or without + * enum values. + * @public + */ +class StringSchema extends Schema { + constructor(schemaParams, enumValues) { + super({ + type: SchemaType.STRING, + ...schemaParams + }); + this.enum = enumValues; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + if (this.enum) { + obj['enum'] = this.enum; + } + return obj; + } +} +/** + * Schema class for "array" types. + * The `items` param should refer to the type of item that can be a member + * of the array. + * @public + */ +class ArraySchema extends Schema { + constructor(schemaParams, items) { + super({ + type: SchemaType.ARRAY, + ...schemaParams + }); + this.items = items; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + obj.items = this.items.toJSON(); + return obj; + } +} +/** + * Schema class for "object" types. 
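// A sketch of composing a response schema with the static builders above; JSON.stringify() uses
// the toJSON() overrides, so properties not listed in `optionalProperties` end up in the
// serialized `required` array (the field names are illustrative):
//
//   const recipeSchema = Schema.object({
//     properties: {
//       name: Schema.string(),
//       servings: Schema.integer(),
//       difficulty: Schema.enumString({ enum: ['easy', 'medium', 'hard'] })
//     },
//     optionalProperties: ['difficulty']
//   });
//   console.log(JSON.stringify(recipeSchema));
//   // => {"type":"object","properties":{...},"required":["name","servings"]}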
+ * The `properties` param must be a map of `Schema` objects. + * @public + */ +class ObjectSchema extends Schema { + constructor(schemaParams, properties, optionalProperties = []) { + super({ + type: SchemaType.OBJECT, + ...schemaParams + }); + this.properties = properties; + this.optionalProperties = optionalProperties; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + obj.properties = { ...this.properties }; + const required = []; + if (this.optionalProperties) { + for (const propertyKey of this.optionalProperties) { + if (!this.properties.hasOwnProperty(propertyKey)) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, `Property "${propertyKey}" specified in "optionalProperties" does not exist.`); + } + } + } + for (const propertyKey in this.properties) { + if (this.properties.hasOwnProperty(propertyKey)) { + obj.properties[propertyKey] = this.properties[propertyKey].toJSON(); + if (!this.optionalProperties.includes(propertyKey)) { + required.push(propertyKey); + } + } + } + if (required.length > 0) { + obj.required = required; + } + delete obj.optionalProperties; + return obj; + } +} +/** + * Schema class representing a value that can conform to any of the provided sub-schemas. This is + * useful when a field can accept multiple distinct types or structures. + * @public + */ +class AnyOfSchema extends Schema { + constructor(schemaParams) { + if (schemaParams.anyOf.length === 0) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, "The 'anyOf' array must not be empty."); + } + super({ + ...schemaParams, + type: undefined // anyOf schemas do not have an explicit type + }); + this.anyOf = schemaParams.anyOf; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + // Ensure the 'anyOf' property contains serialized SchemaRequest objects. + if (this.anyOf && Array.isArray(this.anyOf)) { + obj.anyOf = this.anyOf.map(s => s.toJSON()); + } + return obj; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Defines the image format for images generated by Imagen. + * + * Use this class to specify the desired format (JPEG or PNG) and compression quality + * for images generated by Imagen. This is typically included as part of + * {@link ImagenModelParams}. + * + * @example + * ```javascript + * const imagenModelParams = { + * // ... other ImagenModelParams + * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75. + * } + * ``` + * + * @public + */ +class ImagenImageFormat { + constructor() { + this.mimeType = 'image/png'; + } + /** + * Creates an {@link ImagenImageFormat} for a JPEG image. + * + * @param compressionQuality - The level of compression (a number between 0 and 100). + * @returns An {@link ImagenImageFormat} object for a JPEG image. 
+ * + * @public + */ + static jpeg(compressionQuality) { + if (compressionQuality && + (compressionQuality < 0 || compressionQuality > 100)) { + logger.warn(`Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`); + } + return { mimeType: 'image/jpeg', compressionQuality }; + } + /** + * Creates an {@link ImagenImageFormat} for a PNG image. + * + * @returns An {@link ImagenImageFormat} object for a PNG image. + * + * @public + */ + static png() { + return { mimeType: 'image/png' }; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const SERVER_INPUT_SAMPLE_RATE = 16000; +const SERVER_OUTPUT_SAMPLE_RATE = 24000; +const AUDIO_PROCESSOR_NAME = 'audio-processor'; +/** + * The JS for an `AudioWorkletProcessor`. + * This processor is responsible for taking raw audio from the microphone, + * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread. + * + * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor + * + * It is defined as a string here so that it can be converted into a `Blob` + * and loaded at runtime. + */ +const audioProcessorWorkletString = ` + class AudioProcessor extends AudioWorkletProcessor { + constructor(options) { + super(); + this.targetSampleRate = options.processorOptions.targetSampleRate; + // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope, + // representing the native sample rate of the AudioContext. + this.inputSampleRate = sampleRate; + } + + /** + * This method is called by the browser's audio engine for each block of audio data. + * Input is a single input, with a single channel (input[0][0]). + */ + process(inputs) { + const input = inputs[0]; + if (input && input.length > 0 && input[0].length > 0) { + const pcmData = input[0]; // Float32Array of raw audio samples. + + // Simple linear interpolation for resampling. + const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate / this.inputSampleRate)); + const ratio = pcmData.length / resampled.length; + for (let i = 0; i < resampled.length; i++) { + resampled[i] = pcmData[Math.floor(i * ratio)]; + } + + // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767) + const resampledInt16 = new Int16Array(resampled.length); + for (let i = 0; i < resampled.length; i++) { + const sample = Math.max(-1, Math.min(1, resampled[i])); + if (sample < 0) { + resampledInt16[i] = sample * 32768; + } else { + resampledInt16[i] = sample * 32767; + } + } + + this.port.postMessage(resampledInt16); + } + // Return true to keep the processor alive and processing the next audio block. + return true; + } + } + + // Register the processor with a name that can be used to instantiate it from the main thread. + registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor); +`; +/** + * Encapsulates the core logic of an audio conversation. 
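// A standalone sketch mirroring the sample conversion the worklet string above performs:
// linear resampling to the 16 kHz server input rate, then Float32 [-1, 1] to Int16 conversion
// (for reference only; the function name is illustrative):
//
//   function toPcm16At16kHz(samples, inputSampleRate, targetSampleRate = SERVER_INPUT_SAMPLE_RATE) {
//     const out = new Int16Array(Math.round(samples.length * targetSampleRate / inputSampleRate));
//     const ratio = samples.length / out.length;
//     for (let i = 0; i < out.length; i++) {
//       const s = Math.max(-1, Math.min(1, samples[Math.floor(i * ratio)]));
//       out[i] = s < 0 ? s * 32768 : s * 32767;
//     }
//     return out;
//   }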
+ * + * @internal + */ +class AudioConversationRunner { + constructor(liveSession, options, deps) { + this.liveSession = liveSession; + this.options = options; + this.deps = deps; + /** A flag to indicate if the conversation has been stopped. */ + this.isStopped = false; + /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */ + this.stopDeferred = new Deferred(); + /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */ + this.playbackQueue = []; + /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */ + this.scheduledSources = []; + /** A high-precision timeline pointer for scheduling gapless audio playback. */ + this.nextStartTime = 0; + /** A mutex to prevent the playback processing loop from running multiple times concurrently. */ + this.isPlaybackLoopRunning = false; + this.liveSession.inConversation = true; + // Start listening for messages from the server. + this.receiveLoopPromise = this.runReceiveLoop().finally(() => this.cleanup()); + // Set up the handler for receiving processed audio data from the worklet. + // Message data has been resampled to 16kHz 16-bit PCM. + this.deps.workletNode.port.onmessage = event => { + if (this.isStopped) { + return; + } + const pcm16 = event.data; + const base64 = btoa(String.fromCharCode.apply(null, Array.from(new Uint8Array(pcm16.buffer)))); + const chunk = { + mimeType: 'audio/pcm', + data: base64 + }; + void this.liveSession.sendAudioRealtime(chunk); + }; + } + /** + * Stops the conversation and unblocks the main receive loop. + */ + async stop() { + if (this.isStopped) { + return; + } + this.isStopped = true; + this.stopDeferred.resolve(); // Unblock the receive loop + await this.receiveLoopPromise; // Wait for the loop and cleanup to finish + } + /** + * Cleans up all audio resources (nodes, stream tracks, context) and marks the + * session as no longer in a conversation. + */ + cleanup() { + this.interruptPlayback(); // Ensure all audio is stopped on final cleanup. + this.deps.workletNode.port.onmessage = null; + this.deps.workletNode.disconnect(); + this.deps.sourceNode.disconnect(); + this.deps.mediaStream.getTracks().forEach(track => track.stop()); + if (this.deps.audioContext.state !== 'closed') { + void this.deps.audioContext.close(); + } + this.liveSession.inConversation = false; + } + /** + * Adds audio data to the queue and ensures the playback loop is running. + */ + enqueueAndPlay(audioData) { + this.playbackQueue.push(audioData); + // Will no-op if it's already running. + void this.processPlaybackQueue(); + } + /** + * Stops all current and pending audio playback and clears the queue. This is + * called when the server indicates the model's speech was interrupted with + * `LiveServerContent.modelTurn.interrupted`. + */ + interruptPlayback() { + // Stop all sources that have been scheduled. The onended event will fire for each, + // which will clean up the scheduledSources array. + [...this.scheduledSources].forEach(source => source.stop(0)); + // Clear the internal buffer of unprocessed audio chunks. + this.playbackQueue.length = 0; + // Reset the playback clock to start fresh. + this.nextStartTime = this.deps.audioContext.currentTime; + } + /** + * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence. 
+ */ + async processPlaybackQueue() { + if (this.isPlaybackLoopRunning) { + return; + } + this.isPlaybackLoopRunning = true; + while (this.playbackQueue.length > 0 && !this.isStopped) { + const pcmRawBuffer = this.playbackQueue.shift(); + try { + const pcm16 = new Int16Array(pcmRawBuffer); + const frameCount = pcm16.length; + const audioBuffer = this.deps.audioContext.createBuffer(1, frameCount, SERVER_OUTPUT_SAMPLE_RATE); + // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API. + const channelData = audioBuffer.getChannelData(0); + for (let i = 0; i < frameCount; i++) { + channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0] + } + const source = this.deps.audioContext.createBufferSource(); + source.buffer = audioBuffer; + source.connect(this.deps.audioContext.destination); + // Track the source and set up a handler to remove it from tracking when it finishes. + this.scheduledSources.push(source); + source.onended = () => { + this.scheduledSources = this.scheduledSources.filter(s => s !== source); + }; + // To prevent gaps, schedule the next chunk to start either now (if we're catching up) + // or exactly when the previous chunk is scheduled to end. + this.nextStartTime = Math.max(this.deps.audioContext.currentTime, this.nextStartTime); + source.start(this.nextStartTime); + // Update the schedule for the *next* chunk. + this.nextStartTime += audioBuffer.duration; + } + catch (e) { + logger.error('Error playing audio:', e); + } + } + this.isPlaybackLoopRunning = false; + } + /** + * The main loop that listens for and processes messages from the server. + */ + async runReceiveLoop() { + const messageGenerator = this.liveSession.receive(); + while (!this.isStopped) { + const result = await Promise.race([ + messageGenerator.next(), + this.stopDeferred.promise + ]); + if (this.isStopped || !result || result.done) { + break; + } + const message = result.value; + if (message.type === 'serverContent') { + const serverContent = message; + if (serverContent.interrupted) { + this.interruptPlayback(); + } + const audioPart = serverContent.modelTurn?.parts.find(part => part.inlineData?.mimeType.startsWith('audio/')); + if (audioPart?.inlineData) { + const audioData = Uint8Array.from(atob(audioPart.inlineData.data), c => c.charCodeAt(0)).buffer; + this.enqueueAndPlay(audioData); + } + } + else if (message.type === 'toolCall') { + if (!this.options.functionCallingHandler) { + logger.warn('Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. Ignoring tool call.'); + } + else { + try { + const functionResponse = await this.options.functionCallingHandler(message.functionCalls); + if (!this.isStopped) { + void this.liveSession.sendFunctionResponses([functionResponse]); + } + } + catch (e) { + throw new AIError(AIErrorCode.ERROR, `Function calling handler failed: ${e.message}`); + } + } + } + } + } +} +/** + * Starts a real-time, bidirectional audio conversation with the model. This helper function manages + * the complexities of microphone access, audio recording, playback, and interruptions. + * + * @remarks Important: This function must be called in response to a user gesture + * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}. 
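// A sketch of wiring a function-calling handler into the options consumed by the runner above;
// when a 'toolCall' message arrives, the handler's resolved value is forwarded via
// sendFunctionResponses() (the handler body and response shape are illustrative):
//
//   const controller = await startAudioConversation(liveSession, {
//     functionCallingHandler: async functionCalls => {
//       const call = functionCalls[0];
//       // ...execute the requested function, then answer the model:
//       return { id: call.id, name: call.name, response: { result: 'ok' } };
//     }
//   });
//   // later: await controller.stop();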
+ * + * @example + * ```javascript + * const liveSession = await model.connect(); + * let conversationController; + * + * // This function must be called from within a click handler. + * async function startConversation() { + * try { + * conversationController = await startAudioConversation(liveSession); + * } catch (e) { + * // Handle AI-specific errors + * if (e instanceof AIError) { + * console.error("AI Error:", e.message); + * } + * // Handle microphone permission and hardware errors + * else if (e instanceof DOMException) { + * console.error("Microphone Error:", e.message); + * } + * // Handle other unexpected errors + * else { + * console.error("An unexpected error occurred:", e); + * } + * } + * } + * + * // Later, to stop the conversation: + * // if (conversationController) { + * // await conversationController.stop(); + * // } + * ``` + * + * @param liveSession - An active {@link LiveSession} instance. + * @param options - Configuration options for the audio conversation. + * @returns A `Promise` that resolves with an {@link AudioConversationController}. + * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`). + * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions. + * + * @beta + */ +async function startAudioConversation(liveSession, options = {}) { + if (liveSession.isClosed) { + throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot start audio conversation on a closed LiveSession.'); + } + if (liveSession.inConversation) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'An audio conversation is already in progress for this session.'); + } + // Check for necessary Web API support. + if (typeof AudioWorkletNode === 'undefined' || + typeof AudioContext === 'undefined' || + typeof navigator === 'undefined' || + !navigator.mediaDevices) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'); + } + let audioContext; + try { + // 1. Set up the audio context. This must be in response to a user gesture. + // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy + audioContext = new AudioContext(); + if (audioContext.state === 'suspended') { + await audioContext.resume(); + } + // 2. Prompt for microphone access and get the media stream. + // This can throw a variety of permission or hardware-related errors. + const mediaStream = await navigator.mediaDevices.getUserMedia({ + audio: true + }); + // 3. Load the AudioWorklet processor. + // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet + const workletBlob = new Blob([audioProcessorWorkletString], { + type: 'application/javascript' + }); + const workletURL = URL.createObjectURL(workletBlob); + await audioContext.audioWorklet.addModule(workletURL); + // 4. 
Create the audio graph: Microphone -> Source Node -> Worklet Node + const sourceNode = audioContext.createMediaStreamSource(mediaStream); + const workletNode = new AudioWorkletNode(audioContext, AUDIO_PROCESSOR_NAME, { + processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE } + }); + sourceNode.connect(workletNode); + // 5. Instantiate and return the runner which manages the conversation. + const runner = new AudioConversationRunner(liveSession, options, { + audioContext, + mediaStream, + sourceNode, + workletNode + }); + return { stop: () => runner.stop() }; + } + catch (e) { + // Ensure the audio context is closed on any setup error. + if (audioContext && audioContext.state !== 'closed') { + void audioContext.close(); + } + // Re-throw specific, known error types directly. The user may want to handle `DOMException` + // errors differently (for example, if permission to access audio device was denied). + if (e instanceof AIError || e instanceof DOMException) { + throw e; + } + // Wrap any other unexpected errors in a standard AIError. + throw new AIError(AIErrorCode.ERROR, `Failed to initialize audio recording: ${e.message}`); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Returns the default {@link AI} instance that is associated with the provided + * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the + * default settings. + * + * @example + * ```javascript + * const ai = getAI(app); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Gemini Developer API (via Google AI). + * const ai = getAI(app, { backend: new GoogleAIBackend() }); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Vertex AI Gemini API. + * const ai = getAI(app, { backend: new VertexAIBackend() }); + * ``` + * + * @param app - The {@link @firebase/app#FirebaseApp} to use. + * @param options - {@link AIOptions} that configure the AI instance. + * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}. + * + * @public + */ +function getAI(app = getApp(), options) { + app = getModularInstance(app); + // Dependencies + const AIProvider = _getProvider(app, AI_TYPE); + const backend = options?.backend ?? new GoogleAIBackend(); + const finalOptions = { + useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false + }; + const identifier = encodeInstanceIdentifier(backend); + const aiInstance = AIProvider.getImmediate({ + identifier + }); + aiInstance.options = finalOptions; + return aiInstance; +} +/** + * Returns a {@link GenerativeModel} class with methods for inference + * and other functionality. + * + * @public + */ +function getGenerativeModel(ai, modelParams, requestOptions) { + // Uses the existence of HybridParams.mode to clarify the type of the modelParams input. 
+ const hybridParams = modelParams; + let inCloudParams; + if (hybridParams.mode) { + inCloudParams = hybridParams.inCloudParams || { + model: DEFAULT_HYBRID_IN_CLOUD_MODEL + }; + } + else { + inCloudParams = modelParams; + } + if (!inCloudParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`); + } + /** + * An AIService registered by index.node.ts will not have a + * chromeAdapterFactory() method. + */ + const chromeAdapter = ai.chromeAdapterFactory?.(hybridParams.mode, typeof window === 'undefined' ? undefined : window, hybridParams.onDeviceParams); + return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter); +} +/** + * Returns an {@link ImagenModel} class with methods for using Imagen. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when making Imagen requests. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @public + */ +function getImagenModel(ai, modelParams, requestOptions) { + if (!modelParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`); + } + return new ImagenModel(ai, modelParams, requestOptions); +} +/** + * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication. + * + * The Live API is only supported in modern browser windows and Node >= 22. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when setting up a {@link LiveSession}. + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @beta + */ +function getLiveGenerativeModel(ai, modelParams) { + if (!modelParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`); + } + const webSocketHandler = new WebSocketHandlerImpl(); + return new LiveGenerativeModel(ai, modelParams, webSocketHandler); +} + +/** + * The Firebase AI Web SDK. 
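// A sketch tying the factory functions above together, assuming a FirebaseApp is already
// initialized and that all model names are illustrative:
//
//   import { getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, GoogleAIBackend } from '@firebase/ai';
//
//   const ai = getAI(app, { backend: new GoogleAIBackend() });
//   const gemini = getGenerativeModel(ai, { model: 'gemini-2.0-flash-lite' });
//   const imagen = getImagenModel(ai, { model: 'imagen-3.0-generate-002' });
//   const live = getLiveGenerativeModel(ai, { model: 'gemini-live-flash' });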
+ * + * @packageDocumentation + */ +function registerAI() { + _registerComponent(new Component(AI_TYPE, factory, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true)); + registerVersion(name, version); + // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation + registerVersion(name, version, 'esm2020'); +} +registerAI(); + +export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, InferenceSource, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation }; +//# sourceMappingURL=index.esm.js.map diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/index.esm.js.map b/frontend-old/node_modules/@firebase/ai/dist/esm/index.esm.js.map new file mode 100644 index 0000000..a5485cb --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/index.esm.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index.esm.js","sources":["../../src/constants.ts","../../src/errors.ts","../../src/types/enums.ts","../../src/types/responses.ts","../../src/types/error.ts","../../src/types/schema.ts","../../src/types/imagen/requests.ts","../../src/public-types.ts","../../src/backend.ts","../../src/helpers.ts","../../src/logger.ts","../../src/types/language-model.ts","../../src/methods/chrome-adapter.ts","../../src/service.ts","../../src/factory-browser.ts","../../src/models/ai-model.ts","../../src/requests/request.ts","../../src/requests/response-helpers.ts","../../src/googleai-mappers.ts","../../src/requests/stream-reader.ts","../../src/requests/hybrid-helpers.ts","../../src/methods/generate-content.ts","../../src/requests/request-helpers.ts","../../src/methods/chat-session-helpers.ts","../../src/methods/chat-session.ts","../../src/methods/count-tokens.ts","../../src/models/generative-model.ts","../../src/methods/live-session.ts","../../src/models/live-generative-model.ts","../../src/models/imagen-model.ts","../../src/websocket.ts","../../src/requests/schema-builder.ts","../../src/requests/imagen-image-format.ts","../../src/methods/live-session-helpers.ts","../../src/api.ts","../../src/index.ts"],"sourcesContent":["/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { version } from '../package.json';\n\nexport const AI_TYPE = 'AI';\n\nexport const DEFAULT_LOCATION = 'us-central1';\n\nexport const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com';\n\nexport const DEFAULT_API_VERSION = 'v1beta';\n\nexport const 
PACKAGE_VERSION = version;\n\nexport const LANGUAGE_TAG = 'gl-js';\n\nexport const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;\n\n/**\n * Defines the name of the default in-cloud model to use for hybrid inference.\n */\nexport const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseError } from '@firebase/util';\nimport { AIErrorCode, CustomErrorData } from './types';\nimport { AI_TYPE } from './constants';\n\n/**\n * Error class for the Firebase AI SDK.\n *\n * @public\n */\nexport class AIError extends FirebaseError {\n /**\n * Constructs a new instance of the `AIError` class.\n *\n * @param code - The error code from {@link (AIErrorCode:type)}.\n * @param message - A human-readable message describing the error.\n * @param customErrorData - Optional error data.\n */\n constructor(\n readonly code: AIErrorCode,\n message: string,\n readonly customErrorData?: CustomErrorData\n ) {\n // Match error format used by FirebaseError from ErrorFactory\n const service = AI_TYPE;\n const fullCode = `${service}/${code}`;\n const fullMessage = `${service}: ${message} (${fullCode})`;\n super(code, fullMessage);\n\n // FirebaseError initializes a stack trace, but it assumes the error is created from the error\n // factory. 
Since we break this assumption, we set the stack trace to be originating from this\n // constructor.\n // This is only supported in V8.\n if (Error.captureStackTrace) {\n // Allows us to initialize the stack trace without including the constructor itself at the\n // top level of the stack trace.\n Error.captureStackTrace(this, AIError);\n }\n\n // Allows instanceof AIError in ES5/ES6\n // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work\n // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget\n // which we can now use since we no longer target ES5.\n Object.setPrototypeOf(this, AIError.prototype);\n\n // Since Error is an interface, we don't inherit toString and so we define it ourselves.\n this.toString = () => fullMessage;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Role is the producer of the content.\n * @public\n */\nexport type Role = (typeof POSSIBLE_ROLES)[number];\n\n/**\n * Possible roles.\n * @public\n */\nexport const POSSIBLE_ROLES = ['user', 'model', 'function', 'system'] as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport const HarmCategory = {\n HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'\n} as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport const HarmBlockThreshold = {\n /**\n * Content with `NEGLIGIBLE` will be allowed.\n */\n BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE` and `LOW` will be allowed.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.\n */\n BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',\n /**\n * All content will be allowed.\n */\n BLOCK_NONE: 'BLOCK_NONE',\n /**\n * All content will be allowed. 
This is the same as `BLOCK_NONE`, but the metadata corresponding\n * to the {@link (HarmCategory:type)} will not be present in the response.\n */\n OFF: 'OFF'\n} as const;\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport type HarmBlockThreshold =\n (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport const HarmBlockMethod = {\n /**\n * The harm block method uses both probability and severity scores.\n */\n SEVERITY: 'SEVERITY',\n /**\n * The harm block method uses the probability score.\n */\n PROBABILITY: 'PROBABILITY'\n} as const;\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport type HarmBlockMethod =\n (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport const HarmProbability = {\n /**\n * Content has a negligible chance of being unsafe.\n */\n NEGLIGIBLE: 'NEGLIGIBLE',\n /**\n * Content has a low chance of being unsafe.\n */\n LOW: 'LOW',\n /**\n * Content has a medium chance of being unsafe.\n */\n MEDIUM: 'MEDIUM',\n /**\n * Content has a high chance of being unsafe.\n */\n HIGH: 'HIGH'\n} as const;\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport type HarmProbability =\n (typeof HarmProbability)[keyof typeof HarmProbability];\n\n/**\n * Harm severity levels.\n * @public\n */\nexport const HarmSeverity = {\n /**\n * Negligible level of harm severity.\n */\n HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE',\n /**\n * Low level of harm severity.\n */\n HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW',\n /**\n * Medium level of harm severity.\n */\n HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM',\n /**\n * High level of harm severity.\n */\n HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH',\n /**\n * Harm severity is not supported.\n *\n * @remarks\n * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.\n */\n HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED'\n} as const;\n\n/**\n * Harm severity levels.\n * @public\n */\nexport type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport const BlockReason = {\n /**\n * Content was blocked by safety settings.\n */\n SAFETY: 'SAFETY',\n /**\n * Content was blocked, but the reason is uncategorized.\n */\n OTHER: 'OTHER',\n /**\n * Content was blocked because it contained terms from the terminology blocklist.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * Content was blocked due to prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT'\n} as const;\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport const FinishReason = {\n /**\n * Natural stop point of the model or provided stop sequence.\n */\n STOP: 'STOP',\n /**\n * The maximum number of tokens as specified in the request was reached.\n */\n MAX_TOKENS: 'MAX_TOKENS',\n /**\n * The candidate content was flagged for safety reasons.\n */\n SAFETY: 'SAFETY',\n /**\n * The candidate content was flagged for recitation reasons.\n */\n RECITATION: 'RECITATION',\n /**\n * Unknown reason.\n */\n OTHER: 'OTHER',\n /**\n * The 
candidate content contained forbidden terms.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * The candidate content potentially contained prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT',\n /**\n * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).\n */\n SPII: 'SPII',\n /**\n * The function call generated by the model was invalid.\n */\n MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL'\n} as const;\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];\n\n/**\n * @public\n */\nexport const FunctionCallingMode = {\n /**\n * Default model behavior; model decides to predict either a function call\n * or a natural language response.\n */\n AUTO: 'AUTO',\n /**\n * Model is constrained to always predicting a function call only.\n * If `allowed_function_names` is set, the predicted function call will be\n * limited to any one of `allowed_function_names`, else the predicted\n * function call will be any one of the provided `function_declarations`.\n */\n ANY: 'ANY',\n /**\n * Model will not predict any function call. Model behavior is same as when\n * not passing any function declarations.\n */\n NONE: 'NONE'\n} as const;\n\n/**\n * @public\n */\nexport type FunctionCallingMode =\n (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];\n\n/**\n * Content part modality.\n * @public\n */\nexport const Modality = {\n /**\n * Unspecified modality.\n */\n MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED',\n /**\n * Plain text.\n */\n TEXT: 'TEXT',\n /**\n * Image.\n */\n IMAGE: 'IMAGE',\n /**\n * Video.\n */\n VIDEO: 'VIDEO',\n /**\n * Audio.\n */\n AUDIO: 'AUDIO',\n /**\n * Document (for example, PDF).\n */\n DOCUMENT: 'DOCUMENT'\n} as const;\n\n/**\n * Content part modality.\n * @public\n */\nexport type Modality = (typeof Modality)[keyof typeof Modality];\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport const ResponseModality = {\n /**\n * Text.\n * @beta\n */\n TEXT: 'TEXT',\n /**\n * Image.\n * @beta\n */\n IMAGE: 'IMAGE',\n /**\n * Audio.\n * @beta\n */\n AUDIO: 'AUDIO'\n} as const;\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport type ResponseModality =\n (typeof ResponseModality)[keyof typeof ResponseModality];\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @remarks\n * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an\n * on-device model. If on-device inference is not available, the SDK\n * will fall back to using a cloud-hosted model.\n * <br/>\n * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an\n * on-device model. The SDK will not fall back to a cloud-hosted model.\n * If on-device inference is not available, inference methods will throw.\n * <br/>\n * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a\n * cloud-hosted model. The SDK will not fall back to an on-device model.\n * <br/>\n * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a\n * cloud-hosted model. 
If not available, the SDK will fall back to an\n * on-device model.\n *\n * @beta\n */\nexport const InferenceMode = {\n 'PREFER_ON_DEVICE': 'prefer_on_device',\n 'ONLY_ON_DEVICE': 'only_on_device',\n 'ONLY_IN_CLOUD': 'only_in_cloud',\n 'PREFER_IN_CLOUD': 'prefer_in_cloud'\n} as const;\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport const InferenceSource = {\n 'ON_DEVICE': 'on_device',\n 'IN_CLOUD': 'in_cloud'\n} as const;\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceSource =\n (typeof InferenceSource)[keyof typeof InferenceSource];\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport const Outcome = {\n UNSPECIFIED: 'OUTCOME_UNSPECIFIED',\n OK: 'OUTCOME_OK',\n FAILED: 'OUTCOME_FAILED',\n DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'\n};\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport type Outcome = (typeof Outcome)[keyof typeof Outcome];\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport const Language = {\n UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',\n PYTHON: 'PYTHON'\n};\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport type Language = (typeof Language)[keyof typeof Language];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, FunctionCall, InlineDataPart } from './content';\nimport {\n BlockReason,\n FinishReason,\n HarmCategory,\n HarmProbability,\n HarmSeverity,\n InferenceSource,\n Modality\n} from './enums';\n\n/**\n * Result object returned from {@link GenerativeModel.generateContent} call.\n *\n * @public\n */\nexport interface GenerateContentResult {\n response: EnhancedGenerateContentResponse;\n}\n\n/**\n * Result object returned from {@link GenerativeModel.generateContentStream} call.\n * Iterate over `stream` to get chunks as they come in and/or\n * use the `response` promise to get the aggregated response when\n * the stream is done.\n *\n * @public\n */\nexport interface GenerateContentStreamResult {\n stream: AsyncGenerator<EnhancedGenerateContentResponse>;\n response: Promise<EnhancedGenerateContentResponse>;\n}\n\n/**\n * Response object wrapped with helper methods.\n *\n * @public\n */\nexport interface EnhancedGenerateContentResponse\n extends GenerateContentResponse {\n /**\n * Returns the text string from the response, if available.\n * Throws if the prompt or candidate was blocked.\n */\n text: () => string;\n /**\n * Aggregates and returns every {@link InlineDataPart} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n inlineDataParts: () => InlineDataPart[] | undefined;\n /**\n * Aggregates and returns every 
{@link FunctionCall} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n functionCalls: () => FunctionCall[] | undefined;\n /**\n * Aggregates and returns every {@link TextPart} with their `thought` property set\n * to `true` from the first candidate of {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n *\n * @remarks\n * Thought summaries provide a brief overview of the model's internal thinking process,\n * offering insight into how it arrived at the final answer. This can be useful for\n * debugging, understanding the model's reasoning, and verifying its accuracy.\n *\n * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is\n * set to `true`.\n */\n thoughtSummary: () => string | undefined;\n /**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\n inferenceSource?: InferenceSource;\n}\n\n/**\n * Individual response from {@link GenerativeModel.generateContent} and\n * {@link GenerativeModel.generateContentStream}.\n * `generateContentStream()` will return one in each chunk until\n * the stream is done.\n * @public\n */\nexport interface GenerateContentResponse {\n candidates?: GenerateContentCandidate[];\n promptFeedback?: PromptFeedback;\n usageMetadata?: UsageMetadata;\n}\n\n/**\n * Usage metadata about a {@link GenerateContentResponse}.\n *\n * @public\n */\nexport interface UsageMetadata {\n promptTokenCount: number;\n candidatesTokenCount: number;\n /**\n * The number of tokens used by the model's internal \"thinking\" process.\n */\n thoughtsTokenCount?: number;\n totalTokenCount: number;\n /**\n * The number of tokens used by tools.\n */\n toolUsePromptTokenCount?: number;\n promptTokensDetails?: ModalityTokenCount[];\n candidatesTokensDetails?: ModalityTokenCount[];\n /**\n * A list of tokens used by tools, broken down by modality.\n */\n toolUsePromptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * Represents token counting info for a single modality.\n *\n * @public\n */\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality: Modality;\n /** The number of tokens counted. 
*/\n tokenCount: number;\n}\n\n/**\n * If the prompt was blocked, this will be populated with `blockReason` and\n * the relevant `safetyRatings`.\n * @public\n */\nexport interface PromptFeedback {\n blockReason?: BlockReason;\n safetyRatings: SafetyRating[];\n /**\n * A human-readable description of the `blockReason`.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n blockReasonMessage?: string;\n}\n\n/**\n * A candidate returned as part of a {@link GenerateContentResponse}.\n * @public\n */\nexport interface GenerateContentCandidate {\n index: number;\n content: Content;\n finishReason?: FinishReason;\n finishMessage?: string;\n safetyRatings?: SafetyRating[];\n citationMetadata?: CitationMetadata;\n groundingMetadata?: GroundingMetadata;\n urlContextMetadata?: URLContextMetadata;\n}\n\n/**\n * Citation metadata that may be found on a {@link GenerateContentCandidate}.\n * @public\n */\nexport interface CitationMetadata {\n citations: Citation[];\n}\n\n/**\n * A single citation.\n * @public\n */\nexport interface Citation {\n startIndex?: number;\n endIndex?: number;\n uri?: string;\n license?: string;\n /**\n * The title of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n title?: string;\n /**\n * The publication date of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n publicationDate?: Date;\n}\n\n/**\n * Metadata returned when grounding is enabled.\n *\n * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * \"Grounding with Google Search\" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}\n * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}\n * section within the Service Specific Terms).\n *\n * @public\n */\nexport interface GroundingMetadata {\n /**\n * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be\n * embedded in an app to display a Google Search entry point for follow-up web searches related to\n * a model's \"Grounded Response\".\n */\n searchEntryPoint?: SearchEntrypoint;\n /**\n * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content\n * (for example, from a web page). that the model used to ground its response.\n */\n groundingChunks?: GroundingChunk[];\n /**\n * A list of {@link GroundingSupport} objects. Each object details how specific segments of the\n * model's response are supported by the `groundingChunks`.\n */\n groundingSupports?: GroundingSupport[];\n /**\n * A list of web search queries that the model performed to gather the grounding information.\n * These can be used to allow users to explore the search results themselves.\n */\n webSearchQueries?: string[];\n /**\n * @deprecated Use {@link GroundingSupport} instead.\n */\n retrievalQueries?: string[];\n}\n\n/**\n * Google search entry point.\n *\n * @public\n */\nexport interface SearchEntrypoint {\n /**\n * HTML/CSS snippet that must be embedded in a web page. 
The snippet is designed to avoid\n * undesired interaction with the rest of the page's CSS.\n *\n * To ensure proper rendering and prevent CSS conflicts, it is recommended\n * to encapsulate this `renderedContent` within a shadow DOM when embedding it\n * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.\n *\n * @example\n * ```javascript\n * const container = document.createElement('div');\n * document.body.appendChild(container);\n * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;\n * ```\n */\n renderedContent?: string;\n}\n\n/**\n * Represents a chunk of retrieved data that supports a claim in the model's response. This is part\n * of the grounding information provided when grounding is enabled.\n *\n * @public\n */\nexport interface GroundingChunk {\n /**\n * Contains details if the grounding chunk is from a web source.\n */\n web?: WebGroundingChunk;\n}\n\n/**\n * A grounding chunk from the web.\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for \"Grounding with Google Search\".\n *\n * @public\n */\nexport interface WebGroundingChunk {\n /**\n * The URI of the retrieved web page.\n */\n uri?: string;\n /**\n * The title of the retrieved web page.\n */\n title?: string;\n /**\n * The domain of the original URI from which the content was retrieved.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be\n * `undefined`.\n */\n domain?: string;\n}\n\n/**\n * Provides information about how a specific segment of the model's response is supported by the\n * retrieved grounding chunks.\n *\n * @public\n */\nexport interface GroundingSupport {\n /**\n * Specifies the segment of the model's response content that this grounding support pertains to.\n */\n segment?: Segment;\n /**\n * A list of indices that refer to specific {@link GroundingChunk} objects within the\n * {@link GroundingMetadata.groundingChunks} array. These referenced chunks\n * are the sources that support the claim made in the associated `segment` of the response.\n * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,\n * and `groundingChunks[4]` are the retrieved content supporting this part of the response.\n */\n groundingChunkIndices?: number[];\n}\n\n/**\n * Represents a specific segment within a {@link Content} object, often used to\n * pinpoint the exact location of text or data that grounding information refers to.\n *\n * @public\n */\nexport interface Segment {\n /**\n * The zero-based index of the {@link Part} object within the `parts` array\n * of its parent {@link Content} object. This identifies which part of the\n * content the segment belongs to.\n */\n partIndex: number;\n /**\n * The zero-based start index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the\n * beginning of the part's content (e.g., `Part.text`).\n */\n startIndex: number;\n /**\n * The zero-based end index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. 
This offset is exclusive, meaning the character\n * at this index is not included in the segment.\n */\n endIndex: number;\n /**\n * The text corresponding to the segment from the response.\n */\n text: string;\n}\n\n/**\n * Metadata related to {@link URLContextTool}.\n *\n * @beta\n */\nexport interface URLContextMetadata {\n /**\n * List of URL metadata used to provide context to the Gemini model.\n */\n urlMetadata: URLMetadata[];\n}\n\n/**\n * Metadata for a single URL retrieved by the {@link URLContextTool} tool.\n *\n * @beta\n */\nexport interface URLMetadata {\n /**\n * The retrieved URL.\n */\n retrievedUrl?: string;\n /**\n * The status of the URL retrieval.\n */\n urlRetrievalStatus?: URLRetrievalStatus;\n}\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport const URLRetrievalStatus = {\n /**\n * Unspecified retrieval status.\n */\n URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',\n /**\n * The URL retrieval was successful.\n */\n URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',\n /**\n * The URL retrieval failed.\n */\n URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',\n /**\n * The URL retrieval failed because the content is behind a paywall.\n */\n URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',\n /**\n * The URL retrieval failed because the content is unsafe.\n */\n URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'\n};\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport type URLRetrievalStatus =\n (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];\n\n/**\n * @public\n */\nexport interface WebAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * @public\n */\nexport interface RetrievedContextAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * Protobuf google.type.Date\n * @public\n */\nexport interface Date {\n year: number;\n month: number;\n day: number;\n}\n\n/**\n * A safety rating associated with a {@link GenerateContentCandidate}\n * @public\n */\nexport interface SafetyRating {\n category: HarmCategory;\n probability: HarmProbability;\n /**\n * The harm severity level.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.\n */\n severity: HarmSeverity;\n /**\n * The probability score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link 
VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n probabilityScore: number;\n /**\n * The severity score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n severityScore: number;\n blocked: boolean;\n}\n\n/**\n * Response from calling {@link GenerativeModel.countTokens}.\n * @public\n */\nexport interface CountTokensResponse {\n /**\n * The total number of tokens counted across all instances from the request.\n */\n totalTokens: number;\n /**\n * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.\n *\n * The total number of billable characters counted across all instances\n * from the request.\n */\n totalBillableCharacters?: number;\n /**\n * The breakdown, by modality, of how many tokens are consumed by the prompt.\n */\n promptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * An incremental content update from the model.\n *\n * @beta\n */\nexport interface LiveServerContent {\n type: 'serverContent';\n /**\n * The content that the model has generated as part of the current conversation with the user.\n */\n modelTurn?: Content;\n /**\n * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.\n */\n turnComplete?: boolean;\n /**\n * Indicates whether the model was interrupted by the client. An interruption occurs when\n * the client sends a message before the model finishes it's turn. This is `undefined` if the\n * model was not interrupted.\n */\n interrupted?: boolean;\n /**\n * Transcription of the audio that was input to the model.\n */\n inputTranscription?: Transcription;\n /**\n * Transcription of the audio output from the model.\n */\n outputTranscription?: Transcription;\n}\n\n/**\n * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription\n * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on\n * the {@link LiveGenerationConfig}.\n *\n * @beta\n */\n\nexport interface Transcription {\n /**\n * The text transcription of the audio.\n */\n text?: string;\n}\n\n/**\n * A request from the model for the client to execute one or more functions.\n *\n * @beta\n */\nexport interface LiveServerToolCall {\n type: 'toolCall';\n /**\n * An array of function calls to run.\n */\n functionCalls: FunctionCall[];\n}\n\n/**\n * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.\n *\n * @beta\n */\nexport interface LiveServerToolCallCancellation {\n type: 'toolCallCancellation';\n /**\n * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.\n */\n functionIds: string[];\n}\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n *\n * @beta\n */\nexport const LiveResponseType = {\n SERVER_CONTENT: 'serverContent',\n TOOL_CALL: 'toolCall',\n TOOL_CALL_CANCELLATION: 'toolCallCancellation'\n};\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n * This is a property on all messages that can be used for type narrowing. 
This property is not\n * returned by the server, it is assigned to a server message object once it's parsed.\n *\n * @beta\n */\nexport type LiveResponseType =\n (typeof LiveResponseType)[keyof typeof LiveResponseType];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GenerateContentResponse } from './responses';\n\n/**\n * Details object that may be included in an error response.\n *\n * @public\n */\nexport interface ErrorDetails {\n '@type'?: string;\n\n /** The reason for the error. */\n reason?: string;\n\n /** The domain where the error occurred. */\n domain?: string;\n\n /** Additional metadata about the error. */\n metadata?: Record<string, unknown>;\n\n /** Any other relevant information about the error. */\n [key: string]: unknown;\n}\n\n/**\n * Details object that contains data originating from a bad HTTP response.\n *\n * @public\n */\nexport interface CustomErrorData {\n /** HTTP status code of the error response. */\n status?: number;\n\n /** HTTP status text of the error response. */\n statusText?: string;\n\n /** Response from a {@link GenerateContentRequest} */\n response?: GenerateContentResponse;\n\n /** Optional additional details about the error. */\n errorDetails?: ErrorDetails[];\n}\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport const AIErrorCode = {\n /** A generic error occurred. */\n ERROR: 'error',\n\n /** An error occurred in a request. */\n REQUEST_ERROR: 'request-error',\n\n /** An error occurred in a response. */\n RESPONSE_ERROR: 'response-error',\n\n /** An error occurred while performing a fetch. */\n FETCH_ERROR: 'fetch-error',\n\n /** An error occurred because an operation was attempted on a closed session. */\n SESSION_CLOSED: 'session-closed',\n\n /** An error associated with a Content object. */\n INVALID_CONTENT: 'invalid-content',\n\n /** An error due to the Firebase API not being enabled in the Console. */\n API_NOT_ENABLED: 'api-not-enabled',\n\n /** An error due to invalid Schema input. */\n INVALID_SCHEMA: 'invalid-schema',\n\n /** An error occurred due to a missing Firebase API key. */\n NO_API_KEY: 'no-api-key',\n\n /** An error occurred due to a missing Firebase app ID. */\n NO_APP_ID: 'no-app-id',\n\n /** An error occurred due to a model name not being specified during initialization. */\n NO_MODEL: 'no-model',\n\n /** An error occurred due to a missing project ID. */\n NO_PROJECT_ID: 'no-project-id',\n\n /** An error occurred while parsing. */\n PARSE_FAILED: 'parse-failed',\n\n /** An error occurred due an attempt to use an unsupported feature. 
*/\n UNSUPPORTED: 'unsupported'\n} as const;\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport const SchemaType = {\n /** String type. */\n STRING: 'string',\n /** Number type. */\n NUMBER: 'number',\n /** Integer type. */\n INTEGER: 'integer',\n /** Boolean type. */\n BOOLEAN: 'boolean',\n /** Array type. */\n ARRAY: 'array',\n /** Object type. */\n OBJECT: 'object'\n} as const;\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];\n\n/**\n * Basic {@link Schema} properties shared across several Schema-related\n * types.\n * @public\n */\nexport interface SchemaShared<T> {\n /**\n * An array of {@link Schema}. The generated data must be valid against any of the schemas\n * listed in this array. This allows specifying multiple possible structures or types for a\n * single field.\n */\n anyOf?: T[];\n /** Optional. The format of the property.\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or\n * `'date-time'`, otherwise requests will fail.\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /**\n * The title of the property. This helps document the schema's purpose but does not typically\n * constrain the generated value. It can subtly guide the model by clarifying the intent of a\n * field.\n */\n title?: string;\n /** Optional. The items of the property. */\n items?: T;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Map of `Schema` objects. */\n properties?: {\n [k: string]: T;\n };\n /** A hint suggesting the order in which the keys should appear in the generated JSON string. */\n propertyOrdering?: string[];\n /** Optional. The enum of the property. */\n enum?: string[];\n /** Optional. The example of the property. */\n example?: unknown;\n /** Optional. Whether the property is nullable. */\n nullable?: boolean;\n /** The minimum value of a numeric type. */\n minimum?: number;\n /** The maximum value of a numeric type. 
*/\n maximum?: number;\n [key: string]: unknown;\n}\n\n/**\n * Params passed to {@link Schema} static methods to create specific\n * {@link Schema} classes.\n * @public\n */\nexport interface SchemaParams extends SchemaShared<SchemaInterface> {}\n\n/**\n * Final format for {@link Schema} params passed to backend requests.\n * @public\n */\nexport interface SchemaRequest extends SchemaShared<SchemaRequest> {\n /**\n * The type of the property. this can only be undefined when using `anyOf` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }.\n */\n type?: SchemaType;\n /** Optional. Array of required property. */\n required?: string[];\n}\n\n/**\n * Interface for {@link Schema} class.\n * @public\n */\nexport interface SchemaInterface extends SchemaShared<SchemaInterface> {\n /**\n * The type of the property. this can only be undefined when using `anyof` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.\n */\n type?: SchemaType;\n}\n\n/**\n * Interface for JSON parameters in a schema of {@link (SchemaType:type)}\n * \"object\" when not using the `Schema.object()` helper.\n * @public\n */\nexport interface ObjectSchemaRequest extends SchemaRequest {\n type: 'object';\n /**\n * This is not a property accepted in the final request to the backend, but is\n * a client-side convenience property that is only usable by constructing\n * a schema through the `Schema.object()` helper method. Populating this\n * property will cause response errors if the object is not wrapped with\n * `Schema.object()`.\n */\n optionalProperties?: never;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ImagenImageFormat } from '../../requests/imagen-image-format';\n\n/**\n * Parameters for configuring an {@link ImagenModel}.\n *\n * @public\n */\nexport interface ImagenModelParams {\n /**\n * The Imagen model to use for generating images.\n * For example: `imagen-3.0-generate-002`.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}\n * for a full list of supported Imagen 3 models.\n */\n model: string;\n /**\n * Configuration options for generating images with Imagen.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering potentially inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n}\n\n/**\n * Configuration options for generating images with Imagen.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for\n * more details.\n *\n * @public\n */\nexport interface ImagenGenerationConfig {\n /**\n * A description of what should be omitted from the generated images.\n *\n * Support for negative prompts depends on 
the Imagen model.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.\n *\n * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions\n * greater than `imagen-3.0-generate-002`.\n */\n negativePrompt?: string;\n /**\n * The number of images to generate. The default value is 1.\n *\n * The number of sample images that may be generated in each request depends on the model\n * (typically up to 4); see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">sampleCount</a>\n * documentation for more details.\n */\n numberOfImages?: number;\n /**\n * The aspect ratio of the generated images. The default value is square 1:1.\n * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}\n * for more details.\n */\n aspectRatio?: ImagenAspectRatio;\n /**\n * The image format of the generated images. The default is PNG.\n *\n * See {@link ImagenImageFormat} for more details.\n */\n imageFormat?: ImagenImageFormat;\n /**\n * Whether to add an invisible watermark to generated images.\n *\n * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate\n * that they are AI generated. If set to `false`, watermarking will be disabled.\n *\n * For Imagen 3 models, the default value is `true`; see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">addWatermark</a>\n * documentation for more details.\n *\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,\n * and cannot be turned off.\n */\n addWatermark?: boolean;\n}\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport const ImagenSafetyFilterLevel = {\n /**\n * The most aggressive filtering level; most strict blocking.\n */\n BLOCK_LOW_AND_ABOVE: 'block_low_and_above',\n /**\n * Blocks some sensitive prompts and responses.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',\n /**\n * Blocks few sensitive prompts and responses.\n */\n BLOCK_ONLY_HIGH: 'block_only_high',\n /**\n * The least aggressive filtering level; blocks very few sensitive prompts and responses.\n *\n * Access to this feature is restricted and may require your case to be reviewed and approved by\n * Cloud support.\n */\n BLOCK_NONE: 'block_none'\n} as const;\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. 
See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport type ImagenSafetyFilterLevel =\n (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport const ImagenPersonFilterLevel = {\n /**\n * Disallow generation of images containing people or faces; images of people are filtered out.\n */\n BLOCK_ALL: 'dont_allow',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ADULT: 'allow_adult',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ALL: 'allow_all'\n} as const;\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport type ImagenPersonFilterLevel =\n (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];\n\n/**\n * Settings for controlling the aggressiveness of filtering out sensitive content.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details.\n *\n * @public\n */\nexport interface ImagenSafetySettings {\n /**\n * A filter level controlling how aggressive to filter out sensitive content from generated\n * images.\n */\n safetyFilterLevel?: ImagenSafetyFilterLevel;\n /**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n */\n personFilterLevel?: ImagenPersonFilterLevel;\n}\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport const ImagenAspectRatio = {\n /**\n * Square (1:1) aspect ratio.\n */\n 'SQUARE': '1:1',\n /**\n * Landscape (3:4) aspect ratio.\n */\n 'LANDSCAPE_3x4': '3:4',\n /**\n * Portrait (4:3) aspect ratio.\n */\n 'PORTRAIT_4x3': '4:3',\n /**\n * Landscape (16:9) aspect ratio.\n */\n 'LANDSCAPE_16x9': '16:9',\n /**\n * Portrait (9:16) aspect ratio.\n */\n 'PORTRAIT_9x16': '9:16'\n} as const;\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for 
generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport type ImagenAspectRatio =\n (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp } from '@firebase/app';\nimport { Backend } from './backend';\n\nexport * from './types';\n\n/**\n * An instance of the Firebase AI SDK.\n *\n * Do not create this instance directly. Instead, use {@link getAI | getAI()}.\n *\n * @public\n */\nexport interface AI {\n /**\n * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.\n */\n app: FirebaseApp;\n /**\n * A {@link Backend} instance that specifies the configuration for the target backend,\n * either the Gemini Developer API (using {@link GoogleAIBackend}) or the\n * Vertex AI Gemini API (using {@link VertexAIBackend}).\n */\n backend: Backend;\n /**\n * Options applied to this {@link AI} instance.\n */\n options?: AIOptions;\n /**\n * @deprecated use `AI.backend.location` instead.\n *\n * The location configured for this AI service instance, relevant for Vertex AI backends.\n */\n location: string;\n}\n\n/**\n * An enum-like object containing constants that represent the supported backends\n * for the Firebase AI SDK.\n * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)\n * the SDK will communicate with.\n *\n * These values are assigned to the `backendType` property within the specific backend\n * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify\n * which service to target.\n *\n * @public\n */\nexport const BackendType = {\n /**\n * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.\n * Use this constant when creating a {@link VertexAIBackend} configuration.\n */\n VERTEX_AI: 'VERTEX_AI',\n\n /**\n * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).\n * Use this constant when creating a {@link GoogleAIBackend} configuration.\n */\n GOOGLE_AI: 'GOOGLE_AI'\n} as const; // Using 'as const' makes the string values literal types\n\n/**\n * Type alias representing valid backend types.\n * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.\n *\n * @public\n */\nexport type BackendType = (typeof BackendType)[keyof typeof BackendType];\n\n/**\n * Options for initializing the AI service using {@link getAI | getAI()}.\n * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)\n * and configuring its specific options (like location for Vertex AI).\n *\n * @public\n */\nexport interface AIOptions {\n /**\n * The backend configuration to use for the AI service instance.\n * Defaults to the Gemini Developer API backend 
({@link GoogleAIBackend}).\n */\n backend?: Backend;\n /**\n * Whether to use App Check limited use tokens. Defaults to false.\n */\n useLimitedUseAppCheckTokens?: boolean;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { DEFAULT_LOCATION } from './constants';\nimport { BackendType } from './public-types';\n\n/**\n * Abstract base class representing the configuration for an AI service backend.\n * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for\n * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and\n * {@link VertexAIBackend} for the Vertex AI Gemini API.\n *\n * @public\n */\nexport abstract class Backend {\n /**\n * Specifies the backend type.\n */\n readonly backendType: BackendType;\n\n /**\n * Protected constructor for use by subclasses.\n * @param type - The backend type.\n */\n protected constructor(type: BackendType) {\n this.backendType = type;\n }\n}\n\n/**\n * Configuration class for the Gemini Developer API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.\n *\n * @public\n */\nexport class GoogleAIBackend extends Backend {\n /**\n * Creates a configuration object for the Gemini Developer API backend.\n */\n constructor() {\n super(BackendType.GOOGLE_AI);\n }\n}\n\n/**\n * Configuration class for the Vertex AI Gemini API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.\n *\n * @public\n */\nexport class VertexAIBackend extends Backend {\n /**\n * The region identifier.\n * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n readonly location: string;\n\n /**\n * Creates a configuration object for the Vertex AI backend.\n *\n * @param location - The region identifier, defaulting to `us-central1`;\n * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n constructor(location: string = DEFAULT_LOCATION) {\n super(BackendType.VERTEX_AI);\n if (!location) {\n this.location = DEFAULT_LOCATION;\n } else {\n this.location = location;\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * 
limitations under the License.\n */\n\nimport { AI_TYPE } from './constants';\nimport { AIError } from './errors';\nimport { AIErrorCode } from './types';\nimport { Backend, GoogleAIBackend, VertexAIBackend } from './backend';\n\n/**\n * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}\n * instances by backend type.\n *\n * @internal\n */\nexport function encodeInstanceIdentifier(backend: Backend): string {\n if (backend instanceof GoogleAIBackend) {\n return `${AI_TYPE}/googleai`;\n } else if (backend instanceof VertexAIBackend) {\n return `${AI_TYPE}/vertexai/${backend.location}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(backend.backendType)}`\n );\n }\n}\n\n/**\n * Decodes an instance identifier string into a {@link Backend}.\n *\n * @internal\n */\nexport function decodeInstanceIdentifier(instanceIdentifier: string): Backend {\n const identifierParts = instanceIdentifier.split('/');\n if (identifierParts[0] !== AI_TYPE) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`\n );\n }\n const backendType = identifierParts[1];\n switch (backendType) {\n case 'vertexai':\n const location: string | undefined = identifierParts[2];\n if (!location) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown location '${instanceIdentifier}'`\n );\n }\n return new VertexAIBackend(location);\n case 'googleai':\n return new GoogleAIBackend();\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier string: '${instanceIdentifier}'`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Logger } from '@firebase/logger';\n\nexport const logger = new Logger('@firebase/vertexai');\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * The subset of the Prompt API\n * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl }\n * required for hybrid functionality.\n *\n * @internal\n */\nexport interface LanguageModel extends EventTarget {\n create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;\n availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;\n prompt(\n input: LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): Promise<string>;\n promptStreaming(\n input: 
LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): ReadableStream;\n measureInputUsage(\n input: LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): Promise<number>;\n destroy(): undefined;\n}\n\n/**\n * @internal\n */\nexport enum Availability {\n 'UNAVAILABLE' = 'unavailable',\n 'DOWNLOADABLE' = 'downloadable',\n 'DOWNLOADING' = 'downloading',\n 'AVAILABLE' = 'available'\n}\n\n/**\n * Configures the creation of an on-device language model session.\n * @beta\n */\nexport interface LanguageModelCreateCoreOptions {\n topK?: number;\n temperature?: number;\n expectedInputs?: LanguageModelExpected[];\n}\n\n/**\n * Configures the creation of an on-device language model session.\n * @beta\n */\nexport interface LanguageModelCreateOptions\n extends LanguageModelCreateCoreOptions {\n signal?: AbortSignal;\n initialPrompts?: LanguageModelMessage[];\n}\n\n/**\n * Options for an on-device language model prompt.\n * @beta\n */\nexport interface LanguageModelPromptOptions {\n responseConstraint?: object;\n // TODO: Restore AbortSignal once the API is defined.\n}\n\n/**\n * Options for the expected inputs for an on-device language model.\n * @beta\n */ export interface LanguageModelExpected {\n type: LanguageModelMessageType;\n languages?: string[];\n}\n\n/**\n * An on-device language model prompt.\n * @beta\n */\nexport type LanguageModelPrompt = LanguageModelMessage[];\n\n/**\n * An on-device language model message.\n * @beta\n */\nexport interface LanguageModelMessage {\n role: LanguageModelMessageRole;\n content: LanguageModelMessageContent[];\n}\n\n/**\n * An on-device language model content object.\n * @beta\n */\nexport interface LanguageModelMessageContent {\n type: LanguageModelMessageType;\n value: LanguageModelMessageContentValue;\n}\n\n/**\n * Allowable roles for on-device language model usage.\n * @beta\n */\nexport type LanguageModelMessageRole = 'system' | 'user' | 'assistant';\n\n/**\n * Allowable types for on-device language model messages.\n * @beta\n */\nexport type LanguageModelMessageType = 'text' | 'image' | 'audio';\n\n/**\n * Content formats that can be provided as on-device message content.\n * @beta\n */\nexport type LanguageModelMessageContentValue =\n | ImageBitmapSource\n | AudioBuffer\n | BufferSource\n | string;\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport {\n CountTokensRequest,\n GenerateContentRequest,\n InferenceMode,\n Part,\n AIErrorCode,\n OnDeviceParams,\n Content,\n Role\n} from '../types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport {\n Availability,\n LanguageModel,\n LanguageModelExpected,\n LanguageModelMessage,\n LanguageModelMessageContent,\n LanguageModelMessageRole\n} from '../types/language-model';\n\n// Defaults to support image inputs for convenience.\nconst defaultExpectedInputs: LanguageModelExpected[] = [{ type: 'image' }];\n\n/**\n * Defines an 
inference \"backend\" that uses Chrome's on-device model,\n * and encapsulates logic for detecting when on-device inference is\n * possible.\n */\nexport class ChromeAdapterImpl implements ChromeAdapter {\n // Visible for testing\n static SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];\n private isDownloading = false;\n private downloadPromise: Promise<LanguageModel | void> | undefined;\n private oldSession: LanguageModel | undefined;\n onDeviceParams: OnDeviceParams = {\n createOptions: {\n expectedInputs: defaultExpectedInputs\n }\n };\n constructor(\n public languageModelProvider: LanguageModel,\n public mode: InferenceMode,\n onDeviceParams?: OnDeviceParams\n ) {\n if (onDeviceParams) {\n this.onDeviceParams = onDeviceParams;\n if (!this.onDeviceParams.createOptions) {\n this.onDeviceParams.createOptions = {\n expectedInputs: defaultExpectedInputs\n };\n } else if (!this.onDeviceParams.createOptions.expectedInputs) {\n this.onDeviceParams.createOptions.expectedInputs =\n defaultExpectedInputs;\n }\n }\n }\n\n /**\n * Checks if a given request can be made on-device.\n *\n * Encapsulates a few concerns:\n * the mode\n * API existence\n * prompt formatting\n * model availability, including triggering download if necessary\n *\n *\n * Pros: callers needn't be concerned with details of on-device availability.</p>\n * Cons: this method spans a few concerns and splits request validation from usage.\n * If instance variables weren't already part of the API, we could consider a better\n * separation of concerns.\n */\n async isAvailable(request: GenerateContentRequest): Promise<boolean> {\n if (!this.mode) {\n logger.debug(\n `On-device inference unavailable because mode is undefined.`\n );\n return false;\n }\n if (this.mode === InferenceMode.ONLY_IN_CLOUD) {\n logger.debug(\n `On-device inference unavailable because mode is \"only_in_cloud\".`\n );\n return false;\n }\n\n // Triggers out-of-band download so model will eventually become available.\n const availability = await this.downloadIfAvailable();\n\n if (this.mode === InferenceMode.ONLY_ON_DEVICE) {\n // If it will never be available due to API inavailability, throw.\n if (availability === Availability.UNAVAILABLE) {\n throw new AIError(\n AIErrorCode.API_NOT_ENABLED,\n 'Local LanguageModel API not available in this environment.'\n );\n } else if (\n availability === Availability.DOWNLOADABLE ||\n availability === Availability.DOWNLOADING\n ) {\n // TODO(chholland): Better user experience during download - progress?\n logger.debug(`Waiting for download of LanguageModel to complete.`);\n await this.downloadPromise;\n return true;\n }\n return true;\n }\n\n // Applies prefer_on_device logic.\n if (availability !== Availability.AVAILABLE) {\n logger.debug(\n `On-device inference unavailable because availability is \"${availability}\".`\n );\n return false;\n }\n if (!ChromeAdapterImpl.isOnDeviceRequest(request)) {\n logger.debug(\n `On-device inference unavailable because request is incompatible.`\n );\n return false;\n }\n\n return true;\n }\n\n /**\n * Generates content on device.\n *\n * @remarks\n * This is comparable to {@link GenerativeModel.generateContent} for generating content in\n * Cloud.\n * @param request - a standard Firebase AI {@link GenerateContentRequest}\n * @returns {@link Response}, so we can reuse common response formatting.\n */\n async generateContent(request: GenerateContentRequest): Promise<Response> {\n const session = await this.createSession();\n const contents = await Promise.all(\n 
request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)\n );\n const text = await session.prompt(\n contents,\n this.onDeviceParams.promptOptions\n );\n return ChromeAdapterImpl.toResponse(text);\n }\n\n /**\n * Generates content stream on device.\n *\n * @remarks\n * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in\n * Cloud.\n * @param request - a standard Firebase AI {@link GenerateContentRequest}\n * @returns {@link Response}, so we can reuse common response formatting.\n */\n async generateContentStream(\n request: GenerateContentRequest\n ): Promise<Response> {\n const session = await this.createSession();\n const contents = await Promise.all(\n request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)\n );\n const stream = session.promptStreaming(\n contents,\n this.onDeviceParams.promptOptions\n );\n return ChromeAdapterImpl.toStreamResponse(stream);\n }\n\n async countTokens(_request: CountTokensRequest): Promise<Response> {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'Count Tokens is not yet available for on-device model.'\n );\n }\n\n /**\n * Asserts inference for the given request can be performed by an on-device model.\n */\n private static isOnDeviceRequest(request: GenerateContentRequest): boolean {\n // Returns false if the prompt is empty.\n if (request.contents.length === 0) {\n logger.debug('Empty prompt rejected for on-device inference.');\n return false;\n }\n\n for (const content of request.contents) {\n if (content.role === 'function') {\n logger.debug(`\"Function\" role rejected for on-device inference.`);\n return false;\n }\n\n // Returns false if request contains an image with an unsupported mime type.\n for (const part of content.parts) {\n if (\n part.inlineData &&\n ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(\n part.inlineData.mimeType\n ) === -1\n ) {\n logger.debug(\n `Unsupported mime type \"${part.inlineData.mimeType}\" rejected for on-device inference.`\n );\n return false;\n }\n }\n }\n\n return true;\n }\n\n /**\n * Encapsulates logic to get availability and download a model if one is downloadable.\n */\n private async downloadIfAvailable(): Promise<Availability | undefined> {\n const availability = await this.languageModelProvider?.availability(\n this.onDeviceParams.createOptions\n );\n\n if (availability === Availability.DOWNLOADABLE) {\n this.download();\n }\n\n return availability;\n }\n\n /**\n * Triggers out-of-band download of an on-device model.\n *\n * Chrome only downloads models as needed. 
Chrome knows a model is needed when code calls\n * LanguageModel.create.\n *\n * Since Chrome manages the download, the SDK can only avoid redundant download requests by\n * tracking if a download has previously been requested.\n */\n private download(): void {\n if (this.isDownloading) {\n return;\n }\n this.isDownloading = true;\n this.downloadPromise = this.languageModelProvider\n ?.create(this.onDeviceParams.createOptions)\n .finally(() => {\n this.isDownloading = false;\n });\n }\n\n /**\n * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.\n */\n private static async toLanguageModelMessage(\n content: Content\n ): Promise<LanguageModelMessage> {\n const languageModelMessageContents = await Promise.all(\n content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent)\n );\n return {\n role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role),\n content: languageModelMessageContents\n };\n }\n\n /**\n * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.\n */\n private static async toLanguageModelMessageContent(\n part: Part\n ): Promise<LanguageModelMessageContent> {\n if (part.text) {\n return {\n type: 'text',\n value: part.text\n };\n } else if (part.inlineData) {\n const formattedImageContent = await fetch(\n `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`\n );\n const imageBlob = await formattedImageContent.blob();\n const imageBitmap = await createImageBitmap(imageBlob);\n return {\n type: 'image',\n value: imageBitmap\n };\n }\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n `Processing of this Part type is not currently supported.`\n );\n }\n\n /**\n * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.\n */\n private static toLanguageModelMessageRole(\n role: Role\n ): LanguageModelMessageRole {\n // Assumes 'function' rule has been filtered by isOnDeviceRequest\n return role === 'model' ? 'assistant' : 'user';\n }\n\n /**\n * Abstracts Chrome session creation.\n *\n * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all\n * inference. 
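The adapter above converts Firebase AI Content objects into Chrome's prompt format before calling session.prompt: the 'model' role becomes 'assistant' (anything else that survives the 'function'-role filter becomes 'user'), text parts become { type: 'text', value } entries, and inline images are decoded to ImageBitmaps. A minimal standalone sketch of the text-only mapping, using local stand-in types rather than the SDK's internal imports:

// Local stand-ins for the shapes used by toLanguageModelMessage above (illustrative only).
interface Part { text?: string; }
interface Content { role: 'user' | 'model' | 'system' | 'function'; parts: Part[]; }
interface LanguageModelMessageContent { type: 'text' | 'image' | 'audio'; value: string; }
interface LanguageModelMessage { role: 'system' | 'user' | 'assistant'; content: LanguageModelMessageContent[]; }

// 'model' maps to 'assistant'; other roles (after 'function' has been filtered out) map to 'user'.
function toMessage(content: Content): LanguageModelMessage {
  return {
    role: content.role === 'model' ? 'assistant' : 'user',
    content: content.parts
      .filter(part => part.text !== undefined)
      .map(part => ({ type: 'text' as const, value: part.text! }))
  };
}

console.log(toMessage({ role: 'model', parts: [{ text: 'Hello!' }] }));
// -> { role: 'assistant', content: [ { type: 'text', value: 'Hello!' } ] }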
To map the Firebase AI API to Chrome's API, the SDK creates a new session for all\n * inference.\n *\n * Chrome will remove a model from memory if it's no longer in use, so this method ensures a\n * new session is created before an old session is destroyed.\n */\n private async createSession(): Promise<LanguageModel> {\n if (!this.languageModelProvider) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Chrome AI requested for unsupported browser version.'\n );\n }\n const newSession = await this.languageModelProvider.create(\n this.onDeviceParams.createOptions\n );\n if (this.oldSession) {\n this.oldSession.destroy();\n }\n // Holds session reference, so model isn't unloaded from memory.\n this.oldSession = newSession;\n return newSession;\n }\n\n /**\n * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.\n */\n private static toResponse(text: string): Response {\n return {\n json: async () => ({\n candidates: [\n {\n content: {\n parts: [{ text }]\n }\n }\n ]\n })\n } as Response;\n }\n\n /**\n * Formats string stream returned by Chrome as SSE returned by Firebase AI.\n */\n private static toStreamResponse(stream: ReadableStream<string>): Response {\n const encoder = new TextEncoder();\n return {\n body: stream.pipeThrough(\n new TransformStream({\n transform(chunk, controller) {\n const json = JSON.stringify({\n candidates: [\n {\n content: {\n role: 'model',\n parts: [{ text: chunk }]\n }\n }\n ]\n });\n controller.enqueue(encoder.encode(`data: ${json}\\n\\n`));\n }\n })\n )\n } as Response;\n }\n}\n\n/**\n * Creates a ChromeAdapterImpl on demand.\n */\nexport function chromeAdapterFactory(\n mode: InferenceMode,\n window?: Window,\n params?: OnDeviceParams\n): ChromeAdapterImpl | undefined {\n // Do not initialize a ChromeAdapter if we are not in hybrid mode.\n if (typeof window !== 'undefined' && mode) {\n return new ChromeAdapterImpl(\n (window as Window).LanguageModel as LanguageModel,\n mode,\n params\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp, _FirebaseService } from '@firebase/app';\nimport { AI, AIOptions, InferenceMode, OnDeviceParams } from './public-types';\nimport {\n AppCheckInternalComponentName,\n FirebaseAppCheckInternal\n} from '@firebase/app-check-interop-types';\nimport { Provider } from '@firebase/component';\nimport {\n FirebaseAuthInternal,\n FirebaseAuthInternalName\n} from '@firebase/auth-interop-types';\nimport { Backend, VertexAIBackend } from './backend';\nimport { ChromeAdapterImpl } from './methods/chrome-adapter';\n\nexport class AIService implements AI, _FirebaseService {\n auth: FirebaseAuthInternal | null;\n appCheck: FirebaseAppCheckInternal | null;\n _options?: Omit<AIOptions, 'backend'>;\n location: string; // This is here for backwards-compatibility\n\n constructor(\n public app: FirebaseApp,\n public backend: Backend,\n authProvider?: Provider<FirebaseAuthInternalName>,\n appCheckProvider?: 
Provider<AppCheckInternalComponentName>,\n public chromeAdapterFactory?: (\n mode: InferenceMode,\n window?: Window,\n params?: OnDeviceParams\n ) => ChromeAdapterImpl | undefined\n ) {\n const appCheck = appCheckProvider?.getImmediate({ optional: true });\n const auth = authProvider?.getImmediate({ optional: true });\n this.auth = auth || null;\n this.appCheck = appCheck || null;\n\n if (backend instanceof VertexAIBackend) {\n this.location = backend.location;\n } else {\n this.location = '';\n }\n }\n\n _delete(): Promise<void> {\n return Promise.resolve();\n }\n\n set options(optionsToSet: AIOptions) {\n this._options = optionsToSet;\n }\n\n get options(): AIOptions | undefined {\n return this._options;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n ComponentContainer,\n InstanceFactoryOptions\n} from '@firebase/component';\nimport { AIError } from './errors';\nimport { decodeInstanceIdentifier } from './helpers';\nimport { chromeAdapterFactory } from './methods/chrome-adapter';\nimport { AIService } from './service';\nimport { AIErrorCode } from './types';\n\nexport function factory(\n container: ComponentContainer,\n { instanceIdentifier }: InstanceFactoryOptions\n): AIService {\n if (!instanceIdentifier) {\n throw new AIError(\n AIErrorCode.ERROR,\n 'AIService instance identifier is undefined.'\n );\n }\n\n const backend = decodeInstanceIdentifier(instanceIdentifier);\n\n // getImmediate for FirebaseApp will always succeed\n const app = container.getProvider('app').getImmediate();\n const auth = container.getProvider('auth-internal');\n const appCheckProvider = container.getProvider('app-check-internal');\n\n return new AIService(\n app,\n backend,\n auth,\n appCheckProvider,\n chromeAdapterFactory\n );\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode, AI, BackendType } from '../public-types';\nimport { AIService } from '../service';\nimport { ApiSettings } from '../types/internal';\nimport { _isFirebaseServerApp } from '@firebase/app';\n\n/**\n * Base class for Firebase AI model APIs.\n *\n * Instances of this class are associated with a specific Firebase AI {@link Backend}\n * and provide methods for interacting with the configured generative model.\n *\n * @public\n */\nexport abstract class AIModel {\n /**\n * The fully qualified model resource name to use for 
generating images\n * (for example, `publishers/google/models/imagen-3.0-generate-002`).\n */\n readonly model: string;\n\n /**\n * @internal\n */\n _apiSettings: ApiSettings;\n\n /**\n * Constructs a new instance of the {@link AIModel} class.\n *\n * This constructor should only be called from subclasses that provide\n * a model API.\n *\n * @param ai - an {@link AI} instance.\n * @param modelName - The name of the model being used. It can be in one of the following formats:\n * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)\n * - `models/my-model` (will resolve to `publishers/google/models/my-model`)\n * - `publishers/my-publisher/models/my-model` (fully qualified model name)\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @internal\n */\n protected constructor(ai: AI, modelName: string) {\n if (!ai.app?.options?.apiKey) {\n throw new AIError(\n AIErrorCode.NO_API_KEY,\n `The \"apiKey\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`\n );\n } else if (!ai.app?.options?.projectId) {\n throw new AIError(\n AIErrorCode.NO_PROJECT_ID,\n `The \"projectId\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`\n );\n } else if (!ai.app?.options?.appId) {\n throw new AIError(\n AIErrorCode.NO_APP_ID,\n `The \"appId\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`\n );\n } else {\n this._apiSettings = {\n apiKey: ai.app.options.apiKey,\n project: ai.app.options.projectId,\n appId: ai.app.options.appId,\n automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,\n location: ai.location,\n backend: ai.backend\n };\n\n if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {\n const token = ai.app.settings.appCheckToken;\n this._apiSettings.getAppCheckToken = () => {\n return Promise.resolve({ token });\n };\n } else if ((ai as AIService).appCheck) {\n if (ai.options?.useLimitedUseAppCheckTokens) {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getLimitedUseToken();\n } else {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getToken();\n }\n }\n\n if ((ai as AIService).auth) {\n this._apiSettings.getAuthToken = () =>\n (ai as AIService).auth!.getToken();\n }\n\n this.model = AIModel.normalizeModelName(\n modelName,\n this._apiSettings.backend.backendType\n );\n }\n }\n\n /**\n * Normalizes the given model name to a fully qualified model resource name.\n *\n * @param modelName - The model name to normalize.\n * @returns The fully qualified model resource name.\n *\n * @internal\n */\n static normalizeModelName(\n modelName: string,\n backendType: BackendType\n ): string {\n if (backendType === BackendType.GOOGLE_AI) {\n return AIModel.normalizeGoogleAIModelName(modelName);\n } else {\n return AIModel.normalizeVertexAIModelName(modelName);\n }\n }\n\n /**\n * @internal\n */\n private static normalizeGoogleAIModelName(modelName: string): string {\n return `models/${modelName}`;\n }\n\n /**\n * @internal\n */\n private static normalizeVertexAIModelName(modelName: string): string {\n let model: string;\n if (modelName.includes('/')) {\n if (modelName.startsWith('models/')) {\n // Add 'publishers/google' if the user is only passing in 'models/model-name'.\n model = `publishers/google/${modelName}`;\n } else {\n // Any other custom format (e.g. 
tuned models) must be passed in correctly.\n model = modelName;\n }\n } else {\n // If path is not included, assume it's a non-tuned model.\n model = `publishers/google/models/${modelName}`;\n }\n\n return model;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ErrorDetails, RequestOptions, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ApiSettings } from '../types/internal';\nimport {\n DEFAULT_API_VERSION,\n DEFAULT_DOMAIN,\n DEFAULT_FETCH_TIMEOUT_MS,\n LANGUAGE_TAG,\n PACKAGE_VERSION\n} from '../constants';\nimport { logger } from '../logger';\nimport { GoogleAIBackend, VertexAIBackend } from '../backend';\nimport { BackendType } from '../public-types';\n\nexport enum Task {\n GENERATE_CONTENT = 'generateContent',\n STREAM_GENERATE_CONTENT = 'streamGenerateContent',\n COUNT_TOKENS = 'countTokens',\n PREDICT = 'predict'\n}\n\nexport class RequestUrl {\n constructor(\n public model: string,\n public task: Task,\n public apiSettings: ApiSettings,\n public stream: boolean,\n public requestOptions?: RequestOptions\n ) {}\n toString(): string {\n const url = new URL(this.baseUrl); // Throws if the URL is invalid\n url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`;\n url.search = this.queryParams.toString();\n return url.toString();\n }\n\n private get baseUrl(): string {\n return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`;\n }\n\n private get apiVersion(): string {\n return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available\n }\n\n private get modelPath(): string {\n if (this.apiSettings.backend instanceof GoogleAIBackend) {\n return `projects/${this.apiSettings.project}/${this.model}`;\n } else if (this.apiSettings.backend instanceof VertexAIBackend) {\n return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`\n );\n }\n }\n\n private get queryParams(): URLSearchParams {\n const params = new URLSearchParams();\n if (this.stream) {\n params.set('alt', 'sse');\n }\n\n return params;\n }\n}\n\nexport class WebSocketUrl {\n constructor(public apiSettings: ApiSettings) {}\n toString(): string {\n const url = new URL(`wss://${DEFAULT_DOMAIN}`);\n url.pathname = this.pathname;\n\n const queryParams = new URLSearchParams();\n queryParams.set('key', this.apiSettings.apiKey);\n url.search = queryParams.toString();\n\n return url.toString();\n }\n\n private get pathname(): string {\n if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent';\n } else {\n return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`;\n }\n }\n}\n\n/**\n * Log language and \"fire/version\" to x-goog-api-client\n 
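The AIModel.normalizeModelName helpers shown above are easiest to follow with concrete inputs: Google AI model names are prefixed with 'models/', while Vertex AI names resolve under 'publishers/google/'. A standalone sketch of the same mapping, with an illustrative model name and local stand-ins rather than the SDK's exports:

// Mirrors the normalization rules of AIModel.normalizeModelName above.
type Backend = 'GOOGLE_AI' | 'VERTEX_AI'; // local stand-in for BackendType

function normalize(modelName: string, backend: Backend): string {
  if (backend === 'GOOGLE_AI') {
    return `models/${modelName}`;
  }
  if (!modelName.includes('/')) {
    return `publishers/google/models/${modelName}`; // short name, assumed non-tuned
  }
  if (modelName.startsWith('models/')) {
    return `publishers/google/${modelName}`; // 'models/<name>' form
  }
  return modelName; // already fully qualified (e.g. tuned models)
}

console.log(normalize('gemini-2.0-flash', 'GOOGLE_AI'));        // models/gemini-2.0-flash
console.log(normalize('gemini-2.0-flash', 'VERTEX_AI'));        // publishers/google/models/gemini-2.0-flash
console.log(normalize('models/gemini-2.0-flash', 'VERTEX_AI')); // publishers/google/models/gemini-2.0-flash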
*/\nfunction getClientHeaders(): string {\n const loggingTags = [];\n loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`);\n loggingTags.push(`fire/${PACKAGE_VERSION}`);\n return loggingTags.join(' ');\n}\n\nexport async function getHeaders(url: RequestUrl): Promise<Headers> {\n const headers = new Headers();\n headers.append('Content-Type', 'application/json');\n headers.append('x-goog-api-client', getClientHeaders());\n headers.append('x-goog-api-key', url.apiSettings.apiKey);\n if (url.apiSettings.automaticDataCollectionEnabled) {\n headers.append('X-Firebase-Appid', url.apiSettings.appId);\n }\n if (url.apiSettings.getAppCheckToken) {\n const appCheckToken = await url.apiSettings.getAppCheckToken();\n if (appCheckToken) {\n headers.append('X-Firebase-AppCheck', appCheckToken.token);\n if (appCheckToken.error) {\n logger.warn(\n `Unable to obtain a valid App Check token: ${appCheckToken.error.message}`\n );\n }\n }\n }\n\n if (url.apiSettings.getAuthToken) {\n const authToken = await url.apiSettings.getAuthToken();\n if (authToken) {\n headers.append('Authorization', `Firebase ${authToken.accessToken}`);\n }\n }\n\n return headers;\n}\n\nexport async function constructRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<{ url: string; fetchOptions: RequestInit }> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n return {\n url: url.toString(),\n fetchOptions: {\n method: 'POST',\n headers: await getHeaders(url),\n body\n }\n };\n}\n\nexport async function makeRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<Response> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n let response;\n let fetchTimeoutId: string | number | NodeJS.Timeout | undefined;\n try {\n const request = await constructRequest(\n model,\n task,\n apiSettings,\n stream,\n body,\n requestOptions\n );\n // Timeout is 180s by default\n const timeoutMillis =\n requestOptions?.timeout != null && requestOptions.timeout >= 0\n ? requestOptions.timeout\n : DEFAULT_FETCH_TIMEOUT_MS;\n const abortController = new AbortController();\n fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis);\n request.fetchOptions.signal = abortController.signal;\n\n response = await fetch(request.url, request.fetchOptions);\n if (!response.ok) {\n let message = '';\n let errorDetails;\n try {\n const json = await response.json();\n message = json.error.message;\n if (json.error.details) {\n message += ` ${JSON.stringify(json.error.details)}`;\n errorDetails = json.error.details;\n }\n } catch (e) {\n // ignored\n }\n if (\n response.status === 403 &&\n errorDetails &&\n errorDetails.some(\n (detail: ErrorDetails) => detail.reason === 'SERVICE_DISABLED'\n ) &&\n errorDetails.some((detail: ErrorDetails) =>\n (\n detail.links as Array<Record<string, string>>\n )?.[0]?.description.includes(\n 'Google developers console API activation'\n )\n )\n ) {\n throw new AIError(\n AIErrorCode.API_NOT_ENABLED,\n `The Firebase AI SDK requires the Firebase AI ` +\n `API ('firebasevertexai.googleapis.com') to be enabled in your ` +\n `Firebase project. Enable this API by visiting the Firebase Console ` +\n `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` +\n `and clicking \"Get started\". 
If you enabled this API recently, ` +\n `wait a few minutes for the action to propagate to our systems and ` +\n `then retry.`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n throw new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n } catch (e) {\n let err = e as Error;\n if (\n (e as AIError).code !== AIErrorCode.FETCH_ERROR &&\n (e as AIError).code !== AIErrorCode.API_NOT_ENABLED &&\n e instanceof Error\n ) {\n err = new AIError(\n AIErrorCode.ERROR,\n `Error fetching from ${url.toString()}: ${e.message}`\n );\n err.stack = e.stack;\n }\n\n throw err;\n } finally {\n if (fetchTimeoutId) {\n clearTimeout(fetchTimeoutId);\n }\n }\n return response;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n FinishReason,\n FunctionCall,\n GenerateContentCandidate,\n GenerateContentResponse,\n ImagenGCSImage,\n ImagenInlineImage,\n AIErrorCode,\n InlineDataPart,\n Part,\n InferenceSource\n} from '../types';\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport { ImagenResponseInternal } from '../types/internal';\n\n/**\n * Check that at least one candidate exists and does not have a bad\n * finish reason. Warns if multiple candidates exist.\n */\nfunction hasValidCandidates(response: GenerateContentResponse): boolean {\n if (response.candidates && response.candidates.length > 0) {\n if (response.candidates.length > 1) {\n logger.warn(\n `This response had ${response.candidates.length} ` +\n `candidates. Returning text from the first candidate only. ` +\n `Access response.candidates directly to use the other candidates.`\n );\n }\n if (hadBadFinishReason(response.candidates[0])) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Response error: ${formatBlockErrorMessage(\n response\n )}. 
Response body stored in error.response`,\n {\n response\n }\n );\n }\n return true;\n } else {\n return false;\n }\n}\n\n/**\n * Creates an EnhancedGenerateContentResponse object that has helper functions and\n * other modifications that improve usability.\n */\nexport function createEnhancedContentResponse(\n response: GenerateContentResponse,\n inferenceSource: InferenceSource = InferenceSource.IN_CLOUD\n): EnhancedGenerateContentResponse {\n /**\n * The Vertex AI backend omits default values.\n * This causes the `index` property to be omitted from the first candidate in the\n * response, since it has index 0, and 0 is a default value.\n * See: https://github.com/firebase/firebase-js-sdk/issues/8566\n */\n if (response.candidates && !response.candidates[0].hasOwnProperty('index')) {\n response.candidates[0].index = 0;\n }\n\n const responseWithHelpers = addHelpers(response);\n responseWithHelpers.inferenceSource = inferenceSource;\n return responseWithHelpers;\n}\n\n/**\n * Adds convenience helper methods to a response object, including stream\n * chunks (as long as each chunk is a complete GenerateContentResponse JSON).\n */\nexport function addHelpers(\n response: GenerateContentResponse\n): EnhancedGenerateContentResponse {\n (response as EnhancedGenerateContentResponse).text = () => {\n if (hasValidCandidates(response)) {\n return getText(response, part => !part.thought);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Text not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return '';\n };\n (response as EnhancedGenerateContentResponse).thoughtSummary = () => {\n if (hasValidCandidates(response)) {\n const result = getText(response, part => !!part.thought);\n return result === '' ? undefined : result;\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Thought summary not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).inlineDataParts = ():\n | InlineDataPart[]\n | undefined => {\n if (hasValidCandidates(response)) {\n return getInlineDataParts(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Data not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).functionCalls = () => {\n if (hasValidCandidates(response)) {\n return getFunctionCalls(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Function call not available. 
${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n return response as EnhancedGenerateContentResponse;\n}\n\n/**\n * Returns all text from the first candidate's parts, filtering by whether\n * `partFilter()` returns true.\n *\n * @param response - The `GenerateContentResponse` from which to extract text.\n * @param partFilter - Only return `Part`s for which this returns true\n */\nexport function getText(\n response: GenerateContentResponse,\n partFilter: (part: Part) => boolean\n): string {\n const textStrings = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.text && partFilter(part)) {\n textStrings.push(part.text);\n }\n }\n }\n if (textStrings.length > 0) {\n return textStrings.join('');\n } else {\n return '';\n }\n}\n\n/**\n * Returns every {@link FunctionCall} associated with first candidate.\n */\nexport function getFunctionCalls(\n response: GenerateContentResponse\n): FunctionCall[] | undefined {\n const functionCalls: FunctionCall[] = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.functionCall) {\n functionCalls.push(part.functionCall);\n }\n }\n }\n if (functionCalls.length > 0) {\n return functionCalls;\n } else {\n return undefined;\n }\n}\n\n/**\n * Returns every {@link InlineDataPart} in the first candidate if present.\n *\n * @internal\n */\nexport function getInlineDataParts(\n response: GenerateContentResponse\n): InlineDataPart[] | undefined {\n const data: InlineDataPart[] = [];\n\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.inlineData) {\n data.push(part);\n }\n }\n }\n\n if (data.length > 0) {\n return data;\n } else {\n return undefined;\n }\n}\n\nconst badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY];\n\nfunction hadBadFinishReason(candidate: GenerateContentCandidate): boolean {\n return (\n !!candidate.finishReason &&\n badFinishReasons.some(reason => reason === candidate.finishReason)\n );\n}\n\nexport function formatBlockErrorMessage(\n response: GenerateContentResponse\n): string {\n let message = '';\n if (\n (!response.candidates || response.candidates.length === 0) &&\n response.promptFeedback\n ) {\n message += 'Response was blocked';\n if (response.promptFeedback?.blockReason) {\n message += ` due to ${response.promptFeedback.blockReason}`;\n }\n if (response.promptFeedback?.blockReasonMessage) {\n message += `: ${response.promptFeedback.blockReasonMessage}`;\n }\n } else if (response.candidates?.[0]) {\n const firstCandidate = response.candidates[0];\n if (hadBadFinishReason(firstCandidate)) {\n message += `Candidate was blocked due to ${firstCandidate.finishReason}`;\n if (firstCandidate.finishMessage) {\n message += `: ${firstCandidate.finishMessage}`;\n }\n }\n }\n return message;\n}\n\n/**\n * Convert a generic successful fetch response body to an Imagen response object\n * that can be returned to the user. 
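As a quick illustration of the helpers above: getText joins the text of the first candidate's parts that pass the filter, so text() (filter: !part.thought) and thoughtSummary() (filter: !!part.thought) split a response into answer text and thought text. A standalone sketch with local stand-in types, not the SDK's:

interface Part { text?: string; thought?: boolean; }
interface Candidate { content?: { parts?: Part[] }; }
interface GenerateContentResponse { candidates?: Candidate[]; }

// Mirrors getText above: concatenate text from the first candidate's matching parts.
function getText(response: GenerateContentResponse, partFilter: (part: Part) => boolean): string {
  const parts = response.candidates?.[0].content?.parts ?? [];
  return parts.filter(part => part.text && partFilter(part)).map(part => part.text).join('');
}

const response: GenerateContentResponse = {
  candidates: [{ content: { parts: [
    { text: 'Considering the question... ', thought: true },
    { text: 'Hello, ' },
    { text: 'world!' }
  ] } }]
};

console.log(getText(response, part => !part.thought));  // "Hello, world!" (what text() returns)
console.log(getText(response, part => !!part.thought)); // "Considering the question... " (thoughtSummary())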
This converts the REST APIs response format to our\n * APIs representation of a response.\n *\n * @internal\n */\nexport async function handlePredictResponse<\n T extends ImagenInlineImage | ImagenGCSImage\n>(response: Response): Promise<{ images: T[]; filteredReason?: string }> {\n const responseJson: ImagenResponseInternal = await response.json();\n\n const images: T[] = [];\n let filteredReason: string | undefined = undefined;\n\n // The backend should always send a non-empty array of predictions if the response was successful.\n if (!responseJson.predictions || responseJson.predictions?.length === 0) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'\n );\n }\n\n for (const prediction of responseJson.predictions) {\n if (prediction.raiFilteredReason) {\n filteredReason = prediction.raiFilteredReason;\n } else if (prediction.mimeType && prediction.bytesBase64Encoded) {\n images.push({\n mimeType: prediction.mimeType,\n bytesBase64Encoded: prediction.bytesBase64Encoded\n } as T);\n } else if (prediction.mimeType && prediction.gcsUri) {\n images.push({\n mimeType: prediction.mimeType,\n gcsURI: prediction.gcsUri\n } as T);\n } else if (prediction.safetyAttributes) {\n // Ignore safetyAttributes \"prediction\" to avoid throwing an error below.\n } else {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Unexpected element in 'predictions' array in response: '${JSON.stringify(\n prediction\n )}'`\n );\n }\n }\n\n return { images, filteredReason };\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport {\n CitationMetadata,\n CountTokensRequest,\n GenerateContentCandidate,\n GenerateContentRequest,\n GenerateContentResponse,\n HarmSeverity,\n InlineDataPart,\n PromptFeedback,\n SafetyRating,\n AIErrorCode\n} from './types';\nimport {\n GoogleAIGenerateContentResponse,\n GoogleAIGenerateContentCandidate,\n GoogleAICountTokensRequest\n} from './types/googleai';\n\n/**\n * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).\n * The public API prioritizes the format used by the Vertex AI Gemini API.\n * We avoid having two sets of types by translating requests and responses between the two API formats.\n * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API\n * with minimal code changes.\n *\n * In here are functions that map requests and responses between the two API formats.\n * Requests in the Vertex AI format are mapped to the Google AI format before being sent.\n * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.\n */\n\n/**\n * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google 
AI.\n *\n * @param generateContentRequest The {@link GenerateContentRequest} to map.\n * @returns A {@link GenerateContentResponse} that conforms to the Google AI format.\n *\n * @throws If the request contains properties that are unsupported by Google AI.\n *\n * @internal\n */\nexport function mapGenerateContentRequest(\n generateContentRequest: GenerateContentRequest\n): GenerateContentRequest {\n generateContentRequest.safetySettings?.forEach(safetySetting => {\n if (safetySetting.method) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'SafetySetting.method is not supported in the the Gemini Developer API. Please remove this property.'\n );\n }\n });\n\n if (generateContentRequest.generationConfig?.topK) {\n const roundedTopK = Math.round(\n generateContentRequest.generationConfig.topK\n );\n\n if (roundedTopK !== generateContentRequest.generationConfig.topK) {\n logger.warn(\n 'topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.'\n );\n generateContentRequest.generationConfig.topK = roundedTopK;\n }\n }\n\n return generateContentRequest;\n}\n\n/**\n * Maps a {@link GenerateContentResponse} from Google AI to the format of the\n * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.\n *\n * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.\n * @returns A {@link GenerateContentResponse} that conforms to the public API's format.\n *\n * @internal\n */\nexport function mapGenerateContentResponse(\n googleAIResponse: GoogleAIGenerateContentResponse\n): GenerateContentResponse {\n const generateContentResponse = {\n candidates: googleAIResponse.candidates\n ? mapGenerateContentCandidates(googleAIResponse.candidates)\n : undefined,\n prompt: googleAIResponse.promptFeedback\n ? 
mapPromptFeedback(googleAIResponse.promptFeedback)\n : undefined,\n usageMetadata: googleAIResponse.usageMetadata\n };\n\n return generateContentResponse;\n}\n\n/**\n * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.\n *\n * @param countTokensRequest The {@link CountTokensRequest} to map.\n * @param model The model to count tokens with.\n * @returns A {@link CountTokensRequest} that conforms to the Google AI format.\n *\n * @internal\n */\nexport function mapCountTokensRequest(\n countTokensRequest: CountTokensRequest,\n model: string\n): GoogleAICountTokensRequest {\n const mappedCountTokensRequest: GoogleAICountTokensRequest = {\n generateContentRequest: {\n model,\n ...countTokensRequest\n }\n };\n\n return mappedCountTokensRequest;\n}\n\n/**\n * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms\n * to the Vertex AI API format.\n *\n * @param candidates The {@link GoogleAIGenerateContentCandidate} to map.\n * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format.\n *\n * @throws If any {@link Part} in the candidates has a `videoMetadata` property.\n *\n * @internal\n */\nexport function mapGenerateContentCandidates(\n candidates: GoogleAIGenerateContentCandidate[]\n): GenerateContentCandidate[] {\n const mappedCandidates: GenerateContentCandidate[] = [];\n let mappedSafetyRatings: SafetyRating[];\n if (mappedCandidates) {\n candidates.forEach(candidate => {\n // Map citationSources to citations.\n let citationMetadata: CitationMetadata | undefined;\n if (candidate.citationMetadata) {\n citationMetadata = {\n citations: candidate.citationMetadata.citationSources\n };\n }\n\n // Assign missing candidate SafetyRatings properties to their defaults if undefined.\n if (candidate.safetyRatings) {\n mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => {\n return {\n ...safetyRating,\n severity:\n safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 0\n };\n });\n }\n\n // videoMetadata is not supported.\n // Throw early since developers may send a long video as input and only expect to pay\n // for inference on a small portion of the video.\n if (\n candidate.content?.parts?.some(\n part => (part as InlineDataPart)?.videoMetadata\n )\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'\n );\n }\n\n const mappedCandidate = {\n index: candidate.index,\n content: candidate.content,\n finishReason: candidate.finishReason,\n finishMessage: candidate.finishMessage,\n safetyRatings: mappedSafetyRatings,\n citationMetadata,\n groundingMetadata: candidate.groundingMetadata,\n urlContextMetadata: candidate.urlContextMetadata\n };\n mappedCandidates.push(mappedCandidate);\n });\n }\n\n return mappedCandidates;\n}\n\nexport function mapPromptFeedback(\n promptFeedback: PromptFeedback\n): PromptFeedback {\n // Assign missing SafetyRating properties to their defaults if undefined.\n const mappedSafetyRatings: SafetyRating[] = [];\n promptFeedback.safetyRatings.forEach(safetyRating => {\n mappedSafetyRatings.push({\n category: safetyRating.category,\n probability: safetyRating.probability,\n severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 
0,\n blocked: safetyRating.blocked\n });\n });\n\n const mappedPromptFeedback: PromptFeedback = {\n blockReason: promptFeedback.blockReason,\n safetyRatings: mappedSafetyRatings,\n blockReasonMessage: promptFeedback.blockReasonMessage\n };\n return mappedPromptFeedback;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n GenerateContentCandidate,\n GenerateContentResponse,\n GenerateContentStreamResult,\n Part,\n AIErrorCode\n} from '../types';\nimport { AIError } from '../errors';\nimport { createEnhancedContentResponse } from './response-helpers';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { GoogleAIGenerateContentResponse } from '../types/googleai';\nimport { ApiSettings } from '../types/internal';\nimport {\n BackendType,\n InferenceSource,\n URLContextMetadata\n} from '../public-types';\n\nconst responseLineRE = /^data\\: (.*)(?:\\n\\n|\\r\\r|\\r\\n\\r\\n)/;\n\n/**\n * Process a response.body stream from the backend and return an\n * iterator that provides one complete GenerateContentResponse at a time\n * and a promise that resolves with a single aggregated\n * GenerateContentResponse.\n *\n * @param response - Response from a fetch call\n */\nexport function processStream(\n response: Response,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): GenerateContentStreamResult {\n const inputStream = response.body!.pipeThrough(\n new TextDecoderStream('utf8', { fatal: true })\n );\n const responseStream =\n getResponseStream<GenerateContentResponse>(inputStream);\n const [stream1, stream2] = responseStream.tee();\n return {\n stream: generateResponseSequence(stream1, apiSettings, inferenceSource),\n response: getResponsePromise(stream2, apiSettings, inferenceSource)\n };\n}\n\nasync function getResponsePromise(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): Promise<EnhancedGenerateContentResponse> {\n const allResponses: GenerateContentResponse[] = [];\n const reader = stream.getReader();\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n let generateContentResponse = aggregateResponses(allResponses);\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n generateContentResponse = GoogleAIMapper.mapGenerateContentResponse(\n generateContentResponse as GoogleAIGenerateContentResponse\n );\n }\n return createEnhancedContentResponse(\n generateContentResponse,\n inferenceSource\n );\n }\n\n allResponses.push(value);\n }\n}\n\nasync function* generateResponseSequence(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): AsyncGenerator<EnhancedGenerateContentResponse> {\n const reader = stream.getReader();\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n break;\n }\n\n let enhancedResponse: EnhancedGenerateContentResponse;\n if 
(apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n enhancedResponse = createEnhancedContentResponse(\n GoogleAIMapper.mapGenerateContentResponse(\n value as GoogleAIGenerateContentResponse\n ),\n inferenceSource\n );\n } else {\n enhancedResponse = createEnhancedContentResponse(value, inferenceSource);\n }\n\n const firstCandidate = enhancedResponse.candidates?.[0];\n // Don't yield a response with no useful data for the developer.\n if (\n !firstCandidate?.content?.parts &&\n !firstCandidate?.finishReason &&\n !firstCandidate?.citationMetadata &&\n !firstCandidate?.urlContextMetadata\n ) {\n continue;\n }\n\n yield enhancedResponse;\n }\n}\n\n/**\n * Reads a raw stream from the fetch response and join incomplete\n * chunks, returning a new stream that provides a single complete\n * GenerateContentResponse in each iteration.\n */\nexport function getResponseStream<T>(\n inputStream: ReadableStream<string>\n): ReadableStream<T> {\n const reader = inputStream.getReader();\n const stream = new ReadableStream<T>({\n start(controller) {\n let currentText = '';\n return pump();\n function pump(): Promise<(() => Promise<void>) | undefined> {\n return reader.read().then(({ value, done }) => {\n if (done) {\n if (currentText.trim()) {\n controller.error(\n new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')\n );\n return;\n }\n controller.close();\n return;\n }\n\n currentText += value;\n let match = currentText.match(responseLineRE);\n let parsedResponse: T;\n while (match) {\n try {\n parsedResponse = JSON.parse(match[1]);\n } catch (e) {\n controller.error(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing JSON response: \"${match[1]}`\n )\n );\n return;\n }\n controller.enqueue(parsedResponse);\n currentText = currentText.substring(match[0].length);\n match = currentText.match(responseLineRE);\n }\n return pump();\n });\n }\n }\n });\n return stream;\n}\n\n/**\n * Aggregates an array of `GenerateContentResponse`s into a single\n * GenerateContentResponse.\n */\nexport function aggregateResponses(\n responses: GenerateContentResponse[]\n): GenerateContentResponse {\n const lastResponse = responses[responses.length - 1];\n const aggregatedResponse: GenerateContentResponse = {\n promptFeedback: lastResponse?.promptFeedback\n };\n for (const response of responses) {\n if (response.candidates) {\n for (const candidate of response.candidates) {\n // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined.\n // See: https://github.com/firebase/firebase-js-sdk/issues/8566\n const i = candidate.index || 0;\n if (!aggregatedResponse.candidates) {\n aggregatedResponse.candidates = [];\n }\n if (!aggregatedResponse.candidates[i]) {\n aggregatedResponse.candidates[i] = {\n index: candidate.index\n } as GenerateContentCandidate;\n }\n // Keep overwriting, the last one will be final\n aggregatedResponse.candidates[i].citationMetadata =\n candidate.citationMetadata;\n aggregatedResponse.candidates[i].finishReason = candidate.finishReason;\n aggregatedResponse.candidates[i].finishMessage =\n candidate.finishMessage;\n aggregatedResponse.candidates[i].safetyRatings =\n candidate.safetyRatings;\n aggregatedResponse.candidates[i].groundingMetadata =\n candidate.groundingMetadata;\n\n // The urlContextMetadata object is defined in the first chunk of the response stream.\n // In all subsequent chunks, the urlContextMetadata object will be undefined. 
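The stream reader above expects Server-Sent-Events-style chunks of the form data: <json> terminated by a blank line; getResponseStream buffers text until responseLineRE matches, then emits one parsed GenerateContentResponse per match. A small standalone sketch of the same buffering and parsing idea (not the SDK's exports):

// Same pattern as responseLineRE above.
const lineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/;

// Pulls every complete `data: {...}` event out of an incrementally received buffer,
// returning the parsed events plus the unparsed remainder.
function drain(buffer: string): { events: unknown[]; rest: string } {
  const events: unknown[] = [];
  let match = buffer.match(lineRE);
  while (match) {
    events.push(JSON.parse(match[1]));
    buffer = buffer.substring(match[0].length);
    match = buffer.match(lineRE);
  }
  return { events, rest: buffer };
}

const chunk = 'data: {"candidates":[{"content":{"parts":[{"text":"Hi"}]}}]}\n\n';
console.log(drain(chunk).events.length); // 1 parsed response chunk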
We need to\n // make sure that we don't overwrite the first value urlContextMetadata object with undefined.\n // FIXME: What happens if we receive a second, valid urlContextMetadata object?\n const urlContextMetadata = candidate.urlContextMetadata as unknown;\n if (\n typeof urlContextMetadata === 'object' &&\n urlContextMetadata !== null &&\n Object.keys(urlContextMetadata).length > 0\n ) {\n aggregatedResponse.candidates[i].urlContextMetadata =\n urlContextMetadata as URLContextMetadata;\n }\n\n /**\n * Candidates should always have content and parts, but this handles\n * possible malformed responses.\n */\n if (candidate.content) {\n // Skip a candidate without parts.\n if (!candidate.content.parts) {\n continue;\n }\n if (!aggregatedResponse.candidates[i].content) {\n aggregatedResponse.candidates[i].content = {\n role: candidate.content.role || 'user',\n parts: []\n };\n }\n for (const part of candidate.content.parts) {\n const newPart: Part = { ...part };\n // The backend can send empty text parts. If these are sent back\n // (e.g. in chat history), the backend will respond with an error.\n // To prevent this, ignore empty text parts.\n if (part.text === '') {\n continue;\n }\n if (Object.keys(newPart).length > 0) {\n aggregatedResponse.candidates[i].content.parts.push(\n newPart as Part\n );\n }\n }\n }\n }\n }\n }\n return aggregatedResponse;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n GenerateContentRequest,\n InferenceMode,\n AIErrorCode,\n ChromeAdapter,\n InferenceSource\n} from '../types';\nimport { ChromeAdapterImpl } from '../methods/chrome-adapter';\n\nconst errorsCausingFallback: AIErrorCode[] = [\n // most network errors\n AIErrorCode.FETCH_ERROR,\n // fallback code for all other errors in makeRequest\n AIErrorCode.ERROR,\n // error due to API not being enabled in project\n AIErrorCode.API_NOT_ENABLED\n];\n\ninterface CallResult<Response> {\n response: Response;\n inferenceSource: InferenceSource;\n}\n\n/**\n * Dispatches a request to the appropriate backend (on-device or in-cloud)\n * based on the inference mode.\n *\n * @param request - The request to be sent.\n * @param chromeAdapter - The on-device model adapter.\n * @param onDeviceCall - The function to call for on-device inference.\n * @param inCloudCall - The function to call for in-cloud inference.\n * @returns The response from the backend.\n */\nexport async function callCloudOrDevice<Response>(\n request: GenerateContentRequest,\n chromeAdapter: ChromeAdapter | undefined,\n onDeviceCall: () => Promise<Response>,\n inCloudCall: () => Promise<Response>\n): Promise<CallResult<Response>> {\n if (!chromeAdapter) {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n }\n switch ((chromeAdapter as ChromeAdapterImpl).mode) {\n case InferenceMode.ONLY_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await 
onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.'\n );\n case InferenceMode.ONLY_IN_CLOUD:\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n case InferenceMode.PREFER_IN_CLOUD:\n try {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n } catch (e) {\n if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw e;\n }\n case InferenceMode.PREFER_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Unexpected infererence mode: ${\n (chromeAdapter as ChromeAdapterImpl).mode\n }`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n GenerateContentRequest,\n GenerateContentResponse,\n GenerateContentResult,\n GenerateContentStreamResult,\n RequestOptions\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createEnhancedContentResponse } from '../requests/response-helpers';\nimport { processStream } from '../requests/stream-reader';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { callCloudOrDevice } from '../requests/hybrid-helpers';\n\nasync function generateContentStreamOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.STREAM_GENERATE_CONTENT,\n apiSettings,\n /* stream */ true,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContentStream(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentStreamResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContentStream(params),\n () =>\n generateContentStreamOnCloud(apiSettings, model, params, requestOptions)\n );\n return processStream(callResult.response, apiSettings); // TODO: Map streaming responses\n}\n\nasync function generateContentOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n 
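The dispatcher above reduces to a small decision table: ONLY_IN_CLOUD always calls the cloud, ONLY_ON_DEVICE requires on-device availability and otherwise throws, PREFER_ON_DEVICE uses the device when available and the cloud otherwise, and PREFER_IN_CLOUD tries the cloud first and falls back to the device only for FETCH_ERROR, ERROR, or API_NOT_ENABLED. A minimal standalone sketch of just the PREFER_IN_CLOUD fallback branch (hypothetical names, not the SDK's exports):

// Error codes that trigger fallback, per errorsCausingFallback above.
const FALLBACK_CODES = new Set(['fetch-error', 'error', 'api-not-enabled']);

async function preferInCloud<T>(
  inCloudCall: () => Promise<T>,
  onDeviceCall: () => Promise<T>
): Promise<{ response: T; source: 'cloud' | 'device' }> {
  try {
    return { response: await inCloudCall(), source: 'cloud' };
  } catch (e) {
    // Fall back to the on-device model only when the cloud backend looks
    // unreachable or not enabled; rethrow everything else.
    const code = (e as { code?: string }).code;
    if (code && FALLBACK_CODES.has(code)) {
      return { response: await onDeviceCall(), source: 'device' };
    }
    throw e;
  }
}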
if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.GENERATE_CONTENT,\n apiSettings,\n /* stream */ false,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContent(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContent(params),\n () => generateContentOnCloud(apiSettings, model, params, requestOptions)\n );\n const generateContentResponse = await processGenerateContentResponse(\n callResult.response,\n apiSettings\n );\n const enhancedResponse = createEnhancedContentResponse(\n generateContentResponse,\n callResult.inferenceSource\n );\n return {\n response: enhancedResponse\n };\n}\n\nasync function processGenerateContentResponse(\n response: Response,\n apiSettings: ApiSettings\n): Promise<GenerateContentResponse> {\n const responseJson = await response.json();\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return GoogleAIMapper.mapGenerateContentResponse(responseJson);\n } else {\n return responseJson;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, GenerateContentRequest, Part, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ImagenGenerationParams, PredictRequestBody } from '../types/internal';\n\nexport function formatSystemInstruction(\n input?: string | Part | Content\n): Content | undefined {\n // null or undefined\n if (input == null) {\n return undefined;\n } else if (typeof input === 'string') {\n return { role: 'system', parts: [{ text: input }] } as Content;\n } else if ((input as Part).text) {\n return { role: 'system', parts: [input as Part] };\n } else if ((input as Content).parts) {\n if (!(input as Content).role) {\n return { role: 'system', parts: (input as Content).parts };\n } else {\n return input as Content;\n }\n }\n}\n\nexport function formatNewContent(\n request: string | Array<string | Part>\n): Content {\n let newParts: Part[] = [];\n if (typeof request === 'string') {\n newParts = [{ text: request }];\n } else {\n for (const partOrString of request) {\n if (typeof partOrString === 'string') {\n newParts.push({ text: partOrString });\n } else {\n newParts.push(partOrString);\n }\n }\n }\n return assignRoleToPartsAndValidateSendMessageRequest(newParts);\n}\n\n/**\n * When multiple Part types (i.e. FunctionResponsePart and TextPart) are\n * passed in a single Part array, we may need to assign different roles to each\n * part. 
Currently only FunctionResponsePart requires a role other than 'user'.\n * @private\n * @param parts Array of parts to pass to the model\n * @returns Array of content items\n */\nfunction assignRoleToPartsAndValidateSendMessageRequest(\n parts: Part[]\n): Content {\n const userContent: Content = { role: 'user', parts: [] };\n const functionContent: Content = { role: 'function', parts: [] };\n let hasUserContent = false;\n let hasFunctionContent = false;\n for (const part of parts) {\n if ('functionResponse' in part) {\n functionContent.parts.push(part);\n hasFunctionContent = true;\n } else {\n userContent.parts.push(part);\n hasUserContent = true;\n }\n }\n\n if (hasUserContent && hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'\n );\n }\n\n if (!hasUserContent && !hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'No Content is provided for sending chat message.'\n );\n }\n\n if (hasUserContent) {\n return userContent;\n }\n\n return functionContent;\n}\n\nexport function formatGenerateContentInput(\n params: GenerateContentRequest | string | Array<string | Part>\n): GenerateContentRequest {\n let formattedRequest: GenerateContentRequest;\n if ((params as GenerateContentRequest).contents) {\n formattedRequest = params as GenerateContentRequest;\n } else {\n // Array or string\n const content = formatNewContent(params as string | Array<string | Part>);\n formattedRequest = { contents: [content] };\n }\n if ((params as GenerateContentRequest).systemInstruction) {\n formattedRequest.systemInstruction = formatSystemInstruction(\n (params as GenerateContentRequest).systemInstruction\n );\n }\n return formattedRequest;\n}\n\n/**\n * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format\n * that is expected from the REST API.\n *\n * @internal\n */\nexport function createPredictRequestBody(\n prompt: string,\n {\n gcsURI,\n imageFormat,\n addWatermark,\n numberOfImages = 1,\n negativePrompt,\n aspectRatio,\n safetyFilterLevel,\n personFilterLevel\n }: ImagenGenerationParams\n): PredictRequestBody {\n // Properties that are undefined will be omitted from the JSON string that is sent in the request.\n const body: PredictRequestBody = {\n instances: [\n {\n prompt\n }\n ],\n parameters: {\n storageUri: gcsURI,\n negativePrompt,\n sampleCount: numberOfImages,\n aspectRatio,\n outputOptions: imageFormat,\n addWatermark,\n safetyFilterLevel,\n personGeneration: personFilterLevel,\n includeRaiReason: true,\n includeSafetyAttributes: true\n }\n };\n return body;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, POSSIBLE_ROLES, Part, Role, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\n\n// https://ai.google.dev/api/rest/v1beta/Content#part\n\nconst VALID_PART_FIELDS: Array<keyof Part> = 
[\n 'text',\n 'inlineData',\n 'functionCall',\n 'functionResponse',\n 'thought',\n 'thoughtSignature'\n];\n\nconst VALID_PARTS_PER_ROLE: { [key in Role]: Array<keyof Part> } = {\n user: ['text', 'inlineData'],\n function: ['functionResponse'],\n model: ['text', 'functionCall', 'thought', 'thoughtSignature'],\n // System instructions shouldn't be in history anyway.\n system: ['text']\n};\n\nconst VALID_PREVIOUS_CONTENT_ROLES: { [key in Role]: Role[] } = {\n user: ['model'],\n function: ['model'],\n model: ['user', 'function'],\n // System instructions shouldn't be in history.\n system: []\n};\n\nexport function validateChatHistory(history: Content[]): void {\n let prevContent: Content | null = null;\n for (const currContent of history) {\n const { role, parts } = currContent;\n if (!prevContent && role !== 'user') {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `First Content should be with role 'user', got ${role}`\n );\n }\n if (!POSSIBLE_ROLES.includes(role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(\n POSSIBLE_ROLES\n )}`\n );\n }\n\n if (!Array.isArray(parts)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content should have 'parts' property with an array of Parts`\n );\n }\n\n if (parts.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each Content should have at least one part`\n );\n }\n\n const countFields: Record<keyof Part, number> = {\n text: 0,\n inlineData: 0,\n functionCall: 0,\n functionResponse: 0,\n thought: 0,\n thoughtSignature: 0,\n executableCode: 0,\n codeExecutionResult: 0\n };\n\n for (const part of parts) {\n for (const key of VALID_PART_FIELDS) {\n if (key in part) {\n countFields[key] += 1;\n }\n }\n }\n const validParts = VALID_PARTS_PER_ROLE[role];\n for (const key of VALID_PART_FIELDS) {\n if (!validParts.includes(key) && countFields[key] > 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't contain '${key}' part`\n );\n }\n }\n\n if (prevContent) {\n const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role];\n if (!validPreviousContentRoles.includes(prevContent.role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't follow '${\n prevContent.role\n }'. 
Valid previous roles: ${JSON.stringify(\n VALID_PREVIOUS_CONTENT_ROLES\n )}`\n );\n }\n }\n prevContent = currContent;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n Content,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n Part,\n RequestOptions,\n StartChatParams\n} from '../types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { formatBlockErrorMessage } from '../requests/response-helpers';\nimport { validateChatHistory } from './chat-session-helpers';\nimport { generateContent, generateContentStream } from './generate-content';\nimport { ApiSettings } from '../types/internal';\nimport { logger } from '../logger';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Do not log a message for this error.\n */\nconst SILENT_ERROR = 'SILENT_ERROR';\n\n/**\n * ChatSession class that enables sending chat messages and stores\n * history of sent and received messages so far.\n *\n * @public\n */\nexport class ChatSession {\n private _apiSettings: ApiSettings;\n private _history: Content[] = [];\n private _sendPromise: Promise<void> = Promise.resolve();\n\n constructor(\n apiSettings: ApiSettings,\n public model: string,\n private chromeAdapter?: ChromeAdapter,\n public params?: StartChatParams,\n public requestOptions?: RequestOptions\n ) {\n this._apiSettings = apiSettings;\n if (params?.history) {\n validateChatHistory(params.history);\n this._history = params.history;\n }\n }\n\n /**\n * Gets the chat history so far. 
Blocked prompts are not added to history.\n * Neither blocked candidates nor the prompts that generated them are added\n * to history.\n */\n async getHistory(): Promise<Content[]> {\n await this._sendPromise;\n return this._history;\n }\n\n /**\n * Sends a chat message and receives a non-streaming\n * {@link GenerateContentResult}\n */\n async sendMessage(\n request: string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n let finalResult = {} as GenerateContentResult;\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() =>\n generateContent(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n )\n )\n .then(result => {\n if (\n result.response.candidates &&\n result.response.candidates.length > 0\n ) {\n this._history.push(newContent);\n const responseContent: Content = {\n parts: result.response.candidates?.[0].content.parts || [],\n // Response seems to come back without a role set.\n role: result.response.candidates?.[0].content.role || 'model'\n };\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(result.response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`\n );\n }\n }\n finalResult = result;\n });\n await this._sendPromise;\n return finalResult;\n }\n\n /**\n * Sends a chat message and receives the response as a\n * {@link GenerateContentStreamResult} containing an iterable stream\n * and a response promise.\n */\n async sendMessageStream(\n request: string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n const streamPromise = generateContentStream(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n );\n\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() => streamPromise)\n // This must be handled to avoid unhandled rejection, but jump\n // to the final catch block with a label to not log this error.\n .catch(_ignored => {\n throw new Error(SILENT_ERROR);\n })\n .then(streamResult => streamResult.response)\n .then(response => {\n if (response.candidates && response.candidates.length > 0) {\n this._history.push(newContent);\n const responseContent = { ...response.candidates[0].content };\n // Response seems to come back without a role set.\n if (!responseContent.role) {\n responseContent.role = 'model';\n }\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessageStream() was unsuccessful. ${blockErrorMessage}. 
Inspect response object for details.`\n );\n }\n }\n })\n .catch(e => {\n // Errors in streamPromise are already catchable by the user as\n // streamPromise is returned.\n // Avoid duplicating the error message in logs.\n if (e.message !== SILENT_ERROR) {\n // Users do not have access to _sendPromise to catch errors\n // downstream from streamPromise, so they should not throw.\n logger.error(e);\n }\n });\n return streamPromise;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n CountTokensRequest,\n CountTokensResponse,\n InferenceMode,\n RequestOptions,\n AIErrorCode\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { ChromeAdapterImpl } from './chrome-adapter';\n\nexport async function countTokensOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n let body: string = '';\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n const mappedParams = GoogleAIMapper.mapCountTokensRequest(params, model);\n body = JSON.stringify(mappedParams);\n } else {\n body = JSON.stringify(params);\n }\n const response = await makeRequest(\n model,\n Task.COUNT_TOKENS,\n apiSettings,\n false,\n body,\n requestOptions\n );\n return response.json();\n}\n\nexport async function countTokens(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n if (\n (chromeAdapter as ChromeAdapterImpl)?.mode === InferenceMode.ONLY_ON_DEVICE\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'countTokens() is not supported for on-device models.'\n );\n }\n return countTokensOnCloud(apiSettings, model, params, requestOptions);\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n generateContent,\n generateContentStream\n} from '../methods/generate-content';\nimport {\n Content,\n CountTokensRequest,\n CountTokensResponse,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n GenerationConfig,\n ModelParams,\n 
Part,\n RequestOptions,\n SafetySetting,\n StartChatParams,\n Tool,\n ToolConfig\n} from '../types';\nimport { ChatSession } from '../methods/chat-session';\nimport { countTokens } from '../methods/count-tokens';\nimport {\n formatGenerateContentInput,\n formatSystemInstruction\n} from '../requests/request-helpers';\nimport { AI } from '../public-types';\nimport { AIModel } from './ai-model';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Class for generative model APIs.\n * @public\n */\nexport class GenerativeModel extends AIModel {\n generationConfig: GenerationConfig;\n safetySettings: SafetySetting[];\n requestOptions?: RequestOptions;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n constructor(\n ai: AI,\n modelParams: ModelParams,\n requestOptions?: RequestOptions,\n private chromeAdapter?: ChromeAdapter\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.safetySettings = modelParams.safetySettings || [];\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n this.requestOptions = requestOptions || {};\n }\n\n /**\n * Makes a single non-streaming call to the model\n * and returns an object containing a single {@link GenerateContentResponse}.\n */\n async generateContent(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContent(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Makes a single streaming call to the model\n * and returns an object containing an iterable stream that iterates\n * over all chunks in the streaming response as well as\n * a promise that returns the final aggregated response.\n */\n async generateContentStream(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContentStream(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Gets a new {@link ChatSession} instance which can be used for\n * multi-turn chats.\n */\n startChat(startChatParams?: StartChatParams): ChatSession {\n return new ChatSession(\n this._apiSettings,\n this.model,\n this.chromeAdapter,\n {\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n /**\n * Overrides params inherited from GenerativeModel with those explicitly set in the\n * StartChatParams. 
For example, if startChatParams.generationConfig is set, it'll override\n * this.generationConfig.\n */\n ...startChatParams\n },\n this.requestOptions\n );\n }\n\n /**\n * Counts the tokens in the provided request.\n */\n async countTokens(\n request: CountTokensRequest | string | Array<string | Part>\n ): Promise<CountTokensResponse> {\n const formattedParams = formatGenerateContentInput(request);\n return countTokens(\n this._apiSettings,\n this.model,\n formattedParams,\n this.chromeAdapter\n );\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n AIErrorCode,\n FunctionResponse,\n GenerativeContentBlob,\n LiveResponseType,\n LiveServerContent,\n LiveServerToolCall,\n LiveServerToolCallCancellation,\n Part\n} from '../public-types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { AIError } from '../errors';\nimport { WebSocketHandler } from '../websocket';\nimport { logger } from '../logger';\nimport {\n _LiveClientContent,\n _LiveClientRealtimeInput,\n _LiveClientToolResponse\n} from '../types/live-responses';\n\n/**\n * Represents an active, real-time, bidirectional conversation with the model.\n *\n * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.\n *\n * @beta\n */\nexport class LiveSession {\n /**\n * Indicates whether this Live session is closed.\n *\n * @beta\n */\n isClosed = false;\n /**\n * Indicates whether this Live session is being controlled by an `AudioConversationController`.\n *\n * @beta\n */\n inConversation = false;\n\n /**\n * @internal\n */\n constructor(\n private webSocketHandler: WebSocketHandler,\n private serverMessages: AsyncGenerator<unknown>\n ) {}\n\n /**\n * Sends content to the server.\n *\n * @param request - The message to send to the model.\n * @param turnComplete - Indicates if the turn is complete. 
Defaults to false.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async send(\n request: string | Array<string | Part>,\n turnComplete = true\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const newContent = formatNewContent(request);\n\n const message: _LiveClientContent = {\n clientContent: {\n turns: [newContent],\n turnComplete\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends text to the server in realtime.\n *\n * @example\n * ```javascript\n * liveSession.sendTextRealtime(\"Hello, how are you?\");\n * ```\n *\n * @param text - The text data to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendTextRealtime(text: string): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n text\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends audio data to the server in realtime.\n *\n * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz\n * little-endian.\n *\n * @example\n * ```javascript\n * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.\n * const blob = { mimeType: \"audio/pcm\", data: pcmData };\n * liveSession.sendAudioRealtime(blob);\n * ```\n *\n * @param blob - The base64-encoded PCM data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendAudioRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n audio: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends video data to the server in realtime.\n *\n * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It\n * is recommended to set `mimeType` to `image/jpeg`.\n *\n * @example\n * ```javascript\n * // const videoFrame = ... 
base64-encoded JPEG data\n * const blob = { mimeType: \"image/jpeg\", data: videoFrame };\n * liveSession.sendVideoRealtime(blob);\n * ```\n * @param blob - The base64-encoded video data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendVideoRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n video: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends function responses to the server.\n *\n * @param functionResponses - The function responses to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendFunctionResponses(\n functionResponses: FunctionResponse[]\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientToolResponse = {\n toolResponse: {\n functionResponses\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Yields messages received from the server.\n * This can only be used by one consumer at a time.\n *\n * @returns An `AsyncGenerator` that yields server messages as they arrive.\n * @throws If the session is already closed, or if we receive a response that we don't support.\n *\n * @beta\n */\n async *receive(): AsyncGenerator<\n LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation\n > {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot read from a Live session that is closed. Try starting a new Live session.'\n );\n }\n for await (const message of this.serverMessages) {\n if (message && typeof message === 'object') {\n if (LiveResponseType.SERVER_CONTENT in message) {\n yield {\n type: 'serverContent',\n ...(message as { serverContent: Omit<LiveServerContent, 'type'> })\n .serverContent\n } as LiveServerContent;\n } else if (LiveResponseType.TOOL_CALL in message) {\n yield {\n type: 'toolCall',\n ...(message as { toolCall: Omit<LiveServerToolCall, 'type'> })\n .toolCall\n } as LiveServerToolCall;\n } else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) {\n yield {\n type: 'toolCallCancellation',\n ...(\n message as {\n toolCallCancellation: Omit<\n LiveServerToolCallCancellation,\n 'type'\n >;\n }\n ).toolCallCancellation\n } as LiveServerToolCallCancellation;\n } else {\n logger.warn(\n `Received an unknown message type from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n } else {\n logger.warn(\n `Received an invalid message from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n }\n }\n\n /**\n * Closes this session.\n * All methods on this session will throw an error once this resolves.\n *\n * @beta\n */\n async close(): Promise<void> {\n if (!this.isClosed) {\n this.isClosed = true;\n await this.webSocketHandler.close(1000, 'Client closed session.');\n }\n }\n\n /**\n * Sends realtime input to the server.\n *\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * @param mediaChunks - The media chunks to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed 
and cannot be used.'\n );\n }\n\n // The backend does not support sending more than one mediaChunk in one message.\n // Work around this limitation by sending mediaChunks in separate messages.\n mediaChunks.forEach(mediaChunk => {\n const message: _LiveClientRealtimeInput = {\n realtimeInput: { mediaChunks: [mediaChunk] }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n });\n }\n\n /**\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * Sends a stream of {@link GenerativeContentBlob}.\n *\n * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaStream(\n mediaChunkStream: ReadableStream<GenerativeContentBlob>\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const reader = mediaChunkStream.getReader();\n while (true) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n break;\n } else if (!value) {\n throw new Error('Missing chunk in reader, but reader is not done.');\n }\n\n await this.sendMediaChunks([value]);\n } catch (e) {\n // Re-throw any errors that occur during stream consumption or sending.\n const message =\n e instanceof Error ? e.message : 'Error processing media stream.';\n throw new AIError(AIErrorCode.REQUEST_ERROR, message);\n }\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIModel } from './ai-model';\nimport { LiveSession } from '../methods/live-session';\nimport { AIError } from '../errors';\nimport {\n AI,\n AIErrorCode,\n BackendType,\n Content,\n LiveGenerationConfig,\n LiveModelParams,\n Tool,\n ToolConfig\n} from '../public-types';\nimport { WebSocketHandler } from '../websocket';\nimport { WebSocketUrl } from '../requests/request';\nimport { formatSystemInstruction } from '../requests/request-helpers';\nimport { _LiveClientSetup } from '../types/live-responses';\n\n/**\n * Class for Live generative model APIs. 
The Live API enables low-latency, two-way multimodal\n * interactions with Gemini.\n *\n * This class should only be instantiated with {@link getLiveGenerativeModel}.\n *\n * @beta\n */\nexport class LiveGenerativeModel extends AIModel {\n generationConfig: LiveGenerationConfig;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n /**\n * @internal\n */\n constructor(\n ai: AI,\n modelParams: LiveModelParams,\n /**\n * @internal\n */\n private _webSocketHandler: WebSocketHandler\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n }\n\n /**\n * Starts a {@link LiveSession}.\n *\n * @returns A {@link LiveSession}.\n * @throws If the connection failed to be established with the server.\n *\n * @beta\n */\n async connect(): Promise<LiveSession> {\n const url = new WebSocketUrl(this._apiSettings);\n await this._webSocketHandler.connect(url.toString());\n\n let fullModelPath: string;\n if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n fullModelPath = `projects/${this._apiSettings.project}/${this.model}`;\n } else {\n fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`;\n }\n\n // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API,\n // but the backend expects them to be in the `setup` message.\n const {\n inputAudioTranscription,\n outputAudioTranscription,\n ...generationConfig\n } = this.generationConfig;\n\n const setupMessage: _LiveClientSetup = {\n setup: {\n model: fullModelPath,\n generationConfig,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n inputAudioTranscription,\n outputAudioTranscription\n }\n };\n\n try {\n // Begin listening for server messages, and begin the handshake by sending the 'setupMessage'\n const serverMessages = this._webSocketHandler.listen();\n this._webSocketHandler.send(JSON.stringify(setupMessage));\n\n // Verify we received the handshake response 'setupComplete'\n const firstMessage = (await serverMessages.next()).value;\n if (\n !firstMessage ||\n !(typeof firstMessage === 'object') ||\n !('setupComplete' in firstMessage)\n ) {\n await this._webSocketHandler.close(1011, 'Handshake failure');\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'Server connection handshake failed. 
The server did not respond with a setupComplete message.'\n );\n }\n\n return new LiveSession(this._webSocketHandler, serverMessages);\n } catch (e) {\n // Ensure connection is closed on any setup error\n await this._webSocketHandler.close();\n throw e;\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AI } from '../public-types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createPredictRequestBody } from '../requests/request-helpers';\nimport { handlePredictResponse } from '../requests/response-helpers';\nimport {\n ImagenGCSImage,\n ImagenGenerationConfig,\n ImagenInlineImage,\n RequestOptions,\n ImagenModelParams,\n ImagenGenerationResponse,\n ImagenSafetySettings\n} from '../types';\nimport { AIModel } from './ai-model';\n\n/**\n * Class for Imagen model APIs.\n *\n * This class provides methods for generating images using the Imagen model.\n *\n * @example\n * ```javascript\n * const imagen = new ImagenModel(\n * ai,\n * {\n * model: 'imagen-3.0-generate-002'\n * }\n * );\n *\n * const response = await imagen.generateImages('A photo of a cat');\n * if (response.images.length > 0) {\n * console.log(response.images[0].bytesBase64Encoded);\n * }\n * ```\n *\n * @public\n */\nexport class ImagenModel extends AIModel {\n /**\n * The Imagen generation configuration.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n\n /**\n * Constructs a new instance of the {@link ImagenModel} class.\n *\n * @param ai - an {@link AI} instance.\n * @param modelParams - Parameters to use when making requests to Imagen.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n */\n constructor(\n ai: AI,\n modelParams: ImagenModelParams,\n public requestOptions?: RequestOptions\n ) {\n const { model, generationConfig, safetySettings } = modelParams;\n super(ai, model);\n this.generationConfig = generationConfig;\n this.safetySettings = safetySettings;\n }\n\n /**\n * Generates images using the Imagen model and returns them as\n * base64-encoded strings.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the generated images.\n *\n * @throws If the request to generate images fails. 
This happens if the\n * prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n *\n * @public\n */\n async generateImages(\n prompt: string\n ): Promise<ImagenGenerationResponse<ImagenInlineImage>> {\n const body = createPredictRequestBody(prompt, {\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenInlineImage>(response);\n }\n\n /**\n * Generates images to Cloud Storage for Firebase using the Imagen model.\n *\n * @internal This method is temporarily internal.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @param gcsURI - The URI of a file stored in a Cloud Storage for Firebase bucket.\n * This should be a directory. For example, `gs://my-bucket/my-directory/`.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the URLs of the generated images.\n *\n * @throws If the request to generate images fails. This happens if\n * the prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n */\n async generateImagesGCS(\n prompt: string,\n gcsURI: string\n ): Promise<ImagenGenerationResponse<ImagenGCSImage>> {\n const body = createPredictRequestBody(prompt, {\n gcsURI,\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenGCSImage>(response);\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport { AIErrorCode } from './types';\n\n/**\n * A standardized interface for interacting with a WebSocket connection.\n * This abstraction allows the SDK to use the appropriate WebSocket implementation\n * for the current JS environment (Browser vs. 
Node) without\n * changing the core logic of the `LiveSession`.\n * @internal\n */\n\nexport interface WebSocketHandler {\n /**\n * Establishes a connection to the given URL.\n *\n * @param url The WebSocket URL (e.g., wss://...).\n * @returns A promise that resolves on successful connection or rejects on failure.\n */\n connect(url: string): Promise<void>;\n\n /**\n * Sends data over the WebSocket.\n *\n * @param data The string or binary data to send.\n */\n send(data: string | ArrayBuffer): void;\n\n /**\n * Returns an async generator that yields parsed JSON objects from the server.\n * The yielded type is `unknown` because the handler cannot guarantee the shape of the data.\n * The consumer is responsible for type validation.\n * The generator terminates when the connection is closed.\n *\n * @returns A generator that allows consumers to pull messages using a `for await...of` loop.\n */\n listen(): AsyncGenerator<unknown>;\n\n /**\n * Closes the WebSocket connection.\n *\n * @param code - A numeric status code explaining why the connection is closing.\n * @param reason - A human-readable string explaining why the connection is closing.\n */\n close(code?: number, reason?: string): Promise<void>;\n}\n\n/**\n * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.\n *\n * @internal\n */\nexport class WebSocketHandlerImpl implements WebSocketHandler {\n private ws?: WebSocket;\n\n constructor() {\n if (typeof WebSocket === 'undefined') {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'The WebSocket API is not available in this environment. ' +\n 'The \"Live\" feature is not supported here. It is supported in ' +\n 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'\n );\n }\n }\n\n connect(url: string): Promise<void> {\n return new Promise((resolve, reject) => {\n this.ws = new WebSocket(url);\n this.ws.binaryType = 'blob'; // Only important to set in Node\n this.ws.addEventListener('open', () => resolve(), { once: true });\n this.ws.addEventListener(\n 'error',\n () =>\n reject(\n new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error event raised on WebSocket`\n )\n ),\n { once: true }\n );\n this.ws!.addEventListener('close', (closeEvent: CloseEvent) => {\n if (closeEvent.reason) {\n logger.warn(\n `WebSocket connection closed by server. Reason: '${closeEvent.reason}'`\n );\n }\n });\n });\n }\n\n send(data: string | ArrayBuffer): void {\n if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {\n throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.');\n }\n this.ws.send(data);\n }\n\n async *listen(): AsyncGenerator<unknown> {\n if (!this.ws) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'WebSocket is not connected.'\n );\n }\n\n const messageQueue: unknown[] = [];\n const errorQueue: Error[] = [];\n let resolvePromise: (() => void) | null = null;\n let isClosed = false;\n\n const messageListener = async (event: MessageEvent): Promise<void> => {\n let data: string;\n if (event.data instanceof Blob) {\n data = await event.data.text();\n } else if (typeof event.data === 'string') {\n data = event.data;\n } else {\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`\n )\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n return;\n }\n\n try {\n const obj = JSON.parse(data) as unknown;\n messageQueue.push(obj);\n } catch (e) {\n const err = e as Error;\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing WebSocket message to JSON: ${err.message}`\n )\n );\n }\n\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const errorListener = (): void => {\n errorQueue.push(\n new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const closeListener = (event: CloseEvent): void => {\n if (event.reason) {\n logger.warn(\n `WebSocket connection closed by the server with reason: ${event.reason}`\n );\n }\n isClosed = true;\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n // Clean up listeners to prevent memory leaks\n this.ws?.removeEventListener('message', messageListener);\n this.ws?.removeEventListener('close', closeListener);\n this.ws?.removeEventListener('error', errorListener);\n };\n\n this.ws.addEventListener('message', messageListener);\n this.ws.addEventListener('close', closeListener);\n this.ws.addEventListener('error', errorListener);\n\n while (!isClosed) {\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n if (messageQueue.length > 0) {\n yield messageQueue.shift()!;\n } else {\n await new Promise<void>(resolve => {\n resolvePromise = resolve;\n });\n }\n }\n\n // If the loop terminated because isClosed is true, check for any final errors\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n }\n\n close(code?: number, reason?: string): Promise<void> {\n return new Promise(resolve => {\n if (!this.ws) {\n return resolve();\n }\n\n this.ws.addEventListener('close', () => resolve(), { once: true });\n // Calling 'close' during these states results in an error.\n if (\n this.ws.readyState === WebSocket.CLOSED ||\n this.ws.readyState === WebSocket.CONNECTING\n ) {\n return resolve();\n }\n\n if (this.ws.readyState !== WebSocket.CLOSING) {\n this.ws.close(code, reason);\n }\n });\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode } from '../types';\nimport {\n SchemaInterface,\n SchemaType,\n SchemaParams,\n SchemaRequest\n} from '../types/schema';\n\n/**\n * Parent class encompassing all Schema types, with static methods that\n * allow building specific Schema types. This class can be converted with\n * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.\n * (This string conversion is automatically done when calling SDK methods.)\n * @public\n */\nexport abstract class Schema implements SchemaInterface {\n /**\n * Optional. 
The type of the property.\n * This can only be undefined when using `anyOf` schemas, which do not have an\n * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.\n */\n type?: SchemaType;\n /** Optional. The format of the property.\n * Supported formats:<br/>\n * <ul>\n * <li>for NUMBER type: \"float\", \"double\"</li>\n * <li>for INTEGER type: \"int32\", \"int64\"</li>\n * <li>for STRING type: \"email\", \"byte\", etc</li>\n * </ul>\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /** Optional. The items of the property. */\n items?: SchemaInterface;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Whether the property is nullable. Defaults to false. */\n nullable: boolean;\n /** Optional. The example of the property. */\n example?: unknown;\n /**\n * Allows user to add other schema properties that have not yet\n * been officially added to the SDK.\n */\n [key: string]: unknown;\n\n constructor(schemaParams: SchemaInterface) {\n // TODO(dlarocque): Enforce this with union types\n if (!schemaParams.type && !schemaParams.anyOf) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"A schema must have either a 'type' or an 'anyOf' array of sub-schemas.\"\n );\n }\n // eslint-disable-next-line guard-for-in\n for (const paramKey in schemaParams) {\n this[paramKey] = schemaParams[paramKey];\n }\n // Ensure these are explicitly set to avoid TS errors.\n this.type = schemaParams.type;\n this.format = schemaParams.hasOwnProperty('format')\n ? schemaParams.format\n : undefined;\n this.nullable = schemaParams.hasOwnProperty('nullable')\n ? 
!!schemaParams.nullable\n : false;\n }\n\n /**\n * Defines how this Schema should be serialized as JSON.\n * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj: { type?: SchemaType; [key: string]: unknown } = {\n type: this.type\n };\n for (const prop in this) {\n if (this.hasOwnProperty(prop) && this[prop] !== undefined) {\n if (prop !== 'required' || this.type === SchemaType.OBJECT) {\n obj[prop] = this[prop];\n }\n }\n }\n return obj as SchemaRequest;\n }\n\n static array(arrayParams: SchemaParams & { items: Schema }): ArraySchema {\n return new ArraySchema(arrayParams, arrayParams.items);\n }\n\n static object(\n objectParams: SchemaParams & {\n properties: {\n [k: string]: Schema;\n };\n optionalProperties?: string[];\n }\n ): ObjectSchema {\n return new ObjectSchema(\n objectParams,\n objectParams.properties,\n objectParams.optionalProperties\n );\n }\n\n // eslint-disable-next-line id-blacklist\n static string(stringParams?: SchemaParams): StringSchema {\n return new StringSchema(stringParams);\n }\n\n static enumString(\n stringParams: SchemaParams & { enum: string[] }\n ): StringSchema {\n return new StringSchema(stringParams, stringParams.enum);\n }\n\n static integer(integerParams?: SchemaParams): IntegerSchema {\n return new IntegerSchema(integerParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static number(numberParams?: SchemaParams): NumberSchema {\n return new NumberSchema(numberParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static boolean(booleanParams?: SchemaParams): BooleanSchema {\n return new BooleanSchema(booleanParams);\n }\n\n static anyOf(\n anyOfParams: SchemaParams & { anyOf: TypedSchema[] }\n ): AnyOfSchema {\n return new AnyOfSchema(anyOfParams);\n }\n}\n\n/**\n * A type that includes all specific Schema types.\n * @public\n */\nexport type TypedSchema =\n | IntegerSchema\n | NumberSchema\n | StringSchema\n | BooleanSchema\n | ObjectSchema\n | ArraySchema\n | AnyOfSchema;\n\n/**\n * Schema class for \"integer\" types.\n * @public\n */\nexport class IntegerSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.INTEGER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"number\" types.\n * @public\n */\nexport class NumberSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.NUMBER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"boolean\" types.\n * @public\n */\nexport class BooleanSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.BOOLEAN,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"string\" types. 
Can be used with or without\n * enum values.\n * @public\n */\nexport class StringSchema extends Schema {\n enum?: string[];\n constructor(schemaParams?: SchemaParams, enumValues?: string[]) {\n super({\n type: SchemaType.STRING,\n ...schemaParams\n });\n this.enum = enumValues;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n if (this.enum) {\n obj['enum'] = this.enum;\n }\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class for \"array\" types.\n * The `items` param should refer to the type of item that can be a member\n * of the array.\n * @public\n */\nexport class ArraySchema extends Schema {\n constructor(schemaParams: SchemaParams, public items: TypedSchema) {\n super({\n type: SchemaType.ARRAY,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.items = this.items.toJSON();\n return obj;\n }\n}\n\n/**\n * Schema class for \"object\" types.\n * The `properties` param must be a map of `Schema` objects.\n * @public\n */\nexport class ObjectSchema extends Schema {\n constructor(\n schemaParams: SchemaParams,\n public properties: {\n [k: string]: TypedSchema;\n },\n public optionalProperties: string[] = []\n ) {\n super({\n type: SchemaType.OBJECT,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.properties = { ...this.properties };\n const required = [];\n if (this.optionalProperties) {\n for (const propertyKey of this.optionalProperties) {\n if (!this.properties.hasOwnProperty(propertyKey)) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n `Property \"${propertyKey}\" specified in \"optionalProperties\" does not exist.`\n );\n }\n }\n }\n for (const propertyKey in this.properties) {\n if (this.properties.hasOwnProperty(propertyKey)) {\n obj.properties[propertyKey] = this.properties[\n propertyKey\n ].toJSON() as SchemaRequest;\n if (!this.optionalProperties.includes(propertyKey)) {\n required.push(propertyKey);\n }\n }\n }\n if (required.length > 0) {\n obj.required = required;\n }\n delete obj.optionalProperties;\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class representing a value that can conform to any of the provided sub-schemas. 
This is\n * useful when a field can accept multiple distinct types or structures.\n * @public\n */\nexport class AnyOfSchema extends Schema {\n anyOf: TypedSchema[]; // Re-define field to narrow to required type\n constructor(schemaParams: SchemaParams & { anyOf: TypedSchema[] }) {\n if (schemaParams.anyOf.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"The 'anyOf' array must not be empty.\"\n );\n }\n super({\n ...schemaParams,\n type: undefined // anyOf schemas do not have an explicit type\n });\n this.anyOf = schemaParams.anyOf;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n // Ensure the 'anyOf' property contains serialized SchemaRequest objects.\n if (this.anyOf && Array.isArray(this.anyOf)) {\n obj.anyOf = (this.anyOf as TypedSchema[]).map(s => s.toJSON());\n }\n return obj;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { logger } from '../logger';\n\n/**\n * Defines the image format for images generated by Imagen.\n *\n * Use this class to specify the desired format (JPEG or PNG) and compression quality\n * for images generated by Imagen. This is typically included as part of\n * {@link ImagenModelParams}.\n *\n * @example\n * ```javascript\n * const imagenModelParams = {\n * // ... 
other ImagenModelParams\n * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.\n * }\n * ```\n *\n * @public\n */\nexport class ImagenImageFormat {\n /**\n * The MIME type.\n */\n mimeType: string;\n /**\n * The level of compression (a number between 0 and 100).\n */\n compressionQuality?: number;\n\n private constructor() {\n this.mimeType = 'image/png';\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a JPEG image.\n *\n * @param compressionQuality - The level of compression (a number between 0 and 100).\n * @returns An {@link ImagenImageFormat} object for a JPEG image.\n *\n * @public\n */\n static jpeg(compressionQuality?: number): ImagenImageFormat {\n if (\n compressionQuality &&\n (compressionQuality < 0 || compressionQuality > 100)\n ) {\n logger.warn(\n `Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`\n );\n }\n return { mimeType: 'image/jpeg', compressionQuality };\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a PNG image.\n *\n * @returns An {@link ImagenImageFormat} object for a PNG image.\n *\n * @public\n */\n static png(): ImagenImageFormat {\n return { mimeType: 'image/png' };\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport {\n AIErrorCode,\n FunctionCall,\n FunctionResponse,\n GenerativeContentBlob,\n LiveServerContent\n} from '../types';\nimport { LiveSession } from './live-session';\nimport { Deferred } from '@firebase/util';\n\nconst SERVER_INPUT_SAMPLE_RATE = 16_000;\nconst SERVER_OUTPUT_SAMPLE_RATE = 24_000;\n\nconst AUDIO_PROCESSOR_NAME = 'audio-processor';\n\n/**\n * The JS for an `AudioWorkletProcessor`.\n * This processor is responsible for taking raw audio from the microphone,\n * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread.\n *\n * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor\n *\n * It is defined as a string here so that it can be converted into a `Blob`\n * and loaded at runtime.\n */\nconst audioProcessorWorkletString = `\n class AudioProcessor extends AudioWorkletProcessor {\n constructor(options) {\n super();\n this.targetSampleRate = options.processorOptions.targetSampleRate;\n // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope,\n // representing the native sample rate of the AudioContext.\n this.inputSampleRate = sampleRate;\n }\n\n /**\n * This method is called by the browser's audio engine for each block of audio data.\n * Input is a single input, with a single channel (input[0][0]).\n */\n process(inputs) {\n const input = inputs[0];\n if (input && input.length > 0 && input[0].length > 0) {\n const pcmData = input[0]; // Float32Array of raw audio samples.\n \n // Simple linear interpolation for resampling.\n const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate 
/ this.inputSampleRate));\n const ratio = pcmData.length / resampled.length;\n for (let i = 0; i < resampled.length; i++) {\n resampled[i] = pcmData[Math.floor(i * ratio)];\n }\n\n // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767)\n const resampledInt16 = new Int16Array(resampled.length);\n for (let i = 0; i < resampled.length; i++) {\n const sample = Math.max(-1, Math.min(1, resampled[i]));\n if (sample < 0) {\n resampledInt16[i] = sample * 32768;\n } else {\n resampledInt16[i] = sample * 32767;\n }\n }\n \n this.port.postMessage(resampledInt16);\n }\n // Return true to keep the processor alive and processing the next audio block.\n return true;\n }\n }\n\n // Register the processor with a name that can be used to instantiate it from the main thread.\n registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor);\n`;\n\n/**\n * A controller for managing an active audio conversation.\n *\n * @beta\n */\nexport interface AudioConversationController {\n /**\n * Stops the audio conversation, closes the microphone connection, and\n * cleans up resources. Returns a promise that resolves when cleanup is complete.\n */\n stop: () => Promise<void>;\n}\n\n/**\n * Options for {@link startAudioConversation}.\n *\n * @beta\n */\nexport interface StartAudioConversationOptions {\n /**\n * An async handler that is called when the model requests a function to be executed.\n * The handler should perform the function call and return the result as a `Part`,\n * which will then be sent back to the model.\n */\n functionCallingHandler?: (\n functionCalls: FunctionCall[]\n ) => Promise<FunctionResponse>;\n}\n\n/**\n * Dependencies needed by the {@link AudioConversationRunner}.\n *\n * @internal\n */\ninterface RunnerDependencies {\n audioContext: AudioContext;\n mediaStream: MediaStream;\n sourceNode: MediaStreamAudioSourceNode;\n workletNode: AudioWorkletNode;\n}\n\n/**\n * Encapsulates the core logic of an audio conversation.\n *\n * @internal\n */\nexport class AudioConversationRunner {\n /** A flag to indicate if the conversation has been stopped. */\n private isStopped = false;\n /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */\n private readonly stopDeferred = new Deferred<void>();\n /** A promise that tracks the lifecycle of the main `runReceiveLoop`. */\n private readonly receiveLoopPromise: Promise<void>;\n\n /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */\n private readonly playbackQueue: ArrayBuffer[] = [];\n /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */\n private scheduledSources: AudioBufferSourceNode[] = [];\n /** A high-precision timeline pointer for scheduling gapless audio playback. */\n private nextStartTime = 0;\n /** A mutex to prevent the playback processing loop from running multiple times concurrently. 
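The worklet above reduces each microphone block to the server's 16 kHz input rate and quantizes the Float32 samples to 16-bit PCM before posting them to the main thread. The same two steps, pulled out of the worklet as a small standalone sketch (the helper name `toPcm16` is illustrative and not part of this package):

```typescript
/**
 * Downsamples a block of Float32 audio to a target sample rate and quantizes it
 * to 16-bit PCM, mirroring the per-block processing done by the worklet above.
 */
function toPcm16(
  input: Float32Array,
  inputSampleRate: number,
  targetSampleRate: number
): Int16Array {
  // Nearest-sample decimation, as in the worklet's resampling loop.
  const outLength = Math.round(
    (input.length * targetSampleRate) / inputSampleRate
  );
  const ratio = input.length / outLength;
  const out = new Int16Array(outLength);
  for (let i = 0; i < outLength; i++) {
    // Clamp to [-1, 1], then scale into the signed 16-bit range.
    const sample = Math.max(-1, Math.min(1, input[Math.floor(i * ratio)]));
    out[i] = sample < 0 ? sample * 32768 : sample * 32767;
  }
  return out;
}

// A 128-sample block from a 48 kHz AudioContext becomes ~43 samples at 16 kHz.
const pcm16 = toPcm16(new Float32Array(128).fill(0.25), 48_000, 16_000);
```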
*/\n private isPlaybackLoopRunning = false;\n\n constructor(\n private readonly liveSession: LiveSession,\n private readonly options: StartAudioConversationOptions,\n private readonly deps: RunnerDependencies\n ) {\n this.liveSession.inConversation = true;\n\n // Start listening for messages from the server.\n this.receiveLoopPromise = this.runReceiveLoop().finally(() =>\n this.cleanup()\n );\n\n // Set up the handler for receiving processed audio data from the worklet.\n // Message data has been resampled to 16kHz 16-bit PCM.\n this.deps.workletNode.port.onmessage = event => {\n if (this.isStopped) {\n return;\n }\n\n const pcm16 = event.data as Int16Array;\n const base64 = btoa(\n String.fromCharCode.apply(\n null,\n Array.from(new Uint8Array(pcm16.buffer))\n )\n );\n\n const chunk: GenerativeContentBlob = {\n mimeType: 'audio/pcm',\n data: base64\n };\n void this.liveSession.sendAudioRealtime(chunk);\n };\n }\n\n /**\n * Stops the conversation and unblocks the main receive loop.\n */\n async stop(): Promise<void> {\n if (this.isStopped) {\n return;\n }\n this.isStopped = true;\n this.stopDeferred.resolve(); // Unblock the receive loop\n await this.receiveLoopPromise; // Wait for the loop and cleanup to finish\n }\n\n /**\n * Cleans up all audio resources (nodes, stream tracks, context) and marks the\n * session as no longer in a conversation.\n */\n private cleanup(): void {\n this.interruptPlayback(); // Ensure all audio is stopped on final cleanup.\n this.deps.workletNode.port.onmessage = null;\n this.deps.workletNode.disconnect();\n this.deps.sourceNode.disconnect();\n this.deps.mediaStream.getTracks().forEach(track => track.stop());\n if (this.deps.audioContext.state !== 'closed') {\n void this.deps.audioContext.close();\n }\n this.liveSession.inConversation = false;\n }\n\n /**\n * Adds audio data to the queue and ensures the playback loop is running.\n */\n private enqueueAndPlay(audioData: ArrayBuffer): void {\n this.playbackQueue.push(audioData);\n // Will no-op if it's already running.\n void this.processPlaybackQueue();\n }\n\n /**\n * Stops all current and pending audio playback and clears the queue. This is\n * called when the server indicates the model's speech was interrupted with\n * `LiveServerContent.modelTurn.interrupted`.\n */\n private interruptPlayback(): void {\n // Stop all sources that have been scheduled. 
The onended event will fire for each,\n // which will clean up the scheduledSources array.\n [...this.scheduledSources].forEach(source => source.stop(0));\n\n // Clear the internal buffer of unprocessed audio chunks.\n this.playbackQueue.length = 0;\n\n // Reset the playback clock to start fresh.\n this.nextStartTime = this.deps.audioContext.currentTime;\n }\n\n /**\n * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.\n */\n private async processPlaybackQueue(): Promise<void> {\n if (this.isPlaybackLoopRunning) {\n return;\n }\n this.isPlaybackLoopRunning = true;\n\n while (this.playbackQueue.length > 0 && !this.isStopped) {\n const pcmRawBuffer = this.playbackQueue.shift()!;\n try {\n const pcm16 = new Int16Array(pcmRawBuffer);\n const frameCount = pcm16.length;\n\n const audioBuffer = this.deps.audioContext.createBuffer(\n 1,\n frameCount,\n SERVER_OUTPUT_SAMPLE_RATE\n );\n\n // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API.\n const channelData = audioBuffer.getChannelData(0);\n for (let i = 0; i < frameCount; i++) {\n channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0]\n }\n\n const source = this.deps.audioContext.createBufferSource();\n source.buffer = audioBuffer;\n source.connect(this.deps.audioContext.destination);\n\n // Track the source and set up a handler to remove it from tracking when it finishes.\n this.scheduledSources.push(source);\n source.onended = () => {\n this.scheduledSources = this.scheduledSources.filter(\n s => s !== source\n );\n };\n\n // To prevent gaps, schedule the next chunk to start either now (if we're catching up)\n // or exactly when the previous chunk is scheduled to end.\n this.nextStartTime = Math.max(\n this.deps.audioContext.currentTime,\n this.nextStartTime\n );\n source.start(this.nextStartTime);\n\n // Update the schedule for the *next* chunk.\n this.nextStartTime += audioBuffer.duration;\n } catch (e) {\n logger.error('Error playing audio:', e);\n }\n }\n\n this.isPlaybackLoopRunning = false;\n }\n\n /**\n * The main loop that listens for and processes messages from the server.\n */\n private async runReceiveLoop(): Promise<void> {\n const messageGenerator = this.liveSession.receive();\n while (!this.isStopped) {\n const result = await Promise.race([\n messageGenerator.next(),\n this.stopDeferred.promise\n ]);\n\n if (this.isStopped || !result || result.done) {\n break;\n }\n\n const message = result.value;\n if (message.type === 'serverContent') {\n const serverContent = message as LiveServerContent;\n if (serverContent.interrupted) {\n this.interruptPlayback();\n }\n\n const audioPart = serverContent.modelTurn?.parts.find(part =>\n part.inlineData?.mimeType.startsWith('audio/')\n );\n if (audioPart?.inlineData) {\n const audioData = Uint8Array.from(\n atob(audioPart.inlineData.data),\n c => c.charCodeAt(0)\n ).buffer;\n this.enqueueAndPlay(audioData);\n }\n } else if (message.type === 'toolCall') {\n if (!this.options.functionCallingHandler) {\n logger.warn(\n 'Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. 
Ignoring tool call.'\n );\n } else {\n try {\n const functionResponse = await this.options.functionCallingHandler(\n message.functionCalls\n );\n if (!this.isStopped) {\n void this.liveSession.sendFunctionResponses([functionResponse]);\n }\n } catch (e) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Function calling handler failed: ${(e as Error).message}`\n );\n }\n }\n }\n }\n }\n}\n\n/**\n * Starts a real-time, bidirectional audio conversation with the model. This helper function manages\n * the complexities of microphone access, audio recording, playback, and interruptions.\n *\n * @remarks Important: This function must be called in response to a user gesture\n * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.\n *\n * @example\n * ```javascript\n * const liveSession = await model.connect();\n * let conversationController;\n *\n * // This function must be called from within a click handler.\n * async function startConversation() {\n * try {\n * conversationController = await startAudioConversation(liveSession);\n * } catch (e) {\n * // Handle AI-specific errors\n * if (e instanceof AIError) {\n * console.error(\"AI Error:\", e.message);\n * }\n * // Handle microphone permission and hardware errors\n * else if (e instanceof DOMException) {\n * console.error(\"Microphone Error:\", e.message);\n * }\n * // Handle other unexpected errors\n * else {\n * console.error(\"An unexpected error occurred:\", e);\n * }\n * }\n * }\n *\n * // Later, to stop the conversation:\n * // if (conversationController) {\n * // await conversationController.stop();\n * // }\n * ```\n *\n * @param liveSession - An active {@link LiveSession} instance.\n * @param options - Configuration options for the audio conversation.\n * @returns A `Promise` that resolves with an {@link AudioConversationController}.\n * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).\n * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.\n *\n * @beta\n */\nexport async function startAudioConversation(\n liveSession: LiveSession,\n options: StartAudioConversationOptions = {}\n): Promise<AudioConversationController> {\n if (liveSession.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot start audio conversation on a closed LiveSession.'\n );\n }\n\n if (liveSession.inConversation) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'An audio conversation is already in progress for this session.'\n );\n }\n\n // Check for necessary Web API support.\n if (\n typeof AudioWorkletNode === 'undefined' ||\n typeof AudioContext === 'undefined' ||\n typeof navigator === 'undefined' ||\n !navigator.mediaDevices\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'\n );\n }\n\n let audioContext: AudioContext | undefined;\n try {\n // 1. Set up the audio context. 
This must be in response to a user gesture.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy\n audioContext = new AudioContext();\n if (audioContext.state === 'suspended') {\n await audioContext.resume();\n }\n\n // 2. Prompt for microphone access and get the media stream.\n // This can throw a variety of permission or hardware-related errors.\n const mediaStream = await navigator.mediaDevices.getUserMedia({\n audio: true\n });\n\n // 3. Load the AudioWorklet processor.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet\n const workletBlob = new Blob([audioProcessorWorkletString], {\n type: 'application/javascript'\n });\n const workletURL = URL.createObjectURL(workletBlob);\n await audioContext.audioWorklet.addModule(workletURL);\n\n // 4. Create the audio graph: Microphone -> Source Node -> Worklet Node\n const sourceNode = audioContext.createMediaStreamSource(mediaStream);\n const workletNode = new AudioWorkletNode(\n audioContext,\n AUDIO_PROCESSOR_NAME,\n {\n processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE }\n }\n );\n sourceNode.connect(workletNode);\n\n // 5. Instantiate and return the runner which manages the conversation.\n const runner = new AudioConversationRunner(liveSession, options, {\n audioContext,\n mediaStream,\n sourceNode,\n workletNode\n });\n\n return { stop: () => runner.stop() };\n } catch (e) {\n // Ensure the audio context is closed on any setup error.\n if (audioContext && audioContext.state !== 'closed') {\n void audioContext.close();\n }\n\n // Re-throw specific, known error types directly. The user may want to handle `DOMException`\n // errors differently (for example, if permission to access audio device was denied).\n if (e instanceof AIError || e instanceof DOMException) {\n throw e;\n }\n\n // Wrap any other unexpected errors in a standard AIError.\n throw new AIError(\n AIErrorCode.ERROR,\n `Failed to initialize audio recording: ${(e as Error).message}`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp, getApp, _getProvider } from '@firebase/app';\nimport { Provider } from '@firebase/component';\nimport { getModularInstance } from '@firebase/util';\nimport { AI_TYPE, DEFAULT_HYBRID_IN_CLOUD_MODEL } from './constants';\nimport { AIService } from './service';\nimport { AI, AIOptions } from './public-types';\nimport {\n ImagenModelParams,\n HybridParams,\n ModelParams,\n RequestOptions,\n AIErrorCode,\n LiveModelParams\n} from './types';\nimport { AIError } from './errors';\nimport {\n AIModel,\n GenerativeModel,\n LiveGenerativeModel,\n ImagenModel\n} from './models';\nimport { encodeInstanceIdentifier } from './helpers';\nimport { GoogleAIBackend } from './backend';\nimport { WebSocketHandlerImpl } from './websocket';\n\nexport { ChatSession } from './methods/chat-session';\nexport { LiveSession } from './methods/live-session';\nexport * from 
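The JSDoc example above covers a plain audio conversation; when the model can also issue tool calls, the runner hands them to `functionCallingHandler` and forwards the returned result via `sendFunctionResponses`. A hedged sketch of such a handler follows — the `getTemperature` function, the `fetchTemperature` helper, and the exact `FunctionCall`/`FunctionResponse` fields used are illustrative assumptions rather than definitions from this file, and `startAudioConversation`/`LiveSession` are assumed to be imported from the public `firebase/ai` entry point:

```typescript
// Hypothetical app-side helper; not part of the Firebase AI SDK.
async function fetchTemperature(args: unknown): Promise<number> {
  return 21; // e.g. call a weather service here
}

// `liveSession` is an active LiveSession, as in the JSDoc example above.
async function startConversationWithTools(liveSession: LiveSession) {
  return startAudioConversation(liveSession, {
    functionCallingHandler: async functionCalls => {
      // This sketch only answers the first call in the batch.
      const call = functionCalls[0];
      if (call.name === 'getTemperature') {
        const temperature = await fetchTemperature(call.args);
        return { name: call.name, response: { temperature } };
      }
      return { name: call.name, response: { error: 'unknown function' } };
    }
  });
}
```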
'./requests/schema-builder';\nexport { ImagenImageFormat } from './requests/imagen-image-format';\nexport { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, AIError };\nexport { Backend, VertexAIBackend, GoogleAIBackend } from './backend';\nexport {\n startAudioConversation,\n AudioConversationController,\n StartAudioConversationOptions\n} from './methods/live-session-helpers';\n\ndeclare module '@firebase/component' {\n interface NameServiceMapping {\n [AI_TYPE]: AIService;\n }\n}\n\n/**\n * Returns the default {@link AI} instance that is associated with the provided\n * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the\n * default settings.\n *\n * @example\n * ```javascript\n * const ai = getAI(app);\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Gemini Developer API (via Google AI).\n * const ai = getAI(app, { backend: new GoogleAIBackend() });\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Vertex AI Gemini API.\n * const ai = getAI(app, { backend: new VertexAIBackend() });\n * ```\n *\n * @param app - The {@link @firebase/app#FirebaseApp} to use.\n * @param options - {@link AIOptions} that configure the AI instance.\n * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.\n *\n * @public\n */\nexport function getAI(app: FirebaseApp = getApp(), options?: AIOptions): AI {\n app = getModularInstance(app);\n // Dependencies\n const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE);\n\n const backend = options?.backend ?? new GoogleAIBackend();\n\n const finalOptions: Omit<AIOptions, 'backend'> = {\n useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false\n };\n\n const identifier = encodeInstanceIdentifier(backend);\n const aiInstance = AIProvider.getImmediate({\n identifier\n });\n\n aiInstance.options = finalOptions;\n\n return aiInstance;\n}\n\n/**\n * Returns a {@link GenerativeModel} class with methods for inference\n * and other functionality.\n *\n * @public\n */\nexport function getGenerativeModel(\n ai: AI,\n modelParams: ModelParams | HybridParams,\n requestOptions?: RequestOptions\n): GenerativeModel {\n // Uses the existence of HybridParams.mode to clarify the type of the modelParams input.\n const hybridParams = modelParams as HybridParams;\n let inCloudParams: ModelParams;\n if (hybridParams.mode) {\n inCloudParams = hybridParams.inCloudParams || {\n model: DEFAULT_HYBRID_IN_CLOUD_MODEL\n };\n } else {\n inCloudParams = modelParams as ModelParams;\n }\n\n if (!inCloudParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`\n );\n }\n\n /**\n * An AIService registered by index.node.ts will not have a\n * chromeAdapterFactory() method.\n */\n const chromeAdapter = (ai as AIService).chromeAdapterFactory?.(\n hybridParams.mode,\n typeof window === 'undefined' ? 
undefined : window,\n hybridParams.onDeviceParams\n );\n\n return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);\n}\n\n/**\n * Returns an {@link ImagenModel} class with methods for using Imagen.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when making Imagen requests.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @public\n */\nexport function getImagenModel(\n ai: AI,\n modelParams: ImagenModelParams,\n requestOptions?: RequestOptions\n): ImagenModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`\n );\n }\n return new ImagenModel(ai, modelParams, requestOptions);\n}\n\n/**\n * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.\n *\n * The Live API is only supported in modern browser windows and Node >= 22.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when setting up a {@link LiveSession}.\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @beta\n */\nexport function getLiveGenerativeModel(\n ai: AI,\n modelParams: LiveModelParams\n): LiveGenerativeModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`\n );\n }\n const webSocketHandler = new WebSocketHandlerImpl();\n return new LiveGenerativeModel(ai, modelParams, webSocketHandler);\n}\n","/**\n * The Firebase AI Web SDK.\n *\n * @packageDocumentation\n */\n\n/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { registerVersion, _registerComponent } from '@firebase/app';\nimport { AI_TYPE } from './constants';\nimport { Component, ComponentType } from '@firebase/component';\nimport { name, version } from '../package.json';\nimport { LanguageModel } from './types/language-model';\nimport { factory } from './factory-browser';\n\ndeclare global {\n interface Window {\n LanguageModel: LanguageModel;\n }\n}\n\nfunction registerAI(): void {\n _registerComponent(\n new Component(AI_TYPE, factory, ComponentType.PUBLIC).setMultipleInstances(\n true\n )\n );\n\n registerVersion(name, version);\n // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation\n registerVersion(name, version, '__BUILD_TARGET__');\n}\n\nregisterAI();\n\nexport * from './api';\nexport * from 
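Taken together with `getAI` above, a hedged end-to-end sketch of creating the models this file exposes. The public `firebase/app` and `firebase/ai` entry points and the placeholder model names are assumptions; `connect()` follows the JSDoc example earlier in this file, while `generateContent`/`response.text()` are the standard GenerativeModel surface and are assumed here rather than shown in this file:

```typescript
import { initializeApp } from 'firebase/app';
import {
  getAI,
  getGenerativeModel,
  getLiveGenerativeModel,
  startAudioConversation,
  GoogleAIBackend
} from 'firebase/ai';

const app = initializeApp({ /* your Firebase config */ });
const ai = getAI(app, { backend: new GoogleAIBackend() });

// Text generation (placeholder model name).
const model = getGenerativeModel(ai, { model: 'my-model-name' });

async function ask(prompt: string): Promise<string> {
  const result = await model.generateContent(prompt);
  return result.response.text();
}

// Live audio (placeholder model name); must be started from a user gesture.
const liveModel = getLiveGenerativeModel(ai, { model: 'my-live-model-name' });

async function onStartClick(): Promise<void> {
  const session = await liveModel.connect();
  const controller = await startAudioConversation(session);
  // Later: await controller.stop();
}
```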
'./public-types';\n"],"names":["GoogleAIMapper.mapGenerateContentResponse","GoogleAIMapper.mapGenerateContentRequest","GoogleAIMapper.mapCountTokensRequest"],"mappings":";;;;;;;;AAAA;;;;;;;;;;;;;;;AAeG;AAII,MAAM,OAAO,GAAG,IAAI,CAAC;AAErB,MAAM,gBAAgB,GAAG,aAAa,CAAC;AAEvC,MAAM,cAAc,GAAG,iCAAiC,CAAC;AAEzD,MAAM,mBAAmB,GAAG,QAAQ,CAAC;AAErC,MAAM,eAAe,GAAG,OAAO,CAAC;AAEhC,MAAM,YAAY,GAAG,OAAO,CAAC;AAE7B,MAAM,wBAAwB,GAAG,GAAG,GAAG,IAAI,CAAC;AAEnD;;AAEG;AACI,MAAM,6BAA6B,GAAG,uBAAuB;;ACpCpE;;;;;;;;;;;;;;;AAeG;AAMH;;;;AAIG;AACG,MAAO,OAAQ,SAAQ,aAAa,CAAA;AACxC;;;;;;AAMG;AACH,IAAA,WAAA,CACW,IAAiB,EAC1B,OAAe,EACN,eAAiC,EAAA;;QAG1C,MAAM,OAAO,GAAG,OAAO,CAAC;AACxB,QAAA,MAAM,QAAQ,GAAG,CAAA,EAAG,OAAO,CAAI,CAAA,EAAA,IAAI,EAAE,CAAC;QACtC,MAAM,WAAW,GAAG,CAAG,EAAA,OAAO,KAAK,OAAO,CAAA,EAAA,EAAK,QAAQ,CAAA,CAAA,CAAG,CAAC;AAC3D,QAAA,KAAK,CAAC,IAAI,EAAE,WAAW,CAAC,CAAC;QARhB,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAa;QAEjB,IAAe,CAAA,eAAA,GAAf,eAAe,CAAkB;;;;;AAY1C,QAAA,IAAI,KAAK,CAAC,iBAAiB,EAAE;;;AAG3B,YAAA,KAAK,CAAC,iBAAiB,CAAC,IAAI,EAAE,OAAO,CAAC,CAAC;SACxC;;;;;QAMD,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,OAAO,CAAC,SAAS,CAAC,CAAC;;AAG/C,QAAA,IAAI,CAAC,QAAQ,GAAG,MAAM,WAAW,CAAC;KACnC;AACF;;AChED;;;;;;;;;;;;;;;AAeG;AAQH;;;AAGG;AACI,MAAM,cAAc,GAAG,CAAC,MAAM,EAAE,OAAO,EAAE,UAAU,EAAE,QAAQ,EAAW;AAE/E;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B,IAAA,yBAAyB,EAAE,2BAA2B;AACtD,IAAA,+BAA+B,EAAE,iCAAiC;AAClE,IAAA,wBAAwB,EAAE,0BAA0B;AACpD,IAAA,+BAA+B,EAAE,iCAAiC;EACzD;AAQX;;;AAGG;AACU,MAAA,kBAAkB,GAAG;AAChC;;AAEG;AACH,IAAA,mBAAmB,EAAE,qBAAqB;AAC1C;;AAEG;AACH,IAAA,sBAAsB,EAAE,wBAAwB;AAChD;;AAEG;AACH,IAAA,eAAe,EAAE,iBAAiB;AAClC;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;;AAGG;AACH,IAAA,GAAG,EAAE,KAAK;EACD;AASX;;;;AAIG;AACU,MAAA,eAAe,GAAG;AAC7B;;AAEG;AACH,IAAA,QAAQ,EAAE,UAAU;AACpB;;AAEG;AACH,IAAA,WAAW,EAAE,aAAa;EACjB;AAUX;;;AAGG;AACU,MAAA,eAAe,GAAG;AAC7B;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,GAAG,EAAE,KAAK;AACV;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;EACH;AASX;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B;;AAEG;AACH,IAAA,wBAAwB,EAAE,0BAA0B;AACpD;;AAEG;AACH,IAAA,iBAAiB,EAAE,mBAAmB;AACtC;;AAEG;AACH,IAAA,oBAAoB,EAAE,sBAAsB;AAC5C;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;AACxC;;;;;AAKG;AACH,IAAA,yBAAyB,EAAE,2BAA2B;EAC7C;AAQX;;;AAGG;AACU,MAAA,WAAW,GAAG;AACzB;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,SAAS,EAAE,WAAW;AACtB;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;EAC/B;AAQX;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,SAAS,EAAE,WAAW;AACtB;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;AACxC;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,uBAAuB,EAAE,yBAAyB;EACzC;AAQX;;AAEG;AACU,MAAA,mBAAmB,GAAG;AACjC;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;;;;AAKG;AACH,IAAA,GAAG,EAAE,KAAK;AACV;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;EACH;AAQX;;;AAGG;AACU,MAAA,QAAQ,GAAG;AACtB;;AAEG;AACH,IAAA,oBAAoB,EAAE,sBAAsB;AAC5C;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,QAAQ,EAAE,UAAU;EACX;AAQX;;;;AAIG;AACU,MAAA,gBAAgB,GAAG;AAC9B;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;;AAGG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;;AAGG;AACH,IAAA,KAAK,EAAE,OAAO;EACL;AAUX;;;;;;;;;;;;;;;;;;;;AAoBG;AACU,MAAA,aAAa,GAAG;AAC3B,IAAA,kBAAkB,EAAE,kBAAkB;AACtC,IAAA,gBAAgB,EAAE,gBAAgB;AAClC,IAAA,eAAe,EAAE,eAAe;AAChC,IAAA,iBAAiB,EAAE,iBAAiB;EAC3B;AASX;;;;AAIG;AACU,MAAA,eAAe,GAAG;AAC7B,IAAA,WAAW,EAAE,WAAW;AA
CxB,IAAA,UAAU,EAAE,UAAU;EACb;AAUX;;;;AAIG;AACU,MAAA,OAAO,GAAG;AACrB,IAAA,WAAW,EAAE,qBAAqB;AAClC,IAAA,EAAE,EAAE,YAAY;AAChB,IAAA,MAAM,EAAE,gBAAgB;AACxB,IAAA,iBAAiB,EAAE,2BAA2B;EAC9C;AASF;;;;AAIG;AACU,MAAA,QAAQ,GAAG;AACtB,IAAA,WAAW,EAAE,sBAAsB;AACnC,IAAA,MAAM,EAAE,QAAQ;;;ACzalB;;;;;;;;;;;;;;;AAeG;AA4XH;;;;;;;;;;;;;;;;AAgBG;AACU,MAAA,kBAAkB,GAAG;AAChC;;AAEG;AACH,IAAA,gCAAgC,EAAE,kCAAkC;AACpE;;AAEG;AACH,IAAA,4BAA4B,EAAE,8BAA8B;AAC5D;;AAEG;AACH,IAAA,0BAA0B,EAAE,4BAA4B;AACxD;;AAEG;AACH,IAAA,4BAA4B,EAAE,8BAA8B;AAC5D;;AAEG;AACH,IAAA,2BAA2B,EAAE,6BAA6B;EAC1D;AA6KF;;;;AAIG;AACU,MAAA,gBAAgB,GAAG;AAC9B,IAAA,cAAc,EAAE,eAAe;AAC/B,IAAA,SAAS,EAAE,UAAU;AACrB,IAAA,sBAAsB,EAAE,sBAAsB;;;ACtmBhD;;;;;;;;;;;;;;;AAeG;AA4CH;;;;AAIG;AACU,MAAA,WAAW,GAAG;;AAEzB,IAAA,KAAK,EAAE,OAAO;;AAGd,IAAA,aAAa,EAAE,eAAe;;AAG9B,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,WAAW,EAAE,aAAa;;AAG1B,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,eAAe,EAAE,iBAAiB;;AAGlC,IAAA,eAAe,EAAE,iBAAiB;;AAGlC,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,UAAU,EAAE,YAAY;;AAGxB,IAAA,SAAS,EAAE,WAAW;;AAGtB,IAAA,QAAQ,EAAE,UAAU;;AAGpB,IAAA,aAAa,EAAE,eAAe;;AAG9B,IAAA,YAAY,EAAE,cAAc;;AAG5B,IAAA,WAAW,EAAE,aAAa;;;ACzG5B;;;;;;;;;;;;;;;AAeG;AAEH;;;;;AAKG;AACU,MAAA,UAAU,GAAG;;AAExB,IAAA,MAAM,EAAE,QAAQ;;AAEhB,IAAA,MAAM,EAAE,QAAQ;;AAEhB,IAAA,OAAO,EAAE,SAAS;;AAElB,IAAA,OAAO,EAAE,SAAS;;AAElB,IAAA,KAAK,EAAE,OAAO;;AAEd,IAAA,MAAM,EAAE,QAAQ;;;ACnClB;;;;;;;;;;;;;;;AAeG;AAqFH;;;;;;;;;;;AAWG;AACU,MAAA,uBAAuB,GAAG;AACrC;;AAEG;AACH,IAAA,mBAAmB,EAAE,qBAAqB;AAC1C;;AAEG;AACH,IAAA,sBAAsB,EAAE,wBAAwB;AAChD;;AAEG;AACH,IAAA,eAAe,EAAE,iBAAiB;AAClC;;;;;AAKG;AACH,IAAA,UAAU,EAAE,YAAY;EACf;AAiBX;;;;;;;AAOG;AACU,MAAA,uBAAuB,GAAG;AACrC;;AAEG;AACH,IAAA,SAAS,EAAE,YAAY;AACvB;;;;;;AAMG;AACH,IAAA,WAAW,EAAE,aAAa;AAC1B;;;;;;AAMG;AACH,IAAA,SAAS,EAAE,WAAW;EACb;AAiCX;;;;;;;;;;AAUG;AACU,MAAA,iBAAiB,GAAG;AAC/B;;AAEG;AACH,IAAA,QAAQ,EAAE,KAAK;AACf;;AAEG;AACH,IAAA,eAAe,EAAE,KAAK;AACtB;;AAEG;AACH,IAAA,cAAc,EAAE,KAAK;AACrB;;AAEG;AACH,IAAA,gBAAgB,EAAE,MAAM;AACxB;;AAEG;AACH,IAAA,eAAe,EAAE,MAAM;;;AClPzB;;;;;;;;;;;;;;;AAeG;AAqCH;;;;;;;;;;;AAWG;AACU,MAAA,WAAW,GAAG;AACzB;;;AAGG;AACH,IAAA,SAAS,EAAE,WAAW;AAEtB;;;AAGG;AACH,IAAA,SAAS,EAAE,WAAW;AACd,EAAC;;AC5EX;;;;;;;;;;;;;;;AAeG;AAKH;;;;;;;AAOG;MACmB,OAAO,CAAA;AAM3B;;;AAGG;AACH,IAAA,WAAA,CAAsB,IAAiB,EAAA;AACrC,QAAA,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC;KACzB;AACF,CAAA;AAED;;;;;;;AAOG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAC1C;;AAEG;AACH,IAAA,WAAA,GAAA;AACE,QAAA,KAAK,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC;KAC9B;AACF,CAAA;AAED;;;;;;;AAOG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAQ1C;;;;;;AAMG;AACH,IAAA,WAAA,CAAY,WAAmB,gBAAgB,EAAA;AAC7C,QAAA,KAAK,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC;QAC7B,IAAI,CAAC,QAAQ,EAAE;AACb,YAAA,IAAI,CAAC,QAAQ,GAAG,gBAAgB,CAAC;SAClC;aAAM;AACL,YAAA,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;SAC1B;KACF;AACF;;AC3FD;;;;;;;;;;;;;;;AAeG;AAOH;;;;;AAKG;AACG,SAAU,wBAAwB,CAAC,OAAgB,EAAA;AACvD,IAAA,IAAI,OAAO,YAAY,eAAe,EAAE;QACtC,OAAO,CAAA,EAAG,OAAO,CAAA,SAAA,CAAW,CAAC;KAC9B;AAAM,SAAA,IAAI,OAAO,YAAY,eAAe,EAAE;AAC7C,QAAA,OAAO,GAAG,OAAO,CAAA,UAAA,EAAa,OAAO,CAAC,QAAQ,EAAE,CAAC;KAClD;SAAM;AACL,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAoB,iBAAA,EAAA,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,WAAW,CAAC,CAAA,CAAE,CAC1D,CAAC;KACH;AACH,CAAC;AAED;;;;AAIG;AACG,SAAU,wBAAwB,CAAC,kBAA0B,EAAA;IACjE,MAAM,eAAe,GAAG,kBAAkB,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;AACtD,IAAA,IAAI,eAAe,CAAC,CAAC,CAAC,KAAK,OAAO,EAAE;AAClC,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAgD,6CAAA,EAAA,eAAe,CAAC,CAAC,CAAC,CAAA,CAAA,CAAG,CACtE,CAAC;KACH;AACD,IAAA,MAAM,WAAW,GAAG,eAAe,CAAC,CAAC,CAAC,CAAC;IACvC,QAAQ,WAAW;AACjB,QAAA,KAAK,UAAU;AACb,YAAA,MAAM,QAAQ,GAAuB,eAAe,CAAC,CAAC,CAAC,CAAC;YACxD,IAAI,CA
AC,QAAQ,EAAE;gBACb,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAkD,+CAAA,EAAA,kBAAkB,CAAG,CAAA,CAAA,CACxE,CAAC;aACH;AACD,YAAA,OAAO,IAAI,eAAe,CAAC,QAAQ,CAAC,CAAC;AACvC,QAAA,KAAK,UAAU;YACb,OAAO,IAAI,eAAe,EAAE,CAAC;AAC/B,QAAA;YACE,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAwC,qCAAA,EAAA,kBAAkB,CAAG,CAAA,CAAA,CAC9D,CAAC;KACL;AACH;;ACzEA;;;;;;;;;;;;;;;AAeG;AAII,MAAM,MAAM,GAAG,IAAI,MAAM,CAAC,oBAAoB,CAAC;;ACsBtD;;AAEG;AACH,IAAY,YAKX,CAAA;AALD,CAAA,UAAY,YAAY,EAAA;AACtB,IAAA,YAAA,CAAA,aAAA,CAAA,GAAA,aAA6B,CAAA;AAC7B,IAAA,YAAA,CAAA,cAAA,CAAA,GAAA,cAA+B,CAAA;AAC/B,IAAA,YAAA,CAAA,aAAA,CAAA,GAAA,aAA6B,CAAA;AAC7B,IAAA,YAAA,CAAA,WAAA,CAAA,GAAA,WAAyB,CAAA;AAC3B,CAAC,EALW,YAAY,KAAZ,YAAY,GAKvB,EAAA,CAAA,CAAA;;ACjDD;;;;;;;;;;;;;;;AAeG;AAwBH;AACA,MAAM,qBAAqB,GAA4B,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC;AAE3E;;;;AAIG;MACU,iBAAiB,CAAA;AAW5B,IAAA,WAAA,CACS,qBAAoC,EACpC,IAAmB,EAC1B,cAA+B,EAAA;QAFxB,IAAqB,CAAA,qBAAA,GAArB,qBAAqB,CAAe;QACpC,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAe;QAVpB,IAAa,CAAA,aAAA,GAAG,KAAK,CAAC;AAG9B,QAAA,IAAA,CAAA,cAAc,GAAmB;AAC/B,YAAA,aAAa,EAAE;AACb,gBAAA,cAAc,EAAE,qBAAqB;AACtC,aAAA;SACF,CAAC;QAMA,IAAI,cAAc,EAAE;AAClB,YAAA,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;AACrC,YAAA,IAAI,CAAC,IAAI,CAAC,cAAc,CAAC,aAAa,EAAE;AACtC,gBAAA,IAAI,CAAC,cAAc,CAAC,aAAa,GAAG;AAClC,oBAAA,cAAc,EAAE,qBAAqB;iBACtC,CAAC;aACH;iBAAM,IAAI,CAAC,IAAI,CAAC,cAAc,CAAC,aAAa,CAAC,cAAc,EAAE;AAC5D,gBAAA,IAAI,CAAC,cAAc,CAAC,aAAa,CAAC,cAAc;AAC9C,oBAAA,qBAAqB,CAAC;aACzB;SACF;KACF;AAED;;;;;;;;;;;;;;AAcG;IACH,MAAM,WAAW,CAAC,OAA+B,EAAA;AAC/C,QAAA,IAAI,CAAC,IAAI,CAAC,IAAI,EAAE;AACd,YAAA,MAAM,CAAC,KAAK,CACV,CAAA,0DAAA,CAA4D,CAC7D,CAAC;AACF,YAAA,OAAO,KAAK,CAAC;SACd;QACD,IAAI,IAAI,CAAC,IAAI,KAAK,aAAa,CAAC,aAAa,EAAE;AAC7C,YAAA,MAAM,CAAC,KAAK,CACV,CAAA,gEAAA,CAAkE,CACnE,CAAC;AACF,YAAA,OAAO,KAAK,CAAC;SACd;;AAGD,QAAA,MAAM,YAAY,GAAG,MAAM,IAAI,CAAC,mBAAmB,EAAE,CAAC;QAEtD,IAAI,IAAI,CAAC,IAAI,KAAK,aAAa,CAAC,cAAc,EAAE;;AAE9C,YAAA,IAAI,YAAY,KAAK,YAAY,CAAC,WAAW,EAAE;gBAC7C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,4DAA4D,CAC7D,CAAC;aACH;AAAM,iBAAA,IACL,YAAY,KAAK,YAAY,CAAC,YAAY;AAC1C,gBAAA,YAAY,KAAK,YAAY,CAAC,WAAW,EACzC;;AAEA,gBAAA,MAAM,CAAC,KAAK,CAAC,CAAA,kDAAA,CAAoD,CAAC,CAAC;gBACnE,MAAM,IAAI,CAAC,eAAe,CAAC;AAC3B,gBAAA,OAAO,IAAI,CAAC;aACb;AACD,YAAA,OAAO,IAAI,CAAC;SACb;;AAGD,QAAA,IAAI,YAAY,KAAK,YAAY,CAAC,SAAS,EAAE;AAC3C,YAAA,MAAM,CAAC,KAAK,CACV,4DAA4D,YAAY,CAAA,EAAA,CAAI,CAC7E,CAAC;AACF,YAAA,OAAO,KAAK,CAAC;SACd;QACD,IAAI,CAAC,iBAAiB,CAAC,iBAAiB,CAAC,OAAO,CAAC,EAAE;AACjD,YAAA,MAAM,CAAC,KAAK,CACV,CAAA,gEAAA,CAAkE,CACnE,CAAC;AACF,YAAA,OAAO,KAAK,CAAC;SACd;AAED,QAAA,OAAO,IAAI,CAAC;KACb;AAED;;;;;;;;AAQG;IACH,MAAM,eAAe,CAAC,OAA+B,EAAA;AACnD,QAAA,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,aAAa,EAAE,CAAC;AAC3C,QAAA,MAAM,QAAQ,GAAG,MAAM,OAAO,CAAC,GAAG,CAChC,OAAO,CAAC,QAAQ,CAAC,GAAG,CAAC,iBAAiB,CAAC,sBAAsB,CAAC,CAC/D,CAAC;AACF,QAAA,MAAM,IAAI,GAAG,MAAM,OAAO,CAAC,MAAM,CAC/B,QAAQ,EACR,IAAI,CAAC,cAAc,CAAC,aAAa,CAClC,CAAC;AACF,QAAA,OAAO,iBAAiB,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;KAC3C;AAED;;;;;;;;AAQG;IACH,MAAM,qBAAqB,CACzB,OAA+B,EAAA;AAE/B,QAAA,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,aAAa,EAAE,CAAC;AAC3C,QAAA,MAAM,QAAQ,GAAG,MAAM,OAAO,CAAC,GAAG,CAChC,OAAO,CAAC,QAAQ,CAAC,GAAG,CAAC,iBAAiB,CAAC,sBAAsB,CAAC,CAC/D,CAAC;AACF,QAAA,MAAM,MAAM,GAAG,OAAO,CAAC,eAAe,CACpC,QAAQ,EACR,IAAI,CAAC,cAAc,CAAC,aAAa,CAClC,CAAC;AACF,QAAA,OAAO,iBAAiB,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;KACnD;IAED,MAAM,WAAW,CAAC,QAA4B,EAAA;QAC5C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,wDAAwD,CACzD,CAAC;KACH;AAED;;AAEG;IACK,OAAO,iBAAiB,CAAC,OAA+B,EAAA;;QAE9D,IAAI,OAAO,CAAC,QAAQ,CAAC,MAAM,KAAK,CAAC,EAAE;AACjC,YAAA,MAAM,CAAC,KAAK,CAAC,gDAAgD,CAAC,CAAC;AAC/D,YAAA,OA
AO,KAAK,CAAC;SACd;AAED,QAAA,KAAK,MAAM,OAAO,IAAI,OAAO,CAAC,QAAQ,EAAE;AACtC,YAAA,IAAI,OAAO,CAAC,IAAI,KAAK,UAAU,EAAE;AAC/B,gBAAA,MAAM,CAAC,KAAK,CAAC,CAAA,iDAAA,CAAmD,CAAC,CAAC;AAClE,gBAAA,OAAO,KAAK,CAAC;aACd;;AAGD,YAAA,KAAK,MAAM,IAAI,IAAI,OAAO,CAAC,KAAK,EAAE;gBAChC,IACE,IAAI,CAAC,UAAU;AACf,oBAAA,iBAAiB,CAAC,oBAAoB,CAAC,OAAO,CAC5C,IAAI,CAAC,UAAU,CAAC,QAAQ,CACzB,KAAK,CAAC,CAAC,EACR;oBACA,MAAM,CAAC,KAAK,CACV,CAA0B,uBAAA,EAAA,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAqC,mCAAA,CAAA,CACxF,CAAC;AACF,oBAAA,OAAO,KAAK,CAAC;iBACd;aACF;SACF;AAED,QAAA,OAAO,IAAI,CAAC;KACb;AAED;;AAEG;AACK,IAAA,MAAM,mBAAmB,GAAA;AAC/B,QAAA,MAAM,YAAY,GAAG,MAAM,IAAI,CAAC,qBAAqB,EAAE,YAAY,CACjE,IAAI,CAAC,cAAc,CAAC,aAAa,CAClC,CAAC;AAEF,QAAA,IAAI,YAAY,KAAK,YAAY,CAAC,YAAY,EAAE;YAC9C,IAAI,CAAC,QAAQ,EAAE,CAAC;SACjB;AAED,QAAA,OAAO,YAAY,CAAC;KACrB;AAED;;;;;;;;AAQG;IACK,QAAQ,GAAA;AACd,QAAA,IAAI,IAAI,CAAC,aAAa,EAAE;YACtB,OAAO;SACR;AACD,QAAA,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC;AAC1B,QAAA,IAAI,CAAC,eAAe,GAAG,IAAI,CAAC,qBAAqB;AAC/C,cAAE,MAAM,CAAC,IAAI,CAAC,cAAc,CAAC,aAAa,CAAC;aAC1C,OAAO,CAAC,MAAK;AACZ,YAAA,IAAI,CAAC,aAAa,GAAG,KAAK,CAAC;AAC7B,SAAC,CAAC,CAAC;KACN;AAED;;AAEG;AACK,IAAA,aAAa,sBAAsB,CACzC,OAAgB,EAAA;AAEhB,QAAA,MAAM,4BAA4B,GAAG,MAAM,OAAO,CAAC,GAAG,CACpD,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,iBAAiB,CAAC,6BAA6B,CAAC,CACnE,CAAC;QACF,OAAO;YACL,IAAI,EAAE,iBAAiB,CAAC,0BAA0B,CAAC,OAAO,CAAC,IAAI,CAAC;AAChE,YAAA,OAAO,EAAE,4BAA4B;SACtC,CAAC;KACH;AAED;;AAEG;AACK,IAAA,aAAa,6BAA6B,CAChD,IAAU,EAAA;AAEV,QAAA,IAAI,IAAI,CAAC,IAAI,EAAE;YACb,OAAO;AACL,gBAAA,IAAI,EAAE,MAAM;gBACZ,KAAK,EAAE,IAAI,CAAC,IAAI;aACjB,CAAC;SACH;AAAM,aAAA,IAAI,IAAI,CAAC,UAAU,EAAE;AAC1B,YAAA,MAAM,qBAAqB,GAAG,MAAM,KAAK,CACvC,CAAA,KAAA,EAAQ,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAA,QAAA,EAAW,IAAI,CAAC,UAAU,CAAC,IAAI,CAAA,CAAE,CAClE,CAAC;AACF,YAAA,MAAM,SAAS,GAAG,MAAM,qBAAqB,CAAC,IAAI,EAAE,CAAC;AACrD,YAAA,MAAM,WAAW,GAAG,MAAM,iBAAiB,CAAC,SAAS,CAAC,CAAC;YACvD,OAAO;AACL,gBAAA,IAAI,EAAE,OAAO;AACb,gBAAA,KAAK,EAAE,WAAW;aACnB,CAAC;SACH;QACD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,CAA0D,wDAAA,CAAA,CAC3D,CAAC;KACH;AAED;;AAEG;IACK,OAAO,0BAA0B,CACvC,IAAU,EAAA;;QAGV,OAAO,IAAI,KAAK,OAAO,GAAG,WAAW,GAAG,MAAM,CAAC;KAChD;AAED;;;;;;;;;AASG;AACK,IAAA,MAAM,aAAa,GAAA;AACzB,QAAA,IAAI,CAAC,IAAI,CAAC,qBAAqB,EAAE;YAC/B,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,sDAAsD,CACvD,CAAC;SACH;AACD,QAAA,MAAM,UAAU,GAAG,MAAM,IAAI,CAAC,qBAAqB,CAAC,MAAM,CACxD,IAAI,CAAC,cAAc,CAAC,aAAa,CAClC,CAAC;AACF,QAAA,IAAI,IAAI,CAAC,UAAU,EAAE;AACnB,YAAA,IAAI,CAAC,UAAU,CAAC,OAAO,EAAE,CAAC;SAC3B;;AAED,QAAA,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;AAC7B,QAAA,OAAO,UAAU,CAAC;KACnB;AAED;;AAEG;IACK,OAAO,UAAU,CAAC,IAAY,EAAA;QACpC,OAAO;AACL,YAAA,IAAI,EAAE,aAAa;AACjB,gBAAA,UAAU,EAAE;AACV,oBAAA;AACE,wBAAA,OAAO,EAAE;AACP,4BAAA,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,CAAC;AAClB,yBAAA;AACF,qBAAA;AACF,iBAAA;aACF,CAAC;SACS,CAAC;KACf;AAED;;AAEG;IACK,OAAO,gBAAgB,CAAC,MAA8B,EAAA;AAC5D,QAAA,MAAM,OAAO,GAAG,IAAI,WAAW,EAAE,CAAC;QAClC,OAAO;AACL,YAAA,IAAI,EAAE,MAAM,CAAC,WAAW,CACtB,IAAI,eAAe,CAAC;gBAClB,SAAS,CAAC,KAAK,EAAE,UAAU,EAAA;AACzB,oBAAA,MAAM,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC;AAC1B,wBAAA,UAAU,EAAE;AACV,4BAAA;AACE,gCAAA,OAAO,EAAE;AACP,oCAAA,IAAI,EAAE,OAAO;AACb,oCAAA,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,CAAC;AACzB,iCAAA;AACF,6BAAA;AACF,yBAAA;AACF,qBAAA,CAAC,CAAC;AACH,oBAAA,UAAU,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,CAAC,CAAA,MAAA,EAAS,IAAI,CAAA,IAAA,CAAM,CAAC,CAAC,CAAC;iBACzD;AACF,aAAA,CAAC,CACH;SACU,CAAC;KACf;;AApVD;AACO,iBAAA,CAAA,oBAAoB,GAAG,CAAC,YAAY,EAAE,WAAW,CAAC,CAAC;AAsV5D;;AAEG;SACa,oBAAoB,CAClC,IAAmB,EACnB,MAAe,EACf,MAAuB,EAAA;;AAGvB,IAAA,IAAI,OAAO,MAAM,KAAK,WAAW,IAAI,IAAI,EAAE;QACzC,OAAO,IAAI,iBAAiB,CACzB,MAAiB,CA
AC,aAA8B,EACjD,IAAI,EACJ,MAAM,CACP,CAAC;KACH;AACH;;ACvZA;;;;;;;;;;;;;;;AAeG;MAgBU,SAAS,CAAA;IAMpB,WACS,CAAA,GAAgB,EAChB,OAAgB,EACvB,YAAiD,EACjD,gBAA0D,EACnD,oBAI2B,EAAA;QAR3B,IAAG,CAAA,GAAA,GAAH,GAAG,CAAa;QAChB,IAAO,CAAA,OAAA,GAAP,OAAO,CAAS;QAGhB,IAAoB,CAAA,oBAAA,GAApB,oBAAoB,CAIO;AAElC,QAAA,MAAM,QAAQ,GAAG,gBAAgB,EAAE,YAAY,CAAC,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,CAAC;AACpE,QAAA,MAAM,IAAI,GAAG,YAAY,EAAE,YAAY,CAAC,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,CAAC;AAC5D,QAAA,IAAI,CAAC,IAAI,GAAG,IAAI,IAAI,IAAI,CAAC;AACzB,QAAA,IAAI,CAAC,QAAQ,GAAG,QAAQ,IAAI,IAAI,CAAC;AAEjC,QAAA,IAAI,OAAO,YAAY,eAAe,EAAE;AACtC,YAAA,IAAI,CAAC,QAAQ,GAAG,OAAO,CAAC,QAAQ,CAAC;SAClC;aAAM;AACL,YAAA,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAC;SACpB;KACF;IAED,OAAO,GAAA;AACL,QAAA,OAAO,OAAO,CAAC,OAAO,EAAE,CAAC;KAC1B;IAED,IAAI,OAAO,CAAC,YAAuB,EAAA;AACjC,QAAA,IAAI,CAAC,QAAQ,GAAG,YAAY,CAAC;KAC9B;AAED,IAAA,IAAI,OAAO,GAAA;QACT,OAAO,IAAI,CAAC,QAAQ,CAAC;KACtB;AACF;;ACvED;;;;;;;;;;;;;;;AAeG;SAYa,OAAO,CACrB,SAA6B,EAC7B,EAAE,kBAAkB,EAA0B,EAAA;IAE9C,IAAI,CAAC,kBAAkB,EAAE;QACvB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,6CAA6C,CAC9C,CAAC;KACH;AAED,IAAA,MAAM,OAAO,GAAG,wBAAwB,CAAC,kBAAkB,CAAC,CAAC;;IAG7D,MAAM,GAAG,GAAG,SAAS,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC,YAAY,EAAE,CAAC;IACxD,MAAM,IAAI,GAAG,SAAS,CAAC,WAAW,CAAC,eAAe,CAAC,CAAC;IACpD,MAAM,gBAAgB,GAAG,SAAS,CAAC,WAAW,CAAC,oBAAoB,CAAC,CAAC;AAErE,IAAA,OAAO,IAAI,SAAS,CAClB,GAAG,EACH,OAAO,EACP,IAAI,EACJ,gBAAgB,EAChB,oBAAoB,CACrB,CAAC;AACJ;;ACpDA;;;;;;;;;;;;;;;AAeG;AAQH;;;;;;;AAOG;MACmB,OAAO,CAAA;AAY3B;;;;;;;;;;;;;;;;AAgBG;IACH,WAAsB,CAAA,EAAM,EAAE,SAAiB,EAAA;QAC7C,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,MAAM,EAAE;YAC5B,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,UAAU,EACtB,CAAuH,qHAAA,CAAA,CACxH,CAAC;SACH;aAAM,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,SAAS,EAAE;YACtC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,CAA6H,2HAAA,CAAA,CAC9H,CAAC;SACH;aAAM,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE;YAClC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,SAAS,EACrB,CAAqH,mHAAA,CAAA,CACtH,CAAC;SACH;aAAM;YACL,IAAI,CAAC,YAAY,GAAG;AAClB,gBAAA,MAAM,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM;AAC7B,gBAAA,OAAO,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,SAAS;AACjC,gBAAA,KAAK,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK;AAC3B,gBAAA,8BAA8B,EAAE,EAAE,CAAC,GAAG,CAAC,8BAA8B;gBACrE,QAAQ,EAAE,EAAE,CAAC,QAAQ;gBACrB,OAAO,EAAE,EAAE,CAAC,OAAO;aACpB,CAAC;AAEF,YAAA,IAAI,oBAAoB,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,aAAa,EAAE;gBACjE,MAAM,KAAK,GAAG,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,aAAa,CAAC;AAC5C,gBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAAK;oBACxC,OAAO,OAAO,CAAC,OAAO,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC;AACpC,iBAAC,CAAC;aACH;AAAM,iBAAA,IAAK,EAAgB,CAAC,QAAQ,EAAE;AACrC,gBAAA,IAAI,EAAE,CAAC,OAAO,EAAE,2BAA2B,EAAE;AAC3C,oBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAClC,EAAgB,CAAC,QAAS,CAAC,kBAAkB,EAAE,CAAC;iBACpD;qBAAM;AACL,oBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAClC,EAAgB,CAAC,QAAS,CAAC,QAAQ,EAAE,CAAC;iBAC1C;aACF;AAED,YAAA,IAAK,EAAgB,CAAC,IAAI,EAAE;AAC1B,gBAAA,IAAI,CAAC,YAAY,CAAC,YAAY,GAAG,MAC9B,EAAgB,CAAC,IAAK,CAAC,QAAQ,EAAE,CAAC;aACtC;AAED,YAAA,IAAI,CAAC,KAAK,GAAG,OAAO,CAAC,kBAAkB,CACrC,SAAS,EACT,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,WAAW,CACtC,CAAC;SACH;KACF;AAED;;;;;;;AAOG;AACH,IAAA,OAAO,kBAAkB,CACvB,SAAiB,EACjB,WAAwB,EAAA;AAExB,QAAA,IAAI,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AACzC,YAAA,OAAO,OAAO,CAAC,0BAA0B,CAAC,SAAS,CAAC,CAAC;SACtD;aAAM;AACL,YAAA,OAAO,OAAO,CAAC,0BAA0B,CAAC,SAAS,CAAC,CAAC;SACtD;KACF;AAED;;AAEG;IACK,OAAO,0BAA0B,CAAC,SAAiB,EAAA;QACzD,OAAO,CAAA,OAAA,EAAU,SAAS,CAAA,CAAE,CAAC;KAC9B;AAED;;AAEG;IACK,OAAO,0BAA0B,CAAC,SAAiB,EAAA;AACzD,QAAA,IAAI,KAAa,CAAC;AAClB,QAAA,IAAI,SAAS,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE;AAC3B,YAAA,IAAI
,SAAS,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE;;AAEnC,gBAAA,KAAK,GAAG,CAAA,kBAAA,EAAqB,SAAS,CAAA,CAAE,CAAC;aAC1C;iBAAM;;gBAEL,KAAK,GAAG,SAAS,CAAC;aACnB;SACF;aAAM;;AAEL,YAAA,KAAK,GAAG,CAAA,yBAAA,EAA4B,SAAS,CAAA,CAAE,CAAC;SACjD;AAED,QAAA,OAAO,KAAK,CAAC;KACd;AACF;;AC/JD;;;;;;;;;;;;;;;AAeG;AAgBH,IAAY,IAKX,CAAA;AALD,CAAA,UAAY,IAAI,EAAA;AACd,IAAA,IAAA,CAAA,kBAAA,CAAA,GAAA,iBAAoC,CAAA;AACpC,IAAA,IAAA,CAAA,yBAAA,CAAA,GAAA,uBAAiD,CAAA;AACjD,IAAA,IAAA,CAAA,cAAA,CAAA,GAAA,aAA4B,CAAA;AAC5B,IAAA,IAAA,CAAA,SAAA,CAAA,GAAA,SAAmB,CAAA;AACrB,CAAC,EALW,IAAI,KAAJ,IAAI,GAKf,EAAA,CAAA,CAAA,CAAA;MAEY,UAAU,CAAA;IACrB,WACS,CAAA,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,cAA+B,EAAA;QAJ/B,IAAK,CAAA,KAAA,GAAL,KAAK,CAAQ;QACb,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAM;QACV,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;QACxB,IAAM,CAAA,MAAA,GAAN,MAAM,CAAS;QACf,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;KACpC;IACJ,QAAQ,GAAA;QACN,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;AAClC,QAAA,GAAG,CAAC,QAAQ,GAAG,CAAI,CAAA,EAAA,IAAI,CAAC,UAAU,CAAA,CAAA,EAAI,IAAI,CAAC,SAAS,CAAI,CAAA,EAAA,IAAI,CAAC,IAAI,EAAE,CAAC;QACpE,GAAG,CAAC,MAAM,GAAG,IAAI,CAAC,WAAW,CAAC,QAAQ,EAAE,CAAC;AACzC,QAAA,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;KACvB;AAED,IAAA,IAAY,OAAO,GAAA;QACjB,OAAO,IAAI,CAAC,cAAc,EAAE,OAAO,IAAI,CAAA,QAAA,EAAW,cAAc,CAAA,CAAE,CAAC;KACpE;AAED,IAAA,IAAY,UAAU,GAAA;QACpB,OAAO,mBAAmB,CAAC;KAC5B;AAED,IAAA,IAAY,SAAS,GAAA;QACnB,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,YAAY,eAAe,EAAE;YACvD,OAAO,CAAA,SAAA,EAAY,IAAI,CAAC,WAAW,CAAC,OAAO,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,CAAA,CAAE,CAAC;SAC7D;aAAM,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,YAAY,eAAe,EAAE;AAC9D,YAAA,OAAO,YAAY,IAAI,CAAC,WAAW,CAAC,OAAO,cAAc,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,QAAQ,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,EAAE,CAAC;SAC5G;aAAM;YACL,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,oBAAoB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,CAAA,CAAE,CAC/D,CAAC;SACH;KACF;AAED,IAAA,IAAY,WAAW,GAAA;AACrB,QAAA,MAAM,MAAM,GAAG,IAAI,eAAe,EAAE,CAAC;AACrC,QAAA,IAAI,IAAI,CAAC,MAAM,EAAE;AACf,YAAA,MAAM,CAAC,GAAG,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;SAC1B;AAED,QAAA,OAAO,MAAM,CAAC;KACf;AACF,CAAA;MAEY,YAAY,CAAA;AACvB,IAAA,WAAA,CAAmB,WAAwB,EAAA;QAAxB,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;KAAI;IAC/C,QAAQ,GAAA;QACN,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,CAAS,MAAA,EAAA,cAAc,CAAE,CAAA,CAAC,CAAC;AAC/C,QAAA,GAAG,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,CAAC;AAE7B,QAAA,MAAM,WAAW,GAAG,IAAI,eAAe,EAAE,CAAC;QAC1C,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;AAChD,QAAA,GAAG,CAAC,MAAM,GAAG,WAAW,CAAC,QAAQ,EAAE,CAAC;AAEpC,QAAA,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;KACvB;AAED,IAAA,IAAY,QAAQ,GAAA;AAClB,QAAA,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAClE,YAAA,OAAO,0EAA0E,CAAC;SACnF;aAAM;AACL,YAAA,OAAO,mFAAmF,IAAI,CAAC,WAAW,CAAC,QAAQ,EAAE,CAAC;SACvH;KACF;AACF,CAAA;AAED;;AAEG;AACH,SAAS,gBAAgB,GAAA;IACvB,MAAM,WAAW,GAAG,EAAE,CAAC;IACvB,WAAW,CAAC,IAAI,CAAC,CAAA,EAAG,YAAY,CAAI,CAAA,EAAA,eAAe,CAAE,CAAA,CAAC,CAAC;AACvD,IAAA,WAAW,CAAC,IAAI,CAAC,QAAQ,eAAe,CAAA,CAAE,CAAC,CAAC;AAC5C,IAAA,OAAO,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;AAC/B,CAAC;AAEM,eAAe,UAAU,CAAC,GAAe,EAAA;AAC9C,IAAA,MAAM,OAAO,GAAG,IAAI,OAAO,EAAE,CAAC;AAC9B,IAAA,OAAO,CAAC,MAAM,CAAC,cAAc,EAAE,kBAAkB,CAAC,CAAC;IACnD,OAAO,CAAC,MAAM,CAAC,mBAAmB,EAAE,gBAAgB,EAAE,CAAC,CAAC;IACxD,OAAO,CAAC,MAAM,CAAC,gBAAgB,EAAE,GAAG,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;AACzD,IAAA,IAAI,GAAG,CAAC,WAAW,CAAC,8BAA8B,EAAE;QAClD,OAAO,CAAC,MAAM,CAAC,kBAAkB,EAAE,GAAG,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC;KAC3D;AACD,IAAA,IAAI,GAAG,CAAC,WAAW,CAAC,gBAAgB,EAAE;QACpC,MAAM,aAAa,GAAG,MAAM,GAAG,CAAC,WAAW,CAAC,gBAAgB,EAAE,CAAC;QAC/D,IAAI,aAAa,EAAE;YACjB,OAAO,CAAC,MAAM,CAAC,qBAAqB,EAAE,aAAa,CAAC,KAAK,CAAC,CAAC;AAC3D,YAAA,
IAAI,aAAa,CAAC,KAAK,EAAE;gBACvB,MAAM,CAAC,IAAI,CACT,CAA6C,0CAAA,EAAA,aAAa,CAAC,KAAK,CAAC,OAAO,CAAE,CAAA,CAC3E,CAAC;aACH;SACF;KACF;AAED,IAAA,IAAI,GAAG,CAAC,WAAW,CAAC,YAAY,EAAE;QAChC,MAAM,SAAS,GAAG,MAAM,GAAG,CAAC,WAAW,CAAC,YAAY,EAAE,CAAC;QACvD,IAAI,SAAS,EAAE;YACb,OAAO,CAAC,MAAM,CAAC,eAAe,EAAE,CAAY,SAAA,EAAA,SAAS,CAAC,WAAW,CAAE,CAAA,CAAC,CAAC;SACtE;KACF;AAED,IAAA,OAAO,OAAO,CAAC;AACjB,CAAC;AAEM,eAAe,gBAAgB,CACpC,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,IAAY,EACZ,cAA+B,EAAA;AAE/B,IAAA,MAAM,GAAG,GAAG,IAAI,UAAU,CAAC,KAAK,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;IAC7E,OAAO;AACL,QAAA,GAAG,EAAE,GAAG,CAAC,QAAQ,EAAE;AACnB,QAAA,YAAY,EAAE;AACZ,YAAA,MAAM,EAAE,MAAM;AACd,YAAA,OAAO,EAAE,MAAM,UAAU,CAAC,GAAG,CAAC;YAC9B,IAAI;AACL,SAAA;KACF,CAAC;AACJ,CAAC;AAEM,eAAe,WAAW,CAC/B,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,IAAY,EACZ,cAA+B,EAAA;AAE/B,IAAA,MAAM,GAAG,GAAG,IAAI,UAAU,CAAC,KAAK,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;AAC7E,IAAA,IAAI,QAAQ,CAAC;AACb,IAAA,IAAI,cAA4D,CAAC;AACjE,IAAA,IAAI;AACF,QAAA,MAAM,OAAO,GAAG,MAAM,gBAAgB,CACpC,KAAK,EACL,IAAI,EACJ,WAAW,EACX,MAAM,EACN,IAAI,EACJ,cAAc,CACf,CAAC;;AAEF,QAAA,MAAM,aAAa,GACjB,cAAc,EAAE,OAAO,IAAI,IAAI,IAAI,cAAc,CAAC,OAAO,IAAI,CAAC;cAC1D,cAAc,CAAC,OAAO;cACtB,wBAAwB,CAAC;AAC/B,QAAA,MAAM,eAAe,GAAG,IAAI,eAAe,EAAE,CAAC;AAC9C,QAAA,cAAc,GAAG,UAAU,CAAC,MAAM,eAAe,CAAC,KAAK,EAAE,EAAE,aAAa,CAAC,CAAC;QAC1E,OAAO,CAAC,YAAY,CAAC,MAAM,GAAG,eAAe,CAAC,MAAM,CAAC;AAErD,QAAA,QAAQ,GAAG,MAAM,KAAK,CAAC,OAAO,CAAC,GAAG,EAAE,OAAO,CAAC,YAAY,CAAC,CAAC;AAC1D,QAAA,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE;YAChB,IAAI,OAAO,GAAG,EAAE,CAAC;AACjB,YAAA,IAAI,YAAY,CAAC;AACjB,YAAA,IAAI;AACF,gBAAA,MAAM,IAAI,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;AACnC,gBAAA,OAAO,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC;AAC7B,gBAAA,IAAI,IAAI,CAAC,KAAK,CAAC,OAAO,EAAE;AACtB,oBAAA,OAAO,IAAI,CAAA,CAAA,EAAI,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,EAAE,CAAC;AACpD,oBAAA,YAAY,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC;iBACnC;aACF;YAAC,OAAO,CAAC,EAAE;;aAEX;AACD,YAAA,IACE,QAAQ,CAAC,MAAM,KAAK,GAAG;gBACvB,YAAY;AACZ,gBAAA,YAAY,CAAC,IAAI,CACf,CAAC,MAAoB,KAAK,MAAM,CAAC,MAAM,KAAK,kBAAkB,CAC/D;gBACD,YAAY,CAAC,IAAI,CAAC,CAAC,MAAoB,KAEnC,MAAM,CAAC,KACR,GAAG,CAAC,CAAC,EAAE,WAAW,CAAC,QAAQ,CAC1B,0CAA0C,CAC3C,CACF,EACD;AACA,gBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA+C,6CAAA,CAAA;oBAC7C,CAAgE,8DAAA,CAAA;oBAChE,CAAqE,mEAAA,CAAA;AACrE,oBAAA,CAAA,+CAAA,EAAkD,GAAG,CAAC,WAAW,CAAC,OAAO,CAAU,QAAA,CAAA;oBACnF,CAAgE,8DAAA,CAAA;oBAChE,CAAoE,kEAAA,CAAA;AACpE,oBAAA,CAAA,WAAA,CAAa,EACf;oBACE,MAAM,EAAE,QAAQ,CAAC,MAAM;oBACvB,UAAU,EAAE,QAAQ,CAAC,UAAU;oBAC/B,YAAY;AACb,iBAAA,CACF,CAAC;aACH;YACD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,uBAAuB,GAAG,CAAA,GAAA,EAAM,QAAQ,CAAC,MAAM,IAAI,QAAQ,CAAC,UAAU,CAAK,EAAA,EAAA,OAAO,EAAE,EACpF;gBACE,MAAM,EAAE,QAAQ,CAAC,MAAM;gBACvB,UAAU,EAAE,QAAQ,CAAC,UAAU;gBAC/B,YAAY;AACb,aAAA,CACF,CAAC;SACH;KACF;IAAC,OAAO,CAAC,EAAE;QACV,IAAI,GAAG,GAAG,CAAU,CAAC;AACrB,QAAA,IACG,CAAa,CAAC,IAAI,KAAK,WAAW,CAAC,WAAW;AAC9C,YAAA,CAAa,CAAC,IAAI,KAAK,WAAW,CAAC,eAAe;YACnD,CAAC,YAAY,KAAK,EAClB;AACA,YAAA,GAAG,GAAG,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,oBAAA,EAAuB,GAAG,CAAC,QAAQ,EAAE,CAAK,EAAA,EAAA,CAAC,CAAC,OAAO,CAAA,CAAE,CACtD,CAAC;AACF,YAAA,GAAG,CAAC,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC;SACrB;AAED,QAAA,MAAM,GAAG,CAAC;KACX;YAAS;QACR,IAAI,cAAc,EAAE;YAClB,YAAY,CAAC,cAAc,CAAC,CAAC;SAC9B;KACF;AACD,IAAA,OAAO,QAAQ,CAAC;AAClB;;AC7QA;;;;;;;;;;;;;;;AAeG;AAmBH;;;AAGG;AACH,SAAS,kBAAkB,CAAC,QAAiC,EAAA;AAC3D,IAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;QACzD,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;YAClC,MAAM,CAAC,IAAI,CACT,CAAA,kBAAA,EAAqB,
QAAQ,CAAC,UAAU,CAAC,MAAM,CAAG,CAAA,CAAA;gBAChD,CAA4D,0DAAA,CAAA;AAC5D,gBAAA,CAAA,gEAAA,CAAkE,CACrE,CAAC;SACH;QACD,IAAI,kBAAkB,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9C,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,gBAAA,EAAmB,uBAAuB,CACxC,QAAQ,CACT,0CAA0C,EAC3C;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,IAAI,CAAC;KACb;SAAM;AACL,QAAA,OAAO,KAAK,CAAC;KACd;AACH,CAAC;AAED;;;AAGG;AACG,SAAU,6BAA6B,CAC3C,QAAiC,EACjC,eAAmC,GAAA,eAAe,CAAC,QAAQ,EAAA;AAE3D;;;;;AAKG;AACH,IAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,OAAO,CAAC,EAAE;QAC1E,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC;KAClC;AAED,IAAA,MAAM,mBAAmB,GAAG,UAAU,CAAC,QAAQ,CAAC,CAAC;AACjD,IAAA,mBAAmB,CAAC,eAAe,GAAG,eAAe,CAAC;AACtD,IAAA,OAAO,mBAAmB,CAAC;AAC7B,CAAC;AAED;;;AAGG;AACG,SAAU,UAAU,CACxB,QAAiC,EAAA;AAEhC,IAAA,QAA4C,CAAC,IAAI,GAAG,MAAK;AACxD,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;SACjD;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,oBAAA,EAAuB,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EAC1D;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,EAAE,CAAC;AACZ,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,cAAc,GAAG,MAAK;AAClE,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,MAAM,MAAM,GAAG,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YACzD,OAAO,MAAM,KAAK,EAAE,GAAG,SAAS,GAAG,MAAM,CAAC;SAC3C;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,+BAAA,EAAkC,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EACrE;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,eAAe,GAAG,MAEhD;AACd,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,kBAAkB,CAAC,QAAQ,CAAC,CAAC;SACrC;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,oBAAA,EAAuB,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EAC1D;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,aAAa,GAAG,MAAK;AACjE,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,gBAAgB,CAAC,QAAQ,CAAC,CAAC;SACnC;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,6BAAA,EAAgC,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EACnE;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACF,IAAA,OAAO,QAA2C,CAAC;AACrD,CAAC;AAED;;;;;;AAMG;AACa,SAAA,OAAO,CACrB,QAAiC,EACjC,UAAmC,EAAA;IAEnC,MAAM,WAAW,GAAG,EAAE,CAAC;AACvB,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;YAC1D,IAAI,IAAI,CAAC,IAAI,IAAI,UAAU,CAAC,IAAI,CAAC,EAAE;AACjC,gBAAA,WAAW,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;aAC7B;SACF;KACF;AACD,IAAA,IAAI,WAAW,CAAC,MAAM,GAAG,CAAC,EAAE;AAC1B,QAAA,OAAO,WAAW,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;KAC7B;SAAM;AACL,QAAA,OAAO,EAAE,CAAC;KACX;AACH,CAAC;AAED;;AAEG;AACG,SAAU,gBAAgB,CAC9B,QAAiC,EAAA;IAEjC,MAAM,aAAa,GAAmB,EAAE,CAAC;AACzC,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC1D,YAAA,IAAI,IAAI,CAAC,YAAY,EAAE;AACrB,gBAAA,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;aACvC;SACF;KACF;AACD,IAAA,IAAI,aAAa,CAAC,MAAM,GAAG,CAAC,EAAE;AAC5B,QAAA,OAAO,aAAa,CAAC;KACtB;SAAM;AACL,QAAA,OAAO,SAAS,CAAC;KAClB;AACH,CAAC;AAED;;;;AAIG;AACG,SAAU,kBAAkB,CAChC,QAAiC,EAAA;IAEjC,MAAM,IAAI,GAAqB,EAAE,CAAC;AAElC,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAA
C,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC1D,YAAA,IAAI,IAAI,CAAC,UAAU,EAAE;AACnB,gBAAA,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;aACjB;SACF;KACF;AAED,IAAA,IAAI,IAAI,CAAC,MAAM,GAAG,CAAC,EAAE;AACnB,QAAA,OAAO,IAAI,CAAC;KACb;SAAM;AACL,QAAA,OAAO,SAAS,CAAC;KAClB;AACH,CAAC;AAED,MAAM,gBAAgB,GAAG,CAAC,YAAY,CAAC,UAAU,EAAE,YAAY,CAAC,MAAM,CAAC,CAAC;AAExE,SAAS,kBAAkB,CAAC,SAAmC,EAAA;AAC7D,IAAA,QACE,CAAC,CAAC,SAAS,CAAC,YAAY;AACxB,QAAA,gBAAgB,CAAC,IAAI,CAAC,MAAM,IAAI,MAAM,KAAK,SAAS,CAAC,YAAY,CAAC,EAClE;AACJ,CAAC;AAEK,SAAU,uBAAuB,CACrC,QAAiC,EAAA;IAEjC,IAAI,OAAO,GAAG,EAAE,CAAC;AACjB,IAAA,IACE,CAAC,CAAC,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,KAAK,CAAC;QACzD,QAAQ,CAAC,cAAc,EACvB;QACA,OAAO,IAAI,sBAAsB,CAAC;AAClC,QAAA,IAAI,QAAQ,CAAC,cAAc,EAAE,WAAW,EAAE;YACxC,OAAO,IAAI,WAAW,QAAQ,CAAC,cAAc,CAAC,WAAW,EAAE,CAAC;SAC7D;AACD,QAAA,IAAI,QAAQ,CAAC,cAAc,EAAE,kBAAkB,EAAE;YAC/C,OAAO,IAAI,KAAK,QAAQ,CAAC,cAAc,CAAC,kBAAkB,EAAE,CAAC;SAC9D;KACF;SAAM,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,EAAE;QACnC,MAAM,cAAc,GAAG,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC;AAC9C,QAAA,IAAI,kBAAkB,CAAC,cAAc,CAAC,EAAE;AACtC,YAAA,OAAO,IAAI,CAAgC,6BAAA,EAAA,cAAc,CAAC,YAAY,EAAE,CAAC;AACzE,YAAA,IAAI,cAAc,CAAC,aAAa,EAAE;AAChC,gBAAA,OAAO,IAAI,CAAK,EAAA,EAAA,cAAc,CAAC,aAAa,EAAE,CAAC;aAChD;SACF;KACF;AACD,IAAA,OAAO,OAAO,CAAC;AACjB,CAAC;AAED;;;;;;AAMG;AACI,eAAe,qBAAqB,CAEzC,QAAkB,EAAA;AAClB,IAAA,MAAM,YAAY,GAA2B,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;IAEnE,MAAM,MAAM,GAAQ,EAAE,CAAC;IACvB,IAAI,cAAc,GAAuB,SAAS,CAAC;;AAGnD,IAAA,IAAI,CAAC,YAAY,CAAC,WAAW,IAAI,YAAY,CAAC,WAAW,EAAE,MAAM,KAAK,CAAC,EAAE;QACvE,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,wKAAwK,CACzK,CAAC;KACH;AAED,IAAA,KAAK,MAAM,UAAU,IAAI,YAAY,CAAC,WAAW,EAAE;AACjD,QAAA,IAAI,UAAU,CAAC,iBAAiB,EAAE;AAChC,YAAA,cAAc,GAAG,UAAU,CAAC,iBAAiB,CAAC;SAC/C;aAAM,IAAI,UAAU,CAAC,QAAQ,IAAI,UAAU,CAAC,kBAAkB,EAAE;YAC/D,MAAM,CAAC,IAAI,CAAC;gBACV,QAAQ,EAAE,UAAU,CAAC,QAAQ;gBAC7B,kBAAkB,EAAE,UAAU,CAAC,kBAAkB;AAC7C,aAAA,CAAC,CAAC;SACT;aAAM,IAAI,UAAU,CAAC,QAAQ,IAAI,UAAU,CAAC,MAAM,EAAE;YACnD,MAAM,CAAC,IAAI,CAAC;gBACV,QAAQ,EAAE,UAAU,CAAC,QAAQ;gBAC7B,MAAM,EAAE,UAAU,CAAC,MAAM;AACrB,aAAA,CAAC,CAAC;SACT;AAAM,aAAA,IAAI,UAAU,CAAC,gBAAgB,EAAE,CAEvC;aAAM;AACL,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,wDAAA,EAA2D,IAAI,CAAC,SAAS,CACvE,UAAU,CACX,CAAA,CAAA,CAAG,CACL,CAAC;SACH;KACF;AAED,IAAA,OAAO,EAAE,MAAM,EAAE,cAAc,EAAE,CAAC;AACpC;;ACzTA;;;;;;;;;;;;;;;AAeG;AAsBH;;;;;;;;;;AAUG;AAEH;;;;;;;;;AASG;AACG,SAAU,yBAAyB,CACvC,sBAA8C,EAAA;AAE9C,IAAA,sBAAsB,CAAC,cAAc,EAAE,OAAO,CAAC,aAAa,IAAG;AAC7D,QAAA,IAAI,aAAa,CAAC,MAAM,EAAE;YACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,qGAAqG,CACtG,CAAC;SACH;AACH,KAAC,CAAC,CAAC;AAEH,IAAA,IAAI,sBAAsB,CAAC,gBAAgB,EAAE,IAAI,EAAE;AACjD,QAAA,MAAM,WAAW,GAAG,IAAI,CAAC,KAAK,CAC5B,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,CAC7C,CAAC;QAEF,IAAI,WAAW,KAAK,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,EAAE;AAChE,YAAA,MAAM,CAAC,IAAI,CACT,gIAAgI,CACjI,CAAC;AACF,YAAA,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,GAAG,WAAW,CAAC;SAC5D;KACF;AAED,IAAA,OAAO,sBAAsB,CAAC;AAChC,CAAC;AAED;;;;;;;;AAQG;AACG,SAAU,0BAA0B,CACxC,gBAAiD,EAAA;AAEjD,IAAA,MAAM,uBAAuB,GAAG;QAC9B,UAAU,EAAE,gBAAgB,CAAC,UAAU;AACrC,cAAE,4BAA4B,CAAC,gBAAgB,CAAC,UAAU,CAAC;AAC3D,cAAE,SAAS;QACb,MAAM,EAAE,gBAAgB,CAAC,cAAc;AACrC,cAAE,iBAAiB,CAAC,gBAAgB,CAAC,cAAc,CAAC;AACpD,cAAE,SAAS;QACb,aAAa,EAAE,gBAAgB,CAAC,aAAa;KAC9C,CAAC;AAEF,IAAA,OAAO,uBAAuB,CAAC;AACjC,CAAC;AAED;;;;;;;;AAQG;AACa,SAAA,qBAAqB,CACnC,kBAAsC,EACtC,KAAa,EAAA;AAEb,IAAA,MAAM,wBAAwB,GAA+B;AAC3D,QAAA,sBAAsB,EAAE;YACtB,KAAK;AACL,YAAA,GAAG,kBAAkB;AACtB,SAAA;KACF,CAAC;AAEF,IAAA,OAAO,wBAAwB,CAAC;AAClC,CAAC;AAED;;;;;;;;;;AAUG;AACG,SAAU,4BAA4B,CAC1C,UAA8C,EAAA;IAE9C,MAAM,gBAAgB,GAA+B,EAAE,CA
AC;AACxD,IAAA,IAAI,mBAAmC,CAAC;IACxC,IAAI,gBAAgB,EAAE;AACpB,QAAA,UAAU,CAAC,OAAO,CAAC,SAAS,IAAG;;AAE7B,YAAA,IAAI,gBAA8C,CAAC;AACnD,YAAA,IAAI,SAAS,CAAC,gBAAgB,EAAE;AAC9B,gBAAA,gBAAgB,GAAG;AACjB,oBAAA,SAAS,EAAE,SAAS,CAAC,gBAAgB,CAAC,eAAe;iBACtD,CAAC;aACH;;AAGD,YAAA,IAAI,SAAS,CAAC,aAAa,EAAE;gBAC3B,mBAAmB,GAAG,SAAS,CAAC,aAAa,CAAC,GAAG,CAAC,YAAY,IAAG;oBAC/D,OAAO;AACL,wBAAA,GAAG,YAAY;AACf,wBAAA,QAAQ,EACN,YAAY,CAAC,QAAQ,IAAI,YAAY,CAAC,yBAAyB;AACjE,wBAAA,gBAAgB,EAAE,YAAY,CAAC,gBAAgB,IAAI,CAAC;AACpD,wBAAA,aAAa,EAAE,YAAY,CAAC,aAAa,IAAI,CAAC;qBAC/C,CAAC;AACJ,iBAAC,CAAC,CAAC;aACJ;;;;AAKD,YAAA,IACE,SAAS,CAAC,OAAO,EAAE,KAAK,EAAE,IAAI,CAC5B,IAAI,IAAK,IAAuB,EAAE,aAAa,CAChD,EACD;gBACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,+FAA+F,CAChG,CAAC;aACH;AAED,YAAA,MAAM,eAAe,GAAG;gBACtB,KAAK,EAAE,SAAS,CAAC,KAAK;gBACtB,OAAO,EAAE,SAAS,CAAC,OAAO;gBAC1B,YAAY,EAAE,SAAS,CAAC,YAAY;gBACpC,aAAa,EAAE,SAAS,CAAC,aAAa;AACtC,gBAAA,aAAa,EAAE,mBAAmB;gBAClC,gBAAgB;gBAChB,iBAAiB,EAAE,SAAS,CAAC,iBAAiB;gBAC9C,kBAAkB,EAAE,SAAS,CAAC,kBAAkB;aACjD,CAAC;AACF,YAAA,gBAAgB,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;AACzC,SAAC,CAAC,CAAC;KACJ;AAED,IAAA,OAAO,gBAAgB,CAAC;AAC1B,CAAC;AAEK,SAAU,iBAAiB,CAC/B,cAA8B,EAAA;;IAG9B,MAAM,mBAAmB,GAAmB,EAAE,CAAC;AAC/C,IAAA,cAAc,CAAC,aAAa,CAAC,OAAO,CAAC,YAAY,IAAG;QAClD,mBAAmB,CAAC,IAAI,CAAC;YACvB,QAAQ,EAAE,YAAY,CAAC,QAAQ;YAC/B,WAAW,EAAE,YAAY,CAAC,WAAW;AACrC,YAAA,QAAQ,EAAE,YAAY,CAAC,QAAQ,IAAI,YAAY,CAAC,yBAAyB;AACzE,YAAA,gBAAgB,EAAE,YAAY,CAAC,gBAAgB,IAAI,CAAC;AACpD,YAAA,aAAa,EAAE,YAAY,CAAC,aAAa,IAAI,CAAC;YAC9C,OAAO,EAAE,YAAY,CAAC,OAAO;AAC9B,SAAA,CAAC,CAAC;AACL,KAAC,CAAC,CAAC;AAEH,IAAA,MAAM,oBAAoB,GAAmB;QAC3C,WAAW,EAAE,cAAc,CAAC,WAAW;AACvC,QAAA,aAAa,EAAE,mBAAmB;QAClC,kBAAkB,EAAE,cAAc,CAAC,kBAAkB;KACtD,CAAC;AACF,IAAA,OAAO,oBAAoB,CAAC;AAC9B;;ACnOA;;;;;;;;;;;;;;;AAeG;AAqBH,MAAM,cAAc,GAAG,oCAAoC,CAAC;AAE5D;;;;;;;AAOG;SACa,aAAa,CAC3B,QAAkB,EAClB,WAAwB,EACxB,eAAiC,EAAA;IAEjC,MAAM,WAAW,GAAG,QAAQ,CAAC,IAAK,CAAC,WAAW,CAC5C,IAAI,iBAAiB,CAAC,MAAM,EAAE,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAC/C,CAAC;AACF,IAAA,MAAM,cAAc,GAClB,iBAAiB,CAA0B,WAAW,CAAC,CAAC;IAC1D,MAAM,CAAC,OAAO,EAAE,OAAO,CAAC,GAAG,cAAc,CAAC,GAAG,EAAE,CAAC;IAChD,OAAO;QACL,MAAM,EAAE,wBAAwB,CAAC,OAAO,EAAE,WAAW,EAAE,eAAe,CAAC;QACvE,QAAQ,EAAE,kBAAkB,CAAC,OAAO,EAAE,WAAW,EAAE,eAAe,CAAC;KACpE,CAAC;AACJ,CAAC;AAED,eAAe,kBAAkB,CAC/B,MAA+C,EAC/C,WAAwB,EACxB,eAAiC,EAAA;IAEjC,MAAM,YAAY,GAA8B,EAAE,CAAC;AACnD,IAAA,MAAM,MAAM,GAAG,MAAM,CAAC,SAAS,EAAE,CAAC;IAClC,OAAO,IAAI,EAAE;QACX,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QAC5C,IAAI,IAAI,EAAE;AACR,YAAA,IAAI,uBAAuB,GAAG,kBAAkB,CAAC,YAAY,CAAC,CAAC;YAC/D,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,gBAAA,uBAAuB,GAAGA,0BAAyC,CACjE,uBAA0D,CAC3D,CAAC;aACH;AACD,YAAA,OAAO,6BAA6B,CAClC,uBAAuB,EACvB,eAAe,CAChB,CAAC;SACH;AAED,QAAA,YAAY,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;KAC1B;AACH,CAAC;AAED,gBAAgB,wBAAwB,CACtC,MAA+C,EAC/C,WAAwB,EACxB,eAAiC,EAAA;AAEjC,IAAA,MAAM,MAAM,GAAG,MAAM,CAAC,SAAS,EAAE,CAAC;IAClC,OAAO,IAAI,EAAE;QACX,MAAM,EAAE,KAAK,EAAE,IAAI,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QAC5C,IAAI,IAAI,EAAE;YACR,MAAM;SACP;AAED,QAAA,IAAI,gBAAiD,CAAC;QACtD,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,YAAA,gBAAgB,GAAG,6BAA6B,CAC9CA,0BAAyC,CACvC,KAAwC,CACzC,EACD,eAAe,CAChB,CAAC;SACH;aAAM;AACL,YAAA,gBAAgB,GAAG,6BAA6B,CAAC,KAAK,EAAE,eAAe,CAAC,CAAC;SAC1E;QAED,MAAM,cAAc,GAAG,gBAAgB,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;;AAExD,QAAA,IACE,CAAC,cAAc,EAAE,OAAO,EAAE,KAAK;YAC/B,CAAC,cAAc,EAAE,YAAY;YAC7B,CAAC,cAAc,EAAE,gBAAgB;AACjC,YAAA,CAAC,cAAc,EAAE,kBAAkB,EACnC;YACA,SAAS;SACV;AAED,QAAA,MAAM,gBAAgB,CAAC;KACxB;AACH,CAAC;AAED;;;;AAIG;AACG,
SAAU,iBAAiB,CAC/B,WAAmC,EAAA;AAEnC,IAAA,MAAM,MAAM,GAAG,WAAW,CAAC,SAAS,EAAE,CAAC;AACvC,IAAA,MAAM,MAAM,GAAG,IAAI,cAAc,CAAI;AACnC,QAAA,KAAK,CAAC,UAAU,EAAA;YACd,IAAI,WAAW,GAAG,EAAE,CAAC;YACrB,OAAO,IAAI,EAAE,CAAC;AACd,YAAA,SAAS,IAAI,GAAA;AACX,gBAAA,OAAO,MAAM,CAAC,IAAI,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,EAAE,IAAI,EAAE,KAAI;oBAC5C,IAAI,IAAI,EAAE;AACR,wBAAA,IAAI,WAAW,CAAC,IAAI,EAAE,EAAE;AACtB,4BAAA,UAAU,CAAC,KAAK,CACd,IAAI,OAAO,CAAC,WAAW,CAAC,YAAY,EAAE,wBAAwB,CAAC,CAChE,CAAC;4BACF,OAAO;yBACR;wBACD,UAAU,CAAC,KAAK,EAAE,CAAC;wBACnB,OAAO;qBACR;oBAED,WAAW,IAAI,KAAK,CAAC;oBACrB,IAAI,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC;AAC9C,oBAAA,IAAI,cAAiB,CAAC;oBACtB,OAAO,KAAK,EAAE;AACZ,wBAAA,IAAI;4BACF,cAAc,GAAG,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;yBACvC;wBAAC,OAAO,CAAC,EAAE;AACV,4BAAA,UAAU,CAAC,KAAK,CACd,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,CAAA,8BAAA,EAAiC,KAAK,CAAC,CAAC,CAAC,CAAE,CAAA,CAC5C,CACF,CAAC;4BACF,OAAO;yBACR;AACD,wBAAA,UAAU,CAAC,OAAO,CAAC,cAAc,CAAC,CAAC;AACnC,wBAAA,WAAW,GAAG,WAAW,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;AACrD,wBAAA,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC;qBAC3C;oBACD,OAAO,IAAI,EAAE,CAAC;AAChB,iBAAC,CAAC,CAAC;aACJ;SACF;AACF,KAAA,CAAC,CAAC;AACH,IAAA,OAAO,MAAM,CAAC;AAChB,CAAC;AAED;;;AAGG;AACG,SAAU,kBAAkB,CAChC,SAAoC,EAAA;IAEpC,MAAM,YAAY,GAAG,SAAS,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;AACrD,IAAA,MAAM,kBAAkB,GAA4B;QAClD,cAAc,EAAE,YAAY,EAAE,cAAc;KAC7C,CAAC;AACF,IAAA,KAAK,MAAM,QAAQ,IAAI,SAAS,EAAE;AAChC,QAAA,IAAI,QAAQ,CAAC,UAAU,EAAE;AACvB,YAAA,KAAK,MAAM,SAAS,IAAI,QAAQ,CAAC,UAAU,EAAE;;;AAG3C,gBAAA,MAAM,CAAC,GAAG,SAAS,CAAC,KAAK,IAAI,CAAC,CAAC;AAC/B,gBAAA,IAAI,CAAC,kBAAkB,CAAC,UAAU,EAAE;AAClC,oBAAA,kBAAkB,CAAC,UAAU,GAAG,EAAE,CAAC;iBACpC;gBACD,IAAI,CAAC,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE;AACrC,oBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,GAAG;wBACjC,KAAK,EAAE,SAAS,CAAC,KAAK;qBACK,CAAC;iBAC/B;;AAED,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,gBAAgB;oBAC/C,SAAS,CAAC,gBAAgB,CAAC;gBAC7B,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,YAAY,GAAG,SAAS,CAAC,YAAY,CAAC;AACvE,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,aAAa;oBAC5C,SAAS,CAAC,aAAa,CAAC;AAC1B,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,aAAa;oBAC5C,SAAS,CAAC,aAAa,CAAC;AAC1B,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,iBAAiB;oBAChD,SAAS,CAAC,iBAAiB,CAAC;;;;;AAM9B,gBAAA,MAAM,kBAAkB,GAAG,SAAS,CAAC,kBAA6B,CAAC;gBACnE,IACE,OAAO,kBAAkB,KAAK,QAAQ;AACtC,oBAAA,kBAAkB,KAAK,IAAI;oBAC3B,MAAM,CAAC,IAAI,CAAC,kBAAkB,CAAC,CAAC,MAAM,GAAG,CAAC,EAC1C;AACA,oBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,kBAAkB;AACjD,wBAAA,kBAAwC,CAAC;iBAC5C;AAED;;;AAGG;AACH,gBAAA,IAAI,SAAS,CAAC,OAAO,EAAE;;AAErB,oBAAA,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,KAAK,EAAE;wBAC5B,SAAS;qBACV;oBACD,IAAI,CAAC,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,EAAE;AAC7C,wBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,GAAG;AACzC,4BAAA,IAAI,EAAE,SAAS,CAAC,OAAO,CAAC,IAAI,IAAI,MAAM;AACtC,4BAAA,KAAK,EAAE,EAAE;yBACV,CAAC;qBACH;oBACD,KAAK,MAAM,IAAI,IAAI,SAAS,CAAC,OAAO,CAAC,KAAK,EAAE;AAC1C,wBAAA,MAAM,OAAO,GAAS,EAAE,GAAG,IAAI,EAAE,CAAC;;;;AAIlC,wBAAA,IAAI,IAAI,CAAC,IAAI,KAAK,EAAE,EAAE;4BACpB,SAAS;yBACV;wBACD,IAAI,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;AACnC,4BAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CACjD,OAAe,CAChB,CAAC;yBACH;qBACF;iBACF;aACF;SACF;KACF;AACD,IAAA,OAAO,kBAAkB,CAAC;AAC5B;;ACzQA;;;;;;;;;;;;;;;AAeG;AAYH,MAAM,qBAAqB,GAAkB;;AAE3C,IAAA,WAAW,CAAC,WAAW;;AAEvB,IAAA,WAAW,CAAC,KAAK;;AAEjB,IAAA,WAAW,CAAC,eAAe;CAC5B,CAAC;AAOF;;;;;;;;;AASG;AACI,eAAe,iBAAiB,CACrC,OAA+B,EAC/B,aAAwC,EACxC,YAAqC,EACrC,WAAoC,EAAA;IAEpC,IAAI,CAAC,aAAa,EAAE;QAClB,OAAO;YACL,QAAQ,EAAE,MAA
M,WAAW,EAAE;YAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;SAC1C,CAAC;KACH;AACD,IAAA,QAAS,aAAmC,CAAC,IAAI;QAC/C,KAAK,aAAa,CAAC,cAAc;YAC/B,IAAI,MAAM,aAAa,CAAC,WAAW,CAAC,OAAO,CAAC,EAAE;gBAC5C,OAAO;oBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;oBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;iBAC3C,CAAC;aACH;YACD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,4EAA4E,CAC7E,CAAC;QACJ,KAAK,aAAa,CAAC,aAAa;YAC9B,OAAO;gBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;gBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;aAC1C,CAAC;QACJ,KAAK,aAAa,CAAC,eAAe;AAChC,YAAA,IAAI;gBACF,OAAO;oBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;oBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;iBAC1C,CAAC;aACH;YAAC,OAAO,CAAC,EAAE;AACV,gBAAA,IAAI,CAAC,YAAY,OAAO,IAAI,qBAAqB,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE;oBAClE,OAAO;wBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;wBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;qBAC3C,CAAC;iBACH;AACD,gBAAA,MAAM,CAAC,CAAC;aACT;QACH,KAAK,aAAa,CAAC,gBAAgB;YACjC,IAAI,MAAM,aAAa,CAAC,WAAW,CAAC,OAAO,CAAC,EAAE;gBAC5C,OAAO;oBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;oBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;iBAC3C,CAAC;aACH;YACD,OAAO;gBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;gBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;aAC1C,CAAC;AACJ,QAAA;AACE,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,6BAAA,EACG,aAAmC,CAAC,IACvC,CAAA,CAAE,CACH,CAAC;KACL;AACH;;AClHA;;;;;;;;;;;;;;;AAeG;AAkBH,eAAe,4BAA4B,CACzC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,cAA+B,EAAA;IAE/B,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,MAAM,GAAGC,yBAAwC,CAAC,MAAM,CAAC,CAAC;KAC3D;IACD,OAAO,WAAW,CAChB,KAAK,EACL,IAAI,CAAC,uBAAuB,EAC5B,WAAW;AACX,iBAAa,IAAI,EACjB,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,EACtB,cAAc,CACf,CAAC;AACJ,CAAC;AAEM,eAAe,qBAAqB,CACzC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,aAA6B,EAC7B,cAA+B,EAAA;AAE/B,IAAA,MAAM,UAAU,GAAG,MAAM,iBAAiB,CACxC,MAAM,EACN,aAAa,EACb,MAAM,aAAc,CAAC,qBAAqB,CAAC,MAAM,CAAC,EAClD,MACE,4BAA4B,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CAC3E,CAAC;IACF,OAAO,aAAa,CAAC,UAAU,CAAC,QAAQ,EAAE,WAAW,CAAC,CAAC;AACzD,CAAC;AAED,eAAe,sBAAsB,CACnC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,cAA+B,EAAA;IAE/B,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,MAAM,GAAGA,yBAAwC,CAAC,MAAM,CAAC,CAAC;KAC3D;IACD,OAAO,WAAW,CAChB,KAAK,EACL,IAAI,CAAC,gBAAgB,EACrB,WAAW;AACX,iBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,EACtB,cAAc,CACf,CAAC;AACJ,CAAC;AAEM,eAAe,eAAe,CACnC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,aAA6B,EAC7B,cAA+B,EAAA;AAE/B,IAAA,MAAM,UAAU,GAAG,MAAM,iBAAiB,CACxC,MAAM,EACN,aAAa,EACb,MAAM,aAAc,CAAC,eAAe,CAAC,MAAM,CAAC,EAC5C,MAAM,sBAAsB,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CACzE,CAAC;IACF,MAAM,uBAAuB,GAAG,MAAM,8BAA8B,CAClE,UAAU,CAAC,QAAQ,EACnB,WAAW,CACZ,CAAC;IACF,MAAM,gBAAgB,GAAG,6BAA6B,CACpD,uBAAuB,EACvB,UAAU,CAAC,eAAe,CAC3B,CAAC;IACF,OAAO;AACL,QAAA,QAAQ,EAAE,gBAAgB;KAC3B,CAAC;AACJ,CAAC;AAED,eAAe,8BAA8B,CAC3C,QAAkB,EAClB,WAAwB,EAAA;AAExB,IAAA,MAAM,YAAY,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;IAC3C,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,OAAOD,0BAAyC,CAAC,YAAY,CAAC,CAAC;KAChE;SAAM;AACL,QAAA,OAAO,YAAY,CAAC;KACrB;AACH;;AC5HA;;;;;;;;;;;;;;;AAeG;AAMG,SAAU,uBAAuB,CACrC,KAA+B,EAAA;;AAG/B,IAAA,IAAI,KAAK,IAAI,IAAI,EAAE;AACjB,QAAA,OAAO,SAAS,CAAC;KAClB;AAAM,SAAA,IAAI,OAAO,KAAK,KAAK,QAAQ,EAAE;AACpC,QAAA,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,CAAC,EAAa,CAAC;KAChE;AAAM,SAAA,IAAK,KAAc,CAAC,IAAI,EAAE;QAC/B,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC,KAAa,CAAC,EAAE,CAAC;KACnD;AAAM,SAAA,IAAK,KAAiB,CAAC,KAAK,EAAE;AACnC,QAAA,IAAI,CAAE,KAAiB,CAAC,IAAI,EAAE;YAC5B,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAG,KAAiB,CAAC,KAAK,EAAE,CAAC;SAC5D;aAAM;AACL,YAAA,OAAO,KAAgB,CAAC;SACzB;KACF;AACH,CAAC;AAEK,SAAU,gBAAgB,CAC9B,OAAsC,EAAA;IAEtC,IAAI,QAAQ,GAAW,EAAE,CAAC;AAC1B,IAAA,IAAI,
OAAO,OAAO,KAAK,QAAQ,EAAE;QAC/B,QAAQ,GAAG,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC;KAChC;SAAM;AACL,QAAA,KAAK,MAAM,YAAY,IAAI,OAAO,EAAE;AAClC,YAAA,IAAI,OAAO,YAAY,KAAK,QAAQ,EAAE;gBACpC,QAAQ,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;aACvC;iBAAM;AACL,gBAAA,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;aAC7B;SACF;KACF;AACD,IAAA,OAAO,8CAA8C,CAAC,QAAQ,CAAC,CAAC;AAClE,CAAC;AAED;;;;;;;AAOG;AACH,SAAS,8CAA8C,CACrD,KAAa,EAAA;IAEb,MAAM,WAAW,GAAY,EAAE,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,EAAE,EAAE,CAAC;IACzD,MAAM,eAAe,GAAY,EAAE,IAAI,EAAE,UAAU,EAAE,KAAK,EAAE,EAAE,EAAE,CAAC;IACjE,IAAI,cAAc,GAAG,KAAK,CAAC;IAC3B,IAAI,kBAAkB,GAAG,KAAK,CAAC;AAC/B,IAAA,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;AACxB,QAAA,IAAI,kBAAkB,IAAI,IAAI,EAAE;AAC9B,YAAA,eAAe,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YACjC,kBAAkB,GAAG,IAAI,CAAC;SAC3B;aAAM;AACL,YAAA,WAAW,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YAC7B,cAAc,GAAG,IAAI,CAAC;SACvB;KACF;AAED,IAAA,IAAI,cAAc,IAAI,kBAAkB,EAAE;QACxC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,4HAA4H,CAC7H,CAAC;KACH;AAED,IAAA,IAAI,CAAC,cAAc,IAAI,CAAC,kBAAkB,EAAE;QAC1C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,kDAAkD,CACnD,CAAC;KACH;IAED,IAAI,cAAc,EAAE;AAClB,QAAA,OAAO,WAAW,CAAC;KACpB;AAED,IAAA,OAAO,eAAe,CAAC;AACzB,CAAC;AAEK,SAAU,0BAA0B,CACxC,MAA8D,EAAA;AAE9D,IAAA,IAAI,gBAAwC,CAAC;AAC7C,IAAA,IAAK,MAAiC,CAAC,QAAQ,EAAE;QAC/C,gBAAgB,GAAG,MAAgC,CAAC;KACrD;SAAM;;AAEL,QAAA,MAAM,OAAO,GAAG,gBAAgB,CAAC,MAAuC,CAAC,CAAC;QAC1E,gBAAgB,GAAG,EAAE,QAAQ,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC;KAC5C;AACD,IAAA,IAAK,MAAiC,CAAC,iBAAiB,EAAE;QACxD,gBAAgB,CAAC,iBAAiB,GAAG,uBAAuB,CACzD,MAAiC,CAAC,iBAAiB,CACrD,CAAC;KACH;AACD,IAAA,OAAO,gBAAgB,CAAC;AAC1B,CAAC;AAED;;;;;AAKG;AACG,SAAU,wBAAwB,CACtC,MAAc,EACd,EACE,MAAM,EACN,WAAW,EACX,YAAY,EACZ,cAAc,GAAG,CAAC,EAClB,cAAc,EACd,WAAW,EACX,iBAAiB,EACjB,iBAAiB,EACM,EAAA;;AAGzB,IAAA,MAAM,IAAI,GAAuB;AAC/B,QAAA,SAAS,EAAE;AACT,YAAA;gBACE,MAAM;AACP,aAAA;AACF,SAAA;AACD,QAAA,UAAU,EAAE;AACV,YAAA,UAAU,EAAE,MAAM;YAClB,cAAc;AACd,YAAA,WAAW,EAAE,cAAc;YAC3B,WAAW;AACX,YAAA,aAAa,EAAE,WAAW;YAC1B,YAAY;YACZ,iBAAiB;AACjB,YAAA,gBAAgB,EAAE,iBAAiB;AACnC,YAAA,gBAAgB,EAAE,IAAI;AACtB,YAAA,uBAAuB,EAAE,IAAI;AAC9B,SAAA;KACF,CAAC;AACF,IAAA,OAAO,IAAI,CAAC;AACd;;ACnKA;;;;;;;;;;;;;;;AAeG;AAKH;AAEA,MAAM,iBAAiB,GAAsB;IAC3C,MAAM;IACN,YAAY;IACZ,cAAc;IACd,kBAAkB;IAClB,SAAS;IACT,kBAAkB;CACnB,CAAC;AAEF,MAAM,oBAAoB,GAAyC;AACjE,IAAA,IAAI,EAAE,CAAC,MAAM,EAAE,YAAY,CAAC;IAC5B,QAAQ,EAAE,CAAC,kBAAkB,CAAC;IAC9B,KAAK,EAAE,CAAC,MAAM,EAAE,cAAc,EAAE,SAAS,EAAE,kBAAkB,CAAC;;IAE9D,MAAM,EAAE,CAAC,MAAM,CAAC;CACjB,CAAC;AAEF,MAAM,4BAA4B,GAA8B;IAC9D,IAAI,EAAE,CAAC,OAAO,CAAC;IACf,QAAQ,EAAE,CAAC,OAAO,CAAC;AACnB,IAAA,KAAK,EAAE,CAAC,MAAM,EAAE,UAAU,CAAC;;AAE3B,IAAA,MAAM,EAAE,EAAE;CACX,CAAC;AAEI,SAAU,mBAAmB,CAAC,OAAkB,EAAA;IACpD,IAAI,WAAW,GAAmB,IAAI,CAAC;AACvC,IAAA,KAAK,MAAM,WAAW,IAAI,OAAO,EAAE;AACjC,QAAA,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,WAAW,CAAC;AACpC,QAAA,IAAI,CAAC,WAAW,IAAI,IAAI,KAAK,MAAM,EAAE;YACnC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAiD,8CAAA,EAAA,IAAI,CAAE,CAAA,CACxD,CAAC;SACH;QACD,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,IAAI,CAAC,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA4C,yCAAA,EAAA,IAAI,CAAyB,sBAAA,EAAA,IAAI,CAAC,SAAS,CACrF,cAAc,CACf,CAAA,CAAE,CACJ,CAAC;SACH;QAED,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE;YACzB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA6D,2DAAA,CAAA,CAC9D,CAAC;SACH;AAED,QAAA,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;YACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA4C,0CAAA,CAAA,CAC7C,CAAC;SACH;AAED,QAAA,MAAM,WAAW,GAA+B;AAC9C,YAAA,IAAI,EAAE,CAAC;AACP,YAAA,UAAU,EAAE,CAAC;AACb,YAAA,YAAY,EAAE,CAAC;AACf,YAAA,gBAAgB,EAAE,CAAC;AACnB,YAAA,OAAO,EAAE,CAAC;AACV,YAAA,
gBAAgB,EAAE,CAAC;AACnB,YAAA,cAAc,EAAE,CAAC;AACjB,YAAA,mBAAmB,EAAE,CAAC;SACvB,CAAC;AAEF,QAAA,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;AACxB,YAAA,KAAK,MAAM,GAAG,IAAI,iBAAiB,EAAE;AACnC,gBAAA,IAAI,GAAG,IAAI,IAAI,EAAE;AACf,oBAAA,WAAW,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;iBACvB;aACF;SACF;AACD,QAAA,MAAM,UAAU,GAAG,oBAAoB,CAAC,IAAI,CAAC,CAAC;AAC9C,QAAA,KAAK,MAAM,GAAG,IAAI,iBAAiB,EAAE;AACnC,YAAA,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;AACrD,gBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAA,mBAAA,EAAsB,IAAI,CAAA,iBAAA,EAAoB,GAAG,CAAA,MAAA,CAAQ,CAC1D,CAAC;aACH;SACF;QAED,IAAI,WAAW,EAAE;AACf,YAAA,MAAM,yBAAyB,GAAG,4BAA4B,CAAC,IAAI,CAAC,CAAC;YACrE,IAAI,CAAC,yBAAyB,CAAC,QAAQ,CAAC,WAAW,CAAC,IAAI,CAAC,EAAE;gBACzD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAsB,mBAAA,EAAA,IAAI,CACxB,gBAAA,EAAA,WAAW,CAAC,IACd,CAAA,yBAAA,EAA4B,IAAI,CAAC,SAAS,CACxC,4BAA4B,CAC7B,CAAE,CAAA,CACJ,CAAC;aACH;SACF;QACD,WAAW,GAAG,WAAW,CAAC;KAC3B;AACH;;AC3HA;;;;;;;;;;;;;;;AAeG;AAmBH;;AAEG;AACH,MAAM,YAAY,GAAG,cAAc,CAAC;AAEpC;;;;;AAKG;MACU,WAAW,CAAA;IAKtB,WACE,CAAA,WAAwB,EACjB,KAAa,EACZ,aAA6B,EAC9B,MAAwB,EACxB,cAA+B,EAAA;QAH/B,IAAK,CAAA,KAAA,GAAL,KAAK,CAAQ;QACZ,IAAa,CAAA,aAAA,GAAb,aAAa,CAAgB;QAC9B,IAAM,CAAA,MAAA,GAAN,MAAM,CAAkB;QACxB,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;QARhC,IAAQ,CAAA,QAAA,GAAc,EAAE,CAAC;AACzB,QAAA,IAAA,CAAA,YAAY,GAAkB,OAAO,CAAC,OAAO,EAAE,CAAC;AAStD,QAAA,IAAI,CAAC,YAAY,GAAG,WAAW,CAAC;AAChC,QAAA,IAAI,MAAM,EAAE,OAAO,EAAE;AACnB,YAAA,mBAAmB,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;AACpC,YAAA,IAAI,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,CAAC;SAChC;KACF;AAED;;;;AAIG;AACH,IAAA,MAAM,UAAU,GAAA;QACd,MAAM,IAAI,CAAC,YAAY,CAAC;QACxB,OAAO,IAAI,CAAC,QAAQ,CAAC;KACtB;AAED;;;AAGG;IACH,MAAM,WAAW,CACf,OAAsC,EAAA;QAEtC,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAC7C,QAAA,MAAM,sBAAsB,GAA2B;AACrD,YAAA,cAAc,EAAE,IAAI,CAAC,MAAM,EAAE,cAAc;AAC3C,YAAA,gBAAgB,EAAE,IAAI,CAAC,MAAM,EAAE,gBAAgB;AAC/C,YAAA,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,KAAK;AACzB,YAAA,UAAU,EAAE,IAAI,CAAC,MAAM,EAAE,UAAU;AACnC,YAAA,iBAAiB,EAAE,IAAI,CAAC,MAAM,EAAE,iBAAiB;YACjD,QAAQ,EAAE,CAAC,GAAG,IAAI,CAAC,QAAQ,EAAE,UAAU,CAAC;SACzC,CAAC;QACF,IAAI,WAAW,GAAG,EAA2B,CAAC;;AAE9C,QAAA,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY;aAClC,IAAI,CAAC,MACJ,eAAe,CACb,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,sBAAsB,EACtB,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CACF;aACA,IAAI,CAAC,MAAM,IAAG;AACb,YAAA,IACE,MAAM,CAAC,QAAQ,CAAC,UAAU;gBAC1B,MAAM,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EACrC;AACA,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;AAC/B,gBAAA,MAAM,eAAe,GAAY;AAC/B,oBAAA,KAAK,EAAE,MAAM,CAAC,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,IAAI,EAAE;;AAE1D,oBAAA,IAAI,EAAE,MAAM,CAAC,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,IAAI,OAAO;iBAC9D,CAAC;AACF,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;aACrC;iBAAM;gBACL,MAAM,iBAAiB,GAAG,uBAAuB,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC;gBACnE,IAAI,iBAAiB,EAAE;AACrB,oBAAA,MAAM,CAAC,IAAI,CACT,mCAAmC,iBAAiB,CAAA,sCAAA,CAAwC,CAC7F,CAAC;iBACH;aACF;YACD,WAAW,GAAG,MAAM,CAAC;AACvB,SAAC,CAAC,CAAC;QACL,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,OAAO,WAAW,CAAC;KACpB;AAED;;;;AAIG;IACH,MAAM,iBAAiB,CACrB,OAAsC,EAAA;QAEtC,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAC7C,QAAA,MAAM,sBAAsB,GAA2B;AACrD,YAAA,cAAc,EAAE,IAAI,CAAC,MAAM,EAAE,cAAc;AAC3C,YAAA,gBAAgB,EAAE,IAAI,CAAC,MAAM,EAAE,gBAAgB;AAC/C,YAAA,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,KAAK;AACzB,YAAA,UAAU,EAAE,IAAI,CAAC,MAAM,EAAE,UAAU;AACnC,YAAA,iBAAiB,EAAE,IAAI,CAAC,MAAM,EAAE,iBAAiB;YACjD,QAAQ,EAAE,CAAC,GAAG,IAAI,CAAC,QAAQ,EAAE,UAAU,CAAC;SACzC,CAAC;QACF,MAAM,aAAa,GAAG,qBAAqB,CA
CzC,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,sBAAsB,EACtB,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;;AAGF,QAAA,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY;AAClC,aAAA,IAAI,CAAC,MAAM,aAAa,CAAC;;;aAGzB,KAAK,CAAC,QAAQ,IAAG;AAChB,YAAA,MAAM,IAAI,KAAK,CAAC,YAAY,CAAC,CAAC;AAChC,SAAC,CAAC;aACD,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,QAAQ,CAAC;aAC3C,IAAI,CAAC,QAAQ,IAAG;AACf,YAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzD,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;AAC/B,gBAAA,MAAM,eAAe,GAAG,EAAE,GAAG,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,EAAE,CAAC;;AAE9D,gBAAA,IAAI,CAAC,eAAe,CAAC,IAAI,EAAE;AACzB,oBAAA,eAAe,CAAC,IAAI,GAAG,OAAO,CAAC;iBAChC;AACD,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;aACrC;iBAAM;AACL,gBAAA,MAAM,iBAAiB,GAAG,uBAAuB,CAAC,QAAQ,CAAC,CAAC;gBAC5D,IAAI,iBAAiB,EAAE;AACrB,oBAAA,MAAM,CAAC,IAAI,CACT,yCAAyC,iBAAiB,CAAA,sCAAA,CAAwC,CACnG,CAAC;iBACH;aACF;AACH,SAAC,CAAC;aACD,KAAK,CAAC,CAAC,IAAG;;;;AAIT,YAAA,IAAI,CAAC,CAAC,OAAO,KAAK,YAAY,EAAE;;;AAG9B,gBAAA,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;aACjB;AACH,SAAC,CAAC,CAAC;AACL,QAAA,OAAO,aAAa,CAAC;KACtB;AACF;;AClMD;;;;;;;;;;;;;;;AAeG;AAiBI,eAAe,kBAAkB,CACtC,WAAwB,EACxB,KAAa,EACb,MAA0B,EAC1B,cAA+B,EAAA;IAE/B,IAAI,IAAI,GAAW,EAAE,CAAC;IACtB,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;QAC7D,MAAM,YAAY,GAAGE,qBAAoC,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;AACzE,QAAA,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,YAAY,CAAC,CAAC;KACrC;SAAM;AACL,QAAA,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC;KAC/B;AACD,IAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,KAAK,EACL,IAAI,CAAC,YAAY,EACjB,WAAW,EACX,KAAK,EACL,IAAI,EACJ,cAAc,CACf,CAAC;AACF,IAAA,OAAO,QAAQ,CAAC,IAAI,EAAE,CAAC;AACzB,CAAC;AAEM,eAAe,WAAW,CAC/B,WAAwB,EACxB,KAAa,EACb,MAA0B,EAC1B,aAA6B,EAC7B,cAA+B,EAAA;IAE/B,IACG,aAAmC,EAAE,IAAI,KAAK,aAAa,CAAC,cAAc,EAC3E;QACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,sDAAsD,CACvD,CAAC;KACH;IACD,OAAO,kBAAkB,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;AACxE;;ACxEA;;;;;;;;;;;;;;;AAeG;AAgCH;;;AAGG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAQ1C,IAAA,WAAA,CACE,EAAM,EACN,WAAwB,EACxB,cAA+B,EACvB,aAA6B,EAAA;AAErC,QAAA,KAAK,CAAC,EAAE,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;QAFrB,IAAa,CAAA,aAAA,GAAb,aAAa,CAAgB;QAGrC,IAAI,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,IAAI,EAAE,CAAC;QAC3D,IAAI,CAAC,cAAc,GAAG,WAAW,CAAC,cAAc,IAAI,EAAE,CAAC;AACvD,QAAA,IAAI,CAAC,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC;AAC/B,QAAA,IAAI,CAAC,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QACzC,IAAI,CAAC,iBAAiB,GAAG,uBAAuB,CAC9C,WAAW,CAAC,iBAAiB,CAC9B,CAAC;AACF,QAAA,IAAI,CAAC,cAAc,GAAG,cAAc,IAAI,EAAE,CAAC;KAC5C;AAED;;;AAGG;IACH,MAAM,eAAe,CACnB,OAA+D,EAAA;AAE/D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;QAC5D,OAAO,eAAe,CACpB,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV;YACE,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;YACnC,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;AACzC,YAAA,GAAG,eAAe;SACnB,EACD,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;;;;AAKG;IACH,MAAM,qBAAqB,CACzB,OAA+D,EAAA;AAE/D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;QAC5D,OAAO,qBAAqB,CAC1B,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV;YACE,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;YACnC,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;AACzC,YAAA,GAAG,eAAe;SACnB,EACD,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;;AAGG;AACH,IAAA,SAAS,CAAC,eAAiC,EAAA;AACzC,QAAA,OAAO,IAAI,WAAW,CACpB,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,aAAa,EAClB;YACE,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;YACzC,gBAAgB,EAAE,IAAI,CAAC
,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;AACnC;;;;AAIG;AACH,YAAA,GAAG,eAAe;AACnB,SAAA,EACD,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;AAEG;IACH,MAAM,WAAW,CACf,OAA2D,EAAA;AAE3D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;AAC5D,QAAA,OAAO,WAAW,CAChB,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,eAAe,EACf,IAAI,CAAC,aAAa,CACnB,CAAC;KACH;AACF;;ACtKD;;;;;;;;;;;;;;;AAeG;AAsBH;;;;;;AAMG;MACU,WAAW,CAAA;AActB;;AAEG;IACH,WACU,CAAA,gBAAkC,EAClC,cAAuC,EAAA;QADvC,IAAgB,CAAA,gBAAA,GAAhB,gBAAgB,CAAkB;QAClC,IAAc,CAAA,cAAA,GAAd,cAAc,CAAyB;AAlBjD;;;;AAIG;QACH,IAAQ,CAAA,QAAA,GAAG,KAAK,CAAC;AACjB;;;;AAIG;QACH,IAAc,CAAA,cAAA,GAAG,KAAK,CAAC;KAQnB;AAEJ;;;;;;;;AAQG;AACH,IAAA,MAAM,IAAI,CACR,OAAsC,EACtC,YAAY,GAAG,IAAI,EAAA;AAEnB,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAE7C,QAAA,MAAM,OAAO,GAAuB;AAClC,YAAA,aAAa,EAAE;gBACb,KAAK,EAAE,CAAC,UAAU,CAAC;gBACnB,YAAY;AACb,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;AAYG;IACH,MAAM,gBAAgB,CAAC,IAAY,EAAA;AACjC,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;gBACb,IAAI;AACL,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;;;;;;AAiBG;IACH,MAAM,iBAAiB,CAAC,IAA2B,EAAA;AACjD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;AACb,gBAAA,KAAK,EAAE,IAAI;AACZ,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;;;;;AAgBG;IACH,MAAM,iBAAiB,CAAC,IAA2B,EAAA;AACjD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;AACb,gBAAA,KAAK,EAAE,IAAI;AACZ,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;AAOG;IACH,MAAM,qBAAqB,CACzB,iBAAqC,EAAA;AAErC,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA4B;AACvC,YAAA,YAAY,EAAE;gBACZ,iBAAiB;AAClB,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;AAQG;IACH,OAAO,OAAO,GAAA;AAGZ,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,kFAAkF,CACnF,CAAC;SACH;QACD,WAAW,MAAM,OAAO,IAAI,IAAI,CAAC,cAAc,EAAE;AAC/C,YAAA,IAAI,OAAO,IAAI,OAAO,OAAO,KAAK,QAAQ,EAAE;AAC1C,gBAAA,IAAI,gBAAgB,CAAC,cAAc,IAAI,OAAO,EAAE;oBAC9C,MAAM;AACJ,wBAAA,IAAI,EAAE,eAAe;AACrB,wBAAA,GAAI,OAA8D;6BAC/D,aAAa;qBACI,CAAC;iBACxB;AAAM,qBAAA,IAAI,gBAAgB,CAAC,SAAS,IAAI,OAAO,EAAE;oBAChD,MAAM;AACJ,wBAAA,IAAI,EAAE,UAAU;AAChB,wBAAA,GAAI,OAA0D;6BAC3D,QAAQ;qBACU,CAAC;iBACzB;AAAM,qBAAA,IAAI,gBAAgB,CAAC,sBAAsB,IAAI,OAAO,EAAE;oBAC7D,MAAM;AACJ,wBAAA,IAAI,EAAE,sBAAsB;wBAC5B,GACE,OAMD,CAAC,oBAAoB;qBACW,CAAC;iBACrC;qBAAM;AACL,oBAAA,MAAM,CAAC,IAAI,CACT,CAAA,kDAAA,EAAqD,IAAI,CAAC,SAAS,CACjE,OAAO,CACR,CAAE,CAAA,CACJ,CAAC;iBACH;aACF;iBAAM;AACL,gBAAA,MAAM,CAAC,IAAI,CACT,CAAA,6CAAA,EAAgD,IAAI,CAAC,SAAS,CAC5D,OAAO,CACR,CAAE,CAAA,CACJ,CAAC;aACH;SACF;KACF;AAED;;;;;AAKG;AACH,IAAA,MAAM,KAAK,GAAA;AACT,QAAA,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;AAClB,YAAA,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC;YACrB,MAAM,IAAI,CAAC,gBAAgB,CAAC,KAAK,CAAC,IAAI,EAAE,wBAAwB,CAAC,CAAC;SACnE;KACF;AAED;;;;;;;;;AASG;IACH,MAAM,eAAe,CAAC,WAAoC,EAAA;AACxD,QAAA,IAAI,IAA
I,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;;;AAID,QAAA,WAAW,CAAC,OAAO,CAAC,UAAU,IAAG;AAC/B,YAAA,MAAM,OAAO,GAA6B;AACxC,gBAAA,aAAa,EAAE,EAAE,WAAW,EAAE,CAAC,UAAU,CAAC,EAAE;aAC7C,CAAC;AACF,YAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;AACtD,SAAC,CAAC,CAAC;KACJ;AAED;;;;;;;;;AASG;IACH,MAAM,eAAe,CACnB,gBAAuD,EAAA;AAEvD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,MAAM,GAAG,gBAAgB,CAAC,SAAS,EAAE,CAAC;QAC5C,OAAO,IAAI,EAAE;AACX,YAAA,IAAI;gBACF,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;gBAE5C,IAAI,IAAI,EAAE;oBACR,MAAM;iBACP;qBAAM,IAAI,CAAC,KAAK,EAAE;AACjB,oBAAA,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC,CAAC;iBACrE;gBAED,MAAM,IAAI,CAAC,eAAe,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;aACrC;YAAC,OAAO,CAAC,EAAE;;AAEV,gBAAA,MAAM,OAAO,GACX,CAAC,YAAY,KAAK,GAAG,CAAC,CAAC,OAAO,GAAG,gCAAgC,CAAC;gBACpE,MAAM,IAAI,OAAO,CAAC,WAAW,CAAC,aAAa,EAAE,OAAO,CAAC,CAAC;aACvD;SACF;KACF;AACF;;ACzWD;;;;;;;;;;;;;;;AAeG;AAoBH;;;;;;;AAOG;AACG,MAAO,mBAAoB,SAAQ,OAAO,CAAA;AAM9C;;AAEG;IACH,WACE,CAAA,EAAM,EACN,WAA4B;AAC5B;;AAEG;IACK,iBAAmC,EAAA;AAE3C,QAAA,KAAK,CAAC,EAAE,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;QAFrB,IAAiB,CAAA,iBAAA,GAAjB,iBAAiB,CAAkB;QAG3C,IAAI,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,IAAI,EAAE,CAAC;AAC3D,QAAA,IAAI,CAAC,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC;AAC/B,QAAA,IAAI,CAAC,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QACzC,IAAI,CAAC,iBAAiB,GAAG,uBAAuB,CAC9C,WAAW,CAAC,iBAAiB,CAC9B,CAAC;KACH;AAED;;;;;;;AAOG;AACH,IAAA,MAAM,OAAO,GAAA;QACX,MAAM,GAAG,GAAG,IAAI,YAAY,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAChD,MAAM,IAAI,CAAC,iBAAiB,CAAC,OAAO,CAAC,GAAG,CAAC,QAAQ,EAAE,CAAC,CAAC;AAErD,QAAA,IAAI,aAAqB,CAAC;AAC1B,QAAA,IAAI,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AACnE,YAAA,aAAa,GAAG,CAAA,SAAA,EAAY,IAAI,CAAC,YAAY,CAAC,OAAO,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,CAAA,CAAE,CAAC;SACvE;aAAM;AACL,YAAA,aAAa,GAAG,CAAY,SAAA,EAAA,IAAI,CAAC,YAAY,CAAC,OAAO,CAAc,WAAA,EAAA,IAAI,CAAC,YAAY,CAAC,QAAQ,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,EAAE,CAAC;SAC/G;;;AAID,QAAA,MAAM,EACJ,uBAAuB,EACvB,wBAAwB,EACxB,GAAG,gBAAgB,EACpB,GAAG,IAAI,CAAC,gBAAgB,CAAC;AAE1B,QAAA,MAAM,YAAY,GAAqB;AACrC,YAAA,KAAK,EAAE;AACL,gBAAA,KAAK,EAAE,aAAa;gBACpB,gBAAgB;gBAChB,KAAK,EAAE,IAAI,CAAC,KAAK;gBACjB,UAAU,EAAE,IAAI,CAAC,UAAU;gBAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;gBACzC,uBAAuB;gBACvB,wBAAwB;AACzB,aAAA;SACF,CAAC;AAEF,QAAA,IAAI;;YAEF,MAAM,cAAc,GAAG,IAAI,CAAC,iBAAiB,CAAC,MAAM,EAAE,CAAC;AACvD,YAAA,IAAI,CAAC,iBAAiB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,YAAY,CAAC,CAAC,CAAC;;YAG1D,MAAM,YAAY,GAAG,CAAC,MAAM,cAAc,CAAC,IAAI,EAAE,EAAE,KAAK,CAAC;AACzD,YAAA,IACE,CAAC,YAAY;AACb,gBAAA,EAAE,OAAO,YAAY,KAAK,QAAQ,CAAC;AACnC,gBAAA,EAAE,eAAe,IAAI,YAAY,CAAC,EAClC;gBACA,MAAM,IAAI,CAAC,iBAAiB,CAAC,KAAK,CAAC,IAAI,EAAE,mBAAmB,CAAC,CAAC;gBAC9D,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,8FAA8F,CAC/F,CAAC;aACH;YAED,OAAO,IAAI,WAAW,CAAC,IAAI,CAAC,iBAAiB,EAAE,cAAc,CAAC,CAAC;SAChE;QAAC,OAAO,CAAC,EAAE;;AAEV,YAAA,MAAM,IAAI,CAAC,iBAAiB,CAAC,KAAK,EAAE,CAAC;AACrC,YAAA,MAAM,CAAC,CAAC;SACT;KACF;AACF;;ACtID;;;;;;;;;;;;;;;AAeG;AAiBH;;;;;;;;;;;;;;;;;;;;;AAqBG;AACG,MAAO,WAAY,SAAQ,OAAO,CAAA;AAUtC;;;;;;;;;AASG;AACH,IAAA,WAAA,CACE,EAAM,EACN,WAA8B,EACvB,cAA+B,EAAA;QAEtC,MAAM,EAAE,KAAK,EAAE,gBAAgB,EAAE,cAAc,EAAE,GAAG,WAAW,CAAC;AAChE,QAAA,KAAK,CAAC,EAAE,EAAE,KAAK,CAAC,CAAC;QAHV,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;AAItC,QAAA,IAAI,CAAC,gBAAgB,GAAG,gBAAgB,CAAC;AACzC,QAAA,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;KACtC;AAED;;;;;;;;;;;;;;;;;AAiBG;IACH,MAAM,cAAc,CAClB,MAAc,EAAA;AAEd,QAAA,MAAM,IAAI,GAAG,wBAAwB,CAAC,MAAM,EAAE;YAC5C,GAAG,IAAI,CAAC,gBAAgB;YACxB
,GAAG,IAAI,CAAC,cAAc;AACvB,SAAA,CAAC,CAAC;AACH,QAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,OAAO,EACZ,IAAI,CAAC,YAAY;AACjB,qBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EACpB,IAAI,CAAC,cAAc,CACpB,CAAC;AACF,QAAA,OAAO,qBAAqB,CAAoB,QAAQ,CAAC,CAAC;KAC3D;AAED;;;;;;;;;;;;;;;;;;AAkBG;AACH,IAAA,MAAM,iBAAiB,CACrB,MAAc,EACd,MAAc,EAAA;AAEd,QAAA,MAAM,IAAI,GAAG,wBAAwB,CAAC,MAAM,EAAE;YAC5C,MAAM;YACN,GAAG,IAAI,CAAC,gBAAgB;YACxB,GAAG,IAAI,CAAC,cAAc;AACvB,SAAA,CAAC,CAAC;AACH,QAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,OAAO,EACZ,IAAI,CAAC,YAAY;AACjB,qBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EACpB,IAAI,CAAC,cAAc,CACpB,CAAC;AACF,QAAA,OAAO,qBAAqB,CAAiB,QAAQ,CAAC,CAAC;KACxD;AACF;;AC/JD;;;;;;;;;;;;;;;AAeG;AAiDH;;;;AAIG;MACU,oBAAoB,CAAA;AAG/B,IAAA,WAAA,GAAA;AACE,QAAA,IAAI,OAAO,SAAS,KAAK,WAAW,EAAE;AACpC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,0DAA0D;gBACxD,+DAA+D;AAC/D,gBAAA,6EAA6E,CAChF,CAAC;SACH;KACF;AAED,IAAA,OAAO,CAAC,GAAW,EAAA;QACjB,OAAO,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,MAAM,KAAI;YACrC,IAAI,CAAC,EAAE,GAAG,IAAI,SAAS,CAAC,GAAG,CAAC,CAAC;YAC7B,IAAI,CAAC,EAAE,CAAC,UAAU,GAAG,MAAM,CAAC;AAC5B,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,MAAM,EAAE,MAAM,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;AAClE,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CACtB,OAAO,EACP,MACE,MAAM,CACJ,IAAI,OAAO,CACT,WAAW,CAAC,WAAW,EACvB,CAAA,+BAAA,CAAiC,CAClC,CACF,EACH,EAAE,IAAI,EAAE,IAAI,EAAE,CACf,CAAC;YACF,IAAI,CAAC,EAAG,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,UAAsB,KAAI;AAC5D,gBAAA,IAAI,UAAU,CAAC,MAAM,EAAE;oBACrB,MAAM,CAAC,IAAI,CACT,CAAA,gDAAA,EAAmD,UAAU,CAAC,MAAM,CAAG,CAAA,CAAA,CACxE,CAAC;iBACH;AACH,aAAC,CAAC,CAAC;AACL,SAAC,CAAC,CAAC;KACJ;AAED,IAAA,IAAI,CAAC,IAA0B,EAAA;AAC7B,QAAA,IAAI,CAAC,IAAI,CAAC,EAAE,IAAI,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,IAAI,EAAE;YACrD,MAAM,IAAI,OAAO,CAAC,WAAW,CAAC,aAAa,EAAE,wBAAwB,CAAC,CAAC;SACxE;AACD,QAAA,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;KACpB;IAED,OAAO,MAAM,GAAA;AACX,QAAA,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE;YACZ,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,6BAA6B,CAC9B,CAAC;SACH;QAED,MAAM,YAAY,GAAc,EAAE,CAAC;QACnC,MAAM,UAAU,GAAY,EAAE,CAAC;QAC/B,IAAI,cAAc,GAAwB,IAAI,CAAC;QAC/C,IAAI,QAAQ,GAAG,KAAK,CAAC;AAErB,QAAA,MAAM,eAAe,GAAG,OAAO,KAAmB,KAAmB;AACnE,YAAA,IAAI,IAAY,CAAC;AACjB,YAAA,IAAI,KAAK,CAAC,IAAI,YAAY,IAAI,EAAE;gBAC9B,IAAI,GAAG,MAAM,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC;aAChC;AAAM,iBAAA,IAAI,OAAO,KAAK,CAAC,IAAI,KAAK,QAAQ,EAAE;AACzC,gBAAA,IAAI,GAAG,KAAK,CAAC,IAAI,CAAC;aACnB;iBAAM;AACL,gBAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,CAAA,kFAAA,EAAqF,OAAO,KAAK,CAAC,IAAI,CAAG,CAAA,CAAA,CAC1G,CACF,CAAC;gBACF,IAAI,cAAc,EAAE;AAClB,oBAAA,cAAc,EAAE,CAAC;oBACjB,cAAc,GAAG,IAAI,CAAC;iBACvB;gBACD,OAAO;aACR;AAED,YAAA,IAAI;gBACF,MAAM,GAAG,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAY,CAAC;AACxC,gBAAA,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;aACxB;YAAC,OAAO,CAAC,EAAE;gBACV,MAAM,GAAG,GAAG,CAAU,CAAC;AACvB,gBAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,4CAA4C,GAAG,CAAC,OAAO,CAAE,CAAA,CAC1D,CACF,CAAC;aACH;YAED,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;AACH,SAAC,CAAC;QAEF,MAAM,aAAa,GAAG,MAAW;AAC/B,YAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CAAC,WAAW,CAAC,WAAW,EAAE,6BAA6B,CAAC,CACpE,CAAC;YACF,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;AACH,SAAC,CAAC;AAEF,QAAA,MAAM,aAAa,GAAG,CAAC,KAAiB,KAAU;AAChD,YAAA,IAAI,KAAK,CAAC,MAAM,EAAE;gBAChB,MAAM,CAAC,IAAI,CACT,CAAA,uDAAA,EAA0D,KAAK,CAAC,MAAM,CAAE,CAAA,CACzE,CAAC;aACH;YACD,QAAQ,GAAG,IAAI,CAAC;YAChB,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;;YAED,IAAI,CAAC,EAAE
,EAAE,mBAAmB,CAAC,SAAS,EAAE,eAAe,CAAC,CAAC;YACzD,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;YACrD,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;AACvD,SAAC,CAAC;QAEF,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,SAAS,EAAE,eAAe,CAAC,CAAC;QACrD,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;QACjD,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;QAEjD,OAAO,CAAC,QAAQ,EAAE;AAChB,YAAA,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzB,gBAAA,MAAM,KAAK,GAAG,UAAU,CAAC,KAAK,EAAG,CAAC;AAClC,gBAAA,MAAM,KAAK,CAAC;aACb;AACD,YAAA,IAAI,YAAY,CAAC,MAAM,GAAG,CAAC,EAAE;AAC3B,gBAAA,MAAM,YAAY,CAAC,KAAK,EAAG,CAAC;aAC7B;iBAAM;AACL,gBAAA,MAAM,IAAI,OAAO,CAAO,OAAO,IAAG;oBAChC,cAAc,GAAG,OAAO,CAAC;AAC3B,iBAAC,CAAC,CAAC;aACJ;SACF;;AAGD,QAAA,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzB,YAAA,MAAM,KAAK,GAAG,UAAU,CAAC,KAAK,EAAG,CAAC;AAClC,YAAA,MAAM,KAAK,CAAC;SACb;KACF;IAED,KAAK,CAAC,IAAa,EAAE,MAAe,EAAA;AAClC,QAAA,OAAO,IAAI,OAAO,CAAC,OAAO,IAAG;AAC3B,YAAA,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE;gBACZ,OAAO,OAAO,EAAE,CAAC;aAClB;AAED,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;;YAEnE,IACE,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,MAAM;gBACvC,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,UAAU,EAC3C;gBACA,OAAO,OAAO,EAAE,CAAC;aAClB;YAED,IAAI,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,OAAO,EAAE;gBAC5C,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;aAC7B;AACH,SAAC,CAAC,CAAC;KACJ;AACF;;AChPD;;;;;;;;;;;;;;;AAeG;AAWH;;;;;;AAMG;MACmB,MAAM,CAAA;AAkC1B,IAAA,WAAA,CAAY,YAA6B,EAAA;;QAEvC,IAAI,CAAC,YAAY,CAAC,IAAI,IAAI,CAAC,YAAY,CAAC,KAAK,EAAE;YAC7C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,wEAAwE,CACzE,CAAC;SACH;;AAED,QAAA,KAAK,MAAM,QAAQ,IAAI,YAAY,EAAE;YACnC,IAAI,CAAC,QAAQ,CAAC,GAAG,YAAY,CAAC,QAAQ,CAAC,CAAC;SACzC;;AAED,QAAA,IAAI,CAAC,IAAI,GAAG,YAAY,CAAC,IAAI,CAAC;QAC9B,IAAI,CAAC,MAAM,GAAG,YAAY,CAAC,cAAc,CAAC,QAAQ,CAAC;cAC/C,YAAY,CAAC,MAAM;cACnB,SAAS,CAAC;QACd,IAAI,CAAC,QAAQ,GAAG,YAAY,CAAC,cAAc,CAAC,UAAU,CAAC;AACrD,cAAE,CAAC,CAAC,YAAY,CAAC,QAAQ;cACvB,KAAK,CAAC;KACX;AAED;;;;AAIG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAkD;YACzD,IAAI,EAAE,IAAI,CAAC,IAAI;SAChB,CAAC;AACF,QAAA,KAAK,MAAM,IAAI,IAAI,IAAI,EAAE;AACvB,YAAA,IAAI,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK,SAAS,EAAE;AACzD,gBAAA,IAAI,IAAI,KAAK,UAAU,IAAI,IAAI,CAAC,IAAI,KAAK,UAAU,CAAC,MAAM,EAAE;oBAC1D,GAAG,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,CAAC;iBACxB;aACF;SACF;AACD,QAAA,OAAO,GAAoB,CAAC;KAC7B;IAED,OAAO,KAAK,CAAC,WAA6C,EAAA;QACxD,OAAO,IAAI,WAAW,CAAC,WAAW,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;KACxD;IAED,OAAO,MAAM,CACX,YAKC,EAAA;AAED,QAAA,OAAO,IAAI,YAAY,CACrB,YAAY,EACZ,YAAY,CAAC,UAAU,EACvB,YAAY,CAAC,kBAAkB,CAChC,CAAC;KACH;;IAGD,OAAO,MAAM,CAAC,YAA2B,EAAA;AACvC,QAAA,OAAO,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;KACvC;IAED,OAAO,UAAU,CACf,YAA+C,EAAA;QAE/C,OAAO,IAAI,YAAY,CAAC,YAAY,EAAE,YAAY,CAAC,IAAI,CAAC,CAAC;KAC1D;IAED,OAAO,OAAO,CAAC,aAA4B,EAAA;AACzC,QAAA,OAAO,IAAI,aAAa,CAAC,aAAa,CAAC,CAAC;KACzC;;IAGD,OAAO,MAAM,CAAC,YAA2B,EAAA;AACvC,QAAA,OAAO,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;KACvC;;IAGD,OAAO,OAAO,CAAC,aAA4B,EAAA;AACzC,QAAA,OAAO,IAAI,aAAa,CAAC,aAAa,CAAC,CAAC;KACzC;IAED,OAAO,KAAK,CACV,WAAoD,EAAA;AAEpD,QAAA,OAAO,IAAI,WAAW,CAAC,WAAW,CAAC,CAAC;KACrC;AACF,CAAA;AAeD;;;AAGG;AACG,MAAO,aAAc,SAAQ,MAAM,CAAA;AACvC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,OAAO;AACxB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;AAGG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;AACtC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;AAGG;AACG,MAAO,aAAc,SAAQ,MA
AM,CAAA;AACvC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,OAAO;AACxB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;IAEtC,WAAY,CAAA,YAA2B,EAAE,UAAqB,EAAA;AAC5D,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;AACH,QAAA,IAAI,CAAC,IAAI,GAAG,UAAU,CAAC;KACxB;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;AAC3B,QAAA,IAAI,IAAI,CAAC,IAAI,EAAE;AACb,YAAA,GAAG,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC;SACzB;AACD,QAAA,OAAO,GAAoB,CAAC;KAC7B;AACF,CAAA;AAED;;;;;AAKG;AACG,MAAO,WAAY,SAAQ,MAAM,CAAA;IACrC,WAAY,CAAA,YAA0B,EAAS,KAAkB,EAAA;AAC/D,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,KAAK;AACtB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;QAJ0C,IAAK,CAAA,KAAA,GAAL,KAAK,CAAa;KAKhE;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAC3B,GAAG,CAAC,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,EAAE,CAAC;AAChC,QAAA,OAAO,GAAG,CAAC;KACZ;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;AACtC,IAAA,WAAA,CACE,YAA0B,EACnB,UAEN,EACM,qBAA+B,EAAE,EAAA;AAExC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;QARI,IAAU,CAAA,UAAA,GAAV,UAAU,CAEhB;QACM,IAAkB,CAAA,kBAAA,GAAlB,kBAAkB,CAAe;KAMzC;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAC3B,GAAG,CAAC,UAAU,GAAG,EAAE,GAAG,IAAI,CAAC,UAAU,EAAE,CAAC;QACxC,MAAM,QAAQ,GAAG,EAAE,CAAC;AACpB,QAAA,IAAI,IAAI,CAAC,kBAAkB,EAAE;AAC3B,YAAA,KAAK,MAAM,WAAW,IAAI,IAAI,CAAC,kBAAkB,EAAE;gBACjD,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,WAAW,CAAC,EAAE;oBAChD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAa,UAAA,EAAA,WAAW,CAAqD,mDAAA,CAAA,CAC9E,CAAC;iBACH;aACF;SACF;AACD,QAAA,KAAK,MAAM,WAAW,IAAI,IAAI,CAAC,UAAU,EAAE;YACzC,IAAI,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,WAAW,CAAC,EAAE;AAC/C,gBAAA,GAAG,CAAC,UAAU,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,UAAU,CAC3C,WAAW,CACZ,CAAC,MAAM,EAAmB,CAAC;gBAC5B,IAAI,CAAC,IAAI,CAAC,kBAAkB,CAAC,QAAQ,CAAC,WAAW,CAAC,EAAE;AAClD,oBAAA,QAAQ,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;iBAC5B;aACF;SACF;AACD,QAAA,IAAI,QAAQ,CAAC,MAAM,GAAG,CAAC,EAAE;AACvB,YAAA,GAAG,CAAC,QAAQ,GAAG,QAAQ,CAAC;SACzB;QACD,OAAO,GAAG,CAAC,kBAAkB,CAAC;AAC9B,QAAA,OAAO,GAAoB,CAAC;KAC7B;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,WAAY,SAAQ,MAAM,CAAA;AAErC,IAAA,WAAA,CAAY,YAAqD,EAAA;QAC/D,IAAI,YAAY,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;YACnC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,sCAAsC,CACvC,CAAC;SACH;AACD,QAAA,KAAK,CAAC;AACJ,YAAA,GAAG,YAAY;YACf,IAAI,EAAE,SAAS;AAChB,SAAA,CAAC,CAAC;AACH,QAAA,IAAI,CAAC,KAAK,GAAG,YAAY,CAAC,KAAK,CAAC;KACjC;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;;AAE3B,QAAA,IAAI,IAAI,CAAC,KAAK,IAAI,KAAK,CAAC,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE;AAC3C,YAAA,GAAG,CAAC,KAAK,GAAI,IAAI,CAAC,KAAuB,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;SAChE;AACD,QAAA,OAAO,GAAG,CAAC;KACZ;AACF;;AC5VD;;;;;;;;;;;;;;;AAeG;AAIH;;;;;;;;;;;;;;;;AAgBG;MACU,iBAAiB,CAAA;AAU5B,IAAA,WAAA,GAAA;AACE,QAAA,IAAI,CAAC,QAAQ,GAAG,WAAW,CAAC;KAC7B;AAED;;;;;;;AAOG;IACH,OAAO,IAAI,CAAC,kBAA2B,EAAA;AACrC,QAAA,IACE,kBAAkB;aACjB,kBAAkB,GAAG,CAAC,IAAI,kBAAkB,GAAG,GAAG,CAAC,EACpD;AACA,YAAA,MAAM,CAAC,IAAI,CACT,uCAAuC,kBAAkB,CAAA,4CAAA,CAA8C,CACxG,CAAC;SACH;AACD,QAAA,OAAO,EAAE,QAAQ,EAAE,YAAY,EAAE,kBAAkB,EAAE,CAAC;KACvD;AAED;;;;;;AAMG;AACH,IAAA,OAAO,GAAG,GAAA;AACR,QAAA,OAAO,EAAE,QAAQ,EAAE,WAAW,EAAE,CAAC;KAClC;AACF;;AChFD;;;;;;;;;;;;;;;AAeG;AAcH,MAAM,wBAAwB,GAAG,KAAM,CAAC;AACxC,MAAM,yBAAyB,GAAG,KAAM,CAAC;AAEzC,MAAM,oBAAoB,GAAG,iBAAiB,CAAC;AAE/C;;;;;;;;;AASG;AACH,MAAM,2BAA2B,GAAG,CAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;uBA6Cb,oBAAoB,CAA
A;CAC1C,CAAC;AA2CF;;;;AAIG;MACU,uBAAuB,CAAA;AAiBlC,IAAA,WAAA,CACmB,WAAwB,EACxB,OAAsC,EACtC,IAAwB,EAAA;QAFxB,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;QACxB,IAAO,CAAA,OAAA,GAAP,OAAO,CAA+B;QACtC,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAoB;;QAlBnC,IAAS,CAAA,SAAA,GAAG,KAAK,CAAC;;AAET,QAAA,IAAA,CAAA,YAAY,GAAG,IAAI,QAAQ,EAAQ,CAAC;;QAKpC,IAAa,CAAA,aAAA,GAAkB,EAAE,CAAC;;QAE3C,IAAgB,CAAA,gBAAA,GAA4B,EAAE,CAAC;;QAE/C,IAAa,CAAA,aAAA,GAAG,CAAC,CAAC;;QAElB,IAAqB,CAAA,qBAAA,GAAG,KAAK,CAAC;AAOpC,QAAA,IAAI,CAAC,WAAW,CAAC,cAAc,GAAG,IAAI,CAAC;;AAGvC,QAAA,IAAI,CAAC,kBAAkB,GAAG,IAAI,CAAC,cAAc,EAAE,CAAC,OAAO,CAAC,MACtD,IAAI,CAAC,OAAO,EAAE,CACf,CAAC;;;QAIF,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,SAAS,GAAG,KAAK,IAAG;AAC7C,YAAA,IAAI,IAAI,CAAC,SAAS,EAAE;gBAClB,OAAO;aACR;AAED,YAAA,MAAM,KAAK,GAAG,KAAK,CAAC,IAAkB,CAAC;YACvC,MAAM,MAAM,GAAG,IAAI,CACjB,MAAM,CAAC,YAAY,CAAC,KAAK,CACvB,IAAI,EACJ,KAAK,CAAC,IAAI,CAAC,IAAI,UAAU,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CACzC,CACF,CAAC;AAEF,YAAA,MAAM,KAAK,GAA0B;AACnC,gBAAA,QAAQ,EAAE,WAAW;AACrB,gBAAA,IAAI,EAAE,MAAM;aACb,CAAC;YACF,KAAK,IAAI,CAAC,WAAW,CAAC,iBAAiB,CAAC,KAAK,CAAC,CAAC;AACjD,SAAC,CAAC;KACH;AAED;;AAEG;AACH,IAAA,MAAM,IAAI,GAAA;AACR,QAAA,IAAI,IAAI,CAAC,SAAS,EAAE;YAClB,OAAO;SACR;AACD,QAAA,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC;AACtB,QAAA,IAAI,CAAC,YAAY,CAAC,OAAO,EAAE,CAAC;AAC5B,QAAA,MAAM,IAAI,CAAC,kBAAkB,CAAC;KAC/B;AAED;;;AAGG;IACK,OAAO,GAAA;AACb,QAAA,IAAI,CAAC,iBAAiB,EAAE,CAAC;QACzB,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC;AAC5C,QAAA,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,UAAU,EAAE,CAAC;AACnC,QAAA,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,CAAC;AAClC,QAAA,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,SAAS,EAAE,CAAC,OAAO,CAAC,KAAK,IAAI,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC;QACjE,IAAI,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,KAAK,KAAK,QAAQ,EAAE;YAC7C,KAAK,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,KAAK,EAAE,CAAC;SACrC;AACD,QAAA,IAAI,CAAC,WAAW,CAAC,cAAc,GAAG,KAAK,CAAC;KACzC;AAED;;AAEG;AACK,IAAA,cAAc,CAAC,SAAsB,EAAA;AAC3C,QAAA,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;;AAEnC,QAAA,KAAK,IAAI,CAAC,oBAAoB,EAAE,CAAC;KAClC;AAED;;;;AAIG;IACK,iBAAiB,GAAA;;;AAGvB,QAAA,CAAC,GAAG,IAAI,CAAC,gBAAgB,CAAC,CAAC,OAAO,CAAC,MAAM,IAAI,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;;AAG7D,QAAA,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,CAAC,CAAC;;QAG9B,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,CAAC;KACzD;AAED;;AAEG;AACK,IAAA,MAAM,oBAAoB,GAAA;AAChC,QAAA,IAAI,IAAI,CAAC,qBAAqB,EAAE;YAC9B,OAAO;SACR;AACD,QAAA,IAAI,CAAC,qBAAqB,GAAG,IAAI,CAAC;AAElC,QAAA,OAAO,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE;YACvD,MAAM,YAAY,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,EAAG,CAAC;AACjD,YAAA,IAAI;AACF,gBAAA,MAAM,KAAK,GAAG,IAAI,UAAU,CAAC,YAAY,CAAC,CAAC;AAC3C,gBAAA,MAAM,UAAU,GAAG,KAAK,CAAC,MAAM,CAAC;AAEhC,gBAAA,MAAM,WAAW,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,YAAY,CACrD,CAAC,EACD,UAAU,EACV,yBAAyB,CAC1B,CAAC;;gBAGF,MAAM,WAAW,GAAG,WAAW,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC;AAClD,gBAAA,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,CAAC,EAAE,EAAE;AACnC,oBAAA,WAAW,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC;iBACnC;gBAED,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,kBAAkB,EAAE,CAAC;AAC3D,gBAAA,MAAM,CAAC,MAAM,GAAG,WAAW,CAAC;gBAC5B,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,CAAC,CAAC;;AAGnD,gBAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;AACnC,gBAAA,MAAM,CAAC,OAAO,GAAG,MAAK;AACpB,oBAAA,IAAI,CAAC,gBAAgB,GAAG,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAClD,CAAC,IAAI,CAAC,KAAK,MAAM,CAClB,CAAC;AACJ,iBAAC,CAAC;;;AAIF,gBAAA,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,GAAG,CAC3B,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,EAClC,IAAI,CAAC,aAAa,CACnB,CAAC;AACF,gBAAA,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC;;AAGjC,gBAAA,IAAI,CA
AC,aAAa,IAAI,WAAW,CAAC,QAAQ,CAAC;aAC5C;YAAC,OAAO,CAAC,EAAE;AACV,gBAAA,MAAM,CAAC,KAAK,CAAC,sBAAsB,EAAE,CAAC,CAAC,CAAC;aACzC;SACF;AAED,QAAA,IAAI,CAAC,qBAAqB,GAAG,KAAK,CAAC;KACpC;AAED;;AAEG;AACK,IAAA,MAAM,cAAc,GAAA;QAC1B,MAAM,gBAAgB,GAAG,IAAI,CAAC,WAAW,CAAC,OAAO,EAAE,CAAC;AACpD,QAAA,OAAO,CAAC,IAAI,CAAC,SAAS,EAAE;AACtB,YAAA,MAAM,MAAM,GAAG,MAAM,OAAO,CAAC,IAAI,CAAC;gBAChC,gBAAgB,CAAC,IAAI,EAAE;gBACvB,IAAI,CAAC,YAAY,CAAC,OAAO;AAC1B,aAAA,CAAC,CAAC;YAEH,IAAI,IAAI,CAAC,SAAS,IAAI,CAAC,MAAM,IAAI,MAAM,CAAC,IAAI,EAAE;gBAC5C,MAAM;aACP;AAED,YAAA,MAAM,OAAO,GAAG,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAA,IAAI,OAAO,CAAC,IAAI,KAAK,eAAe,EAAE;gBACpC,MAAM,aAAa,GAAG,OAA4B,CAAC;AACnD,gBAAA,IAAI,aAAa,CAAC,WAAW,EAAE;oBAC7B,IAAI,CAAC,iBAAiB,EAAE,CAAC;iBAC1B;gBAED,MAAM,SAAS,GAAG,aAAa,CAAC,SAAS,EAAE,KAAK,CAAC,IAAI,CAAC,IAAI,IACxD,IAAI,CAAC,UAAU,EAAE,QAAQ,CAAC,UAAU,CAAC,QAAQ,CAAC,CAC/C,CAAC;AACF,gBAAA,IAAI,SAAS,EAAE,UAAU,EAAE;AACzB,oBAAA,MAAM,SAAS,GAAG,UAAU,CAAC,IAAI,CAC/B,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,IAAI,CAAC,EAC/B,CAAC,IAAI,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CACrB,CAAC,MAAM,CAAC;AACT,oBAAA,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,CAAC;iBAChC;aACF;AAAM,iBAAA,IAAI,OAAO,CAAC,IAAI,KAAK,UAAU,EAAE;AACtC,gBAAA,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,sBAAsB,EAAE;AACxC,oBAAA,MAAM,CAAC,IAAI,CACT,wHAAwH,CACzH,CAAC;iBACH;qBAAM;AACL,oBAAA,IAAI;AACF,wBAAA,MAAM,gBAAgB,GAAG,MAAM,IAAI,CAAC,OAAO,CAAC,sBAAsB,CAChE,OAAO,CAAC,aAAa,CACtB,CAAC;AACF,wBAAA,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE;4BACnB,KAAK,IAAI,CAAC,WAAW,CAAC,qBAAqB,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC;yBACjE;qBACF;oBAAC,OAAO,CAAC,EAAE;AACV,wBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,iCAAA,EAAqC,CAAW,CAAC,OAAO,CAAA,CAAE,CAC3D,CAAC;qBACH;iBACF;aACF;SACF;KACF;AACF,CAAA;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA6CG;AACI,eAAe,sBAAsB,CAC1C,WAAwB,EACxB,UAAyC,EAAE,EAAA;AAE3C,IAAA,IAAI,WAAW,CAAC,QAAQ,EAAE;QACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,0DAA0D,CAC3D,CAAC;KACH;AAED,IAAA,IAAI,WAAW,CAAC,cAAc,EAAE;QAC9B,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,gEAAgE,CACjE,CAAC;KACH;;IAGD,IACE,OAAO,gBAAgB,KAAK,WAAW;QACvC,OAAO,YAAY,KAAK,WAAW;QACnC,OAAO,SAAS,KAAK,WAAW;AAChC,QAAA,CAAC,SAAS,CAAC,YAAY,EACvB;QACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,kHAAkH,CACnH,CAAC;KACH;AAED,IAAA,IAAI,YAAsC,CAAC;AAC3C,IAAA,IAAI;;;AAGF,QAAA,YAAY,GAAG,IAAI,YAAY,EAAE,CAAC;AAClC,QAAA,IAAI,YAAY,CAAC,KAAK,KAAK,WAAW,EAAE;AACtC,YAAA,MAAM,YAAY,CAAC,MAAM,EAAE,CAAC;SAC7B;;;QAID,MAAM,WAAW,GAAG,MAAM,SAAS,CAAC,YAAY,CAAC,YAAY,CAAC;AAC5D,YAAA,KAAK,EAAE,IAAI;AACZ,SAAA,CAAC,CAAC;;;QAIH,MAAM,WAAW,GAAG,IAAI,IAAI,CAAC,CAAC,2BAA2B,CAAC,EAAE;AAC1D,YAAA,IAAI,EAAE,wBAAwB;AAC/B,SAAA,CAAC,CAAC;QACH,MAAM,UAAU,GAAG,GAAG,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC;QACpD,MAAM,YAAY,CAAC,YAAY,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC;;QAGtD,MAAM,UAAU,GAAG,YAAY,CAAC,uBAAuB,CAAC,WAAW,CAAC,CAAC;QACrE,MAAM,WAAW,GAAG,IAAI,gBAAgB,CACtC,YAAY,EACZ,oBAAoB,EACpB;AACE,YAAA,gBAAgB,EAAE,EAAE,gBAAgB,EAAE,wBAAwB,EAAE;AACjE,SAAA,CACF,CAAC;AACF,QAAA,UAAU,CAAC,OAAO,CAAC,WAAW,CAAC,CAAC;;QAGhC,MAAM,MAAM,GAAG,IAAI,uBAAuB,CAAC,WAAW,EAAE,OAAO,EAAE;YAC/D,YAAY;YACZ,WAAW;YACX,UAAU;YACV,WAAW;AACZ,SAAA,CAAC,CAAC;QAEH,OAAO,EAAE,IAAI,EAAE,MAAM,MAAM,CAAC,IAAI,EAAE,EAAE,CAAC;KACtC;IAAC,OAAO,CAAC,EAAE;;QAEV,IAAI,YAAY,IAAI,YAAY,CAAC,KAAK,KAAK,QAAQ,EAAE;AACnD,YAAA,KAAK,YAAY,CAAC,KAAK,EAAE,CAAC;SAC3B;;;QAID,IAAI,CAAC,YAAY,OAAO,IAAI,CAAC,YAAY,YAAY,EAAE;AACrD,YAAA,MAAM,CAAC,CAAC;SACT;;AAGD,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,sCAAA,EAA0C,CAAW,CAAC,OAAO,CAAA,CAAE,CAChE,CAAC;KACH;AACH;;AChfA;;;;;;;;;;;;;;;AAeG;AA6CH;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2BG;SACa,KAAK,CAAC,MAAmB,MAAM,EAAE,EAAE,OAAmB,EAAA;AACpE,IAAA,GAAG,GAAG,kBAAkB,C
AAC,GAAG,CAAC,CAAC;;IAE9B,MAAM,UAAU,GAAmB,YAAY,CAAC,GAAG,EAAE,OAAO,CAAC,CAAC;IAE9D,MAAM,OAAO,GAAG,OAAO,EAAE,OAAO,IAAI,IAAI,eAAe,EAAE,CAAC;AAE1D,IAAA,MAAM,YAAY,GAA+B;AAC/C,QAAA,2BAA2B,EAAE,OAAO,EAAE,2BAA2B,IAAI,KAAK;KAC3E,CAAC;AAEF,IAAA,MAAM,UAAU,GAAG,wBAAwB,CAAC,OAAO,CAAC,CAAC;AACrD,IAAA,MAAM,UAAU,GAAG,UAAU,CAAC,YAAY,CAAC;QACzC,UAAU;AACX,KAAA,CAAC,CAAC;AAEH,IAAA,UAAU,CAAC,OAAO,GAAG,YAAY,CAAC;AAElC,IAAA,OAAO,UAAU,CAAC;AACpB,CAAC;AAED;;;;;AAKG;SACa,kBAAkB,CAChC,EAAM,EACN,WAAuC,EACvC,cAA+B,EAAA;;IAG/B,MAAM,YAAY,GAAG,WAA2B,CAAC;AACjD,IAAA,IAAI,aAA0B,CAAC;AAC/B,IAAA,IAAI,YAAY,CAAC,IAAI,EAAE;AACrB,QAAA,aAAa,GAAG,YAAY,CAAC,aAAa,IAAI;AAC5C,YAAA,KAAK,EAAE,6BAA6B;SACrC,CAAC;KACH;SAAM;QACL,aAAa,GAAG,WAA0B,CAAC;KAC5C;AAED,IAAA,IAAI,CAAC,aAAa,CAAC,KAAK,EAAE;QACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAoF,kFAAA,CAAA,CACrF,CAAC;KACH;AAED;;;AAGG;AACH,IAAA,MAAM,aAAa,GAAI,EAAgB,CAAC,oBAAoB,GAC1D,YAAY,CAAC,IAAI,EACjB,OAAO,MAAM,KAAK,WAAW,GAAG,SAAS,GAAG,MAAM,EAClD,YAAY,CAAC,cAAc,CAC5B,CAAC;IAEF,OAAO,IAAI,eAAe,CAAC,EAAE,EAAE,aAAa,EAAE,cAAc,EAAE,aAAa,CAAC,CAAC;AAC/E,CAAC;AAED;;;;;;;;;;;;;AAaG;SACa,cAAc,CAC5B,EAAM,EACN,WAA8B,EAC9B,cAA+B,EAAA;AAE/B,IAAA,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE;QACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAgF,8EAAA,CAAA,CACjF,CAAC;KACH;IACD,OAAO,IAAI,WAAW,CAAC,EAAE,EAAE,WAAW,EAAE,cAAc,CAAC,CAAC;AAC1D,CAAC;AAED;;;;;;;;;;;AAWG;AACa,SAAA,sBAAsB,CACpC,EAAM,EACN,WAA4B,EAAA;AAE5B,IAAA,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE;QACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAuH,qHAAA,CAAA,CACxH,CAAC;KACH;AACD,IAAA,MAAM,gBAAgB,GAAG,IAAI,oBAAoB,EAAE,CAAC;IACpD,OAAO,IAAI,mBAAmB,CAAC,EAAE,EAAE,WAAW,EAAE,gBAAgB,CAAC,CAAC;AACpE;;AC3MA;;;;AAIG;AAgCH,SAAS,UAAU,GAAA;AACjB,IAAA,kBAAkB,CAChB,IAAI,SAAS,CAAC,OAAO,EAAE,OAAO,EAAuB,QAAA,4BAAA,CAAC,oBAAoB,CACxE,IAAI,CACL,CACF,CAAC;AAEF,IAAA,eAAe,CAAC,IAAI,EAAE,OAAO,CAAC,CAAC;;AAE/B,IAAA,eAAe,CAAC,IAAI,EAAE,OAAO,EAAE,SAAkB,CAAC,CAAC;AACrD,CAAC;AAED,UAAU,EAAE;;;;"}
\ No newline at end of file
diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/package.json b/frontend-old/node_modules/@firebase/ai/dist/esm/package.json
new file mode 100644
index 0000000..7c34deb
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/esm/package.json
@@ -0,0 +1 @@
+{"type":"module"}
\ No newline at end of file
diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/api.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/api.d.ts
new file mode 100644
index 0000000..491268b
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/api.d.ts
@@ -0,0 +1,99 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { FirebaseApp } from '@firebase/app';
+import { AI_TYPE } from './constants';
+import { AIService } from './service';
+import { AI, AIOptions } from './public-types';
+import { ImagenModelParams, HybridParams, ModelParams, RequestOptions, LiveModelParams } from './types';
+import { AIError } from './errors';
+import { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel } from './models';
+export { ChatSession } from './methods/chat-session';
+export { LiveSession } from './methods/live-session';
+export * from './requests/schema-builder';
+export { ImagenImageFormat } from './requests/imagen-image-format';
+export { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, AIError };
+export { Backend, VertexAIBackend, GoogleAIBackend } from './backend';
+export { startAudioConversation, AudioConversationController, StartAudioConversationOptions } from './methods/live-session-helpers';
+declare module '@firebase/component' {
+ interface NameServiceMapping {
+ [AI_TYPE]: AIService;
+ }
+}
+/**
+ * Returns the default {@link AI} instance that is associated with the provided
+ * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the
+ * default settings.
+ *
+ * @example
+ * ```javascript
+ * const ai = getAI(app);
+ * ```
+ *
+ * @example
+ * ```javascript
+ * // Get an AI instance configured to use the Gemini Developer API (via Google AI).
+ * const ai = getAI(app, { backend: new GoogleAIBackend() });
+ * ```
+ *
+ * @example
+ * ```javascript
+ * // Get an AI instance configured to use the Vertex AI Gemini API.
+ * const ai = getAI(app, { backend: new VertexAIBackend() });
+ * ```
+ *
+ * @param app - The {@link @firebase/app#FirebaseApp} to use.
+ * @param options - {@link AIOptions} that configure the AI instance.
+ * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.
+ *
+ * @public
+ */
+export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
+/**
+ * Returns a {@link GenerativeModel} class with methods for inference
+ * and other functionality.
+ *
+ * @public
+ */
+export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
+/**
+ * Returns an {@link ImagenModel} class with methods for using Imagen.
+ *
+ * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
+ *
+ * @param ai - An {@link AI} instance.
+ * @param modelParams - Parameters to use when making Imagen requests.
+ * @param requestOptions - Additional options to use when making requests.
+ *
+ * @throws If the `apiKey` or `projectId` fields are missing in your
+ * Firebase config.
+ *
+ * @public
+ */
+export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;
+/**
+ * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.
+ *
+ * The Live API is only supported in modern browser windows and Node >= 22.
+ *
+ * @param ai - An {@link AI} instance.
+ * @param modelParams - Parameters to use when setting up a {@link LiveSession}.
+ * @throws If the `apiKey` or `projectId` fields are missing in your
+ * Firebase config.
+ *
+ * @beta
+ */
+export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel;
diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/backend.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/backend.d.ts
new file mode 100644
index 0000000..2a1e9e6
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/backend.d.ts
@@ -0,0 +1,74 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { BackendType } from './public-types';
+/**
+ * Abstract base class representing the configuration for an AI service backend.
+ * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
+ * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and
+ * {@link VertexAIBackend} for the Vertex AI Gemini API.
+ *
+ * @public
+ */
+export declare abstract class Backend {
+ /**
+ * Specifies the backend type.
+ */
+ readonly backendType: BackendType;
+ /**
+ * Protected constructor for use by subclasses.
+ * @param type - The backend type.
+ */
+ protected constructor(type: BackendType);
+}
+/**
+ * Configuration class for the Gemini Developer API.
+ *
+ * Use this with {@link AIOptions} when initializing the AI service via
+ * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.
+ *
+ * @public
+ */
+export declare class GoogleAIBackend extends Backend {
+ /**
+ * Creates a configuration object for the Gemini Developer API backend.
+ */
+ constructor();
+}
+/**
+ * Configuration class for the Vertex AI Gemini API.
+ *
+ * Use this with {@link AIOptions} when initializing the AI service via
+ * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.
+ *
+ * @public
+ */
+export declare class VertexAIBackend extends Backend {
+ /**
+ * The region identifier.
+ * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
+ * for a list of supported locations.
+ */
+ readonly location: string;
+ /**
+ * Creates a configuration object for the Vertex AI backend.
+ *
+ * @param location - The region identifier, defaulting to `us-central1`;
+ * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
+ * for a list of supported locations.
+ */
+ constructor(location?: string);
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/constants.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/constants.d.ts
new file mode 100644
index 0000000..9d89d40
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/constants.d.ts
@@ -0,0 +1,27 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+export declare const AI_TYPE = "AI";
+export declare const DEFAULT_LOCATION = "us-central1";
+export declare const DEFAULT_DOMAIN = "firebasevertexai.googleapis.com";
+export declare const DEFAULT_API_VERSION = "v1beta";
+export declare const PACKAGE_VERSION: string;
+export declare const LANGUAGE_TAG = "gl-js";
+export declare const DEFAULT_FETCH_TIMEOUT_MS: number;
+/**
+ * Defines the name of the default in-cloud model to use for hybrid inference.
+ */
+export declare const DEFAULT_HYBRID_IN_CLOUD_MODEL = "gemini-2.0-flash-lite";
diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/errors.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/errors.d.ts
new file mode 100644
index 0000000..cb0a0fe
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/errors.d.ts
@@ -0,0 +1,35 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { FirebaseError } from '@firebase/util';
+import { AIErrorCode, CustomErrorData } from './types';
+/**
+ * Error class for the Firebase AI SDK.
+ *
+ * @public
+ */
+export declare class AIError extends FirebaseError {
+ readonly code: AIErrorCode;
+ readonly customErrorData?: CustomErrorData | undefined;
+ /**
+ * Constructs a new instance of the `AIError` class.
+ *
+ * @param code - The error code from {@link (AIErrorCode:type)}.
+ * @param message - A human-readable message describing the error.
+ * @param customErrorData - Optional error data.
+ */
+ constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined);
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/factory-browser.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/factory-browser.d.ts
new file mode 100644
index 0000000..4dd134a
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/factory-browser.d.ts
@@ -0,0 +1,19 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { ComponentContainer, InstanceFactoryOptions } from '@firebase/component';
+import { AIService } from './service';
+export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;
diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/googleai-mappers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/googleai-mappers.d.ts
new file mode 100644
index 0000000..ae6a19d
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/googleai-mappers.d.ts
@@ -0,0 +1,73 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { CountTokensRequest, GenerateContentCandidate, GenerateContentRequest, GenerateContentResponse, PromptFeedback } from './types';
+import { GoogleAIGenerateContentResponse, GoogleAIGenerateContentCandidate, GoogleAICountTokensRequest } from './types/googleai';
+/**
+ * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).
+ * The public API prioritizes the format used by the Vertex AI Gemini API.
+ * We avoid having two sets of types by translating requests and responses between the two API formats.
+ * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API
+ * with minimal code changes.
+ *
+ * In here are functions that map requests and responses between the two API formats.
+ * Requests in the Vertex AI format are mapped to the Google AI format before being sent.
+ * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.
+ */
+/**
+ * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI.
+ *
+ * @param generateContentRequest The {@link GenerateContentRequest} to map.
+ * @returns A {@link GenerateContentResponse} that conforms to the Google AI format.
+ * + * @throws If the request contains properties that are unsupported by Google AI. + * + * @internal + */ +export declare function mapGenerateContentRequest(generateContentRequest: GenerateContentRequest): GenerateContentRequest; +/** + * Maps a {@link GenerateContentResponse} from Google AI to the format of the + * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API. + * + * @param googleAIResponse The {@link GenerateContentResponse} from Google AI. + * @returns A {@link GenerateContentResponse} that conforms to the public API's format. + * + * @internal + */ +export declare function mapGenerateContentResponse(googleAIResponse: GoogleAIGenerateContentResponse): GenerateContentResponse; +/** + * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI. + * + * @param countTokensRequest The {@link CountTokensRequest} to map. + * @param model The model to count tokens with. + * @returns A {@link CountTokensRequest} that conforms to the Google AI format. + * + * @internal + */ +export declare function mapCountTokensRequest(countTokensRequest: CountTokensRequest, model: string): GoogleAICountTokensRequest; +/** + * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms + * to the Vertex AI API format. + * + * @param candidates The {@link GoogleAIGenerateContentCandidate} to map. + * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format. + * + * @throws If any {@link Part} in the candidates has a `videoMetadata` property. + * + * @internal + */ +export declare function mapGenerateContentCandidates(candidates: GoogleAIGenerateContentCandidate[]): GenerateContentCandidate[]; +export declare function mapPromptFeedback(promptFeedback: PromptFeedback): PromptFeedback; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/helpers.d.ts new file mode 100644 index 0000000..705ffec --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/helpers.d.ts @@ -0,0 +1,30 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Backend } from './backend'; +/** + * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI} + * instances by backend type. + * + * @internal + */ +export declare function encodeInstanceIdentifier(backend: Backend): string; +/** + * Decodes an instance identifier string into a {@link Backend}. + * + * @internal + */ +export declare function decodeInstanceIdentifier(instanceIdentifier: string): Backend; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/index.d.ts new file mode 100644 index 0000000..a377500 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/index.d.ts @@ -0,0 +1,13 @@ +/** + * The Firebase AI Web SDK. 
+ * + * @packageDocumentation + */ +import { LanguageModel } from './types/language-model'; +declare global { + interface Window { + LanguageModel: LanguageModel; + } +} +export * from './api'; +export * from './public-types'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/index.node.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/index.node.d.ts new file mode 100644 index 0000000..e96f4c5 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/index.node.d.ts @@ -0,0 +1,7 @@ +/** + * The Firebase AI Web SDK. + * + * @packageDocumentation + */ +export * from './api'; +export * from './public-types'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/logger.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/logger.d.ts new file mode 100644 index 0000000..5991ed1 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/logger.d.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Logger } from '@firebase/logger'; +export declare const logger: Logger; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chat-session-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chat-session-helpers.d.ts new file mode 100644 index 0000000..65e4eef --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chat-session-helpers.d.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content } from '../types'; +export declare function validateChatHistory(history: Content[]): void; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chat-session.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chat-session.d.ts new file mode 100644 index 0000000..2f2557a --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chat-session.d.ts @@ -0,0 +1,52 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, GenerateContentResult, GenerateContentStreamResult, Part, RequestOptions, StartChatParams } from '../types'; +import { ApiSettings } from '../types/internal'; +import { ChromeAdapter } from '../types/chrome-adapter'; +/** + * ChatSession class that enables sending chat messages and stores + * history of sent and received messages so far. + * + * @public + */ +export declare class ChatSession { + model: string; + private chromeAdapter?; + params?: StartChatParams | undefined; + requestOptions?: RequestOptions | undefined; + private _apiSettings; + private _history; + private _sendPromise; + constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); + /** + * Gets the chat history so far. Blocked prompts are not added to history. + * Neither blocked candidates nor the prompts that generated them are added + * to history. + */ + getHistory(): Promise<Content[]>; + /** + * Sends a chat message and receives a non-streaming + * {@link GenerateContentResult} + */ + sendMessage(request: string | Array<string | Part>): Promise<GenerateContentResult>; + /** + * Sends a chat message and receives the response as a + * {@link GenerateContentStreamResult} containing an iterable stream + * and a response promise. + */ + sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chrome-adapter.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chrome-adapter.d.ts new file mode 100644 index 0000000..5bd0a99 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/chrome-adapter.d.ts @@ -0,0 +1,124 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { CountTokensRequest, GenerateContentRequest, InferenceMode, OnDeviceParams } from '../types'; +import { ChromeAdapter } from '../types/chrome-adapter'; +import { LanguageModel } from '../types/language-model'; +/** + * Defines an inference "backend" that uses Chrome's on-device model, + * and encapsulates logic for detecting when on-device inference is + * possible. 
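+ *
+ * A minimal sketch of how the adapter is obtained and consulted (the SDK normally does this
+ * internally through {@link chromeAdapterFactory}; the mode constant and `request` shown are
+ * illustrative):
+ * ```javascript
+ * const adapter = chromeAdapterFactory(InferenceMode.PREFER_ON_DEVICE, window);
+ * if (adapter && (await adapter.isAvailable(request))) {
+ *   const response = await adapter.generateContent(request);
+ * }
+ * ```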
+ */ +export declare class ChromeAdapterImpl implements ChromeAdapter { + languageModelProvider: LanguageModel; + mode: InferenceMode; + static SUPPORTED_MIME_TYPES: string[]; + private isDownloading; + private downloadPromise; + private oldSession; + onDeviceParams: OnDeviceParams; + constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams); + /** + * Checks if a given request can be made on-device. + * + * Encapsulates a few concerns: + * - the mode + * - API existence + * - prompt formatting + * - model availability, including triggering download if necessary + * + * Pros: callers needn't be concerned with details of on-device availability. + * Cons: this method spans a few concerns and splits request validation from usage. + * If instance variables weren't already part of the API, we could consider a better + * separation of concerns. + */ + isAvailable(request: GenerateContentRequest): Promise<boolean>; + /** + * Generates content on device. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContent} for generating content in + * Cloud. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + generateContent(request: GenerateContentRequest): Promise<Response>; + /** + * Generates content stream on device. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in + * Cloud. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + generateContentStream(request: GenerateContentRequest): Promise<Response>; + countTokens(_request: CountTokensRequest): Promise<Response>; + /** + * Asserts that inference for the given request can be performed by an on-device model. + */ + private static isOnDeviceRequest; + /** + * Encapsulates logic to get availability and download a model if one is downloadable. + */ + private downloadIfAvailable; + /** + * Triggers out-of-band download of an on-device model. + * + * Chrome only downloads models as needed. Chrome knows a model is needed when code calls + * LanguageModel.create. + * + * Since Chrome manages the download, the SDK can only avoid redundant download requests by + * tracking if a download has previously been requested. + */ + private download; + /** + * Converts a Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object. + */ + private static toLanguageModelMessage; + /** + * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object. + */ + private static toLanguageModelMessageContent; + /** + * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string. + */ + private static toLanguageModelMessageRole; + /** + * Abstracts Chrome session creation. + * + * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all + * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all + * inference. + * + * Chrome will remove a model from memory if it's no longer in use, so this method ensures a + * new session is created before an old session is destroyed. + */ + private createSession; + /** + * Formats a string returned by Chrome as a {@link Response} returned by Firebase AI. + */ + private static toResponse; + /** + * Formats a string stream returned by Chrome as SSE returned by Firebase AI.
+ */ + private static toStreamResponse; +} +/** + * Creates a ChromeAdapterImpl on demand. + */ +export declare function chromeAdapterFactory(mode: InferenceMode, window?: Window, params?: OnDeviceParams): ChromeAdapterImpl | undefined; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/count-tokens.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/count-tokens.d.ts new file mode 100644 index 0000000..9f94f86 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/count-tokens.d.ts @@ -0,0 +1,21 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { CountTokensRequest, CountTokensResponse, RequestOptions } from '../types'; +import { ApiSettings } from '../types/internal'; +import { ChromeAdapter } from '../types/chrome-adapter'; +export declare function countTokensOnCloud(apiSettings: ApiSettings, model: string, params: CountTokensRequest, requestOptions?: RequestOptions): Promise<CountTokensResponse>; +export declare function countTokens(apiSettings: ApiSettings, model: string, params: CountTokensRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<CountTokensResponse>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/generate-content.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/generate-content.d.ts new file mode 100644 index 0000000..96493bd --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/generate-content.d.ts @@ -0,0 +1,21 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import { GenerateContentRequest, GenerateContentResult, GenerateContentStreamResult, RequestOptions } from '../types'; +import { ApiSettings } from '../types/internal'; +import { ChromeAdapter } from '../types/chrome-adapter'; +export declare function generateContentStream(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<GenerateContentStreamResult>; +export declare function generateContent(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<GenerateContentResult>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/live-session-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/live-session-helpers.d.ts new file mode 100644 index 0000000..c6f8dea --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/live-session-helpers.d.ts @@ -0,0 +1,154 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { FunctionCall, FunctionResponse } from '../types'; +import { LiveSession } from './live-session'; +/** + * A controller for managing an active audio conversation. + * + * @beta + */ +export interface AudioConversationController { + /** + * Stops the audio conversation, closes the microphone connection, and + * cleans up resources. Returns a promise that resolves when cleanup is complete. + */ + stop: () => Promise<void>; +} +/** + * Options for {@link startAudioConversation}. + * + * @beta + */ +export interface StartAudioConversationOptions { + /** + * An async handler that is called when the model requests a function to be executed. + * The handler should perform the function call and return the result as a `Part`, + * which will then be sent back to the model. + */ + functionCallingHandler?: (functionCalls: FunctionCall[]) => Promise<FunctionResponse>; +} +/** + * Dependencies needed by the {@link AudioConversationRunner}. + * + * @internal + */ +interface RunnerDependencies { + audioContext: AudioContext; + mediaStream: MediaStream; + sourceNode: MediaStreamAudioSourceNode; + workletNode: AudioWorkletNode; +} +/** + * Encapsulates the core logic of an audio conversation. + * + * @internal + */ +export declare class AudioConversationRunner { + private readonly liveSession; + private readonly options; + private readonly deps; + /** A flag to indicate if the conversation has been stopped. */ + private isStopped; + /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */ + private readonly stopDeferred; + /** A promise that tracks the lifecycle of the main `runReceiveLoop`. */ + private readonly receiveLoopPromise; + /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */ + private readonly playbackQueue; + /** Tracks scheduled audio sources. 
Used to cancel scheduled audio when the model is interrupted. */ + private scheduledSources; + /** A high-precision timeline pointer for scheduling gapless audio playback. */ + private nextStartTime; + /** A mutex to prevent the playback processing loop from running multiple times concurrently. */ + private isPlaybackLoopRunning; + constructor(liveSession: LiveSession, options: StartAudioConversationOptions, deps: RunnerDependencies); + /** + * Stops the conversation and unblocks the main receive loop. + */ + stop(): Promise<void>; + /** + * Cleans up all audio resources (nodes, stream tracks, context) and marks the + * session as no longer in a conversation. + */ + private cleanup; + /** + * Adds audio data to the queue and ensures the playback loop is running. + */ + private enqueueAndPlay; + /** + * Stops all current and pending audio playback and clears the queue. This is + * called when the server indicates the model's speech was interrupted with + * `LiveServerContent.modelTurn.interrupted`. + */ + private interruptPlayback; + /** + * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence. + */ + private processPlaybackQueue; + /** + * The main loop that listens for and processes messages from the server. + */ + private runReceiveLoop; +} +/** + * Starts a real-time, bidirectional audio conversation with the model. This helper function manages + * the complexities of microphone access, audio recording, playback, and interruptions. + * + * @remarks Important: This function must be called in response to a user gesture + * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}. + * + * @example + * ```javascript + * const liveSession = await model.connect(); + * let conversationController; + * + * // This function must be called from within a click handler. + * async function startConversation() { + * try { + * conversationController = await startAudioConversation(liveSession); + * } catch (e) { + * // Handle AI-specific errors + * if (e instanceof AIError) { + * console.error("AI Error:", e.message); + * } + * // Handle microphone permission and hardware errors + * else if (e instanceof DOMException) { + * console.error("Microphone Error:", e.message); + * } + * // Handle other unexpected errors + * else { + * console.error("An unexpected error occurred:", e); + * } + * } + * } + * + * // Later, to stop the conversation: + * // if (conversationController) { + * // await conversationController.stop(); + * // } + * ``` + * + * @param liveSession - An active {@link LiveSession} instance. + * @param options - Configuration options for the audio conversation. + * @returns A `Promise` that resolves with an {@link AudioConversationController}. + * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`). + * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions. 
+ * + * @beta + */ +export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>; +export {}; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/live-session.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/live-session.d.ts new file mode 100644 index 0000000..92ecbe5 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/methods/live-session.d.ts @@ -0,0 +1,154 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { FunctionResponse, GenerativeContentBlob, LiveServerContent, LiveServerToolCall, LiveServerToolCallCancellation, Part } from '../public-types'; +import { WebSocketHandler } from '../websocket'; +/** + * Represents an active, real-time, bidirectional conversation with the model. + * + * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}. + * + * @beta + */ +export declare class LiveSession { + private webSocketHandler; + private serverMessages; + /** + * Indicates whether this Live session is closed. + * + * @beta + */ + isClosed: boolean; + /** + * Indicates whether this Live session is being controlled by an `AudioConversationController`. + * + * @beta + */ + inConversation: boolean; + /** + * @internal + */ + constructor(webSocketHandler: WebSocketHandler, serverMessages: AsyncGenerator<unknown>); + /** + * Sends content to the server. + * + * @param request - The message to send to the model. + * @param turnComplete - Indicates if the turn is complete. Defaults to false. + * @throws If this session has been closed. + * + * @beta + */ + send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>; + /** + * Sends text to the server in realtime. + * + * @example + * ```javascript + * liveSession.sendTextRealtime("Hello, how are you?"); + * ``` + * + * @param text - The text data to send. + * @throws If this session has been closed. + * + * @beta + */ + sendTextRealtime(text: string): Promise<void>; + /** + * Sends audio data to the server in realtime. + * + * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz + * little-endian. + * + * @example + * ```javascript + * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian. + * const blob = { mimeType: "audio/pcm", data: pcmData }; + * liveSession.sendAudioRealtime(blob); + * ``` + * + * @param blob - The base64-encoded PCM data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + sendAudioRealtime(blob: GenerativeContentBlob): Promise<void>; + /** + * Sends video data to the server in realtime. + * + * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It + * is recommended to set `mimeType` to `image/jpeg`. + * + * @example + * ```javascript + * // const videoFrame = ... 
base64-encoded JPEG data + * const blob = { mimeType: "image/jpeg", data: videoFrame }; + * liveSession.sendVideoRealtime(blob); + * ``` + * @param blob - The base64-encoded video data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + sendVideoRealtime(blob: GenerativeContentBlob): Promise<void>; + /** + * Sends function responses to the server. + * + * @param functionResponses - The function responses to send. + * @throws If this session has been closed. + * + * @beta + */ + sendFunctionResponses(functionResponses: FunctionResponse[]): Promise<void>; + /** + * Yields messages received from the server. + * This can only be used by one consumer at a time. + * + * @returns An `AsyncGenerator` that yields server messages as they arrive. + * @throws If the session is already closed, or if we receive a response that we don't support. + * + * @beta + */ + receive(): AsyncGenerator<LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation>; + /** + * Closes this session. + * All methods on this session will throw an error once this resolves. + * + * @beta + */ + close(): Promise<void>; + /** + * Sends realtime input to the server. + * + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * @param mediaChunks - The media chunks to send. + * @throws If this session has been closed. + * + * @beta + */ + sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>; + /** + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * Sends a stream of {@link GenerativeContentBlob}. + * + * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send. + * @throws If this session has been closed. + * + * @beta + */ + sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/ai-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/ai-model.d.ts new file mode 100644 index 0000000..2d5462b --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/ai-model.d.ts @@ -0,0 +1,72 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { AI, BackendType } from '../public-types'; +import { ApiSettings } from '../types/internal'; +/** + * Base class for Firebase AI model APIs. + * + * Instances of this class are associated with a specific Firebase AI {@link Backend} + * and provide methods for interacting with the configured generative model. + * + * @public + */ +export declare abstract class AIModel { + /** + * The fully qualified model resource name to use for generating images + * (for example, `publishers/google/models/imagen-3.0-generate-002`). + */ + readonly model: string; + /** + * @internal + */ + _apiSettings: ApiSettings; + /** + * Constructs a new instance of the {@link AIModel} class. 
+ * + * This constructor should only be called from subclasses that provide + * a model API. + * + * @param ai - an {@link AI} instance. + * @param modelName - The name of the model being used. It can be in one of the following formats: + * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) + * - `models/my-model` (will resolve to `publishers/google/models/my-model`) + * - `publishers/my-publisher/models/my-model` (fully qualified model name) + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @internal + */ + protected constructor(ai: AI, modelName: string); + /** + * Normalizes the given model name to a fully qualified model resource name. + * + * @param modelName - The model name to normalize. + * @returns The fully qualified model resource name. + * + * @internal + */ + static normalizeModelName(modelName: string, backendType: BackendType): string; + /** + * @internal + */ + private static normalizeGoogleAIModelName; + /** + * @internal + */ + private static normalizeVertexAIModelName; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/generative-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/generative-model.d.ts new file mode 100644 index 0000000..87fd067 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/generative-model.d.ts @@ -0,0 +1,56 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, CountTokensRequest, CountTokensResponse, GenerateContentRequest, GenerateContentResult, GenerateContentStreamResult, GenerationConfig, ModelParams, Part, RequestOptions, SafetySetting, StartChatParams, Tool, ToolConfig } from '../types'; +import { ChatSession } from '../methods/chat-session'; +import { AI } from '../public-types'; +import { AIModel } from './ai-model'; +import { ChromeAdapter } from '../types/chrome-adapter'; +/** + * Class for generative model APIs. + * @public + */ +export declare class GenerativeModel extends AIModel { + private chromeAdapter?; + generationConfig: GenerationConfig; + safetySettings: SafetySetting[]; + requestOptions?: RequestOptions; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: Content; + constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined); + /** + * Makes a single non-streaming call to the model + * and returns an object containing a single {@link GenerateContentResponse}. + */ + generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>; + /** + * Makes a single streaming call to the model + * and returns an object containing an iterable stream that iterates + * over all chunks in the streaming response as well as + * a promise that returns the final aggregated response. 
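+ *
+ * @example
+ * A minimal usage sketch (assumes a `model` obtained from `getGenerativeModel()`; the prompt is
+ * illustrative):
+ * ```javascript
+ * const result = await model.generateContentStream('Write a short poem.');
+ * for await (const chunk of result.stream) {
+ *   console.log(chunk.text());
+ * }
+ * const finalResponse = await result.response;
+ * console.log(finalResponse.text());
+ * ```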
+ */ + generateContentStream(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentStreamResult>; + /** + * Gets a new {@link ChatSession} instance which can be used for + * multi-turn chats. + */ + startChat(startChatParams?: StartChatParams): ChatSession; + /** + * Counts the tokens in the provided request. + */ + countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/imagen-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/imagen-model.d.ts new file mode 100644 index 0000000..699f2a2 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/imagen-model.d.ts @@ -0,0 +1,102 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { AI } from '../public-types'; +import { ImagenGCSImage, ImagenGenerationConfig, ImagenInlineImage, RequestOptions, ImagenModelParams, ImagenGenerationResponse, ImagenSafetySettings } from '../types'; +import { AIModel } from './ai-model'; +/** + * Class for Imagen model APIs. + * + * This class provides methods for generating images using the Imagen model. + * + * @example + * ```javascript + * const imagen = new ImagenModel( + * ai, + * { + * model: 'imagen-3.0-generate-002' + * } + * ); + * + * const response = await imagen.generateImages('A photo of a cat'); + * if (response.images.length > 0) { + * console.log(response.images[0].bytesBase64Encoded); + * } + * ``` + * + * @public + */ +export declare class ImagenModel extends AIModel { + requestOptions?: RequestOptions | undefined; + /** + * The Imagen generation configuration. + */ + generationConfig?: ImagenGenerationConfig; + /** + * Safety settings for filtering inappropriate content. + */ + safetySettings?: ImagenSafetySettings; + /** + * Constructs a new instance of the {@link ImagenModel} class. + * + * @param ai - an {@link AI} instance. + * @param modelParams - Parameters to use when making requests to Imagen. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + */ + constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); + /** + * Generates images using the Imagen model and returns them as + * base64-encoded strings. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the generated images. + * + * @throws If the request to generate images fails. This happens if the + * prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. 
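+ *
+ * @example
+ * A minimal sketch of checking the result for filtered images (assumes an `imagen` instance
+ * created as in the class-level example above):
+ * ```javascript
+ * const response = await imagen.generateImages('A photo of a cat');
+ * if (response.filteredReason) {
+ *   console.log(response.filteredReason);
+ * }
+ * if (response.images.length > 0) {
+ *   console.log(response.images[0].bytesBase64Encoded);
+ * }
+ * ```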
+ * + * @public + */ + generateImages(prompt: string): Promise<ImagenGenerationResponse<ImagenInlineImage>>; + /** + * Generates images using the Imagen model and stores them in Cloud Storage for Firebase. + * + * @internal This method is temporarily internal. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @param gcsURI - The URI of the Cloud Storage for Firebase bucket location to store the generated images in. + * This should be a directory. For example, `gs://my-bucket/my-directory/`. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the URLs of the generated images. + * + * @throws If the request to generate images fails. This happens if + * the prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + */ + generateImagesGCS(prompt: string, gcsURI: string): Promise<ImagenGenerationResponse<ImagenGCSImage>>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/index.d.ts new file mode 100644 index 0000000..3d79da7 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/index.d.ts @@ -0,0 +1,20 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +export * from './ai-model'; +export * from './generative-model'; +export * from './live-generative-model'; +export * from './imagen-model'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/live-generative-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/live-generative-model.d.ts new file mode 100644 index 0000000..cf0b896 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/models/live-generative-model.d.ts @@ -0,0 +1,55 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { AIModel } from './ai-model'; +import { LiveSession } from '../methods/live-session'; +import { AI, Content, LiveGenerationConfig, LiveModelParams, Tool, ToolConfig } from '../public-types'; +import { WebSocketHandler } from '../websocket'; +/** + * Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal + * interactions with Gemini.
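+ *
+ * @example
+ * A minimal sketch of a live exchange (assumes a `liveModel` obtained from
+ * `getLiveGenerativeModel()`; the prompt is illustrative):
+ * ```javascript
+ * const session = await liveModel.connect();
+ * await session.send('Hello!', true);
+ * for await (const message of session.receive()) {
+ *   console.log(message);
+ * }
+ * await session.close();
+ * ```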
+ * + * This class should only be instantiated with {@link getLiveGenerativeModel}. + * + * @beta + */ +export declare class LiveGenerativeModel extends AIModel { + /** + * @internal + */ + private _webSocketHandler; + generationConfig: LiveGenerationConfig; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: Content; + /** + * @internal + */ + constructor(ai: AI, modelParams: LiveModelParams, + /** + * @internal + */ + _webSocketHandler: WebSocketHandler); + /** + * Starts a {@link LiveSession}. + * + * @returns A {@link LiveSession}. + * @throws If the connection failed to be established with the server. + * + * @beta + */ + connect(): Promise<LiveSession>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/public-types.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/public-types.d.ts new file mode 100644 index 0000000..21620ed --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/public-types.d.ts @@ -0,0 +1,97 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { FirebaseApp } from '@firebase/app'; +import { Backend } from './backend'; +export * from './types'; +/** + * An instance of the Firebase AI SDK. + * + * Do not create this instance directly. Instead, use {@link getAI | getAI()}. + * + * @public + */ +export interface AI { + /** + * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with. + */ + app: FirebaseApp; + /** + * A {@link Backend} instance that specifies the configuration for the target backend, + * either the Gemini Developer API (using {@link GoogleAIBackend}) or the + * Vertex AI Gemini API (using {@link VertexAIBackend}). + */ + backend: Backend; + /** + * Options applied to this {@link AI} instance. + */ + options?: AIOptions; + /** + * @deprecated use `AI.backend.location` instead. + * + * The location configured for this AI service instance, relevant for Vertex AI backends. + */ + location: string; +} +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase AI SDK. + * This determines which backend service (Vertex AI Gemini API or Gemini Developer API) + * the SDK will communicate with. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +export declare const BackendType: { + /** + * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + readonly VERTEX_AI: "VERTEX_AI"; + /** + * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}). + * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + readonly GOOGLE_AI: "GOOGLE_AI"; +}; +/** + * Type alias representing valid backend types. 
+ * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`. + * + * @public + */ +export type BackendType = (typeof BackendType)[keyof typeof BackendType]; +/** + * Options for initializing the AI service using {@link getAI | getAI()}. + * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API) + * and configuring its specific options (like location for Vertex AI). + * + * @public + */ +export interface AIOptions { + /** + * The backend configuration to use for the AI service instance. + * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}). + */ + backend?: Backend; + /** + * Whether to use App Check limited use tokens. Defaults to false. + */ + useLimitedUseAppCheckTokens?: boolean; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/hybrid-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/hybrid-helpers.d.ts new file mode 100644 index 0000000..b52e6bf --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/hybrid-helpers.d.ts @@ -0,0 +1,33 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { GenerateContentRequest, ChromeAdapter, InferenceSource } from '../types'; +interface CallResult<Response> { + response: Response; + inferenceSource: InferenceSource; +} +/** + * Dispatches a request to the appropriate backend (on-device or in-cloud) + * based on the inference mode. + * + * @param request - The request to be sent. + * @param chromeAdapter - The on-device model adapter. + * @param onDeviceCall - The function to call for on-device inference. + * @param inCloudCall - The function to call for in-cloud inference. + * @returns The response from the backend. + */ +export declare function callCloudOrDevice<Response>(request: GenerateContentRequest, chromeAdapter: ChromeAdapter | undefined, onDeviceCall: () => Promise<Response>, inCloudCall: () => Promise<Response>): Promise<CallResult<Response>>; +export {}; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/imagen-image-format.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/imagen-image-format.d.ts new file mode 100644 index 0000000..2f3eddb --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/imagen-image-format.d.ts @@ -0,0 +1,61 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + * Defines the image format for images generated by Imagen. + * + * Use this class to specify the desired format (JPEG or PNG) and compression quality + * for images generated by Imagen. This is typically included as part of + * {@link ImagenModelParams}. + * + * @example + * ```javascript + * const imagenModelParams = { + * // ... other ImagenModelParams + * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75. + * } + * ``` + * + * @public + */ +export declare class ImagenImageFormat { + /** + * The MIME type. + */ + mimeType: string; + /** + * The level of compression (a number between 0 and 100). + */ + compressionQuality?: number; + private constructor(); + /** + * Creates an {@link ImagenImageFormat} for a JPEG image. + * + * @param compressionQuality - The level of compression (a number between 0 and 100). + * @returns An {@link ImagenImageFormat} object for a JPEG image. + * + * @public + */ + static jpeg(compressionQuality?: number): ImagenImageFormat; + /** + * Creates an {@link ImagenImageFormat} for a PNG image. + * + * @returns An {@link ImagenImageFormat} object for a PNG image. + * + * @public + */ + static png(): ImagenImageFormat; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/request-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/request-helpers.d.ts new file mode 100644 index 0000000..fa79626 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/request-helpers.d.ts @@ -0,0 +1,28 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, GenerateContentRequest, Part } from '../types'; +import { ImagenGenerationParams, PredictRequestBody } from '../types/internal'; +export declare function formatSystemInstruction(input?: string | Part | Content): Content | undefined; +export declare function formatNewContent(request: string | Array<string | Part>): Content; +export declare function formatGenerateContentInput(params: GenerateContentRequest | string | Array<string | Part>): GenerateContentRequest; +/** + * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format + * that is expected from the REST API. + * + * @internal + */ +export declare function createPredictRequestBody(prompt: string, { gcsURI, imageFormat, addWatermark, numberOfImages, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }: ImagenGenerationParams): PredictRequestBody; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/request.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/request.d.ts new file mode 100644 index 0000000..b0aed14 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/request.d.ts @@ -0,0 +1,49 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { RequestOptions } from '../types'; +import { ApiSettings } from '../types/internal'; +export declare enum Task { + GENERATE_CONTENT = "generateContent", + STREAM_GENERATE_CONTENT = "streamGenerateContent", + COUNT_TOKENS = "countTokens", + PREDICT = "predict" +} +export declare class RequestUrl { + model: string; + task: Task; + apiSettings: ApiSettings; + stream: boolean; + requestOptions?: RequestOptions | undefined; + constructor(model: string, task: Task, apiSettings: ApiSettings, stream: boolean, requestOptions?: RequestOptions | undefined); + toString(): string; + private get baseUrl(); + private get apiVersion(); + private get modelPath(); + private get queryParams(); +} +export declare class WebSocketUrl { + apiSettings: ApiSettings; + constructor(apiSettings: ApiSettings); + toString(): string; + private get pathname(); +} +export declare function getHeaders(url: RequestUrl): Promise<Headers>; +export declare function constructRequest(model: string, task: Task, apiSettings: ApiSettings, stream: boolean, body: string, requestOptions?: RequestOptions): Promise<{ + url: string; + fetchOptions: RequestInit; +}>; +export declare function makeRequest(model: string, task: Task, apiSettings: ApiSettings, stream: boolean, body: string, requestOptions?: RequestOptions): Promise<Response>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/response-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/response-helpers.d.ts new file mode 100644 index 0000000..d0aded1 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/response-helpers.d.ts @@ -0,0 +1,57 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { EnhancedGenerateContentResponse, FunctionCall, GenerateContentResponse, ImagenGCSImage, ImagenInlineImage, InlineDataPart, Part, InferenceSource } from '../types'; +/** + * Creates an EnhancedGenerateContentResponse object that has helper functions and + * other modifications that improve usability. + */ +export declare function createEnhancedContentResponse(response: GenerateContentResponse, inferenceSource?: InferenceSource): EnhancedGenerateContentResponse; +/** + * Adds convenience helper methods to a response object, including stream + * chunks (as long as each chunk is a complete GenerateContentResponse JSON). 
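+ *
+ * A minimal sketch (assumes `rawResponse` is a parsed `GenerateContentResponse` and that the
+ * added helpers include `text()` and `functionCalls()`, built on the utilities below):
+ * ```javascript
+ * const enhanced = addHelpers(rawResponse);
+ * console.log(enhanced.text());
+ * console.log(enhanced.functionCalls());
+ * ```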
+ */ +export declare function addHelpers(response: GenerateContentResponse): EnhancedGenerateContentResponse; +/** + * Returns all text from the first candidate's parts, filtering by whether + * `partFilter()` returns true. + * + * @param response - The `GenerateContentResponse` from which to extract text. + * @param partFilter - Only return `Part`s for which this returns true + */ +export declare function getText(response: GenerateContentResponse, partFilter: (part: Part) => boolean): string; +/** + * Returns every {@link FunctionCall} associated with first candidate. + */ +export declare function getFunctionCalls(response: GenerateContentResponse): FunctionCall[] | undefined; +/** + * Returns every {@link InlineDataPart} in the first candidate if present. + * + * @internal + */ +export declare function getInlineDataParts(response: GenerateContentResponse): InlineDataPart[] | undefined; +export declare function formatBlockErrorMessage(response: GenerateContentResponse): string; +/** + * Convert a generic successful fetch response body to an Imagen response object + * that can be returned to the user. This converts the REST APIs response format to our + * APIs representation of a response. + * + * @internal + */ +export declare function handlePredictResponse<T extends ImagenInlineImage | ImagenGCSImage>(response: Response): Promise<{ + images: T[]; + filteredReason?: string; +}>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/schema-builder.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/schema-builder.d.ts new file mode 100644 index 0000000..e23e74f --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/schema-builder.d.ts @@ -0,0 +1,170 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { SchemaInterface, SchemaType, SchemaParams, SchemaRequest } from '../types/schema'; +/** + * Parent class encompassing all Schema types, with static methods that + * allow building specific Schema types. This class can be converted with + * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints. + * (This string conversion is automatically done when calling SDK methods.) + * @public + */ +export declare abstract class Schema implements SchemaInterface { + /** + * Optional. The type of the property. + * This can only be undefined when using `anyOf` schemas, which do not have an + * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}. + */ + type?: SchemaType; + /** Optional. The format of the property. + * Supported formats:<br/> + * <ul> + * <li>for NUMBER type: "float", "double"</li> + * <li>for INTEGER type: "int32", "int64"</li> + * <li>for STRING type: "email", "byte", etc</li> + * </ul> + */ + format?: string; + /** Optional. The description of the property. */ + description?: string; + /** Optional. The items of the property. 
*/ + items?: SchemaInterface; + /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + minItems?: number; + /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + maxItems?: number; + /** Optional. Whether the property is nullable. Defaults to false. */ + nullable: boolean; + /** Optional. The example of the property. */ + example?: unknown; + /** + * Allows user to add other schema properties that have not yet + * been officially added to the SDK. + */ + [key: string]: unknown; + constructor(schemaParams: SchemaInterface); + /** + * Defines how this Schema should be serialized as JSON. + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior + * @internal + */ + toJSON(): SchemaRequest; + static array(arrayParams: SchemaParams & { + items: Schema; + }): ArraySchema; + static object(objectParams: SchemaParams & { + properties: { + [k: string]: Schema; + }; + optionalProperties?: string[]; + }): ObjectSchema; + static string(stringParams?: SchemaParams): StringSchema; + static enumString(stringParams: SchemaParams & { + enum: string[]; + }): StringSchema; + static integer(integerParams?: SchemaParams): IntegerSchema; + static number(numberParams?: SchemaParams): NumberSchema; + static boolean(booleanParams?: SchemaParams): BooleanSchema; + static anyOf(anyOfParams: SchemaParams & { + anyOf: TypedSchema[]; + }): AnyOfSchema; +} +/** + * A type that includes all specific Schema types. + * @public + */ +export type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema | AnyOfSchema; +/** + * Schema class for "integer" types. + * @public + */ +export declare class IntegerSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} +/** + * Schema class for "number" types. + * @public + */ +export declare class NumberSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} +/** + * Schema class for "boolean" types. + * @public + */ +export declare class BooleanSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} +/** + * Schema class for "string" types. Can be used with or without + * enum values. + * @public + */ +export declare class StringSchema extends Schema { + enum?: string[]; + constructor(schemaParams?: SchemaParams, enumValues?: string[]); + /** + * @internal + */ + toJSON(): SchemaRequest; +} +/** + * Schema class for "array" types. + * The `items` param should refer to the type of item that can be a member + * of the array. + * @public + */ +export declare class ArraySchema extends Schema { + items: TypedSchema; + constructor(schemaParams: SchemaParams, items: TypedSchema); + /** + * @internal + */ + toJSON(): SchemaRequest; +} +/** + * Schema class for "object" types. + * The `properties` param must be a map of `Schema` objects. + * @public + */ +export declare class ObjectSchema extends Schema { + properties: { + [k: string]: TypedSchema; + }; + optionalProperties: string[]; + constructor(schemaParams: SchemaParams, properties: { + [k: string]: TypedSchema; + }, optionalProperties?: string[]); + /** + * @internal + */ + toJSON(): SchemaRequest; +} +/** + * Schema class representing a value that can conform to any of the provided sub-schemas. This is + * useful when a field can accept multiple distinct types or structures. 
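+ *
+ * Illustrative sketch only (not from the official reference): a field that may
+ * hold either a string or a number could be declared as
+ *
+ * @example
+ * const flexibleValue = Schema.anyOf({ anyOf: [Schema.string(), Schema.number()] });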
+ * @public + */ +export declare class AnyOfSchema extends Schema { + anyOf: TypedSchema[]; + constructor(schemaParams: SchemaParams & { + anyOf: TypedSchema[]; + }); + /** + * @internal + */ + toJSON(): SchemaRequest; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/stream-reader.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/stream-reader.d.ts new file mode 100644 index 0000000..4ffb0da --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/requests/stream-reader.d.ts @@ -0,0 +1,39 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { GenerateContentResponse, GenerateContentStreamResult } from '../types'; +import { ApiSettings } from '../types/internal'; +import { InferenceSource } from '../public-types'; +/** + * Process a response.body stream from the backend and return an + * iterator that provides one complete GenerateContentResponse at a time + * and a promise that resolves with a single aggregated + * GenerateContentResponse. + * + * @param response - Response from a fetch call + */ +export declare function processStream(response: Response, apiSettings: ApiSettings, inferenceSource?: InferenceSource): GenerateContentStreamResult; +/** + * Reads a raw stream from the fetch response and join incomplete + * chunks, returning a new stream that provides a single complete + * GenerateContentResponse in each iteration. + */ +export declare function getResponseStream<T>(inputStream: ReadableStream<string>): ReadableStream<T>; +/** + * Aggregates an array of `GenerateContentResponse`s into a single + * GenerateContentResponse. + */ +export declare function aggregateResponses(responses: GenerateContentResponse[]): GenerateContentResponse; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/service.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/service.d.ts new file mode 100644 index 0000000..b0da890 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/service.d.ts @@ -0,0 +1,36 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import { FirebaseApp, _FirebaseService } from '@firebase/app'; +import { AI, AIOptions, InferenceMode, OnDeviceParams } from './public-types'; +import { AppCheckInternalComponentName, FirebaseAppCheckInternal } from '@firebase/app-check-interop-types'; +import { Provider } from '@firebase/component'; +import { FirebaseAuthInternal, FirebaseAuthInternalName } from '@firebase/auth-interop-types'; +import { Backend } from './backend'; +import { ChromeAdapterImpl } from './methods/chrome-adapter'; +export declare class AIService implements AI, _FirebaseService { + app: FirebaseApp; + backend: Backend; + chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined; + auth: FirebaseAuthInternal | null; + appCheck: FirebaseAppCheckInternal | null; + _options?: Omit<AIOptions, 'backend'>; + location: string; + constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>, chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined); + _delete(): Promise<void>; + set options(optionsToSet: AIOptions); + get options(): AIOptions | undefined; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/chrome-adapter.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/chrome-adapter.d.ts new file mode 100644 index 0000000..6092353 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/chrome-adapter.d.ts @@ -0,0 +1,56 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { CountTokensRequest, GenerateContentRequest } from './requests'; +/** + * Defines an inference "backend" that uses Chrome's on-device model, + * and encapsulates logic for detecting when on-device inference is + * possible. + * + * These methods should not be called directly by the user. + * + * @beta + */ +export interface ChromeAdapter { + /** + * Checks if the on-device model is capable of handling a given + * request. + * @param request - A potential request to be passed to the model. + */ + isAvailable(request: GenerateContentRequest): Promise<boolean>; + /** + * Generates content using on-device inference. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContent} for generating + * content using in-cloud inference. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + */ + generateContent(request: GenerateContentRequest): Promise<Response>; + /** + * Generates a content stream using on-device inference. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContentStream} for generating + * a content stream using in-cloud inference. 
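+ *
+ * Illustrative sketch only: application code does not call the adapter itself;
+ * the equivalent user-facing call (assuming a `GenerativeModel` instance) is
+ *
+ * @example
+ * const result = await model.generateContentStream('Tell me a story.');
+ * for await (const chunk of result.stream) {
+ *   console.log(chunk.text());
+ * }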
+ * @param request - a standard Firebase AI {@link GenerateContentRequest} + */ + generateContentStream(request: GenerateContentRequest): Promise<Response>; + /** + * @internal + */ + countTokens(request: CountTokensRequest): Promise<Response>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/content.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/content.d.ts new file mode 100644 index 0000000..a760547 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/content.d.ts @@ -0,0 +1,265 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Language, Outcome, Role } from './enums'; +/** + * Content type for both prompts and response candidates. + * @public + */ +export interface Content { + role: Role; + parts: Part[]; +} +/** + * Content part - includes text, image/video, or function call/response + * part types. + * @public + */ +export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart; +/** + * Content part interface if the part represents a text string. + * @public + */ +export interface TextPart { + text: string; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: string; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Content part interface if the part represents an image. + * @public + */ +export interface InlineDataPart { + text?: never; + inlineData: GenerativeContentBlob; + functionCall?: never; + functionResponse?: never; + /** + * Applicable if `inlineData` is a video. + */ + videoMetadata?: VideoMetadata; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Describes the input video content. + * @public + */ +export interface VideoMetadata { + /** + * The start offset of the video in + * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format. + */ + startOffset: string; + /** + * The end offset of the video in + * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format. + */ + endOffset: string; +} +/** + * Content part interface if the part represents a {@link FunctionCall}. + * @public + */ +export interface FunctionCallPart { + text?: never; + inlineData?: never; + functionCall: FunctionCall; + functionResponse?: never; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Content part interface if the part represents {@link FunctionResponse}. 
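+ *
+ * Illustrative sketch only (the function name and payload are hypothetical):
+ *
+ * @example
+ * const part: FunctionResponsePart = {
+ *   functionResponse: {
+ *     name: 'getWeather',
+ *     response: { temperatureCelsius: 21 }
+ *   }
+ * };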
+ * @public + */ +export interface FunctionResponsePart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse: FunctionResponse; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Content part interface if the part represents {@link FileData} + * @public + */ +export interface FileDataPart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: FileData; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Represents the code that is executed by the model. + * + * @beta + */ +export interface ExecutableCodePart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: never; + thought?: never; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: ExecutableCode; + codeExecutionResult?: never; +} +/** + * Represents the code execution result from the model. + * + * @beta + */ +export interface CodeExecutionResultPart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: never; + thought?: never; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: CodeExecutionResult; +} +/** + * An interface for executable code returned by the model. + * + * @beta + */ +export interface ExecutableCode { + /** + * The programming language of the code. + */ + language?: Language; + /** + * The source code to be executed. + */ + code?: string; +} +/** + * The results of code execution run by the model. + * + * @beta + */ +export interface CodeExecutionResult { + /** + * The result of the code execution. + */ + outcome?: Outcome; + /** + * The output from the code execution, or an error message + * if it failed. + */ + output?: string; +} +/** + * A predicted {@link FunctionCall} returned from the model + * that contains a string representing the {@link FunctionDeclaration.name} + * and a structured JSON object containing the parameters and their values. + * @public + */ +export interface FunctionCall { + /** + * The id of the function call. This must be sent back in the associated {@link FunctionResponse}. + * + * + * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + id?: string; + name: string; + args: object; +} +/** + * The result output from a {@link FunctionCall} that contains a string + * representing the {@link FunctionDeclaration.name} + * and a structured JSON object containing any output + * from the function is used as context to the model. + * This should contain the result of a {@link FunctionCall} + * made based on model prediction. + * @public + */ +export interface FunctionResponse { + /** + * The id of the {@link FunctionCall}. + * + * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + id?: string; + name: string; + response: object; +} +/** + * Interface for sending an image. + * @public + */ +export interface GenerativeContentBlob { + mimeType: string; + /** + * Image as a base64 string. 
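+ *
+ * Illustrative sketch only (the base64 payload is a truncated placeholder):
+ *
+ * @example
+ * const imagePart: InlineDataPart = {
+ *   inlineData: { mimeType: 'image/png', data: 'iVBORw0KGgo...' }
+ * };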
+ */ + data: string; +} +/** + * Data pointing to a file uploaded on Google Cloud Storage. + * @public + */ +export interface FileData { + mimeType: string; + fileUri: string; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/enums.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/enums.d.ts new file mode 100644 index 0000000..170a299 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/enums.d.ts @@ -0,0 +1,398 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Role is the producer of the content. + * @public + */ +export type Role = (typeof POSSIBLE_ROLES)[number]; +/** + * Possible roles. + * @public + */ +export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"]; +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +export declare const HarmCategory: { + readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH"; + readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT"; + readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT"; + readonly HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT"; +}; +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +export type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory]; +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +export declare const HarmBlockThreshold: { + /** + * Content with `NEGLIGIBLE` will be allowed. + */ + readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE"; + /** + * Content with `NEGLIGIBLE` and `LOW` will be allowed. + */ + readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE"; + /** + * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed. + */ + readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH"; + /** + * All content will be allowed. + */ + readonly BLOCK_NONE: "BLOCK_NONE"; + /** + * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding + * to the {@link (HarmCategory:type)} will not be present in the response. + */ + readonly OFF: "OFF"; +}; +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +export type HarmBlockThreshold = (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold]; +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). + * + * @public + */ +export declare const HarmBlockMethod: { + /** + * The harm block method uses both probability and severity scores. + */ + readonly SEVERITY: "SEVERITY"; + /** + * The harm block method uses the probability score. + */ + readonly PROBABILITY: "PROBABILITY"; +}; +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). 
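+ *
+ * Illustrative sketch only; `method` may be set on a {@link SafetySetting} when
+ * using the Vertex AI Gemini API ({@link VertexAIBackend}):
+ *
+ * @example
+ * const setting: SafetySetting = {
+ *   category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+ *   threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ *   method: HarmBlockMethod.SEVERITY
+ * };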
+ * + * @public + */ +export type HarmBlockMethod = (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod]; +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +export declare const HarmProbability: { + /** + * Content has a negligible chance of being unsafe. + */ + readonly NEGLIGIBLE: "NEGLIGIBLE"; + /** + * Content has a low chance of being unsafe. + */ + readonly LOW: "LOW"; + /** + * Content has a medium chance of being unsafe. + */ + readonly MEDIUM: "MEDIUM"; + /** + * Content has a high chance of being unsafe. + */ + readonly HIGH: "HIGH"; +}; +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +export type HarmProbability = (typeof HarmProbability)[keyof typeof HarmProbability]; +/** + * Harm severity levels. + * @public + */ +export declare const HarmSeverity: { + /** + * Negligible level of harm severity. + */ + readonly HARM_SEVERITY_NEGLIGIBLE: "HARM_SEVERITY_NEGLIGIBLE"; + /** + * Low level of harm severity. + */ + readonly HARM_SEVERITY_LOW: "HARM_SEVERITY_LOW"; + /** + * Medium level of harm severity. + */ + readonly HARM_SEVERITY_MEDIUM: "HARM_SEVERITY_MEDIUM"; + /** + * High level of harm severity. + */ + readonly HARM_SEVERITY_HIGH: "HARM_SEVERITY_HIGH"; + /** + * Harm severity is not supported. + * + * @remarks + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + readonly HARM_SEVERITY_UNSUPPORTED: "HARM_SEVERITY_UNSUPPORTED"; +}; +/** + * Harm severity levels. + * @public + */ +export type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity]; +/** + * Reason that a prompt was blocked. + * @public + */ +export declare const BlockReason: { + /** + * Content was blocked by safety settings. + */ + readonly SAFETY: "SAFETY"; + /** + * Content was blocked, but the reason is uncategorized. + */ + readonly OTHER: "OTHER"; + /** + * Content was blocked because it contained terms from the terminology blocklist. + */ + readonly BLOCKLIST: "BLOCKLIST"; + /** + * Content was blocked due to prohibited content. + */ + readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT"; +}; +/** + * Reason that a prompt was blocked. + * @public + */ +export type BlockReason = (typeof BlockReason)[keyof typeof BlockReason]; +/** + * Reason that a candidate finished. + * @public + */ +export declare const FinishReason: { + /** + * Natural stop point of the model or provided stop sequence. + */ + readonly STOP: "STOP"; + /** + * The maximum number of tokens as specified in the request was reached. + */ + readonly MAX_TOKENS: "MAX_TOKENS"; + /** + * The candidate content was flagged for safety reasons. + */ + readonly SAFETY: "SAFETY"; + /** + * The candidate content was flagged for recitation reasons. + */ + readonly RECITATION: "RECITATION"; + /** + * Unknown reason. + */ + readonly OTHER: "OTHER"; + /** + * The candidate content contained forbidden terms. + */ + readonly BLOCKLIST: "BLOCKLIST"; + /** + * The candidate content potentially contained prohibited content. + */ + readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT"; + /** + * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII). + */ + readonly SPII: "SPII"; + /** + * The function call generated by the model was invalid. + */ + readonly MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL"; +}; +/** + * Reason that a candidate finished. 
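+ *
+ * Illustrative sketch only (assumes `result` came from `generateContent()`):
+ *
+ * @example
+ * if (result.response.candidates?.[0]?.finishReason === FinishReason.MAX_TOKENS) {
+ *   console.warn('Response was truncated; consider raising maxOutputTokens.');
+ * }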
+ * @public + */ +export type FinishReason = (typeof FinishReason)[keyof typeof FinishReason]; +/** + * @public + */ +export declare const FunctionCallingMode: { + /** + * Default model behavior; model decides to predict either a function call + * or a natural language response. + */ + readonly AUTO: "AUTO"; + /** + * Model is constrained to always predicting a function call only. + * If `allowed_function_names` is set, the predicted function call will be + * limited to any one of `allowed_function_names`, else the predicted + * function call will be any one of the provided `function_declarations`. + */ + readonly ANY: "ANY"; + /** + * Model will not predict any function call. Model behavior is same as when + * not passing any function declarations. + */ + readonly NONE: "NONE"; +}; +/** + * @public + */ +export type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode]; +/** + * Content part modality. + * @public + */ +export declare const Modality: { + /** + * Unspecified modality. + */ + readonly MODALITY_UNSPECIFIED: "MODALITY_UNSPECIFIED"; + /** + * Plain text. + */ + readonly TEXT: "TEXT"; + /** + * Image. + */ + readonly IMAGE: "IMAGE"; + /** + * Video. + */ + readonly VIDEO: "VIDEO"; + /** + * Audio. + */ + readonly AUDIO: "AUDIO"; + /** + * Document (for example, PDF). + */ + readonly DOCUMENT: "DOCUMENT"; +}; +/** + * Content part modality. + * @public + */ +export type Modality = (typeof Modality)[keyof typeof Modality]; +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +export declare const ResponseModality: { + /** + * Text. + * @beta + */ + readonly TEXT: "TEXT"; + /** + * Image. + * @beta + */ + readonly IMAGE: "IMAGE"; + /** + * Audio. + * @beta + */ + readonly AUDIO: "AUDIO"; +}; +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +export type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality]; +/** + * Determines whether inference happens on-device or in-cloud. + * + * @remarks + * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an + * on-device model. If on-device inference is not available, the SDK + * will fall back to using a cloud-hosted model. + * <br/> + * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an + * on-device model. The SDK will not fall back to a cloud-hosted model. + * If on-device inference is not available, inference methods will throw. + * <br/> + * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a + * cloud-hosted model. The SDK will not fall back to an on-device model. + * <br/> + * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a + * cloud-hosted model. If not available, the SDK will fall back to an + * on-device model. + * + * @beta + */ +export declare const InferenceMode: { + readonly PREFER_ON_DEVICE: "prefer_on_device"; + readonly ONLY_ON_DEVICE: "only_on_device"; + readonly ONLY_IN_CLOUD: "only_in_cloud"; + readonly PREFER_IN_CLOUD: "prefer_in_cloud"; +}; +/** + * Determines whether inference happens on-device or in-cloud. + * + * @beta + */ +export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode]; +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +export declare const InferenceSource: { + readonly ON_DEVICE: "on_device"; + readonly IN_CLOUD: "in_cloud"; +}; +/** + * Indicates whether inference happened on-device or in-cloud. 
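+ *
+ * Illustrative sketch only (assumes a model created with hybrid inference, for
+ * example `mode: InferenceMode.PREFER_ON_DEVICE`, and that the enhanced response
+ * exposes `inferenceSource` as in recent SDK versions):
+ *
+ * @example
+ * const result = await model.generateContent('Hello');
+ * if (result.response.inferenceSource === InferenceSource.ON_DEVICE) {
+ *   console.log('Answered by the on-device model.');
+ * }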
+ * + * @beta + */ +export type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource]; +/** + * Represents the result of the code execution. + * + * @beta + */ +export declare const Outcome: { + UNSPECIFIED: string; + OK: string; + FAILED: string; + DEADLINE_EXCEEDED: string; +}; +/** + * Represents the result of the code execution. + * + * @beta + */ +export type Outcome = (typeof Outcome)[keyof typeof Outcome]; +/** + * The programming language of the code. + * + * @beta + */ +export declare const Language: { + UNSPECIFIED: string; + PYTHON: string; +}; +/** + * The programming language of the code. + * + * @beta + */ +export type Language = (typeof Language)[keyof typeof Language]; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/error.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/error.d.ts new file mode 100644 index 0000000..82e6bb4 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/error.d.ts @@ -0,0 +1,89 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { GenerateContentResponse } from './responses'; +/** + * Details object that may be included in an error response. + * + * @public + */ +export interface ErrorDetails { + '@type'?: string; + /** The reason for the error. */ + reason?: string; + /** The domain where the error occurred. */ + domain?: string; + /** Additional metadata about the error. */ + metadata?: Record<string, unknown>; + /** Any other relevant information about the error. */ + [key: string]: unknown; +} +/** + * Details object that contains data originating from a bad HTTP response. + * + * @public + */ +export interface CustomErrorData { + /** HTTP status code of the error response. */ + status?: number; + /** HTTP status text of the error response. */ + statusText?: string; + /** Response from a {@link GenerateContentRequest} */ + response?: GenerateContentResponse; + /** Optional additional details about the error. */ + errorDetails?: ErrorDetails[]; +} +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +export declare const AIErrorCode: { + /** A generic error occurred. */ + readonly ERROR: "error"; + /** An error occurred in a request. */ + readonly REQUEST_ERROR: "request-error"; + /** An error occurred in a response. */ + readonly RESPONSE_ERROR: "response-error"; + /** An error occurred while performing a fetch. */ + readonly FETCH_ERROR: "fetch-error"; + /** An error occurred because an operation was attempted on a closed session. */ + readonly SESSION_CLOSED: "session-closed"; + /** An error associated with a Content object. */ + readonly INVALID_CONTENT: "invalid-content"; + /** An error due to the Firebase API not being enabled in the Console. */ + readonly API_NOT_ENABLED: "api-not-enabled"; + /** An error due to invalid Schema input. */ + readonly INVALID_SCHEMA: "invalid-schema"; + /** An error occurred due to a missing Firebase API key. 
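+ *
+ * Illustrative sketch only: SDK failures surface as {@link AIError} instances
+ * whose `code` can be compared against these values, for example
+ *
+ * @example
+ * try {
+ *   await model.generateContent('Hello');
+ * } catch (e) {
+ *   if (e instanceof AIError && e.code === AIErrorCode.FETCH_ERROR) {
+ *     console.error('Network problem:', e.message);
+ *   }
+ * }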
*/ + readonly NO_API_KEY: "no-api-key"; + /** An error occurred due to a missing Firebase app ID. */ + readonly NO_APP_ID: "no-app-id"; + /** An error occurred due to a model name not being specified during initialization. */ + readonly NO_MODEL: "no-model"; + /** An error occurred due to a missing project ID. */ + readonly NO_PROJECT_ID: "no-project-id"; + /** An error occurred while parsing. */ + readonly PARSE_FAILED: "parse-failed"; + /** An error occurred due an attempt to use an unsupported feature. */ + readonly UNSUPPORTED: "unsupported"; +}; +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +export type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode]; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/googleai.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/googleai.d.ts new file mode 100644 index 0000000..7060f48 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/googleai.d.ts @@ -0,0 +1,57 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Tool, GenerationConfig, Citation, FinishReason, GroundingMetadata, PromptFeedback, SafetyRating, UsageMetadata, URLContextMetadata } from '../public-types'; +import { Content, Part } from './content'; +/** + * @internal + */ +export interface GoogleAICountTokensRequest { + generateContentRequest: { + model: string; + contents: Content[]; + systemInstruction?: string | Part | Content; + tools?: Tool[]; + generationConfig?: GenerationConfig; + }; +} +/** + * @internal + */ +export interface GoogleAIGenerateContentResponse { + candidates?: GoogleAIGenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} +/** + * @internal + */ +export interface GoogleAIGenerateContentCandidate { + index: number; + content: Content; + finishReason?: FinishReason; + finishMessage?: string; + safetyRatings?: SafetyRating[]; + citationMetadata?: GoogleAICitationMetadata; + groundingMetadata?: GroundingMetadata; + urlContextMetadata?: URLContextMetadata; +} +/** + * @internal + */ +export interface GoogleAICitationMetadata { + citationSources: Citation[]; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/index.d.ts new file mode 100644 index 0000000..c56c5bc --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/index.d.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +export * from './requests'; +export * from './responses'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/internal.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/internal.d.ts new file mode 100644 index 0000000..7d5824d --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/internal.d.ts @@ -0,0 +1,134 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { ImagenGenerationConfig, ImagenSafetySettings } from './requests'; +/** + * A response from the REST API is expected to look like this in the success case: + * { + * "predictions": [ + * { + * "mimeType": "image/png", + * "bytesBase64Encoded": "iVBORw0KG..." + * }, + * { + * "mimeType": "image/png", + * "bytesBase64Encoded": "i4BOtw0KG..." + * } + * ] + * } + * + * And like this in the failure case: + * { + * "predictions": [ + * { + * "raiFilteredReason": "..." + * } + * ] + * } + * + * @internal + */ +export interface ImagenResponseInternal { + predictions?: Array<{ + /** + * The MIME type of the generated image. + */ + mimeType?: string; + /** + * The image data encoded as a base64 string. + */ + bytesBase64Encoded?: string; + /** + * The GCS URI where the image was stored. + */ + gcsUri?: string; + /** + * The reason why the image was filtered. + */ + raiFilteredReason?: string; + /** + * The safety attributes. + * + * This type is currently unused in the SDK. It is sent back because our requests set + * `includeSafetyAttributes`. This property is currently only used to avoid throwing an error + * when encountering this unsupported prediction type. + */ + safetyAttributes?: unknown; + }>; +} +/** + * The parameters to be sent in the request body of the HTTP call + * to the Vertex AI backend. + * + * We need a seperate internal-only interface for this because the REST + * API expects different parameter names than what we show to our users. + * + * Sample request body JSON: + * { + * "instances": [ + * { + * "prompt": "Portrait of a golden retriever on a beach." 
+ * } + * ], + * "parameters": { + * "mimeType": "image/png", + * "safetyFilterLevel": "block_low_and_above", + * "personGeneration": "allow_all", + * "sampleCount": 2, + * "includeRaiReason": true, + * "includeSafetyAttributes": true, + * "aspectRatio": "9:16" + * } + * } + * + * See the Google Cloud docs: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#-drest + * + * @internal + */ +export interface PredictRequestBody { + instances: [ + { + prompt: string; + } + ]; + parameters: { + sampleCount: number; + aspectRatio?: string; + outputOptions?: { + mimeType: string; + compressionQuality?: number; + }; + negativePrompt?: string; + storageUri?: string; + addWatermark?: boolean; + safetyFilterLevel?: string; + personGeneration?: string; + includeRaiReason: boolean; + includeSafetyAttributes: boolean; + }; +} +/** + * Contains all possible REST API paramaters that are provided by the caller. + * + * @internal + */ +export type ImagenGenerationParams = { + /** + * The Cloud Storage for Firebase bucket URI where the images should be stored + * (for GCS requests only). + */ + gcsURI?: string; +} & ImagenGenerationConfig & ImagenSafetySettings; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/requests.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/requests.d.ts new file mode 100644 index 0000000..31083fa --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/requests.d.ts @@ -0,0 +1,245 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { ImagenImageFormat } from '../../requests/imagen-image-format'; +/** + * Parameters for configuring an {@link ImagenModel}. + * + * @public + */ +export interface ImagenModelParams { + /** + * The Imagen model to use for generating images. + * For example: `imagen-3.0-generate-002`. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions} + * for a full list of supported Imagen 3 models. + */ + model: string; + /** + * Configuration options for generating images with Imagen. + */ + generationConfig?: ImagenGenerationConfig; + /** + * Safety settings for filtering potentially inappropriate content. + */ + safetySettings?: ImagenSafetySettings; +} +/** + * Configuration options for generating images with Imagen. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for + * more details. + * + * @public + */ +export interface ImagenGenerationConfig { + /** + * A description of what should be omitted from the generated images. + * + * Support for negative prompts depends on the Imagen model. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details. 
+ * + * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions + * greater than `imagen-3.0-generate-002`. + */ + negativePrompt?: string; + /** + * The number of images to generate. The default value is 1. + * + * The number of sample images that may be generated in each request depends on the model + * (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a> + * documentation for more details. + */ + numberOfImages?: number; + /** + * The aspect ratio of the generated images. The default value is square 1:1. + * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)} + * for more details. + */ + aspectRatio?: ImagenAspectRatio; + /** + * The image format of the generated images. The default is PNG. + * + * See {@link ImagenImageFormat} for more details. + */ + imageFormat?: ImagenImageFormat; + /** + * Whether to add an invisible watermark to generated images. + * + * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate + * that they are AI generated. If set to `false`, watermarking will be disabled. + * + * For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a> + * documentation for more details. + * + * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true, + * and cannot be turned off. + */ + addWatermark?: boolean; +} +/** + * A filter level controlling how aggressively to filter sensitive content. + * + * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI + * are assessed against a list of safety filters, which include 'harmful categories' (for example, + * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to + * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines} + * for more details. + * + * @public + */ +export declare const ImagenSafetyFilterLevel: { + /** + * The most aggressive filtering level; most strict blocking. + */ + readonly BLOCK_LOW_AND_ABOVE: "block_low_and_above"; + /** + * Blocks some sensitive prompts and responses. + */ + readonly BLOCK_MEDIUM_AND_ABOVE: "block_medium_and_above"; + /** + * Blocks few sensitive prompts and responses. + */ + readonly BLOCK_ONLY_HIGH: "block_only_high"; + /** + * The least aggressive filtering level; blocks very few sensitive prompts and responses. + * + * Access to this feature is restricted and may require your case to be reviewed and approved by + * Cloud support. + */ + readonly BLOCK_NONE: "block_none"; +}; +/** + * A filter level controlling how aggressively to filter sensitive content. + * + * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI + * are assessed against a list of safety filters, which include 'harmful categories' (for example, + * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to + * filter out potentially harmful content from responses. 
See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines} + * for more details. + * + * @public + */ +export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel]; +/** + * A filter level controlling whether generation of images containing people or faces is allowed. + * + * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a> + * documentation for more details. + * + * @public + */ +export declare const ImagenPersonFilterLevel: { + /** + * Disallow generation of images containing people or faces; images of people are filtered out. + */ + readonly BLOCK_ALL: "dont_allow"; + /** + * Allow generation of images containing adults only; images of children are filtered out. + * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + readonly ALLOW_ADULT: "allow_adult"; + /** + * Allow generation of images containing adults only; images of children are filtered out. + * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + readonly ALLOW_ALL: "allow_all"; +}; +/** + * A filter level controlling whether generation of images containing people or faces is allowed. + * + * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a> + * documentation for more details. + * + * @public + */ +export type ImagenPersonFilterLevel = (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel]; +/** + * Settings for controlling the aggressiveness of filtering out sensitive content. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details. + * + * @public + */ +export interface ImagenSafetySettings { + /** + * A filter level controlling how aggressive to filter out sensitive content from generated + * images. + */ + safetyFilterLevel?: ImagenSafetyFilterLevel; + /** + * A filter level controlling whether generation of images containing people or faces is allowed. + */ + personFilterLevel?: ImagenPersonFilterLevel; +} +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +export declare const ImagenAspectRatio: { + /** + * Square (1:1) aspect ratio. + */ + readonly SQUARE: "1:1"; + /** + * Landscape (3:4) aspect ratio. + */ + readonly LANDSCAPE_3x4: "3:4"; + /** + * Portrait (4:3) aspect ratio. + */ + readonly PORTRAIT_4x3: "4:3"; + /** + * Landscape (16:9) aspect ratio. + */ + readonly LANDSCAPE_16x9: "16:9"; + /** + * Portrait (9:16) aspect ratio. 
+ */ + readonly PORTRAIT_9x16: "9:16"; +}; +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +export type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio]; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/responses.d.ts new file mode 100644 index 0000000..f5dfc0f --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/imagen/responses.d.ts @@ -0,0 +1,79 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * An image generated by Imagen, represented as inline data. + * + * @public + */ +export interface ImagenInlineImage { + /** + * The MIME type of the image; either `"image/png"` or `"image/jpeg"`. + * + * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}. + */ + mimeType: string; + /** + * The base64-encoded image data. + */ + bytesBase64Encoded: string; +} +/** + * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket. + * + * This feature is not available yet. + * @public + */ +export interface ImagenGCSImage { + /** + * The MIME type of the image; either `"image/png"` or `"image/jpeg"`. + * + * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}. + */ + mimeType: string; + /** + * The URI of the file stored in a Cloud Storage for Firebase bucket. + * + * @example `"gs://bucket-name/path/sample_0.jpg"`. + */ + gcsURI: string; +} +/** + * The response from a request to generate images with Imagen. + * + * @public + */ +export interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> { + /** + * The images generated by Imagen. + * + * The number of images generated may be fewer than the number requested if one or more were + * filtered out; see `filteredReason`. + */ + images: T[]; + /** + * The reason that images were filtered out. This property will only be defined if one + * or more images were filtered. + * + * Images may be filtered out due to the {@link (ImagenSafetyFilterLevel:type)}, + * {@link (ImagenPersonFilterLevel:type)}, or filtering included in the model. + * The filter levels may be adjusted in your {@link ImagenSafetySettings}. + * + * See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen} + * for more details. 
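+ *
+ * Illustrative sketch only (the model name is a placeholder; assumes `ai` from
+ * `getAI()` and `getImagenModel` from `firebase/ai`):
+ *
+ * @example
+ * const imagenModel = getImagenModel(ai, { model: 'imagen-3.0-generate-002' });
+ * const { images, filteredReason } = await imagenModel.generateImages('A lighthouse at dawn');
+ * if (filteredReason) {
+ *   console.warn(`Some images were filtered out: ${filteredReason}`);
+ * }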
+ */ + filteredReason?: string; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/index.d.ts new file mode 100644 index 0000000..a8508d4 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/index.d.ts @@ -0,0 +1,26 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +export * from './content'; +export * from './enums'; +export * from './requests'; +export * from './responses'; +export * from './error'; +export * from './schema'; +export * from './imagen'; +export * from './googleai'; +export { LanguageModelCreateOptions, LanguageModelCreateCoreOptions, LanguageModelExpected, LanguageModelMessage, LanguageModelMessageContent, LanguageModelMessageContentValue, LanguageModelMessageRole, LanguageModelMessageType, LanguageModelPromptOptions } from './language-model'; +export * from './chrome-adapter'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/internal.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/internal.d.ts new file mode 100644 index 0000000..3c16979 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/internal.d.ts @@ -0,0 +1,33 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { AppCheckTokenResult } from '@firebase/app-check-interop-types'; +import { FirebaseAuthTokenData } from '@firebase/auth-interop-types'; +import { Backend } from '../backend'; +export * from './imagen/internal'; +export interface ApiSettings { + apiKey: string; + project: string; + appId: string; + automaticDataCollectionEnabled?: boolean; + /** + * @deprecated Use `backend.location` instead. + */ + location: string; + backend: Backend; + getAuthToken?: () => Promise<FirebaseAuthTokenData | null>; + getAppCheckToken?: () => Promise<AppCheckTokenResult>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/language-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/language-model.d.ts new file mode 100644 index 0000000..9361a1f --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/language-model.d.ts @@ -0,0 +1,107 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * The subset of the Prompt API + * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl } + * required for hybrid functionality. + * + * @internal + */ +export interface LanguageModel extends EventTarget { + create(options?: LanguageModelCreateOptions): Promise<LanguageModel>; + availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>; + prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>; + promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream; + measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>; + destroy(): undefined; +} +/** + * @internal + */ +export declare enum Availability { + 'UNAVAILABLE' = "unavailable", + 'DOWNLOADABLE' = "downloadable", + 'DOWNLOADING' = "downloading", + 'AVAILABLE' = "available" +} +/** + * Configures the creation of an on-device language model session. + * @beta + */ +export interface LanguageModelCreateCoreOptions { + topK?: number; + temperature?: number; + expectedInputs?: LanguageModelExpected[]; +} +/** + * Configures the creation of an on-device language model session. + * @beta + */ +export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions { + signal?: AbortSignal; + initialPrompts?: LanguageModelMessage[]; +} +/** + * Options for an on-device language model prompt. + * @beta + */ +export interface LanguageModelPromptOptions { + responseConstraint?: object; +} +/** + * Options for the expected inputs for an on-device language model. + * @beta + */ export interface LanguageModelExpected { + type: LanguageModelMessageType; + languages?: string[]; +} +/** + * An on-device language model prompt. + * @beta + */ +export type LanguageModelPrompt = LanguageModelMessage[]; +/** + * An on-device language model message. + * @beta + */ +export interface LanguageModelMessage { + role: LanguageModelMessageRole; + content: LanguageModelMessageContent[]; +} +/** + * An on-device language model content object. + * @beta + */ +export interface LanguageModelMessageContent { + type: LanguageModelMessageType; + value: LanguageModelMessageContentValue; +} +/** + * Allowable roles for on-device language model usage. + * @beta + */ +export type LanguageModelMessageRole = 'system' | 'user' | 'assistant'; +/** + * Allowable types for on-device language model messages. + * @beta + */ +export type LanguageModelMessageType = 'text' | 'image' | 'audio'; +/** + * Content formats that can be provided as on-device message content. 
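+ *
+ * Illustrative sketch only, matching the message shapes defined above:
+ *
+ * @example
+ * const message: LanguageModelMessage = {
+ *   role: 'user',
+ *   content: [{ type: 'text', value: 'Summarize this page.' }]
+ * };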
+ * @beta + */ +export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/live-responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/live-responses.d.ts new file mode 100644 index 0000000..8270db9 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/live-responses.d.ts @@ -0,0 +1,79 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, FunctionResponse, GenerativeContentBlob, Part } from './content'; +import { AudioTranscriptionConfig, LiveGenerationConfig, Tool, ToolConfig } from './requests'; +import { Transcription } from './responses'; +/** + * User input that is sent to the model. + * + * @internal + */ +export interface _LiveClientContent { + clientContent: { + turns: [Content]; + turnComplete: boolean; + inputTranscription?: Transcription; + outputTranscription?: Transcription; + }; +} +/** + * User input that is sent to the model in real time. + * + * @internal + */ +export interface _LiveClientRealtimeInput { + realtimeInput: { + text?: string; + audio?: GenerativeContentBlob; + video?: GenerativeContentBlob; + /** + * @deprecated Use `text`, `audio`, and `video` instead. + */ + mediaChunks?: GenerativeContentBlob[]; + }; +} +/** + * Function responses that are sent to the model in real time. + */ +export interface _LiveClientToolResponse { + toolResponse: { + functionResponses: FunctionResponse[]; + }; +} +/** + * The first message in a Live session, used to configure generation options. + * + * @internal + */ +export interface _LiveClientSetup { + setup: { + model: string; + generationConfig?: _LiveGenerationConfig; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; + inputAudioTranscription?: AudioTranscriptionConfig; + outputAudioTranscription?: AudioTranscriptionConfig; + }; +} +/** + * The Live Generation Config. + * + * The public API ({@link LiveGenerationConfig}) has `inputAudioTranscription` and `outputAudioTranscription`, + * but the server expects these fields to be in the top-level `setup` message. This was a conscious API decision. + */ +export type _LiveGenerationConfig = Omit<LiveGenerationConfig, 'inputAudioTranscription' | 'outputAudioTranscription'>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/requests.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/requests.d.ts new file mode 100644 index 0000000..6df8be1 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/requests.d.ts @@ -0,0 +1,464 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { ObjectSchema, TypedSchema } from '../requests/schema-builder'; +import { Content, Part } from './content'; +import { LanguageModelCreateOptions, LanguageModelPromptOptions } from './language-model'; +import { FunctionCallingMode, HarmBlockMethod, HarmBlockThreshold, HarmCategory, InferenceMode, ResponseModality } from './enums'; +import { ObjectSchemaRequest, SchemaRequest } from './schema'; +/** + * Base parameters for a number of methods. + * @public + */ +export interface BaseParams { + safetySettings?: SafetySetting[]; + generationConfig?: GenerationConfig; +} +/** + * Params passed to {@link getGenerativeModel}. + * @public + */ +export interface ModelParams extends BaseParams { + model: string; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} +/** + * Params passed to {@link getLiveGenerativeModel}. + * @beta + */ +export interface LiveModelParams { + model: string; + generationConfig?: LiveGenerationConfig; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} +/** + * Request sent through {@link GenerativeModel.generateContent} + * @public + */ +export interface GenerateContentRequest extends BaseParams { + contents: Content[]; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} +/** + * Safety setting that can be sent as part of request parameters. + * @public + */ +export interface SafetySetting { + category: HarmCategory; + threshold: HarmBlockThreshold; + /** + * The harm block method. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be + * thrown if this property is defined. + */ + method?: HarmBlockMethod; +} +/** + * Config options for content-related requests + * @public + */ +export interface GenerationConfig { + candidateCount?: number; + stopSequences?: string[]; + maxOutputTokens?: number; + temperature?: number; + topP?: number; + topK?: number; + presencePenalty?: number; + frequencyPenalty?: number; + /** + * Output response MIME type of the generated candidate text. + * Supported MIME types are `text/plain` (default, text output), + * `application/json` (JSON response in the candidates), and + * `text/x.enum`. + */ + responseMimeType?: string; + /** + * Output response schema of the generated candidate text. This + * value can be a class generated with a {@link Schema} static method + * like `Schema.string()` or `Schema.object()` or it can be a plain + * JS object matching the {@link SchemaRequest} interface. + * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently + * this is limited to `application/json` and `text/x.enum`. + */ + responseSchema?: TypedSchema | SchemaRequest; + /** + * Generation modalities to be returned in generation responses. 
+ * + * @remarks + * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}. + * - Only image generation (`ResponseModality.IMAGE`) is supported. + * + * @beta + */ + responseModalities?: ResponseModality[]; + /** + * Configuration for "thinking" behavior of compatible Gemini models. + */ + thinkingConfig?: ThinkingConfig; +} +/** + * Configuration parameters used by {@link LiveGenerativeModel} to control live content generation. + * + * @beta + */ +export interface LiveGenerationConfig { + /** + * Configuration for speech synthesis. + */ + speechConfig?: SpeechConfig; + /** + * Specifies the maximum number of tokens that can be generated in the response. The number of + * tokens per word varies depending on the output language. It is unbounded by default. + */ + maxOutputTokens?: number; + /** + * Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest + * probability tokens are always selected. In this case, responses for a given prompt are mostly + * deterministic, but a small amount of variation is still possible. + */ + temperature?: number; + /** + * Changes how the model selects tokens for output. Tokens are + * selected from the most to least probable until the sum of their probabilities equals the `topP` + * value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively + * and the `topP` value is 0.5, then the model will select either A or B as the next token by using + * the `temperature` and exclude C as a candidate. Defaults to 0.95 if unset. + */ + topP?: number; + /** + * Changes how the model selects tokens for output. A `topK` value of 1 means the selected token is + * the most probable among all tokens in the model's vocabulary, while a `topK` value of 3 means that + * the next token is selected from among the 3 most probable tokens. Tokens are then further filtered + * using `temperature` sampling. Defaults to 40 if unspecified. + */ + topK?: number; + /** + * Presence penalty. Positive values penalize tokens that have already appeared in the generated + * output, encouraging more diverse content. + */ + presencePenalty?: number; + /** + * Frequency penalty. Positive values penalize tokens that repeatedly appear in the generated + * output, in proportion to how often they appear. + */ + frequencyPenalty?: number; + /** + * The modalities of the response. + */ + responseModalities?: ResponseModality[]; + /** + * Enables transcription of audio input. + * + * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscription` property + * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across + * messages, so you may only receive small amounts of text per message. For example, if you ask the model + * "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?". + */ + inputAudioTranscription?: AudioTranscriptionConfig; + /** + * Enables transcription of audio output. + * + * When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property + * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across + * messages, so you may only receive small amounts of text per message. For example, if the model says + * "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?". + */ + outputAudioTranscription?: AudioTranscriptionConfig; +} +/** + * Params for {@link GenerativeModel.startChat}.
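+ * + * The snippet below is an illustrative usage sketch, not normative documentation; it assumes `model` is a {@link GenerativeModel} obtained from `getGenerativeModel()`. + * + * @example + * ```javascript + * const chat = model.startChat({ history: [ { role: 'user', parts: [{ text: 'Hello.' }] }, { role: 'model', parts: [{ text: 'Hi, how can I help?' }] } ] }); + * const result = await chat.sendMessage('What did I just say?'); + * console.log(result.response.text()); + * ```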
+ * @public + */ +export interface StartChatParams extends BaseParams { + history?: Content[]; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} +/** + * Params for calling {@link GenerativeModel.countTokens} + * @public + */ +export interface CountTokensRequest { + contents: Content[]; + /** + * Instructions that direct the model to behave a certain way. + */ + systemInstruction?: string | Part | Content; + /** + * {@link Tool} configuration. + */ + tools?: Tool[]; + /** + * Configuration options that control how the model generates a response. + */ + generationConfig?: GenerationConfig; +} +/** + * Params passed to {@link getGenerativeModel}. + * @public + */ +export interface RequestOptions { + /** + * Request timeout in milliseconds. Defaults to 180 seconds (180000ms). + */ + timeout?: number; + /** + * Base url for endpoint. Defaults to + * https://firebasevertexai.googleapis.com, which is the + * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API} + * (used regardless of your chosen Gemini API provider). + */ + baseUrl?: string; +} +/** + * Defines a tool that model can call to access external knowledge. + * @public + */ +export type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool; +/** + * Structured representation of a function declaration as defined by the + * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}. + * Included + * in this declaration are the function name and parameters. This + * `FunctionDeclaration` is a representation of a block of code that can be used + * as a Tool by the model and executed by the client. + * @public + */ +export interface FunctionDeclaration { + /** + * The name of the function to call. Must start with a letter or an + * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with + * a max length of 64. + */ + name: string; + /** + * Description and purpose of the function. Model uses it to decide + * how and whether to call the function. + */ + description: string; + /** + * Optional. Describes the parameters to this function in JSON Schema Object + * format. Reflects the Open API 3.03 Parameter Object. Parameter names are + * case-sensitive. For a function with no parameters, this can be left unset. + */ + parameters?: ObjectSchema | ObjectSchemaRequest; +} +/** + * A tool that allows a Gemini model to connect to Google Search to access and incorporate + * up-to-date information from the web into its responses. + * + * Important: If using Grounding with Google Search, you are required to comply with the + * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + * + * @public + */ +export interface GoogleSearchTool { + /** + * Specifies the Google Search configuration. + * Currently, this is an empty object, but it's reserved for future configuration options. 
+ * + * When using this feature, you are required to comply with the "Grounding with Google Search" + * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + */ + googleSearch: GoogleSearch; +} +/** + * A tool that enables the model to use code execution. + * + * @beta + */ +export interface CodeExecutionTool { + /** + * Specifies the code execution configuration. + * Currently, this is an empty object, but it's reserved for future configuration options. + */ + codeExecution: {}; +} +/** + * Specifies the Google Search configuration. + * + * @remarks Currently, this is an empty object, but it's reserved for future configuration options. + * + * @public + */ +export interface GoogleSearch { +} +/** + * A tool that allows you to provide additional context to the models in the form of public web + * URLs. By including URLs in your request, the Gemini model will access the content from those + * pages to inform and enhance its response. + * + * @beta + */ +export interface URLContextTool { + /** + * Specifies the URL Context configuration. + */ + urlContext: URLContext; +} +/** + * Specifies the URL Context configuration. + * + * @beta + */ +export interface URLContext { +} +/** + * A `FunctionDeclarationsTool` is a piece of code that enables the system to + * interact with external systems to perform an action, or set of actions, + * outside the knowledge and scope of the model. + * @public + */ +export interface FunctionDeclarationsTool { + /** + * Optional. One or more function declarations + * to be passed to the model along with the current user query. The model may + * decide to call a subset of these functions by populating + * {@link FunctionCall} in the response. The user should + * provide a {@link FunctionResponse} for each + * function call in the next turn. Based on the function responses, the model will + * generate the final response back to the user. A maximum of 64 function + * declarations can be provided. + */ + functionDeclarations?: FunctionDeclaration[]; +} +/** + * Tool config. This config is shared for all tools provided in the request. + * @public + */ +export interface ToolConfig { + functionCallingConfig?: FunctionCallingConfig; +} +/** + * @public + */ +export interface FunctionCallingConfig { + mode?: FunctionCallingMode; + allowedFunctionNames?: string[]; +} +/** + * Encapsulates configuration for on-device inference. + * + * @beta + */ +export interface OnDeviceParams { + createOptions?: LanguageModelCreateOptions; + promptOptions?: LanguageModelPromptOptions; +} +/** + * Configures hybrid inference. + * @beta + */ +export interface HybridParams { + /** + * Specifies on-device or in-cloud inference. Defaults to prefer on-device. + */ + mode: InferenceMode; + /** + * Optional. Specifies advanced params for on-device inference. + */ + onDeviceParams?: OnDeviceParams; + /** + * Optional. Specifies advanced params for in-cloud inference. + */ + inCloudParams?: ModelParams; +} +/** + * Configuration for "thinking" behavior of compatible Gemini models. + * + * Certain models utilize a thinking process before generating a response. This allows them to + * reason through complex problems and plan a more coherent and accurate answer. + * + * @public + */ +export interface ThinkingConfig { + /** + * The thinking budget, in tokens.
+ * + * This parameter sets an upper limit on the number of tokens the model can use for its internal + * "thinking" process. A higher budget may result in higher quality responses for complex tasks + * but can also increase latency and cost. + * + * If you don't specify a budget, the model will determine the appropriate amount + * of thinking based on the complexity of the prompt. + * + * An error will be thrown if you set a thinking budget for a model that does not support this + * feature or if the specified budget is not within the model's supported range. + */ + thinkingBudget?: number; + /** + * Whether to include "thought summaries" in the model's response. + * + * @remarks + * Thought summaries provide a brief overview of the model's internal thinking process, + * offering insight into how it arrived at the final answer. This can be useful for + * debugging, understanding the model's reasoning, and verifying its accuracy. + */ + includeThoughts?: boolean; +} +/** + * Configuration for a pre-built voice. + * + * @beta + */ +export interface PrebuiltVoiceConfig { + /** + * The voice name to use for speech synthesis. + * + * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}. + */ + voiceName?: string; +} +/** + * Configuration for the voice to used in speech synthesis. + * + * @beta + */ +export interface VoiceConfig { + /** + * Configures the voice using a pre-built voice configuration. + */ + prebuiltVoiceConfig?: PrebuiltVoiceConfig; +} +/** + * Configures speech synthesis. + * + * @beta + */ +export interface SpeechConfig { + /** + * Configures the voice to be used in speech synthesis. + */ + voiceConfig?: VoiceConfig; +} +/** + * The audio transcription configuration. + */ +export interface AudioTranscriptionConfig { +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/responses.d.ts new file mode 100644 index 0000000..8896455 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/responses.d.ts @@ -0,0 +1,582 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, FunctionCall, InlineDataPart } from './content'; +import { BlockReason, FinishReason, HarmCategory, HarmProbability, HarmSeverity, InferenceSource, Modality } from './enums'; +/** + * Result object returned from {@link GenerativeModel.generateContent} call. + * + * @public + */ +export interface GenerateContentResult { + response: EnhancedGenerateContentResponse; +} +/** + * Result object returned from {@link GenerativeModel.generateContentStream} call. + * Iterate over `stream` to get chunks as they come in and/or + * use the `response` promise to get the aggregated response when + * the stream is done. 
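+ * + * A minimal illustrative sketch (assuming `model` is a {@link GenerativeModel} instance) of consuming both the stream and the aggregated response: + * + * @example + * ```javascript + * const result = await model.generateContentStream('Write a haiku about the sea.'); + * for await (const chunk of result.stream) { console.log(chunk.text()); } + * const aggregated = await result.response; + * console.log(aggregated.text()); + * ```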
+ * + * @public + */ +export interface GenerateContentStreamResult { + stream: AsyncGenerator<EnhancedGenerateContentResponse>; + response: Promise<EnhancedGenerateContentResponse>; +} +/** + * Response object wrapped with helper methods. + * + * @public + */ +export interface EnhancedGenerateContentResponse extends GenerateContentResponse { + /** + * Returns the text string from the response, if available. + * Throws if the prompt or candidate was blocked. + */ + text: () => string; + /** + * Aggregates and returns every {@link InlineDataPart} from the first candidate of + * {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + */ + inlineDataParts: () => InlineDataPart[] | undefined; + /** + * Aggregates and returns every {@link FunctionCall} from the first candidate of + * {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + */ + functionCalls: () => FunctionCall[] | undefined; + /** + * Aggregates and returns every {@link TextPart} with their `thought` property set + * to `true` from the first candidate of {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + * + * @remarks + * Thought summaries provide a brief overview of the model's internal thinking process, + * offering insight into how it arrived at the final answer. This can be useful for + * debugging, understanding the model's reasoning, and verifying its accuracy. + * + * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is + * set to `true`. + */ + thoughtSummary: () => string | undefined; + /** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ + inferenceSource?: InferenceSource; +} +/** + * Individual response from {@link GenerativeModel.generateContent} and + * {@link GenerativeModel.generateContentStream}. + * `generateContentStream()` will return one in each chunk until + * the stream is done. + * @public + */ +export interface GenerateContentResponse { + candidates?: GenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} +/** + * Usage metadata about a {@link GenerateContentResponse}. + * + * @public + */ +export interface UsageMetadata { + promptTokenCount: number; + candidatesTokenCount: number; + /** + * The number of tokens used by the model's internal "thinking" process. + */ + thoughtsTokenCount?: number; + totalTokenCount: number; + /** + * The number of tokens used by tools. + */ + toolUsePromptTokenCount?: number; + promptTokensDetails?: ModalityTokenCount[]; + candidatesTokensDetails?: ModalityTokenCount[]; + /** + * A list of tokens used by tools, broken down by modality. + */ + toolUsePromptTokensDetails?: ModalityTokenCount[]; +} +/** + * Represents token counting info for a single modality. + * + * @public + */ +export interface ModalityTokenCount { + /** The modality associated with this token count. */ + modality: Modality; + /** The number of tokens counted. */ + tokenCount: number; +} +/** + * If the prompt was blocked, this will be populated with `blockReason` and + * the relevant `safetyRatings`. + * @public + */ +export interface PromptFeedback { + blockReason?: BlockReason; + safetyRatings: SafetyRating[]; + /** + * A human-readable description of the `blockReason`. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + blockReasonMessage?: string; +} +/** + * A candidate returned as part of a {@link GenerateContentResponse}. 
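+ * + * As an illustrative sketch only (assuming `response` is a {@link GenerateContentResponse}), a candidate and its metadata can be read like this: + * + * @example + * ```javascript + * const candidate = response.candidates?.[0]; + * if (candidate && candidate.finishReason === 'STOP') { console.log(candidate.content.parts, candidate.groundingMetadata?.webSearchQueries); } + * ```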
+ * @public + */ +export interface GenerateContentCandidate { + index: number; + content: Content; + finishReason?: FinishReason; + finishMessage?: string; + safetyRatings?: SafetyRating[]; + citationMetadata?: CitationMetadata; + groundingMetadata?: GroundingMetadata; + urlContextMetadata?: URLContextMetadata; +} +/** + * Citation metadata that may be found on a {@link GenerateContentCandidate}. + * @public + */ +export interface CitationMetadata { + citations: Citation[]; +} +/** + * A single citation. + * @public + */ +export interface Citation { + startIndex?: number; + endIndex?: number; + uri?: string; + license?: string; + /** + * The title of the cited source, if available. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + title?: string; + /** + * The publication date of the cited source, if available. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + publicationDate?: Date; +} +/** + * Metadata returned when grounding is enabled. + * + * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}). + * + * Important: If using Grounding with Google Search, you are required to comply with the + * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + * + * @public + */ +export interface GroundingMetadata { + /** + * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be + * embedded in an app to display a Google Search entry point for follow-up web searches related to + * a model's "Grounded Response". + */ + searchEntryPoint?: SearchEntrypoint; + /** + * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content + * (for example, from a web page). that the model used to ground its response. + */ + groundingChunks?: GroundingChunk[]; + /** + * A list of {@link GroundingSupport} objects. Each object details how specific segments of the + * model's response are supported by the `groundingChunks`. + */ + groundingSupports?: GroundingSupport[]; + /** + * A list of web search queries that the model performed to gather the grounding information. + * These can be used to allow users to explore the search results themselves. + */ + webSearchQueries?: string[]; + /** + * @deprecated Use {@link GroundingSupport} instead. + */ + retrievalQueries?: string[]; +} +/** + * Google search entry point. + * + * @public + */ +export interface SearchEntrypoint { + /** + * HTML/CSS snippet that must be embedded in a web page. The snippet is designed to avoid + * undesired interaction with the rest of the page's CSS. + * + * To ensure proper rendering and prevent CSS conflicts, it is recommended + * to encapsulate this `renderedContent` within a shadow DOM when embedding it + * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}. 
+ * + * @example + * ```javascript + * const container = document.createElement('div'); + * document.body.appendChild(container); + * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent; + * ``` + */ + renderedContent?: string; +} +/** + * Represents a chunk of retrieved data that supports a claim in the model's response. This is part + * of the grounding information provided when grounding is enabled. + * + * @public + */ +export interface GroundingChunk { + /** + * Contains details if the grounding chunk is from a web source. + */ + web?: WebGroundingChunk; +} +/** + * A grounding chunk from the web. + * + * Important: If using Grounding with Google Search, you are required to comply with the + * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for "Grounding with Google Search". + * + * @public + */ +export interface WebGroundingChunk { + /** + * The URI of the retrieved web page. + */ + uri?: string; + /** + * The title of the retrieved web page. + */ + title?: string; + /** + * The domain of the original URI from which the content was retrieved. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + domain?: string; +} +/** + * Provides information about how a specific segment of the model's response is supported by the + * retrieved grounding chunks. + * + * @public + */ +export interface GroundingSupport { + /** + * Specifies the segment of the model's response content that this grounding support pertains to. + */ + segment?: Segment; + /** + * A list of indices that refer to specific {@link GroundingChunk} objects within the + * {@link GroundingMetadata.groundingChunks} array. These referenced chunks + * are the sources that support the claim made in the associated `segment` of the response. + * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`, + * and `groundingChunks[4]` are the retrieved content supporting this part of the response. + */ + groundingChunkIndices?: number[]; +} +/** + * Represents a specific segment within a {@link Content} object, often used to + * pinpoint the exact location of text or data that grounding information refers to. + * + * @public + */ +export interface Segment { + /** + * The zero-based index of the {@link Part} object within the `parts` array + * of its parent {@link Content} object. This identifies which part of the + * content the segment belongs to. + */ + partIndex: number; + /** + * The zero-based start index of the segment within the specified `Part`, + * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the + * beginning of the part's content (e.g., `Part.text`). + */ + startIndex: number; + /** + * The zero-based end index of the segment within the specified `Part`, + * measured in UTF-8 bytes. This offset is exclusive, meaning the character + * at this index is not included in the segment. + */ + endIndex: number; + /** + * The text corresponding to the segment from the response. + */ + text: string; +} +/** + * Metadata related to {@link URLContextTool}. + * + * @beta + */ +export interface URLContextMetadata { + /** + * List of URL metadata used to provide context to the Gemini model. + */ + urlMetadata: URLMetadata[]; +} +/** + * Metadata for a single URL retrieved by the {@link URLContextTool} tool. + * + * @beta + */ +export interface URLMetadata { + /** + * The retrieved URL. 
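+ * + * For illustration only (not normative documentation), assuming `response` is a {@link GenerateContentResponse} produced from a request that enabled the {@link URLContextTool}, the retrieved URLs can be inspected like this: + * + * @example + * ```javascript + * const urlMetadata = response.candidates?.[0]?.urlContextMetadata?.urlMetadata ?? []; + * urlMetadata.forEach((m) => console.log(m.retrievedUrl, m.urlRetrievalStatus)); + * ```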
+ */ + retrievedUrl?: string; + /** + * The status of the URL retrieval. + */ + urlRetrievalStatus?: URLRetrievalStatus; +} +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +export declare const URLRetrievalStatus: { + /** + * Unspecified retrieval status. + */ + URL_RETRIEVAL_STATUS_UNSPECIFIED: string; + /** + * The URL retrieval was successful. + */ + URL_RETRIEVAL_STATUS_SUCCESS: string; + /** + * The URL retrieval failed. + */ + URL_RETRIEVAL_STATUS_ERROR: string; + /** + * The URL retrieval failed because the content is behind a paywall. + */ + URL_RETRIEVAL_STATUS_PAYWALL: string; + /** + * The URL retrieval failed because the content is unsafe. + */ + URL_RETRIEVAL_STATUS_UNSAFE: string; +}; +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +export type URLRetrievalStatus = (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus]; +/** + * @public + */ +export interface WebAttribution { + uri: string; + title: string; +} +/** + * @public + */ +export interface RetrievedContextAttribution { + uri: string; + title: string; +} +/** + * Protobuf google.type.Date + * @public + */ +export interface Date { + year: number; + month: number; + day: number; +} +/** + * A safety rating associated with a {@link GenerateContentCandidate} + * @public + */ +export interface SafetyRating { + category: HarmCategory; + probability: HarmProbability; + /** + * The harm severity level. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`. + */ + severity: HarmSeverity; + /** + * The probability score of the harm category. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0. + */ + probabilityScore: number; + /** + * The severity score of the harm category. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0. + */ + severityScore: number; + blocked: boolean; +} +/** + * Response from calling {@link GenerativeModel.countTokens}. + * @public + */ +export interface CountTokensResponse { + /** + * The total number of tokens counted across all instances from the request. 
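+ * + * As an illustrative sketch (assuming `model` is a {@link GenerativeModel} instance), the token count for a prompt can be read like this: + * + * @example + * ```javascript + * const { totalTokens } = await model.countTokens({ contents: [{ role: 'user', parts: [{ text: 'Hello there.' }] }] }); + * console.log('Total tokens:', totalTokens); + * ```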
+ */ + totalTokens: number; + /** + * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`. + * + * The total number of billable characters counted across all instances + * from the request. + */ + totalBillableCharacters?: number; + /** + * The breakdown, by modality, of how many tokens are consumed by the prompt. + */ + promptTokensDetails?: ModalityTokenCount[]; +} +/** + * An incremental content update from the model. + * + * @beta + */ +export interface LiveServerContent { + type: 'serverContent'; + /** + * The content that the model has generated as part of the current conversation with the user. + */ + modelTurn?: Content; + /** + * Indicates whether the turn is complete. This is `undefined` if the turn is not complete. + */ + turnComplete?: boolean; + /** + * Indicates whether the model was interrupted by the client. An interruption occurs when + * the client sends a message before the model finishes it's turn. This is `undefined` if the + * model was not interrupted. + */ + interrupted?: boolean; + /** + * Transcription of the audio that was input to the model. + */ + inputTranscription?: Transcription; + /** + * Transcription of the audio output from the model. + */ + outputTranscription?: Transcription; +} +/** + * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription + * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on + * the {@link LiveGenerationConfig}. + * + * @beta + */ +export interface Transcription { + /** + * The text transcription of the audio. + */ + text?: string; +} +/** + * A request from the model for the client to execute one or more functions. + * + * @beta + */ +export interface LiveServerToolCall { + type: 'toolCall'; + /** + * An array of function calls to run. + */ + functionCalls: FunctionCall[]; +} +/** + * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}. + * + * @beta + */ +export interface LiveServerToolCallCancellation { + type: 'toolCallCancellation'; + /** + * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}. + */ + functionIds: string[]; +} +/** + * The types of responses that can be returned by {@link LiveSession.receive}. + * + * @beta + */ +export declare const LiveResponseType: { + SERVER_CONTENT: string; + TOOL_CALL: string; + TOOL_CALL_CANCELLATION: string; +}; +/** + * The types of responses that can be returned by {@link LiveSession.receive}. + * This is a property on all messages that can be used for type narrowing. This property is not + * returned by the server, it is assigned to a server message object once it's parsed. + * + * @beta + */ +export type LiveResponseType = (typeof LiveResponseType)[keyof typeof LiveResponseType]; diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/schema.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/schema.d.ts new file mode 100644 index 0000000..7abb2d1 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/types/schema.d.ts @@ -0,0 +1,139 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +export declare const SchemaType: { + /** String type. */ + readonly STRING: "string"; + /** Number type. */ + readonly NUMBER: "number"; + /** Integer type. */ + readonly INTEGER: "integer"; + /** Boolean type. */ + readonly BOOLEAN: "boolean"; + /** Array type. */ + readonly ARRAY: "array"; + /** Object type. */ + readonly OBJECT: "object"; +}; +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +export type SchemaType = (typeof SchemaType)[keyof typeof SchemaType]; +/** + * Basic {@link Schema} properties shared across several Schema-related + * types. + * @public + */ +export interface SchemaShared<T> { + /** + * An array of {@link Schema}. The generated data must be valid against any of the schemas + * listed in this array. This allows specifying multiple possible structures or types for a + * single field. + */ + anyOf?: T[]; + /** Optional. The format of the property. + * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or + * `'date-time'`, otherwise requests will fail. + */ + format?: string; + /** Optional. The description of the property. */ + description?: string; + /** + * The title of the property. This helps document the schema's purpose but does not typically + * constrain the generated value. It can subtly guide the model by clarifying the intent of a + * field. + */ + title?: string; + /** Optional. The items of the property. */ + items?: T; + /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + minItems?: number; + /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + maxItems?: number; + /** Optional. Map of `Schema` objects. */ + properties?: { + [k: string]: T; + }; + /** A hint suggesting the order in which the keys should appear in the generated JSON string. */ + propertyOrdering?: string[]; + /** Optional. The enum of the property. */ + enum?: string[]; + /** Optional. The example of the property. */ + example?: unknown; + /** Optional. Whether the property is nullable. */ + nullable?: boolean; + /** The minimum value of a numeric type. */ + minimum?: number; + /** The maximum value of a numeric type. */ + maximum?: number; + [key: string]: unknown; +} +/** + * Params passed to {@link Schema} static methods to create specific + * {@link Schema} classes. + * @public + */ +export interface SchemaParams extends SchemaShared<SchemaInterface> { +} +/** + * Final format for {@link Schema} params passed to backend requests. + * @public + */ +export interface SchemaRequest extends SchemaShared<SchemaRequest> { + /** + * The type of the property. 
This can only be undefined when using `anyOf` schemas, + * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}. + */ + type?: SchemaType; + /** Optional. Array of required properties. */ + required?: string[]; +} +/** + * Interface for {@link Schema} class. + * @public + */ +export interface SchemaInterface extends SchemaShared<SchemaInterface> { + /** + * The type of the property. This can only be undefined when using `anyOf` schemas, + * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}. + */ + type?: SchemaType; +} +/** + * Interface for JSON parameters in a schema of {@link (SchemaType:type)} + * "object" when not using the `Schema.object()` helper. + * @public + */ +export interface ObjectSchemaRequest extends SchemaRequest { + type: 'object'; + /** + * This is not a property accepted in the final request to the backend, but is + * a client-side convenience property that is only usable by constructing + * a schema through the `Schema.object()` helper method. Populating this + * property will cause response errors if the object is not wrapped with + * `Schema.object()`. + */ + optionalProperties?: never; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/esm/src/websocket.d.ts b/frontend-old/node_modules/@firebase/ai/dist/esm/src/websocket.d.ts new file mode 100644 index 0000000..e2d511c --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/esm/src/websocket.d.ts @@ -0,0 +1,67 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * A standardized interface for interacting with a WebSocket connection. + * This abstraction allows the SDK to use the appropriate WebSocket implementation + * for the current JS environment (Browser vs. Node) without + * changing the core logic of the `LiveSession`. + * @internal + */ +export interface WebSocketHandler { + /** + * Establishes a connection to the given URL. + * + * @param url The WebSocket URL (e.g., wss://...). + * @returns A promise that resolves on successful connection or rejects on failure. + */ + connect(url: string): Promise<void>; + /** + * Sends data over the WebSocket. + * + * @param data The string or binary data to send. + */ + send(data: string | ArrayBuffer): void; + /** + * Returns an async generator that yields parsed JSON objects from the server. + * The yielded type is `unknown` because the handler cannot guarantee the shape of the data. + * The consumer is responsible for type validation. + * The generator terminates when the connection is closed. + * + * @returns A generator that allows consumers to pull messages using a `for await...of` loop. + */ + listen(): AsyncGenerator<unknown>; + /** + * Closes the WebSocket connection. + * + * @param code - A numeric status code explaining why the connection is closing.
+ * @param reason - A human-readable string explaining why the connection is closing. + */ + close(code?: number, reason?: string): Promise<void>; +} +/** + * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22. + * + * @internal + */ +export declare class WebSocketHandlerImpl implements WebSocketHandler { + private ws?; + constructor(); + connect(url: string): Promise<void>; + send(data: string | ArrayBuffer): void; + listen(): AsyncGenerator<unknown>; + close(code?: number, reason?: string): Promise<void>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/index.cjs.js b/frontend-old/node_modules/@firebase/ai/dist/index.cjs.js new file mode 100644 index 0000000..83ab94e --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/index.cjs.js @@ -0,0 +1,4297 @@ +'use strict'; + +Object.defineProperty(exports, '__esModule', { value: true }); + +var app = require('@firebase/app'); +var component = require('@firebase/component'); +var util = require('@firebase/util'); +var logger$1 = require('@firebase/logger'); + +var name = "@firebase/ai"; +var version = "2.5.0"; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const AI_TYPE = 'AI'; +const DEFAULT_LOCATION = 'us-central1'; +const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com'; +const DEFAULT_API_VERSION = 'v1beta'; +const PACKAGE_VERSION = version; +const LANGUAGE_TAG = 'gl-js'; +const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000; +/** + * Defines the name of the default in-cloud model to use for hybrid inference. + */ +const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite'; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Error class for the Firebase AI SDK. + * + * @public + */ +class AIError extends util.FirebaseError { + /** + * Constructs a new instance of the `AIError` class. + * + * @param code - The error code from {@link (AIErrorCode:type)}. + * @param message - A human-readable message describing the error. + * @param customErrorData - Optional error data. 
+ */ + constructor(code, message, customErrorData) { + // Match error format used by FirebaseError from ErrorFactory + const service = AI_TYPE; + const fullCode = `${service}/${code}`; + const fullMessage = `${service}: ${message} (${fullCode})`; + super(code, fullMessage); + this.code = code; + this.customErrorData = customErrorData; + // FirebaseError initializes a stack trace, but it assumes the error is created from the error + // factory. Since we break this assumption, we set the stack trace to be originating from this + // constructor. + // This is only supported in V8. + if (Error.captureStackTrace) { + // Allows us to initialize the stack trace without including the constructor itself at the + // top level of the stack trace. + Error.captureStackTrace(this, AIError); + } + // Allows instanceof AIError in ES5/ES6 + // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work + // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget + // which we can now use since we no longer target ES5. + Object.setPrototypeOf(this, AIError.prototype); + // Since Error is an interface, we don't inherit toString and so we define it ourselves. + this.toString = () => fullMessage; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Possible roles. + * @public + */ +const POSSIBLE_ROLES = ['user', 'model', 'function', 'system']; +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +const HarmCategory = { + HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH', + HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', + HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT', + HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT' +}; +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +const HarmBlockThreshold = { + /** + * Content with `NEGLIGIBLE` will be allowed. + */ + BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE', + /** + * Content with `NEGLIGIBLE` and `LOW` will be allowed. + */ + BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE', + /** + * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed. + */ + BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH', + /** + * All content will be allowed. + */ + BLOCK_NONE: 'BLOCK_NONE', + /** + * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding + * to the {@link (HarmCategory:type)} will not be present in the response. + */ + OFF: 'OFF' +}; +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). + * + * @public + */ +const HarmBlockMethod = { + /** + * The harm block method uses both probability and severity scores. + */ + SEVERITY: 'SEVERITY', + /** + * The harm block method uses the probability score. 
+ */ + PROBABILITY: 'PROBABILITY' +}; +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +const HarmProbability = { + /** + * Content has a negligible chance of being unsafe. + */ + NEGLIGIBLE: 'NEGLIGIBLE', + /** + * Content has a low chance of being unsafe. + */ + LOW: 'LOW', + /** + * Content has a medium chance of being unsafe. + */ + MEDIUM: 'MEDIUM', + /** + * Content has a high chance of being unsafe. + */ + HIGH: 'HIGH' +}; +/** + * Harm severity levels. + * @public + */ +const HarmSeverity = { + /** + * Negligible level of harm severity. + */ + HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE', + /** + * Low level of harm severity. + */ + HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW', + /** + * Medium level of harm severity. + */ + HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM', + /** + * High level of harm severity. + */ + HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH', + /** + * Harm severity is not supported. + * + * @remarks + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED' +}; +/** + * Reason that a prompt was blocked. + * @public + */ +const BlockReason = { + /** + * Content was blocked by safety settings. + */ + SAFETY: 'SAFETY', + /** + * Content was blocked, but the reason is uncategorized. + */ + OTHER: 'OTHER', + /** + * Content was blocked because it contained terms from the terminology blocklist. + */ + BLOCKLIST: 'BLOCKLIST', + /** + * Content was blocked due to prohibited content. + */ + PROHIBITED_CONTENT: 'PROHIBITED_CONTENT' +}; +/** + * Reason that a candidate finished. + * @public + */ +const FinishReason = { + /** + * Natural stop point of the model or provided stop sequence. + */ + STOP: 'STOP', + /** + * The maximum number of tokens as specified in the request was reached. + */ + MAX_TOKENS: 'MAX_TOKENS', + /** + * The candidate content was flagged for safety reasons. + */ + SAFETY: 'SAFETY', + /** + * The candidate content was flagged for recitation reasons. + */ + RECITATION: 'RECITATION', + /** + * Unknown reason. + */ + OTHER: 'OTHER', + /** + * The candidate content contained forbidden terms. + */ + BLOCKLIST: 'BLOCKLIST', + /** + * The candidate content potentially contained prohibited content. + */ + PROHIBITED_CONTENT: 'PROHIBITED_CONTENT', + /** + * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII). + */ + SPII: 'SPII', + /** + * The function call generated by the model was invalid. + */ + MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL' +}; +/** + * @public + */ +const FunctionCallingMode = { + /** + * Default model behavior; model decides to predict either a function call + * or a natural language response. + */ + AUTO: 'AUTO', + /** + * Model is constrained to always predicting a function call only. + * If `allowed_function_names` is set, the predicted function call will be + * limited to any one of `allowed_function_names`, else the predicted + * function call will be any one of the provided `function_declarations`. + */ + ANY: 'ANY', + /** + * Model will not predict any function call. Model behavior is same as when + * not passing any function declarations. + */ + NONE: 'NONE' +}; +/** + * Content part modality. + * @public + */ +const Modality = { + /** + * Unspecified modality. + */ + MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED', + /** + * Plain text. + */ + TEXT: 'TEXT', + /** + * Image. + */ + IMAGE: 'IMAGE', + /** + * Video. 
+ */ + VIDEO: 'VIDEO', + /** + * Audio. + */ + AUDIO: 'AUDIO', + /** + * Document (for example, PDF). + */ + DOCUMENT: 'DOCUMENT' +}; +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +const ResponseModality = { + /** + * Text. + * @beta + */ + TEXT: 'TEXT', + /** + * Image. + * @beta + */ + IMAGE: 'IMAGE', + /** + * Audio. + * @beta + */ + AUDIO: 'AUDIO' +}; +/** + * Determines whether inference happens on-device or in-cloud. + * + * @remarks + * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an + * on-device model. If on-device inference is not available, the SDK + * will fall back to using a cloud-hosted model. + * <br/> + * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an + * on-device model. The SDK will not fall back to a cloud-hosted model. + * If on-device inference is not available, inference methods will throw. + * <br/> + * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a + * cloud-hosted model. The SDK will not fall back to an on-device model. + * <br/> + * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a + * cloud-hosted model. If not available, the SDK will fall back to an + * on-device model. + * + * @beta + */ +const InferenceMode = { + 'PREFER_ON_DEVICE': 'prefer_on_device', + 'ONLY_ON_DEVICE': 'only_on_device', + 'ONLY_IN_CLOUD': 'only_in_cloud', + 'PREFER_IN_CLOUD': 'prefer_in_cloud' +}; +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +const InferenceSource = { + 'ON_DEVICE': 'on_device', + 'IN_CLOUD': 'in_cloud' +}; +/** + * Represents the result of the code execution. + * + * @beta + */ +const Outcome = { + UNSPECIFIED: 'OUTCOME_UNSPECIFIED', + OK: 'OUTCOME_OK', + FAILED: 'OUTCOME_FAILED', + DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED' +}; +/** + * The programming language of the code. + * + * @beta + */ +const Language = { + UNSPECIFIED: 'LANGUAGE_UNSPECIFIED', + PYTHON: 'PYTHON' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +const URLRetrievalStatus = { + /** + * Unspecified retrieval status. + */ + URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED', + /** + * The URL retrieval was successful. + */ + URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS', + /** + * The URL retrieval failed. 
+ */ + URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR', + /** + * The URL retrieval failed because the content is behind a paywall. + */ + URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL', + /** + * The URL retrieval failed because the content is unsafe. + */ + URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE' +}; +/** + * The types of responses that can be returned by {@link LiveSession.receive}. + * + * @beta + */ +const LiveResponseType = { + SERVER_CONTENT: 'serverContent', + TOOL_CALL: 'toolCall', + TOOL_CALL_CANCELLATION: 'toolCallCancellation' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +const AIErrorCode = { + /** A generic error occurred. */ + ERROR: 'error', + /** An error occurred in a request. */ + REQUEST_ERROR: 'request-error', + /** An error occurred in a response. */ + RESPONSE_ERROR: 'response-error', + /** An error occurred while performing a fetch. */ + FETCH_ERROR: 'fetch-error', + /** An error occurred because an operation was attempted on a closed session. */ + SESSION_CLOSED: 'session-closed', + /** An error associated with a Content object. */ + INVALID_CONTENT: 'invalid-content', + /** An error due to the Firebase API not being enabled in the Console. */ + API_NOT_ENABLED: 'api-not-enabled', + /** An error due to invalid Schema input. */ + INVALID_SCHEMA: 'invalid-schema', + /** An error occurred due to a missing Firebase API key. */ + NO_API_KEY: 'no-api-key', + /** An error occurred due to a missing Firebase app ID. */ + NO_APP_ID: 'no-app-id', + /** An error occurred due to a model name not being specified during initialization. */ + NO_MODEL: 'no-model', + /** An error occurred due to a missing project ID. */ + NO_PROJECT_ID: 'no-project-id', + /** An error occurred while parsing. */ + PARSE_FAILED: 'parse-failed', + /** An error occurred due an attempt to use an unsupported feature. */ + UNSUPPORTED: 'unsupported' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +const SchemaType = { + /** String type. */ + STRING: 'string', + /** Number type. */ + NUMBER: 'number', + /** Integer type. */ + INTEGER: 'integer', + /** Boolean type. 
*/
+ BOOLEAN: 'boolean',
+ /** Array type. */
+ ARRAY: 'array',
+ /** Object type. */
+ OBJECT: 'object'
+};
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * A filter level controlling how aggressively to filter sensitive content.
+ *
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
+ * for more details.
+ *
+ * @public
+ */
+const ImagenSafetyFilterLevel = {
+ /**
+ * The most aggressive filtering level; most strict blocking.
+ */
+ BLOCK_LOW_AND_ABOVE: 'block_low_and_above',
+ /**
+ * Blocks some sensitive prompts and responses.
+ */
+ BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',
+ /**
+ * Blocks few sensitive prompts and responses.
+ */
+ BLOCK_ONLY_HIGH: 'block_only_high',
+ /**
+ * The least aggressive filtering level; blocks very few sensitive prompts and responses.
+ *
+ * Access to this feature is restricted and may require your case to be reviewed and approved by
+ * Cloud support.
+ */
+ BLOCK_NONE: 'block_none'
+};
+/**
+ * A filter level controlling whether generation of images containing people or faces is allowed.
+ *
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
+ * documentation for more details.
+ *
+ * @public
+ */
+const ImagenPersonFilterLevel = {
+ /**
+ * Disallow generation of images containing people or faces; images of people are filtered out.
+ */
+ BLOCK_ALL: 'dont_allow',
+ /**
+ * Allow generation of images containing adults only; images of children are filtered out.
+ *
+ * Generation of images containing people or faces may require your use case to be
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+ * for more details.
+ */
+ ALLOW_ADULT: 'allow_adult',
+ /**
+ * Allow generation of images containing adults and children.
+ *
+ * Generation of images containing people or faces may require your use case to be
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+ * for more details.
+ */
+ ALLOW_ALL: 'allow_all'
+};
+/**
+ * Aspect ratios for Imagen images. 
+ * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +const ImagenAspectRatio = { + /** + * Square (1:1) aspect ratio. + */ + 'SQUARE': '1:1', + /** + * Landscape (3:4) aspect ratio. + */ + 'LANDSCAPE_3x4': '3:4', + /** + * Portrait (4:3) aspect ratio. + */ + 'PORTRAIT_4x3': '4:3', + /** + * Landscape (16:9) aspect ratio. + */ + 'LANDSCAPE_16x9': '16:9', + /** + * Portrait (9:16) aspect ratio. + */ + 'PORTRAIT_9x16': '9:16' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase AI SDK. + * This determines which backend service (Vertex AI Gemini API or Gemini Developer API) + * the SDK will communicate with. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +const BackendType = { + /** + * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + VERTEX_AI: 'VERTEX_AI', + /** + * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}). + * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + GOOGLE_AI: 'GOOGLE_AI' +}; // Using 'as const' makes the string values literal types + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Abstract base class representing the configuration for an AI service backend. + * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for + * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and + * {@link VertexAIBackend} for the Vertex AI Gemini API. + * + * @public + */ +class Backend { + /** + * Protected constructor for use by subclasses. + * @param type - The backend type. + */ + constructor(type) { + this.backendType = type; + } +} +/** + * Configuration class for the Gemini Developer API. 
+ * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Gemini Developer API as the backend. + * + * @public + */ +class GoogleAIBackend extends Backend { + /** + * Creates a configuration object for the Gemini Developer API backend. + */ + constructor() { + super(BackendType.GOOGLE_AI); + } +} +/** + * Configuration class for the Vertex AI Gemini API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend. + * + * @public + */ +class VertexAIBackend extends Backend { + /** + * Creates a configuration object for the Vertex AI backend. + * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + constructor(location = DEFAULT_LOCATION) { + super(BackendType.VERTEX_AI); + if (!location) { + this.location = DEFAULT_LOCATION; + } + else { + this.location = location; + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI} + * instances by backend type. + * + * @internal + */ +function encodeInstanceIdentifier(backend) { + if (backend instanceof GoogleAIBackend) { + return `${AI_TYPE}/googleai`; + } + else if (backend instanceof VertexAIBackend) { + return `${AI_TYPE}/vertexai/${backend.location}`; + } + else { + throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(backend.backendType)}`); + } +} +/** + * Decodes an instance identifier string into a {@link Backend}. + * + * @internal + */ +function decodeInstanceIdentifier(instanceIdentifier) { + const identifierParts = instanceIdentifier.split('/'); + if (identifierParts[0] !== AI_TYPE) { + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`); + } + const backendType = identifierParts[1]; + switch (backendType) { + case 'vertexai': + const location = identifierParts[2]; + if (!location) { + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown location '${instanceIdentifier}'`); + } + return new VertexAIBackend(location); + case 'googleai': + return new GoogleAIBackend(); + default: + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier string: '${instanceIdentifier}'`); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const logger = new logger$1.Logger('@firebase/vertexai');
+
+/**
+ * @internal
+ */
+var Availability;
+(function (Availability) {
+ Availability["UNAVAILABLE"] = "unavailable";
+ Availability["DOWNLOADABLE"] = "downloadable";
+ Availability["DOWNLOADING"] = "downloading";
+ Availability["AVAILABLE"] = "available";
+})(Availability || (Availability = {}));
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Defaults to support image inputs for convenience.
+const defaultExpectedInputs = [{ type: 'image' }];
+/**
+ * Defines an inference "backend" that uses Chrome's on-device model,
+ * and encapsulates logic for detecting when on-device inference is
+ * possible.
+ */
+class ChromeAdapterImpl {
+ constructor(languageModelProvider, mode, onDeviceParams) {
+ this.languageModelProvider = languageModelProvider;
+ this.mode = mode;
+ this.isDownloading = false;
+ this.onDeviceParams = {
+ createOptions: {
+ expectedInputs: defaultExpectedInputs
+ }
+ };
+ if (onDeviceParams) {
+ this.onDeviceParams = onDeviceParams;
+ if (!this.onDeviceParams.createOptions) {
+ this.onDeviceParams.createOptions = {
+ expectedInputs: defaultExpectedInputs
+ };
+ }
+ else if (!this.onDeviceParams.createOptions.expectedInputs) {
+ this.onDeviceParams.createOptions.expectedInputs =
+ defaultExpectedInputs;
+ }
+ }
+ }
+ /**
+ * Checks if a given request can be made on-device.
+ *
+ * Encapsulates a few concerns:
+ * the mode
+ * API existence
+ * prompt formatting
+ * model availability, including triggering download if necessary
+ *
+ * Pros: callers needn't be concerned with details of on-device availability.
+ * Cons: this method spans a few concerns and splits request validation from usage.
+ * If instance variables weren't already part of the API, we could consider a better
+ * separation of concerns.
+ */
+ async isAvailable(request) {
+ if (!this.mode) {
+ logger.debug(`On-device inference unavailable because mode is undefined.`);
+ return false;
+ }
+ if (this.mode === InferenceMode.ONLY_IN_CLOUD) {
+ logger.debug(`On-device inference unavailable because mode is "only_in_cloud".`);
+ return false;
+ }
+ // Triggers out-of-band download so model will eventually become available.
+ const availability = await this.downloadIfAvailable();
+ if (this.mode === InferenceMode.ONLY_ON_DEVICE) {
+ // If it will never be available due to API unavailability, throw. 
+ if (availability === Availability.UNAVAILABLE) { + throw new AIError(AIErrorCode.API_NOT_ENABLED, 'Local LanguageModel API not available in this environment.'); + } + else if (availability === Availability.DOWNLOADABLE || + availability === Availability.DOWNLOADING) { + // TODO(chholland): Better user experience during download - progress? + logger.debug(`Waiting for download of LanguageModel to complete.`); + await this.downloadPromise; + return true; + } + return true; + } + // Applies prefer_on_device logic. + if (availability !== Availability.AVAILABLE) { + logger.debug(`On-device inference unavailable because availability is "${availability}".`); + return false; + } + if (!ChromeAdapterImpl.isOnDeviceRequest(request)) { + logger.debug(`On-device inference unavailable because request is incompatible.`); + return false; + } + return true; + } + /** + * Generates content on device. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContent} for generating content in + * Cloud. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + async generateContent(request) { + const session = await this.createSession(); + const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)); + const text = await session.prompt(contents, this.onDeviceParams.promptOptions); + return ChromeAdapterImpl.toResponse(text); + } + /** + * Generates content stream on device. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in + * Cloud. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + async generateContentStream(request) { + const session = await this.createSession(); + const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)); + const stream = session.promptStreaming(contents, this.onDeviceParams.promptOptions); + return ChromeAdapterImpl.toStreamResponse(stream); + } + async countTokens(_request) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'Count Tokens is not yet available for on-device model.'); + } + /** + * Asserts inference for the given request can be performed by an on-device model. + */ + static isOnDeviceRequest(request) { + // Returns false if the prompt is empty. + if (request.contents.length === 0) { + logger.debug('Empty prompt rejected for on-device inference.'); + return false; + } + for (const content of request.contents) { + if (content.role === 'function') { + logger.debug(`"Function" role rejected for on-device inference.`); + return false; + } + // Returns false if request contains an image with an unsupported mime type. + for (const part of content.parts) { + if (part.inlineData && + ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(part.inlineData.mimeType) === -1) { + logger.debug(`Unsupported mime type "${part.inlineData.mimeType}" rejected for on-device inference.`); + return false; + } + } + } + return true; + } + /** + * Encapsulates logic to get availability and download a model if one is downloadable. + */ + async downloadIfAvailable() { + const availability = await this.languageModelProvider?.availability(this.onDeviceParams.createOptions); + if (availability === Availability.DOWNLOADABLE) { + this.download(); + } + return availability; + } + /** + * Triggers out-of-band download of an on-device model. 
+ * + * Chrome only downloads models as needed. Chrome knows a model is needed when code calls + * LanguageModel.create. + * + * Since Chrome manages the download, the SDK can only avoid redundant download requests by + * tracking if a download has previously been requested. + */ + download() { + if (this.isDownloading) { + return; + } + this.isDownloading = true; + this.downloadPromise = this.languageModelProvider + ?.create(this.onDeviceParams.createOptions) + .finally(() => { + this.isDownloading = false; + }); + } + /** + * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object. + */ + static async toLanguageModelMessage(content) { + const languageModelMessageContents = await Promise.all(content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent)); + return { + role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role), + content: languageModelMessageContents + }; + } + /** + * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object. + */ + static async toLanguageModelMessageContent(part) { + if (part.text) { + return { + type: 'text', + value: part.text + }; + } + else if (part.inlineData) { + const formattedImageContent = await fetch(`data:${part.inlineData.mimeType};base64,${part.inlineData.data}`); + const imageBlob = await formattedImageContent.blob(); + const imageBitmap = await createImageBitmap(imageBlob); + return { + type: 'image', + value: imageBitmap + }; + } + throw new AIError(AIErrorCode.REQUEST_ERROR, `Processing of this Part type is not currently supported.`); + } + /** + * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string. + */ + static toLanguageModelMessageRole(role) { + // Assumes 'function' rule has been filtered by isOnDeviceRequest + return role === 'model' ? 'assistant' : 'user'; + } + /** + * Abstracts Chrome session creation. + * + * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all + * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all + * inference. + * + * Chrome will remove a model from memory if it's no longer in use, so this method ensures a + * new session is created before an old session is destroyed. + */ + async createSession() { + if (!this.languageModelProvider) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Chrome AI requested for unsupported browser version.'); + } + const newSession = await this.languageModelProvider.create(this.onDeviceParams.createOptions); + if (this.oldSession) { + this.oldSession.destroy(); + } + // Holds session reference, so model isn't unloaded from memory. + this.oldSession = newSession; + return newSession; + } + /** + * Formats string returned by Chrome as a {@link Response} returned by Firebase AI. + */ + static toResponse(text) { + return { + json: async () => ({ + candidates: [ + { + content: { + parts: [{ text }] + } + } + ] + }) + }; + } + /** + * Formats string stream returned by Chrome as SSE returned by Firebase AI. 
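+ *
+ * For example (an illustrative chunk value, not taken from this module), a streamed text chunk "Hi"
+ * is enqueued as the SSE line `data: {"candidates":[{"content":{"role":"model","parts":[{"text":"Hi"}]}}]}`
+ * followed by a blank line, matching the transform below.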
+ */ + static toStreamResponse(stream) { + const encoder = new TextEncoder(); + return { + body: stream.pipeThrough(new TransformStream({ + transform(chunk, controller) { + const json = JSON.stringify({ + candidates: [ + { + content: { + role: 'model', + parts: [{ text: chunk }] + } + } + ] + }); + controller.enqueue(encoder.encode(`data: ${json}\n\n`)); + } + })) + }; + } +} +// Visible for testing +ChromeAdapterImpl.SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png']; +/** + * Creates a ChromeAdapterImpl on demand. + */ +function chromeAdapterFactory(mode, window, params) { + // Do not initialize a ChromeAdapter if we are not in hybrid mode. + if (typeof window !== 'undefined' && mode) { + return new ChromeAdapterImpl(window.LanguageModel, mode, params); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class AIService { + constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) { + this.app = app; + this.backend = backend; + this.chromeAdapterFactory = chromeAdapterFactory; + const appCheck = appCheckProvider?.getImmediate({ optional: true }); + const auth = authProvider?.getImmediate({ optional: true }); + this.auth = auth || null; + this.appCheck = appCheck || null; + if (backend instanceof VertexAIBackend) { + this.location = backend.location; + } + else { + this.location = ''; + } + } + _delete() { + return Promise.resolve(); + } + set options(optionsToSet) { + this._options = optionsToSet; + } + get options() { + return this._options; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +function factory(container, { instanceIdentifier }) { + if (!instanceIdentifier) { + throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.'); + } + const backend = decodeInstanceIdentifier(instanceIdentifier); + // getImmediate for FirebaseApp will always succeed + const app = container.getProvider('app').getImmediate(); + const auth = container.getProvider('auth-internal'); + const appCheckProvider = container.getProvider('app-check-internal'); + return new AIService(app, backend, auth, appCheckProvider, chromeAdapterFactory); +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Base class for Firebase AI model APIs. + * + * Instances of this class are associated with a specific Firebase AI {@link Backend} + * and provide methods for interacting with the configured generative model. + * + * @public + */ +class AIModel { + /** + * Constructs a new instance of the {@link AIModel} class. + * + * This constructor should only be called from subclasses that provide + * a model API. + * + * @param ai - an {@link AI} instance. + * @param modelName - The name of the model being used. It can be in one of the following formats: + * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) + * - `models/my-model` (will resolve to `publishers/google/models/my-model`) + * - `publishers/my-publisher/models/my-model` (fully qualified model name) + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @internal + */ + constructor(ai, modelName) { + if (!ai.app?.options?.apiKey) { + throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`); + } + else if (!ai.app?.options?.projectId) { + throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`); + } + else if (!ai.app?.options?.appId) { + throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`); + } + else { + this._apiSettings = { + apiKey: ai.app.options.apiKey, + project: ai.app.options.projectId, + appId: ai.app.options.appId, + automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled, + location: ai.location, + backend: ai.backend + }; + if (app._isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) { + const token = ai.app.settings.appCheckToken; + this._apiSettings.getAppCheckToken = () => { + return Promise.resolve({ token }); + }; + } + else if (ai.appCheck) { + if (ai.options?.useLimitedUseAppCheckTokens) { + this._apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken(); + } + else { + this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken(); + } + } + if (ai.auth) { + this._apiSettings.getAuthToken = () => ai.auth.getToken(); + } + this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType); + } + } + /** + * Normalizes the given model name to a fully qualified model resource name. + * + * @param modelName - The model name to normalize. + * @returns The fully qualified model resource name. 
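+ *
+ * For example (with an illustrative model name), `gemini-2.0-flash` normalizes to
+ * `models/gemini-2.0-flash` for the Gemini Developer API backend and to
+ * `publishers/google/models/gemini-2.0-flash` for the Vertex AI backend.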
+ * + * @internal + */ + static normalizeModelName(modelName, backendType) { + if (backendType === BackendType.GOOGLE_AI) { + return AIModel.normalizeGoogleAIModelName(modelName); + } + else { + return AIModel.normalizeVertexAIModelName(modelName); + } + } + /** + * @internal + */ + static normalizeGoogleAIModelName(modelName) { + return `models/${modelName}`; + } + /** + * @internal + */ + static normalizeVertexAIModelName(modelName) { + let model; + if (modelName.includes('/')) { + if (modelName.startsWith('models/')) { + // Add 'publishers/google' if the user is only passing in 'models/model-name'. + model = `publishers/google/${modelName}`; + } + else { + // Any other custom format (e.g. tuned models) must be passed in correctly. + model = modelName; + } + } + else { + // If path is not included, assume it's a non-tuned model. + model = `publishers/google/models/${modelName}`; + } + return model; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +var Task; +(function (Task) { + Task["GENERATE_CONTENT"] = "generateContent"; + Task["STREAM_GENERATE_CONTENT"] = "streamGenerateContent"; + Task["COUNT_TOKENS"] = "countTokens"; + Task["PREDICT"] = "predict"; +})(Task || (Task = {})); +class RequestUrl { + constructor(model, task, apiSettings, stream, requestOptions) { + this.model = model; + this.task = task; + this.apiSettings = apiSettings; + this.stream = stream; + this.requestOptions = requestOptions; + } + toString() { + const url = new URL(this.baseUrl); // Throws if the URL is invalid + url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`; + url.search = this.queryParams.toString(); + return url.toString(); + } + get baseUrl() { + return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`; + } + get apiVersion() { + return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available + } + get modelPath() { + if (this.apiSettings.backend instanceof GoogleAIBackend) { + return `projects/${this.apiSettings.project}/${this.model}`; + } + else if (this.apiSettings.backend instanceof VertexAIBackend) { + return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`; + } + else { + throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`); + } + } + get queryParams() { + const params = new URLSearchParams(); + if (this.stream) { + params.set('alt', 'sse'); + } + return params; + } +} +class WebSocketUrl { + constructor(apiSettings) { + this.apiSettings = apiSettings; + } + toString() { + const url = new URL(`wss://${DEFAULT_DOMAIN}`); + url.pathname = this.pathname; + const queryParams = new URLSearchParams(); + queryParams.set('key', this.apiSettings.apiKey); + url.search = queryParams.toString(); + return url.toString(); + } + get pathname() { + if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return 
'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent'; + } + else { + return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`; + } + } +} +/** + * Log language and "fire/version" to x-goog-api-client + */ +function getClientHeaders() { + const loggingTags = []; + loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`); + loggingTags.push(`fire/${PACKAGE_VERSION}`); + return loggingTags.join(' '); +} +async function getHeaders(url) { + const headers = new Headers(); + headers.append('Content-Type', 'application/json'); + headers.append('x-goog-api-client', getClientHeaders()); + headers.append('x-goog-api-key', url.apiSettings.apiKey); + if (url.apiSettings.automaticDataCollectionEnabled) { + headers.append('X-Firebase-Appid', url.apiSettings.appId); + } + if (url.apiSettings.getAppCheckToken) { + const appCheckToken = await url.apiSettings.getAppCheckToken(); + if (appCheckToken) { + headers.append('X-Firebase-AppCheck', appCheckToken.token); + if (appCheckToken.error) { + logger.warn(`Unable to obtain a valid App Check token: ${appCheckToken.error.message}`); + } + } + } + if (url.apiSettings.getAuthToken) { + const authToken = await url.apiSettings.getAuthToken(); + if (authToken) { + headers.append('Authorization', `Firebase ${authToken.accessToken}`); + } + } + return headers; +} +async function constructRequest(model, task, apiSettings, stream, body, requestOptions) { + const url = new RequestUrl(model, task, apiSettings, stream, requestOptions); + return { + url: url.toString(), + fetchOptions: { + method: 'POST', + headers: await getHeaders(url), + body + } + }; +} +async function makeRequest(model, task, apiSettings, stream, body, requestOptions) { + const url = new RequestUrl(model, task, apiSettings, stream, requestOptions); + let response; + let fetchTimeoutId; + try { + const request = await constructRequest(model, task, apiSettings, stream, body, requestOptions); + // Timeout is 180s by default + const timeoutMillis = requestOptions?.timeout != null && requestOptions.timeout >= 0 + ? requestOptions.timeout + : DEFAULT_FETCH_TIMEOUT_MS; + const abortController = new AbortController(); + fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis); + request.fetchOptions.signal = abortController.signal; + response = await fetch(request.url, request.fetchOptions); + if (!response.ok) { + let message = ''; + let errorDetails; + try { + const json = await response.json(); + message = json.error.message; + if (json.error.details) { + message += ` ${JSON.stringify(json.error.details)}`; + errorDetails = json.error.details; + } + } + catch (e) { + // ignored + } + if (response.status === 403 && + errorDetails && + errorDetails.some((detail) => detail.reason === 'SERVICE_DISABLED') && + errorDetails.some((detail) => detail.links?.[0]?.description.includes('Google developers console API activation'))) { + throw new AIError(AIErrorCode.API_NOT_ENABLED, `The Firebase AI SDK requires the Firebase AI ` + + `API ('firebasevertexai.googleapis.com') to be enabled in your ` + + `Firebase project. Enable this API by visiting the Firebase Console ` + + `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` + + `and clicking "Get started". 
If you enabled this API recently, ` + + `wait a few minutes for the action to propagate to our systems and ` + + `then retry.`, { + status: response.status, + statusText: response.statusText, + errorDetails + }); + } + throw new AIError(AIErrorCode.FETCH_ERROR, `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`, { + status: response.status, + statusText: response.statusText, + errorDetails + }); + } + } + catch (e) { + let err = e; + if (e.code !== AIErrorCode.FETCH_ERROR && + e.code !== AIErrorCode.API_NOT_ENABLED && + e instanceof Error) { + err = new AIError(AIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}`); + err.stack = e.stack; + } + throw err; + } + finally { + if (fetchTimeoutId) { + clearTimeout(fetchTimeoutId); + } + } + return response; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Check that at least one candidate exists and does not have a bad + * finish reason. Warns if multiple candidates exist. + */ +function hasValidCandidates(response) { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + logger.warn(`This response had ${response.candidates.length} ` + + `candidates. Returning text from the first candidate only. ` + + `Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, { + response + }); + } + return true; + } + else { + return false; + } +} +/** + * Creates an EnhancedGenerateContentResponse object that has helper functions and + * other modifications that improve usability. + */ +function createEnhancedContentResponse(response, inferenceSource = InferenceSource.IN_CLOUD) { + /** + * The Vertex AI backend omits default values. + * This causes the `index` property to be omitted from the first candidate in the + * response, since it has index 0, and 0 is a default value. + * See: https://github.com/firebase/firebase-js-sdk/issues/8566 + */ + if (response.candidates && !response.candidates[0].hasOwnProperty('index')) { + response.candidates[0].index = 0; + } + const responseWithHelpers = addHelpers(response); + responseWithHelpers.inferenceSource = inferenceSource; + return responseWithHelpers; +} +/** + * Adds convenience helper methods to a response object, including stream + * chunks (as long as each chunk is a complete GenerateContentResponse JSON). + */ +function addHelpers(response) { + response.text = () => { + if (hasValidCandidates(response)) { + return getText(response, part => !part.thought); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Text not available. 
${formatBlockErrorMessage(response)}`, { + response + }); + } + return ''; + }; + response.thoughtSummary = () => { + if (hasValidCandidates(response)) { + const result = getText(response, part => !!part.thought); + return result === '' ? undefined : result; + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Thought summary not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + response.inlineDataParts = () => { + if (hasValidCandidates(response)) { + return getInlineDataParts(response); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Data not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + response.functionCalls = () => { + if (hasValidCandidates(response)) { + return getFunctionCalls(response); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Function call not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + return response; +} +/** + * Returns all text from the first candidate's parts, filtering by whether + * `partFilter()` returns true. + * + * @param response - The `GenerateContentResponse` from which to extract text. + * @param partFilter - Only return `Part`s for which this returns true + */ +function getText(response, partFilter) { + const textStrings = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.text && partFilter(part)) { + textStrings.push(part.text); + } + } + } + if (textStrings.length > 0) { + return textStrings.join(''); + } + else { + return ''; + } +} +/** + * Returns every {@link FunctionCall} associated with first candidate. + */ +function getFunctionCalls(response) { + const functionCalls = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.functionCall) { + functionCalls.push(part.functionCall); + } + } + } + if (functionCalls.length > 0) { + return functionCalls; + } + else { + return undefined; + } +} +/** + * Returns every {@link InlineDataPart} in the first candidate if present. 
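+ * Returns `undefined` when the first candidate contains no `inlineData` parts.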
+ * + * @internal + */ +function getInlineDataParts(response) { + const data = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.inlineData) { + data.push(part); + } + } + } + if (data.length > 0) { + return data; + } + else { + return undefined; + } +} +const badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY]; +function hadBadFinishReason(candidate) { + return (!!candidate.finishReason && + badFinishReasons.some(reason => reason === candidate.finishReason)); +} +function formatBlockErrorMessage(response) { + let message = ''; + if ((!response.candidates || response.candidates.length === 0) && + response.promptFeedback) { + message += 'Response was blocked'; + if (response.promptFeedback?.blockReason) { + message += ` due to ${response.promptFeedback.blockReason}`; + } + if (response.promptFeedback?.blockReasonMessage) { + message += `: ${response.promptFeedback.blockReasonMessage}`; + } + } + else if (response.candidates?.[0]) { + const firstCandidate = response.candidates[0]; + if (hadBadFinishReason(firstCandidate)) { + message += `Candidate was blocked due to ${firstCandidate.finishReason}`; + if (firstCandidate.finishMessage) { + message += `: ${firstCandidate.finishMessage}`; + } + } + } + return message; +} +/** + * Convert a generic successful fetch response body to an Imagen response object + * that can be returned to the user. This converts the REST APIs response format to our + * APIs representation of a response. + * + * @internal + */ +async function handlePredictResponse(response) { + const responseJson = await response.json(); + const images = []; + let filteredReason = undefined; + // The backend should always send a non-empty array of predictions if the response was successful. + if (!responseJson.predictions || responseJson.predictions?.length === 0) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'); + } + for (const prediction of responseJson.predictions) { + if (prediction.raiFilteredReason) { + filteredReason = prediction.raiFilteredReason; + } + else if (prediction.mimeType && prediction.bytesBase64Encoded) { + images.push({ + mimeType: prediction.mimeType, + bytesBase64Encoded: prediction.bytesBase64Encoded + }); + } + else if (prediction.mimeType && prediction.gcsUri) { + images.push({ + mimeType: prediction.mimeType, + gcsURI: prediction.gcsUri + }); + } + else if (prediction.safetyAttributes) ; + else { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Unexpected element in 'predictions' array in response: '${JSON.stringify(prediction)}'`); + } + } + return { images, filteredReason }; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI). 
+ * The public API prioritizes the format used by the Vertex AI Gemini API.
+ * We avoid having two sets of types by translating requests and responses between the two API formats.
+ * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API
+ * with minimal code changes.
+ *
+ * These functions map requests and responses between the two API formats.
+ * Requests in the Vertex AI format are mapped to the Google AI format before being sent.
+ * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.
+ */
+/**
+ * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI.
+ *
+ * @param generateContentRequest The {@link GenerateContentRequest} to map.
+ * @returns A {@link GenerateContentRequest} that conforms to the Google AI format.
+ *
+ * @throws If the request contains properties that are unsupported by Google AI.
+ *
+ * @internal
+ */
+function mapGenerateContentRequest(generateContentRequest) {
+ generateContentRequest.safetySettings?.forEach(safetySetting => {
+ if (safetySetting.method) {
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'SafetySetting.method is not supported in the Gemini Developer API. Please remove this property.');
+ }
+ });
+ if (generateContentRequest.generationConfig?.topK) {
+ const roundedTopK = Math.round(generateContentRequest.generationConfig.topK);
+ if (roundedTopK !== generateContentRequest.generationConfig.topK) {
+ logger.warn('topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.');
+ generateContentRequest.generationConfig.topK = roundedTopK;
+ }
+ }
+ return generateContentRequest;
+}
+/**
+ * Maps a {@link GenerateContentResponse} from Google AI to the Vertex AI
+ * {@link GenerateContentResponse} format that is exposed in the public API.
+ *
+ * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.
+ * @returns A {@link GenerateContentResponse} that conforms to the public API's format.
+ *
+ * @internal
+ */
+function mapGenerateContentResponse(googleAIResponse) {
+ const generateContentResponse = {
+ candidates: googleAIResponse.candidates
+ ? mapGenerateContentCandidates(googleAIResponse.candidates)
+ : undefined,
+ prompt: googleAIResponse.promptFeedback
+ ? mapPromptFeedback(googleAIResponse.promptFeedback)
+ : undefined,
+ usageMetadata: googleAIResponse.usageMetadata
+ };
+ return generateContentResponse;
+}
+/**
+ * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.
+ *
+ * @param countTokensRequest The {@link CountTokensRequest} to map.
+ * @param model The model to count tokens with.
+ * @returns A {@link CountTokensRequest} that conforms to the Google AI format.
+ *
+ * @internal
+ */
+function mapCountTokensRequest(countTokensRequest, model) {
+ const mappedCountTokensRequest = {
+ generateContentRequest: {
+ model,
+ ...countTokensRequest
+ }
+ };
+ return mappedCountTokensRequest;
+}
+/**
+ * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms
+ * to the Vertex AI API format.
+ *
+ * @param candidates The {@link GoogleAIGenerateContentCandidate} to map.
+ * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format.
+ *
+ * @throws If any {@link Part} in the candidates has a `videoMetadata` property. 
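+ *
+ * For example (illustrative values), a Google AI safety rating that omits `severity`,
+ * `probabilityScore`, and `severityScore` is mapped with `severity` set to
+ * `HARM_SEVERITY_UNSUPPORTED` and both scores set to `0`, as implemented below.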
+ * + * @internal + */ +function mapGenerateContentCandidates(candidates) { + const mappedCandidates = []; + let mappedSafetyRatings; + if (mappedCandidates) { + candidates.forEach(candidate => { + // Map citationSources to citations. + let citationMetadata; + if (candidate.citationMetadata) { + citationMetadata = { + citations: candidate.citationMetadata.citationSources + }; + } + // Assign missing candidate SafetyRatings properties to their defaults if undefined. + if (candidate.safetyRatings) { + mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => { + return { + ...safetyRating, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0 + }; + }); + } + // videoMetadata is not supported. + // Throw early since developers may send a long video as input and only expect to pay + // for inference on a small portion of the video. + if (candidate.content?.parts?.some(part => part?.videoMetadata)) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'); + } + const mappedCandidate = { + index: candidate.index, + content: candidate.content, + finishReason: candidate.finishReason, + finishMessage: candidate.finishMessage, + safetyRatings: mappedSafetyRatings, + citationMetadata, + groundingMetadata: candidate.groundingMetadata, + urlContextMetadata: candidate.urlContextMetadata + }; + mappedCandidates.push(mappedCandidate); + }); + } + return mappedCandidates; +} +function mapPromptFeedback(promptFeedback) { + // Assign missing SafetyRating properties to their defaults if undefined. + const mappedSafetyRatings = []; + promptFeedback.safetyRatings.forEach(safetyRating => { + mappedSafetyRatings.push({ + category: safetyRating.category, + probability: safetyRating.probability, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0, + blocked: safetyRating.blocked + }); + }); + const mappedPromptFeedback = { + blockReason: promptFeedback.blockReason, + safetyRatings: mappedSafetyRatings, + blockReasonMessage: promptFeedback.blockReasonMessage + }; + return mappedPromptFeedback; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/; +/** + * Process a response.body stream from the backend and return an + * iterator that provides one complete GenerateContentResponse at a time + * and a promise that resolves with a single aggregated + * GenerateContentResponse. 
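+ *
+ * Illustrative usage (variable names here are assumptions, not exports of this module):
+ * `const { stream, response } = processStream(fetchResponse, apiSettings)` returns an async
+ * iterable of partial responses (`for await (const chunk of stream) { ... }`) and a promise,
+ * `await response`, that resolves with the single aggregated response once the stream ends.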
+ * + * @param response - Response from a fetch call + */ +function processStream(response, apiSettings, inferenceSource) { + const inputStream = response.body.pipeThrough(new TextDecoderStream('utf8', { fatal: true })); + const responseStream = getResponseStream(inputStream); + const [stream1, stream2] = responseStream.tee(); + return { + stream: generateResponseSequence(stream1, apiSettings, inferenceSource), + response: getResponsePromise(stream2, apiSettings, inferenceSource) + }; +} +async function getResponsePromise(stream, apiSettings, inferenceSource) { + const allResponses = []; + const reader = stream.getReader(); + while (true) { + const { done, value } = await reader.read(); + if (done) { + let generateContentResponse = aggregateResponses(allResponses); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + generateContentResponse = mapGenerateContentResponse(generateContentResponse); + } + return createEnhancedContentResponse(generateContentResponse, inferenceSource); + } + allResponses.push(value); + } +} +async function* generateResponseSequence(stream, apiSettings, inferenceSource) { + const reader = stream.getReader(); + while (true) { + const { value, done } = await reader.read(); + if (done) { + break; + } + let enhancedResponse; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + enhancedResponse = createEnhancedContentResponse(mapGenerateContentResponse(value), inferenceSource); + } + else { + enhancedResponse = createEnhancedContentResponse(value, inferenceSource); + } + const firstCandidate = enhancedResponse.candidates?.[0]; + // Don't yield a response with no useful data for the developer. + if (!firstCandidate?.content?.parts && + !firstCandidate?.finishReason && + !firstCandidate?.citationMetadata && + !firstCandidate?.urlContextMetadata) { + continue; + } + yield enhancedResponse; + } +} +/** + * Reads a raw stream from the fetch response and join incomplete + * chunks, returning a new stream that provides a single complete + * GenerateContentResponse in each iteration. + */ +function getResponseStream(inputStream) { + const reader = inputStream.getReader(); + const stream = new ReadableStream({ + start(controller) { + let currentText = ''; + return pump(); + function pump() { + return reader.read().then(({ value, done }) => { + if (done) { + if (currentText.trim()) { + controller.error(new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')); + return; + } + controller.close(); + return; + } + currentText += value; + let match = currentText.match(responseLineRE); + let parsedResponse; + while (match) { + try { + parsedResponse = JSON.parse(match[1]); + } + catch (e) { + controller.error(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing JSON response: "${match[1]}`)); + return; + } + controller.enqueue(parsedResponse); + currentText = currentText.substring(match[0].length); + match = currentText.match(responseLineRE); + } + return pump(); + }); + } + } + }); + return stream; +} +/** + * Aggregates an array of `GenerateContentResponse`s into a single + * GenerateContentResponse. + */ +function aggregateResponses(responses) { + const lastResponse = responses[responses.length - 1]; + const aggregatedResponse = { + promptFeedback: lastResponse?.promptFeedback + }; + for (const response of responses) { + if (response.candidates) { + for (const candidate of response.candidates) { + // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined. 
+ // See: https://github.com/firebase/firebase-js-sdk/issues/8566 + const i = candidate.index || 0; + if (!aggregatedResponse.candidates) { + aggregatedResponse.candidates = []; + } + if (!aggregatedResponse.candidates[i]) { + aggregatedResponse.candidates[i] = { + index: candidate.index + }; + } + // Keep overwriting, the last one will be final + aggregatedResponse.candidates[i].citationMetadata = + candidate.citationMetadata; + aggregatedResponse.candidates[i].finishReason = candidate.finishReason; + aggregatedResponse.candidates[i].finishMessage = + candidate.finishMessage; + aggregatedResponse.candidates[i].safetyRatings = + candidate.safetyRatings; + aggregatedResponse.candidates[i].groundingMetadata = + candidate.groundingMetadata; + // The urlContextMetadata object is defined in the first chunk of the response stream. + // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to + // make sure that we don't overwrite the first value urlContextMetadata object with undefined. + // FIXME: What happens if we receive a second, valid urlContextMetadata object? + const urlContextMetadata = candidate.urlContextMetadata; + if (typeof urlContextMetadata === 'object' && + urlContextMetadata !== null && + Object.keys(urlContextMetadata).length > 0) { + aggregatedResponse.candidates[i].urlContextMetadata = + urlContextMetadata; + } + /** + * Candidates should always have content and parts, but this handles + * possible malformed responses. + */ + if (candidate.content) { + // Skip a candidate without parts. + if (!candidate.content.parts) { + continue; + } + if (!aggregatedResponse.candidates[i].content) { + aggregatedResponse.candidates[i].content = { + role: candidate.content.role || 'user', + parts: [] + }; + } + for (const part of candidate.content.parts) { + const newPart = { ...part }; + // The backend can send empty text parts. If these are sent back + // (e.g. in chat history), the backend will respond with an error. + // To prevent this, ignore empty text parts. + if (part.text === '') { + continue; + } + if (Object.keys(newPart).length > 0) { + aggregatedResponse.candidates[i].content.parts.push(newPart); + } + } + } + } + } + } + return aggregatedResponse; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const errorsCausingFallback = [ + // most network errors + AIErrorCode.FETCH_ERROR, + // fallback code for all other errors in makeRequest + AIErrorCode.ERROR, + // error due to API not being enabled in project + AIErrorCode.API_NOT_ENABLED +]; +/** + * Dispatches a request to the appropriate backend (on-device or in-cloud) + * based on the inference mode. + * + * @param request - The request to be sent. + * @param chromeAdapter - The on-device model adapter. + * @param onDeviceCall - The function to call for on-device inference. + * @param inCloudCall - The function to call for in-cloud inference. + * @returns The response from the backend. 
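+ *
+ * Dispatch summary (a sketch of the rules implemented below): `PREFER_ON_DEVICE` uses the
+ * on-device model when available and otherwise calls the cloud; `ONLY_ON_DEVICE` uses the
+ * on-device model or throws; `ONLY_IN_CLOUD` always calls the cloud; `PREFER_IN_CLOUD` calls
+ * the cloud and falls back to the device for fetch, generic, and API-not-enabled errors.
+ * When no `chromeAdapter` is provided, the request always goes to the cloud.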
+ */
+async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+    if (!chromeAdapter) {
+        return {
+            response: await inCloudCall(),
+            inferenceSource: InferenceSource.IN_CLOUD
+        };
+    }
+    switch (chromeAdapter.mode) {
+        case InferenceMode.ONLY_ON_DEVICE:
+            if (await chromeAdapter.isAvailable(request)) {
+                return {
+                    response: await onDeviceCall(),
+                    inferenceSource: InferenceSource.ON_DEVICE
+                };
+            }
+            throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+        case InferenceMode.ONLY_IN_CLOUD:
+            return {
+                response: await inCloudCall(),
+                inferenceSource: InferenceSource.IN_CLOUD
+            };
+        case InferenceMode.PREFER_IN_CLOUD:
+            try {
+                return {
+                    response: await inCloudCall(),
+                    inferenceSource: InferenceSource.IN_CLOUD
+                };
+            }
+            catch (e) {
+                if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+                    return {
+                        response: await onDeviceCall(),
+                        inferenceSource: InferenceSource.ON_DEVICE
+                    };
+                }
+                throw e;
+            }
+        case InferenceMode.PREFER_ON_DEVICE:
+            if (await chromeAdapter.isAvailable(request)) {
+                return {
+                    response: await onDeviceCall(),
+                    inferenceSource: InferenceSource.ON_DEVICE
+                };
+            }
+            return {
+                response: await inCloudCall(),
+                inferenceSource: InferenceSource.IN_CLOUD
+            };
+        default:
+            throw new AIError(AIErrorCode.ERROR, `Unexpected inference mode: ${chromeAdapter.mode}`);
+    }
+}
+
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = mapGenerateContentRequest(params); + } + return makeRequest(model, Task.STREAM_GENERATE_CONTENT, apiSettings, + /* stream */ true, JSON.stringify(params), requestOptions); +} +async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) { + const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions)); + return processStream(callResult.response, apiSettings); // TODO: Map streaming responses +} +async function generateContentOnCloud(apiSettings, model, params, requestOptions) { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = mapGenerateContentRequest(params); + } + return makeRequest(model, Task.GENERATE_CONTENT, apiSettings, + /* stream */ false, JSON.stringify(params), requestOptions); +} +async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) { + const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions)); + const generateContentResponse = await processGenerateContentResponse(callResult.response, apiSettings); + const enhancedResponse = createEnhancedContentResponse(generateContentResponse, callResult.inferenceSource); + return { + response: enhancedResponse + }; +} +async function processGenerateContentResponse(response, apiSettings) { + const responseJson = await response.json(); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return mapGenerateContentResponse(responseJson); + } + else { + return responseJson; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +function formatSystemInstruction(input) { + // null or undefined + if (input == null) { + return undefined; + } + else if (typeof input === 'string') { + return { role: 'system', parts: [{ text: input }] }; + } + else if (input.text) { + return { role: 'system', parts: [input] }; + } + else if (input.parts) { + if (!input.role) { + return { role: 'system', parts: input.parts }; + } + else { + return input; + } + } +} +function formatNewContent(request) { + let newParts = []; + if (typeof request === 'string') { + newParts = [{ text: request }]; + } + else { + for (const partOrString of request) { + if (typeof partOrString === 'string') { + newParts.push({ text: partOrString }); + } + else { + newParts.push(partOrString); + } + } + } + return assignRoleToPartsAndValidateSendMessageRequest(newParts); +} +/** + * When multiple Part types (i.e. FunctionResponsePart and TextPart) are + * passed in a single Part array, we may need to assign different roles to each + * part. 
Currently only FunctionResponsePart requires a role other than 'user'. + * @private + * @param parts Array of parts to pass to the model + * @returns Array of content items + */ +function assignRoleToPartsAndValidateSendMessageRequest(parts) { + const userContent = { role: 'user', parts: [] }; + const functionContent = { role: 'function', parts: [] }; + let hasUserContent = false; + let hasFunctionContent = false; + for (const part of parts) { + if ('functionResponse' in part) { + functionContent.parts.push(part); + hasFunctionContent = true; + } + else { + userContent.parts.push(part); + hasUserContent = true; + } + } + if (hasUserContent && hasFunctionContent) { + throw new AIError(AIErrorCode.INVALID_CONTENT, 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'); + } + if (!hasUserContent && !hasFunctionContent) { + throw new AIError(AIErrorCode.INVALID_CONTENT, 'No Content is provided for sending chat message.'); + } + if (hasUserContent) { + return userContent; + } + return functionContent; +} +function formatGenerateContentInput(params) { + let formattedRequest; + if (params.contents) { + formattedRequest = params; + } + else { + // Array or string + const content = formatNewContent(params); + formattedRequest = { contents: [content] }; + } + if (params.systemInstruction) { + formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction); + } + return formattedRequest; +} +/** + * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format + * that is expected from the REST API. + * + * @internal + */ +function createPredictRequestBody(prompt, { gcsURI, imageFormat, addWatermark, numberOfImages = 1, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }) { + // Properties that are undefined will be omitted from the JSON string that is sent in the request. + const body = { + instances: [ + { + prompt + } + ], + parameters: { + storageUri: gcsURI, + negativePrompt, + sampleCount: numberOfImages, + aspectRatio, + outputOptions: imageFormat, + addWatermark, + safetyFilterLevel, + personGeneration: personFilterLevel, + includeRaiReason: true, + includeSafetyAttributes: true + } + }; + return body; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// https://ai.google.dev/api/rest/v1beta/Content#part +const VALID_PART_FIELDS = [ + 'text', + 'inlineData', + 'functionCall', + 'functionResponse', + 'thought', + 'thoughtSignature' +]; +const VALID_PARTS_PER_ROLE = { + user: ['text', 'inlineData'], + function: ['functionResponse'], + model: ['text', 'functionCall', 'thought', 'thoughtSignature'], + // System instructions shouldn't be in history anyway. + system: ['text'] +}; +const VALID_PREVIOUS_CONTENT_ROLES = { + user: ['model'], + function: ['model'], + model: ['user', 'function'], + // System instructions shouldn't be in history. 
+ system: [] +}; +function validateChatHistory(history) { + let prevContent = null; + for (const currContent of history) { + const { role, parts } = currContent; + if (!prevContent && role !== 'user') { + throw new AIError(AIErrorCode.INVALID_CONTENT, `First Content should be with role 'user', got ${role}`); + } + if (!POSSIBLE_ROLES.includes(role)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`); + } + if (!Array.isArray(parts)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content should have 'parts' property with an array of Parts`); + } + if (parts.length === 0) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Each Content should have at least one part`); + } + const countFields = { + text: 0, + inlineData: 0, + functionCall: 0, + functionResponse: 0, + thought: 0, + thoughtSignature: 0, + executableCode: 0, + codeExecutionResult: 0 + }; + for (const part of parts) { + for (const key of VALID_PART_FIELDS) { + if (key in part) { + countFields[key] += 1; + } + } + } + const validParts = VALID_PARTS_PER_ROLE[role]; + for (const key of VALID_PART_FIELDS) { + if (!validParts.includes(key) && countFields[key] > 0) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't contain '${key}' part`); + } + } + if (prevContent) { + const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role]; + if (!validPreviousContentRoles.includes(prevContent.role)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't follow '${prevContent.role}'. Valid previous roles: ${JSON.stringify(VALID_PREVIOUS_CONTENT_ROLES)}`); + } + } + prevContent = currContent; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Do not log a message for this error. + */ +const SILENT_ERROR = 'SILENT_ERROR'; +/** + * ChatSession class that enables sending chat messages and stores + * history of sent and received messages so far. + * + * @public + */ +class ChatSession { + constructor(apiSettings, model, chromeAdapter, params, requestOptions) { + this.model = model; + this.chromeAdapter = chromeAdapter; + this.params = params; + this.requestOptions = requestOptions; + this._history = []; + this._sendPromise = Promise.resolve(); + this._apiSettings = apiSettings; + if (params?.history) { + validateChatHistory(params.history); + this._history = params.history; + } + } + /** + * Gets the chat history so far. Blocked prompts are not added to history. + * Neither blocked candidates nor the prompts that generated them are added + * to history. 
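+ *
+ * @example
+ * A usage sketch; `model` stands for a {@link GenerativeModel} obtained elsewhere (for example
+ * via `getGenerativeModel()`), and the prompt is a placeholder:
+ * ```javascript
+ * const chat = model.startChat();
+ * await chat.sendMessage('Hello!');
+ * const history = await chat.getHistory();
+ * // history now holds the 'user' turn followed by the 'model' reply.
+ * ```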
+ */ + async getHistory() { + await this._sendPromise; + return this._history; + } + /** + * Sends a chat message and receives a non-streaming + * {@link GenerateContentResult} + */ + async sendMessage(request) { + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: this.params?.safetySettings, + generationConfig: this.params?.generationConfig, + tools: this.params?.tools, + toolConfig: this.params?.toolConfig, + systemInstruction: this.params?.systemInstruction, + contents: [...this._history, newContent] + }; + let finalResult = {}; + // Add onto the chain. + this._sendPromise = this._sendPromise + .then(() => generateContent(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions)) + .then(result => { + if (result.response.candidates && + result.response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = { + parts: result.response.candidates?.[0].content.parts || [], + // Response seems to come back without a role set. + role: result.response.candidates?.[0].content.role || 'model' + }; + this._history.push(responseContent); + } + else { + const blockErrorMessage = formatBlockErrorMessage(result.response); + if (blockErrorMessage) { + logger.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + finalResult = result; + }); + await this._sendPromise; + return finalResult; + } + /** + * Sends a chat message and receives the response as a + * {@link GenerateContentStreamResult} containing an iterable stream + * and a response promise. + */ + async sendMessageStream(request) { + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: this.params?.safetySettings, + generationConfig: this.params?.generationConfig, + tools: this.params?.tools, + toolConfig: this.params?.toolConfig, + systemInstruction: this.params?.systemInstruction, + contents: [...this._history, newContent] + }; + const streamPromise = generateContentStream(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions); + // Add onto the chain. + this._sendPromise = this._sendPromise + .then(() => streamPromise) + // This must be handled to avoid unhandled rejection, but jump + // to the final catch block with a label to not log this error. + .catch(_ignored => { + throw new Error(SILENT_ERROR); + }) + .then(streamResult => streamResult.response) + .then(response => { + if (response.candidates && response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = { ...response.candidates[0].content }; + // Response seems to come back without a role set. + if (!responseContent.role) { + responseContent.role = 'model'; + } + this._history.push(responseContent); + } + else { + const blockErrorMessage = formatBlockErrorMessage(response); + if (blockErrorMessage) { + logger.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + }) + .catch(e => { + // Errors in streamPromise are already catchable by the user as + // streamPromise is returned. + // Avoid duplicating the error message in logs. + if (e.message !== SILENT_ERROR) { + // Users do not have access to _sendPromise to catch errors + // downstream from streamPromise, so they should not throw. 
+ logger.error(e); + } + }); + return streamPromise; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +async function countTokensOnCloud(apiSettings, model, params, requestOptions) { + let body = ''; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + const mappedParams = mapCountTokensRequest(params, model); + body = JSON.stringify(mappedParams); + } + else { + body = JSON.stringify(params); + } + const response = await makeRequest(model, Task.COUNT_TOKENS, apiSettings, false, body, requestOptions); + return response.json(); +} +async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) { + if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.'); + } + return countTokensOnCloud(apiSettings, model, params, requestOptions); +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for generative model APIs. + * @public + */ +class GenerativeModel extends AIModel { + constructor(ai, modelParams, requestOptions, chromeAdapter) { + super(ai, modelParams.model); + this.chromeAdapter = chromeAdapter; + this.generationConfig = modelParams.generationConfig || {}; + this.safetySettings = modelParams.safetySettings || []; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + this.requestOptions = requestOptions || {}; + } + /** + * Makes a single non-streaming call to the model + * and returns an object containing a single {@link GenerateContentResponse}. + */ + async generateContent(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContent(this._apiSettings, this.model, { + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + ...formattedParams + }, this.chromeAdapter, this.requestOptions); + } + /** + * Makes a single streaming call to the model + * and returns an object containing an iterable stream that iterates + * over all chunks in the streaming response as well as + * a promise that returns the final aggregated response. 
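+ *
+ * @example
+ * A usage sketch; `model` and the prompt are placeholders, and `text()` is assumed to be the
+ * convenience helper attached to enhanced responses by this SDK:
+ * ```javascript
+ * const result = await model.generateContentStream('Write a short poem.');
+ * for await (const chunk of result.stream) {
+ *   console.log(chunk.text());
+ * }
+ * const finalResponse = await result.response; // aggregated response
+ * ```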
+ */ + async generateContentStream(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContentStream(this._apiSettings, this.model, { + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + ...formattedParams + }, this.chromeAdapter, this.requestOptions); + } + /** + * Gets a new {@link ChatSession} instance which can be used for + * multi-turn chats. + */ + startChat(startChatParams) { + return new ChatSession(this._apiSettings, this.model, this.chromeAdapter, { + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + /** + * Overrides params inherited from GenerativeModel with those explicitly set in the + * StartChatParams. For example, if startChatParams.generationConfig is set, it'll override + * this.generationConfig. + */ + ...startChatParams + }, this.requestOptions); + } + /** + * Counts the tokens in the provided request. + */ + async countTokens(request) { + const formattedParams = formatGenerateContentInput(request); + return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Represents an active, real-time, bidirectional conversation with the model. + * + * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}. + * + * @beta + */ +class LiveSession { + /** + * @internal + */ + constructor(webSocketHandler, serverMessages) { + this.webSocketHandler = webSocketHandler; + this.serverMessages = serverMessages; + /** + * Indicates whether this Live session is closed. + * + * @beta + */ + this.isClosed = false; + /** + * Indicates whether this Live session is being controlled by an `AudioConversationController`. + * + * @beta + */ + this.inConversation = false; + } + /** + * Sends content to the server. + * + * @param request - The message to send to the model. + * @param turnComplete - Indicates if the turn is complete. Defaults to false. + * @throws If this session has been closed. + * + * @beta + */ + async send(request, turnComplete = true) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const newContent = formatNewContent(request); + const message = { + clientContent: { + turns: [newContent], + turnComplete + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends text to the server in realtime. + * + * @example + * ```javascript + * liveSession.sendTextRealtime("Hello, how are you?"); + * ``` + * + * @param text - The text data to send. + * @throws If this session has been closed. 
+ * + * @beta + */ + async sendTextRealtime(text) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + text + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends audio data to the server in realtime. + * + * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz + * little-endian. + * + * @example + * ```javascript + * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian. + * const blob = { mimeType: "audio/pcm", data: pcmData }; + * liveSession.sendAudioRealtime(blob); + * ``` + * + * @param blob - The base64-encoded PCM data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + async sendAudioRealtime(blob) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + audio: blob + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends video data to the server in realtime. + * + * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It + * is recommended to set `mimeType` to `image/jpeg`. + * + * @example + * ```javascript + * // const videoFrame = ... base64-encoded JPEG data + * const blob = { mimeType: "image/jpeg", data: videoFrame }; + * liveSession.sendVideoRealtime(blob); + * ``` + * @param blob - The base64-encoded video data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + async sendVideoRealtime(blob) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + video: blob + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends function responses to the server. + * + * @param functionResponses - The function responses to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendFunctionResponses(functionResponses) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + toolResponse: { + functionResponses + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Yields messages received from the server. + * This can only be used by one consumer at a time. + * + * @returns An `AsyncGenerator` that yields server messages as they arrive. + * @throws If the session is already closed, or if we receive a response that we don't support. + * + * @beta + */ + async *receive() { + if (this.isClosed) { + throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot read from a Live session that is closed. 
Try starting a new Live session.'); + } + for await (const message of this.serverMessages) { + if (message && typeof message === 'object') { + if (LiveResponseType.SERVER_CONTENT in message) { + yield { + type: 'serverContent', + ...message + .serverContent + }; + } + else if (LiveResponseType.TOOL_CALL in message) { + yield { + type: 'toolCall', + ...message + .toolCall + }; + } + else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) { + yield { + type: 'toolCallCancellation', + ...message.toolCallCancellation + }; + } + else { + logger.warn(`Received an unknown message type from the server: ${JSON.stringify(message)}`); + } + } + else { + logger.warn(`Received an invalid message from the server: ${JSON.stringify(message)}`); + } + } + } + /** + * Closes this session. + * All methods on this session will throw an error once this resolves. + * + * @beta + */ + async close() { + if (!this.isClosed) { + this.isClosed = true; + await this.webSocketHandler.close(1000, 'Client closed session.'); + } + } + /** + * Sends realtime input to the server. + * + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * @param mediaChunks - The media chunks to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendMediaChunks(mediaChunks) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + // The backend does not support sending more than one mediaChunk in one message. + // Work around this limitation by sending mediaChunks in separate messages. + mediaChunks.forEach(mediaChunk => { + const message = { + realtimeInput: { mediaChunks: [mediaChunk] } + }; + this.webSocketHandler.send(JSON.stringify(message)); + }); + } + /** + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * Sends a stream of {@link GenerativeContentBlob}. + * + * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendMediaStream(mediaChunkStream) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const reader = mediaChunkStream.getReader(); + while (true) { + try { + const { done, value } = await reader.read(); + if (done) { + break; + } + else if (!value) { + throw new Error('Missing chunk in reader, but reader is not done.'); + } + await this.sendMediaChunks([value]); + } + catch (e) { + // Re-throw any errors that occur during stream consumption or sending. + const message = e instanceof Error ? e.message : 'Error processing media stream.'; + throw new AIError(AIErrorCode.REQUEST_ERROR, message); + } + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for Live generative model APIs. 
The Live API enables low-latency, two-way multimodal + * interactions with Gemini. + * + * This class should only be instantiated with {@link getLiveGenerativeModel}. + * + * @beta + */ +class LiveGenerativeModel extends AIModel { + /** + * @internal + */ + constructor(ai, modelParams, + /** + * @internal + */ + _webSocketHandler) { + super(ai, modelParams.model); + this._webSocketHandler = _webSocketHandler; + this.generationConfig = modelParams.generationConfig || {}; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + } + /** + * Starts a {@link LiveSession}. + * + * @returns A {@link LiveSession}. + * @throws If the connection failed to be established with the server. + * + * @beta + */ + async connect() { + const url = new WebSocketUrl(this._apiSettings); + await this._webSocketHandler.connect(url.toString()); + let fullModelPath; + if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + fullModelPath = `projects/${this._apiSettings.project}/${this.model}`; + } + else { + fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`; + } + // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API, + // but the backend expects them to be in the `setup` message. + const { inputAudioTranscription, outputAudioTranscription, ...generationConfig } = this.generationConfig; + const setupMessage = { + setup: { + model: fullModelPath, + generationConfig, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + inputAudioTranscription, + outputAudioTranscription + } + }; + try { + // Begin listening for server messages, and begin the handshake by sending the 'setupMessage' + const serverMessages = this._webSocketHandler.listen(); + this._webSocketHandler.send(JSON.stringify(setupMessage)); + // Verify we received the handshake response 'setupComplete' + const firstMessage = (await serverMessages.next()).value; + if (!firstMessage || + !(typeof firstMessage === 'object') || + !('setupComplete' in firstMessage)) { + await this._webSocketHandler.close(1011, 'Handshake failure'); + throw new AIError(AIErrorCode.RESPONSE_ERROR, 'Server connection handshake failed. The server did not respond with a setupComplete message.'); + } + return new LiveSession(this._webSocketHandler, serverMessages); + } + catch (e) { + // Ensure connection is closed on any setup error + await this._webSocketHandler.close(); + throw e; + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for Imagen model APIs. + * + * This class provides methods for generating images using the Imagen model. 
+ * + * @example + * ```javascript + * const imagen = new ImagenModel( + * ai, + * { + * model: 'imagen-3.0-generate-002' + * } + * ); + * + * const response = await imagen.generateImages('A photo of a cat'); + * if (response.images.length > 0) { + * console.log(response.images[0].bytesBase64Encoded); + * } + * ``` + * + * @public + */ +class ImagenModel extends AIModel { + /** + * Constructs a new instance of the {@link ImagenModel} class. + * + * @param ai - an {@link AI} instance. + * @param modelParams - Parameters to use when making requests to Imagen. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + */ + constructor(ai, modelParams, requestOptions) { + const { model, generationConfig, safetySettings } = modelParams; + super(ai, model); + this.requestOptions = requestOptions; + this.generationConfig = generationConfig; + this.safetySettings = safetySettings; + } + /** + * Generates images using the Imagen model and returns them as + * base64-encoded strings. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the generated images. + * + * @throws If the request to generate images fails. This happens if the + * prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + * + * @public + */ + async generateImages(prompt) { + const body = createPredictRequestBody(prompt, { + ...this.generationConfig, + ...this.safetySettings + }); + const response = await makeRequest(this.model, Task.PREDICT, this._apiSettings, + /* stream */ false, JSON.stringify(body), this.requestOptions); + return handlePredictResponse(response); + } + /** + * Generates images to Cloud Storage for Firebase using the Imagen model. + * + * @internal This method is temporarily internal. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket. + * This should be a directory. For example, `gs://my-bucket/my-directory/`. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the URLs of the generated images. + * + * @throws If the request fails to generate images fails. This happens if + * the prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + */ + async generateImagesGCS(prompt, gcsURI) { + const body = createPredictRequestBody(prompt, { + gcsURI, + ...this.generationConfig, + ...this.safetySettings + }); + const response = await makeRequest(this.model, Task.PREDICT, this._apiSettings, + /* stream */ false, JSON.stringify(body), this.requestOptions); + return handlePredictResponse(response); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22. + * + * @internal + */ +class WebSocketHandlerImpl { + constructor() { + if (typeof WebSocket === 'undefined') { + throw new AIError(AIErrorCode.UNSUPPORTED, 'The WebSocket API is not available in this environment. ' + + 'The "Live" feature is not supported here. It is supported in ' + + 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'); + } + } + connect(url) { + return new Promise((resolve, reject) => { + this.ws = new WebSocket(url); + this.ws.binaryType = 'blob'; // Only important to set in Node + this.ws.addEventListener('open', () => resolve(), { once: true }); + this.ws.addEventListener('error', () => reject(new AIError(AIErrorCode.FETCH_ERROR, `Error event raised on WebSocket`)), { once: true }); + this.ws.addEventListener('close', (closeEvent) => { + if (closeEvent.reason) { + logger.warn(`WebSocket connection closed by server. Reason: '${closeEvent.reason}'`); + } + }); + }); + } + send(data) { + if (!this.ws || this.ws.readyState !== WebSocket.OPEN) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.'); + } + this.ws.send(data); + } + async *listen() { + if (!this.ws) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not connected.'); + } + const messageQueue = []; + const errorQueue = []; + let resolvePromise = null; + let isClosed = false; + const messageListener = async (event) => { + let data; + if (event.data instanceof Blob) { + data = await event.data.text(); + } + else if (typeof event.data === 'string') { + data = event.data; + } + else { + errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`)); + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + return; + } + try { + const obj = JSON.parse(data); + messageQueue.push(obj); + } + catch (e) { + const err = e; + errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing WebSocket message to JSON: ${err.message}`)); + } + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + }; + const errorListener = () => { + errorQueue.push(new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')); + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + }; + const closeListener = (event) => { + if (event.reason) { + logger.warn(`WebSocket connection closed by the server with reason: ${event.reason}`); + } + isClosed = true; + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + // Clean up listeners to prevent memory leaks + this.ws?.removeEventListener('message', messageListener); + this.ws?.removeEventListener('close', closeListener); + this.ws?.removeEventListener('error', errorListener); + }; + this.ws.addEventListener('message', messageListener); + this.ws.addEventListener('close', closeListener); + this.ws.addEventListener('error', errorListener); + while (!isClosed) { + if (errorQueue.length > 0) { + const error = errorQueue.shift(); + throw error; + } + if (messageQueue.length > 0) { + yield messageQueue.shift(); + } + else { + await new Promise(resolve => { + resolvePromise = resolve; + }); + } + } + // If the loop terminated because isClosed is true, check for any final errors + if (errorQueue.length > 0) { + const error = errorQueue.shift(); + throw error; + } + } + close(code, reason) { + return new Promise(resolve => { + if (!this.ws) { + return resolve(); + } + this.ws.addEventListener('close', () => resolve(), { once: true }); + // Calling 'close' during these states results in an error. + if (this.ws.readyState === WebSocket.CLOSED || + this.ws.readyState === WebSocket.CONNECTING) { + return resolve(); + } + if (this.ws.readyState !== WebSocket.CLOSING) { + this.ws.close(code, reason); + } + }); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Parent class encompassing all Schema types, with static methods that + * allow building specific Schema types. This class can be converted with + * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints. + * (This string conversion is automatically done when calling SDK methods.) 
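+ *
+ * @example
+ * A sketch of composing a schema with the static builders defined below; the property names
+ * are placeholders:
+ * ```javascript
+ * const citySchema = Schema.object({
+ *   properties: {
+ *     name: Schema.string(),
+ *     population: Schema.integer()
+ *   },
+ *   optionalProperties: ['population']
+ * });
+ * JSON.stringify(citySchema); // serialized via toJSON(); 'name' ends up in "required"
+ * ```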
+ * @public + */ +class Schema { + constructor(schemaParams) { + // TODO(dlarocque): Enforce this with union types + if (!schemaParams.type && !schemaParams.anyOf) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, "A schema must have either a 'type' or an 'anyOf' array of sub-schemas."); + } + // eslint-disable-next-line guard-for-in + for (const paramKey in schemaParams) { + this[paramKey] = schemaParams[paramKey]; + } + // Ensure these are explicitly set to avoid TS errors. + this.type = schemaParams.type; + this.format = schemaParams.hasOwnProperty('format') + ? schemaParams.format + : undefined; + this.nullable = schemaParams.hasOwnProperty('nullable') + ? !!schemaParams.nullable + : false; + } + /** + * Defines how this Schema should be serialized as JSON. + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior + * @internal + */ + toJSON() { + const obj = { + type: this.type + }; + for (const prop in this) { + if (this.hasOwnProperty(prop) && this[prop] !== undefined) { + if (prop !== 'required' || this.type === SchemaType.OBJECT) { + obj[prop] = this[prop]; + } + } + } + return obj; + } + static array(arrayParams) { + return new ArraySchema(arrayParams, arrayParams.items); + } + static object(objectParams) { + return new ObjectSchema(objectParams, objectParams.properties, objectParams.optionalProperties); + } + // eslint-disable-next-line id-blacklist + static string(stringParams) { + return new StringSchema(stringParams); + } + static enumString(stringParams) { + return new StringSchema(stringParams, stringParams.enum); + } + static integer(integerParams) { + return new IntegerSchema(integerParams); + } + // eslint-disable-next-line id-blacklist + static number(numberParams) { + return new NumberSchema(numberParams); + } + // eslint-disable-next-line id-blacklist + static boolean(booleanParams) { + return new BooleanSchema(booleanParams); + } + static anyOf(anyOfParams) { + return new AnyOfSchema(anyOfParams); + } +} +/** + * Schema class for "integer" types. + * @public + */ +class IntegerSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.INTEGER, + ...schemaParams + }); + } +} +/** + * Schema class for "number" types. + * @public + */ +class NumberSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.NUMBER, + ...schemaParams + }); + } +} +/** + * Schema class for "boolean" types. + * @public + */ +class BooleanSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.BOOLEAN, + ...schemaParams + }); + } +} +/** + * Schema class for "string" types. Can be used with or without + * enum values. + * @public + */ +class StringSchema extends Schema { + constructor(schemaParams, enumValues) { + super({ + type: SchemaType.STRING, + ...schemaParams + }); + this.enum = enumValues; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + if (this.enum) { + obj['enum'] = this.enum; + } + return obj; + } +} +/** + * Schema class for "array" types. + * The `items` param should refer to the type of item that can be a member + * of the array. + * @public + */ +class ArraySchema extends Schema { + constructor(schemaParams, items) { + super({ + type: SchemaType.ARRAY, + ...schemaParams + }); + this.items = items; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + obj.items = this.items.toJSON(); + return obj; + } +} +/** + * Schema class for "object" types. 
+ * The `properties` param must be a map of `Schema` objects. + * @public + */ +class ObjectSchema extends Schema { + constructor(schemaParams, properties, optionalProperties = []) { + super({ + type: SchemaType.OBJECT, + ...schemaParams + }); + this.properties = properties; + this.optionalProperties = optionalProperties; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + obj.properties = { ...this.properties }; + const required = []; + if (this.optionalProperties) { + for (const propertyKey of this.optionalProperties) { + if (!this.properties.hasOwnProperty(propertyKey)) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, `Property "${propertyKey}" specified in "optionalProperties" does not exist.`); + } + } + } + for (const propertyKey in this.properties) { + if (this.properties.hasOwnProperty(propertyKey)) { + obj.properties[propertyKey] = this.properties[propertyKey].toJSON(); + if (!this.optionalProperties.includes(propertyKey)) { + required.push(propertyKey); + } + } + } + if (required.length > 0) { + obj.required = required; + } + delete obj.optionalProperties; + return obj; + } +} +/** + * Schema class representing a value that can conform to any of the provided sub-schemas. This is + * useful when a field can accept multiple distinct types or structures. + * @public + */ +class AnyOfSchema extends Schema { + constructor(schemaParams) { + if (schemaParams.anyOf.length === 0) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, "The 'anyOf' array must not be empty."); + } + super({ + ...schemaParams, + type: undefined // anyOf schemas do not have an explicit type + }); + this.anyOf = schemaParams.anyOf; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + // Ensure the 'anyOf' property contains serialized SchemaRequest objects. + if (this.anyOf && Array.isArray(this.anyOf)) { + obj.anyOf = this.anyOf.map(s => s.toJSON()); + } + return obj; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Defines the image format for images generated by Imagen. + * + * Use this class to specify the desired format (JPEG or PNG) and compression quality + * for images generated by Imagen. This is typically included as part of + * {@link ImagenModelParams}. + * + * @example + * ```javascript + * const imagenModelParams = { + * // ... other ImagenModelParams + * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75. + * } + * ``` + * + * @public + */ +class ImagenImageFormat { + constructor() { + this.mimeType = 'image/png'; + } + /** + * Creates an {@link ImagenImageFormat} for a JPEG image. + * + * @param compressionQuality - The level of compression (a number between 0 and 100). + * @returns An {@link ImagenImageFormat} object for a JPEG image. 
+ * + * @public + */ + static jpeg(compressionQuality) { + if (compressionQuality && + (compressionQuality < 0 || compressionQuality > 100)) { + logger.warn(`Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`); + } + return { mimeType: 'image/jpeg', compressionQuality }; + } + /** + * Creates an {@link ImagenImageFormat} for a PNG image. + * + * @returns An {@link ImagenImageFormat} object for a PNG image. + * + * @public + */ + static png() { + return { mimeType: 'image/png' }; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const SERVER_INPUT_SAMPLE_RATE = 16000; +const SERVER_OUTPUT_SAMPLE_RATE = 24000; +const AUDIO_PROCESSOR_NAME = 'audio-processor'; +/** + * The JS for an `AudioWorkletProcessor`. + * This processor is responsible for taking raw audio from the microphone, + * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread. + * + * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor + * + * It is defined as a string here so that it can be converted into a `Blob` + * and loaded at runtime. + */ +const audioProcessorWorkletString = ` + class AudioProcessor extends AudioWorkletProcessor { + constructor(options) { + super(); + this.targetSampleRate = options.processorOptions.targetSampleRate; + // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope, + // representing the native sample rate of the AudioContext. + this.inputSampleRate = sampleRate; + } + + /** + * This method is called by the browser's audio engine for each block of audio data. + * Input is a single input, with a single channel (input[0][0]). + */ + process(inputs) { + const input = inputs[0]; + if (input && input.length > 0 && input[0].length > 0) { + const pcmData = input[0]; // Float32Array of raw audio samples. + + // Simple linear interpolation for resampling. + const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate / this.inputSampleRate)); + const ratio = pcmData.length / resampled.length; + for (let i = 0; i < resampled.length; i++) { + resampled[i] = pcmData[Math.floor(i * ratio)]; + } + + // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767) + const resampledInt16 = new Int16Array(resampled.length); + for (let i = 0; i < resampled.length; i++) { + const sample = Math.max(-1, Math.min(1, resampled[i])); + if (sample < 0) { + resampledInt16[i] = sample * 32768; + } else { + resampledInt16[i] = sample * 32767; + } + } + + this.port.postMessage(resampledInt16); + } + // Return true to keep the processor alive and processing the next audio block. + return true; + } + } + + // Register the processor with a name that can be used to instantiate it from the main thread. + registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor); +`; +/** + * Encapsulates the core logic of an audio conversation. 
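+ *
+ * In summary (mirroring the implementation below): microphone audio is resampled to 16 kHz
+ * 16-bit PCM by the AudioWorklet, base64-encoded, and forwarded with
+ * {@link LiveSession.sendAudioRealtime}; 24 kHz PCM chunks received from the server are pushed
+ * onto a playback queue and scheduled back-to-back, and a server-side interruption cancels all
+ * scheduled audio and clears that queue.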
+ * + * @internal + */ +class AudioConversationRunner { + constructor(liveSession, options, deps) { + this.liveSession = liveSession; + this.options = options; + this.deps = deps; + /** A flag to indicate if the conversation has been stopped. */ + this.isStopped = false; + /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */ + this.stopDeferred = new util.Deferred(); + /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */ + this.playbackQueue = []; + /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */ + this.scheduledSources = []; + /** A high-precision timeline pointer for scheduling gapless audio playback. */ + this.nextStartTime = 0; + /** A mutex to prevent the playback processing loop from running multiple times concurrently. */ + this.isPlaybackLoopRunning = false; + this.liveSession.inConversation = true; + // Start listening for messages from the server. + this.receiveLoopPromise = this.runReceiveLoop().finally(() => this.cleanup()); + // Set up the handler for receiving processed audio data from the worklet. + // Message data has been resampled to 16kHz 16-bit PCM. + this.deps.workletNode.port.onmessage = event => { + if (this.isStopped) { + return; + } + const pcm16 = event.data; + const base64 = btoa(String.fromCharCode.apply(null, Array.from(new Uint8Array(pcm16.buffer)))); + const chunk = { + mimeType: 'audio/pcm', + data: base64 + }; + void this.liveSession.sendAudioRealtime(chunk); + }; + } + /** + * Stops the conversation and unblocks the main receive loop. + */ + async stop() { + if (this.isStopped) { + return; + } + this.isStopped = true; + this.stopDeferred.resolve(); // Unblock the receive loop + await this.receiveLoopPromise; // Wait for the loop and cleanup to finish + } + /** + * Cleans up all audio resources (nodes, stream tracks, context) and marks the + * session as no longer in a conversation. + */ + cleanup() { + this.interruptPlayback(); // Ensure all audio is stopped on final cleanup. + this.deps.workletNode.port.onmessage = null; + this.deps.workletNode.disconnect(); + this.deps.sourceNode.disconnect(); + this.deps.mediaStream.getTracks().forEach(track => track.stop()); + if (this.deps.audioContext.state !== 'closed') { + void this.deps.audioContext.close(); + } + this.liveSession.inConversation = false; + } + /** + * Adds audio data to the queue and ensures the playback loop is running. + */ + enqueueAndPlay(audioData) { + this.playbackQueue.push(audioData); + // Will no-op if it's already running. + void this.processPlaybackQueue(); + } + /** + * Stops all current and pending audio playback and clears the queue. This is + * called when the server indicates the model's speech was interrupted with + * `LiveServerContent.modelTurn.interrupted`. + */ + interruptPlayback() { + // Stop all sources that have been scheduled. The onended event will fire for each, + // which will clean up the scheduledSources array. + [...this.scheduledSources].forEach(source => source.stop(0)); + // Clear the internal buffer of unprocessed audio chunks. + this.playbackQueue.length = 0; + // Reset the playback clock to start fresh. + this.nextStartTime = this.deps.audioContext.currentTime; + } + /** + * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence. 
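+ * Each dequeued chunk is decoded from 16-bit PCM into Float32 samples, wrapped in an
+ * AudioBuffer at the 24 kHz server output rate, and started at
+ * max(audioContext.currentTime, nextStartTime); nextStartTime then advances by the buffer's
+ * duration so consecutive chunks play without gaps.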
+ */ + async processPlaybackQueue() { + if (this.isPlaybackLoopRunning) { + return; + } + this.isPlaybackLoopRunning = true; + while (this.playbackQueue.length > 0 && !this.isStopped) { + const pcmRawBuffer = this.playbackQueue.shift(); + try { + const pcm16 = new Int16Array(pcmRawBuffer); + const frameCount = pcm16.length; + const audioBuffer = this.deps.audioContext.createBuffer(1, frameCount, SERVER_OUTPUT_SAMPLE_RATE); + // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API. + const channelData = audioBuffer.getChannelData(0); + for (let i = 0; i < frameCount; i++) { + channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0] + } + const source = this.deps.audioContext.createBufferSource(); + source.buffer = audioBuffer; + source.connect(this.deps.audioContext.destination); + // Track the source and set up a handler to remove it from tracking when it finishes. + this.scheduledSources.push(source); + source.onended = () => { + this.scheduledSources = this.scheduledSources.filter(s => s !== source); + }; + // To prevent gaps, schedule the next chunk to start either now (if we're catching up) + // or exactly when the previous chunk is scheduled to end. + this.nextStartTime = Math.max(this.deps.audioContext.currentTime, this.nextStartTime); + source.start(this.nextStartTime); + // Update the schedule for the *next* chunk. + this.nextStartTime += audioBuffer.duration; + } + catch (e) { + logger.error('Error playing audio:', e); + } + } + this.isPlaybackLoopRunning = false; + } + /** + * The main loop that listens for and processes messages from the server. + */ + async runReceiveLoop() { + const messageGenerator = this.liveSession.receive(); + while (!this.isStopped) { + const result = await Promise.race([ + messageGenerator.next(), + this.stopDeferred.promise + ]); + if (this.isStopped || !result || result.done) { + break; + } + const message = result.value; + if (message.type === 'serverContent') { + const serverContent = message; + if (serverContent.interrupted) { + this.interruptPlayback(); + } + const audioPart = serverContent.modelTurn?.parts.find(part => part.inlineData?.mimeType.startsWith('audio/')); + if (audioPart?.inlineData) { + const audioData = Uint8Array.from(atob(audioPart.inlineData.data), c => c.charCodeAt(0)).buffer; + this.enqueueAndPlay(audioData); + } + } + else if (message.type === 'toolCall') { + if (!this.options.functionCallingHandler) { + logger.warn('Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. Ignoring tool call.'); + } + else { + try { + const functionResponse = await this.options.functionCallingHandler(message.functionCalls); + if (!this.isStopped) { + void this.liveSession.sendFunctionResponses([functionResponse]); + } + } + catch (e) { + throw new AIError(AIErrorCode.ERROR, `Function calling handler failed: ${e.message}`); + } + } + } + } + } +} +/** + * Starts a real-time, bidirectional audio conversation with the model. This helper function manages + * the complexities of microphone access, audio recording, playback, and interruptions. + * + * @remarks Important: This function must be called in response to a user gesture + * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}. 
+ * + * @example + * ```javascript + * const liveSession = await model.connect(); + * let conversationController; + * + * // This function must be called from within a click handler. + * async function startConversation() { + * try { + * conversationController = await startAudioConversation(liveSession); + * } catch (e) { + * // Handle AI-specific errors + * if (e instanceof AIError) { + * console.error("AI Error:", e.message); + * } + * // Handle microphone permission and hardware errors + * else if (e instanceof DOMException) { + * console.error("Microphone Error:", e.message); + * } + * // Handle other unexpected errors + * else { + * console.error("An unexpected error occurred:", e); + * } + * } + * } + * + * // Later, to stop the conversation: + * // if (conversationController) { + * // await conversationController.stop(); + * // } + * ``` + * + * @param liveSession - An active {@link LiveSession} instance. + * @param options - Configuration options for the audio conversation. + * @returns A `Promise` that resolves with an {@link AudioConversationController}. + * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`). + * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions. + * + * @beta + */ +async function startAudioConversation(liveSession, options = {}) { + if (liveSession.isClosed) { + throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot start audio conversation on a closed LiveSession.'); + } + if (liveSession.inConversation) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'An audio conversation is already in progress for this session.'); + } + // Check for necessary Web API support. + if (typeof AudioWorkletNode === 'undefined' || + typeof AudioContext === 'undefined' || + typeof navigator === 'undefined' || + !navigator.mediaDevices) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'); + } + let audioContext; + try { + // 1. Set up the audio context. This must be in response to a user gesture. + // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy + audioContext = new AudioContext(); + if (audioContext.state === 'suspended') { + await audioContext.resume(); + } + // 2. Prompt for microphone access and get the media stream. + // This can throw a variety of permission or hardware-related errors. + const mediaStream = await navigator.mediaDevices.getUserMedia({ + audio: true + }); + // 3. Load the AudioWorklet processor. + // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet + const workletBlob = new Blob([audioProcessorWorkletString], { + type: 'application/javascript' + }); + const workletURL = URL.createObjectURL(workletBlob); + await audioContext.audioWorklet.addModule(workletURL); + // 4. 
Create the audio graph: Microphone -> Source Node -> Worklet Node + const sourceNode = audioContext.createMediaStreamSource(mediaStream); + const workletNode = new AudioWorkletNode(audioContext, AUDIO_PROCESSOR_NAME, { + processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE } + }); + sourceNode.connect(workletNode); + // 5. Instantiate and return the runner which manages the conversation. + const runner = new AudioConversationRunner(liveSession, options, { + audioContext, + mediaStream, + sourceNode, + workletNode + }); + return { stop: () => runner.stop() }; + } + catch (e) { + // Ensure the audio context is closed on any setup error. + if (audioContext && audioContext.state !== 'closed') { + void audioContext.close(); + } + // Re-throw specific, known error types directly. The user may want to handle `DOMException` + // errors differently (for example, if permission to access audio device was denied). + if (e instanceof AIError || e instanceof DOMException) { + throw e; + } + // Wrap any other unexpected errors in a standard AIError. + throw new AIError(AIErrorCode.ERROR, `Failed to initialize audio recording: ${e.message}`); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Returns the default {@link AI} instance that is associated with the provided + * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the + * default settings. + * + * @example + * ```javascript + * const ai = getAI(app); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Gemini Developer API (via Google AI). + * const ai = getAI(app, { backend: new GoogleAIBackend() }); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Vertex AI Gemini API. + * const ai = getAI(app, { backend: new VertexAIBackend() }); + * ``` + * + * @param app - The {@link @firebase/app#FirebaseApp} to use. + * @param options - {@link AIOptions} that configure the AI instance. + * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}. + * + * @public + */ +function getAI(app$1 = app.getApp(), options) { + app$1 = util.getModularInstance(app$1); + // Dependencies + const AIProvider = app._getProvider(app$1, AI_TYPE); + const backend = options?.backend ?? new GoogleAIBackend(); + const finalOptions = { + useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false + }; + const identifier = encodeInstanceIdentifier(backend); + const aiInstance = AIProvider.getImmediate({ + identifier + }); + aiInstance.options = finalOptions; + return aiInstance; +} +/** + * Returns a {@link GenerativeModel} class with methods for inference + * and other functionality. + * + * @public + */ +function getGenerativeModel(ai, modelParams, requestOptions) { + // Uses the existence of HybridParams.mode to clarify the type of the modelParams input. 
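+ // If `mode` is set, the input is treated as HybridParams: `inCloudParams` (or the default
+ // in-cloud model) configures the cloud path, while `mode` and `onDeviceParams` are forwarded
+ // to the Chrome adapter below. Without `mode`, the input is treated as plain ModelParams.
+ // For example (illustrative): getGenerativeModel(ai, { mode: 'prefer_on_device' }) vs.
+ // getGenerativeModel(ai, { model: 'my-model-name' }).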
+ const hybridParams = modelParams; + let inCloudParams; + if (hybridParams.mode) { + inCloudParams = hybridParams.inCloudParams || { + model: DEFAULT_HYBRID_IN_CLOUD_MODEL + }; + } + else { + inCloudParams = modelParams; + } + if (!inCloudParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`); + } + /** + * An AIService registered by index.node.ts will not have a + * chromeAdapterFactory() method. + */ + const chromeAdapter = ai.chromeAdapterFactory?.(hybridParams.mode, typeof window === 'undefined' ? undefined : window, hybridParams.onDeviceParams); + return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter); +} +/** + * Returns an {@link ImagenModel} class with methods for using Imagen. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when making Imagen requests. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @public + */ +function getImagenModel(ai, modelParams, requestOptions) { + if (!modelParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`); + } + return new ImagenModel(ai, modelParams, requestOptions); +} +/** + * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication. + * + * The Live API is only supported in modern browser windows and Node >= 22. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when setting up a {@link LiveSession}. + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @beta + */ +function getLiveGenerativeModel(ai, modelParams) { + if (!modelParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`); + } + const webSocketHandler = new WebSocketHandlerImpl(); + return new LiveGenerativeModel(ai, modelParams, webSocketHandler); +} + +/** + * The Firebase AI Web SDK. 
+ * + * @packageDocumentation + */ +function registerAI() { + app._registerComponent(new component.Component(AI_TYPE, factory, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true)); + app.registerVersion(name, version); + // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation + app.registerVersion(name, version, 'cjs2020'); +} +registerAI(); + +exports.AIError = AIError; +exports.AIErrorCode = AIErrorCode; +exports.AIModel = AIModel; +exports.AnyOfSchema = AnyOfSchema; +exports.ArraySchema = ArraySchema; +exports.Backend = Backend; +exports.BackendType = BackendType; +exports.BlockReason = BlockReason; +exports.BooleanSchema = BooleanSchema; +exports.ChatSession = ChatSession; +exports.FinishReason = FinishReason; +exports.FunctionCallingMode = FunctionCallingMode; +exports.GenerativeModel = GenerativeModel; +exports.GoogleAIBackend = GoogleAIBackend; +exports.HarmBlockMethod = HarmBlockMethod; +exports.HarmBlockThreshold = HarmBlockThreshold; +exports.HarmCategory = HarmCategory; +exports.HarmProbability = HarmProbability; +exports.HarmSeverity = HarmSeverity; +exports.ImagenAspectRatio = ImagenAspectRatio; +exports.ImagenImageFormat = ImagenImageFormat; +exports.ImagenModel = ImagenModel; +exports.ImagenPersonFilterLevel = ImagenPersonFilterLevel; +exports.ImagenSafetyFilterLevel = ImagenSafetyFilterLevel; +exports.InferenceMode = InferenceMode; +exports.InferenceSource = InferenceSource; +exports.IntegerSchema = IntegerSchema; +exports.Language = Language; +exports.LiveGenerativeModel = LiveGenerativeModel; +exports.LiveResponseType = LiveResponseType; +exports.LiveSession = LiveSession; +exports.Modality = Modality; +exports.NumberSchema = NumberSchema; +exports.ObjectSchema = ObjectSchema; +exports.Outcome = Outcome; +exports.POSSIBLE_ROLES = POSSIBLE_ROLES; +exports.ResponseModality = ResponseModality; +exports.Schema = Schema; +exports.SchemaType = SchemaType; +exports.StringSchema = StringSchema; +exports.URLRetrievalStatus = URLRetrievalStatus; +exports.VertexAIBackend = VertexAIBackend; +exports.getAI = getAI; +exports.getGenerativeModel = getGenerativeModel; +exports.getImagenModel = getImagenModel; +exports.getLiveGenerativeModel = getLiveGenerativeModel; +exports.startAudioConversation = startAudioConversation; +//# sourceMappingURL=index.cjs.js.map diff --git a/frontend-old/node_modules/@firebase/ai/dist/index.cjs.js.map b/frontend-old/node_modules/@firebase/ai/dist/index.cjs.js.map new file mode 100644 index 0000000..e7b0d4b --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/index.cjs.js.map @@ -0,0 +1 @@ 
+{"version":3,"file":"index.cjs.js","sources":["../src/constants.ts","../src/errors.ts","../src/types/enums.ts","../src/types/responses.ts","../src/types/error.ts","../src/types/schema.ts","../src/types/imagen/requests.ts","../src/public-types.ts","../src/backend.ts","../src/helpers.ts","../src/logger.ts","../src/types/language-model.ts","../src/methods/chrome-adapter.ts","../src/service.ts","../src/factory-browser.ts","../src/models/ai-model.ts","../src/requests/request.ts","../src/requests/response-helpers.ts","../src/googleai-mappers.ts","../src/requests/stream-reader.ts","../src/requests/hybrid-helpers.ts","../src/methods/generate-content.ts","../src/requests/request-helpers.ts","../src/methods/chat-session-helpers.ts","../src/methods/chat-session.ts","../src/methods/count-tokens.ts","../src/models/generative-model.ts","../src/methods/live-session.ts","../src/models/live-generative-model.ts","../src/models/imagen-model.ts","../src/websocket.ts","../src/requests/schema-builder.ts","../src/requests/imagen-image-format.ts","../src/methods/live-session-helpers.ts","../src/api.ts","../src/index.ts"],"sourcesContent":["/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { version } from '../package.json';\n\nexport const AI_TYPE = 'AI';\n\nexport const DEFAULT_LOCATION = 'us-central1';\n\nexport const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com';\n\nexport const DEFAULT_API_VERSION = 'v1beta';\n\nexport const PACKAGE_VERSION = version;\n\nexport const LANGUAGE_TAG = 'gl-js';\n\nexport const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;\n\n/**\n * Defines the name of the default in-cloud model to use for hybrid inference.\n */\nexport const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseError } from '@firebase/util';\nimport { AIErrorCode, CustomErrorData } from './types';\nimport { AI_TYPE } from './constants';\n\n/**\n * Error class for the Firebase AI SDK.\n *\n * @public\n */\nexport class AIError extends FirebaseError {\n /**\n * Constructs a new instance of the `AIError` class.\n *\n * @param code - The error code from {@link (AIErrorCode:type)}.\n * @param message - A human-readable message describing the error.\n * @param customErrorData - Optional error data.\n */\n constructor(\n readonly code: AIErrorCode,\n message: string,\n readonly customErrorData?: CustomErrorData\n 
) {\n // Match error format used by FirebaseError from ErrorFactory\n const service = AI_TYPE;\n const fullCode = `${service}/${code}`;\n const fullMessage = `${service}: ${message} (${fullCode})`;\n super(code, fullMessage);\n\n // FirebaseError initializes a stack trace, but it assumes the error is created from the error\n // factory. Since we break this assumption, we set the stack trace to be originating from this\n // constructor.\n // This is only supported in V8.\n if (Error.captureStackTrace) {\n // Allows us to initialize the stack trace without including the constructor itself at the\n // top level of the stack trace.\n Error.captureStackTrace(this, AIError);\n }\n\n // Allows instanceof AIError in ES5/ES6\n // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work\n // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget\n // which we can now use since we no longer target ES5.\n Object.setPrototypeOf(this, AIError.prototype);\n\n // Since Error is an interface, we don't inherit toString and so we define it ourselves.\n this.toString = () => fullMessage;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Role is the producer of the content.\n * @public\n */\nexport type Role = (typeof POSSIBLE_ROLES)[number];\n\n/**\n * Possible roles.\n * @public\n */\nexport const POSSIBLE_ROLES = ['user', 'model', 'function', 'system'] as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport const HarmCategory = {\n HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'\n} as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport const HarmBlockThreshold = {\n /**\n * Content with `NEGLIGIBLE` will be allowed.\n */\n BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE` and `LOW` will be allowed.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.\n */\n BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',\n /**\n * All content will be allowed.\n */\n BLOCK_NONE: 'BLOCK_NONE',\n /**\n * All content will be allowed. 
This is the same as `BLOCK_NONE`, but the metadata corresponding\n * to the {@link (HarmCategory:type)} will not be present in the response.\n */\n OFF: 'OFF'\n} as const;\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport type HarmBlockThreshold =\n (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport const HarmBlockMethod = {\n /**\n * The harm block method uses both probability and severity scores.\n */\n SEVERITY: 'SEVERITY',\n /**\n * The harm block method uses the probability score.\n */\n PROBABILITY: 'PROBABILITY'\n} as const;\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport type HarmBlockMethod =\n (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport const HarmProbability = {\n /**\n * Content has a negligible chance of being unsafe.\n */\n NEGLIGIBLE: 'NEGLIGIBLE',\n /**\n * Content has a low chance of being unsafe.\n */\n LOW: 'LOW',\n /**\n * Content has a medium chance of being unsafe.\n */\n MEDIUM: 'MEDIUM',\n /**\n * Content has a high chance of being unsafe.\n */\n HIGH: 'HIGH'\n} as const;\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport type HarmProbability =\n (typeof HarmProbability)[keyof typeof HarmProbability];\n\n/**\n * Harm severity levels.\n * @public\n */\nexport const HarmSeverity = {\n /**\n * Negligible level of harm severity.\n */\n HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE',\n /**\n * Low level of harm severity.\n */\n HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW',\n /**\n * Medium level of harm severity.\n */\n HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM',\n /**\n * High level of harm severity.\n */\n HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH',\n /**\n * Harm severity is not supported.\n *\n * @remarks\n * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.\n */\n HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED'\n} as const;\n\n/**\n * Harm severity levels.\n * @public\n */\nexport type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport const BlockReason = {\n /**\n * Content was blocked by safety settings.\n */\n SAFETY: 'SAFETY',\n /**\n * Content was blocked, but the reason is uncategorized.\n */\n OTHER: 'OTHER',\n /**\n * Content was blocked because it contained terms from the terminology blocklist.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * Content was blocked due to prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT'\n} as const;\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport const FinishReason = {\n /**\n * Natural stop point of the model or provided stop sequence.\n */\n STOP: 'STOP',\n /**\n * The maximum number of tokens as specified in the request was reached.\n */\n MAX_TOKENS: 'MAX_TOKENS',\n /**\n * The candidate content was flagged for safety reasons.\n */\n SAFETY: 'SAFETY',\n /**\n * The candidate content was flagged for recitation reasons.\n */\n RECITATION: 'RECITATION',\n /**\n * Unknown reason.\n */\n OTHER: 'OTHER',\n /**\n * The 
candidate content contained forbidden terms.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * The candidate content potentially contained prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT',\n /**\n * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).\n */\n SPII: 'SPII',\n /**\n * The function call generated by the model was invalid.\n */\n MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL'\n} as const;\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];\n\n/**\n * @public\n */\nexport const FunctionCallingMode = {\n /**\n * Default model behavior; model decides to predict either a function call\n * or a natural language response.\n */\n AUTO: 'AUTO',\n /**\n * Model is constrained to always predicting a function call only.\n * If `allowed_function_names` is set, the predicted function call will be\n * limited to any one of `allowed_function_names`, else the predicted\n * function call will be any one of the provided `function_declarations`.\n */\n ANY: 'ANY',\n /**\n * Model will not predict any function call. Model behavior is same as when\n * not passing any function declarations.\n */\n NONE: 'NONE'\n} as const;\n\n/**\n * @public\n */\nexport type FunctionCallingMode =\n (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];\n\n/**\n * Content part modality.\n * @public\n */\nexport const Modality = {\n /**\n * Unspecified modality.\n */\n MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED',\n /**\n * Plain text.\n */\n TEXT: 'TEXT',\n /**\n * Image.\n */\n IMAGE: 'IMAGE',\n /**\n * Video.\n */\n VIDEO: 'VIDEO',\n /**\n * Audio.\n */\n AUDIO: 'AUDIO',\n /**\n * Document (for example, PDF).\n */\n DOCUMENT: 'DOCUMENT'\n} as const;\n\n/**\n * Content part modality.\n * @public\n */\nexport type Modality = (typeof Modality)[keyof typeof Modality];\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport const ResponseModality = {\n /**\n * Text.\n * @beta\n */\n TEXT: 'TEXT',\n /**\n * Image.\n * @beta\n */\n IMAGE: 'IMAGE',\n /**\n * Audio.\n * @beta\n */\n AUDIO: 'AUDIO'\n} as const;\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport type ResponseModality =\n (typeof ResponseModality)[keyof typeof ResponseModality];\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @remarks\n * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an\n * on-device model. If on-device inference is not available, the SDK\n * will fall back to using a cloud-hosted model.\n * <br/>\n * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an\n * on-device model. The SDK will not fall back to a cloud-hosted model.\n * If on-device inference is not available, inference methods will throw.\n * <br/>\n * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a\n * cloud-hosted model. The SDK will not fall back to an on-device model.\n * <br/>\n * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a\n * cloud-hosted model. 
If not available, the SDK will fall back to an\n * on-device model.\n *\n * @beta\n */\nexport const InferenceMode = {\n 'PREFER_ON_DEVICE': 'prefer_on_device',\n 'ONLY_ON_DEVICE': 'only_on_device',\n 'ONLY_IN_CLOUD': 'only_in_cloud',\n 'PREFER_IN_CLOUD': 'prefer_in_cloud'\n} as const;\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport const InferenceSource = {\n 'ON_DEVICE': 'on_device',\n 'IN_CLOUD': 'in_cloud'\n} as const;\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceSource =\n (typeof InferenceSource)[keyof typeof InferenceSource];\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport const Outcome = {\n UNSPECIFIED: 'OUTCOME_UNSPECIFIED',\n OK: 'OUTCOME_OK',\n FAILED: 'OUTCOME_FAILED',\n DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'\n};\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport type Outcome = (typeof Outcome)[keyof typeof Outcome];\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport const Language = {\n UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',\n PYTHON: 'PYTHON'\n};\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport type Language = (typeof Language)[keyof typeof Language];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, FunctionCall, InlineDataPart } from './content';\nimport {\n BlockReason,\n FinishReason,\n HarmCategory,\n HarmProbability,\n HarmSeverity,\n InferenceSource,\n Modality\n} from './enums';\n\n/**\n * Result object returned from {@link GenerativeModel.generateContent} call.\n *\n * @public\n */\nexport interface GenerateContentResult {\n response: EnhancedGenerateContentResponse;\n}\n\n/**\n * Result object returned from {@link GenerativeModel.generateContentStream} call.\n * Iterate over `stream` to get chunks as they come in and/or\n * use the `response` promise to get the aggregated response when\n * the stream is done.\n *\n * @public\n */\nexport interface GenerateContentStreamResult {\n stream: AsyncGenerator<EnhancedGenerateContentResponse>;\n response: Promise<EnhancedGenerateContentResponse>;\n}\n\n/**\n * Response object wrapped with helper methods.\n *\n * @public\n */\nexport interface EnhancedGenerateContentResponse\n extends GenerateContentResponse {\n /**\n * Returns the text string from the response, if available.\n * Throws if the prompt or candidate was blocked.\n */\n text: () => string;\n /**\n * Aggregates and returns every {@link InlineDataPart} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n inlineDataParts: () => InlineDataPart[] | undefined;\n /**\n * Aggregates and returns every 
{@link FunctionCall} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n functionCalls: () => FunctionCall[] | undefined;\n /**\n * Aggregates and returns every {@link TextPart} with their `thought` property set\n * to `true` from the first candidate of {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n *\n * @remarks\n * Thought summaries provide a brief overview of the model's internal thinking process,\n * offering insight into how it arrived at the final answer. This can be useful for\n * debugging, understanding the model's reasoning, and verifying its accuracy.\n *\n * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is\n * set to `true`.\n */\n thoughtSummary: () => string | undefined;\n /**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\n inferenceSource?: InferenceSource;\n}\n\n/**\n * Individual response from {@link GenerativeModel.generateContent} and\n * {@link GenerativeModel.generateContentStream}.\n * `generateContentStream()` will return one in each chunk until\n * the stream is done.\n * @public\n */\nexport interface GenerateContentResponse {\n candidates?: GenerateContentCandidate[];\n promptFeedback?: PromptFeedback;\n usageMetadata?: UsageMetadata;\n}\n\n/**\n * Usage metadata about a {@link GenerateContentResponse}.\n *\n * @public\n */\nexport interface UsageMetadata {\n promptTokenCount: number;\n candidatesTokenCount: number;\n /**\n * The number of tokens used by the model's internal \"thinking\" process.\n */\n thoughtsTokenCount?: number;\n totalTokenCount: number;\n /**\n * The number of tokens used by tools.\n */\n toolUsePromptTokenCount?: number;\n promptTokensDetails?: ModalityTokenCount[];\n candidatesTokensDetails?: ModalityTokenCount[];\n /**\n * A list of tokens used by tools, broken down by modality.\n */\n toolUsePromptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * Represents token counting info for a single modality.\n *\n * @public\n */\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality: Modality;\n /** The number of tokens counted. 
*/\n tokenCount: number;\n}\n\n/**\n * If the prompt was blocked, this will be populated with `blockReason` and\n * the relevant `safetyRatings`.\n * @public\n */\nexport interface PromptFeedback {\n blockReason?: BlockReason;\n safetyRatings: SafetyRating[];\n /**\n * A human-readable description of the `blockReason`.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n blockReasonMessage?: string;\n}\n\n/**\n * A candidate returned as part of a {@link GenerateContentResponse}.\n * @public\n */\nexport interface GenerateContentCandidate {\n index: number;\n content: Content;\n finishReason?: FinishReason;\n finishMessage?: string;\n safetyRatings?: SafetyRating[];\n citationMetadata?: CitationMetadata;\n groundingMetadata?: GroundingMetadata;\n urlContextMetadata?: URLContextMetadata;\n}\n\n/**\n * Citation metadata that may be found on a {@link GenerateContentCandidate}.\n * @public\n */\nexport interface CitationMetadata {\n citations: Citation[];\n}\n\n/**\n * A single citation.\n * @public\n */\nexport interface Citation {\n startIndex?: number;\n endIndex?: number;\n uri?: string;\n license?: string;\n /**\n * The title of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n title?: string;\n /**\n * The publication date of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n publicationDate?: Date;\n}\n\n/**\n * Metadata returned when grounding is enabled.\n *\n * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * \"Grounding with Google Search\" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}\n * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}\n * section within the Service Specific Terms).\n *\n * @public\n */\nexport interface GroundingMetadata {\n /**\n * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be\n * embedded in an app to display a Google Search entry point for follow-up web searches related to\n * a model's \"Grounded Response\".\n */\n searchEntryPoint?: SearchEntrypoint;\n /**\n * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content\n * (for example, from a web page). that the model used to ground its response.\n */\n groundingChunks?: GroundingChunk[];\n /**\n * A list of {@link GroundingSupport} objects. Each object details how specific segments of the\n * model's response are supported by the `groundingChunks`.\n */\n groundingSupports?: GroundingSupport[];\n /**\n * A list of web search queries that the model performed to gather the grounding information.\n * These can be used to allow users to explore the search results themselves.\n */\n webSearchQueries?: string[];\n /**\n * @deprecated Use {@link GroundingSupport} instead.\n */\n retrievalQueries?: string[];\n}\n\n/**\n * Google search entry point.\n *\n * @public\n */\nexport interface SearchEntrypoint {\n /**\n * HTML/CSS snippet that must be embedded in a web page. 
The snippet is designed to avoid\n * undesired interaction with the rest of the page's CSS.\n *\n * To ensure proper rendering and prevent CSS conflicts, it is recommended\n * to encapsulate this `renderedContent` within a shadow DOM when embedding it\n * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.\n *\n * @example\n * ```javascript\n * const container = document.createElement('div');\n * document.body.appendChild(container);\n * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;\n * ```\n */\n renderedContent?: string;\n}\n\n/**\n * Represents a chunk of retrieved data that supports a claim in the model's response. This is part\n * of the grounding information provided when grounding is enabled.\n *\n * @public\n */\nexport interface GroundingChunk {\n /**\n * Contains details if the grounding chunk is from a web source.\n */\n web?: WebGroundingChunk;\n}\n\n/**\n * A grounding chunk from the web.\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for \"Grounding with Google Search\".\n *\n * @public\n */\nexport interface WebGroundingChunk {\n /**\n * The URI of the retrieved web page.\n */\n uri?: string;\n /**\n * The title of the retrieved web page.\n */\n title?: string;\n /**\n * The domain of the original URI from which the content was retrieved.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be\n * `undefined`.\n */\n domain?: string;\n}\n\n/**\n * Provides information about how a specific segment of the model's response is supported by the\n * retrieved grounding chunks.\n *\n * @public\n */\nexport interface GroundingSupport {\n /**\n * Specifies the segment of the model's response content that this grounding support pertains to.\n */\n segment?: Segment;\n /**\n * A list of indices that refer to specific {@link GroundingChunk} objects within the\n * {@link GroundingMetadata.groundingChunks} array. These referenced chunks\n * are the sources that support the claim made in the associated `segment` of the response.\n * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,\n * and `groundingChunks[4]` are the retrieved content supporting this part of the response.\n */\n groundingChunkIndices?: number[];\n}\n\n/**\n * Represents a specific segment within a {@link Content} object, often used to\n * pinpoint the exact location of text or data that grounding information refers to.\n *\n * @public\n */\nexport interface Segment {\n /**\n * The zero-based index of the {@link Part} object within the `parts` array\n * of its parent {@link Content} object. This identifies which part of the\n * content the segment belongs to.\n */\n partIndex: number;\n /**\n * The zero-based start index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the\n * beginning of the part's content (e.g., `Part.text`).\n */\n startIndex: number;\n /**\n * The zero-based end index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. 
This offset is exclusive, meaning the character\n * at this index is not included in the segment.\n */\n endIndex: number;\n /**\n * The text corresponding to the segment from the response.\n */\n text: string;\n}\n\n/**\n * Metadata related to {@link URLContextTool}.\n *\n * @beta\n */\nexport interface URLContextMetadata {\n /**\n * List of URL metadata used to provide context to the Gemini model.\n */\n urlMetadata: URLMetadata[];\n}\n\n/**\n * Metadata for a single URL retrieved by the {@link URLContextTool} tool.\n *\n * @beta\n */\nexport interface URLMetadata {\n /**\n * The retrieved URL.\n */\n retrievedUrl?: string;\n /**\n * The status of the URL retrieval.\n */\n urlRetrievalStatus?: URLRetrievalStatus;\n}\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport const URLRetrievalStatus = {\n /**\n * Unspecified retrieval status.\n */\n URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',\n /**\n * The URL retrieval was successful.\n */\n URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',\n /**\n * The URL retrieval failed.\n */\n URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',\n /**\n * The URL retrieval failed because the content is behind a paywall.\n */\n URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',\n /**\n * The URL retrieval failed because the content is unsafe.\n */\n URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'\n};\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport type URLRetrievalStatus =\n (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];\n\n/**\n * @public\n */\nexport interface WebAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * @public\n */\nexport interface RetrievedContextAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * Protobuf google.type.Date\n * @public\n */\nexport interface Date {\n year: number;\n month: number;\n day: number;\n}\n\n/**\n * A safety rating associated with a {@link GenerateContentCandidate}\n * @public\n */\nexport interface SafetyRating {\n category: HarmCategory;\n probability: HarmProbability;\n /**\n * The harm severity level.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.\n */\n severity: HarmSeverity;\n /**\n * The probability score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link 
VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n probabilityScore: number;\n /**\n * The severity score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n severityScore: number;\n blocked: boolean;\n}\n\n/**\n * Response from calling {@link GenerativeModel.countTokens}.\n * @public\n */\nexport interface CountTokensResponse {\n /**\n * The total number of tokens counted across all instances from the request.\n */\n totalTokens: number;\n /**\n * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.\n *\n * The total number of billable characters counted across all instances\n * from the request.\n */\n totalBillableCharacters?: number;\n /**\n * The breakdown, by modality, of how many tokens are consumed by the prompt.\n */\n promptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * An incremental content update from the model.\n *\n * @beta\n */\nexport interface LiveServerContent {\n type: 'serverContent';\n /**\n * The content that the model has generated as part of the current conversation with the user.\n */\n modelTurn?: Content;\n /**\n * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.\n */\n turnComplete?: boolean;\n /**\n * Indicates whether the model was interrupted by the client. An interruption occurs when\n * the client sends a message before the model finishes it's turn. This is `undefined` if the\n * model was not interrupted.\n */\n interrupted?: boolean;\n /**\n * Transcription of the audio that was input to the model.\n */\n inputTranscription?: Transcription;\n /**\n * Transcription of the audio output from the model.\n */\n outputTranscription?: Transcription;\n}\n\n/**\n * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription\n * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on\n * the {@link LiveGenerationConfig}.\n *\n * @beta\n */\n\nexport interface Transcription {\n /**\n * The text transcription of the audio.\n */\n text?: string;\n}\n\n/**\n * A request from the model for the client to execute one or more functions.\n *\n * @beta\n */\nexport interface LiveServerToolCall {\n type: 'toolCall';\n /**\n * An array of function calls to run.\n */\n functionCalls: FunctionCall[];\n}\n\n/**\n * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.\n *\n * @beta\n */\nexport interface LiveServerToolCallCancellation {\n type: 'toolCallCancellation';\n /**\n * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.\n */\n functionIds: string[];\n}\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n *\n * @beta\n */\nexport const LiveResponseType = {\n SERVER_CONTENT: 'serverContent',\n TOOL_CALL: 'toolCall',\n TOOL_CALL_CANCELLATION: 'toolCallCancellation'\n};\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n * This is a property on all messages that can be used for type narrowing. 
This property is not\n * returned by the server, it is assigned to a server message object once it's parsed.\n *\n * @beta\n */\nexport type LiveResponseType =\n (typeof LiveResponseType)[keyof typeof LiveResponseType];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GenerateContentResponse } from './responses';\n\n/**\n * Details object that may be included in an error response.\n *\n * @public\n */\nexport interface ErrorDetails {\n '@type'?: string;\n\n /** The reason for the error. */\n reason?: string;\n\n /** The domain where the error occurred. */\n domain?: string;\n\n /** Additional metadata about the error. */\n metadata?: Record<string, unknown>;\n\n /** Any other relevant information about the error. */\n [key: string]: unknown;\n}\n\n/**\n * Details object that contains data originating from a bad HTTP response.\n *\n * @public\n */\nexport interface CustomErrorData {\n /** HTTP status code of the error response. */\n status?: number;\n\n /** HTTP status text of the error response. */\n statusText?: string;\n\n /** Response from a {@link GenerateContentRequest} */\n response?: GenerateContentResponse;\n\n /** Optional additional details about the error. */\n errorDetails?: ErrorDetails[];\n}\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport const AIErrorCode = {\n /** A generic error occurred. */\n ERROR: 'error',\n\n /** An error occurred in a request. */\n REQUEST_ERROR: 'request-error',\n\n /** An error occurred in a response. */\n RESPONSE_ERROR: 'response-error',\n\n /** An error occurred while performing a fetch. */\n FETCH_ERROR: 'fetch-error',\n\n /** An error occurred because an operation was attempted on a closed session. */\n SESSION_CLOSED: 'session-closed',\n\n /** An error associated with a Content object. */\n INVALID_CONTENT: 'invalid-content',\n\n /** An error due to the Firebase API not being enabled in the Console. */\n API_NOT_ENABLED: 'api-not-enabled',\n\n /** An error due to invalid Schema input. */\n INVALID_SCHEMA: 'invalid-schema',\n\n /** An error occurred due to a missing Firebase API key. */\n NO_API_KEY: 'no-api-key',\n\n /** An error occurred due to a missing Firebase app ID. */\n NO_APP_ID: 'no-app-id',\n\n /** An error occurred due to a model name not being specified during initialization. */\n NO_MODEL: 'no-model',\n\n /** An error occurred due to a missing project ID. */\n NO_PROJECT_ID: 'no-project-id',\n\n /** An error occurred while parsing. */\n PARSE_FAILED: 'parse-failed',\n\n /** An error occurred due an attempt to use an unsupported feature. 
*/\n UNSUPPORTED: 'unsupported'\n} as const;\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport const SchemaType = {\n /** String type. */\n STRING: 'string',\n /** Number type. */\n NUMBER: 'number',\n /** Integer type. */\n INTEGER: 'integer',\n /** Boolean type. */\n BOOLEAN: 'boolean',\n /** Array type. */\n ARRAY: 'array',\n /** Object type. */\n OBJECT: 'object'\n} as const;\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];\n\n/**\n * Basic {@link Schema} properties shared across several Schema-related\n * types.\n * @public\n */\nexport interface SchemaShared<T> {\n /**\n * An array of {@link Schema}. The generated data must be valid against any of the schemas\n * listed in this array. This allows specifying multiple possible structures or types for a\n * single field.\n */\n anyOf?: T[];\n /** Optional. The format of the property.\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or\n * `'date-time'`, otherwise requests will fail.\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /**\n * The title of the property. This helps document the schema's purpose but does not typically\n * constrain the generated value. It can subtly guide the model by clarifying the intent of a\n * field.\n */\n title?: string;\n /** Optional. The items of the property. */\n items?: T;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Map of `Schema` objects. */\n properties?: {\n [k: string]: T;\n };\n /** A hint suggesting the order in which the keys should appear in the generated JSON string. */\n propertyOrdering?: string[];\n /** Optional. The enum of the property. */\n enum?: string[];\n /** Optional. The example of the property. */\n example?: unknown;\n /** Optional. Whether the property is nullable. */\n nullable?: boolean;\n /** The minimum value of a numeric type. */\n minimum?: number;\n /** The maximum value of a numeric type. 
*/\n maximum?: number;\n [key: string]: unknown;\n}\n\n/**\n * Params passed to {@link Schema} static methods to create specific\n * {@link Schema} classes.\n * @public\n */\nexport interface SchemaParams extends SchemaShared<SchemaInterface> {}\n\n/**\n * Final format for {@link Schema} params passed to backend requests.\n * @public\n */\nexport interface SchemaRequest extends SchemaShared<SchemaRequest> {\n /**\n * The type of the property. this can only be undefined when using `anyOf` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }.\n */\n type?: SchemaType;\n /** Optional. Array of required property. */\n required?: string[];\n}\n\n/**\n * Interface for {@link Schema} class.\n * @public\n */\nexport interface SchemaInterface extends SchemaShared<SchemaInterface> {\n /**\n * The type of the property. this can only be undefined when using `anyof` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.\n */\n type?: SchemaType;\n}\n\n/**\n * Interface for JSON parameters in a schema of {@link (SchemaType:type)}\n * \"object\" when not using the `Schema.object()` helper.\n * @public\n */\nexport interface ObjectSchemaRequest extends SchemaRequest {\n type: 'object';\n /**\n * This is not a property accepted in the final request to the backend, but is\n * a client-side convenience property that is only usable by constructing\n * a schema through the `Schema.object()` helper method. Populating this\n * property will cause response errors if the object is not wrapped with\n * `Schema.object()`.\n */\n optionalProperties?: never;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ImagenImageFormat } from '../../requests/imagen-image-format';\n\n/**\n * Parameters for configuring an {@link ImagenModel}.\n *\n * @public\n */\nexport interface ImagenModelParams {\n /**\n * The Imagen model to use for generating images.\n * For example: `imagen-3.0-generate-002`.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}\n * for a full list of supported Imagen 3 models.\n */\n model: string;\n /**\n * Configuration options for generating images with Imagen.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering potentially inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n}\n\n/**\n * Configuration options for generating images with Imagen.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for\n * more details.\n *\n * @public\n */\nexport interface ImagenGenerationConfig {\n /**\n * A description of what should be omitted from the generated images.\n *\n * Support for negative prompts depends on 
the Imagen model.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.\n *\n * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions\n * greater than `imagen-3.0-generate-002`.\n */\n negativePrompt?: string;\n /**\n * The number of images to generate. The default value is 1.\n *\n * The number of sample images that may be generated in each request depends on the model\n * (typically up to 4); see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">sampleCount</a>\n * documentation for more details.\n */\n numberOfImages?: number;\n /**\n * The aspect ratio of the generated images. The default value is square 1:1.\n * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}\n * for more details.\n */\n aspectRatio?: ImagenAspectRatio;\n /**\n * The image format of the generated images. The default is PNG.\n *\n * See {@link ImagenImageFormat} for more details.\n */\n imageFormat?: ImagenImageFormat;\n /**\n * Whether to add an invisible watermark to generated images.\n *\n * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate\n * that they are AI generated. If set to `false`, watermarking will be disabled.\n *\n * For Imagen 3 models, the default value is `true`; see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">addWatermark</a>\n * documentation for more details.\n *\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,\n * and cannot be turned off.\n */\n addWatermark?: boolean;\n}\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport const ImagenSafetyFilterLevel = {\n /**\n * The most aggressive filtering level; most strict blocking.\n */\n BLOCK_LOW_AND_ABOVE: 'block_low_and_above',\n /**\n * Blocks some sensitive prompts and responses.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',\n /**\n * Blocks few sensitive prompts and responses.\n */\n BLOCK_ONLY_HIGH: 'block_only_high',\n /**\n * The least aggressive filtering level; blocks very few sensitive prompts and responses.\n *\n * Access to this feature is restricted and may require your case to be reviewed and approved by\n * Cloud support.\n */\n BLOCK_NONE: 'block_none'\n} as const;\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. 
See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport type ImagenSafetyFilterLevel =\n (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport const ImagenPersonFilterLevel = {\n /**\n * Disallow generation of images containing people or faces; images of people are filtered out.\n */\n BLOCK_ALL: 'dont_allow',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ADULT: 'allow_adult',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ALL: 'allow_all'\n} as const;\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport type ImagenPersonFilterLevel =\n (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];\n\n/**\n * Settings for controlling the aggressiveness of filtering out sensitive content.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details.\n *\n * @public\n */\nexport interface ImagenSafetySettings {\n /**\n * A filter level controlling how aggressive to filter out sensitive content from generated\n * images.\n */\n safetyFilterLevel?: ImagenSafetyFilterLevel;\n /**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n */\n personFilterLevel?: ImagenPersonFilterLevel;\n}\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport const ImagenAspectRatio = {\n /**\n * Square (1:1) aspect ratio.\n */\n 'SQUARE': '1:1',\n /**\n * Landscape (3:4) aspect ratio.\n */\n 'LANDSCAPE_3x4': '3:4',\n /**\n * Portrait (4:3) aspect ratio.\n */\n 'PORTRAIT_4x3': '4:3',\n /**\n * Landscape (16:9) aspect ratio.\n */\n 'LANDSCAPE_16x9': '16:9',\n /**\n * Portrait (9:16) aspect ratio.\n */\n 'PORTRAIT_9x16': '9:16'\n} as const;\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for 
generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport type ImagenAspectRatio =\n (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp } from '@firebase/app';\nimport { Backend } from './backend';\n\nexport * from './types';\n\n/**\n * An instance of the Firebase AI SDK.\n *\n * Do not create this instance directly. Instead, use {@link getAI | getAI()}.\n *\n * @public\n */\nexport interface AI {\n /**\n * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.\n */\n app: FirebaseApp;\n /**\n * A {@link Backend} instance that specifies the configuration for the target backend,\n * either the Gemini Developer API (using {@link GoogleAIBackend}) or the\n * Vertex AI Gemini API (using {@link VertexAIBackend}).\n */\n backend: Backend;\n /**\n * Options applied to this {@link AI} instance.\n */\n options?: AIOptions;\n /**\n * @deprecated use `AI.backend.location` instead.\n *\n * The location configured for this AI service instance, relevant for Vertex AI backends.\n */\n location: string;\n}\n\n/**\n * An enum-like object containing constants that represent the supported backends\n * for the Firebase AI SDK.\n * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)\n * the SDK will communicate with.\n *\n * These values are assigned to the `backendType` property within the specific backend\n * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify\n * which service to target.\n *\n * @public\n */\nexport const BackendType = {\n /**\n * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.\n * Use this constant when creating a {@link VertexAIBackend} configuration.\n */\n VERTEX_AI: 'VERTEX_AI',\n\n /**\n * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).\n * Use this constant when creating a {@link GoogleAIBackend} configuration.\n */\n GOOGLE_AI: 'GOOGLE_AI'\n} as const; // Using 'as const' makes the string values literal types\n\n/**\n * Type alias representing valid backend types.\n * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.\n *\n * @public\n */\nexport type BackendType = (typeof BackendType)[keyof typeof BackendType];\n\n/**\n * Options for initializing the AI service using {@link getAI | getAI()}.\n * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)\n * and configuring its specific options (like location for Vertex AI).\n *\n * @public\n */\nexport interface AIOptions {\n /**\n * The backend configuration to use for the AI service instance.\n * Defaults to the Gemini Developer API backend 
({@link GoogleAIBackend}).\n */\n backend?: Backend;\n /**\n * Whether to use App Check limited use tokens. Defaults to false.\n */\n useLimitedUseAppCheckTokens?: boolean;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { DEFAULT_LOCATION } from './constants';\nimport { BackendType } from './public-types';\n\n/**\n * Abstract base class representing the configuration for an AI service backend.\n * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for\n * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and\n * {@link VertexAIBackend} for the Vertex AI Gemini API.\n *\n * @public\n */\nexport abstract class Backend {\n /**\n * Specifies the backend type.\n */\n readonly backendType: BackendType;\n\n /**\n * Protected constructor for use by subclasses.\n * @param type - The backend type.\n */\n protected constructor(type: BackendType) {\n this.backendType = type;\n }\n}\n\n/**\n * Configuration class for the Gemini Developer API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.\n *\n * @public\n */\nexport class GoogleAIBackend extends Backend {\n /**\n * Creates a configuration object for the Gemini Developer API backend.\n */\n constructor() {\n super(BackendType.GOOGLE_AI);\n }\n}\n\n/**\n * Configuration class for the Vertex AI Gemini API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.\n *\n * @public\n */\nexport class VertexAIBackend extends Backend {\n /**\n * The region identifier.\n * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n readonly location: string;\n\n /**\n * Creates a configuration object for the Vertex AI backend.\n *\n * @param location - The region identifier, defaulting to `us-central1`;\n * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n constructor(location: string = DEFAULT_LOCATION) {\n super(BackendType.VERTEX_AI);\n if (!location) {\n this.location = DEFAULT_LOCATION;\n } else {\n this.location = location;\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * 
limitations under the License.\n */\n\nimport { AI_TYPE } from './constants';\nimport { AIError } from './errors';\nimport { AIErrorCode } from './types';\nimport { Backend, GoogleAIBackend, VertexAIBackend } from './backend';\n\n/**\n * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}\n * instances by backend type.\n *\n * @internal\n */\nexport function encodeInstanceIdentifier(backend: Backend): string {\n if (backend instanceof GoogleAIBackend) {\n return `${AI_TYPE}/googleai`;\n } else if (backend instanceof VertexAIBackend) {\n return `${AI_TYPE}/vertexai/${backend.location}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(backend.backendType)}`\n );\n }\n}\n\n/**\n * Decodes an instance identifier string into a {@link Backend}.\n *\n * @internal\n */\nexport function decodeInstanceIdentifier(instanceIdentifier: string): Backend {\n const identifierParts = instanceIdentifier.split('/');\n if (identifierParts[0] !== AI_TYPE) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`\n );\n }\n const backendType = identifierParts[1];\n switch (backendType) {\n case 'vertexai':\n const location: string | undefined = identifierParts[2];\n if (!location) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown location '${instanceIdentifier}'`\n );\n }\n return new VertexAIBackend(location);\n case 'googleai':\n return new GoogleAIBackend();\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier string: '${instanceIdentifier}'`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Logger } from '@firebase/logger';\n\nexport const logger = new Logger('@firebase/vertexai');\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * The subset of the Prompt API\n * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl }\n * required for hybrid functionality.\n *\n * @internal\n */\nexport interface LanguageModel extends EventTarget {\n create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;\n availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;\n prompt(\n input: LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): Promise<string>;\n promptStreaming(\n input: 
LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): ReadableStream;\n measureInputUsage(\n input: LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): Promise<number>;\n destroy(): undefined;\n}\n\n/**\n * @internal\n */\nexport enum Availability {\n 'UNAVAILABLE' = 'unavailable',\n 'DOWNLOADABLE' = 'downloadable',\n 'DOWNLOADING' = 'downloading',\n 'AVAILABLE' = 'available'\n}\n\n/**\n * Configures the creation of an on-device language model session.\n * @beta\n */\nexport interface LanguageModelCreateCoreOptions {\n topK?: number;\n temperature?: number;\n expectedInputs?: LanguageModelExpected[];\n}\n\n/**\n * Configures the creation of an on-device language model session.\n * @beta\n */\nexport interface LanguageModelCreateOptions\n extends LanguageModelCreateCoreOptions {\n signal?: AbortSignal;\n initialPrompts?: LanguageModelMessage[];\n}\n\n/**\n * Options for an on-device language model prompt.\n * @beta\n */\nexport interface LanguageModelPromptOptions {\n responseConstraint?: object;\n // TODO: Restore AbortSignal once the API is defined.\n}\n\n/**\n * Options for the expected inputs for an on-device language model.\n * @beta\n */ export interface LanguageModelExpected {\n type: LanguageModelMessageType;\n languages?: string[];\n}\n\n/**\n * An on-device language model prompt.\n * @beta\n */\nexport type LanguageModelPrompt = LanguageModelMessage[];\n\n/**\n * An on-device language model message.\n * @beta\n */\nexport interface LanguageModelMessage {\n role: LanguageModelMessageRole;\n content: LanguageModelMessageContent[];\n}\n\n/**\n * An on-device language model content object.\n * @beta\n */\nexport interface LanguageModelMessageContent {\n type: LanguageModelMessageType;\n value: LanguageModelMessageContentValue;\n}\n\n/**\n * Allowable roles for on-device language model usage.\n * @beta\n */\nexport type LanguageModelMessageRole = 'system' | 'user' | 'assistant';\n\n/**\n * Allowable types for on-device language model messages.\n * @beta\n */\nexport type LanguageModelMessageType = 'text' | 'image' | 'audio';\n\n/**\n * Content formats that can be provided as on-device message content.\n * @beta\n */\nexport type LanguageModelMessageContentValue =\n | ImageBitmapSource\n | AudioBuffer\n | BufferSource\n | string;\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport {\n CountTokensRequest,\n GenerateContentRequest,\n InferenceMode,\n Part,\n AIErrorCode,\n OnDeviceParams,\n Content,\n Role\n} from '../types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport {\n Availability,\n LanguageModel,\n LanguageModelExpected,\n LanguageModelMessage,\n LanguageModelMessageContent,\n LanguageModelMessageRole\n} from '../types/language-model';\n\n// Defaults to support image inputs for convenience.\nconst defaultExpectedInputs: LanguageModelExpected[] = [{ type: 'image' }];\n\n/**\n * Defines an 
inference \"backend\" that uses Chrome's on-device model,\n * and encapsulates logic for detecting when on-device inference is\n * possible.\n */\nexport class ChromeAdapterImpl implements ChromeAdapter {\n // Visible for testing\n static SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];\n private isDownloading = false;\n private downloadPromise: Promise<LanguageModel | void> | undefined;\n private oldSession: LanguageModel | undefined;\n onDeviceParams: OnDeviceParams = {\n createOptions: {\n expectedInputs: defaultExpectedInputs\n }\n };\n constructor(\n public languageModelProvider: LanguageModel,\n public mode: InferenceMode,\n onDeviceParams?: OnDeviceParams\n ) {\n if (onDeviceParams) {\n this.onDeviceParams = onDeviceParams;\n if (!this.onDeviceParams.createOptions) {\n this.onDeviceParams.createOptions = {\n expectedInputs: defaultExpectedInputs\n };\n } else if (!this.onDeviceParams.createOptions.expectedInputs) {\n this.onDeviceParams.createOptions.expectedInputs =\n defaultExpectedInputs;\n }\n }\n }\n\n /**\n * Checks if a given request can be made on-device.\n *\n * Encapsulates a few concerns:\n * the mode\n * API existence\n * prompt formatting\n * model availability, including triggering download if necessary\n *\n *\n * Pros: callers needn't be concerned with details of on-device availability.</p>\n * Cons: this method spans a few concerns and splits request validation from usage.\n * If instance variables weren't already part of the API, we could consider a better\n * separation of concerns.\n */\n async isAvailable(request: GenerateContentRequest): Promise<boolean> {\n if (!this.mode) {\n logger.debug(\n `On-device inference unavailable because mode is undefined.`\n );\n return false;\n }\n if (this.mode === InferenceMode.ONLY_IN_CLOUD) {\n logger.debug(\n `On-device inference unavailable because mode is \"only_in_cloud\".`\n );\n return false;\n }\n\n // Triggers out-of-band download so model will eventually become available.\n const availability = await this.downloadIfAvailable();\n\n if (this.mode === InferenceMode.ONLY_ON_DEVICE) {\n // If it will never be available due to API inavailability, throw.\n if (availability === Availability.UNAVAILABLE) {\n throw new AIError(\n AIErrorCode.API_NOT_ENABLED,\n 'Local LanguageModel API not available in this environment.'\n );\n } else if (\n availability === Availability.DOWNLOADABLE ||\n availability === Availability.DOWNLOADING\n ) {\n // TODO(chholland): Better user experience during download - progress?\n logger.debug(`Waiting for download of LanguageModel to complete.`);\n await this.downloadPromise;\n return true;\n }\n return true;\n }\n\n // Applies prefer_on_device logic.\n if (availability !== Availability.AVAILABLE) {\n logger.debug(\n `On-device inference unavailable because availability is \"${availability}\".`\n );\n return false;\n }\n if (!ChromeAdapterImpl.isOnDeviceRequest(request)) {\n logger.debug(\n `On-device inference unavailable because request is incompatible.`\n );\n return false;\n }\n\n return true;\n }\n\n /**\n * Generates content on device.\n *\n * @remarks\n * This is comparable to {@link GenerativeModel.generateContent} for generating content in\n * Cloud.\n * @param request - a standard Firebase AI {@link GenerateContentRequest}\n * @returns {@link Response}, so we can reuse common response formatting.\n */\n async generateContent(request: GenerateContentRequest): Promise<Response> {\n const session = await this.createSession();\n const contents = await Promise.all(\n 
request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)\n );\n const text = await session.prompt(\n contents,\n this.onDeviceParams.promptOptions\n );\n return ChromeAdapterImpl.toResponse(text);\n }\n\n /**\n * Generates content stream on device.\n *\n * @remarks\n * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in\n * Cloud.\n * @param request - a standard Firebase AI {@link GenerateContentRequest}\n * @returns {@link Response}, so we can reuse common response formatting.\n */\n async generateContentStream(\n request: GenerateContentRequest\n ): Promise<Response> {\n const session = await this.createSession();\n const contents = await Promise.all(\n request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)\n );\n const stream = session.promptStreaming(\n contents,\n this.onDeviceParams.promptOptions\n );\n return ChromeAdapterImpl.toStreamResponse(stream);\n }\n\n async countTokens(_request: CountTokensRequest): Promise<Response> {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'Count Tokens is not yet available for on-device model.'\n );\n }\n\n /**\n * Asserts inference for the given request can be performed by an on-device model.\n */\n private static isOnDeviceRequest(request: GenerateContentRequest): boolean {\n // Returns false if the prompt is empty.\n if (request.contents.length === 0) {\n logger.debug('Empty prompt rejected for on-device inference.');\n return false;\n }\n\n for (const content of request.contents) {\n if (content.role === 'function') {\n logger.debug(`\"Function\" role rejected for on-device inference.`);\n return false;\n }\n\n // Returns false if request contains an image with an unsupported mime type.\n for (const part of content.parts) {\n if (\n part.inlineData &&\n ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(\n part.inlineData.mimeType\n ) === -1\n ) {\n logger.debug(\n `Unsupported mime type \"${part.inlineData.mimeType}\" rejected for on-device inference.`\n );\n return false;\n }\n }\n }\n\n return true;\n }\n\n /**\n * Encapsulates logic to get availability and download a model if one is downloadable.\n */\n private async downloadIfAvailable(): Promise<Availability | undefined> {\n const availability = await this.languageModelProvider?.availability(\n this.onDeviceParams.createOptions\n );\n\n if (availability === Availability.DOWNLOADABLE) {\n this.download();\n }\n\n return availability;\n }\n\n /**\n * Triggers out-of-band download of an on-device model.\n *\n * Chrome only downloads models as needed. 
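A sketch of how the Availability states declared earlier drive this out-of-band download; ensureModelReady is a hypothetical helper, not part of this SDK, and LanguageModel / Availability refer to the Prompt API shims declared above.

async function ensureModelReady(provider: LanguageModel): Promise<boolean> {
  const availability = await provider.availability();
  if (availability === Availability.UNAVAILABLE) {
    // The Prompt API cannot serve this environment at all.
    return false;
  }
  if (availability === Availability.DOWNLOADABLE) {
    // create() is the signal that tells Chrome the model is needed; it resolves
    // once a session (and therefore the downloaded model) is ready.
    void provider.create();
  }
  // 'downloading' and 'available' both eventually yield a usable session.
  return true;
}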
Chrome knows a model is needed when code calls\n * LanguageModel.create.\n *\n * Since Chrome manages the download, the SDK can only avoid redundant download requests by\n * tracking if a download has previously been requested.\n */\n private download(): void {\n if (this.isDownloading) {\n return;\n }\n this.isDownloading = true;\n this.downloadPromise = this.languageModelProvider\n ?.create(this.onDeviceParams.createOptions)\n .finally(() => {\n this.isDownloading = false;\n });\n }\n\n /**\n * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.\n */\n private static async toLanguageModelMessage(\n content: Content\n ): Promise<LanguageModelMessage> {\n const languageModelMessageContents = await Promise.all(\n content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent)\n );\n return {\n role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role),\n content: languageModelMessageContents\n };\n }\n\n /**\n * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.\n */\n private static async toLanguageModelMessageContent(\n part: Part\n ): Promise<LanguageModelMessageContent> {\n if (part.text) {\n return {\n type: 'text',\n value: part.text\n };\n } else if (part.inlineData) {\n const formattedImageContent = await fetch(\n `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`\n );\n const imageBlob = await formattedImageContent.blob();\n const imageBitmap = await createImageBitmap(imageBlob);\n return {\n type: 'image',\n value: imageBitmap\n };\n }\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n `Processing of this Part type is not currently supported.`\n );\n }\n\n /**\n * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.\n */\n private static toLanguageModelMessageRole(\n role: Role\n ): LanguageModelMessageRole {\n // Assumes 'function' rule has been filtered by isOnDeviceRequest\n return role === 'model' ? 'assistant' : 'user';\n }\n\n /**\n * Abstracts Chrome session creation.\n *\n * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all\n * inference. 
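A sketch of the create-new-before-destroying-old ordering described here; rotateSession, provider, and oldSession are illustrative names rather than SDK API, and LanguageModel is the Prompt API shim declared above.

async function rotateSession(
  provider: LanguageModel,
  oldSession?: LanguageModel
): Promise<LanguageModel> {
  // Create the replacement session first so the model is never left unreferenced
  // (Chrome may unload a model that no session is using).
  const next = await provider.create();
  oldSession?.destroy();
  return next;
}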
To map the Firebase AI API to Chrome's API, the SDK creates a new session for all\n * inference.\n *\n * Chrome will remove a model from memory if it's no longer in use, so this method ensures a\n * new session is created before an old session is destroyed.\n */\n private async createSession(): Promise<LanguageModel> {\n if (!this.languageModelProvider) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Chrome AI requested for unsupported browser version.'\n );\n }\n const newSession = await this.languageModelProvider.create(\n this.onDeviceParams.createOptions\n );\n if (this.oldSession) {\n this.oldSession.destroy();\n }\n // Holds session reference, so model isn't unloaded from memory.\n this.oldSession = newSession;\n return newSession;\n }\n\n /**\n * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.\n */\n private static toResponse(text: string): Response {\n return {\n json: async () => ({\n candidates: [\n {\n content: {\n parts: [{ text }]\n }\n }\n ]\n })\n } as Response;\n }\n\n /**\n * Formats string stream returned by Chrome as SSE returned by Firebase AI.\n */\n private static toStreamResponse(stream: ReadableStream<string>): Response {\n const encoder = new TextEncoder();\n return {\n body: stream.pipeThrough(\n new TransformStream({\n transform(chunk, controller) {\n const json = JSON.stringify({\n candidates: [\n {\n content: {\n role: 'model',\n parts: [{ text: chunk }]\n }\n }\n ]\n });\n controller.enqueue(encoder.encode(`data: ${json}\\n\\n`));\n }\n })\n )\n } as Response;\n }\n}\n\n/**\n * Creates a ChromeAdapterImpl on demand.\n */\nexport function chromeAdapterFactory(\n mode: InferenceMode,\n window?: Window,\n params?: OnDeviceParams\n): ChromeAdapterImpl | undefined {\n // Do not initialize a ChromeAdapter if we are not in hybrid mode.\n if (typeof window !== 'undefined' && mode) {\n return new ChromeAdapterImpl(\n (window as Window).LanguageModel as LanguageModel,\n mode,\n params\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp, _FirebaseService } from '@firebase/app';\nimport { AI, AIOptions, InferenceMode, OnDeviceParams } from './public-types';\nimport {\n AppCheckInternalComponentName,\n FirebaseAppCheckInternal\n} from '@firebase/app-check-interop-types';\nimport { Provider } from '@firebase/component';\nimport {\n FirebaseAuthInternal,\n FirebaseAuthInternalName\n} from '@firebase/auth-interop-types';\nimport { Backend, VertexAIBackend } from './backend';\nimport { ChromeAdapterImpl } from './methods/chrome-adapter';\n\nexport class AIService implements AI, _FirebaseService {\n auth: FirebaseAuthInternal | null;\n appCheck: FirebaseAppCheckInternal | null;\n _options?: Omit<AIOptions, 'backend'>;\n location: string; // This is here for backwards-compatibility\n\n constructor(\n public app: FirebaseApp,\n public backend: Backend,\n authProvider?: Provider<FirebaseAuthInternalName>,\n appCheckProvider?: 
Provider<AppCheckInternalComponentName>,\n public chromeAdapterFactory?: (\n mode: InferenceMode,\n window?: Window,\n params?: OnDeviceParams\n ) => ChromeAdapterImpl | undefined\n ) {\n const appCheck = appCheckProvider?.getImmediate({ optional: true });\n const auth = authProvider?.getImmediate({ optional: true });\n this.auth = auth || null;\n this.appCheck = appCheck || null;\n\n if (backend instanceof VertexAIBackend) {\n this.location = backend.location;\n } else {\n this.location = '';\n }\n }\n\n _delete(): Promise<void> {\n return Promise.resolve();\n }\n\n set options(optionsToSet: AIOptions) {\n this._options = optionsToSet;\n }\n\n get options(): AIOptions | undefined {\n return this._options;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n ComponentContainer,\n InstanceFactoryOptions\n} from '@firebase/component';\nimport { AIError } from './errors';\nimport { decodeInstanceIdentifier } from './helpers';\nimport { chromeAdapterFactory } from './methods/chrome-adapter';\nimport { AIService } from './service';\nimport { AIErrorCode } from './types';\n\nexport function factory(\n container: ComponentContainer,\n { instanceIdentifier }: InstanceFactoryOptions\n): AIService {\n if (!instanceIdentifier) {\n throw new AIError(\n AIErrorCode.ERROR,\n 'AIService instance identifier is undefined.'\n );\n }\n\n const backend = decodeInstanceIdentifier(instanceIdentifier);\n\n // getImmediate for FirebaseApp will always succeed\n const app = container.getProvider('app').getImmediate();\n const auth = container.getProvider('auth-internal');\n const appCheckProvider = container.getProvider('app-check-internal');\n\n return new AIService(\n app,\n backend,\n auth,\n appCheckProvider,\n chromeAdapterFactory\n );\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode, AI, BackendType } from '../public-types';\nimport { AIService } from '../service';\nimport { ApiSettings } from '../types/internal';\nimport { _isFirebaseServerApp } from '@firebase/app';\n\n/**\n * Base class for Firebase AI model APIs.\n *\n * Instances of this class are associated with a specific Firebase AI {@link Backend}\n * and provide methods for interacting with the configured generative model.\n *\n * @public\n */\nexport abstract class AIModel {\n /**\n * The fully qualified model resource name to use for 
generating images\n * (for example, `publishers/google/models/imagen-3.0-generate-002`).\n */\n readonly model: string;\n\n /**\n * @internal\n */\n _apiSettings: ApiSettings;\n\n /**\n * Constructs a new instance of the {@link AIModel} class.\n *\n * This constructor should only be called from subclasses that provide\n * a model API.\n *\n * @param ai - an {@link AI} instance.\n * @param modelName - The name of the model being used. It can be in one of the following formats:\n * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)\n * - `models/my-model` (will resolve to `publishers/google/models/my-model`)\n * - `publishers/my-publisher/models/my-model` (fully qualified model name)\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @internal\n */\n protected constructor(ai: AI, modelName: string) {\n if (!ai.app?.options?.apiKey) {\n throw new AIError(\n AIErrorCode.NO_API_KEY,\n `The \"apiKey\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`\n );\n } else if (!ai.app?.options?.projectId) {\n throw new AIError(\n AIErrorCode.NO_PROJECT_ID,\n `The \"projectId\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`\n );\n } else if (!ai.app?.options?.appId) {\n throw new AIError(\n AIErrorCode.NO_APP_ID,\n `The \"appId\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`\n );\n } else {\n this._apiSettings = {\n apiKey: ai.app.options.apiKey,\n project: ai.app.options.projectId,\n appId: ai.app.options.appId,\n automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,\n location: ai.location,\n backend: ai.backend\n };\n\n if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {\n const token = ai.app.settings.appCheckToken;\n this._apiSettings.getAppCheckToken = () => {\n return Promise.resolve({ token });\n };\n } else if ((ai as AIService).appCheck) {\n if (ai.options?.useLimitedUseAppCheckTokens) {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getLimitedUseToken();\n } else {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getToken();\n }\n }\n\n if ((ai as AIService).auth) {\n this._apiSettings.getAuthToken = () =>\n (ai as AIService).auth!.getToken();\n }\n\n this.model = AIModel.normalizeModelName(\n modelName,\n this._apiSettings.backend.backendType\n );\n }\n }\n\n /**\n * Normalizes the given model name to a fully qualified model resource name.\n *\n * @param modelName - The model name to normalize.\n * @returns The fully qualified model resource name.\n *\n * @internal\n */\n static normalizeModelName(\n modelName: string,\n backendType: BackendType\n ): string {\n if (backendType === BackendType.GOOGLE_AI) {\n return AIModel.normalizeGoogleAIModelName(modelName);\n } else {\n return AIModel.normalizeVertexAIModelName(modelName);\n }\n }\n\n /**\n * @internal\n */\n private static normalizeGoogleAIModelName(modelName: string): string {\n return `models/${modelName}`;\n }\n\n /**\n * @internal\n */\n private static normalizeVertexAIModelName(modelName: string): string {\n let model: string;\n if (modelName.includes('/')) {\n if (modelName.startsWith('models/')) {\n // Add 'publishers/google' if the user is only passing in 'models/model-name'.\n model = `publishers/google/${modelName}`;\n } else {\n // Any other custom format (e.g. 
tuned models) must be passed in correctly.\n model = modelName;\n }\n } else {\n // If path is not included, assume it's a non-tuned model.\n model = `publishers/google/models/${modelName}`;\n }\n\n return model;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ErrorDetails, RequestOptions, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ApiSettings } from '../types/internal';\nimport {\n DEFAULT_API_VERSION,\n DEFAULT_DOMAIN,\n DEFAULT_FETCH_TIMEOUT_MS,\n LANGUAGE_TAG,\n PACKAGE_VERSION\n} from '../constants';\nimport { logger } from '../logger';\nimport { GoogleAIBackend, VertexAIBackend } from '../backend';\nimport { BackendType } from '../public-types';\n\nexport enum Task {\n GENERATE_CONTENT = 'generateContent',\n STREAM_GENERATE_CONTENT = 'streamGenerateContent',\n COUNT_TOKENS = 'countTokens',\n PREDICT = 'predict'\n}\n\nexport class RequestUrl {\n constructor(\n public model: string,\n public task: Task,\n public apiSettings: ApiSettings,\n public stream: boolean,\n public requestOptions?: RequestOptions\n ) {}\n toString(): string {\n const url = new URL(this.baseUrl); // Throws if the URL is invalid\n url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`;\n url.search = this.queryParams.toString();\n return url.toString();\n }\n\n private get baseUrl(): string {\n return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`;\n }\n\n private get apiVersion(): string {\n return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available\n }\n\n private get modelPath(): string {\n if (this.apiSettings.backend instanceof GoogleAIBackend) {\n return `projects/${this.apiSettings.project}/${this.model}`;\n } else if (this.apiSettings.backend instanceof VertexAIBackend) {\n return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`\n );\n }\n }\n\n private get queryParams(): URLSearchParams {\n const params = new URLSearchParams();\n if (this.stream) {\n params.set('alt', 'sse');\n }\n\n return params;\n }\n}\n\nexport class WebSocketUrl {\n constructor(public apiSettings: ApiSettings) {}\n toString(): string {\n const url = new URL(`wss://${DEFAULT_DOMAIN}`);\n url.pathname = this.pathname;\n\n const queryParams = new URLSearchParams();\n queryParams.set('key', this.apiSettings.apiKey);\n url.search = queryParams.toString();\n\n return url.toString();\n }\n\n private get pathname(): string {\n if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent';\n } else {\n return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`;\n }\n }\n}\n\n/**\n * Log language and \"fire/version\" to x-goog-api-client\n 
*/\nfunction getClientHeaders(): string {\n const loggingTags = [];\n loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`);\n loggingTags.push(`fire/${PACKAGE_VERSION}`);\n return loggingTags.join(' ');\n}\n\nexport async function getHeaders(url: RequestUrl): Promise<Headers> {\n const headers = new Headers();\n headers.append('Content-Type', 'application/json');\n headers.append('x-goog-api-client', getClientHeaders());\n headers.append('x-goog-api-key', url.apiSettings.apiKey);\n if (url.apiSettings.automaticDataCollectionEnabled) {\n headers.append('X-Firebase-Appid', url.apiSettings.appId);\n }\n if (url.apiSettings.getAppCheckToken) {\n const appCheckToken = await url.apiSettings.getAppCheckToken();\n if (appCheckToken) {\n headers.append('X-Firebase-AppCheck', appCheckToken.token);\n if (appCheckToken.error) {\n logger.warn(\n `Unable to obtain a valid App Check token: ${appCheckToken.error.message}`\n );\n }\n }\n }\n\n if (url.apiSettings.getAuthToken) {\n const authToken = await url.apiSettings.getAuthToken();\n if (authToken) {\n headers.append('Authorization', `Firebase ${authToken.accessToken}`);\n }\n }\n\n return headers;\n}\n\nexport async function constructRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<{ url: string; fetchOptions: RequestInit }> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n return {\n url: url.toString(),\n fetchOptions: {\n method: 'POST',\n headers: await getHeaders(url),\n body\n }\n };\n}\n\nexport async function makeRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<Response> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n let response;\n let fetchTimeoutId: string | number | NodeJS.Timeout | undefined;\n try {\n const request = await constructRequest(\n model,\n task,\n apiSettings,\n stream,\n body,\n requestOptions\n );\n // Timeout is 180s by default\n const timeoutMillis =\n requestOptions?.timeout != null && requestOptions.timeout >= 0\n ? requestOptions.timeout\n : DEFAULT_FETCH_TIMEOUT_MS;\n const abortController = new AbortController();\n fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis);\n request.fetchOptions.signal = abortController.signal;\n\n response = await fetch(request.url, request.fetchOptions);\n if (!response.ok) {\n let message = '';\n let errorDetails;\n try {\n const json = await response.json();\n message = json.error.message;\n if (json.error.details) {\n message += ` ${JSON.stringify(json.error.details)}`;\n errorDetails = json.error.details;\n }\n } catch (e) {\n // ignored\n }\n if (\n response.status === 403 &&\n errorDetails &&\n errorDetails.some(\n (detail: ErrorDetails) => detail.reason === 'SERVICE_DISABLED'\n ) &&\n errorDetails.some((detail: ErrorDetails) =>\n (\n detail.links as Array<Record<string, string>>\n )?.[0]?.description.includes(\n 'Google developers console API activation'\n )\n )\n ) {\n throw new AIError(\n AIErrorCode.API_NOT_ENABLED,\n `The Firebase AI SDK requires the Firebase AI ` +\n `API ('firebasevertexai.googleapis.com') to be enabled in your ` +\n `Firebase project. Enable this API by visiting the Firebase Console ` +\n `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` +\n `and clicking \"Get started\". 
If you enabled this API recently, ` +\n `wait a few minutes for the action to propagate to our systems and ` +\n `then retry.`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n throw new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n } catch (e) {\n let err = e as Error;\n if (\n (e as AIError).code !== AIErrorCode.FETCH_ERROR &&\n (e as AIError).code !== AIErrorCode.API_NOT_ENABLED &&\n e instanceof Error\n ) {\n err = new AIError(\n AIErrorCode.ERROR,\n `Error fetching from ${url.toString()}: ${e.message}`\n );\n err.stack = e.stack;\n }\n\n throw err;\n } finally {\n if (fetchTimeoutId) {\n clearTimeout(fetchTimeoutId);\n }\n }\n return response;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n FinishReason,\n FunctionCall,\n GenerateContentCandidate,\n GenerateContentResponse,\n ImagenGCSImage,\n ImagenInlineImage,\n AIErrorCode,\n InlineDataPart,\n Part,\n InferenceSource\n} from '../types';\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport { ImagenResponseInternal } from '../types/internal';\n\n/**\n * Check that at least one candidate exists and does not have a bad\n * finish reason. Warns if multiple candidates exist.\n */\nfunction hasValidCandidates(response: GenerateContentResponse): boolean {\n if (response.candidates && response.candidates.length > 0) {\n if (response.candidates.length > 1) {\n logger.warn(\n `This response had ${response.candidates.length} ` +\n `candidates. Returning text from the first candidate only. ` +\n `Access response.candidates directly to use the other candidates.`\n );\n }\n if (hadBadFinishReason(response.candidates[0])) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Response error: ${formatBlockErrorMessage(\n response\n )}. 
Response body stored in error.response`,\n {\n response\n }\n );\n }\n return true;\n } else {\n return false;\n }\n}\n\n/**\n * Creates an EnhancedGenerateContentResponse object that has helper functions and\n * other modifications that improve usability.\n */\nexport function createEnhancedContentResponse(\n response: GenerateContentResponse,\n inferenceSource: InferenceSource = InferenceSource.IN_CLOUD\n): EnhancedGenerateContentResponse {\n /**\n * The Vertex AI backend omits default values.\n * This causes the `index` property to be omitted from the first candidate in the\n * response, since it has index 0, and 0 is a default value.\n * See: https://github.com/firebase/firebase-js-sdk/issues/8566\n */\n if (response.candidates && !response.candidates[0].hasOwnProperty('index')) {\n response.candidates[0].index = 0;\n }\n\n const responseWithHelpers = addHelpers(response);\n responseWithHelpers.inferenceSource = inferenceSource;\n return responseWithHelpers;\n}\n\n/**\n * Adds convenience helper methods to a response object, including stream\n * chunks (as long as each chunk is a complete GenerateContentResponse JSON).\n */\nexport function addHelpers(\n response: GenerateContentResponse\n): EnhancedGenerateContentResponse {\n (response as EnhancedGenerateContentResponse).text = () => {\n if (hasValidCandidates(response)) {\n return getText(response, part => !part.thought);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Text not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return '';\n };\n (response as EnhancedGenerateContentResponse).thoughtSummary = () => {\n if (hasValidCandidates(response)) {\n const result = getText(response, part => !!part.thought);\n return result === '' ? undefined : result;\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Thought summary not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).inlineDataParts = ():\n | InlineDataPart[]\n | undefined => {\n if (hasValidCandidates(response)) {\n return getInlineDataParts(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Data not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).functionCalls = () => {\n if (hasValidCandidates(response)) {\n return getFunctionCalls(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Function call not available. 
${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n return response as EnhancedGenerateContentResponse;\n}\n\n/**\n * Returns all text from the first candidate's parts, filtering by whether\n * `partFilter()` returns true.\n *\n * @param response - The `GenerateContentResponse` from which to extract text.\n * @param partFilter - Only return `Part`s for which this returns true\n */\nexport function getText(\n response: GenerateContentResponse,\n partFilter: (part: Part) => boolean\n): string {\n const textStrings = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.text && partFilter(part)) {\n textStrings.push(part.text);\n }\n }\n }\n if (textStrings.length > 0) {\n return textStrings.join('');\n } else {\n return '';\n }\n}\n\n/**\n * Returns every {@link FunctionCall} associated with first candidate.\n */\nexport function getFunctionCalls(\n response: GenerateContentResponse\n): FunctionCall[] | undefined {\n const functionCalls: FunctionCall[] = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.functionCall) {\n functionCalls.push(part.functionCall);\n }\n }\n }\n if (functionCalls.length > 0) {\n return functionCalls;\n } else {\n return undefined;\n }\n}\n\n/**\n * Returns every {@link InlineDataPart} in the first candidate if present.\n *\n * @internal\n */\nexport function getInlineDataParts(\n response: GenerateContentResponse\n): InlineDataPart[] | undefined {\n const data: InlineDataPart[] = [];\n\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.inlineData) {\n data.push(part);\n }\n }\n }\n\n if (data.length > 0) {\n return data;\n } else {\n return undefined;\n }\n}\n\nconst badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY];\n\nfunction hadBadFinishReason(candidate: GenerateContentCandidate): boolean {\n return (\n !!candidate.finishReason &&\n badFinishReasons.some(reason => reason === candidate.finishReason)\n );\n}\n\nexport function formatBlockErrorMessage(\n response: GenerateContentResponse\n): string {\n let message = '';\n if (\n (!response.candidates || response.candidates.length === 0) &&\n response.promptFeedback\n ) {\n message += 'Response was blocked';\n if (response.promptFeedback?.blockReason) {\n message += ` due to ${response.promptFeedback.blockReason}`;\n }\n if (response.promptFeedback?.blockReasonMessage) {\n message += `: ${response.promptFeedback.blockReasonMessage}`;\n }\n } else if (response.candidates?.[0]) {\n const firstCandidate = response.candidates[0];\n if (hadBadFinishReason(firstCandidate)) {\n message += `Candidate was blocked due to ${firstCandidate.finishReason}`;\n if (firstCandidate.finishMessage) {\n message += `: ${firstCandidate.finishMessage}`;\n }\n }\n }\n return message;\n}\n\n/**\n * Convert a generic successful fetch response body to an Imagen response object\n * that can be returned to the user. 
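Before the Imagen predict handling below, a usage sketch of the helpers wired up by addHelpers(); summarize is a hypothetical consumer, and the import assumes EnhancedGenerateContentResponse is re-exported from the public entry point.

import { EnhancedGenerateContentResponse } from 'firebase/ai';

function summarize(response: EnhancedGenerateContentResponse): string {
  const calls = response.functionCalls();     // undefined when no functionCall parts exist
  if (calls && calls.length > 0) {
    return `model requested ${calls.length} function call(s)`;
  }
  const thoughts = response.thoughtSummary(); // undefined unless thought parts are present
  return thoughts ? `${thoughts}\n---\n${response.text()}` : response.text();
}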
This converts the REST APIs response format to our\n * APIs representation of a response.\n *\n * @internal\n */\nexport async function handlePredictResponse<\n T extends ImagenInlineImage | ImagenGCSImage\n>(response: Response): Promise<{ images: T[]; filteredReason?: string }> {\n const responseJson: ImagenResponseInternal = await response.json();\n\n const images: T[] = [];\n let filteredReason: string | undefined = undefined;\n\n // The backend should always send a non-empty array of predictions if the response was successful.\n if (!responseJson.predictions || responseJson.predictions?.length === 0) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'\n );\n }\n\n for (const prediction of responseJson.predictions) {\n if (prediction.raiFilteredReason) {\n filteredReason = prediction.raiFilteredReason;\n } else if (prediction.mimeType && prediction.bytesBase64Encoded) {\n images.push({\n mimeType: prediction.mimeType,\n bytesBase64Encoded: prediction.bytesBase64Encoded\n } as T);\n } else if (prediction.mimeType && prediction.gcsUri) {\n images.push({\n mimeType: prediction.mimeType,\n gcsURI: prediction.gcsUri\n } as T);\n } else if (prediction.safetyAttributes) {\n // Ignore safetyAttributes \"prediction\" to avoid throwing an error below.\n } else {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Unexpected element in 'predictions' array in response: '${JSON.stringify(\n prediction\n )}'`\n );\n }\n }\n\n return { images, filteredReason };\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport {\n CitationMetadata,\n CountTokensRequest,\n GenerateContentCandidate,\n GenerateContentRequest,\n GenerateContentResponse,\n HarmSeverity,\n InlineDataPart,\n PromptFeedback,\n SafetyRating,\n AIErrorCode\n} from './types';\nimport {\n GoogleAIGenerateContentResponse,\n GoogleAIGenerateContentCandidate,\n GoogleAICountTokensRequest\n} from './types/googleai';\n\n/**\n * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).\n * The public API prioritizes the format used by the Vertex AI Gemini API.\n * We avoid having two sets of types by translating requests and responses between the two API formats.\n * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API\n * with minimal code changes.\n *\n * In here are functions that map requests and responses between the two API formats.\n * Requests in the Vertex AI format are mapped to the Google AI format before being sent.\n * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.\n */\n\n/**\n * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google 
AI.\n *\n * @param generateContentRequest The {@link GenerateContentRequest} to map.\n * @returns A {@link GenerateContentResponse} that conforms to the Google AI format.\n *\n * @throws If the request contains properties that are unsupported by Google AI.\n *\n * @internal\n */\nexport function mapGenerateContentRequest(\n generateContentRequest: GenerateContentRequest\n): GenerateContentRequest {\n generateContentRequest.safetySettings?.forEach(safetySetting => {\n if (safetySetting.method) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'SafetySetting.method is not supported in the the Gemini Developer API. Please remove this property.'\n );\n }\n });\n\n if (generateContentRequest.generationConfig?.topK) {\n const roundedTopK = Math.round(\n generateContentRequest.generationConfig.topK\n );\n\n if (roundedTopK !== generateContentRequest.generationConfig.topK) {\n logger.warn(\n 'topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.'\n );\n generateContentRequest.generationConfig.topK = roundedTopK;\n }\n }\n\n return generateContentRequest;\n}\n\n/**\n * Maps a {@link GenerateContentResponse} from Google AI to the format of the\n * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.\n *\n * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.\n * @returns A {@link GenerateContentResponse} that conforms to the public API's format.\n *\n * @internal\n */\nexport function mapGenerateContentResponse(\n googleAIResponse: GoogleAIGenerateContentResponse\n): GenerateContentResponse {\n const generateContentResponse = {\n candidates: googleAIResponse.candidates\n ? mapGenerateContentCandidates(googleAIResponse.candidates)\n : undefined,\n prompt: googleAIResponse.promptFeedback\n ? 
mapPromptFeedback(googleAIResponse.promptFeedback)\n : undefined,\n usageMetadata: googleAIResponse.usageMetadata\n };\n\n return generateContentResponse;\n}\n\n/**\n * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.\n *\n * @param countTokensRequest The {@link CountTokensRequest} to map.\n * @param model The model to count tokens with.\n * @returns A {@link CountTokensRequest} that conforms to the Google AI format.\n *\n * @internal\n */\nexport function mapCountTokensRequest(\n countTokensRequest: CountTokensRequest,\n model: string\n): GoogleAICountTokensRequest {\n const mappedCountTokensRequest: GoogleAICountTokensRequest = {\n generateContentRequest: {\n model,\n ...countTokensRequest\n }\n };\n\n return mappedCountTokensRequest;\n}\n\n/**\n * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms\n * to the Vertex AI API format.\n *\n * @param candidates The {@link GoogleAIGenerateContentCandidate} to map.\n * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format.\n *\n * @throws If any {@link Part} in the candidates has a `videoMetadata` property.\n *\n * @internal\n */\nexport function mapGenerateContentCandidates(\n candidates: GoogleAIGenerateContentCandidate[]\n): GenerateContentCandidate[] {\n const mappedCandidates: GenerateContentCandidate[] = [];\n let mappedSafetyRatings: SafetyRating[];\n if (mappedCandidates) {\n candidates.forEach(candidate => {\n // Map citationSources to citations.\n let citationMetadata: CitationMetadata | undefined;\n if (candidate.citationMetadata) {\n citationMetadata = {\n citations: candidate.citationMetadata.citationSources\n };\n }\n\n // Assign missing candidate SafetyRatings properties to their defaults if undefined.\n if (candidate.safetyRatings) {\n mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => {\n return {\n ...safetyRating,\n severity:\n safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 0\n };\n });\n }\n\n // videoMetadata is not supported.\n // Throw early since developers may send a long video as input and only expect to pay\n // for inference on a small portion of the video.\n if (\n candidate.content?.parts?.some(\n part => (part as InlineDataPart)?.videoMetadata\n )\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'\n );\n }\n\n const mappedCandidate = {\n index: candidate.index,\n content: candidate.content,\n finishReason: candidate.finishReason,\n finishMessage: candidate.finishMessage,\n safetyRatings: mappedSafetyRatings,\n citationMetadata,\n groundingMetadata: candidate.groundingMetadata,\n urlContextMetadata: candidate.urlContextMetadata\n };\n mappedCandidates.push(mappedCandidate);\n });\n }\n\n return mappedCandidates;\n}\n\nexport function mapPromptFeedback(\n promptFeedback: PromptFeedback\n): PromptFeedback {\n // Assign missing SafetyRating properties to their defaults if undefined.\n const mappedSafetyRatings: SafetyRating[] = [];\n promptFeedback.safetyRatings.forEach(safetyRating => {\n mappedSafetyRatings.push({\n category: safetyRating.category,\n probability: safetyRating.probability,\n severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 
0,\n blocked: safetyRating.blocked\n });\n });\n\n const mappedPromptFeedback: PromptFeedback = {\n blockReason: promptFeedback.blockReason,\n safetyRatings: mappedSafetyRatings,\n blockReasonMessage: promptFeedback.blockReasonMessage\n };\n return mappedPromptFeedback;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n GenerateContentCandidate,\n GenerateContentResponse,\n GenerateContentStreamResult,\n Part,\n AIErrorCode\n} from '../types';\nimport { AIError } from '../errors';\nimport { createEnhancedContentResponse } from './response-helpers';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { GoogleAIGenerateContentResponse } from '../types/googleai';\nimport { ApiSettings } from '../types/internal';\nimport {\n BackendType,\n InferenceSource,\n URLContextMetadata\n} from '../public-types';\n\nconst responseLineRE = /^data\\: (.*)(?:\\n\\n|\\r\\r|\\r\\n\\r\\n)/;\n\n/**\n * Process a response.body stream from the backend and return an\n * iterator that provides one complete GenerateContentResponse at a time\n * and a promise that resolves with a single aggregated\n * GenerateContentResponse.\n *\n * @param response - Response from a fetch call\n */\nexport function processStream(\n response: Response,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): GenerateContentStreamResult {\n const inputStream = response.body!.pipeThrough(\n new TextDecoderStream('utf8', { fatal: true })\n );\n const responseStream =\n getResponseStream<GenerateContentResponse>(inputStream);\n const [stream1, stream2] = responseStream.tee();\n return {\n stream: generateResponseSequence(stream1, apiSettings, inferenceSource),\n response: getResponsePromise(stream2, apiSettings, inferenceSource)\n };\n}\n\nasync function getResponsePromise(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): Promise<EnhancedGenerateContentResponse> {\n const allResponses: GenerateContentResponse[] = [];\n const reader = stream.getReader();\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n let generateContentResponse = aggregateResponses(allResponses);\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n generateContentResponse = GoogleAIMapper.mapGenerateContentResponse(\n generateContentResponse as GoogleAIGenerateContentResponse\n );\n }\n return createEnhancedContentResponse(\n generateContentResponse,\n inferenceSource\n );\n }\n\n allResponses.push(value);\n }\n}\n\nasync function* generateResponseSequence(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): AsyncGenerator<EnhancedGenerateContentResponse> {\n const reader = stream.getReader();\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n break;\n }\n\n let enhancedResponse: EnhancedGenerateContentResponse;\n if 
(apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n enhancedResponse = createEnhancedContentResponse(\n GoogleAIMapper.mapGenerateContentResponse(\n value as GoogleAIGenerateContentResponse\n ),\n inferenceSource\n );\n } else {\n enhancedResponse = createEnhancedContentResponse(value, inferenceSource);\n }\n\n const firstCandidate = enhancedResponse.candidates?.[0];\n // Don't yield a response with no useful data for the developer.\n if (\n !firstCandidate?.content?.parts &&\n !firstCandidate?.finishReason &&\n !firstCandidate?.citationMetadata &&\n !firstCandidate?.urlContextMetadata\n ) {\n continue;\n }\n\n yield enhancedResponse;\n }\n}\n\n/**\n * Reads a raw stream from the fetch response and join incomplete\n * chunks, returning a new stream that provides a single complete\n * GenerateContentResponse in each iteration.\n */\nexport function getResponseStream<T>(\n inputStream: ReadableStream<string>\n): ReadableStream<T> {\n const reader = inputStream.getReader();\n const stream = new ReadableStream<T>({\n start(controller) {\n let currentText = '';\n return pump();\n function pump(): Promise<(() => Promise<void>) | undefined> {\n return reader.read().then(({ value, done }) => {\n if (done) {\n if (currentText.trim()) {\n controller.error(\n new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')\n );\n return;\n }\n controller.close();\n return;\n }\n\n currentText += value;\n let match = currentText.match(responseLineRE);\n let parsedResponse: T;\n while (match) {\n try {\n parsedResponse = JSON.parse(match[1]);\n } catch (e) {\n controller.error(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing JSON response: \"${match[1]}`\n )\n );\n return;\n }\n controller.enqueue(parsedResponse);\n currentText = currentText.substring(match[0].length);\n match = currentText.match(responseLineRE);\n }\n return pump();\n });\n }\n }\n });\n return stream;\n}\n\n/**\n * Aggregates an array of `GenerateContentResponse`s into a single\n * GenerateContentResponse.\n */\nexport function aggregateResponses(\n responses: GenerateContentResponse[]\n): GenerateContentResponse {\n const lastResponse = responses[responses.length - 1];\n const aggregatedResponse: GenerateContentResponse = {\n promptFeedback: lastResponse?.promptFeedback\n };\n for (const response of responses) {\n if (response.candidates) {\n for (const candidate of response.candidates) {\n // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined.\n // See: https://github.com/firebase/firebase-js-sdk/issues/8566\n const i = candidate.index || 0;\n if (!aggregatedResponse.candidates) {\n aggregatedResponse.candidates = [];\n }\n if (!aggregatedResponse.candidates[i]) {\n aggregatedResponse.candidates[i] = {\n index: candidate.index\n } as GenerateContentCandidate;\n }\n // Keep overwriting, the last one will be final\n aggregatedResponse.candidates[i].citationMetadata =\n candidate.citationMetadata;\n aggregatedResponse.candidates[i].finishReason = candidate.finishReason;\n aggregatedResponse.candidates[i].finishMessage =\n candidate.finishMessage;\n aggregatedResponse.candidates[i].safetyRatings =\n candidate.safetyRatings;\n aggregatedResponse.candidates[i].groundingMetadata =\n candidate.groundingMetadata;\n\n // The urlContextMetadata object is defined in the first chunk of the response stream.\n // In all subsequent chunks, the urlContextMetadata object will be undefined. 
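For orientation, here is a minimal sketch (not part of the SDK) of the server-sent-event framing that `getResponseStream` and `responseLineRE` above are written against: each record is a `data: <JSON>` line terminated by a blank line, and partial records are buffered across reads until the terminator arrives. The payloads below are illustrative.

```ts
// Illustrative SSE body: two GenerateContentResponse records, the second split
// across two reads to show why getResponseStream buffers until it sees "\n\n".
const chunks = [
  'data: {"candidates":[{"index":0,"content":{"role":"model","parts":[{"text":"Hel"}]}}]}\n\n',
  'data: {"candidates":[{"index":0,"content":{"role":"model","parts":[{"text":"lo"}]},',
  '"finishReason":"STOP"}]}\n\n'
];

const sseText = new ReadableStream<string>({
  start(controller) {
    for (const chunk of chunks) {
      controller.enqueue(chunk);
    }
    controller.close();
  }
});
// Feeding sseText into getResponseStream would yield two parsed objects, which
// aggregateResponses then merges per candidate index into one final response.
```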
We need to\n // make sure that we don't overwrite the first value urlContextMetadata object with undefined.\n // FIXME: What happens if we receive a second, valid urlContextMetadata object?\n const urlContextMetadata = candidate.urlContextMetadata as unknown;\n if (\n typeof urlContextMetadata === 'object' &&\n urlContextMetadata !== null &&\n Object.keys(urlContextMetadata).length > 0\n ) {\n aggregatedResponse.candidates[i].urlContextMetadata =\n urlContextMetadata as URLContextMetadata;\n }\n\n /**\n * Candidates should always have content and parts, but this handles\n * possible malformed responses.\n */\n if (candidate.content) {\n // Skip a candidate without parts.\n if (!candidate.content.parts) {\n continue;\n }\n if (!aggregatedResponse.candidates[i].content) {\n aggregatedResponse.candidates[i].content = {\n role: candidate.content.role || 'user',\n parts: []\n };\n }\n for (const part of candidate.content.parts) {\n const newPart: Part = { ...part };\n // The backend can send empty text parts. If these are sent back\n // (e.g. in chat history), the backend will respond with an error.\n // To prevent this, ignore empty text parts.\n if (part.text === '') {\n continue;\n }\n if (Object.keys(newPart).length > 0) {\n aggregatedResponse.candidates[i].content.parts.push(\n newPart as Part\n );\n }\n }\n }\n }\n }\n }\n return aggregatedResponse;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n GenerateContentRequest,\n InferenceMode,\n AIErrorCode,\n ChromeAdapter,\n InferenceSource\n} from '../types';\nimport { ChromeAdapterImpl } from '../methods/chrome-adapter';\n\nconst errorsCausingFallback: AIErrorCode[] = [\n // most network errors\n AIErrorCode.FETCH_ERROR,\n // fallback code for all other errors in makeRequest\n AIErrorCode.ERROR,\n // error due to API not being enabled in project\n AIErrorCode.API_NOT_ENABLED\n];\n\ninterface CallResult<Response> {\n response: Response;\n inferenceSource: InferenceSource;\n}\n\n/**\n * Dispatches a request to the appropriate backend (on-device or in-cloud)\n * based on the inference mode.\n *\n * @param request - The request to be sent.\n * @param chromeAdapter - The on-device model adapter.\n * @param onDeviceCall - The function to call for on-device inference.\n * @param inCloudCall - The function to call for in-cloud inference.\n * @returns The response from the backend.\n */\nexport async function callCloudOrDevice<Response>(\n request: GenerateContentRequest,\n chromeAdapter: ChromeAdapter | undefined,\n onDeviceCall: () => Promise<Response>,\n inCloudCall: () => Promise<Response>\n): Promise<CallResult<Response>> {\n if (!chromeAdapter) {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n }\n switch ((chromeAdapter as ChromeAdapterImpl).mode) {\n case InferenceMode.ONLY_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await 
onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.'\n );\n case InferenceMode.ONLY_IN_CLOUD:\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n case InferenceMode.PREFER_IN_CLOUD:\n try {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n } catch (e) {\n if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw e;\n }\n case InferenceMode.PREFER_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Unexpected infererence mode: ${\n (chromeAdapter as ChromeAdapterImpl).mode\n }`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n GenerateContentRequest,\n GenerateContentResponse,\n GenerateContentResult,\n GenerateContentStreamResult,\n RequestOptions\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createEnhancedContentResponse } from '../requests/response-helpers';\nimport { processStream } from '../requests/stream-reader';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { callCloudOrDevice } from '../requests/hybrid-helpers';\n\nasync function generateContentStreamOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.STREAM_GENERATE_CONTENT,\n apiSettings,\n /* stream */ true,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContentStream(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentStreamResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContentStream(params),\n () =>\n generateContentStreamOnCloud(apiSettings, model, params, requestOptions)\n );\n return processStream(callResult.response, apiSettings); // TODO: Map streaming responses\n}\n\nasync function generateContentOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n 
if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.GENERATE_CONTENT,\n apiSettings,\n /* stream */ false,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContent(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContent(params),\n () => generateContentOnCloud(apiSettings, model, params, requestOptions)\n );\n const generateContentResponse = await processGenerateContentResponse(\n callResult.response,\n apiSettings\n );\n const enhancedResponse = createEnhancedContentResponse(\n generateContentResponse,\n callResult.inferenceSource\n );\n return {\n response: enhancedResponse\n };\n}\n\nasync function processGenerateContentResponse(\n response: Response,\n apiSettings: ApiSettings\n): Promise<GenerateContentResponse> {\n const responseJson = await response.json();\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return GoogleAIMapper.mapGenerateContentResponse(responseJson);\n } else {\n return responseJson;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, GenerateContentRequest, Part, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ImagenGenerationParams, PredictRequestBody } from '../types/internal';\n\nexport function formatSystemInstruction(\n input?: string | Part | Content\n): Content | undefined {\n // null or undefined\n if (input == null) {\n return undefined;\n } else if (typeof input === 'string') {\n return { role: 'system', parts: [{ text: input }] } as Content;\n } else if ((input as Part).text) {\n return { role: 'system', parts: [input as Part] };\n } else if ((input as Content).parts) {\n if (!(input as Content).role) {\n return { role: 'system', parts: (input as Content).parts };\n } else {\n return input as Content;\n }\n }\n}\n\nexport function formatNewContent(\n request: string | Array<string | Part>\n): Content {\n let newParts: Part[] = [];\n if (typeof request === 'string') {\n newParts = [{ text: request }];\n } else {\n for (const partOrString of request) {\n if (typeof partOrString === 'string') {\n newParts.push({ text: partOrString });\n } else {\n newParts.push(partOrString);\n }\n }\n }\n return assignRoleToPartsAndValidateSendMessageRequest(newParts);\n}\n\n/**\n * When multiple Part types (i.e. FunctionResponsePart and TextPart) are\n * passed in a single Part array, we may need to assign different roles to each\n * part. 
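As a quick illustration of the request normalization performed by `formatSystemInstruction` and `formatNewContent` above, here is a hedged sketch of the shapes they produce; the local types and literal values are illustrative stand-ins, not SDK exports.

```ts
// Minimal stand-in types mirroring Content/Part, for illustration only.
type SketchPart = { text?: string; inlineData?: { mimeType: string; data: string } };
type SketchContent = { role: string; parts: SketchPart[] };

// formatSystemInstruction('Answer in one sentence.') wraps a plain string as a
// single text part under role 'system':
const systemInstruction: SketchContent = {
  role: 'system',
  parts: [{ text: 'Answer in one sentence.' }]
};

// formatNewContent(['Describe this image.', { inlineData: {...} }]) turns a mixed
// string/Part array into one 'user' Content; FunctionResponse parts are instead
// routed to role 'function', and mixing the two throws INVALID_CONTENT.
const userTurn: SketchContent = {
  role: 'user',
  parts: [
    { text: 'Describe this image.' },
    { inlineData: { mimeType: 'image/png', data: '<base64>' } }
  ]
};

console.log(systemInstruction, userTurn);
```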
Currently only FunctionResponsePart requires a role other than 'user'.\n * @private\n * @param parts Array of parts to pass to the model\n * @returns Array of content items\n */\nfunction assignRoleToPartsAndValidateSendMessageRequest(\n parts: Part[]\n): Content {\n const userContent: Content = { role: 'user', parts: [] };\n const functionContent: Content = { role: 'function', parts: [] };\n let hasUserContent = false;\n let hasFunctionContent = false;\n for (const part of parts) {\n if ('functionResponse' in part) {\n functionContent.parts.push(part);\n hasFunctionContent = true;\n } else {\n userContent.parts.push(part);\n hasUserContent = true;\n }\n }\n\n if (hasUserContent && hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'\n );\n }\n\n if (!hasUserContent && !hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'No Content is provided for sending chat message.'\n );\n }\n\n if (hasUserContent) {\n return userContent;\n }\n\n return functionContent;\n}\n\nexport function formatGenerateContentInput(\n params: GenerateContentRequest | string | Array<string | Part>\n): GenerateContentRequest {\n let formattedRequest: GenerateContentRequest;\n if ((params as GenerateContentRequest).contents) {\n formattedRequest = params as GenerateContentRequest;\n } else {\n // Array or string\n const content = formatNewContent(params as string | Array<string | Part>);\n formattedRequest = { contents: [content] };\n }\n if ((params as GenerateContentRequest).systemInstruction) {\n formattedRequest.systemInstruction = formatSystemInstruction(\n (params as GenerateContentRequest).systemInstruction\n );\n }\n return formattedRequest;\n}\n\n/**\n * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format\n * that is expected from the REST API.\n *\n * @internal\n */\nexport function createPredictRequestBody(\n prompt: string,\n {\n gcsURI,\n imageFormat,\n addWatermark,\n numberOfImages = 1,\n negativePrompt,\n aspectRatio,\n safetyFilterLevel,\n personFilterLevel\n }: ImagenGenerationParams\n): PredictRequestBody {\n // Properties that are undefined will be omitted from the JSON string that is sent in the request.\n const body: PredictRequestBody = {\n instances: [\n {\n prompt\n }\n ],\n parameters: {\n storageUri: gcsURI,\n negativePrompt,\n sampleCount: numberOfImages,\n aspectRatio,\n outputOptions: imageFormat,\n addWatermark,\n safetyFilterLevel,\n personGeneration: personFilterLevel,\n includeRaiReason: true,\n includeSafetyAttributes: true\n }\n };\n return body;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, POSSIBLE_ROLES, Part, Role, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\n\n// https://ai.google.dev/api/rest/v1beta/Content#part\n\nconst VALID_PART_FIELDS: Array<keyof Part> = 
[\n 'text',\n 'inlineData',\n 'functionCall',\n 'functionResponse',\n 'thought',\n 'thoughtSignature'\n];\n\nconst VALID_PARTS_PER_ROLE: { [key in Role]: Array<keyof Part> } = {\n user: ['text', 'inlineData'],\n function: ['functionResponse'],\n model: ['text', 'functionCall', 'thought', 'thoughtSignature'],\n // System instructions shouldn't be in history anyway.\n system: ['text']\n};\n\nconst VALID_PREVIOUS_CONTENT_ROLES: { [key in Role]: Role[] } = {\n user: ['model'],\n function: ['model'],\n model: ['user', 'function'],\n // System instructions shouldn't be in history.\n system: []\n};\n\nexport function validateChatHistory(history: Content[]): void {\n let prevContent: Content | null = null;\n for (const currContent of history) {\n const { role, parts } = currContent;\n if (!prevContent && role !== 'user') {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `First Content should be with role 'user', got ${role}`\n );\n }\n if (!POSSIBLE_ROLES.includes(role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(\n POSSIBLE_ROLES\n )}`\n );\n }\n\n if (!Array.isArray(parts)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content should have 'parts' property with an array of Parts`\n );\n }\n\n if (parts.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each Content should have at least one part`\n );\n }\n\n const countFields: Record<keyof Part, number> = {\n text: 0,\n inlineData: 0,\n functionCall: 0,\n functionResponse: 0,\n thought: 0,\n thoughtSignature: 0,\n executableCode: 0,\n codeExecutionResult: 0\n };\n\n for (const part of parts) {\n for (const key of VALID_PART_FIELDS) {\n if (key in part) {\n countFields[key] += 1;\n }\n }\n }\n const validParts = VALID_PARTS_PER_ROLE[role];\n for (const key of VALID_PART_FIELDS) {\n if (!validParts.includes(key) && countFields[key] > 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't contain '${key}' part`\n );\n }\n }\n\n if (prevContent) {\n const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role];\n if (!validPreviousContentRoles.includes(prevContent.role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't follow '${\n prevContent.role\n }'. 
Valid previous roles: ${JSON.stringify(\n VALID_PREVIOUS_CONTENT_ROLES\n )}`\n );\n }\n }\n prevContent = currContent;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n Content,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n Part,\n RequestOptions,\n StartChatParams\n} from '../types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { formatBlockErrorMessage } from '../requests/response-helpers';\nimport { validateChatHistory } from './chat-session-helpers';\nimport { generateContent, generateContentStream } from './generate-content';\nimport { ApiSettings } from '../types/internal';\nimport { logger } from '../logger';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Do not log a message for this error.\n */\nconst SILENT_ERROR = 'SILENT_ERROR';\n\n/**\n * ChatSession class that enables sending chat messages and stores\n * history of sent and received messages so far.\n *\n * @public\n */\nexport class ChatSession {\n private _apiSettings: ApiSettings;\n private _history: Content[] = [];\n private _sendPromise: Promise<void> = Promise.resolve();\n\n constructor(\n apiSettings: ApiSettings,\n public model: string,\n private chromeAdapter?: ChromeAdapter,\n public params?: StartChatParams,\n public requestOptions?: RequestOptions\n ) {\n this._apiSettings = apiSettings;\n if (params?.history) {\n validateChatHistory(params.history);\n this._history = params.history;\n }\n }\n\n /**\n * Gets the chat history so far. 
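For reference, a minimal history literal that passes the `validateChatHistory` checks above: the first turn has role `'user'`, every turn has at least one part of a type allowed for its role, and each role follows a permitted predecessor. The function-calling payloads are illustrative.

```ts
const history = [
  // Must start with a 'user' turn.
  { role: 'user', parts: [{ text: 'What is the weather in Paris?' }] },
  // 'model' may follow 'user' and may carry text or functionCall parts.
  {
    role: 'model',
    parts: [{ functionCall: { name: 'getWeather', args: { city: 'Paris' } } }]
  },
  // 'function' may only follow 'model' and may only carry functionResponse parts.
  {
    role: 'function',
    parts: [{ functionResponse: { name: 'getWeather', response: { tempC: 21 } } }]
  },
  // 'model' may follow 'function'.
  { role: 'model', parts: [{ text: 'It is currently 21 °C in Paris.' }] }
];
```

A history shaped like this is what `ChatSession` validates in its constructor when `StartChatParams.history` is provided.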
Blocked prompts are not added to history.\n * Neither blocked candidates nor the prompts that generated them are added\n * to history.\n */\n async getHistory(): Promise<Content[]> {\n await this._sendPromise;\n return this._history;\n }\n\n /**\n * Sends a chat message and receives a non-streaming\n * {@link GenerateContentResult}\n */\n async sendMessage(\n request: string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n let finalResult = {} as GenerateContentResult;\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() =>\n generateContent(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n )\n )\n .then(result => {\n if (\n result.response.candidates &&\n result.response.candidates.length > 0\n ) {\n this._history.push(newContent);\n const responseContent: Content = {\n parts: result.response.candidates?.[0].content.parts || [],\n // Response seems to come back without a role set.\n role: result.response.candidates?.[0].content.role || 'model'\n };\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(result.response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`\n );\n }\n }\n finalResult = result;\n });\n await this._sendPromise;\n return finalResult;\n }\n\n /**\n * Sends a chat message and receives the response as a\n * {@link GenerateContentStreamResult} containing an iterable stream\n * and a response promise.\n */\n async sendMessageStream(\n request: string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n const streamPromise = generateContentStream(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n );\n\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() => streamPromise)\n // This must be handled to avoid unhandled rejection, but jump\n // to the final catch block with a label to not log this error.\n .catch(_ignored => {\n throw new Error(SILENT_ERROR);\n })\n .then(streamResult => streamResult.response)\n .then(response => {\n if (response.candidates && response.candidates.length > 0) {\n this._history.push(newContent);\n const responseContent = { ...response.candidates[0].content };\n // Response seems to come back without a role set.\n if (!responseContent.role) {\n responseContent.role = 'model';\n }\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessageStream() was unsuccessful. ${blockErrorMessage}. 
Inspect response object for details.`\n );\n }\n }\n })\n .catch(e => {\n // Errors in streamPromise are already catchable by the user as\n // streamPromise is returned.\n // Avoid duplicating the error message in logs.\n if (e.message !== SILENT_ERROR) {\n // Users do not have access to _sendPromise to catch errors\n // downstream from streamPromise, so they should not throw.\n logger.error(e);\n }\n });\n return streamPromise;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n CountTokensRequest,\n CountTokensResponse,\n InferenceMode,\n RequestOptions,\n AIErrorCode\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { ChromeAdapterImpl } from './chrome-adapter';\n\nexport async function countTokensOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n let body: string = '';\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n const mappedParams = GoogleAIMapper.mapCountTokensRequest(params, model);\n body = JSON.stringify(mappedParams);\n } else {\n body = JSON.stringify(params);\n }\n const response = await makeRequest(\n model,\n Task.COUNT_TOKENS,\n apiSettings,\n false,\n body,\n requestOptions\n );\n return response.json();\n}\n\nexport async function countTokens(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n if (\n (chromeAdapter as ChromeAdapterImpl)?.mode === InferenceMode.ONLY_ON_DEVICE\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'countTokens() is not supported for on-device models.'\n );\n }\n return countTokensOnCloud(apiSettings, model, params, requestOptions);\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n generateContent,\n generateContentStream\n} from '../methods/generate-content';\nimport {\n Content,\n CountTokensRequest,\n CountTokensResponse,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n GenerationConfig,\n ModelParams,\n 
Part,\n RequestOptions,\n SafetySetting,\n StartChatParams,\n Tool,\n ToolConfig\n} from '../types';\nimport { ChatSession } from '../methods/chat-session';\nimport { countTokens } from '../methods/count-tokens';\nimport {\n formatGenerateContentInput,\n formatSystemInstruction\n} from '../requests/request-helpers';\nimport { AI } from '../public-types';\nimport { AIModel } from './ai-model';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Class for generative model APIs.\n * @public\n */\nexport class GenerativeModel extends AIModel {\n generationConfig: GenerationConfig;\n safetySettings: SafetySetting[];\n requestOptions?: RequestOptions;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n constructor(\n ai: AI,\n modelParams: ModelParams,\n requestOptions?: RequestOptions,\n private chromeAdapter?: ChromeAdapter\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.safetySettings = modelParams.safetySettings || [];\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n this.requestOptions = requestOptions || {};\n }\n\n /**\n * Makes a single non-streaming call to the model\n * and returns an object containing a single {@link GenerateContentResponse}.\n */\n async generateContent(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContent(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Makes a single streaming call to the model\n * and returns an object containing an iterable stream that iterates\n * over all chunks in the streaming response as well as\n * a promise that returns the final aggregated response.\n */\n async generateContentStream(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContentStream(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Gets a new {@link ChatSession} instance which can be used for\n * multi-turn chats.\n */\n startChat(startChatParams?: StartChatParams): ChatSession {\n return new ChatSession(\n this._apiSettings,\n this.model,\n this.chromeAdapter,\n {\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n /**\n * Overrides params inherited from GenerativeModel with those explicitly set in the\n * StartChatParams. 
For example, if startChatParams.generationConfig is set, it'll override\n * this.generationConfig.\n */\n ...startChatParams\n },\n this.requestOptions\n );\n }\n\n /**\n * Counts the tokens in the provided request.\n */\n async countTokens(\n request: CountTokensRequest | string | Array<string | Part>\n ): Promise<CountTokensResponse> {\n const formattedParams = formatGenerateContentInput(request);\n return countTokens(\n this._apiSettings,\n this.model,\n formattedParams,\n this.chromeAdapter\n );\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n AIErrorCode,\n FunctionResponse,\n GenerativeContentBlob,\n LiveResponseType,\n LiveServerContent,\n LiveServerToolCall,\n LiveServerToolCallCancellation,\n Part\n} from '../public-types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { AIError } from '../errors';\nimport { WebSocketHandler } from '../websocket';\nimport { logger } from '../logger';\nimport {\n _LiveClientContent,\n _LiveClientRealtimeInput,\n _LiveClientToolResponse\n} from '../types/live-responses';\n\n/**\n * Represents an active, real-time, bidirectional conversation with the model.\n *\n * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.\n *\n * @beta\n */\nexport class LiveSession {\n /**\n * Indicates whether this Live session is closed.\n *\n * @beta\n */\n isClosed = false;\n /**\n * Indicates whether this Live session is being controlled by an `AudioConversationController`.\n *\n * @beta\n */\n inConversation = false;\n\n /**\n * @internal\n */\n constructor(\n private webSocketHandler: WebSocketHandler,\n private serverMessages: AsyncGenerator<unknown>\n ) {}\n\n /**\n * Sends content to the server.\n *\n * @param request - The message to send to the model.\n * @param turnComplete - Indicates if the turn is complete. 
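To tie the pieces of the `GenerativeModel` class above together, here is a hedged usage sketch. It assumes the package's public entry points (`initializeApp`, `getAI`, `getGenerativeModel`), which are not shown in this excerpt; the model name and prompts are illustrative.

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel } from 'firebase/ai';

async function run(): Promise<void> {
  const app = initializeApp({ /* your Firebase config */ });
  const ai = getAI(app);
  const model = getGenerativeModel(ai, { model: 'gemini-2.0-flash' }); // illustrative model name

  // Single-turn call: generationConfig/safetySettings/tools configured on the model
  // are merged with the formatted request before dispatch.
  const result = await model.generateContent('Write a haiku about the sea.');
  console.log(result.response.text());

  // Multi-turn: startChat() returns a ChatSession that inherits the model's params
  // unless they are overridden in StartChatParams.
  const chat = model.startChat();
  const reply = await chat.sendMessage('Now translate it into French.');
  console.log(reply.response.text());
}
```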
Defaults to false.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async send(\n request: string | Array<string | Part>,\n turnComplete = true\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const newContent = formatNewContent(request);\n\n const message: _LiveClientContent = {\n clientContent: {\n turns: [newContent],\n turnComplete\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends text to the server in realtime.\n *\n * @example\n * ```javascript\n * liveSession.sendTextRealtime(\"Hello, how are you?\");\n * ```\n *\n * @param text - The text data to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendTextRealtime(text: string): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n text\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends audio data to the server in realtime.\n *\n * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz\n * little-endian.\n *\n * @example\n * ```javascript\n * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.\n * const blob = { mimeType: \"audio/pcm\", data: pcmData };\n * liveSession.sendAudioRealtime(blob);\n * ```\n *\n * @param blob - The base64-encoded PCM data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendAudioRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n audio: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends video data to the server in realtime.\n *\n * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It\n * is recommended to set `mimeType` to `image/jpeg`.\n *\n * @example\n * ```javascript\n * // const videoFrame = ... 
base64-encoded JPEG data\n * const blob = { mimeType: \"image/jpeg\", data: videoFrame };\n * liveSession.sendVideoRealtime(blob);\n * ```\n * @param blob - The base64-encoded video data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendVideoRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n video: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends function responses to the server.\n *\n * @param functionResponses - The function responses to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendFunctionResponses(\n functionResponses: FunctionResponse[]\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientToolResponse = {\n toolResponse: {\n functionResponses\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Yields messages received from the server.\n * This can only be used by one consumer at a time.\n *\n * @returns An `AsyncGenerator` that yields server messages as they arrive.\n * @throws If the session is already closed, or if we receive a response that we don't support.\n *\n * @beta\n */\n async *receive(): AsyncGenerator<\n LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation\n > {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot read from a Live session that is closed. Try starting a new Live session.'\n );\n }\n for await (const message of this.serverMessages) {\n if (message && typeof message === 'object') {\n if (LiveResponseType.SERVER_CONTENT in message) {\n yield {\n type: 'serverContent',\n ...(message as { serverContent: Omit<LiveServerContent, 'type'> })\n .serverContent\n } as LiveServerContent;\n } else if (LiveResponseType.TOOL_CALL in message) {\n yield {\n type: 'toolCall',\n ...(message as { toolCall: Omit<LiveServerToolCall, 'type'> })\n .toolCall\n } as LiveServerToolCall;\n } else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) {\n yield {\n type: 'toolCallCancellation',\n ...(\n message as {\n toolCallCancellation: Omit<\n LiveServerToolCallCancellation,\n 'type'\n >;\n }\n ).toolCallCancellation\n } as LiveServerToolCallCancellation;\n } else {\n logger.warn(\n `Received an unknown message type from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n } else {\n logger.warn(\n `Received an invalid message from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n }\n }\n\n /**\n * Closes this session.\n * All methods on this session will throw an error once this resolves.\n *\n * @beta\n */\n async close(): Promise<void> {\n if (!this.isClosed) {\n this.isClosed = true;\n await this.webSocketHandler.close(1000, 'Client closed session.');\n }\n }\n\n /**\n * Sends realtime input to the server.\n *\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * @param mediaChunks - The media chunks to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed 
and cannot be used.'\n );\n }\n\n // The backend does not support sending more than one mediaChunk in one message.\n // Work around this limitation by sending mediaChunks in separate messages.\n mediaChunks.forEach(mediaChunk => {\n const message: _LiveClientRealtimeInput = {\n realtimeInput: { mediaChunks: [mediaChunk] }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n });\n }\n\n /**\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * Sends a stream of {@link GenerativeContentBlob}.\n *\n * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaStream(\n mediaChunkStream: ReadableStream<GenerativeContentBlob>\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const reader = mediaChunkStream.getReader();\n while (true) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n break;\n } else if (!value) {\n throw new Error('Missing chunk in reader, but reader is not done.');\n }\n\n await this.sendMediaChunks([value]);\n } catch (e) {\n // Re-throw any errors that occur during stream consumption or sending.\n const message =\n e instanceof Error ? e.message : 'Error processing media stream.';\n throw new AIError(AIErrorCode.REQUEST_ERROR, message);\n }\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIModel } from './ai-model';\nimport { LiveSession } from '../methods/live-session';\nimport { AIError } from '../errors';\nimport {\n AI,\n AIErrorCode,\n BackendType,\n Content,\n LiveGenerationConfig,\n LiveModelParams,\n Tool,\n ToolConfig\n} from '../public-types';\nimport { WebSocketHandler } from '../websocket';\nimport { WebSocketUrl } from '../requests/request';\nimport { formatSystemInstruction } from '../requests/request-helpers';\nimport { _LiveClientSetup } from '../types/live-responses';\n\n/**\n * Class for Live generative model APIs. 
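The `receive()` generator above yields three message shapes. A hedged consumption sketch follows; it assumes `LiveSession` is re-exported from the public `firebase/ai` entry point and that the session was obtained from `LiveGenerativeModel.connect()`.

```ts
import type { LiveSession } from 'firebase/ai';

async function handleServerMessages(session: LiveSession): Promise<void> {
  // receive() yields until the server closes the connection; only one consumer
  // may iterate it at a time.
  for await (const message of session.receive()) {
    switch (message.type) {
      case 'serverContent':
        // Incremental model output for the current turn.
        console.log('server content:', JSON.stringify(message));
        break;
      case 'toolCall':
        // The model requested function execution; results are returned with
        // session.sendFunctionResponses([...]).
        console.log('tool call:', JSON.stringify(message));
        break;
      case 'toolCallCancellation':
        console.log('tool call cancelled:', JSON.stringify(message));
        break;
    }
  }
}
```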
The Live API enables low-latency, two-way multimodal\n * interactions with Gemini.\n *\n * This class should only be instantiated with {@link getLiveGenerativeModel}.\n *\n * @beta\n */\nexport class LiveGenerativeModel extends AIModel {\n generationConfig: LiveGenerationConfig;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n /**\n * @internal\n */\n constructor(\n ai: AI,\n modelParams: LiveModelParams,\n /**\n * @internal\n */\n private _webSocketHandler: WebSocketHandler\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n }\n\n /**\n * Starts a {@link LiveSession}.\n *\n * @returns A {@link LiveSession}.\n * @throws If the connection failed to be established with the server.\n *\n * @beta\n */\n async connect(): Promise<LiveSession> {\n const url = new WebSocketUrl(this._apiSettings);\n await this._webSocketHandler.connect(url.toString());\n\n let fullModelPath: string;\n if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n fullModelPath = `projects/${this._apiSettings.project}/${this.model}`;\n } else {\n fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`;\n }\n\n // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API,\n // but the backend expects them to be in the `setup` message.\n const {\n inputAudioTranscription,\n outputAudioTranscription,\n ...generationConfig\n } = this.generationConfig;\n\n const setupMessage: _LiveClientSetup = {\n setup: {\n model: fullModelPath,\n generationConfig,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n inputAudioTranscription,\n outputAudioTranscription\n }\n };\n\n try {\n // Begin listening for server messages, and begin the handshake by sending the 'setupMessage'\n const serverMessages = this._webSocketHandler.listen();\n this._webSocketHandler.send(JSON.stringify(setupMessage));\n\n // Verify we received the handshake response 'setupComplete'\n const firstMessage = (await serverMessages.next()).value;\n if (\n !firstMessage ||\n !(typeof firstMessage === 'object') ||\n !('setupComplete' in firstMessage)\n ) {\n await this._webSocketHandler.close(1011, 'Handshake failure');\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'Server connection handshake failed. 
The server did not respond with a setupComplete message.'\n );\n }\n\n return new LiveSession(this._webSocketHandler, serverMessages);\n } catch (e) {\n // Ensure connection is closed on any setup error\n await this._webSocketHandler.close();\n throw e;\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AI } from '../public-types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createPredictRequestBody } from '../requests/request-helpers';\nimport { handlePredictResponse } from '../requests/response-helpers';\nimport {\n ImagenGCSImage,\n ImagenGenerationConfig,\n ImagenInlineImage,\n RequestOptions,\n ImagenModelParams,\n ImagenGenerationResponse,\n ImagenSafetySettings\n} from '../types';\nimport { AIModel } from './ai-model';\n\n/**\n * Class for Imagen model APIs.\n *\n * This class provides methods for generating images using the Imagen model.\n *\n * @example\n * ```javascript\n * const imagen = new ImagenModel(\n * ai,\n * {\n * model: 'imagen-3.0-generate-002'\n * }\n * );\n *\n * const response = await imagen.generateImages('A photo of a cat');\n * if (response.images.length > 0) {\n * console.log(response.images[0].bytesBase64Encoded);\n * }\n * ```\n *\n * @public\n */\nexport class ImagenModel extends AIModel {\n /**\n * The Imagen generation configuration.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n\n /**\n * Constructs a new instance of the {@link ImagenModel} class.\n *\n * @param ai - an {@link AI} instance.\n * @param modelParams - Parameters to use when making requests to Imagen.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n */\n constructor(\n ai: AI,\n modelParams: ImagenModelParams,\n public requestOptions?: RequestOptions\n ) {\n const { model, generationConfig, safetySettings } = modelParams;\n super(ai, model);\n this.generationConfig = generationConfig;\n this.safetySettings = safetySettings;\n }\n\n /**\n * Generates images using the Imagen model and returns them as\n * base64-encoded strings.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the generated images.\n *\n * @throws If the request to generate images fails. 
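A hedged sketch of the connection flow that `LiveGenerativeModel.connect()` above implements (open the WebSocket, send the `setup` message, wait for `setupComplete`). It assumes the `getLiveGenerativeModel` entry point referenced in the class docs; the model name is a placeholder.

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getLiveGenerativeModel } from 'firebase/ai';

async function runLiveSession(): Promise<void> {
  const ai = getAI(initializeApp({ /* your Firebase config */ }));
  const liveModel = getLiveGenerativeModel(ai, { model: '<live-capable-model>' });

  // connect() resolves only after the server acknowledges the setup message
  // with `setupComplete`; otherwise the socket is closed and an error is thrown.
  const session = await liveModel.connect();
  try {
    await session.send('Hello!');
    for await (const message of session.receive()) {
      console.log(message.type, message);
      break; // stop after the first server message in this sketch
    }
  } finally {
    await session.close();
  }
}
```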
This happens if the\n * prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n *\n * @public\n */\n async generateImages(\n prompt: string\n ): Promise<ImagenGenerationResponse<ImagenInlineImage>> {\n const body = createPredictRequestBody(prompt, {\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenInlineImage>(response);\n }\n\n /**\n * Generates images to Cloud Storage for Firebase using the Imagen model.\n *\n * @internal This method is temporarily internal.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket.\n * This should be a directory. For example, `gs://my-bucket/my-directory/`.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the URLs of the generated images.\n *\n * @throws If the request fails to generate images fails. This happens if\n * the prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n */\n async generateImagesGCS(\n prompt: string,\n gcsURI: string\n ): Promise<ImagenGenerationResponse<ImagenGCSImage>> {\n const body = createPredictRequestBody(prompt, {\n gcsURI,\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenGCSImage>(response);\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport { AIErrorCode } from './types';\n\n/**\n * A standardized interface for interacting with a WebSocket connection.\n * This abstraction allows the SDK to use the appropriate WebSocket implementation\n * for the current JS environment (Browser vs. 
Node) without\n * changing the core logic of the `LiveSession`.\n * @internal\n */\n\nexport interface WebSocketHandler {\n /**\n * Establishes a connection to the given URL.\n *\n * @param url The WebSocket URL (e.g., wss://...).\n * @returns A promise that resolves on successful connection or rejects on failure.\n */\n connect(url: string): Promise<void>;\n\n /**\n * Sends data over the WebSocket.\n *\n * @param data The string or binary data to send.\n */\n send(data: string | ArrayBuffer): void;\n\n /**\n * Returns an async generator that yields parsed JSON objects from the server.\n * The yielded type is `unknown` because the handler cannot guarantee the shape of the data.\n * The consumer is responsible for type validation.\n * The generator terminates when the connection is closed.\n *\n * @returns A generator that allows consumers to pull messages using a `for await...of` loop.\n */\n listen(): AsyncGenerator<unknown>;\n\n /**\n * Closes the WebSocket connection.\n *\n * @param code - A numeric status code explaining why the connection is closing.\n * @param reason - A human-readable string explaining why the connection is closing.\n */\n close(code?: number, reason?: string): Promise<void>;\n}\n\n/**\n * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.\n *\n * @internal\n */\nexport class WebSocketHandlerImpl implements WebSocketHandler {\n private ws?: WebSocket;\n\n constructor() {\n if (typeof WebSocket === 'undefined') {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'The WebSocket API is not available in this environment. ' +\n 'The \"Live\" feature is not supported here. It is supported in ' +\n 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'\n );\n }\n }\n\n connect(url: string): Promise<void> {\n return new Promise((resolve, reject) => {\n this.ws = new WebSocket(url);\n this.ws.binaryType = 'blob'; // Only important to set in Node\n this.ws.addEventListener('open', () => resolve(), { once: true });\n this.ws.addEventListener(\n 'error',\n () =>\n reject(\n new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error event raised on WebSocket`\n )\n ),\n { once: true }\n );\n this.ws!.addEventListener('close', (closeEvent: CloseEvent) => {\n if (closeEvent.reason) {\n logger.warn(\n `WebSocket connection closed by server. Reason: '${closeEvent.reason}'`\n );\n }\n });\n });\n }\n\n send(data: string | ArrayBuffer): void {\n if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {\n throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.');\n }\n this.ws.send(data);\n }\n\n async *listen(): AsyncGenerator<unknown> {\n if (!this.ws) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'WebSocket is not connected.'\n );\n }\n\n const messageQueue: unknown[] = [];\n const errorQueue: Error[] = [];\n let resolvePromise: (() => void) | null = null;\n let isClosed = false;\n\n const messageListener = async (event: MessageEvent): Promise<void> => {\n let data: string;\n if (event.data instanceof Blob) {\n data = await event.data.text();\n } else if (typeof event.data === 'string') {\n data = event.data;\n } else {\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`\n )\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n return;\n }\n\n try {\n const obj = JSON.parse(data) as unknown;\n messageQueue.push(obj);\n } catch (e) {\n const err = e as Error;\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing WebSocket message to JSON: ${err.message}`\n )\n );\n }\n\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const errorListener = (): void => {\n errorQueue.push(\n new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const closeListener = (event: CloseEvent): void => {\n if (event.reason) {\n logger.warn(\n `WebSocket connection closed by the server with reason: ${event.reason}`\n );\n }\n isClosed = true;\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n // Clean up listeners to prevent memory leaks\n this.ws?.removeEventListener('message', messageListener);\n this.ws?.removeEventListener('close', closeListener);\n this.ws?.removeEventListener('error', errorListener);\n };\n\n this.ws.addEventListener('message', messageListener);\n this.ws.addEventListener('close', closeListener);\n this.ws.addEventListener('error', errorListener);\n\n while (!isClosed) {\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n if (messageQueue.length > 0) {\n yield messageQueue.shift()!;\n } else {\n await new Promise<void>(resolve => {\n resolvePromise = resolve;\n });\n }\n }\n\n // If the loop terminated because isClosed is true, check for any final errors\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n }\n\n close(code?: number, reason?: string): Promise<void> {\n return new Promise(resolve => {\n if (!this.ws) {\n return resolve();\n }\n\n this.ws.addEventListener('close', () => resolve(), { once: true });\n // Calling 'close' during these states results in an error.\n if (\n this.ws.readyState === WebSocket.CLOSED ||\n this.ws.readyState === WebSocket.CONNECTING\n ) {\n return resolve();\n }\n\n if (this.ws.readyState !== WebSocket.CLOSING) {\n this.ws.close(code, reason);\n }\n });\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode } from '../types';\nimport {\n SchemaInterface,\n SchemaType,\n SchemaParams,\n SchemaRequest\n} from '../types/schema';\n\n/**\n * Parent class encompassing all Schema types, with static methods that\n * allow building specific Schema types. This class can be converted with\n * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.\n * (This string conversion is automatically done when calling SDK methods.)\n * @public\n */\nexport abstract class Schema implements SchemaInterface {\n /**\n * Optional. 
The type of the property.\n * This can only be undefined when using `anyOf` schemas, which do not have an\n * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.\n */\n type?: SchemaType;\n /** Optional. The format of the property.\n * Supported formats:<br/>\n * <ul>\n * <li>for NUMBER type: \"float\", \"double\"</li>\n * <li>for INTEGER type: \"int32\", \"int64\"</li>\n * <li>for STRING type: \"email\", \"byte\", etc</li>\n * </ul>\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /** Optional. The items of the property. */\n items?: SchemaInterface;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Whether the property is nullable. Defaults to false. */\n nullable: boolean;\n /** Optional. The example of the property. */\n example?: unknown;\n /**\n * Allows user to add other schema properties that have not yet\n * been officially added to the SDK.\n */\n [key: string]: unknown;\n\n constructor(schemaParams: SchemaInterface) {\n // TODO(dlarocque): Enforce this with union types\n if (!schemaParams.type && !schemaParams.anyOf) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"A schema must have either a 'type' or an 'anyOf' array of sub-schemas.\"\n );\n }\n // eslint-disable-next-line guard-for-in\n for (const paramKey in schemaParams) {\n this[paramKey] = schemaParams[paramKey];\n }\n // Ensure these are explicitly set to avoid TS errors.\n this.type = schemaParams.type;\n this.format = schemaParams.hasOwnProperty('format')\n ? schemaParams.format\n : undefined;\n this.nullable = schemaParams.hasOwnProperty('nullable')\n ? 
!!schemaParams.nullable\n : false;\n }\n\n /**\n * Defines how this Schema should be serialized as JSON.\n * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj: { type?: SchemaType; [key: string]: unknown } = {\n type: this.type\n };\n for (const prop in this) {\n if (this.hasOwnProperty(prop) && this[prop] !== undefined) {\n if (prop !== 'required' || this.type === SchemaType.OBJECT) {\n obj[prop] = this[prop];\n }\n }\n }\n return obj as SchemaRequest;\n }\n\n static array(arrayParams: SchemaParams & { items: Schema }): ArraySchema {\n return new ArraySchema(arrayParams, arrayParams.items);\n }\n\n static object(\n objectParams: SchemaParams & {\n properties: {\n [k: string]: Schema;\n };\n optionalProperties?: string[];\n }\n ): ObjectSchema {\n return new ObjectSchema(\n objectParams,\n objectParams.properties,\n objectParams.optionalProperties\n );\n }\n\n // eslint-disable-next-line id-blacklist\n static string(stringParams?: SchemaParams): StringSchema {\n return new StringSchema(stringParams);\n }\n\n static enumString(\n stringParams: SchemaParams & { enum: string[] }\n ): StringSchema {\n return new StringSchema(stringParams, stringParams.enum);\n }\n\n static integer(integerParams?: SchemaParams): IntegerSchema {\n return new IntegerSchema(integerParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static number(numberParams?: SchemaParams): NumberSchema {\n return new NumberSchema(numberParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static boolean(booleanParams?: SchemaParams): BooleanSchema {\n return new BooleanSchema(booleanParams);\n }\n\n static anyOf(\n anyOfParams: SchemaParams & { anyOf: TypedSchema[] }\n ): AnyOfSchema {\n return new AnyOfSchema(anyOfParams);\n }\n}\n\n/**\n * A type that includes all specific Schema types.\n * @public\n */\nexport type TypedSchema =\n | IntegerSchema\n | NumberSchema\n | StringSchema\n | BooleanSchema\n | ObjectSchema\n | ArraySchema\n | AnyOfSchema;\n\n/**\n * Schema class for \"integer\" types.\n * @public\n */\nexport class IntegerSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.INTEGER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"number\" types.\n * @public\n */\nexport class NumberSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.NUMBER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"boolean\" types.\n * @public\n */\nexport class BooleanSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.BOOLEAN,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"string\" types. 
Can be used with or without\n * enum values.\n * @public\n */\nexport class StringSchema extends Schema {\n enum?: string[];\n constructor(schemaParams?: SchemaParams, enumValues?: string[]) {\n super({\n type: SchemaType.STRING,\n ...schemaParams\n });\n this.enum = enumValues;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n if (this.enum) {\n obj['enum'] = this.enum;\n }\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class for \"array\" types.\n * The `items` param should refer to the type of item that can be a member\n * of the array.\n * @public\n */\nexport class ArraySchema extends Schema {\n constructor(schemaParams: SchemaParams, public items: TypedSchema) {\n super({\n type: SchemaType.ARRAY,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.items = this.items.toJSON();\n return obj;\n }\n}\n\n/**\n * Schema class for \"object\" types.\n * The `properties` param must be a map of `Schema` objects.\n * @public\n */\nexport class ObjectSchema extends Schema {\n constructor(\n schemaParams: SchemaParams,\n public properties: {\n [k: string]: TypedSchema;\n },\n public optionalProperties: string[] = []\n ) {\n super({\n type: SchemaType.OBJECT,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.properties = { ...this.properties };\n const required = [];\n if (this.optionalProperties) {\n for (const propertyKey of this.optionalProperties) {\n if (!this.properties.hasOwnProperty(propertyKey)) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n `Property \"${propertyKey}\" specified in \"optionalProperties\" does not exist.`\n );\n }\n }\n }\n for (const propertyKey in this.properties) {\n if (this.properties.hasOwnProperty(propertyKey)) {\n obj.properties[propertyKey] = this.properties[\n propertyKey\n ].toJSON() as SchemaRequest;\n if (!this.optionalProperties.includes(propertyKey)) {\n required.push(propertyKey);\n }\n }\n }\n if (required.length > 0) {\n obj.required = required;\n }\n delete obj.optionalProperties;\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class representing a value that can conform to any of the provided sub-schemas. 
This is\n * useful when a field can accept multiple distinct types or structures.\n * @public\n */\nexport class AnyOfSchema extends Schema {\n anyOf: TypedSchema[]; // Re-define field to narrow to required type\n constructor(schemaParams: SchemaParams & { anyOf: TypedSchema[] }) {\n if (schemaParams.anyOf.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"The 'anyOf' array must not be empty.\"\n );\n }\n super({\n ...schemaParams,\n type: undefined // anyOf schemas do not have an explicit type\n });\n this.anyOf = schemaParams.anyOf;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n // Ensure the 'anyOf' property contains serialized SchemaRequest objects.\n if (this.anyOf && Array.isArray(this.anyOf)) {\n obj.anyOf = (this.anyOf as TypedSchema[]).map(s => s.toJSON());\n }\n return obj;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { logger } from '../logger';\n\n/**\n * Defines the image format for images generated by Imagen.\n *\n * Use this class to specify the desired format (JPEG or PNG) and compression quality\n * for images generated by Imagen. This is typically included as part of\n * {@link ImagenModelParams}.\n *\n * @example\n * ```javascript\n * const imagenModelParams = {\n * // ... 
other ImagenModelParams\n * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.\n * }\n * ```\n *\n * @public\n */\nexport class ImagenImageFormat {\n /**\n * The MIME type.\n */\n mimeType: string;\n /**\n * The level of compression (a number between 0 and 100).\n */\n compressionQuality?: number;\n\n private constructor() {\n this.mimeType = 'image/png';\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a JPEG image.\n *\n * @param compressionQuality - The level of compression (a number between 0 and 100).\n * @returns An {@link ImagenImageFormat} object for a JPEG image.\n *\n * @public\n */\n static jpeg(compressionQuality?: number): ImagenImageFormat {\n if (\n compressionQuality &&\n (compressionQuality < 0 || compressionQuality > 100)\n ) {\n logger.warn(\n `Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`\n );\n }\n return { mimeType: 'image/jpeg', compressionQuality };\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a PNG image.\n *\n * @returns An {@link ImagenImageFormat} object for a PNG image.\n *\n * @public\n */\n static png(): ImagenImageFormat {\n return { mimeType: 'image/png' };\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport {\n AIErrorCode,\n FunctionCall,\n FunctionResponse,\n GenerativeContentBlob,\n LiveServerContent\n} from '../types';\nimport { LiveSession } from './live-session';\nimport { Deferred } from '@firebase/util';\n\nconst SERVER_INPUT_SAMPLE_RATE = 16_000;\nconst SERVER_OUTPUT_SAMPLE_RATE = 24_000;\n\nconst AUDIO_PROCESSOR_NAME = 'audio-processor';\n\n/**\n * The JS for an `AudioWorkletProcessor`.\n * This processor is responsible for taking raw audio from the microphone,\n * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread.\n *\n * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor\n *\n * It is defined as a string here so that it can be converted into a `Blob`\n * and loaded at runtime.\n */\nconst audioProcessorWorkletString = `\n class AudioProcessor extends AudioWorkletProcessor {\n constructor(options) {\n super();\n this.targetSampleRate = options.processorOptions.targetSampleRate;\n // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope,\n // representing the native sample rate of the AudioContext.\n this.inputSampleRate = sampleRate;\n }\n\n /**\n * This method is called by the browser's audio engine for each block of audio data.\n * Input is a single input, with a single channel (input[0][0]).\n */\n process(inputs) {\n const input = inputs[0];\n if (input && input.length > 0 && input[0].length > 0) {\n const pcmData = input[0]; // Float32Array of raw audio samples.\n \n // Simple linear interpolation for resampling.\n const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate 
/ this.inputSampleRate));\n const ratio = pcmData.length / resampled.length;\n for (let i = 0; i < resampled.length; i++) {\n resampled[i] = pcmData[Math.floor(i * ratio)];\n }\n\n // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767)\n const resampledInt16 = new Int16Array(resampled.length);\n for (let i = 0; i < resampled.length; i++) {\n const sample = Math.max(-1, Math.min(1, resampled[i]));\n if (sample < 0) {\n resampledInt16[i] = sample * 32768;\n } else {\n resampledInt16[i] = sample * 32767;\n }\n }\n \n this.port.postMessage(resampledInt16);\n }\n // Return true to keep the processor alive and processing the next audio block.\n return true;\n }\n }\n\n // Register the processor with a name that can be used to instantiate it from the main thread.\n registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor);\n`;\n\n/**\n * A controller for managing an active audio conversation.\n *\n * @beta\n */\nexport interface AudioConversationController {\n /**\n * Stops the audio conversation, closes the microphone connection, and\n * cleans up resources. Returns a promise that resolves when cleanup is complete.\n */\n stop: () => Promise<void>;\n}\n\n/**\n * Options for {@link startAudioConversation}.\n *\n * @beta\n */\nexport interface StartAudioConversationOptions {\n /**\n * An async handler that is called when the model requests a function to be executed.\n * The handler should perform the function call and return the result as a `Part`,\n * which will then be sent back to the model.\n */\n functionCallingHandler?: (\n functionCalls: FunctionCall[]\n ) => Promise<FunctionResponse>;\n}\n\n/**\n * Dependencies needed by the {@link AudioConversationRunner}.\n *\n * @internal\n */\ninterface RunnerDependencies {\n audioContext: AudioContext;\n mediaStream: MediaStream;\n sourceNode: MediaStreamAudioSourceNode;\n workletNode: AudioWorkletNode;\n}\n\n/**\n * Encapsulates the core logic of an audio conversation.\n *\n * @internal\n */\nexport class AudioConversationRunner {\n /** A flag to indicate if the conversation has been stopped. */\n private isStopped = false;\n /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */\n private readonly stopDeferred = new Deferred<void>();\n /** A promise that tracks the lifecycle of the main `runReceiveLoop`. */\n private readonly receiveLoopPromise: Promise<void>;\n\n /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */\n private readonly playbackQueue: ArrayBuffer[] = [];\n /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */\n private scheduledSources: AudioBufferSourceNode[] = [];\n /** A high-precision timeline pointer for scheduling gapless audio playback. */\n private nextStartTime = 0;\n /** A mutex to prevent the playback processing loop from running multiple times concurrently. 
*/\n private isPlaybackLoopRunning = false;\n\n constructor(\n private readonly liveSession: LiveSession,\n private readonly options: StartAudioConversationOptions,\n private readonly deps: RunnerDependencies\n ) {\n this.liveSession.inConversation = true;\n\n // Start listening for messages from the server.\n this.receiveLoopPromise = this.runReceiveLoop().finally(() =>\n this.cleanup()\n );\n\n // Set up the handler for receiving processed audio data from the worklet.\n // Message data has been resampled to 16kHz 16-bit PCM.\n this.deps.workletNode.port.onmessage = event => {\n if (this.isStopped) {\n return;\n }\n\n const pcm16 = event.data as Int16Array;\n const base64 = btoa(\n String.fromCharCode.apply(\n null,\n Array.from(new Uint8Array(pcm16.buffer))\n )\n );\n\n const chunk: GenerativeContentBlob = {\n mimeType: 'audio/pcm',\n data: base64\n };\n void this.liveSession.sendAudioRealtime(chunk);\n };\n }\n\n /**\n * Stops the conversation and unblocks the main receive loop.\n */\n async stop(): Promise<void> {\n if (this.isStopped) {\n return;\n }\n this.isStopped = true;\n this.stopDeferred.resolve(); // Unblock the receive loop\n await this.receiveLoopPromise; // Wait for the loop and cleanup to finish\n }\n\n /**\n * Cleans up all audio resources (nodes, stream tracks, context) and marks the\n * session as no longer in a conversation.\n */\n private cleanup(): void {\n this.interruptPlayback(); // Ensure all audio is stopped on final cleanup.\n this.deps.workletNode.port.onmessage = null;\n this.deps.workletNode.disconnect();\n this.deps.sourceNode.disconnect();\n this.deps.mediaStream.getTracks().forEach(track => track.stop());\n if (this.deps.audioContext.state !== 'closed') {\n void this.deps.audioContext.close();\n }\n this.liveSession.inConversation = false;\n }\n\n /**\n * Adds audio data to the queue and ensures the playback loop is running.\n */\n private enqueueAndPlay(audioData: ArrayBuffer): void {\n this.playbackQueue.push(audioData);\n // Will no-op if it's already running.\n void this.processPlaybackQueue();\n }\n\n /**\n * Stops all current and pending audio playback and clears the queue. This is\n * called when the server indicates the model's speech was interrupted with\n * `LiveServerContent.modelTurn.interrupted`.\n */\n private interruptPlayback(): void {\n // Stop all sources that have been scheduled. 
The onended event will fire for each,\n // which will clean up the scheduledSources array.\n [...this.scheduledSources].forEach(source => source.stop(0));\n\n // Clear the internal buffer of unprocessed audio chunks.\n this.playbackQueue.length = 0;\n\n // Reset the playback clock to start fresh.\n this.nextStartTime = this.deps.audioContext.currentTime;\n }\n\n /**\n * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.\n */\n private async processPlaybackQueue(): Promise<void> {\n if (this.isPlaybackLoopRunning) {\n return;\n }\n this.isPlaybackLoopRunning = true;\n\n while (this.playbackQueue.length > 0 && !this.isStopped) {\n const pcmRawBuffer = this.playbackQueue.shift()!;\n try {\n const pcm16 = new Int16Array(pcmRawBuffer);\n const frameCount = pcm16.length;\n\n const audioBuffer = this.deps.audioContext.createBuffer(\n 1,\n frameCount,\n SERVER_OUTPUT_SAMPLE_RATE\n );\n\n // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API.\n const channelData = audioBuffer.getChannelData(0);\n for (let i = 0; i < frameCount; i++) {\n channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0]\n }\n\n const source = this.deps.audioContext.createBufferSource();\n source.buffer = audioBuffer;\n source.connect(this.deps.audioContext.destination);\n\n // Track the source and set up a handler to remove it from tracking when it finishes.\n this.scheduledSources.push(source);\n source.onended = () => {\n this.scheduledSources = this.scheduledSources.filter(\n s => s !== source\n );\n };\n\n // To prevent gaps, schedule the next chunk to start either now (if we're catching up)\n // or exactly when the previous chunk is scheduled to end.\n this.nextStartTime = Math.max(\n this.deps.audioContext.currentTime,\n this.nextStartTime\n );\n source.start(this.nextStartTime);\n\n // Update the schedule for the *next* chunk.\n this.nextStartTime += audioBuffer.duration;\n } catch (e) {\n logger.error('Error playing audio:', e);\n }\n }\n\n this.isPlaybackLoopRunning = false;\n }\n\n /**\n * The main loop that listens for and processes messages from the server.\n */\n private async runReceiveLoop(): Promise<void> {\n const messageGenerator = this.liveSession.receive();\n while (!this.isStopped) {\n const result = await Promise.race([\n messageGenerator.next(),\n this.stopDeferred.promise\n ]);\n\n if (this.isStopped || !result || result.done) {\n break;\n }\n\n const message = result.value;\n if (message.type === 'serverContent') {\n const serverContent = message as LiveServerContent;\n if (serverContent.interrupted) {\n this.interruptPlayback();\n }\n\n const audioPart = serverContent.modelTurn?.parts.find(part =>\n part.inlineData?.mimeType.startsWith('audio/')\n );\n if (audioPart?.inlineData) {\n const audioData = Uint8Array.from(\n atob(audioPart.inlineData.data),\n c => c.charCodeAt(0)\n ).buffer;\n this.enqueueAndPlay(audioData);\n }\n } else if (message.type === 'toolCall') {\n if (!this.options.functionCallingHandler) {\n logger.warn(\n 'Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. 
Ignoring tool call.'\n );\n } else {\n try {\n const functionResponse = await this.options.functionCallingHandler(\n message.functionCalls\n );\n if (!this.isStopped) {\n void this.liveSession.sendFunctionResponses([functionResponse]);\n }\n } catch (e) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Function calling handler failed: ${(e as Error).message}`\n );\n }\n }\n }\n }\n }\n}\n\n/**\n * Starts a real-time, bidirectional audio conversation with the model. This helper function manages\n * the complexities of microphone access, audio recording, playback, and interruptions.\n *\n * @remarks Important: This function must be called in response to a user gesture\n * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.\n *\n * @example\n * ```javascript\n * const liveSession = await model.connect();\n * let conversationController;\n *\n * // This function must be called from within a click handler.\n * async function startConversation() {\n * try {\n * conversationController = await startAudioConversation(liveSession);\n * } catch (e) {\n * // Handle AI-specific errors\n * if (e instanceof AIError) {\n * console.error(\"AI Error:\", e.message);\n * }\n * // Handle microphone permission and hardware errors\n * else if (e instanceof DOMException) {\n * console.error(\"Microphone Error:\", e.message);\n * }\n * // Handle other unexpected errors\n * else {\n * console.error(\"An unexpected error occurred:\", e);\n * }\n * }\n * }\n *\n * // Later, to stop the conversation:\n * // if (conversationController) {\n * // await conversationController.stop();\n * // }\n * ```\n *\n * @param liveSession - An active {@link LiveSession} instance.\n * @param options - Configuration options for the audio conversation.\n * @returns A `Promise` that resolves with an {@link AudioConversationController}.\n * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).\n * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.\n *\n * @beta\n */\nexport async function startAudioConversation(\n liveSession: LiveSession,\n options: StartAudioConversationOptions = {}\n): Promise<AudioConversationController> {\n if (liveSession.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot start audio conversation on a closed LiveSession.'\n );\n }\n\n if (liveSession.inConversation) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'An audio conversation is already in progress for this session.'\n );\n }\n\n // Check for necessary Web API support.\n if (\n typeof AudioWorkletNode === 'undefined' ||\n typeof AudioContext === 'undefined' ||\n typeof navigator === 'undefined' ||\n !navigator.mediaDevices\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'\n );\n }\n\n let audioContext: AudioContext | undefined;\n try {\n // 1. Set up the audio context. 
This must be in response to a user gesture.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy\n audioContext = new AudioContext();\n if (audioContext.state === 'suspended') {\n await audioContext.resume();\n }\n\n // 2. Prompt for microphone access and get the media stream.\n // This can throw a variety of permission or hardware-related errors.\n const mediaStream = await navigator.mediaDevices.getUserMedia({\n audio: true\n });\n\n // 3. Load the AudioWorklet processor.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet\n const workletBlob = new Blob([audioProcessorWorkletString], {\n type: 'application/javascript'\n });\n const workletURL = URL.createObjectURL(workletBlob);\n await audioContext.audioWorklet.addModule(workletURL);\n\n // 4. Create the audio graph: Microphone -> Source Node -> Worklet Node\n const sourceNode = audioContext.createMediaStreamSource(mediaStream);\n const workletNode = new AudioWorkletNode(\n audioContext,\n AUDIO_PROCESSOR_NAME,\n {\n processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE }\n }\n );\n sourceNode.connect(workletNode);\n\n // 5. Instantiate and return the runner which manages the conversation.\n const runner = new AudioConversationRunner(liveSession, options, {\n audioContext,\n mediaStream,\n sourceNode,\n workletNode\n });\n\n return { stop: () => runner.stop() };\n } catch (e) {\n // Ensure the audio context is closed on any setup error.\n if (audioContext && audioContext.state !== 'closed') {\n void audioContext.close();\n }\n\n // Re-throw specific, known error types directly. The user may want to handle `DOMException`\n // errors differently (for example, if permission to access audio device was denied).\n if (e instanceof AIError || e instanceof DOMException) {\n throw e;\n }\n\n // Wrap any other unexpected errors in a standard AIError.\n throw new AIError(\n AIErrorCode.ERROR,\n `Failed to initialize audio recording: ${(e as Error).message}`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp, getApp, _getProvider } from '@firebase/app';\nimport { Provider } from '@firebase/component';\nimport { getModularInstance } from '@firebase/util';\nimport { AI_TYPE, DEFAULT_HYBRID_IN_CLOUD_MODEL } from './constants';\nimport { AIService } from './service';\nimport { AI, AIOptions } from './public-types';\nimport {\n ImagenModelParams,\n HybridParams,\n ModelParams,\n RequestOptions,\n AIErrorCode,\n LiveModelParams\n} from './types';\nimport { AIError } from './errors';\nimport {\n AIModel,\n GenerativeModel,\n LiveGenerativeModel,\n ImagenModel\n} from './models';\nimport { encodeInstanceIdentifier } from './helpers';\nimport { GoogleAIBackend } from './backend';\nimport { WebSocketHandlerImpl } from './websocket';\n\nexport { ChatSession } from './methods/chat-session';\nexport { LiveSession } from './methods/live-session';\nexport * from 
'./requests/schema-builder';\nexport { ImagenImageFormat } from './requests/imagen-image-format';\nexport { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, AIError };\nexport { Backend, VertexAIBackend, GoogleAIBackend } from './backend';\nexport {\n startAudioConversation,\n AudioConversationController,\n StartAudioConversationOptions\n} from './methods/live-session-helpers';\n\ndeclare module '@firebase/component' {\n interface NameServiceMapping {\n [AI_TYPE]: AIService;\n }\n}\n\n/**\n * Returns the default {@link AI} instance that is associated with the provided\n * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the\n * default settings.\n *\n * @example\n * ```javascript\n * const ai = getAI(app);\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Gemini Developer API (via Google AI).\n * const ai = getAI(app, { backend: new GoogleAIBackend() });\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Vertex AI Gemini API.\n * const ai = getAI(app, { backend: new VertexAIBackend() });\n * ```\n *\n * @param app - The {@link @firebase/app#FirebaseApp} to use.\n * @param options - {@link AIOptions} that configure the AI instance.\n * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.\n *\n * @public\n */\nexport function getAI(app: FirebaseApp = getApp(), options?: AIOptions): AI {\n app = getModularInstance(app);\n // Dependencies\n const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE);\n\n const backend = options?.backend ?? new GoogleAIBackend();\n\n const finalOptions: Omit<AIOptions, 'backend'> = {\n useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false\n };\n\n const identifier = encodeInstanceIdentifier(backend);\n const aiInstance = AIProvider.getImmediate({\n identifier\n });\n\n aiInstance.options = finalOptions;\n\n return aiInstance;\n}\n\n/**\n * Returns a {@link GenerativeModel} class with methods for inference\n * and other functionality.\n *\n * @public\n */\nexport function getGenerativeModel(\n ai: AI,\n modelParams: ModelParams | HybridParams,\n requestOptions?: RequestOptions\n): GenerativeModel {\n // Uses the existence of HybridParams.mode to clarify the type of the modelParams input.\n const hybridParams = modelParams as HybridParams;\n let inCloudParams: ModelParams;\n if (hybridParams.mode) {\n inCloudParams = hybridParams.inCloudParams || {\n model: DEFAULT_HYBRID_IN_CLOUD_MODEL\n };\n } else {\n inCloudParams = modelParams as ModelParams;\n }\n\n if (!inCloudParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`\n );\n }\n\n /**\n * An AIService registered by index.node.ts will not have a\n * chromeAdapterFactory() method.\n */\n const chromeAdapter = (ai as AIService).chromeAdapterFactory?.(\n hybridParams.mode,\n typeof window === 'undefined' ? 
undefined : window,\n hybridParams.onDeviceParams\n );\n\n return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);\n}\n\n/**\n * Returns an {@link ImagenModel} class with methods for using Imagen.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when making Imagen requests.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @public\n */\nexport function getImagenModel(\n ai: AI,\n modelParams: ImagenModelParams,\n requestOptions?: RequestOptions\n): ImagenModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`\n );\n }\n return new ImagenModel(ai, modelParams, requestOptions);\n}\n\n/**\n * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.\n *\n * The Live API is only supported in modern browser windows and Node >= 22.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when setting up a {@link LiveSession}.\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @beta\n */\nexport function getLiveGenerativeModel(\n ai: AI,\n modelParams: LiveModelParams\n): LiveGenerativeModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`\n );\n }\n const webSocketHandler = new WebSocketHandlerImpl();\n return new LiveGenerativeModel(ai, modelParams, webSocketHandler);\n}\n","/**\n * The Firebase AI Web SDK.\n *\n * @packageDocumentation\n */\n\n/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { registerVersion, _registerComponent } from '@firebase/app';\nimport { AI_TYPE } from './constants';\nimport { Component, ComponentType } from '@firebase/component';\nimport { name, version } from '../package.json';\nimport { LanguageModel } from './types/language-model';\nimport { factory } from './factory-browser';\n\ndeclare global {\n interface Window {\n LanguageModel: LanguageModel;\n }\n}\n\nfunction registerAI(): void {\n _registerComponent(\n new Component(AI_TYPE, factory, ComponentType.PUBLIC).setMultipleInstances(\n true\n )\n );\n\n registerVersion(name, version);\n // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation\n registerVersion(name, version, '__BUILD_TARGET__');\n}\n\nregisterAI();\n\nexport * from './api';\nexport * from 
'./public-types';\n"],"names":["FirebaseError","Logger","_isFirebaseServerApp","GoogleAIMapper.mapGenerateContentResponse","GoogleAIMapper.mapGenerateContentRequest","GoogleAIMapper.mapCountTokensRequest","Deferred","app","getApp","getModularInstance","_getProvider","_registerComponent","Component","registerVersion"],"mappings":";;;;;;;;;;;;AAAA;;;;;;;;;;;;;;;AAeG;AAII,MAAM,OAAO,GAAG,IAAI,CAAC;AAErB,MAAM,gBAAgB,GAAG,aAAa,CAAC;AAEvC,MAAM,cAAc,GAAG,iCAAiC,CAAC;AAEzD,MAAM,mBAAmB,GAAG,QAAQ,CAAC;AAErC,MAAM,eAAe,GAAG,OAAO,CAAC;AAEhC,MAAM,YAAY,GAAG,OAAO,CAAC;AAE7B,MAAM,wBAAwB,GAAG,GAAG,GAAG,IAAI,CAAC;AAEnD;;AAEG;AACI,MAAM,6BAA6B,GAAG,uBAAuB;;ACpCpE;;;;;;;;;;;;;;;AAeG;AAMH;;;;AAIG;AACG,MAAO,OAAQ,SAAQA,kBAAa,CAAA;AACxC;;;;;;AAMG;AACH,IAAA,WAAA,CACW,IAAiB,EAC1B,OAAe,EACN,eAAiC,EAAA;;QAG1C,MAAM,OAAO,GAAG,OAAO,CAAC;AACxB,QAAA,MAAM,QAAQ,GAAG,CAAA,EAAG,OAAO,CAAI,CAAA,EAAA,IAAI,EAAE,CAAC;QACtC,MAAM,WAAW,GAAG,CAAG,EAAA,OAAO,KAAK,OAAO,CAAA,EAAA,EAAK,QAAQ,CAAA,CAAA,CAAG,CAAC;AAC3D,QAAA,KAAK,CAAC,IAAI,EAAE,WAAW,CAAC,CAAC;QARhB,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAa;QAEjB,IAAe,CAAA,eAAA,GAAf,eAAe,CAAkB;;;;;AAY1C,QAAA,IAAI,KAAK,CAAC,iBAAiB,EAAE;;;AAG3B,YAAA,KAAK,CAAC,iBAAiB,CAAC,IAAI,EAAE,OAAO,CAAC,CAAC;SACxC;;;;;QAMD,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,OAAO,CAAC,SAAS,CAAC,CAAC;;AAG/C,QAAA,IAAI,CAAC,QAAQ,GAAG,MAAM,WAAW,CAAC;KACnC;AACF;;AChED;;;;;;;;;;;;;;;AAeG;AAQH;;;AAGG;AACI,MAAM,cAAc,GAAG,CAAC,MAAM,EAAE,OAAO,EAAE,UAAU,EAAE,QAAQ,EAAW;AAE/E;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B,IAAA,yBAAyB,EAAE,2BAA2B;AACtD,IAAA,+BAA+B,EAAE,iCAAiC;AAClE,IAAA,wBAAwB,EAAE,0BAA0B;AACpD,IAAA,+BAA+B,EAAE,iCAAiC;EACzD;AAQX;;;AAGG;AACU,MAAA,kBAAkB,GAAG;AAChC;;AAEG;AACH,IAAA,mBAAmB,EAAE,qBAAqB;AAC1C;;AAEG;AACH,IAAA,sBAAsB,EAAE,wBAAwB;AAChD;;AAEG;AACH,IAAA,eAAe,EAAE,iBAAiB;AAClC;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;;AAGG;AACH,IAAA,GAAG,EAAE,KAAK;EACD;AASX;;;;AAIG;AACU,MAAA,eAAe,GAAG;AAC7B;;AAEG;AACH,IAAA,QAAQ,EAAE,UAAU;AACpB;;AAEG;AACH,IAAA,WAAW,EAAE,aAAa;EACjB;AAUX;;;AAGG;AACU,MAAA,eAAe,GAAG;AAC7B;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,GAAG,EAAE,KAAK;AACV;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;EACH;AASX;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B;;AAEG;AACH,IAAA,wBAAwB,EAAE,0BAA0B;AACpD;;AAEG;AACH,IAAA,iBAAiB,EAAE,mBAAmB;AACtC;;AAEG;AACH,IAAA,oBAAoB,EAAE,sBAAsB;AAC5C;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;AACxC;;;;;AAKG;AACH,IAAA,yBAAyB,EAAE,2BAA2B;EAC7C;AAQX;;;AAGG;AACU,MAAA,WAAW,GAAG;AACzB;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,SAAS,EAAE,WAAW;AACtB;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;EAC/B;AAQX;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,SAAS,EAAE,WAAW;AACtB;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;AACxC;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,uBAAuB,EAAE,yBAAyB;EACzC;AAQX;;AAEG;AACU,MAAA,mBAAmB,GAAG;AACjC;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;;;;AAKG;AACH,IAAA,GAAG,EAAE,KAAK;AACV;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;EACH;AAQX;;;AAGG;AACU,MAAA,QAAQ,GAAG;AACtB;;AAEG;AACH,IAAA,oBAAoB,EAAE,sBAAsB;AAC5C;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,QAAQ,EAAE,UAAU;EACX;AAQX;;;;AAIG;AACU,MAAA,gBAAgB,GAAG;AAC9B;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;;AAGG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;;AAGG;AACH,IAAA,KAAK,EAAE,OAAO;EACL;AAUX;;;;;;;;;;;;;;;;;;;;AAoBG;AACU,MAAA,aAAa,GAAG;AAC3B,IAAA,kBAAk
B,EAAE,kBAAkB;AACtC,IAAA,gBAAgB,EAAE,gBAAgB;AAClC,IAAA,eAAe,EAAE,eAAe;AAChC,IAAA,iBAAiB,EAAE,iBAAiB;EAC3B;AASX;;;;AAIG;AACU,MAAA,eAAe,GAAG;AAC7B,IAAA,WAAW,EAAE,WAAW;AACxB,IAAA,UAAU,EAAE,UAAU;EACb;AAUX;;;;AAIG;AACU,MAAA,OAAO,GAAG;AACrB,IAAA,WAAW,EAAE,qBAAqB;AAClC,IAAA,EAAE,EAAE,YAAY;AAChB,IAAA,MAAM,EAAE,gBAAgB;AACxB,IAAA,iBAAiB,EAAE,2BAA2B;EAC9C;AASF;;;;AAIG;AACU,MAAA,QAAQ,GAAG;AACtB,IAAA,WAAW,EAAE,sBAAsB;AACnC,IAAA,MAAM,EAAE,QAAQ;;;ACzalB;;;;;;;;;;;;;;;AAeG;AA4XH;;;;;;;;;;;;;;;;AAgBG;AACU,MAAA,kBAAkB,GAAG;AAChC;;AAEG;AACH,IAAA,gCAAgC,EAAE,kCAAkC;AACpE;;AAEG;AACH,IAAA,4BAA4B,EAAE,8BAA8B;AAC5D;;AAEG;AACH,IAAA,0BAA0B,EAAE,4BAA4B;AACxD;;AAEG;AACH,IAAA,4BAA4B,EAAE,8BAA8B;AAC5D;;AAEG;AACH,IAAA,2BAA2B,EAAE,6BAA6B;EAC1D;AA6KF;;;;AAIG;AACU,MAAA,gBAAgB,GAAG;AAC9B,IAAA,cAAc,EAAE,eAAe;AAC/B,IAAA,SAAS,EAAE,UAAU;AACrB,IAAA,sBAAsB,EAAE,sBAAsB;;;ACtmBhD;;;;;;;;;;;;;;;AAeG;AA4CH;;;;AAIG;AACU,MAAA,WAAW,GAAG;;AAEzB,IAAA,KAAK,EAAE,OAAO;;AAGd,IAAA,aAAa,EAAE,eAAe;;AAG9B,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,WAAW,EAAE,aAAa;;AAG1B,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,eAAe,EAAE,iBAAiB;;AAGlC,IAAA,eAAe,EAAE,iBAAiB;;AAGlC,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,UAAU,EAAE,YAAY;;AAGxB,IAAA,SAAS,EAAE,WAAW;;AAGtB,IAAA,QAAQ,EAAE,UAAU;;AAGpB,IAAA,aAAa,EAAE,eAAe;;AAG9B,IAAA,YAAY,EAAE,cAAc;;AAG5B,IAAA,WAAW,EAAE,aAAa;;;ACzG5B;;;;;;;;;;;;;;;AAeG;AAEH;;;;;AAKG;AACU,MAAA,UAAU,GAAG;;AAExB,IAAA,MAAM,EAAE,QAAQ;;AAEhB,IAAA,MAAM,EAAE,QAAQ;;AAEhB,IAAA,OAAO,EAAE,SAAS;;AAElB,IAAA,OAAO,EAAE,SAAS;;AAElB,IAAA,KAAK,EAAE,OAAO;;AAEd,IAAA,MAAM,EAAE,QAAQ;;;ACnClB;;;;;;;;;;;;;;;AAeG;AAqFH;;;;;;;;;;;AAWG;AACU,MAAA,uBAAuB,GAAG;AACrC;;AAEG;AACH,IAAA,mBAAmB,EAAE,qBAAqB;AAC1C;;AAEG;AACH,IAAA,sBAAsB,EAAE,wBAAwB;AAChD;;AAEG;AACH,IAAA,eAAe,EAAE,iBAAiB;AAClC;;;;;AAKG;AACH,IAAA,UAAU,EAAE,YAAY;EACf;AAiBX;;;;;;;AAOG;AACU,MAAA,uBAAuB,GAAG;AACrC;;AAEG;AACH,IAAA,SAAS,EAAE,YAAY;AACvB;;;;;;AAMG;AACH,IAAA,WAAW,EAAE,aAAa;AAC1B;;;;;;AAMG;AACH,IAAA,SAAS,EAAE,WAAW;EACb;AAiCX;;;;;;;;;;AAUG;AACU,MAAA,iBAAiB,GAAG;AAC/B;;AAEG;AACH,IAAA,QAAQ,EAAE,KAAK;AACf;;AAEG;AACH,IAAA,eAAe,EAAE,KAAK;AACtB;;AAEG;AACH,IAAA,cAAc,EAAE,KAAK;AACrB;;AAEG;AACH,IAAA,gBAAgB,EAAE,MAAM;AACxB;;AAEG;AACH,IAAA,eAAe,EAAE,MAAM;;;AClPzB;;;;;;;;;;;;;;;AAeG;AAqCH;;;;;;;;;;;AAWG;AACU,MAAA,WAAW,GAAG;AACzB;;;AAGG;AACH,IAAA,SAAS,EAAE,WAAW;AAEtB;;;AAGG;AACH,IAAA,SAAS,EAAE,WAAW;AACd,EAAC;;AC5EX;;;;;;;;;;;;;;;AAeG;AAKH;;;;;;;AAOG;MACmB,OAAO,CAAA;AAM3B;;;AAGG;AACH,IAAA,WAAA,CAAsB,IAAiB,EAAA;AACrC,QAAA,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC;KACzB;AACF,CAAA;AAED;;;;;;;AAOG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAC1C;;AAEG;AACH,IAAA,WAAA,GAAA;AACE,QAAA,KAAK,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC;KAC9B;AACF,CAAA;AAED;;;;;;;AAOG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAQ1C;;;;;;AAMG;AACH,IAAA,WAAA,CAAY,WAAmB,gBAAgB,EAAA;AAC7C,QAAA,KAAK,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC;QAC7B,IAAI,CAAC,QAAQ,EAAE;AACb,YAAA,IAAI,CAAC,QAAQ,GAAG,gBAAgB,CAAC;SAClC;aAAM;AACL,YAAA,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;SAC1B;KACF;AACF;;AC3FD;;;;;;;;;;;;;;;AAeG;AAOH;;;;;AAKG;AACG,SAAU,wBAAwB,CAAC,OAAgB,EAAA;AACvD,IAAA,IAAI,OAAO,YAAY,eAAe,EAAE;QACtC,OAAO,CAAA,EAAG,OAAO,CAAA,SAAA,CAAW,CAAC;KAC9B;AAAM,SAAA,IAAI,OAAO,YAAY,eAAe,EAAE;AAC7C,QAAA,OAAO,GAAG,OAAO,CAAA,UAAA,EAAa,OAAO,CAAC,QAAQ,EAAE,CAAC;KAClD;SAAM;AACL,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAoB,iBAAA,EAAA,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,WAAW,CAAC,CAAA,CAAE,CAC1D,CAAC;KACH;AACH,CAAC;AAED;;;;AAIG;AACG,SAAU,wBAAwB,CAAC,kBAA0B,EAAA;IACjE,MAAM,eAAe,GAAG,kBAAkB,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;AACtD,IAAA,IAAI,eAAe,CAAC,CAAC,CAAC,KAAK,OAAO,EAAE;AAClC,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAgD,6CAAA,EAAA,eAAe,CAAC,CAAC,CAAC,CAAA,CAAA,CAAG,
CACtE,CAAC;KACH;AACD,IAAA,MAAM,WAAW,GAAG,eAAe,CAAC,CAAC,CAAC,CAAC;IACvC,QAAQ,WAAW;AACjB,QAAA,KAAK,UAAU;AACb,YAAA,MAAM,QAAQ,GAAuB,eAAe,CAAC,CAAC,CAAC,CAAC;YACxD,IAAI,CAAC,QAAQ,EAAE;gBACb,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAkD,+CAAA,EAAA,kBAAkB,CAAG,CAAA,CAAA,CACxE,CAAC;aACH;AACD,YAAA,OAAO,IAAI,eAAe,CAAC,QAAQ,CAAC,CAAC;AACvC,QAAA,KAAK,UAAU;YACb,OAAO,IAAI,eAAe,EAAE,CAAC;AAC/B,QAAA;YACE,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAwC,qCAAA,EAAA,kBAAkB,CAAG,CAAA,CAAA,CAC9D,CAAC;KACL;AACH;;ACzEA;;;;;;;;;;;;;;;AAeG;AAII,MAAM,MAAM,GAAG,IAAIC,eAAM,CAAC,oBAAoB,CAAC;;ACsBtD;;AAEG;AACH,IAAY,YAKX,CAAA;AALD,CAAA,UAAY,YAAY,EAAA;AACtB,IAAA,YAAA,CAAA,aAAA,CAAA,GAAA,aAA6B,CAAA;AAC7B,IAAA,YAAA,CAAA,cAAA,CAAA,GAAA,cAA+B,CAAA;AAC/B,IAAA,YAAA,CAAA,aAAA,CAAA,GAAA,aAA6B,CAAA;AAC7B,IAAA,YAAA,CAAA,WAAA,CAAA,GAAA,WAAyB,CAAA;AAC3B,CAAC,EALW,YAAY,KAAZ,YAAY,GAKvB,EAAA,CAAA,CAAA;;ACjDD;;;;;;;;;;;;;;;AAeG;AAwBH;AACA,MAAM,qBAAqB,GAA4B,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC;AAE3E;;;;AAIG;MACU,iBAAiB,CAAA;AAW5B,IAAA,WAAA,CACS,qBAAoC,EACpC,IAAmB,EAC1B,cAA+B,EAAA;QAFxB,IAAqB,CAAA,qBAAA,GAArB,qBAAqB,CAAe;QACpC,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAe;QAVpB,IAAa,CAAA,aAAA,GAAG,KAAK,CAAC;AAG9B,QAAA,IAAA,CAAA,cAAc,GAAmB;AAC/B,YAAA,aAAa,EAAE;AACb,gBAAA,cAAc,EAAE,qBAAqB;AACtC,aAAA;SACF,CAAC;QAMA,IAAI,cAAc,EAAE;AAClB,YAAA,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;AACrC,YAAA,IAAI,CAAC,IAAI,CAAC,cAAc,CAAC,aAAa,EAAE;AACtC,gBAAA,IAAI,CAAC,cAAc,CAAC,aAAa,GAAG;AAClC,oBAAA,cAAc,EAAE,qBAAqB;iBACtC,CAAC;aACH;iBAAM,IAAI,CAAC,IAAI,CAAC,cAAc,CAAC,aAAa,CAAC,cAAc,EAAE;AAC5D,gBAAA,IAAI,CAAC,cAAc,CAAC,aAAa,CAAC,cAAc;AAC9C,oBAAA,qBAAqB,CAAC;aACzB;SACF;KACF;AAED;;;;;;;;;;;;;;AAcG;IACH,MAAM,WAAW,CAAC,OAA+B,EAAA;AAC/C,QAAA,IAAI,CAAC,IAAI,CAAC,IAAI,EAAE;AACd,YAAA,MAAM,CAAC,KAAK,CACV,CAAA,0DAAA,CAA4D,CAC7D,CAAC;AACF,YAAA,OAAO,KAAK,CAAC;SACd;QACD,IAAI,IAAI,CAAC,IAAI,KAAK,aAAa,CAAC,aAAa,EAAE;AAC7C,YAAA,MAAM,CAAC,KAAK,CACV,CAAA,gEAAA,CAAkE,CACnE,CAAC;AACF,YAAA,OAAO,KAAK,CAAC;SACd;;AAGD,QAAA,MAAM,YAAY,GAAG,MAAM,IAAI,CAAC,mBAAmB,EAAE,CAAC;QAEtD,IAAI,IAAI,CAAC,IAAI,KAAK,aAAa,CAAC,cAAc,EAAE;;AAE9C,YAAA,IAAI,YAAY,KAAK,YAAY,CAAC,WAAW,EAAE;gBAC7C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,4DAA4D,CAC7D,CAAC;aACH;AAAM,iBAAA,IACL,YAAY,KAAK,YAAY,CAAC,YAAY;AAC1C,gBAAA,YAAY,KAAK,YAAY,CAAC,WAAW,EACzC;;AAEA,gBAAA,MAAM,CAAC,KAAK,CAAC,CAAA,kDAAA,CAAoD,CAAC,CAAC;gBACnE,MAAM,IAAI,CAAC,eAAe,CAAC;AAC3B,gBAAA,OAAO,IAAI,CAAC;aACb;AACD,YAAA,OAAO,IAAI,CAAC;SACb;;AAGD,QAAA,IAAI,YAAY,KAAK,YAAY,CAAC,SAAS,EAAE;AAC3C,YAAA,MAAM,CAAC,KAAK,CACV,4DAA4D,YAAY,CAAA,EAAA,CAAI,CAC7E,CAAC;AACF,YAAA,OAAO,KAAK,CAAC;SACd;QACD,IAAI,CAAC,iBAAiB,CAAC,iBAAiB,CAAC,OAAO,CAAC,EAAE;AACjD,YAAA,MAAM,CAAC,KAAK,CACV,CAAA,gEAAA,CAAkE,CACnE,CAAC;AACF,YAAA,OAAO,KAAK,CAAC;SACd;AAED,QAAA,OAAO,IAAI,CAAC;KACb;AAED;;;;;;;;AAQG;IACH,MAAM,eAAe,CAAC,OAA+B,EAAA;AACnD,QAAA,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,aAAa,EAAE,CAAC;AAC3C,QAAA,MAAM,QAAQ,GAAG,MAAM,OAAO,CAAC,GAAG,CAChC,OAAO,CAAC,QAAQ,CAAC,GAAG,CAAC,iBAAiB,CAAC,sBAAsB,CAAC,CAC/D,CAAC;AACF,QAAA,MAAM,IAAI,GAAG,MAAM,OAAO,CAAC,MAAM,CAC/B,QAAQ,EACR,IAAI,CAAC,cAAc,CAAC,aAAa,CAClC,CAAC;AACF,QAAA,OAAO,iBAAiB,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;KAC3C;AAED;;;;;;;;AAQG;IACH,MAAM,qBAAqB,CACzB,OAA+B,EAAA;AAE/B,QAAA,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,aAAa,EAAE,CAAC;AAC3C,QAAA,MAAM,QAAQ,GAAG,MAAM,OAAO,CAAC,GAAG,CAChC,OAAO,CAAC,QAAQ,CAAC,GAAG,CAAC,iBAAiB,CAAC,sBAAsB,CAAC,CAC/D,CAAC;AACF,QAAA,MAAM,MAAM,GAAG,OAAO,CAAC,eAAe,CACpC,QAAQ,EACR,IAAI,CAAC,cAAc,CAAC,aAAa,CAClC,CAAC;AACF,QAAA,OAAO,iBAAiB,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;KACnD;IAED,MAAM,WAAW,CAAC,QAA4B,EAAA;QAC5C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,wDAAwD,CACzD
,CAAC;KACH;AAED;;AAEG;IACK,OAAO,iBAAiB,CAAC,OAA+B,EAAA;;QAE9D,IAAI,OAAO,CAAC,QAAQ,CAAC,MAAM,KAAK,CAAC,EAAE;AACjC,YAAA,MAAM,CAAC,KAAK,CAAC,gDAAgD,CAAC,CAAC;AAC/D,YAAA,OAAO,KAAK,CAAC;SACd;AAED,QAAA,KAAK,MAAM,OAAO,IAAI,OAAO,CAAC,QAAQ,EAAE;AACtC,YAAA,IAAI,OAAO,CAAC,IAAI,KAAK,UAAU,EAAE;AAC/B,gBAAA,MAAM,CAAC,KAAK,CAAC,CAAA,iDAAA,CAAmD,CAAC,CAAC;AAClE,gBAAA,OAAO,KAAK,CAAC;aACd;;AAGD,YAAA,KAAK,MAAM,IAAI,IAAI,OAAO,CAAC,KAAK,EAAE;gBAChC,IACE,IAAI,CAAC,UAAU;AACf,oBAAA,iBAAiB,CAAC,oBAAoB,CAAC,OAAO,CAC5C,IAAI,CAAC,UAAU,CAAC,QAAQ,CACzB,KAAK,CAAC,CAAC,EACR;oBACA,MAAM,CAAC,KAAK,CACV,CAA0B,uBAAA,EAAA,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAqC,mCAAA,CAAA,CACxF,CAAC;AACF,oBAAA,OAAO,KAAK,CAAC;iBACd;aACF;SACF;AAED,QAAA,OAAO,IAAI,CAAC;KACb;AAED;;AAEG;AACK,IAAA,MAAM,mBAAmB,GAAA;AAC/B,QAAA,MAAM,YAAY,GAAG,MAAM,IAAI,CAAC,qBAAqB,EAAE,YAAY,CACjE,IAAI,CAAC,cAAc,CAAC,aAAa,CAClC,CAAC;AAEF,QAAA,IAAI,YAAY,KAAK,YAAY,CAAC,YAAY,EAAE;YAC9C,IAAI,CAAC,QAAQ,EAAE,CAAC;SACjB;AAED,QAAA,OAAO,YAAY,CAAC;KACrB;AAED;;;;;;;;AAQG;IACK,QAAQ,GAAA;AACd,QAAA,IAAI,IAAI,CAAC,aAAa,EAAE;YACtB,OAAO;SACR;AACD,QAAA,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC;AAC1B,QAAA,IAAI,CAAC,eAAe,GAAG,IAAI,CAAC,qBAAqB;AAC/C,cAAE,MAAM,CAAC,IAAI,CAAC,cAAc,CAAC,aAAa,CAAC;aAC1C,OAAO,CAAC,MAAK;AACZ,YAAA,IAAI,CAAC,aAAa,GAAG,KAAK,CAAC;AAC7B,SAAC,CAAC,CAAC;KACN;AAED;;AAEG;AACK,IAAA,aAAa,sBAAsB,CACzC,OAAgB,EAAA;AAEhB,QAAA,MAAM,4BAA4B,GAAG,MAAM,OAAO,CAAC,GAAG,CACpD,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,iBAAiB,CAAC,6BAA6B,CAAC,CACnE,CAAC;QACF,OAAO;YACL,IAAI,EAAE,iBAAiB,CAAC,0BAA0B,CAAC,OAAO,CAAC,IAAI,CAAC;AAChE,YAAA,OAAO,EAAE,4BAA4B;SACtC,CAAC;KACH;AAED;;AAEG;AACK,IAAA,aAAa,6BAA6B,CAChD,IAAU,EAAA;AAEV,QAAA,IAAI,IAAI,CAAC,IAAI,EAAE;YACb,OAAO;AACL,gBAAA,IAAI,EAAE,MAAM;gBACZ,KAAK,EAAE,IAAI,CAAC,IAAI;aACjB,CAAC;SACH;AAAM,aAAA,IAAI,IAAI,CAAC,UAAU,EAAE;AAC1B,YAAA,MAAM,qBAAqB,GAAG,MAAM,KAAK,CACvC,CAAA,KAAA,EAAQ,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAA,QAAA,EAAW,IAAI,CAAC,UAAU,CAAC,IAAI,CAAA,CAAE,CAClE,CAAC;AACF,YAAA,MAAM,SAAS,GAAG,MAAM,qBAAqB,CAAC,IAAI,EAAE,CAAC;AACrD,YAAA,MAAM,WAAW,GAAG,MAAM,iBAAiB,CAAC,SAAS,CAAC,CAAC;YACvD,OAAO;AACL,gBAAA,IAAI,EAAE,OAAO;AACb,gBAAA,KAAK,EAAE,WAAW;aACnB,CAAC;SACH;QACD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,CAA0D,wDAAA,CAAA,CAC3D,CAAC;KACH;AAED;;AAEG;IACK,OAAO,0BAA0B,CACvC,IAAU,EAAA;;QAGV,OAAO,IAAI,KAAK,OAAO,GAAG,WAAW,GAAG,MAAM,CAAC;KAChD;AAED;;;;;;;;;AASG;AACK,IAAA,MAAM,aAAa,GAAA;AACzB,QAAA,IAAI,CAAC,IAAI,CAAC,qBAAqB,EAAE;YAC/B,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,sDAAsD,CACvD,CAAC;SACH;AACD,QAAA,MAAM,UAAU,GAAG,MAAM,IAAI,CAAC,qBAAqB,CAAC,MAAM,CACxD,IAAI,CAAC,cAAc,CAAC,aAAa,CAClC,CAAC;AACF,QAAA,IAAI,IAAI,CAAC,UAAU,EAAE;AACnB,YAAA,IAAI,CAAC,UAAU,CAAC,OAAO,EAAE,CAAC;SAC3B;;AAED,QAAA,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;AAC7B,QAAA,OAAO,UAAU,CAAC;KACnB;AAED;;AAEG;IACK,OAAO,UAAU,CAAC,IAAY,EAAA;QACpC,OAAO;AACL,YAAA,IAAI,EAAE,aAAa;AACjB,gBAAA,UAAU,EAAE;AACV,oBAAA;AACE,wBAAA,OAAO,EAAE;AACP,4BAAA,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,CAAC;AAClB,yBAAA;AACF,qBAAA;AACF,iBAAA;aACF,CAAC;SACS,CAAC;KACf;AAED;;AAEG;IACK,OAAO,gBAAgB,CAAC,MAA8B,EAAA;AAC5D,QAAA,MAAM,OAAO,GAAG,IAAI,WAAW,EAAE,CAAC;QAClC,OAAO;AACL,YAAA,IAAI,EAAE,MAAM,CAAC,WAAW,CACtB,IAAI,eAAe,CAAC;gBAClB,SAAS,CAAC,KAAK,EAAE,UAAU,EAAA;AACzB,oBAAA,MAAM,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC;AAC1B,wBAAA,UAAU,EAAE;AACV,4BAAA;AACE,gCAAA,OAAO,EAAE;AACP,oCAAA,IAAI,EAAE,OAAO;AACb,oCAAA,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,CAAC;AACzB,iCAAA;AACF,6BAAA;AACF,yBAAA;AACF,qBAAA,CAAC,CAAC;AACH,oBAAA,UAAU,CAAC,OAAO,CAAC,OAAO,CAAC,MAAM,CAAC,CAAA,MAAA,EAAS,IAAI,CAAA,IAAA,CAAM,CAAC,CAAC,CAAC;iBACzD;AACF,aAAA,CAAC,CACH;SACU,CAAC;KACf;;AApVD;AACO,iBAAA,CAAA,oBAAoB,GAAG,CAAC,YAAY,EAAE,
WAAW,CAAC,CAAC;AAsV5D;;AAEG;SACa,oBAAoB,CAClC,IAAmB,EACnB,MAAe,EACf,MAAuB,EAAA;;AAGvB,IAAA,IAAI,OAAO,MAAM,KAAK,WAAW,IAAI,IAAI,EAAE;QACzC,OAAO,IAAI,iBAAiB,CACzB,MAAiB,CAAC,aAA8B,EACjD,IAAI,EACJ,MAAM,CACP,CAAC;KACH;AACH;;ACvZA;;;;;;;;;;;;;;;AAeG;MAgBU,SAAS,CAAA;IAMpB,WACS,CAAA,GAAgB,EAChB,OAAgB,EACvB,YAAiD,EACjD,gBAA0D,EACnD,oBAI2B,EAAA;QAR3B,IAAG,CAAA,GAAA,GAAH,GAAG,CAAa;QAChB,IAAO,CAAA,OAAA,GAAP,OAAO,CAAS;QAGhB,IAAoB,CAAA,oBAAA,GAApB,oBAAoB,CAIO;AAElC,QAAA,MAAM,QAAQ,GAAG,gBAAgB,EAAE,YAAY,CAAC,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,CAAC;AACpE,QAAA,MAAM,IAAI,GAAG,YAAY,EAAE,YAAY,CAAC,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,CAAC;AAC5D,QAAA,IAAI,CAAC,IAAI,GAAG,IAAI,IAAI,IAAI,CAAC;AACzB,QAAA,IAAI,CAAC,QAAQ,GAAG,QAAQ,IAAI,IAAI,CAAC;AAEjC,QAAA,IAAI,OAAO,YAAY,eAAe,EAAE;AACtC,YAAA,IAAI,CAAC,QAAQ,GAAG,OAAO,CAAC,QAAQ,CAAC;SAClC;aAAM;AACL,YAAA,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAC;SACpB;KACF;IAED,OAAO,GAAA;AACL,QAAA,OAAO,OAAO,CAAC,OAAO,EAAE,CAAC;KAC1B;IAED,IAAI,OAAO,CAAC,YAAuB,EAAA;AACjC,QAAA,IAAI,CAAC,QAAQ,GAAG,YAAY,CAAC;KAC9B;AAED,IAAA,IAAI,OAAO,GAAA;QACT,OAAO,IAAI,CAAC,QAAQ,CAAC;KACtB;AACF;;ACvED;;;;;;;;;;;;;;;AAeG;SAYa,OAAO,CACrB,SAA6B,EAC7B,EAAE,kBAAkB,EAA0B,EAAA;IAE9C,IAAI,CAAC,kBAAkB,EAAE;QACvB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,6CAA6C,CAC9C,CAAC;KACH;AAED,IAAA,MAAM,OAAO,GAAG,wBAAwB,CAAC,kBAAkB,CAAC,CAAC;;IAG7D,MAAM,GAAG,GAAG,SAAS,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC,YAAY,EAAE,CAAC;IACxD,MAAM,IAAI,GAAG,SAAS,CAAC,WAAW,CAAC,eAAe,CAAC,CAAC;IACpD,MAAM,gBAAgB,GAAG,SAAS,CAAC,WAAW,CAAC,oBAAoB,CAAC,CAAC;AAErE,IAAA,OAAO,IAAI,SAAS,CAClB,GAAG,EACH,OAAO,EACP,IAAI,EACJ,gBAAgB,EAChB,oBAAoB,CACrB,CAAC;AACJ;;ACpDA;;;;;;;;;;;;;;;AAeG;AAQH;;;;;;;AAOG;MACmB,OAAO,CAAA;AAY3B;;;;;;;;;;;;;;;;AAgBG;IACH,WAAsB,CAAA,EAAM,EAAE,SAAiB,EAAA;QAC7C,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,MAAM,EAAE;YAC5B,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,UAAU,EACtB,CAAuH,qHAAA,CAAA,CACxH,CAAC;SACH;aAAM,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,SAAS,EAAE;YACtC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,CAA6H,2HAAA,CAAA,CAC9H,CAAC;SACH;aAAM,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE;YAClC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,SAAS,EACrB,CAAqH,mHAAA,CAAA,CACtH,CAAC;SACH;aAAM;YACL,IAAI,CAAC,YAAY,GAAG;AAClB,gBAAA,MAAM,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM;AAC7B,gBAAA,OAAO,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,SAAS;AACjC,gBAAA,KAAK,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK;AAC3B,gBAAA,8BAA8B,EAAE,EAAE,CAAC,GAAG,CAAC,8BAA8B;gBACrE,QAAQ,EAAE,EAAE,CAAC,QAAQ;gBACrB,OAAO,EAAE,EAAE,CAAC,OAAO;aACpB,CAAC;AAEF,YAAA,IAAIC,wBAAoB,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,aAAa,EAAE;gBACjE,MAAM,KAAK,GAAG,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,aAAa,CAAC;AAC5C,gBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAAK;oBACxC,OAAO,OAAO,CAAC,OAAO,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC;AACpC,iBAAC,CAAC;aACH;AAAM,iBAAA,IAAK,EAAgB,CAAC,QAAQ,EAAE;AACrC,gBAAA,IAAI,EAAE,CAAC,OAAO,EAAE,2BAA2B,EAAE;AAC3C,oBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAClC,EAAgB,CAAC,QAAS,CAAC,kBAAkB,EAAE,CAAC;iBACpD;qBAAM;AACL,oBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAClC,EAAgB,CAAC,QAAS,CAAC,QAAQ,EAAE,CAAC;iBAC1C;aACF;AAED,YAAA,IAAK,EAAgB,CAAC,IAAI,EAAE;AAC1B,gBAAA,IAAI,CAAC,YAAY,CAAC,YAAY,GAAG,MAC9B,EAAgB,CAAC,IAAK,CAAC,QAAQ,EAAE,CAAC;aACtC;AAED,YAAA,IAAI,CAAC,KAAK,GAAG,OAAO,CAAC,kBAAkB,CACrC,SAAS,EACT,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,WAAW,CACtC,CAAC;SACH;KACF;AAED;;;;;;;AAOG;AACH,IAAA,OAAO,kBAAkB,CACvB,SAAiB,EACjB,WAAwB,EAAA;AAExB,QAAA,IAAI,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AACzC,YAAA,OAAO,OAAO,CAAC,0BAA0B,CAAC,SAAS,CAAC,CAAC;SACtD;aAAM;AACL,YAAA,OAAO,OAAO,CAAC,0BAA0B,CAAC,SAAS,CAAC,CAAC;SACtD;KACF;AAED;;AAEG;IACK,OAAO,0BAA0B,CAAC,SAAiB,EAAA;QACzD,OAAO,CAAA,OAA
A,EAAU,SAAS,CAAA,CAAE,CAAC;KAC9B;AAED;;AAEG;IACK,OAAO,0BAA0B,CAAC,SAAiB,EAAA;AACzD,QAAA,IAAI,KAAa,CAAC;AAClB,QAAA,IAAI,SAAS,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE;AAC3B,YAAA,IAAI,SAAS,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE;;AAEnC,gBAAA,KAAK,GAAG,CAAA,kBAAA,EAAqB,SAAS,CAAA,CAAE,CAAC;aAC1C;iBAAM;;gBAEL,KAAK,GAAG,SAAS,CAAC;aACnB;SACF;aAAM;;AAEL,YAAA,KAAK,GAAG,CAAA,yBAAA,EAA4B,SAAS,CAAA,CAAE,CAAC;SACjD;AAED,QAAA,OAAO,KAAK,CAAC;KACd;AACF;;AC/JD;;;;;;;;;;;;;;;AAeG;AAgBH,IAAY,IAKX,CAAA;AALD,CAAA,UAAY,IAAI,EAAA;AACd,IAAA,IAAA,CAAA,kBAAA,CAAA,GAAA,iBAAoC,CAAA;AACpC,IAAA,IAAA,CAAA,yBAAA,CAAA,GAAA,uBAAiD,CAAA;AACjD,IAAA,IAAA,CAAA,cAAA,CAAA,GAAA,aAA4B,CAAA;AAC5B,IAAA,IAAA,CAAA,SAAA,CAAA,GAAA,SAAmB,CAAA;AACrB,CAAC,EALW,IAAI,KAAJ,IAAI,GAKf,EAAA,CAAA,CAAA,CAAA;MAEY,UAAU,CAAA;IACrB,WACS,CAAA,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,cAA+B,EAAA;QAJ/B,IAAK,CAAA,KAAA,GAAL,KAAK,CAAQ;QACb,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAM;QACV,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;QACxB,IAAM,CAAA,MAAA,GAAN,MAAM,CAAS;QACf,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;KACpC;IACJ,QAAQ,GAAA;QACN,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;AAClC,QAAA,GAAG,CAAC,QAAQ,GAAG,CAAI,CAAA,EAAA,IAAI,CAAC,UAAU,CAAA,CAAA,EAAI,IAAI,CAAC,SAAS,CAAI,CAAA,EAAA,IAAI,CAAC,IAAI,EAAE,CAAC;QACpE,GAAG,CAAC,MAAM,GAAG,IAAI,CAAC,WAAW,CAAC,QAAQ,EAAE,CAAC;AACzC,QAAA,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;KACvB;AAED,IAAA,IAAY,OAAO,GAAA;QACjB,OAAO,IAAI,CAAC,cAAc,EAAE,OAAO,IAAI,CAAA,QAAA,EAAW,cAAc,CAAA,CAAE,CAAC;KACpE;AAED,IAAA,IAAY,UAAU,GAAA;QACpB,OAAO,mBAAmB,CAAC;KAC5B;AAED,IAAA,IAAY,SAAS,GAAA;QACnB,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,YAAY,eAAe,EAAE;YACvD,OAAO,CAAA,SAAA,EAAY,IAAI,CAAC,WAAW,CAAC,OAAO,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,CAAA,CAAE,CAAC;SAC7D;aAAM,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,YAAY,eAAe,EAAE;AAC9D,YAAA,OAAO,YAAY,IAAI,CAAC,WAAW,CAAC,OAAO,cAAc,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,QAAQ,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,EAAE,CAAC;SAC5G;aAAM;YACL,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,oBAAoB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,CAAA,CAAE,CAC/D,CAAC;SACH;KACF;AAED,IAAA,IAAY,WAAW,GAAA;AACrB,QAAA,MAAM,MAAM,GAAG,IAAI,eAAe,EAAE,CAAC;AACrC,QAAA,IAAI,IAAI,CAAC,MAAM,EAAE;AACf,YAAA,MAAM,CAAC,GAAG,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;SAC1B;AAED,QAAA,OAAO,MAAM,CAAC;KACf;AACF,CAAA;MAEY,YAAY,CAAA;AACvB,IAAA,WAAA,CAAmB,WAAwB,EAAA;QAAxB,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;KAAI;IAC/C,QAAQ,GAAA;QACN,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,CAAS,MAAA,EAAA,cAAc,CAAE,CAAA,CAAC,CAAC;AAC/C,QAAA,GAAG,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,CAAC;AAE7B,QAAA,MAAM,WAAW,GAAG,IAAI,eAAe,EAAE,CAAC;QAC1C,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;AAChD,QAAA,GAAG,CAAC,MAAM,GAAG,WAAW,CAAC,QAAQ,EAAE,CAAC;AAEpC,QAAA,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;KACvB;AAED,IAAA,IAAY,QAAQ,GAAA;AAClB,QAAA,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAClE,YAAA,OAAO,0EAA0E,CAAC;SACnF;aAAM;AACL,YAAA,OAAO,mFAAmF,IAAI,CAAC,WAAW,CAAC,QAAQ,EAAE,CAAC;SACvH;KACF;AACF,CAAA;AAED;;AAEG;AACH,SAAS,gBAAgB,GAAA;IACvB,MAAM,WAAW,GAAG,EAAE,CAAC;IACvB,WAAW,CAAC,IAAI,CAAC,CAAA,EAAG,YAAY,CAAI,CAAA,EAAA,eAAe,CAAE,CAAA,CAAC,CAAC;AACvD,IAAA,WAAW,CAAC,IAAI,CAAC,QAAQ,eAAe,CAAA,CAAE,CAAC,CAAC;AAC5C,IAAA,OAAO,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;AAC/B,CAAC;AAEM,eAAe,UAAU,CAAC,GAAe,EAAA;AAC9C,IAAA,MAAM,OAAO,GAAG,IAAI,OAAO,EAAE,CAAC;AAC9B,IAAA,OAAO,CAAC,MAAM,CAAC,cAAc,EAAE,kBAAkB,CAAC,CAAC;IACnD,OAAO,CAAC,MAAM,CAAC,mBAAmB,EAAE,gBAAgB,EAAE,CAAC,CAAC;IACxD,OAAO,CAAC,MAAM,CAAC,gBAAgB,EAAE,GAAG,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;AACzD,IAAA,IAAI,GAAG,CAAC,WAAW,CAAC,8BAA8B,EAAE;QAClD,OAAO,CAAC,MAAM,CAAC,kBAAkB,EAAE,GAAG,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC;KAC3D;AACD,IAAA,IAAI,GAAG,CAAC,WAAW,CAAC,g
BAAgB,EAAE;QACpC,MAAM,aAAa,GAAG,MAAM,GAAG,CAAC,WAAW,CAAC,gBAAgB,EAAE,CAAC;QAC/D,IAAI,aAAa,EAAE;YACjB,OAAO,CAAC,MAAM,CAAC,qBAAqB,EAAE,aAAa,CAAC,KAAK,CAAC,CAAC;AAC3D,YAAA,IAAI,aAAa,CAAC,KAAK,EAAE;gBACvB,MAAM,CAAC,IAAI,CACT,CAA6C,0CAAA,EAAA,aAAa,CAAC,KAAK,CAAC,OAAO,CAAE,CAAA,CAC3E,CAAC;aACH;SACF;KACF;AAED,IAAA,IAAI,GAAG,CAAC,WAAW,CAAC,YAAY,EAAE;QAChC,MAAM,SAAS,GAAG,MAAM,GAAG,CAAC,WAAW,CAAC,YAAY,EAAE,CAAC;QACvD,IAAI,SAAS,EAAE;YACb,OAAO,CAAC,MAAM,CAAC,eAAe,EAAE,CAAY,SAAA,EAAA,SAAS,CAAC,WAAW,CAAE,CAAA,CAAC,CAAC;SACtE;KACF;AAED,IAAA,OAAO,OAAO,CAAC;AACjB,CAAC;AAEM,eAAe,gBAAgB,CACpC,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,IAAY,EACZ,cAA+B,EAAA;AAE/B,IAAA,MAAM,GAAG,GAAG,IAAI,UAAU,CAAC,KAAK,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;IAC7E,OAAO;AACL,QAAA,GAAG,EAAE,GAAG,CAAC,QAAQ,EAAE;AACnB,QAAA,YAAY,EAAE;AACZ,YAAA,MAAM,EAAE,MAAM;AACd,YAAA,OAAO,EAAE,MAAM,UAAU,CAAC,GAAG,CAAC;YAC9B,IAAI;AACL,SAAA;KACF,CAAC;AACJ,CAAC;AAEM,eAAe,WAAW,CAC/B,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,IAAY,EACZ,cAA+B,EAAA;AAE/B,IAAA,MAAM,GAAG,GAAG,IAAI,UAAU,CAAC,KAAK,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;AAC7E,IAAA,IAAI,QAAQ,CAAC;AACb,IAAA,IAAI,cAA4D,CAAC;AACjE,IAAA,IAAI;AACF,QAAA,MAAM,OAAO,GAAG,MAAM,gBAAgB,CACpC,KAAK,EACL,IAAI,EACJ,WAAW,EACX,MAAM,EACN,IAAI,EACJ,cAAc,CACf,CAAC;;AAEF,QAAA,MAAM,aAAa,GACjB,cAAc,EAAE,OAAO,IAAI,IAAI,IAAI,cAAc,CAAC,OAAO,IAAI,CAAC;cAC1D,cAAc,CAAC,OAAO;cACtB,wBAAwB,CAAC;AAC/B,QAAA,MAAM,eAAe,GAAG,IAAI,eAAe,EAAE,CAAC;AAC9C,QAAA,cAAc,GAAG,UAAU,CAAC,MAAM,eAAe,CAAC,KAAK,EAAE,EAAE,aAAa,CAAC,CAAC;QAC1E,OAAO,CAAC,YAAY,CAAC,MAAM,GAAG,eAAe,CAAC,MAAM,CAAC;AAErD,QAAA,QAAQ,GAAG,MAAM,KAAK,CAAC,OAAO,CAAC,GAAG,EAAE,OAAO,CAAC,YAAY,CAAC,CAAC;AAC1D,QAAA,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE;YAChB,IAAI,OAAO,GAAG,EAAE,CAAC;AACjB,YAAA,IAAI,YAAY,CAAC;AACjB,YAAA,IAAI;AACF,gBAAA,MAAM,IAAI,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;AACnC,gBAAA,OAAO,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC;AAC7B,gBAAA,IAAI,IAAI,CAAC,KAAK,CAAC,OAAO,EAAE;AACtB,oBAAA,OAAO,IAAI,CAAA,CAAA,EAAI,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,EAAE,CAAC;AACpD,oBAAA,YAAY,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC;iBACnC;aACF;YAAC,OAAO,CAAC,EAAE;;aAEX;AACD,YAAA,IACE,QAAQ,CAAC,MAAM,KAAK,GAAG;gBACvB,YAAY;AACZ,gBAAA,YAAY,CAAC,IAAI,CACf,CAAC,MAAoB,KAAK,MAAM,CAAC,MAAM,KAAK,kBAAkB,CAC/D;gBACD,YAAY,CAAC,IAAI,CAAC,CAAC,MAAoB,KAEnC,MAAM,CAAC,KACR,GAAG,CAAC,CAAC,EAAE,WAAW,CAAC,QAAQ,CAC1B,0CAA0C,CAC3C,CACF,EACD;AACA,gBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA+C,6CAAA,CAAA;oBAC7C,CAAgE,8DAAA,CAAA;oBAChE,CAAqE,mEAAA,CAAA;AACrE,oBAAA,CAAA,+CAAA,EAAkD,GAAG,CAAC,WAAW,CAAC,OAAO,CAAU,QAAA,CAAA;oBACnF,CAAgE,8DAAA,CAAA;oBAChE,CAAoE,kEAAA,CAAA;AACpE,oBAAA,CAAA,WAAA,CAAa,EACf;oBACE,MAAM,EAAE,QAAQ,CAAC,MAAM;oBACvB,UAAU,EAAE,QAAQ,CAAC,UAAU;oBAC/B,YAAY;AACb,iBAAA,CACF,CAAC;aACH;YACD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,uBAAuB,GAAG,CAAA,GAAA,EAAM,QAAQ,CAAC,MAAM,IAAI,QAAQ,CAAC,UAAU,CAAK,EAAA,EAAA,OAAO,EAAE,EACpF;gBACE,MAAM,EAAE,QAAQ,CAAC,MAAM;gBACvB,UAAU,EAAE,QAAQ,CAAC,UAAU;gBAC/B,YAAY;AACb,aAAA,CACF,CAAC;SACH;KACF;IAAC,OAAO,CAAC,EAAE;QACV,IAAI,GAAG,GAAG,CAAU,CAAC;AACrB,QAAA,IACG,CAAa,CAAC,IAAI,KAAK,WAAW,CAAC,WAAW;AAC9C,YAAA,CAAa,CAAC,IAAI,KAAK,WAAW,CAAC,eAAe;YACnD,CAAC,YAAY,KAAK,EAClB;AACA,YAAA,GAAG,GAAG,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,oBAAA,EAAuB,GAAG,CAAC,QAAQ,EAAE,CAAK,EAAA,EAAA,CAAC,CAAC,OAAO,CAAA,CAAE,CACtD,CAAC;AACF,YAAA,GAAG,CAAC,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC;SACrB;AAED,QAAA,MAAM,GAAG,CAAC;KACX;YAAS;QACR,IAAI,cAAc,EAAE;YAClB,YAAY,CAAC,cAAc,CAAC,CAAC;SAC9B;KACF;AACD,IAAA,OAAO,QAAQ,CAAC;AAClB;;AC7QA;;;;;;;;;;;;;;;AAeG;AAmBH;;;AAGG;AACH,SAAS,kBAAkB,CAAC,QAAiC,EAAA;A
AC3D,IAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;QACzD,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;YAClC,MAAM,CAAC,IAAI,CACT,CAAA,kBAAA,EAAqB,QAAQ,CAAC,UAAU,CAAC,MAAM,CAAG,CAAA,CAAA;gBAChD,CAA4D,0DAAA,CAAA;AAC5D,gBAAA,CAAA,gEAAA,CAAkE,CACrE,CAAC;SACH;QACD,IAAI,kBAAkB,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9C,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,gBAAA,EAAmB,uBAAuB,CACxC,QAAQ,CACT,0CAA0C,EAC3C;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,IAAI,CAAC;KACb;SAAM;AACL,QAAA,OAAO,KAAK,CAAC;KACd;AACH,CAAC;AAED;;;AAGG;AACG,SAAU,6BAA6B,CAC3C,QAAiC,EACjC,eAAmC,GAAA,eAAe,CAAC,QAAQ,EAAA;AAE3D;;;;;AAKG;AACH,IAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,OAAO,CAAC,EAAE;QAC1E,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC;KAClC;AAED,IAAA,MAAM,mBAAmB,GAAG,UAAU,CAAC,QAAQ,CAAC,CAAC;AACjD,IAAA,mBAAmB,CAAC,eAAe,GAAG,eAAe,CAAC;AACtD,IAAA,OAAO,mBAAmB,CAAC;AAC7B,CAAC;AAED;;;AAGG;AACG,SAAU,UAAU,CACxB,QAAiC,EAAA;AAEhC,IAAA,QAA4C,CAAC,IAAI,GAAG,MAAK;AACxD,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;SACjD;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,oBAAA,EAAuB,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EAC1D;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,EAAE,CAAC;AACZ,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,cAAc,GAAG,MAAK;AAClE,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,MAAM,MAAM,GAAG,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YACzD,OAAO,MAAM,KAAK,EAAE,GAAG,SAAS,GAAG,MAAM,CAAC;SAC3C;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,+BAAA,EAAkC,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EACrE;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,eAAe,GAAG,MAEhD;AACd,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,kBAAkB,CAAC,QAAQ,CAAC,CAAC;SACrC;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,oBAAA,EAAuB,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EAC1D;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,aAAa,GAAG,MAAK;AACjE,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,gBAAgB,CAAC,QAAQ,CAAC,CAAC;SACnC;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,6BAAA,EAAgC,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EACnE;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACF,IAAA,OAAO,QAA2C,CAAC;AACrD,CAAC;AAED;;;;;;AAMG;AACa,SAAA,OAAO,CACrB,QAAiC,EACjC,UAAmC,EAAA;IAEnC,MAAM,WAAW,GAAG,EAAE,CAAC;AACvB,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;YAC1D,IAAI,IAAI,CAAC,IAAI,IAAI,UAAU,CAAC,IAAI,CAAC,EAAE;AACjC,gBAAA,WAAW,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;aAC7B;SACF;KACF;AACD,IAAA,IAAI,WAAW,CAAC,MAAM,GAAG,CAAC,EAAE;AAC1B,QAAA,OAAO,WAAW,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;KAC7B;SAAM;AACL,QAAA,OAAO,EAAE,CAAC;KACX;AACH,CAAC;AAED;;AAEG;AACG,SAAU,gBAAgB,CAC9B,QAAiC,EAAA;IAEjC,MAAM,aAAa,GAAmB,EAAE,CAAC;AACzC,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC1D,YAAA,IAAI,IAAI,CAAC,YAAY,EAAE;AACrB,gBAAA,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;aACvC;SACF;KACF;AACD,IAAA,IAAI,aAAa,CAAC,MAAM,GAAG,CAAC,EAAE;AAC5B,QAAA,OAAO,aAAa,CAAC;KACtB;SAAM;AACL,QAAA,OAAO,SAAS,CAAC;KAClB;AACH,CAAC;AAED;;;;AAIG;AACG,SAAU,kBAAkB,CAChC
,QAAiC,EAAA;IAEjC,MAAM,IAAI,GAAqB,EAAE,CAAC;AAElC,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC1D,YAAA,IAAI,IAAI,CAAC,UAAU,EAAE;AACnB,gBAAA,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;aACjB;SACF;KACF;AAED,IAAA,IAAI,IAAI,CAAC,MAAM,GAAG,CAAC,EAAE;AACnB,QAAA,OAAO,IAAI,CAAC;KACb;SAAM;AACL,QAAA,OAAO,SAAS,CAAC;KAClB;AACH,CAAC;AAED,MAAM,gBAAgB,GAAG,CAAC,YAAY,CAAC,UAAU,EAAE,YAAY,CAAC,MAAM,CAAC,CAAC;AAExE,SAAS,kBAAkB,CAAC,SAAmC,EAAA;AAC7D,IAAA,QACE,CAAC,CAAC,SAAS,CAAC,YAAY;AACxB,QAAA,gBAAgB,CAAC,IAAI,CAAC,MAAM,IAAI,MAAM,KAAK,SAAS,CAAC,YAAY,CAAC,EAClE;AACJ,CAAC;AAEK,SAAU,uBAAuB,CACrC,QAAiC,EAAA;IAEjC,IAAI,OAAO,GAAG,EAAE,CAAC;AACjB,IAAA,IACE,CAAC,CAAC,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,KAAK,CAAC;QACzD,QAAQ,CAAC,cAAc,EACvB;QACA,OAAO,IAAI,sBAAsB,CAAC;AAClC,QAAA,IAAI,QAAQ,CAAC,cAAc,EAAE,WAAW,EAAE;YACxC,OAAO,IAAI,WAAW,QAAQ,CAAC,cAAc,CAAC,WAAW,EAAE,CAAC;SAC7D;AACD,QAAA,IAAI,QAAQ,CAAC,cAAc,EAAE,kBAAkB,EAAE;YAC/C,OAAO,IAAI,KAAK,QAAQ,CAAC,cAAc,CAAC,kBAAkB,EAAE,CAAC;SAC9D;KACF;SAAM,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,EAAE;QACnC,MAAM,cAAc,GAAG,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC;AAC9C,QAAA,IAAI,kBAAkB,CAAC,cAAc,CAAC,EAAE;AACtC,YAAA,OAAO,IAAI,CAAgC,6BAAA,EAAA,cAAc,CAAC,YAAY,EAAE,CAAC;AACzE,YAAA,IAAI,cAAc,CAAC,aAAa,EAAE;AAChC,gBAAA,OAAO,IAAI,CAAK,EAAA,EAAA,cAAc,CAAC,aAAa,EAAE,CAAC;aAChD;SACF;KACF;AACD,IAAA,OAAO,OAAO,CAAC;AACjB,CAAC;AAED;;;;;;AAMG;AACI,eAAe,qBAAqB,CAEzC,QAAkB,EAAA;AAClB,IAAA,MAAM,YAAY,GAA2B,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;IAEnE,MAAM,MAAM,GAAQ,EAAE,CAAC;IACvB,IAAI,cAAc,GAAuB,SAAS,CAAC;;AAGnD,IAAA,IAAI,CAAC,YAAY,CAAC,WAAW,IAAI,YAAY,CAAC,WAAW,EAAE,MAAM,KAAK,CAAC,EAAE;QACvE,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,wKAAwK,CACzK,CAAC;KACH;AAED,IAAA,KAAK,MAAM,UAAU,IAAI,YAAY,CAAC,WAAW,EAAE;AACjD,QAAA,IAAI,UAAU,CAAC,iBAAiB,EAAE;AAChC,YAAA,cAAc,GAAG,UAAU,CAAC,iBAAiB,CAAC;SAC/C;aAAM,IAAI,UAAU,CAAC,QAAQ,IAAI,UAAU,CAAC,kBAAkB,EAAE;YAC/D,MAAM,CAAC,IAAI,CAAC;gBACV,QAAQ,EAAE,UAAU,CAAC,QAAQ;gBAC7B,kBAAkB,EAAE,UAAU,CAAC,kBAAkB;AAC7C,aAAA,CAAC,CAAC;SACT;aAAM,IAAI,UAAU,CAAC,QAAQ,IAAI,UAAU,CAAC,MAAM,EAAE;YACnD,MAAM,CAAC,IAAI,CAAC;gBACV,QAAQ,EAAE,UAAU,CAAC,QAAQ;gBAC7B,MAAM,EAAE,UAAU,CAAC,MAAM;AACrB,aAAA,CAAC,CAAC;SACT;AAAM,aAAA,IAAI,UAAU,CAAC,gBAAgB,EAAE,CAEvC;aAAM;AACL,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,wDAAA,EAA2D,IAAI,CAAC,SAAS,CACvE,UAAU,CACX,CAAA,CAAA,CAAG,CACL,CAAC;SACH;KACF;AAED,IAAA,OAAO,EAAE,MAAM,EAAE,cAAc,EAAE,CAAC;AACpC;;ACzTA;;;;;;;;;;;;;;;AAeG;AAsBH;;;;;;;;;;AAUG;AAEH;;;;;;;;;AASG;AACG,SAAU,yBAAyB,CACvC,sBAA8C,EAAA;AAE9C,IAAA,sBAAsB,CAAC,cAAc,EAAE,OAAO,CAAC,aAAa,IAAG;AAC7D,QAAA,IAAI,aAAa,CAAC,MAAM,EAAE;YACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,qGAAqG,CACtG,CAAC;SACH;AACH,KAAC,CAAC,CAAC;AAEH,IAAA,IAAI,sBAAsB,CAAC,gBAAgB,EAAE,IAAI,EAAE;AACjD,QAAA,MAAM,WAAW,GAAG,IAAI,CAAC,KAAK,CAC5B,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,CAC7C,CAAC;QAEF,IAAI,WAAW,KAAK,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,EAAE;AAChE,YAAA,MAAM,CAAC,IAAI,CACT,gIAAgI,CACjI,CAAC;AACF,YAAA,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,GAAG,WAAW,CAAC;SAC5D;KACF;AAED,IAAA,OAAO,sBAAsB,CAAC;AAChC,CAAC;AAED;;;;;;;;AAQG;AACG,SAAU,0BAA0B,CACxC,gBAAiD,EAAA;AAEjD,IAAA,MAAM,uBAAuB,GAAG;QAC9B,UAAU,EAAE,gBAAgB,CAAC,UAAU;AACrC,cAAE,4BAA4B,CAAC,gBAAgB,CAAC,UAAU,CAAC;AAC3D,cAAE,SAAS;QACb,MAAM,EAAE,gBAAgB,CAAC,cAAc;AACrC,cAAE,iBAAiB,CAAC,gBAAgB,CAAC,cAAc,CAAC;AACpD,cAAE,SAAS;QACb,aAAa,EAAE,gBAAgB,CAAC,aAAa;KAC9C,CAAC;AAEF,IAAA,OAAO,uBAAuB,CAAC;AACjC,CAAC;AAED;;;;;;;;AAQG;AACa,SAAA,qBAAqB,CACnC,kBAAsC,EACtC,KAAa,EAAA;AAEb,IAAA,MAAM,wBAAwB,GAA+B;AAC3D,QAAA,sBAAsB,EAAE;YACtB,K
AAK;AACL,YAAA,GAAG,kBAAkB;AACtB,SAAA;KACF,CAAC;AAEF,IAAA,OAAO,wBAAwB,CAAC;AAClC,CAAC;AAED;;;;;;;;;;AAUG;AACG,SAAU,4BAA4B,CAC1C,UAA8C,EAAA;IAE9C,MAAM,gBAAgB,GAA+B,EAAE,CAAC;AACxD,IAAA,IAAI,mBAAmC,CAAC;IACxC,IAAI,gBAAgB,EAAE;AACpB,QAAA,UAAU,CAAC,OAAO,CAAC,SAAS,IAAG;;AAE7B,YAAA,IAAI,gBAA8C,CAAC;AACnD,YAAA,IAAI,SAAS,CAAC,gBAAgB,EAAE;AAC9B,gBAAA,gBAAgB,GAAG;AACjB,oBAAA,SAAS,EAAE,SAAS,CAAC,gBAAgB,CAAC,eAAe;iBACtD,CAAC;aACH;;AAGD,YAAA,IAAI,SAAS,CAAC,aAAa,EAAE;gBAC3B,mBAAmB,GAAG,SAAS,CAAC,aAAa,CAAC,GAAG,CAAC,YAAY,IAAG;oBAC/D,OAAO;AACL,wBAAA,GAAG,YAAY;AACf,wBAAA,QAAQ,EACN,YAAY,CAAC,QAAQ,IAAI,YAAY,CAAC,yBAAyB;AACjE,wBAAA,gBAAgB,EAAE,YAAY,CAAC,gBAAgB,IAAI,CAAC;AACpD,wBAAA,aAAa,EAAE,YAAY,CAAC,aAAa,IAAI,CAAC;qBAC/C,CAAC;AACJ,iBAAC,CAAC,CAAC;aACJ;;;;AAKD,YAAA,IACE,SAAS,CAAC,OAAO,EAAE,KAAK,EAAE,IAAI,CAC5B,IAAI,IAAK,IAAuB,EAAE,aAAa,CAChD,EACD;gBACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,+FAA+F,CAChG,CAAC;aACH;AAED,YAAA,MAAM,eAAe,GAAG;gBACtB,KAAK,EAAE,SAAS,CAAC,KAAK;gBACtB,OAAO,EAAE,SAAS,CAAC,OAAO;gBAC1B,YAAY,EAAE,SAAS,CAAC,YAAY;gBACpC,aAAa,EAAE,SAAS,CAAC,aAAa;AACtC,gBAAA,aAAa,EAAE,mBAAmB;gBAClC,gBAAgB;gBAChB,iBAAiB,EAAE,SAAS,CAAC,iBAAiB;gBAC9C,kBAAkB,EAAE,SAAS,CAAC,kBAAkB;aACjD,CAAC;AACF,YAAA,gBAAgB,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;AACzC,SAAC,CAAC,CAAC;KACJ;AAED,IAAA,OAAO,gBAAgB,CAAC;AAC1B,CAAC;AAEK,SAAU,iBAAiB,CAC/B,cAA8B,EAAA;;IAG9B,MAAM,mBAAmB,GAAmB,EAAE,CAAC;AAC/C,IAAA,cAAc,CAAC,aAAa,CAAC,OAAO,CAAC,YAAY,IAAG;QAClD,mBAAmB,CAAC,IAAI,CAAC;YACvB,QAAQ,EAAE,YAAY,CAAC,QAAQ;YAC/B,WAAW,EAAE,YAAY,CAAC,WAAW;AACrC,YAAA,QAAQ,EAAE,YAAY,CAAC,QAAQ,IAAI,YAAY,CAAC,yBAAyB;AACzE,YAAA,gBAAgB,EAAE,YAAY,CAAC,gBAAgB,IAAI,CAAC;AACpD,YAAA,aAAa,EAAE,YAAY,CAAC,aAAa,IAAI,CAAC;YAC9C,OAAO,EAAE,YAAY,CAAC,OAAO;AAC9B,SAAA,CAAC,CAAC;AACL,KAAC,CAAC,CAAC;AAEH,IAAA,MAAM,oBAAoB,GAAmB;QAC3C,WAAW,EAAE,cAAc,CAAC,WAAW;AACvC,QAAA,aAAa,EAAE,mBAAmB;QAClC,kBAAkB,EAAE,cAAc,CAAC,kBAAkB;KACtD,CAAC;AACF,IAAA,OAAO,oBAAoB,CAAC;AAC9B;;ACnOA;;;;;;;;;;;;;;;AAeG;AAqBH,MAAM,cAAc,GAAG,oCAAoC,CAAC;AAE5D;;;;;;;AAOG;SACa,aAAa,CAC3B,QAAkB,EAClB,WAAwB,EACxB,eAAiC,EAAA;IAEjC,MAAM,WAAW,GAAG,QAAQ,CAAC,IAAK,CAAC,WAAW,CAC5C,IAAI,iBAAiB,CAAC,MAAM,EAAE,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAC/C,CAAC;AACF,IAAA,MAAM,cAAc,GAClB,iBAAiB,CAA0B,WAAW,CAAC,CAAC;IAC1D,MAAM,CAAC,OAAO,EAAE,OAAO,CAAC,GAAG,cAAc,CAAC,GAAG,EAAE,CAAC;IAChD,OAAO;QACL,MAAM,EAAE,wBAAwB,CAAC,OAAO,EAAE,WAAW,EAAE,eAAe,CAAC;QACvE,QAAQ,EAAE,kBAAkB,CAAC,OAAO,EAAE,WAAW,EAAE,eAAe,CAAC;KACpE,CAAC;AACJ,CAAC;AAED,eAAe,kBAAkB,CAC/B,MAA+C,EAC/C,WAAwB,EACxB,eAAiC,EAAA;IAEjC,MAAM,YAAY,GAA8B,EAAE,CAAC;AACnD,IAAA,MAAM,MAAM,GAAG,MAAM,CAAC,SAAS,EAAE,CAAC;IAClC,OAAO,IAAI,EAAE;QACX,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QAC5C,IAAI,IAAI,EAAE;AACR,YAAA,IAAI,uBAAuB,GAAG,kBAAkB,CAAC,YAAY,CAAC,CAAC;YAC/D,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,gBAAA,uBAAuB,GAAGC,0BAAyC,CACjE,uBAA0D,CAC3D,CAAC;aACH;AACD,YAAA,OAAO,6BAA6B,CAClC,uBAAuB,EACvB,eAAe,CAChB,CAAC;SACH;AAED,QAAA,YAAY,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;KAC1B;AACH,CAAC;AAED,gBAAgB,wBAAwB,CACtC,MAA+C,EAC/C,WAAwB,EACxB,eAAiC,EAAA;AAEjC,IAAA,MAAM,MAAM,GAAG,MAAM,CAAC,SAAS,EAAE,CAAC;IAClC,OAAO,IAAI,EAAE;QACX,MAAM,EAAE,KAAK,EAAE,IAAI,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QAC5C,IAAI,IAAI,EAAE;YACR,MAAM;SACP;AAED,QAAA,IAAI,gBAAiD,CAAC;QACtD,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,YAAA,gBAAgB,GAAG,6BAA6B,CAC9CA,0BAAyC,CACvC,KAAwC,CACzC,EACD,eAAe,CAChB,CAAC;SACH;aAAM;AACL,YAAA,gBAAgB,GAAG,6BAA6B,CAAC,KAAK,EAAE,eAAe,CAAC,CAAC;SAC1E;QAED,MAAM,cAAc,GAAG,gBAAgB,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;;AAExD,QAAA,IACE,CAAC,cAAc,EAAE,OAAO,EAAE,KAAK;
YAC/B,CAAC,cAAc,EAAE,YAAY;YAC7B,CAAC,cAAc,EAAE,gBAAgB;AACjC,YAAA,CAAC,cAAc,EAAE,kBAAkB,EACnC;YACA,SAAS;SACV;AAED,QAAA,MAAM,gBAAgB,CAAC;KACxB;AACH,CAAC;AAED;;;;AAIG;AACG,SAAU,iBAAiB,CAC/B,WAAmC,EAAA;AAEnC,IAAA,MAAM,MAAM,GAAG,WAAW,CAAC,SAAS,EAAE,CAAC;AACvC,IAAA,MAAM,MAAM,GAAG,IAAI,cAAc,CAAI;AACnC,QAAA,KAAK,CAAC,UAAU,EAAA;YACd,IAAI,WAAW,GAAG,EAAE,CAAC;YACrB,OAAO,IAAI,EAAE,CAAC;AACd,YAAA,SAAS,IAAI,GAAA;AACX,gBAAA,OAAO,MAAM,CAAC,IAAI,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,EAAE,IAAI,EAAE,KAAI;oBAC5C,IAAI,IAAI,EAAE;AACR,wBAAA,IAAI,WAAW,CAAC,IAAI,EAAE,EAAE;AACtB,4BAAA,UAAU,CAAC,KAAK,CACd,IAAI,OAAO,CAAC,WAAW,CAAC,YAAY,EAAE,wBAAwB,CAAC,CAChE,CAAC;4BACF,OAAO;yBACR;wBACD,UAAU,CAAC,KAAK,EAAE,CAAC;wBACnB,OAAO;qBACR;oBAED,WAAW,IAAI,KAAK,CAAC;oBACrB,IAAI,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC;AAC9C,oBAAA,IAAI,cAAiB,CAAC;oBACtB,OAAO,KAAK,EAAE;AACZ,wBAAA,IAAI;4BACF,cAAc,GAAG,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;yBACvC;wBAAC,OAAO,CAAC,EAAE;AACV,4BAAA,UAAU,CAAC,KAAK,CACd,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,CAAA,8BAAA,EAAiC,KAAK,CAAC,CAAC,CAAC,CAAE,CAAA,CAC5C,CACF,CAAC;4BACF,OAAO;yBACR;AACD,wBAAA,UAAU,CAAC,OAAO,CAAC,cAAc,CAAC,CAAC;AACnC,wBAAA,WAAW,GAAG,WAAW,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;AACrD,wBAAA,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC;qBAC3C;oBACD,OAAO,IAAI,EAAE,CAAC;AAChB,iBAAC,CAAC,CAAC;aACJ;SACF;AACF,KAAA,CAAC,CAAC;AACH,IAAA,OAAO,MAAM,CAAC;AAChB,CAAC;AAED;;;AAGG;AACG,SAAU,kBAAkB,CAChC,SAAoC,EAAA;IAEpC,MAAM,YAAY,GAAG,SAAS,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;AACrD,IAAA,MAAM,kBAAkB,GAA4B;QAClD,cAAc,EAAE,YAAY,EAAE,cAAc;KAC7C,CAAC;AACF,IAAA,KAAK,MAAM,QAAQ,IAAI,SAAS,EAAE;AAChC,QAAA,IAAI,QAAQ,CAAC,UAAU,EAAE;AACvB,YAAA,KAAK,MAAM,SAAS,IAAI,QAAQ,CAAC,UAAU,EAAE;;;AAG3C,gBAAA,MAAM,CAAC,GAAG,SAAS,CAAC,KAAK,IAAI,CAAC,CAAC;AAC/B,gBAAA,IAAI,CAAC,kBAAkB,CAAC,UAAU,EAAE;AAClC,oBAAA,kBAAkB,CAAC,UAAU,GAAG,EAAE,CAAC;iBACpC;gBACD,IAAI,CAAC,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE;AACrC,oBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,GAAG;wBACjC,KAAK,EAAE,SAAS,CAAC,KAAK;qBACK,CAAC;iBAC/B;;AAED,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,gBAAgB;oBAC/C,SAAS,CAAC,gBAAgB,CAAC;gBAC7B,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,YAAY,GAAG,SAAS,CAAC,YAAY,CAAC;AACvE,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,aAAa;oBAC5C,SAAS,CAAC,aAAa,CAAC;AAC1B,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,aAAa;oBAC5C,SAAS,CAAC,aAAa,CAAC;AAC1B,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,iBAAiB;oBAChD,SAAS,CAAC,iBAAiB,CAAC;;;;;AAM9B,gBAAA,MAAM,kBAAkB,GAAG,SAAS,CAAC,kBAA6B,CAAC;gBACnE,IACE,OAAO,kBAAkB,KAAK,QAAQ;AACtC,oBAAA,kBAAkB,KAAK,IAAI;oBAC3B,MAAM,CAAC,IAAI,CAAC,kBAAkB,CAAC,CAAC,MAAM,GAAG,CAAC,EAC1C;AACA,oBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,kBAAkB;AACjD,wBAAA,kBAAwC,CAAC;iBAC5C;AAED;;;AAGG;AACH,gBAAA,IAAI,SAAS,CAAC,OAAO,EAAE;;AAErB,oBAAA,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,KAAK,EAAE;wBAC5B,SAAS;qBACV;oBACD,IAAI,CAAC,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,EAAE;AAC7C,wBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,GAAG;AACzC,4BAAA,IAAI,EAAE,SAAS,CAAC,OAAO,CAAC,IAAI,IAAI,MAAM;AACtC,4BAAA,KAAK,EAAE,EAAE;yBACV,CAAC;qBACH;oBACD,KAAK,MAAM,IAAI,IAAI,SAAS,CAAC,OAAO,CAAC,KAAK,EAAE;AAC1C,wBAAA,MAAM,OAAO,GAAS,EAAE,GAAG,IAAI,EAAE,CAAC;;;;AAIlC,wBAAA,IAAI,IAAI,CAAC,IAAI,KAAK,EAAE,EAAE;4BACpB,SAAS;yBACV;wBACD,IAAI,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;AACnC,4BAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CACjD,OAAe,CAChB,CAAC;yBACH;qBACF;iBACF;aACF;SACF;KACF;AACD,IAAA,OAAO,kBAAkB,CAAC;AAC5B;;ACzQA;;;;;;;;;;;;;;;AAeG;AAYH,MAAM,qBAAqB,GAAkB;;AAE3C,IAAA,WAAW,CAAC,WAAW;;AAEvB,IAAA,WAAW,CAAC,KAAK;;AAEjB,IAAA,
WAAW,CAAC,eAAe;CAC5B,CAAC;AAOF;;;;;;;;;AASG;AACI,eAAe,iBAAiB,CACrC,OAA+B,EAC/B,aAAwC,EACxC,YAAqC,EACrC,WAAoC,EAAA;IAEpC,IAAI,CAAC,aAAa,EAAE;QAClB,OAAO;YACL,QAAQ,EAAE,MAAM,WAAW,EAAE;YAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;SAC1C,CAAC;KACH;AACD,IAAA,QAAS,aAAmC,CAAC,IAAI;QAC/C,KAAK,aAAa,CAAC,cAAc;YAC/B,IAAI,MAAM,aAAa,CAAC,WAAW,CAAC,OAAO,CAAC,EAAE;gBAC5C,OAAO;oBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;oBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;iBAC3C,CAAC;aACH;YACD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,4EAA4E,CAC7E,CAAC;QACJ,KAAK,aAAa,CAAC,aAAa;YAC9B,OAAO;gBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;gBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;aAC1C,CAAC;QACJ,KAAK,aAAa,CAAC,eAAe;AAChC,YAAA,IAAI;gBACF,OAAO;oBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;oBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;iBAC1C,CAAC;aACH;YAAC,OAAO,CAAC,EAAE;AACV,gBAAA,IAAI,CAAC,YAAY,OAAO,IAAI,qBAAqB,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE;oBAClE,OAAO;wBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;wBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;qBAC3C,CAAC;iBACH;AACD,gBAAA,MAAM,CAAC,CAAC;aACT;QACH,KAAK,aAAa,CAAC,gBAAgB;YACjC,IAAI,MAAM,aAAa,CAAC,WAAW,CAAC,OAAO,CAAC,EAAE;gBAC5C,OAAO;oBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;oBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;iBAC3C,CAAC;aACH;YACD,OAAO;gBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;gBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;aAC1C,CAAC;AACJ,QAAA;AACE,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,6BAAA,EACG,aAAmC,CAAC,IACvC,CAAA,CAAE,CACH,CAAC;KACL;AACH;;AClHA;;;;;;;;;;;;;;;AAeG;AAkBH,eAAe,4BAA4B,CACzC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,cAA+B,EAAA;IAE/B,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,MAAM,GAAGC,yBAAwC,CAAC,MAAM,CAAC,CAAC;KAC3D;IACD,OAAO,WAAW,CAChB,KAAK,EACL,IAAI,CAAC,uBAAuB,EAC5B,WAAW;AACX,iBAAa,IAAI,EACjB,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,EACtB,cAAc,CACf,CAAC;AACJ,CAAC;AAEM,eAAe,qBAAqB,CACzC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,aAA6B,EAC7B,cAA+B,EAAA;AAE/B,IAAA,MAAM,UAAU,GAAG,MAAM,iBAAiB,CACxC,MAAM,EACN,aAAa,EACb,MAAM,aAAc,CAAC,qBAAqB,CAAC,MAAM,CAAC,EAClD,MACE,4BAA4B,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CAC3E,CAAC;IACF,OAAO,aAAa,CAAC,UAAU,CAAC,QAAQ,EAAE,WAAW,CAAC,CAAC;AACzD,CAAC;AAED,eAAe,sBAAsB,CACnC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,cAA+B,EAAA;IAE/B,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,MAAM,GAAGA,yBAAwC,CAAC,MAAM,CAAC,CAAC;KAC3D;IACD,OAAO,WAAW,CAChB,KAAK,EACL,IAAI,CAAC,gBAAgB,EACrB,WAAW;AACX,iBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,EACtB,cAAc,CACf,CAAC;AACJ,CAAC;AAEM,eAAe,eAAe,CACnC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,aAA6B,EAC7B,cAA+B,EAAA;AAE/B,IAAA,MAAM,UAAU,GAAG,MAAM,iBAAiB,CACxC,MAAM,EACN,aAAa,EACb,MAAM,aAAc,CAAC,eAAe,CAAC,MAAM,CAAC,EAC5C,MAAM,sBAAsB,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CACzE,CAAC;IACF,MAAM,uBAAuB,GAAG,MAAM,8BAA8B,CAClE,UAAU,CAAC,QAAQ,EACnB,WAAW,CACZ,CAAC;IACF,MAAM,gBAAgB,GAAG,6BAA6B,CACpD,uBAAuB,EACvB,UAAU,CAAC,eAAe,CAC3B,CAAC;IACF,OAAO;AACL,QAAA,QAAQ,EAAE,gBAAgB;KAC3B,CAAC;AACJ,CAAC;AAED,eAAe,8BAA8B,CAC3C,QAAkB,EAClB,WAAwB,EAAA;AAExB,IAAA,MAAM,YAAY,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;IAC3C,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,OAAOD,0BAAyC,CAAC,YAAY,CAAC,CAAC;KAChE;SAAM;AACL,QAAA,OAAO,YAAY,CAAC;KACrB;AACH;;AC5HA;;;;;;;;;;;;;;;AAeG;AAMG,SAAU,uBAAuB,CACrC,KAA+B,EAAA;;AAG/B,IAAA,IAAI,KAAK,IAAI,IAAI,EAAE;AACjB,QAAA,OAAO,SAAS,CAAC;KAClB;AAAM,SAAA,IAAI,OAAO,KAAK,KAAK,QAAQ,EAAE;AACpC,QAAA,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,CAAC,EAAa,CAAC;KAChE;AAAM,SAAA,IAAK,KAAc,CAAC,IAAI,EAAE;QAC/B,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC,KAAa,CAAC,EAAE,CAAC;KACnD;AAAM,SAAA,IAAK,KAAiB,CAAC,KAAK,EAAE;AACnC,QAAA,IAAI,CAAE,KAAiB,CAAC,IAAI,EAAE;YAC5B,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,E
AAG,KAAiB,CAAC,KAAK,EAAE,CAAC;SAC5D;aAAM;AACL,YAAA,OAAO,KAAgB,CAAC;SACzB;KACF;AACH,CAAC;AAEK,SAAU,gBAAgB,CAC9B,OAAsC,EAAA;IAEtC,IAAI,QAAQ,GAAW,EAAE,CAAC;AAC1B,IAAA,IAAI,OAAO,OAAO,KAAK,QAAQ,EAAE;QAC/B,QAAQ,GAAG,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC;KAChC;SAAM;AACL,QAAA,KAAK,MAAM,YAAY,IAAI,OAAO,EAAE;AAClC,YAAA,IAAI,OAAO,YAAY,KAAK,QAAQ,EAAE;gBACpC,QAAQ,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;aACvC;iBAAM;AACL,gBAAA,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;aAC7B;SACF;KACF;AACD,IAAA,OAAO,8CAA8C,CAAC,QAAQ,CAAC,CAAC;AAClE,CAAC;AAED;;;;;;;AAOG;AACH,SAAS,8CAA8C,CACrD,KAAa,EAAA;IAEb,MAAM,WAAW,GAAY,EAAE,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,EAAE,EAAE,CAAC;IACzD,MAAM,eAAe,GAAY,EAAE,IAAI,EAAE,UAAU,EAAE,KAAK,EAAE,EAAE,EAAE,CAAC;IACjE,IAAI,cAAc,GAAG,KAAK,CAAC;IAC3B,IAAI,kBAAkB,GAAG,KAAK,CAAC;AAC/B,IAAA,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;AACxB,QAAA,IAAI,kBAAkB,IAAI,IAAI,EAAE;AAC9B,YAAA,eAAe,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YACjC,kBAAkB,GAAG,IAAI,CAAC;SAC3B;aAAM;AACL,YAAA,WAAW,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YAC7B,cAAc,GAAG,IAAI,CAAC;SACvB;KACF;AAED,IAAA,IAAI,cAAc,IAAI,kBAAkB,EAAE;QACxC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,4HAA4H,CAC7H,CAAC;KACH;AAED,IAAA,IAAI,CAAC,cAAc,IAAI,CAAC,kBAAkB,EAAE;QAC1C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,kDAAkD,CACnD,CAAC;KACH;IAED,IAAI,cAAc,EAAE;AAClB,QAAA,OAAO,WAAW,CAAC;KACpB;AAED,IAAA,OAAO,eAAe,CAAC;AACzB,CAAC;AAEK,SAAU,0BAA0B,CACxC,MAA8D,EAAA;AAE9D,IAAA,IAAI,gBAAwC,CAAC;AAC7C,IAAA,IAAK,MAAiC,CAAC,QAAQ,EAAE;QAC/C,gBAAgB,GAAG,MAAgC,CAAC;KACrD;SAAM;;AAEL,QAAA,MAAM,OAAO,GAAG,gBAAgB,CAAC,MAAuC,CAAC,CAAC;QAC1E,gBAAgB,GAAG,EAAE,QAAQ,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC;KAC5C;AACD,IAAA,IAAK,MAAiC,CAAC,iBAAiB,EAAE;QACxD,gBAAgB,CAAC,iBAAiB,GAAG,uBAAuB,CACzD,MAAiC,CAAC,iBAAiB,CACrD,CAAC;KACH;AACD,IAAA,OAAO,gBAAgB,CAAC;AAC1B,CAAC;AAED;;;;;AAKG;AACG,SAAU,wBAAwB,CACtC,MAAc,EACd,EACE,MAAM,EACN,WAAW,EACX,YAAY,EACZ,cAAc,GAAG,CAAC,EAClB,cAAc,EACd,WAAW,EACX,iBAAiB,EACjB,iBAAiB,EACM,EAAA;;AAGzB,IAAA,MAAM,IAAI,GAAuB;AAC/B,QAAA,SAAS,EAAE;AACT,YAAA;gBACE,MAAM;AACP,aAAA;AACF,SAAA;AACD,QAAA,UAAU,EAAE;AACV,YAAA,UAAU,EAAE,MAAM;YAClB,cAAc;AACd,YAAA,WAAW,EAAE,cAAc;YAC3B,WAAW;AACX,YAAA,aAAa,EAAE,WAAW;YAC1B,YAAY;YACZ,iBAAiB;AACjB,YAAA,gBAAgB,EAAE,iBAAiB;AACnC,YAAA,gBAAgB,EAAE,IAAI;AACtB,YAAA,uBAAuB,EAAE,IAAI;AAC9B,SAAA;KACF,CAAC;AACF,IAAA,OAAO,IAAI,CAAC;AACd;;ACnKA;;;;;;;;;;;;;;;AAeG;AAKH;AAEA,MAAM,iBAAiB,GAAsB;IAC3C,MAAM;IACN,YAAY;IACZ,cAAc;IACd,kBAAkB;IAClB,SAAS;IACT,kBAAkB;CACnB,CAAC;AAEF,MAAM,oBAAoB,GAAyC;AACjE,IAAA,IAAI,EAAE,CAAC,MAAM,EAAE,YAAY,CAAC;IAC5B,QAAQ,EAAE,CAAC,kBAAkB,CAAC;IAC9B,KAAK,EAAE,CAAC,MAAM,EAAE,cAAc,EAAE,SAAS,EAAE,kBAAkB,CAAC;;IAE9D,MAAM,EAAE,CAAC,MAAM,CAAC;CACjB,CAAC;AAEF,MAAM,4BAA4B,GAA8B;IAC9D,IAAI,EAAE,CAAC,OAAO,CAAC;IACf,QAAQ,EAAE,CAAC,OAAO,CAAC;AACnB,IAAA,KAAK,EAAE,CAAC,MAAM,EAAE,UAAU,CAAC;;AAE3B,IAAA,MAAM,EAAE,EAAE;CACX,CAAC;AAEI,SAAU,mBAAmB,CAAC,OAAkB,EAAA;IACpD,IAAI,WAAW,GAAmB,IAAI,CAAC;AACvC,IAAA,KAAK,MAAM,WAAW,IAAI,OAAO,EAAE;AACjC,QAAA,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,WAAW,CAAC;AACpC,QAAA,IAAI,CAAC,WAAW,IAAI,IAAI,KAAK,MAAM,EAAE;YACnC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAiD,8CAAA,EAAA,IAAI,CAAE,CAAA,CACxD,CAAC;SACH;QACD,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,IAAI,CAAC,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA4C,yCAAA,EAAA,IAAI,CAAyB,sBAAA,EAAA,IAAI,CAAC,SAAS,CACrF,cAAc,CACf,CAAA,CAAE,CACJ,CAAC;SACH;QAED,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE;YACzB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA6D,2DAAA,CAAA,CAC9D,CAAC;SACH;AAED,QAAA,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;YACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA4C,0CAAA,CAAA,CAC7C,CAAC;S
ACH;AAED,QAAA,MAAM,WAAW,GAA+B;AAC9C,YAAA,IAAI,EAAE,CAAC;AACP,YAAA,UAAU,EAAE,CAAC;AACb,YAAA,YAAY,EAAE,CAAC;AACf,YAAA,gBAAgB,EAAE,CAAC;AACnB,YAAA,OAAO,EAAE,CAAC;AACV,YAAA,gBAAgB,EAAE,CAAC;AACnB,YAAA,cAAc,EAAE,CAAC;AACjB,YAAA,mBAAmB,EAAE,CAAC;SACvB,CAAC;AAEF,QAAA,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;AACxB,YAAA,KAAK,MAAM,GAAG,IAAI,iBAAiB,EAAE;AACnC,gBAAA,IAAI,GAAG,IAAI,IAAI,EAAE;AACf,oBAAA,WAAW,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;iBACvB;aACF;SACF;AACD,QAAA,MAAM,UAAU,GAAG,oBAAoB,CAAC,IAAI,CAAC,CAAC;AAC9C,QAAA,KAAK,MAAM,GAAG,IAAI,iBAAiB,EAAE;AACnC,YAAA,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;AACrD,gBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAA,mBAAA,EAAsB,IAAI,CAAA,iBAAA,EAAoB,GAAG,CAAA,MAAA,CAAQ,CAC1D,CAAC;aACH;SACF;QAED,IAAI,WAAW,EAAE;AACf,YAAA,MAAM,yBAAyB,GAAG,4BAA4B,CAAC,IAAI,CAAC,CAAC;YACrE,IAAI,CAAC,yBAAyB,CAAC,QAAQ,CAAC,WAAW,CAAC,IAAI,CAAC,EAAE;gBACzD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAsB,mBAAA,EAAA,IAAI,CACxB,gBAAA,EAAA,WAAW,CAAC,IACd,CAAA,yBAAA,EAA4B,IAAI,CAAC,SAAS,CACxC,4BAA4B,CAC7B,CAAE,CAAA,CACJ,CAAC;aACH;SACF;QACD,WAAW,GAAG,WAAW,CAAC;KAC3B;AACH;;AC3HA;;;;;;;;;;;;;;;AAeG;AAmBH;;AAEG;AACH,MAAM,YAAY,GAAG,cAAc,CAAC;AAEpC;;;;;AAKG;MACU,WAAW,CAAA;IAKtB,WACE,CAAA,WAAwB,EACjB,KAAa,EACZ,aAA6B,EAC9B,MAAwB,EACxB,cAA+B,EAAA;QAH/B,IAAK,CAAA,KAAA,GAAL,KAAK,CAAQ;QACZ,IAAa,CAAA,aAAA,GAAb,aAAa,CAAgB;QAC9B,IAAM,CAAA,MAAA,GAAN,MAAM,CAAkB;QACxB,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;QARhC,IAAQ,CAAA,QAAA,GAAc,EAAE,CAAC;AACzB,QAAA,IAAA,CAAA,YAAY,GAAkB,OAAO,CAAC,OAAO,EAAE,CAAC;AAStD,QAAA,IAAI,CAAC,YAAY,GAAG,WAAW,CAAC;AAChC,QAAA,IAAI,MAAM,EAAE,OAAO,EAAE;AACnB,YAAA,mBAAmB,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;AACpC,YAAA,IAAI,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,CAAC;SAChC;KACF;AAED;;;;AAIG;AACH,IAAA,MAAM,UAAU,GAAA;QACd,MAAM,IAAI,CAAC,YAAY,CAAC;QACxB,OAAO,IAAI,CAAC,QAAQ,CAAC;KACtB;AAED;;;AAGG;IACH,MAAM,WAAW,CACf,OAAsC,EAAA;QAEtC,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAC7C,QAAA,MAAM,sBAAsB,GAA2B;AACrD,YAAA,cAAc,EAAE,IAAI,CAAC,MAAM,EAAE,cAAc;AAC3C,YAAA,gBAAgB,EAAE,IAAI,CAAC,MAAM,EAAE,gBAAgB;AAC/C,YAAA,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,KAAK;AACzB,YAAA,UAAU,EAAE,IAAI,CAAC,MAAM,EAAE,UAAU;AACnC,YAAA,iBAAiB,EAAE,IAAI,CAAC,MAAM,EAAE,iBAAiB;YACjD,QAAQ,EAAE,CAAC,GAAG,IAAI,CAAC,QAAQ,EAAE,UAAU,CAAC;SACzC,CAAC;QACF,IAAI,WAAW,GAAG,EAA2B,CAAC;;AAE9C,QAAA,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY;aAClC,IAAI,CAAC,MACJ,eAAe,CACb,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,sBAAsB,EACtB,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CACF;aACA,IAAI,CAAC,MAAM,IAAG;AACb,YAAA,IACE,MAAM,CAAC,QAAQ,CAAC,UAAU;gBAC1B,MAAM,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EACrC;AACA,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;AAC/B,gBAAA,MAAM,eAAe,GAAY;AAC/B,oBAAA,KAAK,EAAE,MAAM,CAAC,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,IAAI,EAAE;;AAE1D,oBAAA,IAAI,EAAE,MAAM,CAAC,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,IAAI,OAAO;iBAC9D,CAAC;AACF,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;aACrC;iBAAM;gBACL,MAAM,iBAAiB,GAAG,uBAAuB,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC;gBACnE,IAAI,iBAAiB,EAAE;AACrB,oBAAA,MAAM,CAAC,IAAI,CACT,mCAAmC,iBAAiB,CAAA,sCAAA,CAAwC,CAC7F,CAAC;iBACH;aACF;YACD,WAAW,GAAG,MAAM,CAAC;AACvB,SAAC,CAAC,CAAC;QACL,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,OAAO,WAAW,CAAC;KACpB;AAED;;;;AAIG;IACH,MAAM,iBAAiB,CACrB,OAAsC,EAAA;QAEtC,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAC7C,QAAA,MAAM,sBAAsB,GAA2B;AACrD,YAAA,cAAc,EAAE,IAAI,CAAC,MAAM,EAAE,cAAc;AAC3C,YAAA,gBAAgB,EAAE,IAAI,CAAC,MAAM,EAAE,gBAAgB;AAC/C,YAAA,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,KAAK;AACzB,YAAA,UAAU,EAAE,IA
AI,CAAC,MAAM,EAAE,UAAU;AACnC,YAAA,iBAAiB,EAAE,IAAI,CAAC,MAAM,EAAE,iBAAiB;YACjD,QAAQ,EAAE,CAAC,GAAG,IAAI,CAAC,QAAQ,EAAE,UAAU,CAAC;SACzC,CAAC;QACF,MAAM,aAAa,GAAG,qBAAqB,CACzC,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,sBAAsB,EACtB,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;;AAGF,QAAA,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY;AAClC,aAAA,IAAI,CAAC,MAAM,aAAa,CAAC;;;aAGzB,KAAK,CAAC,QAAQ,IAAG;AAChB,YAAA,MAAM,IAAI,KAAK,CAAC,YAAY,CAAC,CAAC;AAChC,SAAC,CAAC;aACD,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,QAAQ,CAAC;aAC3C,IAAI,CAAC,QAAQ,IAAG;AACf,YAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzD,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;AAC/B,gBAAA,MAAM,eAAe,GAAG,EAAE,GAAG,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,EAAE,CAAC;;AAE9D,gBAAA,IAAI,CAAC,eAAe,CAAC,IAAI,EAAE;AACzB,oBAAA,eAAe,CAAC,IAAI,GAAG,OAAO,CAAC;iBAChC;AACD,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;aACrC;iBAAM;AACL,gBAAA,MAAM,iBAAiB,GAAG,uBAAuB,CAAC,QAAQ,CAAC,CAAC;gBAC5D,IAAI,iBAAiB,EAAE;AACrB,oBAAA,MAAM,CAAC,IAAI,CACT,yCAAyC,iBAAiB,CAAA,sCAAA,CAAwC,CACnG,CAAC;iBACH;aACF;AACH,SAAC,CAAC;aACD,KAAK,CAAC,CAAC,IAAG;;;;AAIT,YAAA,IAAI,CAAC,CAAC,OAAO,KAAK,YAAY,EAAE;;;AAG9B,gBAAA,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;aACjB;AACH,SAAC,CAAC,CAAC;AACL,QAAA,OAAO,aAAa,CAAC;KACtB;AACF;;AClMD;;;;;;;;;;;;;;;AAeG;AAiBI,eAAe,kBAAkB,CACtC,WAAwB,EACxB,KAAa,EACb,MAA0B,EAC1B,cAA+B,EAAA;IAE/B,IAAI,IAAI,GAAW,EAAE,CAAC;IACtB,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;QAC7D,MAAM,YAAY,GAAGE,qBAAoC,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;AACzE,QAAA,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,YAAY,CAAC,CAAC;KACrC;SAAM;AACL,QAAA,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC;KAC/B;AACD,IAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,KAAK,EACL,IAAI,CAAC,YAAY,EACjB,WAAW,EACX,KAAK,EACL,IAAI,EACJ,cAAc,CACf,CAAC;AACF,IAAA,OAAO,QAAQ,CAAC,IAAI,EAAE,CAAC;AACzB,CAAC;AAEM,eAAe,WAAW,CAC/B,WAAwB,EACxB,KAAa,EACb,MAA0B,EAC1B,aAA6B,EAC7B,cAA+B,EAAA;IAE/B,IACG,aAAmC,EAAE,IAAI,KAAK,aAAa,CAAC,cAAc,EAC3E;QACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,sDAAsD,CACvD,CAAC;KACH;IACD,OAAO,kBAAkB,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;AACxE;;ACxEA;;;;;;;;;;;;;;;AAeG;AAgCH;;;AAGG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAQ1C,IAAA,WAAA,CACE,EAAM,EACN,WAAwB,EACxB,cAA+B,EACvB,aAA6B,EAAA;AAErC,QAAA,KAAK,CAAC,EAAE,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;QAFrB,IAAa,CAAA,aAAA,GAAb,aAAa,CAAgB;QAGrC,IAAI,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,IAAI,EAAE,CAAC;QAC3D,IAAI,CAAC,cAAc,GAAG,WAAW,CAAC,cAAc,IAAI,EAAE,CAAC;AACvD,QAAA,IAAI,CAAC,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC;AAC/B,QAAA,IAAI,CAAC,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QACzC,IAAI,CAAC,iBAAiB,GAAG,uBAAuB,CAC9C,WAAW,CAAC,iBAAiB,CAC9B,CAAC;AACF,QAAA,IAAI,CAAC,cAAc,GAAG,cAAc,IAAI,EAAE,CAAC;KAC5C;AAED;;;AAGG;IACH,MAAM,eAAe,CACnB,OAA+D,EAAA;AAE/D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;QAC5D,OAAO,eAAe,CACpB,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV;YACE,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;YACnC,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;AACzC,YAAA,GAAG,eAAe;SACnB,EACD,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;;;;AAKG;IACH,MAAM,qBAAqB,CACzB,OAA+D,EAAA;AAE/D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;QAC5D,OAAO,qBAAqB,CAC1B,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV;YACE,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;YACnC,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;AACzC,YAAA,GAAG,eAAe;SACnB,EACD,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;;AAGG;AACH,IAAA,SAAS,CAAC,eAAiC,EAAA;AACzC,QAAA,OAAO,IAAI,WAAW,CACpB,IAAI,CAAC,YAAY,E
ACjB,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,aAAa,EAClB;YACE,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;YACzC,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;AACnC;;;;AAIG;AACH,YAAA,GAAG,eAAe;AACnB,SAAA,EACD,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;AAEG;IACH,MAAM,WAAW,CACf,OAA2D,EAAA;AAE3D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;AAC5D,QAAA,OAAO,WAAW,CAChB,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,eAAe,EACf,IAAI,CAAC,aAAa,CACnB,CAAC;KACH;AACF;;ACtKD;;;;;;;;;;;;;;;AAeG;AAsBH;;;;;;AAMG;MACU,WAAW,CAAA;AActB;;AAEG;IACH,WACU,CAAA,gBAAkC,EAClC,cAAuC,EAAA;QADvC,IAAgB,CAAA,gBAAA,GAAhB,gBAAgB,CAAkB;QAClC,IAAc,CAAA,cAAA,GAAd,cAAc,CAAyB;AAlBjD;;;;AAIG;QACH,IAAQ,CAAA,QAAA,GAAG,KAAK,CAAC;AACjB;;;;AAIG;QACH,IAAc,CAAA,cAAA,GAAG,KAAK,CAAC;KAQnB;AAEJ;;;;;;;;AAQG;AACH,IAAA,MAAM,IAAI,CACR,OAAsC,EACtC,YAAY,GAAG,IAAI,EAAA;AAEnB,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAE7C,QAAA,MAAM,OAAO,GAAuB;AAClC,YAAA,aAAa,EAAE;gBACb,KAAK,EAAE,CAAC,UAAU,CAAC;gBACnB,YAAY;AACb,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;AAYG;IACH,MAAM,gBAAgB,CAAC,IAAY,EAAA;AACjC,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;gBACb,IAAI;AACL,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;;;;;;AAiBG;IACH,MAAM,iBAAiB,CAAC,IAA2B,EAAA;AACjD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;AACb,gBAAA,KAAK,EAAE,IAAI;AACZ,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;;;;;AAgBG;IACH,MAAM,iBAAiB,CAAC,IAA2B,EAAA;AACjD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;AACb,gBAAA,KAAK,EAAE,IAAI;AACZ,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;AAOG;IACH,MAAM,qBAAqB,CACzB,iBAAqC,EAAA;AAErC,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA4B;AACvC,YAAA,YAAY,EAAE;gBACZ,iBAAiB;AAClB,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;AAQG;IACH,OAAO,OAAO,GAAA;AAGZ,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,kFAAkF,CACnF,CAAC;SACH;QACD,WAAW,MAAM,OAAO,IAAI,IAAI,CAAC,cAAc,EAAE;AAC/C,YAAA,IAAI,OAAO,IAAI,OAAO,OAAO,KAAK,QAAQ,EAAE;AAC1C,gBAAA,IAAI,gBAAgB,CAAC,cAAc,IAAI,OAAO,EAAE;oBAC9C,MAAM;AACJ,wBAAA,IAAI,EAAE,eAAe;AACrB,wBAAA,GAAI,OAA8D;6BAC/D,aAAa;qBACI,CAAC;iBACxB;AAAM,qBAAA,IAAI,gBAAgB,CAAC,SAAS,IAAI,OAAO,EAAE;oBAChD,MAAM;AACJ,wBAAA,IAAI,EAAE,UAAU;AAChB,wBAAA,GAAI,OAA0D;6BAC3D,QAAQ;qBACU,CAAC;iBACzB;AAAM,qBAAA,IAAI,gBAAgB,CAAC,sBAAsB,IAAI,OAAO,EAAE;oBAC7D,MAAM;AACJ,wBAAA,IAAI,EAAE,sBAAsB;wBAC5B,GACE,OAMD,CAAC,oBAAoB;qBACW,CAAC;iBACrC;qBAAM;AACL,oBAAA,MAAM,CAAC,IAAI,CACT,CAAA,kDAAA,EAAqD,IAAI,CAAC,SAAS,CACjE,OAAO,CACR,CAAE,CAAA,CACJ,CAAC;iBACH;aACF;iBAAM;AACL,gBAAA,MAAM,CAAC,IAAI,CACT,CAAA,6CAAA,EAAgD,IAAI,CAAC,SAAS,CAC5D,OAAO,CACR,CAAE,CAAA,CACJ,CAAC;aACH;SACF;KACF;AAED;;;;;AAKG;AACH,IAAA,MAAM,KAAK,GAAA;AACT,QAAA,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;AAClB,YAAA,IAAI,CAAC,
QAAQ,GAAG,IAAI,CAAC;YACrB,MAAM,IAAI,CAAC,gBAAgB,CAAC,KAAK,CAAC,IAAI,EAAE,wBAAwB,CAAC,CAAC;SACnE;KACF;AAED;;;;;;;;;AASG;IACH,MAAM,eAAe,CAAC,WAAoC,EAAA;AACxD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;;;AAID,QAAA,WAAW,CAAC,OAAO,CAAC,UAAU,IAAG;AAC/B,YAAA,MAAM,OAAO,GAA6B;AACxC,gBAAA,aAAa,EAAE,EAAE,WAAW,EAAE,CAAC,UAAU,CAAC,EAAE;aAC7C,CAAC;AACF,YAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;AACtD,SAAC,CAAC,CAAC;KACJ;AAED;;;;;;;;;AASG;IACH,MAAM,eAAe,CACnB,gBAAuD,EAAA;AAEvD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,MAAM,GAAG,gBAAgB,CAAC,SAAS,EAAE,CAAC;QAC5C,OAAO,IAAI,EAAE;AACX,YAAA,IAAI;gBACF,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;gBAE5C,IAAI,IAAI,EAAE;oBACR,MAAM;iBACP;qBAAM,IAAI,CAAC,KAAK,EAAE;AACjB,oBAAA,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC,CAAC;iBACrE;gBAED,MAAM,IAAI,CAAC,eAAe,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;aACrC;YAAC,OAAO,CAAC,EAAE;;AAEV,gBAAA,MAAM,OAAO,GACX,CAAC,YAAY,KAAK,GAAG,CAAC,CAAC,OAAO,GAAG,gCAAgC,CAAC;gBACpE,MAAM,IAAI,OAAO,CAAC,WAAW,CAAC,aAAa,EAAE,OAAO,CAAC,CAAC;aACvD;SACF;KACF;AACF;;ACzWD;;;;;;;;;;;;;;;AAeG;AAoBH;;;;;;;AAOG;AACG,MAAO,mBAAoB,SAAQ,OAAO,CAAA;AAM9C;;AAEG;IACH,WACE,CAAA,EAAM,EACN,WAA4B;AAC5B;;AAEG;IACK,iBAAmC,EAAA;AAE3C,QAAA,KAAK,CAAC,EAAE,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;QAFrB,IAAiB,CAAA,iBAAA,GAAjB,iBAAiB,CAAkB;QAG3C,IAAI,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,IAAI,EAAE,CAAC;AAC3D,QAAA,IAAI,CAAC,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC;AAC/B,QAAA,IAAI,CAAC,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QACzC,IAAI,CAAC,iBAAiB,GAAG,uBAAuB,CAC9C,WAAW,CAAC,iBAAiB,CAC9B,CAAC;KACH;AAED;;;;;;;AAOG;AACH,IAAA,MAAM,OAAO,GAAA;QACX,MAAM,GAAG,GAAG,IAAI,YAAY,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAChD,MAAM,IAAI,CAAC,iBAAiB,CAAC,OAAO,CAAC,GAAG,CAAC,QAAQ,EAAE,CAAC,CAAC;AAErD,QAAA,IAAI,aAAqB,CAAC;AAC1B,QAAA,IAAI,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AACnE,YAAA,aAAa,GAAG,CAAA,SAAA,EAAY,IAAI,CAAC,YAAY,CAAC,OAAO,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,CAAA,CAAE,CAAC;SACvE;aAAM;AACL,YAAA,aAAa,GAAG,CAAY,SAAA,EAAA,IAAI,CAAC,YAAY,CAAC,OAAO,CAAc,WAAA,EAAA,IAAI,CAAC,YAAY,CAAC,QAAQ,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,EAAE,CAAC;SAC/G;;;AAID,QAAA,MAAM,EACJ,uBAAuB,EACvB,wBAAwB,EACxB,GAAG,gBAAgB,EACpB,GAAG,IAAI,CAAC,gBAAgB,CAAC;AAE1B,QAAA,MAAM,YAAY,GAAqB;AACrC,YAAA,KAAK,EAAE;AACL,gBAAA,KAAK,EAAE,aAAa;gBACpB,gBAAgB;gBAChB,KAAK,EAAE,IAAI,CAAC,KAAK;gBACjB,UAAU,EAAE,IAAI,CAAC,UAAU;gBAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;gBACzC,uBAAuB;gBACvB,wBAAwB;AACzB,aAAA;SACF,CAAC;AAEF,QAAA,IAAI;;YAEF,MAAM,cAAc,GAAG,IAAI,CAAC,iBAAiB,CAAC,MAAM,EAAE,CAAC;AACvD,YAAA,IAAI,CAAC,iBAAiB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,YAAY,CAAC,CAAC,CAAC;;YAG1D,MAAM,YAAY,GAAG,CAAC,MAAM,cAAc,CAAC,IAAI,EAAE,EAAE,KAAK,CAAC;AACzD,YAAA,IACE,CAAC,YAAY;AACb,gBAAA,EAAE,OAAO,YAAY,KAAK,QAAQ,CAAC;AACnC,gBAAA,EAAE,eAAe,IAAI,YAAY,CAAC,EAClC;gBACA,MAAM,IAAI,CAAC,iBAAiB,CAAC,KAAK,CAAC,IAAI,EAAE,mBAAmB,CAAC,CAAC;gBAC9D,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,8FAA8F,CAC/F,CAAC;aACH;YAED,OAAO,IAAI,WAAW,CAAC,IAAI,CAAC,iBAAiB,EAAE,cAAc,CAAC,CAAC;SAChE;QAAC,OAAO,CAAC,EAAE;;AAEV,YAAA,MAAM,IAAI,CAAC,iBAAiB,CAAC,KAAK,EAAE,CAAC;AACrC,YAAA,MAAM,CAAC,CAAC;SACT;KACF;AACF;;ACtID;;;;;;;;;;;;;;;AAeG;AAiBH;;;;;;;;;;;;;;;;;;;;;AAqBG;AACG,MAAO,WAAY,SAAQ,OAAO,CAAA;AAUtC;;;;;;;;;AASG;AACH,IAAA,WAAA,CACE,EAAM,EACN,WAA8B,EACvB,cAA+B,EAAA;QAEtC,MAAM,EAAE,KAAK,EAAE,gBAAgB,EAAE,cAAc,EAAE,GAAG,WAAW,CAAC;AAChE,QAAA,KAAK,CAAC,EAAE,EAAE,KAAK,CAAC,CAAC;QAHV,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;AAItC,QAAA,IAAI,CAAC,gBAAgB,GAAG,gBAAgB,CAAC;AACzC,QAAA,IAAI,
CAAC,cAAc,GAAG,cAAc,CAAC;KACtC;AAED;;;;;;;;;;;;;;;;;AAiBG;IACH,MAAM,cAAc,CAClB,MAAc,EAAA;AAEd,QAAA,MAAM,IAAI,GAAG,wBAAwB,CAAC,MAAM,EAAE;YAC5C,GAAG,IAAI,CAAC,gBAAgB;YACxB,GAAG,IAAI,CAAC,cAAc;AACvB,SAAA,CAAC,CAAC;AACH,QAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,OAAO,EACZ,IAAI,CAAC,YAAY;AACjB,qBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EACpB,IAAI,CAAC,cAAc,CACpB,CAAC;AACF,QAAA,OAAO,qBAAqB,CAAoB,QAAQ,CAAC,CAAC;KAC3D;AAED;;;;;;;;;;;;;;;;;;AAkBG;AACH,IAAA,MAAM,iBAAiB,CACrB,MAAc,EACd,MAAc,EAAA;AAEd,QAAA,MAAM,IAAI,GAAG,wBAAwB,CAAC,MAAM,EAAE;YAC5C,MAAM;YACN,GAAG,IAAI,CAAC,gBAAgB;YACxB,GAAG,IAAI,CAAC,cAAc;AACvB,SAAA,CAAC,CAAC;AACH,QAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,OAAO,EACZ,IAAI,CAAC,YAAY;AACjB,qBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EACpB,IAAI,CAAC,cAAc,CACpB,CAAC;AACF,QAAA,OAAO,qBAAqB,CAAiB,QAAQ,CAAC,CAAC;KACxD;AACF;;AC/JD;;;;;;;;;;;;;;;AAeG;AAiDH;;;;AAIG;MACU,oBAAoB,CAAA;AAG/B,IAAA,WAAA,GAAA;AACE,QAAA,IAAI,OAAO,SAAS,KAAK,WAAW,EAAE;AACpC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,0DAA0D;gBACxD,+DAA+D;AAC/D,gBAAA,6EAA6E,CAChF,CAAC;SACH;KACF;AAED,IAAA,OAAO,CAAC,GAAW,EAAA;QACjB,OAAO,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,MAAM,KAAI;YACrC,IAAI,CAAC,EAAE,GAAG,IAAI,SAAS,CAAC,GAAG,CAAC,CAAC;YAC7B,IAAI,CAAC,EAAE,CAAC,UAAU,GAAG,MAAM,CAAC;AAC5B,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,MAAM,EAAE,MAAM,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;AAClE,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CACtB,OAAO,EACP,MACE,MAAM,CACJ,IAAI,OAAO,CACT,WAAW,CAAC,WAAW,EACvB,CAAA,+BAAA,CAAiC,CAClC,CACF,EACH,EAAE,IAAI,EAAE,IAAI,EAAE,CACf,CAAC;YACF,IAAI,CAAC,EAAG,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,UAAsB,KAAI;AAC5D,gBAAA,IAAI,UAAU,CAAC,MAAM,EAAE;oBACrB,MAAM,CAAC,IAAI,CACT,CAAA,gDAAA,EAAmD,UAAU,CAAC,MAAM,CAAG,CAAA,CAAA,CACxE,CAAC;iBACH;AACH,aAAC,CAAC,CAAC;AACL,SAAC,CAAC,CAAC;KACJ;AAED,IAAA,IAAI,CAAC,IAA0B,EAAA;AAC7B,QAAA,IAAI,CAAC,IAAI,CAAC,EAAE,IAAI,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,IAAI,EAAE;YACrD,MAAM,IAAI,OAAO,CAAC,WAAW,CAAC,aAAa,EAAE,wBAAwB,CAAC,CAAC;SACxE;AACD,QAAA,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;KACpB;IAED,OAAO,MAAM,GAAA;AACX,QAAA,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE;YACZ,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,6BAA6B,CAC9B,CAAC;SACH;QAED,MAAM,YAAY,GAAc,EAAE,CAAC;QACnC,MAAM,UAAU,GAAY,EAAE,CAAC;QAC/B,IAAI,cAAc,GAAwB,IAAI,CAAC;QAC/C,IAAI,QAAQ,GAAG,KAAK,CAAC;AAErB,QAAA,MAAM,eAAe,GAAG,OAAO,KAAmB,KAAmB;AACnE,YAAA,IAAI,IAAY,CAAC;AACjB,YAAA,IAAI,KAAK,CAAC,IAAI,YAAY,IAAI,EAAE;gBAC9B,IAAI,GAAG,MAAM,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC;aAChC;AAAM,iBAAA,IAAI,OAAO,KAAK,CAAC,IAAI,KAAK,QAAQ,EAAE;AACzC,gBAAA,IAAI,GAAG,KAAK,CAAC,IAAI,CAAC;aACnB;iBAAM;AACL,gBAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,CAAA,kFAAA,EAAqF,OAAO,KAAK,CAAC,IAAI,CAAG,CAAA,CAAA,CAC1G,CACF,CAAC;gBACF,IAAI,cAAc,EAAE;AAClB,oBAAA,cAAc,EAAE,CAAC;oBACjB,cAAc,GAAG,IAAI,CAAC;iBACvB;gBACD,OAAO;aACR;AAED,YAAA,IAAI;gBACF,MAAM,GAAG,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAY,CAAC;AACxC,gBAAA,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;aACxB;YAAC,OAAO,CAAC,EAAE;gBACV,MAAM,GAAG,GAAG,CAAU,CAAC;AACvB,gBAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,4CAA4C,GAAG,CAAC,OAAO,CAAE,CAAA,CAC1D,CACF,CAAC;aACH;YAED,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;AACH,SAAC,CAAC;QAEF,MAAM,aAAa,GAAG,MAAW;AAC/B,YAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CAAC,WAAW,CAAC,WAAW,EAAE,6BAA6B,CAAC,CACpE,CAAC;YACF,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;AACH,SAAC,CAAC;AAEF,QAAA,MAAM,aAAa,GAAG,CAAC,KAAiB,KAAU;AAChD,YAAA,IAAI,KAAK,CAAC,MAAM,EAAE;gBAChB,MAAM,CAAC,IAAI,CACT,CAAA,uDAAA,EAA0
D,KAAK,CAAC,MAAM,CAAE,CAAA,CACzE,CAAC;aACH;YACD,QAAQ,GAAG,IAAI,CAAC;YAChB,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;;YAED,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,SAAS,EAAE,eAAe,CAAC,CAAC;YACzD,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;YACrD,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;AACvD,SAAC,CAAC;QAEF,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,SAAS,EAAE,eAAe,CAAC,CAAC;QACrD,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;QACjD,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;QAEjD,OAAO,CAAC,QAAQ,EAAE;AAChB,YAAA,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzB,gBAAA,MAAM,KAAK,GAAG,UAAU,CAAC,KAAK,EAAG,CAAC;AAClC,gBAAA,MAAM,KAAK,CAAC;aACb;AACD,YAAA,IAAI,YAAY,CAAC,MAAM,GAAG,CAAC,EAAE;AAC3B,gBAAA,MAAM,YAAY,CAAC,KAAK,EAAG,CAAC;aAC7B;iBAAM;AACL,gBAAA,MAAM,IAAI,OAAO,CAAO,OAAO,IAAG;oBAChC,cAAc,GAAG,OAAO,CAAC;AAC3B,iBAAC,CAAC,CAAC;aACJ;SACF;;AAGD,QAAA,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzB,YAAA,MAAM,KAAK,GAAG,UAAU,CAAC,KAAK,EAAG,CAAC;AAClC,YAAA,MAAM,KAAK,CAAC;SACb;KACF;IAED,KAAK,CAAC,IAAa,EAAE,MAAe,EAAA;AAClC,QAAA,OAAO,IAAI,OAAO,CAAC,OAAO,IAAG;AAC3B,YAAA,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE;gBACZ,OAAO,OAAO,EAAE,CAAC;aAClB;AAED,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;;YAEnE,IACE,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,MAAM;gBACvC,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,UAAU,EAC3C;gBACA,OAAO,OAAO,EAAE,CAAC;aAClB;YAED,IAAI,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,OAAO,EAAE;gBAC5C,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;aAC7B;AACH,SAAC,CAAC,CAAC;KACJ;AACF;;AChPD;;;;;;;;;;;;;;;AAeG;AAWH;;;;;;AAMG;MACmB,MAAM,CAAA;AAkC1B,IAAA,WAAA,CAAY,YAA6B,EAAA;;QAEvC,IAAI,CAAC,YAAY,CAAC,IAAI,IAAI,CAAC,YAAY,CAAC,KAAK,EAAE;YAC7C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,wEAAwE,CACzE,CAAC;SACH;;AAED,QAAA,KAAK,MAAM,QAAQ,IAAI,YAAY,EAAE;YACnC,IAAI,CAAC,QAAQ,CAAC,GAAG,YAAY,CAAC,QAAQ,CAAC,CAAC;SACzC;;AAED,QAAA,IAAI,CAAC,IAAI,GAAG,YAAY,CAAC,IAAI,CAAC;QAC9B,IAAI,CAAC,MAAM,GAAG,YAAY,CAAC,cAAc,CAAC,QAAQ,CAAC;cAC/C,YAAY,CAAC,MAAM;cACnB,SAAS,CAAC;QACd,IAAI,CAAC,QAAQ,GAAG,YAAY,CAAC,cAAc,CAAC,UAAU,CAAC;AACrD,cAAE,CAAC,CAAC,YAAY,CAAC,QAAQ;cACvB,KAAK,CAAC;KACX;AAED;;;;AAIG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAkD;YACzD,IAAI,EAAE,IAAI,CAAC,IAAI;SAChB,CAAC;AACF,QAAA,KAAK,MAAM,IAAI,IAAI,IAAI,EAAE;AACvB,YAAA,IAAI,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK,SAAS,EAAE;AACzD,gBAAA,IAAI,IAAI,KAAK,UAAU,IAAI,IAAI,CAAC,IAAI,KAAK,UAAU,CAAC,MAAM,EAAE;oBAC1D,GAAG,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,CAAC;iBACxB;aACF;SACF;AACD,QAAA,OAAO,GAAoB,CAAC;KAC7B;IAED,OAAO,KAAK,CAAC,WAA6C,EAAA;QACxD,OAAO,IAAI,WAAW,CAAC,WAAW,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;KACxD;IAED,OAAO,MAAM,CACX,YAKC,EAAA;AAED,QAAA,OAAO,IAAI,YAAY,CACrB,YAAY,EACZ,YAAY,CAAC,UAAU,EACvB,YAAY,CAAC,kBAAkB,CAChC,CAAC;KACH;;IAGD,OAAO,MAAM,CAAC,YAA2B,EAAA;AACvC,QAAA,OAAO,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;KACvC;IAED,OAAO,UAAU,CACf,YAA+C,EAAA;QAE/C,OAAO,IAAI,YAAY,CAAC,YAAY,EAAE,YAAY,CAAC,IAAI,CAAC,CAAC;KAC1D;IAED,OAAO,OAAO,CAAC,aAA4B,EAAA;AACzC,QAAA,OAAO,IAAI,aAAa,CAAC,aAAa,CAAC,CAAC;KACzC;;IAGD,OAAO,MAAM,CAAC,YAA2B,EAAA;AACvC,QAAA,OAAO,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;KACvC;;IAGD,OAAO,OAAO,CAAC,aAA4B,EAAA;AACzC,QAAA,OAAO,IAAI,aAAa,CAAC,aAAa,CAAC,CAAC;KACzC;IAED,OAAO,KAAK,CACV,WAAoD,EAAA;AAEpD,QAAA,OAAO,IAAI,WAAW,CAAC,WAAW,CAAC,CAAC;KACrC;AACF,CAAA;AAeD;;;AAGG;AACG,MAAO,aAAc,SAAQ,MAAM,CAAA;AACvC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,OAAO;AACxB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;AAGG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;AACtC
,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;AAGG;AACG,MAAO,aAAc,SAAQ,MAAM,CAAA;AACvC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,OAAO;AACxB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;IAEtC,WAAY,CAAA,YAA2B,EAAE,UAAqB,EAAA;AAC5D,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;AACH,QAAA,IAAI,CAAC,IAAI,GAAG,UAAU,CAAC;KACxB;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;AAC3B,QAAA,IAAI,IAAI,CAAC,IAAI,EAAE;AACb,YAAA,GAAG,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC;SACzB;AACD,QAAA,OAAO,GAAoB,CAAC;KAC7B;AACF,CAAA;AAED;;;;;AAKG;AACG,MAAO,WAAY,SAAQ,MAAM,CAAA;IACrC,WAAY,CAAA,YAA0B,EAAS,KAAkB,EAAA;AAC/D,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,KAAK;AACtB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;QAJ0C,IAAK,CAAA,KAAA,GAAL,KAAK,CAAa;KAKhE;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAC3B,GAAG,CAAC,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,EAAE,CAAC;AAChC,QAAA,OAAO,GAAG,CAAC;KACZ;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;AACtC,IAAA,WAAA,CACE,YAA0B,EACnB,UAEN,EACM,qBAA+B,EAAE,EAAA;AAExC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;QARI,IAAU,CAAA,UAAA,GAAV,UAAU,CAEhB;QACM,IAAkB,CAAA,kBAAA,GAAlB,kBAAkB,CAAe;KAMzC;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAC3B,GAAG,CAAC,UAAU,GAAG,EAAE,GAAG,IAAI,CAAC,UAAU,EAAE,CAAC;QACxC,MAAM,QAAQ,GAAG,EAAE,CAAC;AACpB,QAAA,IAAI,IAAI,CAAC,kBAAkB,EAAE;AAC3B,YAAA,KAAK,MAAM,WAAW,IAAI,IAAI,CAAC,kBAAkB,EAAE;gBACjD,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,WAAW,CAAC,EAAE;oBAChD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAa,UAAA,EAAA,WAAW,CAAqD,mDAAA,CAAA,CAC9E,CAAC;iBACH;aACF;SACF;AACD,QAAA,KAAK,MAAM,WAAW,IAAI,IAAI,CAAC,UAAU,EAAE;YACzC,IAAI,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,WAAW,CAAC,EAAE;AAC/C,gBAAA,GAAG,CAAC,UAAU,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,UAAU,CAC3C,WAAW,CACZ,CAAC,MAAM,EAAmB,CAAC;gBAC5B,IAAI,CAAC,IAAI,CAAC,kBAAkB,CAAC,QAAQ,CAAC,WAAW,CAAC,EAAE;AAClD,oBAAA,QAAQ,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;iBAC5B;aACF;SACF;AACD,QAAA,IAAI,QAAQ,CAAC,MAAM,GAAG,CAAC,EAAE;AACvB,YAAA,GAAG,CAAC,QAAQ,GAAG,QAAQ,CAAC;SACzB;QACD,OAAO,GAAG,CAAC,kBAAkB,CAAC;AAC9B,QAAA,OAAO,GAAoB,CAAC;KAC7B;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,WAAY,SAAQ,MAAM,CAAA;AAErC,IAAA,WAAA,CAAY,YAAqD,EAAA;QAC/D,IAAI,YAAY,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;YACnC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,sCAAsC,CACvC,CAAC;SACH;AACD,QAAA,KAAK,CAAC;AACJ,YAAA,GAAG,YAAY;YACf,IAAI,EAAE,SAAS;AAChB,SAAA,CAAC,CAAC;AACH,QAAA,IAAI,CAAC,KAAK,GAAG,YAAY,CAAC,KAAK,CAAC;KACjC;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;;AAE3B,QAAA,IAAI,IAAI,CAAC,KAAK,IAAI,KAAK,CAAC,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE;AAC3C,YAAA,GAAG,CAAC,KAAK,GAAI,IAAI,CAAC,KAAuB,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;SAChE;AACD,QAAA,OAAO,GAAG,CAAC;KACZ;AACF;;AC5VD;;;;;;;;;;;;;;;AAeG;AAIH;;;;;;;;;;;;;;;;AAgBG;MACU,iBAAiB,CAAA;AAU5B,IAAA,WAAA,GAAA;AACE,QAAA,IAAI,CAAC,QAAQ,GAAG,WAAW,CAAC;KAC7B;AAED;;;;;;;AAOG;IACH,OAAO,IAAI,CAAC,kBAA2B,EAAA;AACrC,QAAA,IACE,kBAAkB;aACjB,kBAAkB,GAAG,CAAC,IAAI,kBAAkB,GAAG,GAAG,CAAC,EACpD;AACA,YAAA,MAAM,CAAC,IAAI,CACT,uCAAuC,kBAAkB,CAAA,4CAAA,CAA8C,CACxG,CAAC;SACH;AACD,QAAA,OAAO,EAAE,QAAQ,EAAE,YAAY,EAAE,kBAAkB,EAAE,CAAC;KACvD;AAED;;;;;;AAMG;AACH,IAAA,OAAO,GAAG,GAAA;AACR,QAAA,OAAO,EAAE,QAAQ,EAAE,WAAW,EAAE,CAAC;KAClC;AACF;;AChFD;;;;;;;;;;;;;;;AAeG;AAcH,MAAM,wBAAwB,GAAG,KAAM,CAAC;AACxC,
[... remainder of the bundle's source map omitted: machine-generated base64 VLQ "mappings" data with no human-readable content ...]"}
\ No newline at end of file diff --git a/frontend-old/node_modules/@firebase/ai/dist/index.node.cjs.js b/frontend-old/node_modules/@firebase/ai/dist/index.node.cjs.js new file mode 100644 index 0000000..da7e499 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/index.node.cjs.js @@ -0,0 +1,3971 @@ +'use strict'; + +Object.defineProperty(exports, '__esModule', { value: true }); + +var app = require('@firebase/app'); +var component = require('@firebase/component'); +var util = require('@firebase/util'); +var logger$1 = require('@firebase/logger'); + +var name = "@firebase/ai"; +var version = "2.5.0"; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const AI_TYPE = 'AI'; +const DEFAULT_LOCATION = 'us-central1'; +const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com'; +const DEFAULT_API_VERSION = 'v1beta'; +const PACKAGE_VERSION = version; +const LANGUAGE_TAG = 'gl-js'; +const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000; +/** + * Defines the name of the default in-cloud model to use for hybrid inference. + */ +const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite'; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Possible roles. + * @public + */ +const POSSIBLE_ROLES = ['user', 'model', 'function', 'system']; +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +const HarmCategory = { + HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH', + HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', + HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT', + HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT' +}; +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +const HarmBlockThreshold = { + /** + * Content with `NEGLIGIBLE` will be allowed. + */ + BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE', + /** + * Content with `NEGLIGIBLE` and `LOW` will be allowed. + */ + BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE', + /** + * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed. + */ + BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH', + /** + * All content will be allowed. + */ + BLOCK_NONE: 'BLOCK_NONE', + /** + * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding + * to the {@link (HarmCategory:type)} will not be present in the response. 
+ */ + OFF: 'OFF' +}; +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). + * + * @public + */ +const HarmBlockMethod = { + /** + * The harm block method uses both probability and severity scores. + */ + SEVERITY: 'SEVERITY', + /** + * The harm block method uses the probability score. + */ + PROBABILITY: 'PROBABILITY' +}; +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +const HarmProbability = { + /** + * Content has a negligible chance of being unsafe. + */ + NEGLIGIBLE: 'NEGLIGIBLE', + /** + * Content has a low chance of being unsafe. + */ + LOW: 'LOW', + /** + * Content has a medium chance of being unsafe. + */ + MEDIUM: 'MEDIUM', + /** + * Content has a high chance of being unsafe. + */ + HIGH: 'HIGH' +}; +/** + * Harm severity levels. + * @public + */ +const HarmSeverity = { + /** + * Negligible level of harm severity. + */ + HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE', + /** + * Low level of harm severity. + */ + HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW', + /** + * Medium level of harm severity. + */ + HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM', + /** + * High level of harm severity. + */ + HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH', + /** + * Harm severity is not supported. + * + * @remarks + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED' +}; +/** + * Reason that a prompt was blocked. + * @public + */ +const BlockReason = { + /** + * Content was blocked by safety settings. + */ + SAFETY: 'SAFETY', + /** + * Content was blocked, but the reason is uncategorized. + */ + OTHER: 'OTHER', + /** + * Content was blocked because it contained terms from the terminology blocklist. + */ + BLOCKLIST: 'BLOCKLIST', + /** + * Content was blocked due to prohibited content. + */ + PROHIBITED_CONTENT: 'PROHIBITED_CONTENT' +}; +/** + * Reason that a candidate finished. + * @public + */ +const FinishReason = { + /** + * Natural stop point of the model or provided stop sequence. + */ + STOP: 'STOP', + /** + * The maximum number of tokens as specified in the request was reached. + */ + MAX_TOKENS: 'MAX_TOKENS', + /** + * The candidate content was flagged for safety reasons. + */ + SAFETY: 'SAFETY', + /** + * The candidate content was flagged for recitation reasons. + */ + RECITATION: 'RECITATION', + /** + * Unknown reason. + */ + OTHER: 'OTHER', + /** + * The candidate content contained forbidden terms. + */ + BLOCKLIST: 'BLOCKLIST', + /** + * The candidate content potentially contained prohibited content. + */ + PROHIBITED_CONTENT: 'PROHIBITED_CONTENT', + /** + * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII). + */ + SPII: 'SPII', + /** + * The function call generated by the model was invalid. + */ + MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL' +}; +/** + * @public + */ +const FunctionCallingMode = { + /** + * Default model behavior; model decides to predict either a function call + * or a natural language response. + */ + AUTO: 'AUTO', + /** + * Model is constrained to always predicting a function call only. + * If `allowed_function_names` is set, the predicted function call will be + * limited to any one of `allowed_function_names`, else the predicted + * function call will be any one of the provided `function_declarations`. + */ + ANY: 'ANY', + /** + * Model will not predict any function call. 
Model behavior is same as when + * not passing any function declarations. + */ + NONE: 'NONE' +}; +/** + * Content part modality. + * @public + */ +const Modality = { + /** + * Unspecified modality. + */ + MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED', + /** + * Plain text. + */ + TEXT: 'TEXT', + /** + * Image. + */ + IMAGE: 'IMAGE', + /** + * Video. + */ + VIDEO: 'VIDEO', + /** + * Audio. + */ + AUDIO: 'AUDIO', + /** + * Document (for example, PDF). + */ + DOCUMENT: 'DOCUMENT' +}; +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +const ResponseModality = { + /** + * Text. + * @beta + */ + TEXT: 'TEXT', + /** + * Image. + * @beta + */ + IMAGE: 'IMAGE', + /** + * Audio. + * @beta + */ + AUDIO: 'AUDIO' +}; +/** + * Determines whether inference happens on-device or in-cloud. + * + * @remarks + * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an + * on-device model. If on-device inference is not available, the SDK + * will fall back to using a cloud-hosted model. + * <br/> + * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an + * on-device model. The SDK will not fall back to a cloud-hosted model. + * If on-device inference is not available, inference methods will throw. + * <br/> + * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a + * cloud-hosted model. The SDK will not fall back to an on-device model. + * <br/> + * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a + * cloud-hosted model. If not available, the SDK will fall back to an + * on-device model. + * + * @beta + */ +const InferenceMode = { + 'PREFER_ON_DEVICE': 'prefer_on_device', + 'ONLY_ON_DEVICE': 'only_on_device', + 'ONLY_IN_CLOUD': 'only_in_cloud', + 'PREFER_IN_CLOUD': 'prefer_in_cloud' +}; +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +const InferenceSource = { + 'ON_DEVICE': 'on_device', + 'IN_CLOUD': 'in_cloud' +}; +/** + * Represents the result of the code execution. + * + * @beta + */ +const Outcome = { + UNSPECIFIED: 'OUTCOME_UNSPECIFIED', + OK: 'OUTCOME_OK', + FAILED: 'OUTCOME_FAILED', + DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED' +}; +/** + * The programming language of the code. + * + * @beta + */ +const Language = { + UNSPECIFIED: 'LANGUAGE_UNSPECIFIED', + PYTHON: 'PYTHON' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. 
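The harm-category, block-threshold, and function-calling constants above are plain string maps that callers pass through request options. A minimal sketch of wiring HarmCategory and HarmBlockThreshold into safetySettings; it assumes the public 'firebase/ai' entry point with its getAI/getGenerativeModel helpers, and the model name is a placeholder:

    import { initializeApp } from 'firebase/app';
    import { getAI, getGenerativeModel, HarmCategory, HarmBlockThreshold } from 'firebase/ai';

    const ai = getAI(initializeApp({ /* apiKey, projectId, appId, ... */ }));

    // Block harassment content rated MEDIUM probability or higher; other categories keep their defaults.
    const model = getGenerativeModel(ai, {
      model: 'gemini-2.0-flash', // placeholder model name
      safetySettings: [
        {
          category: HarmCategory.HARM_CATEGORY_HARASSMENT,
          threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
        }
      ]
    });

Note that HarmBlockMethod applies only to the Vertex AI backend; as the request mapper later in this file shows, setting SafetySetting.method against the Gemini Developer API raises an 'unsupported' error.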
+ * <br/> + * + * @beta + */ +const URLRetrievalStatus = { + /** + * Unspecified retrieval status. + */ + URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED', + /** + * The URL retrieval was successful. + */ + URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS', + /** + * The URL retrieval failed. + */ + URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR', + /** + * The URL retrieval failed because the content is behind a paywall. + */ + URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL', + /** + * The URL retrieval failed because the content is unsafe. + */ + URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE' +}; +/** + * The types of responses that can be returned by {@link LiveSession.receive}. + * + * @beta + */ +const LiveResponseType = { + SERVER_CONTENT: 'serverContent', + TOOL_CALL: 'toolCall', + TOOL_CALL_CANCELLATION: 'toolCallCancellation' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +const AIErrorCode = { + /** A generic error occurred. */ + ERROR: 'error', + /** An error occurred in a request. */ + REQUEST_ERROR: 'request-error', + /** An error occurred in a response. */ + RESPONSE_ERROR: 'response-error', + /** An error occurred while performing a fetch. */ + FETCH_ERROR: 'fetch-error', + /** An error occurred because an operation was attempted on a closed session. */ + SESSION_CLOSED: 'session-closed', + /** An error associated with a Content object. */ + INVALID_CONTENT: 'invalid-content', + /** An error due to the Firebase API not being enabled in the Console. */ + API_NOT_ENABLED: 'api-not-enabled', + /** An error due to invalid Schema input. */ + INVALID_SCHEMA: 'invalid-schema', + /** An error occurred due to a missing Firebase API key. */ + NO_API_KEY: 'no-api-key', + /** An error occurred due to a missing Firebase app ID. */ + NO_APP_ID: 'no-app-id', + /** An error occurred due to a model name not being specified during initialization. */ + NO_MODEL: 'no-model', + /** An error occurred due to a missing project ID. */ + NO_PROJECT_ID: 'no-project-id', + /** An error occurred while parsing. */ + PARSE_FAILED: 'parse-failed', + /** An error occurred due an attempt to use an unsupported feature. */ + UNSUPPORTED: 'unsupported' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
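Because the AIErrorCode values are plain strings and AIError (defined further down in this file) exposes `code` plus optional `customErrorData`, callers can branch on failures directly. A hedged sketch; the model instance and prompt are placeholders:

    import { AIError, AIErrorCode } from 'firebase/ai';

    async function runPrompt(model) {
      try {
        const result = await model.generateContent('Hello');
        return result.response.text();
      } catch (e) {
        if (e instanceof AIError) {
          if (e.code === AIErrorCode.API_NOT_ENABLED) {
            console.error('Enable the Firebase AI API for this project, then retry.');
          } else if (e.code === AIErrorCode.FETCH_ERROR) {
            // customErrorData carries status/statusText/errorDetails when the backend returns an error response.
            console.error('Request failed:', e.customErrorData?.status, e.message);
          } else {
            console.error(`AI error (${e.code}):`, e.message);
          }
          return undefined;
        }
        throw e;
      }
    }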
+ */ +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +const SchemaType = { + /** String type. */ + STRING: 'string', + /** Number type. */ + NUMBER: 'number', + /** Integer type. */ + INTEGER: 'integer', + /** Boolean type. */ + BOOLEAN: 'boolean', + /** Array type. */ + ARRAY: 'array', + /** Object type. */ + OBJECT: 'object' +}; + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * A filter level controlling how aggressively to filter sensitive content. + * + * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI + * are assessed against a list of safety filters, which include 'harmful categories' (for example, + * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to + * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines} + * for more details. + * + * @public + */ +const ImagenSafetyFilterLevel = { + /** + * The most aggressive filtering level; most strict blocking. + */ + BLOCK_LOW_AND_ABOVE: 'block_low_and_above', + /** + * Blocks some sensitive prompts and responses. + */ + BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above', + /** + * Blocks few sensitive prompts and responses. + */ + BLOCK_ONLY_HIGH: 'block_only_high', + /** + * The least aggressive filtering level; blocks very few sensitive prompts and responses. + * + * Access to this feature is restricted and may require your case to be reviewed and approved by + * Cloud support. + */ + BLOCK_NONE: 'block_none' +}; +/** + * A filter level controlling whether generation of images containing people or faces is allowed. + * + * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a> + * documentation for more details. + * + * @public + */ +const ImagenPersonFilterLevel = { + /** + * Disallow generation of images containing people or faces; images of people are filtered out. + */ + BLOCK_ALL: 'dont_allow', + /** + * Allow generation of images containing adults only; images of children are filtered out. + * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + ALLOW_ADULT: 'allow_adult', + /** + * Allow generation of images containing adults only; images of children are filtered out. 
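The SchemaType constants mirror OpenAPI data types and are typically used to describe structured (JSON) output. A sketch under the assumption that the SDK's generationConfig accepts responseMimeType and a plain responseSchema object (those options are not shown in this excerpt); the model name is a placeholder:

    import { getGenerativeModel, SchemaType } from 'firebase/ai';

    const model = getGenerativeModel(ai, { // `ai` obtained from getAI(app)
      model: 'gemini-2.0-flash', // placeholder model name
      generationConfig: {
        responseMimeType: 'application/json',
        responseSchema: {
          type: SchemaType.OBJECT,
          properties: {
            title: { type: SchemaType.STRING },
            rating: { type: SchemaType.NUMBER },
            tags: { type: SchemaType.ARRAY, items: { type: SchemaType.STRING } }
          },
          required: ['title']
        }
      }
    });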
+ * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + ALLOW_ALL: 'allow_all' +}; +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +const ImagenAspectRatio = { + /** + * Square (1:1) aspect ratio. + */ + 'SQUARE': '1:1', + /** + * Landscape (3:4) aspect ratio. + */ + 'LANDSCAPE_3x4': '3:4', + /** + * Portrait (4:3) aspect ratio. + */ + 'PORTRAIT_4x3': '4:3', + /** + * Landscape (16:9) aspect ratio. + */ + 'LANDSCAPE_16x9': '16:9', + /** + * Portrait (9:16) aspect ratio. + */ + 'PORTRAIT_9x16': '9:16' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase AI SDK. + * This determines which backend service (Vertex AI Gemini API or Gemini Developer API) + * the SDK will communicate with. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +const BackendType = { + /** + * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + VERTEX_AI: 'VERTEX_AI', + /** + * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}). + * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + GOOGLE_AI: 'GOOGLE_AI' +}; // Using 'as const' makes the string values literal types + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Abstract base class representing the configuration for an AI service backend. + * This class should not be instantiated directly. 
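The Imagen filter levels and aspect ratios above are consumed through the Imagen model configuration. A sketch assuming the SDK's getImagenModel helper and its documented option names (safetyFilterLevel, personFilterLevel, numberOfImages), none of which appear in this excerpt; the model name and prompt are placeholders:

    import { getImagenModel, ImagenAspectRatio, ImagenPersonFilterLevel, ImagenSafetyFilterLevel } from 'firebase/ai';

    async function makeImage(ai) {
      const imagenModel = getImagenModel(ai, {
        model: 'imagen-3.0-generate-002', // placeholder model name
        generationConfig: {
          numberOfImages: 1,
          aspectRatio: ImagenAspectRatio.LANDSCAPE_16x9
        },
        safetySettings: {
          safetyFilterLevel: ImagenSafetyFilterLevel.BLOCK_MEDIUM_AND_ABOVE,
          personFilterLevel: ImagenPersonFilterLevel.BLOCK_ALL
        }
      });

      const { images, filteredReason } = await imagenModel.generateImages('A watercolor lighthouse at dusk');
      if (filteredReason) {
        console.warn('Some results were filtered:', filteredReason);
      }
      return images; // [{ mimeType, bytesBase64Encoded }] -- see handlePredictResponse further down
    }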
Use its subclasses; {@link GoogleAIBackend} for + * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and + * {@link VertexAIBackend} for the Vertex AI Gemini API. + * + * @public + */ +class Backend { + /** + * Protected constructor for use by subclasses. + * @param type - The backend type. + */ + constructor(type) { + this.backendType = type; + } +} +/** + * Configuration class for the Gemini Developer API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Gemini Developer API as the backend. + * + * @public + */ +class GoogleAIBackend extends Backend { + /** + * Creates a configuration object for the Gemini Developer API backend. + */ + constructor() { + super(BackendType.GOOGLE_AI); + } +} +/** + * Configuration class for the Vertex AI Gemini API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend. + * + * @public + */ +class VertexAIBackend extends Backend { + /** + * Creates a configuration object for the Vertex AI backend. + * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + constructor(location = DEFAULT_LOCATION) { + super(BackendType.VERTEX_AI); + if (!location) { + this.location = DEFAULT_LOCATION; + } + else { + this.location = location; + } + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class AIService { + constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) { + this.app = app; + this.backend = backend; + this.chromeAdapterFactory = chromeAdapterFactory; + const appCheck = appCheckProvider?.getImmediate({ optional: true }); + const auth = authProvider?.getImmediate({ optional: true }); + this.auth = auth || null; + this.appCheck = appCheck || null; + if (backend instanceof VertexAIBackend) { + this.location = backend.location; + } + else { + this.location = ''; + } + } + _delete() { + return Promise.resolve(); + } + set options(optionsToSet) { + this._options = optionsToSet; + } + get options() { + return this._options; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Error class for the Firebase AI SDK. 
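GoogleAIBackend and VertexAIBackend are the two concrete Backend subclasses; the chosen instance is passed to getAI and later drives URL construction and request mapping. A sketch with placeholder config values; note the AIModel constructor further down requires apiKey, projectId, and appId to all be present:

    import { initializeApp } from 'firebase/app';
    import { getAI, GoogleAIBackend, VertexAIBackend } from 'firebase/ai';

    const app = initializeApp({
      apiKey: 'AIza...',        // placeholder
      projectId: 'my-project',  // placeholder
      appId: '1:123:web:abc'    // placeholder
    });

    // Gemini Developer API:
    const googleAI = getAI(app, { backend: new GoogleAIBackend() });

    // Vertex AI Gemini API pinned to a region (falls back to 'us-central1' when omitted):
    const vertexAI = getAI(app, { backend: new VertexAIBackend('europe-west1') });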
+ * + * @public + */ +class AIError extends util.FirebaseError { + /** + * Constructs a new instance of the `AIError` class. + * + * @param code - The error code from {@link (AIErrorCode:type)}. + * @param message - A human-readable message describing the error. + * @param customErrorData - Optional error data. + */ + constructor(code, message, customErrorData) { + // Match error format used by FirebaseError from ErrorFactory + const service = AI_TYPE; + const fullCode = `${service}/${code}`; + const fullMessage = `${service}: ${message} (${fullCode})`; + super(code, fullMessage); + this.code = code; + this.customErrorData = customErrorData; + // FirebaseError initializes a stack trace, but it assumes the error is created from the error + // factory. Since we break this assumption, we set the stack trace to be originating from this + // constructor. + // This is only supported in V8. + if (Error.captureStackTrace) { + // Allows us to initialize the stack trace without including the constructor itself at the + // top level of the stack trace. + Error.captureStackTrace(this, AIError); + } + // Allows instanceof AIError in ES5/ES6 + // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work + // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget + // which we can now use since we no longer target ES5. + Object.setPrototypeOf(this, AIError.prototype); + // Since Error is an interface, we don't inherit toString and so we define it ourselves. + this.toString = () => fullMessage; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI} + * instances by backend type. + * + * @internal + */ +function encodeInstanceIdentifier(backend) { + if (backend instanceof GoogleAIBackend) { + return `${AI_TYPE}/googleai`; + } + else if (backend instanceof VertexAIBackend) { + return `${AI_TYPE}/vertexai/${backend.location}`; + } + else { + throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(backend.backendType)}`); + } +} +/** + * Decodes an instance identifier string into a {@link Backend}. 
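The identifier format produced by encodeInstanceIdentifier (an internal helper) is easiest to read from concrete values:

    // encodeInstanceIdentifier(new GoogleAIBackend())               -> 'AI/googleai'
    // encodeInstanceIdentifier(new VertexAIBackend('us-central1'))  -> 'AI/vertexai/us-central1'
    // decodeInstanceIdentifier('AI/vertexai/europe-west1')          -> new VertexAIBackend('europe-west1')
    // decodeInstanceIdentifier('AI/googleai')                       -> new GoogleAIBackend()
    // Any other prefix, unknown backend segment, or missing Vertex location throws AIError(AIErrorCode.ERROR).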
+ * + * @internal + */ +function decodeInstanceIdentifier(instanceIdentifier) { + const identifierParts = instanceIdentifier.split('/'); + if (identifierParts[0] !== AI_TYPE) { + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`); + } + const backendType = identifierParts[1]; + switch (backendType) { + case 'vertexai': + const location = identifierParts[2]; + if (!location) { + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown location '${instanceIdentifier}'`); + } + return new VertexAIBackend(location); + case 'googleai': + return new GoogleAIBackend(); + default: + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier string: '${instanceIdentifier}'`); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Base class for Firebase AI model APIs. + * + * Instances of this class are associated with a specific Firebase AI {@link Backend} + * and provide methods for interacting with the configured generative model. + * + * @public + */ +class AIModel { + /** + * Constructs a new instance of the {@link AIModel} class. + * + * This constructor should only be called from subclasses that provide + * a model API. + * + * @param ai - an {@link AI} instance. + * @param modelName - The name of the model being used. It can be in one of the following formats: + * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) + * - `models/my-model` (will resolve to `publishers/google/models/my-model`) + * - `publishers/my-publisher/models/my-model` (fully qualified model name) + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @internal + */ + constructor(ai, modelName) { + if (!ai.app?.options?.apiKey) { + throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`); + } + else if (!ai.app?.options?.projectId) { + throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`); + } + else if (!ai.app?.options?.appId) { + throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. 
Firebase AI requires this field to contain a valid app ID.`); + } + else { + this._apiSettings = { + apiKey: ai.app.options.apiKey, + project: ai.app.options.projectId, + appId: ai.app.options.appId, + automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled, + location: ai.location, + backend: ai.backend + }; + if (app._isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) { + const token = ai.app.settings.appCheckToken; + this._apiSettings.getAppCheckToken = () => { + return Promise.resolve({ token }); + }; + } + else if (ai.appCheck) { + if (ai.options?.useLimitedUseAppCheckTokens) { + this._apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken(); + } + else { + this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken(); + } + } + if (ai.auth) { + this._apiSettings.getAuthToken = () => ai.auth.getToken(); + } + this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType); + } + } + /** + * Normalizes the given model name to a fully qualified model resource name. + * + * @param modelName - The model name to normalize. + * @returns The fully qualified model resource name. + * + * @internal + */ + static normalizeModelName(modelName, backendType) { + if (backendType === BackendType.GOOGLE_AI) { + return AIModel.normalizeGoogleAIModelName(modelName); + } + else { + return AIModel.normalizeVertexAIModelName(modelName); + } + } + /** + * @internal + */ + static normalizeGoogleAIModelName(modelName) { + return `models/${modelName}`; + } + /** + * @internal + */ + static normalizeVertexAIModelName(modelName) { + let model; + if (modelName.includes('/')) { + if (modelName.startsWith('models/')) { + // Add 'publishers/google' if the user is only passing in 'models/model-name'. + model = `publishers/google/${modelName}`; + } + else { + // Any other custom format (e.g. tuned models) must be passed in correctly. + model = modelName; + } + } + else { + // If path is not included, assume it's a non-tuned model. + model = `publishers/google/models/${modelName}`; + } + return model; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const logger = new logger$1.Logger('@firebase/vertexai'); + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
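Model-name normalization in AIModel differs per backend; worked examples of what normalizeModelName produces, based on the branches above (model names are illustrative):

    // With a GoogleAIBackend:
    //   'gemini-2.0-flash-lite'            -> 'models/gemini-2.0-flash-lite'
    //
    // With a VertexAIBackend:
    //   'gemini-2.0-flash-lite'            -> 'publishers/google/models/gemini-2.0-flash-lite'
    //   'models/gemini-2.0-flash-lite'     -> 'publishers/google/models/gemini-2.0-flash-lite'
    //   'publishers/acme/models/my-tuned'  -> 'publishers/acme/models/my-tuned' (custom/tuned paths pass through unchanged)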
+ */ +var Task; +(function (Task) { + Task["GENERATE_CONTENT"] = "generateContent"; + Task["STREAM_GENERATE_CONTENT"] = "streamGenerateContent"; + Task["COUNT_TOKENS"] = "countTokens"; + Task["PREDICT"] = "predict"; +})(Task || (Task = {})); +class RequestUrl { + constructor(model, task, apiSettings, stream, requestOptions) { + this.model = model; + this.task = task; + this.apiSettings = apiSettings; + this.stream = stream; + this.requestOptions = requestOptions; + } + toString() { + const url = new URL(this.baseUrl); // Throws if the URL is invalid + url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`; + url.search = this.queryParams.toString(); + return url.toString(); + } + get baseUrl() { + return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`; + } + get apiVersion() { + return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available + } + get modelPath() { + if (this.apiSettings.backend instanceof GoogleAIBackend) { + return `projects/${this.apiSettings.project}/${this.model}`; + } + else if (this.apiSettings.backend instanceof VertexAIBackend) { + return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`; + } + else { + throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`); + } + } + get queryParams() { + const params = new URLSearchParams(); + if (this.stream) { + params.set('alt', 'sse'); + } + return params; + } +} +class WebSocketUrl { + constructor(apiSettings) { + this.apiSettings = apiSettings; + } + toString() { + const url = new URL(`wss://${DEFAULT_DOMAIN}`); + url.pathname = this.pathname; + const queryParams = new URLSearchParams(); + queryParams.set('key', this.apiSettings.apiKey); + url.search = queryParams.toString(); + return url.toString(); + } + get pathname() { + if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent'; + } + else { + return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`; + } + } +} +/** + * Log language and "fire/version" to x-goog-api-client + */ +function getClientHeaders() { + const loggingTags = []; + loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`); + loggingTags.push(`fire/${PACKAGE_VERSION}`); + return loggingTags.join(' '); +} +async function getHeaders(url) { + const headers = new Headers(); + headers.append('Content-Type', 'application/json'); + headers.append('x-goog-api-client', getClientHeaders()); + headers.append('x-goog-api-key', url.apiSettings.apiKey); + if (url.apiSettings.automaticDataCollectionEnabled) { + headers.append('X-Firebase-Appid', url.apiSettings.appId); + } + if (url.apiSettings.getAppCheckToken) { + const appCheckToken = await url.apiSettings.getAppCheckToken(); + if (appCheckToken) { + headers.append('X-Firebase-AppCheck', appCheckToken.token); + if (appCheckToken.error) { + logger.warn(`Unable to obtain a valid App Check token: ${appCheckToken.error.message}`); + } + } + } + if (url.apiSettings.getAuthToken) { + const authToken = await url.apiSettings.getAuthToken(); + if (authToken) { + headers.append('Authorization', `Firebase ${authToken.accessToken}`); + } + } + return headers; +} +async function constructRequest(model, task, apiSettings, stream, body, requestOptions) { + const url = new RequestUrl(model, task, apiSettings, stream, requestOptions); + return { + url: url.toString(), + 
fetchOptions: { + method: 'POST', + headers: await getHeaders(url), + body + } + }; +} +async function makeRequest(model, task, apiSettings, stream, body, requestOptions) { + const url = new RequestUrl(model, task, apiSettings, stream, requestOptions); + let response; + let fetchTimeoutId; + try { + const request = await constructRequest(model, task, apiSettings, stream, body, requestOptions); + // Timeout is 180s by default + const timeoutMillis = requestOptions?.timeout != null && requestOptions.timeout >= 0 + ? requestOptions.timeout + : DEFAULT_FETCH_TIMEOUT_MS; + const abortController = new AbortController(); + fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis); + request.fetchOptions.signal = abortController.signal; + response = await fetch(request.url, request.fetchOptions); + if (!response.ok) { + let message = ''; + let errorDetails; + try { + const json = await response.json(); + message = json.error.message; + if (json.error.details) { + message += ` ${JSON.stringify(json.error.details)}`; + errorDetails = json.error.details; + } + } + catch (e) { + // ignored + } + if (response.status === 403 && + errorDetails && + errorDetails.some((detail) => detail.reason === 'SERVICE_DISABLED') && + errorDetails.some((detail) => detail.links?.[0]?.description.includes('Google developers console API activation'))) { + throw new AIError(AIErrorCode.API_NOT_ENABLED, `The Firebase AI SDK requires the Firebase AI ` + + `API ('firebasevertexai.googleapis.com') to be enabled in your ` + + `Firebase project. Enable this API by visiting the Firebase Console ` + + `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` + + `and clicking "Get started". If you enabled this API recently, ` + + `wait a few minutes for the action to propagate to our systems and ` + + `then retry.`, { + status: response.status, + statusText: response.statusText, + errorDetails + }); + } + throw new AIError(AIErrorCode.FETCH_ERROR, `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`, { + status: response.status, + statusText: response.statusText, + errorDetails + }); + } + } + catch (e) { + let err = e; + if (e.code !== AIErrorCode.FETCH_ERROR && + e.code !== AIErrorCode.API_NOT_ENABLED && + e instanceof Error) { + err = new AIError(AIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}`); + err.stack = e.stack; + } + throw err; + } + finally { + if (fetchTimeoutId) { + clearTimeout(fetchTimeoutId); + } + } + return response; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Check that at least one candidate exists and does not have a bad + * finish reason. Warns if multiple candidates exist. + */ +function hasValidCandidates(response) { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + logger.warn(`This response had ${response.candidates.length} ` + + `candidates. 
Returning text from the first candidate only. ` + + `Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, { + response + }); + } + return true; + } + else { + return false; + } +} +/** + * Creates an EnhancedGenerateContentResponse object that has helper functions and + * other modifications that improve usability. + */ +function createEnhancedContentResponse(response, inferenceSource = InferenceSource.IN_CLOUD) { + /** + * The Vertex AI backend omits default values. + * This causes the `index` property to be omitted from the first candidate in the + * response, since it has index 0, and 0 is a default value. + * See: https://github.com/firebase/firebase-js-sdk/issues/8566 + */ + if (response.candidates && !response.candidates[0].hasOwnProperty('index')) { + response.candidates[0].index = 0; + } + const responseWithHelpers = addHelpers(response); + responseWithHelpers.inferenceSource = inferenceSource; + return responseWithHelpers; +} +/** + * Adds convenience helper methods to a response object, including stream + * chunks (as long as each chunk is a complete GenerateContentResponse JSON). + */ +function addHelpers(response) { + response.text = () => { + if (hasValidCandidates(response)) { + return getText(response, part => !part.thought); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Text not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return ''; + }; + response.thoughtSummary = () => { + if (hasValidCandidates(response)) { + const result = getText(response, part => !!part.thought); + return result === '' ? undefined : result; + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Thought summary not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + response.inlineDataParts = () => { + if (hasValidCandidates(response)) { + return getInlineDataParts(response); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Data not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + response.functionCalls = () => { + if (hasValidCandidates(response)) { + return getFunctionCalls(response); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Function call not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + return response; +} +/** + * Returns all text from the first candidate's parts, filtering by whether + * `partFilter()` returns true. + * + * @param response - The `GenerateContentResponse` from which to extract text. + * @param partFilter - Only return `Part`s for which this returns true + */ +function getText(response, partFilter) { + const textStrings = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.text && partFilter(part)) { + textStrings.push(part.text); + } + } + } + if (textStrings.length > 0) { + return textStrings.join(''); + } + else { + return ''; + } +} +/** + * Returns every {@link FunctionCall} associated with first candidate. 
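addHelpers is what attaches the convenience accessors (text(), thoughtSummary(), functionCalls(), inlineDataParts()) that callers use on a response. A usage sketch, assuming the standard generateContent method on a generative model; the prompt is a placeholder:

    async function showHelpers(model) {
      const result = await model.generateContent('List three hiking trails.');
      const response = result.response;

      console.log(response.text());            // concatenated text parts (thought parts excluded)
      console.log(response.thoughtSummary());   // undefined unless the model returned thought parts

      for (const call of response.functionCalls() ?? []) {
        console.log('function call:', call.name, call.args);
      }
      for (const part of response.inlineDataParts() ?? []) {
        console.log('inline data part:', part.inlineData.mimeType);
      }
    }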
+ */ +function getFunctionCalls(response) { + const functionCalls = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.functionCall) { + functionCalls.push(part.functionCall); + } + } + } + if (functionCalls.length > 0) { + return functionCalls; + } + else { + return undefined; + } +} +/** + * Returns every {@link InlineDataPart} in the first candidate if present. + * + * @internal + */ +function getInlineDataParts(response) { + const data = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.inlineData) { + data.push(part); + } + } + } + if (data.length > 0) { + return data; + } + else { + return undefined; + } +} +const badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY]; +function hadBadFinishReason(candidate) { + return (!!candidate.finishReason && + badFinishReasons.some(reason => reason === candidate.finishReason)); +} +function formatBlockErrorMessage(response) { + let message = ''; + if ((!response.candidates || response.candidates.length === 0) && + response.promptFeedback) { + message += 'Response was blocked'; + if (response.promptFeedback?.blockReason) { + message += ` due to ${response.promptFeedback.blockReason}`; + } + if (response.promptFeedback?.blockReasonMessage) { + message += `: ${response.promptFeedback.blockReasonMessage}`; + } + } + else if (response.candidates?.[0]) { + const firstCandidate = response.candidates[0]; + if (hadBadFinishReason(firstCandidate)) { + message += `Candidate was blocked due to ${firstCandidate.finishReason}`; + if (firstCandidate.finishMessage) { + message += `: ${firstCandidate.finishMessage}`; + } + } + } + return message; +} +/** + * Convert a generic successful fetch response body to an Imagen response object + * that can be returned to the user. This converts the REST APIs response format to our + * APIs representation of a response. + * + * @internal + */ +async function handlePredictResponse(response) { + const responseJson = await response.json(); + const images = []; + let filteredReason = undefined; + // The backend should always send a non-empty array of predictions if the response was successful. + if (!responseJson.predictions || responseJson.predictions?.length === 0) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'); + } + for (const prediction of responseJson.predictions) { + if (prediction.raiFilteredReason) { + filteredReason = prediction.raiFilteredReason; + } + else if (prediction.mimeType && prediction.bytesBase64Encoded) { + images.push({ + mimeType: prediction.mimeType, + bytesBase64Encoded: prediction.bytesBase64Encoded + }); + } + else if (prediction.mimeType && prediction.gcsUri) { + images.push({ + mimeType: prediction.mimeType, + gcsURI: prediction.gcsUri + }); + } + else if (prediction.safetyAttributes) ; + else { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Unexpected element in 'predictions' array in response: '${JSON.stringify(prediction)}'`); + } + } + return { images, filteredReason }; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI). + * The public API prioritizes the format used by the Vertex AI Gemini API. + * We avoid having two sets of types by translating requests and responses between the two API formats. + * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API + * with minimal code changes. + * + * In here are functions that map requests and responses between the two API formats. + * Requests in the Vertex AI format are mapped to the Google AI format before being sent. + * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user. + */ +/** + * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI. + * + * @param generateContentRequest The {@link GenerateContentRequest} to map. + * @returns A {@link GenerateContentResponse} that conforms to the Google AI format. + * + * @throws If the request contains properties that are unsupported by Google AI. + * + * @internal + */ +function mapGenerateContentRequest(generateContentRequest) { + generateContentRequest.safetySettings?.forEach(safetySetting => { + if (safetySetting.method) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'SafetySetting.method is not supported in the the Gemini Developer API. Please remove this property.'); + } + }); + if (generateContentRequest.generationConfig?.topK) { + const roundedTopK = Math.round(generateContentRequest.generationConfig.topK); + if (roundedTopK !== generateContentRequest.generationConfig.topK) { + logger.warn('topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.'); + generateContentRequest.generationConfig.topK = roundedTopK; + } + } + return generateContentRequest; +} +/** + * Maps a {@link GenerateContentResponse} from Google AI to the format of the + * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API. + * + * @param googleAIResponse The {@link GenerateContentResponse} from Google AI. + * @returns A {@link GenerateContentResponse} that conforms to the public API's format. + * + * @internal + */ +function mapGenerateContentResponse(googleAIResponse) { + const generateContentResponse = { + candidates: googleAIResponse.candidates + ? mapGenerateContentCandidates(googleAIResponse.candidates) + : undefined, + prompt: googleAIResponse.promptFeedback + ? mapPromptFeedback(googleAIResponse.promptFeedback) + : undefined, + usageMetadata: googleAIResponse.usageMetadata + }; + return generateContentResponse; +} +/** + * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI. + * + * @param countTokensRequest The {@link CountTokensRequest} to map. + * @param model The model to count tokens with. + * @returns A {@link CountTokensRequest} that conforms to the Google AI format. 
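Two practical consequences of this request mapping when the Gemini Developer API backend is in use, illustrated with assumed request fragments:

    // 1. SafetySetting.method is Vertex-AI-only; this request throws AIError(AIErrorCode.UNSUPPORTED):
    //    safetySettings: [{ category: HarmCategory.HARM_CATEGORY_HARASSMENT,
    //                       threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
    //                       method: HarmBlockMethod.SEVERITY }]
    //
    // 2. A fractional topK is rounded to the nearest integer (with a logged warning) before sending:
    //    generationConfig: { topK: 15.7 }   // transmitted as topK: 16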
+ * + * @internal + */ +function mapCountTokensRequest(countTokensRequest, model) { + const mappedCountTokensRequest = { + generateContentRequest: { + model, + ...countTokensRequest + } + }; + return mappedCountTokensRequest; +} +/** + * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms + * to the Vertex AI API format. + * + * @param candidates The {@link GoogleAIGenerateContentCandidate} to map. + * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format. + * + * @throws If any {@link Part} in the candidates has a `videoMetadata` property. + * + * @internal + */ +function mapGenerateContentCandidates(candidates) { + const mappedCandidates = []; + let mappedSafetyRatings; + if (mappedCandidates) { + candidates.forEach(candidate => { + // Map citationSources to citations. + let citationMetadata; + if (candidate.citationMetadata) { + citationMetadata = { + citations: candidate.citationMetadata.citationSources + }; + } + // Assign missing candidate SafetyRatings properties to their defaults if undefined. + if (candidate.safetyRatings) { + mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => { + return { + ...safetyRating, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0 + }; + }); + } + // videoMetadata is not supported. + // Throw early since developers may send a long video as input and only expect to pay + // for inference on a small portion of the video. + if (candidate.content?.parts?.some(part => part?.videoMetadata)) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'); + } + const mappedCandidate = { + index: candidate.index, + content: candidate.content, + finishReason: candidate.finishReason, + finishMessage: candidate.finishMessage, + safetyRatings: mappedSafetyRatings, + citationMetadata, + groundingMetadata: candidate.groundingMetadata, + urlContextMetadata: candidate.urlContextMetadata + }; + mappedCandidates.push(mappedCandidate); + }); + } + return mappedCandidates; +} +function mapPromptFeedback(promptFeedback) { + // Assign missing SafetyRating properties to their defaults if undefined. + const mappedSafetyRatings = []; + promptFeedback.safetyRatings.forEach(safetyRating => { + mappedSafetyRatings.push({ + category: safetyRating.category, + probability: safetyRating.probability, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0, + blocked: safetyRating.blocked + }); + }); + const mappedPromptFeedback = { + blockReason: promptFeedback.blockReason, + safetyRatings: mappedSafetyRatings, + blockReasonMessage: promptFeedback.blockReasonMessage + }; + return mappedPromptFeedback; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/; +/** + * Process a response.body stream from the backend and return an + * iterator that provides one complete GenerateContentResponse at a time + * and a promise that resolves with a single aggregated + * GenerateContentResponse. + * + * @param response - Response from a fetch call + */ +function processStream(response, apiSettings, inferenceSource) { + const inputStream = response.body.pipeThrough(new TextDecoderStream('utf8', { fatal: true })); + const responseStream = getResponseStream(inputStream); + const [stream1, stream2] = responseStream.tee(); + return { + stream: generateResponseSequence(stream1, apiSettings, inferenceSource), + response: getResponsePromise(stream2, apiSettings, inferenceSource) + }; +} +async function getResponsePromise(stream, apiSettings, inferenceSource) { + const allResponses = []; + const reader = stream.getReader(); + while (true) { + const { done, value } = await reader.read(); + if (done) { + let generateContentResponse = aggregateResponses(allResponses); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + generateContentResponse = mapGenerateContentResponse(generateContentResponse); + } + return createEnhancedContentResponse(generateContentResponse, inferenceSource); + } + allResponses.push(value); + } +} +async function* generateResponseSequence(stream, apiSettings, inferenceSource) { + const reader = stream.getReader(); + while (true) { + const { value, done } = await reader.read(); + if (done) { + break; + } + let enhancedResponse; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + enhancedResponse = createEnhancedContentResponse(mapGenerateContentResponse(value), inferenceSource); + } + else { + enhancedResponse = createEnhancedContentResponse(value, inferenceSource); + } + const firstCandidate = enhancedResponse.candidates?.[0]; + // Don't yield a response with no useful data for the developer. + if (!firstCandidate?.content?.parts && + !firstCandidate?.finishReason && + !firstCandidate?.citationMetadata && + !firstCandidate?.urlContextMetadata) { + continue; + } + yield enhancedResponse; + } +} +/** + * Reads a raw stream from the fetch response and join incomplete + * chunks, returning a new stream that provides a single complete + * GenerateContentResponse in each iteration. + */ +function getResponseStream(inputStream) { + const reader = inputStream.getReader(); + const stream = new ReadableStream({ + start(controller) { + let currentText = ''; + return pump(); + function pump() { + return reader.read().then(({ value, done }) => { + if (done) { + if (currentText.trim()) { + controller.error(new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')); + return; + } + controller.close(); + return; + } + currentText += value; + let match = currentText.match(responseLineRE); + let parsedResponse; + while (match) { + try { + parsedResponse = JSON.parse(match[1]); + } + catch (e) { + controller.error(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing JSON response: "${match[1]}`)); + return; + } + controller.enqueue(parsedResponse); + currentText = currentText.substring(match[0].length); + match = currentText.match(responseLineRE); + } + return pump(); + }); + } + } + }); + return stream; +} +/** + * Aggregates an array of `GenerateContentResponse`s into a single + * GenerateContentResponse. 
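+ *
+ * The example below is an illustrative sketch; the field values are placeholders:
+ *
+ * @example
+ * ```javascript
+ * // Parts from chunks with the same candidate index are concatenated, while
+ * // per-candidate fields such as finishReason keep the last value received.
+ * aggregateResponses([
+ *   { candidates: [{ index: 0, content: { role: 'model', parts: [{ text: 'Hel' }] } }] },
+ *   { candidates: [{ index: 0, content: { role: 'model', parts: [{ text: 'lo' }] }, finishReason: 'STOP' }] }
+ * ]);
+ * // => candidates[0].content.parts is [{ text: 'Hel' }, { text: 'lo' }], finishReason is 'STOP'.
+ * ```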
+ */ +function aggregateResponses(responses) { + const lastResponse = responses[responses.length - 1]; + const aggregatedResponse = { + promptFeedback: lastResponse?.promptFeedback + }; + for (const response of responses) { + if (response.candidates) { + for (const candidate of response.candidates) { + // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined. + // See: https://github.com/firebase/firebase-js-sdk/issues/8566 + const i = candidate.index || 0; + if (!aggregatedResponse.candidates) { + aggregatedResponse.candidates = []; + } + if (!aggregatedResponse.candidates[i]) { + aggregatedResponse.candidates[i] = { + index: candidate.index + }; + } + // Keep overwriting, the last one will be final + aggregatedResponse.candidates[i].citationMetadata = + candidate.citationMetadata; + aggregatedResponse.candidates[i].finishReason = candidate.finishReason; + aggregatedResponse.candidates[i].finishMessage = + candidate.finishMessage; + aggregatedResponse.candidates[i].safetyRatings = + candidate.safetyRatings; + aggregatedResponse.candidates[i].groundingMetadata = + candidate.groundingMetadata; + // The urlContextMetadata object is defined in the first chunk of the response stream. + // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to + // make sure that we don't overwrite the first value urlContextMetadata object with undefined. + // FIXME: What happens if we receive a second, valid urlContextMetadata object? + const urlContextMetadata = candidate.urlContextMetadata; + if (typeof urlContextMetadata === 'object' && + urlContextMetadata !== null && + Object.keys(urlContextMetadata).length > 0) { + aggregatedResponse.candidates[i].urlContextMetadata = + urlContextMetadata; + } + /** + * Candidates should always have content and parts, but this handles + * possible malformed responses. + */ + if (candidate.content) { + // Skip a candidate without parts. + if (!candidate.content.parts) { + continue; + } + if (!aggregatedResponse.candidates[i].content) { + aggregatedResponse.candidates[i].content = { + role: candidate.content.role || 'user', + parts: [] + }; + } + for (const part of candidate.content.parts) { + const newPart = { ...part }; + // The backend can send empty text parts. If these are sent back + // (e.g. in chat history), the backend will respond with an error. + // To prevent this, ignore empty text parts. + if (part.text === '') { + continue; + } + if (Object.keys(newPart).length > 0) { + aggregatedResponse.candidates[i].content.parts.push(newPart); + } + } + } + } + } + } + return aggregatedResponse; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+const errorsCausingFallback = [
+    // most network errors
+    AIErrorCode.FETCH_ERROR,
+    // fallback code for all other errors in makeRequest
+    AIErrorCode.ERROR,
+    // error due to API not being enabled in project
+    AIErrorCode.API_NOT_ENABLED
+];
+/**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+    if (!chromeAdapter) {
+        return {
+            response: await inCloudCall(),
+            inferenceSource: InferenceSource.IN_CLOUD
+        };
+    }
+    switch (chromeAdapter.mode) {
+        case InferenceMode.ONLY_ON_DEVICE:
+            if (await chromeAdapter.isAvailable(request)) {
+                return {
+                    response: await onDeviceCall(),
+                    inferenceSource: InferenceSource.ON_DEVICE
+                };
+            }
+            throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+        case InferenceMode.ONLY_IN_CLOUD:
+            return {
+                response: await inCloudCall(),
+                inferenceSource: InferenceSource.IN_CLOUD
+            };
+        case InferenceMode.PREFER_IN_CLOUD:
+            try {
+                return {
+                    response: await inCloudCall(),
+                    inferenceSource: InferenceSource.IN_CLOUD
+                };
+            }
+            catch (e) {
+                if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+                    return {
+                        response: await onDeviceCall(),
+                        inferenceSource: InferenceSource.ON_DEVICE
+                    };
+                }
+                throw e;
+            }
+        case InferenceMode.PREFER_ON_DEVICE:
+            if (await chromeAdapter.isAvailable(request)) {
+                return {
+                    response: await onDeviceCall(),
+                    inferenceSource: InferenceSource.ON_DEVICE
+                };
+            }
+            return {
+                response: await inCloudCall(),
+                inferenceSource: InferenceSource.IN_CLOUD
+            };
+        default:
+            throw new AIError(AIErrorCode.ERROR, `Unexpected inference mode: ${chromeAdapter.mode}`);
+    }
+}
+
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = mapGenerateContentRequest(params); + } + return makeRequest(model, Task.STREAM_GENERATE_CONTENT, apiSettings, + /* stream */ true, JSON.stringify(params), requestOptions); +} +async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) { + const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions)); + return processStream(callResult.response, apiSettings); // TODO: Map streaming responses +} +async function generateContentOnCloud(apiSettings, model, params, requestOptions) { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = mapGenerateContentRequest(params); + } + return makeRequest(model, Task.GENERATE_CONTENT, apiSettings, + /* stream */ false, JSON.stringify(params), requestOptions); +} +async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) { + const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions)); + const generateContentResponse = await processGenerateContentResponse(callResult.response, apiSettings); + const enhancedResponse = createEnhancedContentResponse(generateContentResponse, callResult.inferenceSource); + return { + response: enhancedResponse + }; +} +async function processGenerateContentResponse(response, apiSettings) { + const responseJson = await response.json(); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return mapGenerateContentResponse(responseJson); + } + else { + return responseJson; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +function formatSystemInstruction(input) { + // null or undefined + if (input == null) { + return undefined; + } + else if (typeof input === 'string') { + return { role: 'system', parts: [{ text: input }] }; + } + else if (input.text) { + return { role: 'system', parts: [input] }; + } + else if (input.parts) { + if (!input.role) { + return { role: 'system', parts: input.parts }; + } + else { + return input; + } + } +} +function formatNewContent(request) { + let newParts = []; + if (typeof request === 'string') { + newParts = [{ text: request }]; + } + else { + for (const partOrString of request) { + if (typeof partOrString === 'string') { + newParts.push({ text: partOrString }); + } + else { + newParts.push(partOrString); + } + } + } + return assignRoleToPartsAndValidateSendMessageRequest(newParts); +} +/** + * When multiple Part types (i.e. FunctionResponsePart and TextPart) are + * passed in a single Part array, we may need to assign different roles to each + * part. 
Currently only FunctionResponsePart requires a role other than 'user'. + * @private + * @param parts Array of parts to pass to the model + * @returns Array of content items + */ +function assignRoleToPartsAndValidateSendMessageRequest(parts) { + const userContent = { role: 'user', parts: [] }; + const functionContent = { role: 'function', parts: [] }; + let hasUserContent = false; + let hasFunctionContent = false; + for (const part of parts) { + if ('functionResponse' in part) { + functionContent.parts.push(part); + hasFunctionContent = true; + } + else { + userContent.parts.push(part); + hasUserContent = true; + } + } + if (hasUserContent && hasFunctionContent) { + throw new AIError(AIErrorCode.INVALID_CONTENT, 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'); + } + if (!hasUserContent && !hasFunctionContent) { + throw new AIError(AIErrorCode.INVALID_CONTENT, 'No Content is provided for sending chat message.'); + } + if (hasUserContent) { + return userContent; + } + return functionContent; +} +function formatGenerateContentInput(params) { + let formattedRequest; + if (params.contents) { + formattedRequest = params; + } + else { + // Array or string + const content = formatNewContent(params); + formattedRequest = { contents: [content] }; + } + if (params.systemInstruction) { + formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction); + } + return formattedRequest; +} +/** + * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format + * that is expected from the REST API. + * + * @internal + */ +function createPredictRequestBody(prompt, { gcsURI, imageFormat, addWatermark, numberOfImages = 1, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }) { + // Properties that are undefined will be omitted from the JSON string that is sent in the request. + const body = { + instances: [ + { + prompt + } + ], + parameters: { + storageUri: gcsURI, + negativePrompt, + sampleCount: numberOfImages, + aspectRatio, + outputOptions: imageFormat, + addWatermark, + safetyFilterLevel, + personGeneration: personFilterLevel, + includeRaiReason: true, + includeSafetyAttributes: true + } + }; + return body; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// https://ai.google.dev/api/rest/v1beta/Content#part +const VALID_PART_FIELDS = [ + 'text', + 'inlineData', + 'functionCall', + 'functionResponse', + 'thought', + 'thoughtSignature' +]; +const VALID_PARTS_PER_ROLE = { + user: ['text', 'inlineData'], + function: ['functionResponse'], + model: ['text', 'functionCall', 'thought', 'thoughtSignature'], + // System instructions shouldn't be in history anyway. + system: ['text'] +}; +const VALID_PREVIOUS_CONTENT_ROLES = { + user: ['model'], + function: ['model'], + model: ['user', 'function'], + // System instructions shouldn't be in history. 
+ system: [] +}; +function validateChatHistory(history) { + let prevContent = null; + for (const currContent of history) { + const { role, parts } = currContent; + if (!prevContent && role !== 'user') { + throw new AIError(AIErrorCode.INVALID_CONTENT, `First Content should be with role 'user', got ${role}`); + } + if (!POSSIBLE_ROLES.includes(role)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`); + } + if (!Array.isArray(parts)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content should have 'parts' property with an array of Parts`); + } + if (parts.length === 0) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Each Content should have at least one part`); + } + const countFields = { + text: 0, + inlineData: 0, + functionCall: 0, + functionResponse: 0, + thought: 0, + thoughtSignature: 0, + executableCode: 0, + codeExecutionResult: 0 + }; + for (const part of parts) { + for (const key of VALID_PART_FIELDS) { + if (key in part) { + countFields[key] += 1; + } + } + } + const validParts = VALID_PARTS_PER_ROLE[role]; + for (const key of VALID_PART_FIELDS) { + if (!validParts.includes(key) && countFields[key] > 0) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't contain '${key}' part`); + } + } + if (prevContent) { + const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role]; + if (!validPreviousContentRoles.includes(prevContent.role)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't follow '${prevContent.role}'. Valid previous roles: ${JSON.stringify(VALID_PREVIOUS_CONTENT_ROLES)}`); + } + } + prevContent = currContent; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Do not log a message for this error. + */ +const SILENT_ERROR = 'SILENT_ERROR'; +/** + * ChatSession class that enables sending chat messages and stores + * history of sent and received messages so far. + * + * @public + */ +class ChatSession { + constructor(apiSettings, model, chromeAdapter, params, requestOptions) { + this.model = model; + this.chromeAdapter = chromeAdapter; + this.params = params; + this.requestOptions = requestOptions; + this._history = []; + this._sendPromise = Promise.resolve(); + this._apiSettings = apiSettings; + if (params?.history) { + validateChatHistory(params.history); + this._history = params.history; + } + } + /** + * Gets the chat history so far. Blocked prompts are not added to history. + * Neither blocked candidates nor the prompts that generated them are added + * to history. 
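+ *
+ * The example below is illustrative and assumes a {@link GenerativeModel} instance `model`:
+ *
+ * @example
+ * ```javascript
+ * const chat = model.startChat();
+ * await chat.sendMessage('Hello!');
+ * const history = await chat.getHistory();
+ * console.log(history.length); // 2 (user turn and model reply), unless the response was blocked.
+ * ```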
+ */ + async getHistory() { + await this._sendPromise; + return this._history; + } + /** + * Sends a chat message and receives a non-streaming + * {@link GenerateContentResult} + */ + async sendMessage(request) { + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: this.params?.safetySettings, + generationConfig: this.params?.generationConfig, + tools: this.params?.tools, + toolConfig: this.params?.toolConfig, + systemInstruction: this.params?.systemInstruction, + contents: [...this._history, newContent] + }; + let finalResult = {}; + // Add onto the chain. + this._sendPromise = this._sendPromise + .then(() => generateContent(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions)) + .then(result => { + if (result.response.candidates && + result.response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = { + parts: result.response.candidates?.[0].content.parts || [], + // Response seems to come back without a role set. + role: result.response.candidates?.[0].content.role || 'model' + }; + this._history.push(responseContent); + } + else { + const blockErrorMessage = formatBlockErrorMessage(result.response); + if (blockErrorMessage) { + logger.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + finalResult = result; + }); + await this._sendPromise; + return finalResult; + } + /** + * Sends a chat message and receives the response as a + * {@link GenerateContentStreamResult} containing an iterable stream + * and a response promise. + */ + async sendMessageStream(request) { + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: this.params?.safetySettings, + generationConfig: this.params?.generationConfig, + tools: this.params?.tools, + toolConfig: this.params?.toolConfig, + systemInstruction: this.params?.systemInstruction, + contents: [...this._history, newContent] + }; + const streamPromise = generateContentStream(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions); + // Add onto the chain. + this._sendPromise = this._sendPromise + .then(() => streamPromise) + // This must be handled to avoid unhandled rejection, but jump + // to the final catch block with a label to not log this error. + .catch(_ignored => { + throw new Error(SILENT_ERROR); + }) + .then(streamResult => streamResult.response) + .then(response => { + if (response.candidates && response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = { ...response.candidates[0].content }; + // Response seems to come back without a role set. + if (!responseContent.role) { + responseContent.role = 'model'; + } + this._history.push(responseContent); + } + else { + const blockErrorMessage = formatBlockErrorMessage(response); + if (blockErrorMessage) { + logger.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + }) + .catch(e => { + // Errors in streamPromise are already catchable by the user as + // streamPromise is returned. + // Avoid duplicating the error message in logs. + if (e.message !== SILENT_ERROR) { + // Users do not have access to _sendPromise to catch errors + // downstream from streamPromise, so they should not throw. 
+ logger.error(e); + } + }); + return streamPromise; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +async function countTokensOnCloud(apiSettings, model, params, requestOptions) { + let body = ''; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + const mappedParams = mapCountTokensRequest(params, model); + body = JSON.stringify(mappedParams); + } + else { + body = JSON.stringify(params); + } + const response = await makeRequest(model, Task.COUNT_TOKENS, apiSettings, false, body, requestOptions); + return response.json(); +} +async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) { + if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.'); + } + return countTokensOnCloud(apiSettings, model, params, requestOptions); +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for generative model APIs. + * @public + */ +class GenerativeModel extends AIModel { + constructor(ai, modelParams, requestOptions, chromeAdapter) { + super(ai, modelParams.model); + this.chromeAdapter = chromeAdapter; + this.generationConfig = modelParams.generationConfig || {}; + this.safetySettings = modelParams.safetySettings || []; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + this.requestOptions = requestOptions || {}; + } + /** + * Makes a single non-streaming call to the model + * and returns an object containing a single {@link GenerateContentResponse}. + */ + async generateContent(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContent(this._apiSettings, this.model, { + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + ...formattedParams + }, this.chromeAdapter, this.requestOptions); + } + /** + * Makes a single streaming call to the model + * and returns an object containing an iterable stream that iterates + * over all chunks in the streaming response as well as + * a promise that returns the final aggregated response. 
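+ *
+ * The example below is illustrative and assumes a {@link GenerativeModel} instance `model`:
+ *
+ * @example
+ * ```javascript
+ * const result = await model.generateContentStream('Tell me a short story.');
+ * for await (const chunk of result.stream) {
+ *   console.log(chunk.candidates?.[0]?.content?.parts);
+ * }
+ * const finalResponse = await result.response;
+ * ```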
+ */ + async generateContentStream(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContentStream(this._apiSettings, this.model, { + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + ...formattedParams + }, this.chromeAdapter, this.requestOptions); + } + /** + * Gets a new {@link ChatSession} instance which can be used for + * multi-turn chats. + */ + startChat(startChatParams) { + return new ChatSession(this._apiSettings, this.model, this.chromeAdapter, { + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + /** + * Overrides params inherited from GenerativeModel with those explicitly set in the + * StartChatParams. For example, if startChatParams.generationConfig is set, it'll override + * this.generationConfig. + */ + ...startChatParams + }, this.requestOptions); + } + /** + * Counts the tokens in the provided request. + */ + async countTokens(request) { + const formattedParams = formatGenerateContentInput(request); + return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Represents an active, real-time, bidirectional conversation with the model. + * + * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}. + * + * @beta + */ +class LiveSession { + /** + * @internal + */ + constructor(webSocketHandler, serverMessages) { + this.webSocketHandler = webSocketHandler; + this.serverMessages = serverMessages; + /** + * Indicates whether this Live session is closed. + * + * @beta + */ + this.isClosed = false; + /** + * Indicates whether this Live session is being controlled by an `AudioConversationController`. + * + * @beta + */ + this.inConversation = false; + } + /** + * Sends content to the server. + * + * @param request - The message to send to the model. + * @param turnComplete - Indicates if the turn is complete. Defaults to false. + * @throws If this session has been closed. + * + * @beta + */ + async send(request, turnComplete = true) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const newContent = formatNewContent(request); + const message = { + clientContent: { + turns: [newContent], + turnComplete + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends text to the server in realtime. + * + * @example + * ```javascript + * liveSession.sendTextRealtime("Hello, how are you?"); + * ``` + * + * @param text - The text data to send. + * @throws If this session has been closed. 
+ * + * @beta + */ + async sendTextRealtime(text) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + text + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends audio data to the server in realtime. + * + * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz + * little-endian. + * + * @example + * ```javascript + * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian. + * const blob = { mimeType: "audio/pcm", data: pcmData }; + * liveSession.sendAudioRealtime(blob); + * ``` + * + * @param blob - The base64-encoded PCM data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + async sendAudioRealtime(blob) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + audio: blob + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends video data to the server in realtime. + * + * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It + * is recommended to set `mimeType` to `image/jpeg`. + * + * @example + * ```javascript + * // const videoFrame = ... base64-encoded JPEG data + * const blob = { mimeType: "image/jpeg", data: videoFrame }; + * liveSession.sendVideoRealtime(blob); + * ``` + * @param blob - The base64-encoded video data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + async sendVideoRealtime(blob) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + video: blob + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends function responses to the server. + * + * @param functionResponses - The function responses to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendFunctionResponses(functionResponses) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + toolResponse: { + functionResponses + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Yields messages received from the server. + * This can only be used by one consumer at a time. + * + * @returns An `AsyncGenerator` that yields server messages as they arrive. + * @throws If the session is already closed, or if we receive a response that we don't support. + * + * @beta + */ + async *receive() { + if (this.isClosed) { + throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot read from a Live session that is closed. 
Try starting a new Live session.'); + } + for await (const message of this.serverMessages) { + if (message && typeof message === 'object') { + if (LiveResponseType.SERVER_CONTENT in message) { + yield { + type: 'serverContent', + ...message + .serverContent + }; + } + else if (LiveResponseType.TOOL_CALL in message) { + yield { + type: 'toolCall', + ...message + .toolCall + }; + } + else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) { + yield { + type: 'toolCallCancellation', + ...message.toolCallCancellation + }; + } + else { + logger.warn(`Received an unknown message type from the server: ${JSON.stringify(message)}`); + } + } + else { + logger.warn(`Received an invalid message from the server: ${JSON.stringify(message)}`); + } + } + } + /** + * Closes this session. + * All methods on this session will throw an error once this resolves. + * + * @beta + */ + async close() { + if (!this.isClosed) { + this.isClosed = true; + await this.webSocketHandler.close(1000, 'Client closed session.'); + } + } + /** + * Sends realtime input to the server. + * + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * @param mediaChunks - The media chunks to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendMediaChunks(mediaChunks) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + // The backend does not support sending more than one mediaChunk in one message. + // Work around this limitation by sending mediaChunks in separate messages. + mediaChunks.forEach(mediaChunk => { + const message = { + realtimeInput: { mediaChunks: [mediaChunk] } + }; + this.webSocketHandler.send(JSON.stringify(message)); + }); + } + /** + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * Sends a stream of {@link GenerativeContentBlob}. + * + * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendMediaStream(mediaChunkStream) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const reader = mediaChunkStream.getReader(); + while (true) { + try { + const { done, value } = await reader.read(); + if (done) { + break; + } + else if (!value) { + throw new Error('Missing chunk in reader, but reader is not done.'); + } + await this.sendMediaChunks([value]); + } + catch (e) { + // Re-throw any errors that occur during stream consumption or sending. + const message = e instanceof Error ? e.message : 'Error processing media stream.'; + throw new AIError(AIErrorCode.REQUEST_ERROR, message); + } + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for Live generative model APIs. 
The Live API enables low-latency, two-way multimodal + * interactions with Gemini. + * + * This class should only be instantiated with {@link getLiveGenerativeModel}. + * + * @beta + */ +class LiveGenerativeModel extends AIModel { + /** + * @internal + */ + constructor(ai, modelParams, + /** + * @internal + */ + _webSocketHandler) { + super(ai, modelParams.model); + this._webSocketHandler = _webSocketHandler; + this.generationConfig = modelParams.generationConfig || {}; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + } + /** + * Starts a {@link LiveSession}. + * + * @returns A {@link LiveSession}. + * @throws If the connection failed to be established with the server. + * + * @beta + */ + async connect() { + const url = new WebSocketUrl(this._apiSettings); + await this._webSocketHandler.connect(url.toString()); + let fullModelPath; + if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + fullModelPath = `projects/${this._apiSettings.project}/${this.model}`; + } + else { + fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`; + } + // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API, + // but the backend expects them to be in the `setup` message. + const { inputAudioTranscription, outputAudioTranscription, ...generationConfig } = this.generationConfig; + const setupMessage = { + setup: { + model: fullModelPath, + generationConfig, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + inputAudioTranscription, + outputAudioTranscription + } + }; + try { + // Begin listening for server messages, and begin the handshake by sending the 'setupMessage' + const serverMessages = this._webSocketHandler.listen(); + this._webSocketHandler.send(JSON.stringify(setupMessage)); + // Verify we received the handshake response 'setupComplete' + const firstMessage = (await serverMessages.next()).value; + if (!firstMessage || + !(typeof firstMessage === 'object') || + !('setupComplete' in firstMessage)) { + await this._webSocketHandler.close(1011, 'Handshake failure'); + throw new AIError(AIErrorCode.RESPONSE_ERROR, 'Server connection handshake failed. The server did not respond with a setupComplete message.'); + } + return new LiveSession(this._webSocketHandler, serverMessages); + } + catch (e) { + // Ensure connection is closed on any setup error + await this._webSocketHandler.close(); + throw e; + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for Imagen model APIs. + * + * This class provides methods for generating images using the Imagen model. 
+ * + * @example + * ```javascript + * const imagen = new ImagenModel( + * ai, + * { + * model: 'imagen-3.0-generate-002' + * } + * ); + * + * const response = await imagen.generateImages('A photo of a cat'); + * if (response.images.length > 0) { + * console.log(response.images[0].bytesBase64Encoded); + * } + * ``` + * + * @public + */ +class ImagenModel extends AIModel { + /** + * Constructs a new instance of the {@link ImagenModel} class. + * + * @param ai - an {@link AI} instance. + * @param modelParams - Parameters to use when making requests to Imagen. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + */ + constructor(ai, modelParams, requestOptions) { + const { model, generationConfig, safetySettings } = modelParams; + super(ai, model); + this.requestOptions = requestOptions; + this.generationConfig = generationConfig; + this.safetySettings = safetySettings; + } + /** + * Generates images using the Imagen model and returns them as + * base64-encoded strings. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the generated images. + * + * @throws If the request to generate images fails. This happens if the + * prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + * + * @public + */ + async generateImages(prompt) { + const body = createPredictRequestBody(prompt, { + ...this.generationConfig, + ...this.safetySettings + }); + const response = await makeRequest(this.model, Task.PREDICT, this._apiSettings, + /* stream */ false, JSON.stringify(body), this.requestOptions); + return handlePredictResponse(response); + } + /** + * Generates images to Cloud Storage for Firebase using the Imagen model. + * + * @internal This method is temporarily internal. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket. + * This should be a directory. For example, `gs://my-bucket/my-directory/`. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the URLs of the generated images. + * + * @throws If the request fails to generate images fails. This happens if + * the prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + */ + async generateImagesGCS(prompt, gcsURI) { + const body = createPredictRequestBody(prompt, { + gcsURI, + ...this.generationConfig, + ...this.safetySettings + }); + const response = await makeRequest(this.model, Task.PREDICT, this._apiSettings, + /* stream */ false, JSON.stringify(body), this.requestOptions); + return handlePredictResponse(response); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22. + * + * @internal + */ +class WebSocketHandlerImpl { + constructor() { + if (typeof WebSocket === 'undefined') { + throw new AIError(AIErrorCode.UNSUPPORTED, 'The WebSocket API is not available in this environment. ' + + 'The "Live" feature is not supported here. It is supported in ' + + 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'); + } + } + connect(url) { + return new Promise((resolve, reject) => { + this.ws = new WebSocket(url); + this.ws.binaryType = 'blob'; // Only important to set in Node + this.ws.addEventListener('open', () => resolve(), { once: true }); + this.ws.addEventListener('error', () => reject(new AIError(AIErrorCode.FETCH_ERROR, `Error event raised on WebSocket`)), { once: true }); + this.ws.addEventListener('close', (closeEvent) => { + if (closeEvent.reason) { + logger.warn(`WebSocket connection closed by server. Reason: '${closeEvent.reason}'`); + } + }); + }); + } + send(data) { + if (!this.ws || this.ws.readyState !== WebSocket.OPEN) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.'); + } + this.ws.send(data); + } + async *listen() { + if (!this.ws) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not connected.'); + } + const messageQueue = []; + const errorQueue = []; + let resolvePromise = null; + let isClosed = false; + const messageListener = async (event) => { + let data; + if (event.data instanceof Blob) { + data = await event.data.text(); + } + else if (typeof event.data === 'string') { + data = event.data; + } + else { + errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`)); + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + return; + } + try { + const obj = JSON.parse(data); + messageQueue.push(obj); + } + catch (e) { + const err = e; + errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing WebSocket message to JSON: ${err.message}`)); + } + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + }; + const errorListener = () => { + errorQueue.push(new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')); + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + }; + const closeListener = (event) => { + if (event.reason) { + logger.warn(`WebSocket connection closed by the server with reason: ${event.reason}`); + } + isClosed = true; + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + // Clean up listeners to prevent memory leaks + this.ws?.removeEventListener('message', messageListener); + this.ws?.removeEventListener('close', closeListener); + this.ws?.removeEventListener('error', errorListener); + }; + this.ws.addEventListener('message', messageListener); + this.ws.addEventListener('close', closeListener); + this.ws.addEventListener('error', errorListener); + while (!isClosed) { + if (errorQueue.length > 0) { + const error = errorQueue.shift(); + throw error; + } + if (messageQueue.length > 0) { + yield messageQueue.shift(); + } + else { + await new Promise(resolve => { + resolvePromise = resolve; + }); + } + } + // If the loop terminated because isClosed is true, check for any final errors + if (errorQueue.length > 0) { + const error = errorQueue.shift(); + throw error; + } + } + close(code, reason) { + return new Promise(resolve => { + if (!this.ws) { + return resolve(); + } + this.ws.addEventListener('close', () => resolve(), { once: true }); + // Calling 'close' during these states results in an error. + if (this.ws.readyState === WebSocket.CLOSED || + this.ws.readyState === WebSocket.CONNECTING) { + return resolve(); + } + if (this.ws.readyState !== WebSocket.CLOSING) { + this.ws.close(code, reason); + } + }); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Parent class encompassing all Schema types, with static methods that + * allow building specific Schema types. This class can be converted with + * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints. + * (This string conversion is automatically done when calling SDK methods.) 
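+ *
+ * The example below is an illustrative sketch of composing a schema with the static builders:
+ *
+ * @example
+ * ```javascript
+ * const recipeSchema = Schema.object({
+ *   properties: {
+ *     name: Schema.string(),
+ *     servings: Schema.integer(),
+ *     vegetarian: Schema.boolean()
+ *   },
+ *   optionalProperties: ['vegetarian']
+ * });
+ * // toJSON() lists 'name' and 'servings' as required and omits 'optionalProperties'.
+ * JSON.stringify(recipeSchema);
+ * ```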
+ * @public + */ +class Schema { + constructor(schemaParams) { + // TODO(dlarocque): Enforce this with union types + if (!schemaParams.type && !schemaParams.anyOf) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, "A schema must have either a 'type' or an 'anyOf' array of sub-schemas."); + } + // eslint-disable-next-line guard-for-in + for (const paramKey in schemaParams) { + this[paramKey] = schemaParams[paramKey]; + } + // Ensure these are explicitly set to avoid TS errors. + this.type = schemaParams.type; + this.format = schemaParams.hasOwnProperty('format') + ? schemaParams.format + : undefined; + this.nullable = schemaParams.hasOwnProperty('nullable') + ? !!schemaParams.nullable + : false; + } + /** + * Defines how this Schema should be serialized as JSON. + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior + * @internal + */ + toJSON() { + const obj = { + type: this.type + }; + for (const prop in this) { + if (this.hasOwnProperty(prop) && this[prop] !== undefined) { + if (prop !== 'required' || this.type === SchemaType.OBJECT) { + obj[prop] = this[prop]; + } + } + } + return obj; + } + static array(arrayParams) { + return new ArraySchema(arrayParams, arrayParams.items); + } + static object(objectParams) { + return new ObjectSchema(objectParams, objectParams.properties, objectParams.optionalProperties); + } + // eslint-disable-next-line id-blacklist + static string(stringParams) { + return new StringSchema(stringParams); + } + static enumString(stringParams) { + return new StringSchema(stringParams, stringParams.enum); + } + static integer(integerParams) { + return new IntegerSchema(integerParams); + } + // eslint-disable-next-line id-blacklist + static number(numberParams) { + return new NumberSchema(numberParams); + } + // eslint-disable-next-line id-blacklist + static boolean(booleanParams) { + return new BooleanSchema(booleanParams); + } + static anyOf(anyOfParams) { + return new AnyOfSchema(anyOfParams); + } +} +/** + * Schema class for "integer" types. + * @public + */ +class IntegerSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.INTEGER, + ...schemaParams + }); + } +} +/** + * Schema class for "number" types. + * @public + */ +class NumberSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.NUMBER, + ...schemaParams + }); + } +} +/** + * Schema class for "boolean" types. + * @public + */ +class BooleanSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.BOOLEAN, + ...schemaParams + }); + } +} +/** + * Schema class for "string" types. Can be used with or without + * enum values. + * @public + */ +class StringSchema extends Schema { + constructor(schemaParams, enumValues) { + super({ + type: SchemaType.STRING, + ...schemaParams + }); + this.enum = enumValues; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + if (this.enum) { + obj['enum'] = this.enum; + } + return obj; + } +} +/** + * Schema class for "array" types. + * The `items` param should refer to the type of item that can be a member + * of the array. + * @public + */ +class ArraySchema extends Schema { + constructor(schemaParams, items) { + super({ + type: SchemaType.ARRAY, + ...schemaParams + }); + this.items = items; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + obj.items = this.items.toJSON(); + return obj; + } +} +/** + * Schema class for "object" types. 
+ * The `properties` param must be a map of `Schema` objects. + * @public + */ +class ObjectSchema extends Schema { + constructor(schemaParams, properties, optionalProperties = []) { + super({ + type: SchemaType.OBJECT, + ...schemaParams + }); + this.properties = properties; + this.optionalProperties = optionalProperties; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + obj.properties = { ...this.properties }; + const required = []; + if (this.optionalProperties) { + for (const propertyKey of this.optionalProperties) { + if (!this.properties.hasOwnProperty(propertyKey)) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, `Property "${propertyKey}" specified in "optionalProperties" does not exist.`); + } + } + } + for (const propertyKey in this.properties) { + if (this.properties.hasOwnProperty(propertyKey)) { + obj.properties[propertyKey] = this.properties[propertyKey].toJSON(); + if (!this.optionalProperties.includes(propertyKey)) { + required.push(propertyKey); + } + } + } + if (required.length > 0) { + obj.required = required; + } + delete obj.optionalProperties; + return obj; + } +} +/** + * Schema class representing a value that can conform to any of the provided sub-schemas. This is + * useful when a field can accept multiple distinct types or structures. + * @public + */ +class AnyOfSchema extends Schema { + constructor(schemaParams) { + if (schemaParams.anyOf.length === 0) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, "The 'anyOf' array must not be empty."); + } + super({ + ...schemaParams, + type: undefined // anyOf schemas do not have an explicit type + }); + this.anyOf = schemaParams.anyOf; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + // Ensure the 'anyOf' property contains serialized SchemaRequest objects. + if (this.anyOf && Array.isArray(this.anyOf)) { + obj.anyOf = this.anyOf.map(s => s.toJSON()); + } + return obj; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Defines the image format for images generated by Imagen. + * + * Use this class to specify the desired format (JPEG or PNG) and compression quality + * for images generated by Imagen. This is typically included as part of + * {@link ImagenModelParams}. + * + * @example + * ```javascript + * const imagenModelParams = { + * // ... other ImagenModelParams + * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75. + * } + * ``` + * + * @public + */ +class ImagenImageFormat { + constructor() { + this.mimeType = 'image/png'; + } + /** + * Creates an {@link ImagenImageFormat} for a JPEG image. + * + * @param compressionQuality - The level of compression (a number between 0 and 100). + * @returns An {@link ImagenImageFormat} object for a JPEG image. 
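+ *
+ * @example
+ * ```javascript
+ * // Illustrative: request JPEG output at compression quality 75.
+ * const imageFormat = ImagenImageFormat.jpeg(75);
+ * ```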
+ * + * @public + */ + static jpeg(compressionQuality) { + if (compressionQuality && + (compressionQuality < 0 || compressionQuality > 100)) { + logger.warn(`Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`); + } + return { mimeType: 'image/jpeg', compressionQuality }; + } + /** + * Creates an {@link ImagenImageFormat} for a PNG image. + * + * @returns An {@link ImagenImageFormat} object for a PNG image. + * + * @public + */ + static png() { + return { mimeType: 'image/png' }; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const SERVER_INPUT_SAMPLE_RATE = 16000; +const SERVER_OUTPUT_SAMPLE_RATE = 24000; +const AUDIO_PROCESSOR_NAME = 'audio-processor'; +/** + * The JS for an `AudioWorkletProcessor`. + * This processor is responsible for taking raw audio from the microphone, + * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread. + * + * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor + * + * It is defined as a string here so that it can be converted into a `Blob` + * and loaded at runtime. + */ +const audioProcessorWorkletString = ` + class AudioProcessor extends AudioWorkletProcessor { + constructor(options) { + super(); + this.targetSampleRate = options.processorOptions.targetSampleRate; + // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope, + // representing the native sample rate of the AudioContext. + this.inputSampleRate = sampleRate; + } + + /** + * This method is called by the browser's audio engine for each block of audio data. + * Input is a single input, with a single channel (input[0][0]). + */ + process(inputs) { + const input = inputs[0]; + if (input && input.length > 0 && input[0].length > 0) { + const pcmData = input[0]; // Float32Array of raw audio samples. + + // Simple linear interpolation for resampling. + const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate / this.inputSampleRate)); + const ratio = pcmData.length / resampled.length; + for (let i = 0; i < resampled.length; i++) { + resampled[i] = pcmData[Math.floor(i * ratio)]; + } + + // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767) + const resampledInt16 = new Int16Array(resampled.length); + for (let i = 0; i < resampled.length; i++) { + const sample = Math.max(-1, Math.min(1, resampled[i])); + if (sample < 0) { + resampledInt16[i] = sample * 32768; + } else { + resampledInt16[i] = sample * 32767; + } + } + + this.port.postMessage(resampledInt16); + } + // Return true to keep the processor alive and processing the next audio block. + return true; + } + } + + // Register the processor with a name that can be used to instantiate it from the main thread. + registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor); +`; +/** + * Encapsulates the core logic of an audio conversation. 
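+ *
+ * In outline: microphone audio is resampled to 16kHz (SERVER_INPUT_SAMPLE_RATE) and converted
+ * to 16-bit PCM in the audio worklet, base64-encoded, and sent to the server with
+ * `sendAudioRealtime({ mimeType: 'audio/pcm', data: base64 })`. Audio received from the server
+ * is 24kHz 16-bit PCM (SERVER_OUTPUT_SAMPLE_RATE) and is converted back to Float32
+ * (`pcm16[i] / 32768`) before being scheduled for gapless playback through the Web Audio API.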
+ * + * @internal + */ +class AudioConversationRunner { + constructor(liveSession, options, deps) { + this.liveSession = liveSession; + this.options = options; + this.deps = deps; + /** A flag to indicate if the conversation has been stopped. */ + this.isStopped = false; + /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */ + this.stopDeferred = new util.Deferred(); + /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */ + this.playbackQueue = []; + /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */ + this.scheduledSources = []; + /** A high-precision timeline pointer for scheduling gapless audio playback. */ + this.nextStartTime = 0; + /** A mutex to prevent the playback processing loop from running multiple times concurrently. */ + this.isPlaybackLoopRunning = false; + this.liveSession.inConversation = true; + // Start listening for messages from the server. + this.receiveLoopPromise = this.runReceiveLoop().finally(() => this.cleanup()); + // Set up the handler for receiving processed audio data from the worklet. + // Message data has been resampled to 16kHz 16-bit PCM. + this.deps.workletNode.port.onmessage = event => { + if (this.isStopped) { + return; + } + const pcm16 = event.data; + const base64 = btoa(String.fromCharCode.apply(null, Array.from(new Uint8Array(pcm16.buffer)))); + const chunk = { + mimeType: 'audio/pcm', + data: base64 + }; + void this.liveSession.sendAudioRealtime(chunk); + }; + } + /** + * Stops the conversation and unblocks the main receive loop. + */ + async stop() { + if (this.isStopped) { + return; + } + this.isStopped = true; + this.stopDeferred.resolve(); // Unblock the receive loop + await this.receiveLoopPromise; // Wait for the loop and cleanup to finish + } + /** + * Cleans up all audio resources (nodes, stream tracks, context) and marks the + * session as no longer in a conversation. + */ + cleanup() { + this.interruptPlayback(); // Ensure all audio is stopped on final cleanup. + this.deps.workletNode.port.onmessage = null; + this.deps.workletNode.disconnect(); + this.deps.sourceNode.disconnect(); + this.deps.mediaStream.getTracks().forEach(track => track.stop()); + if (this.deps.audioContext.state !== 'closed') { + void this.deps.audioContext.close(); + } + this.liveSession.inConversation = false; + } + /** + * Adds audio data to the queue and ensures the playback loop is running. + */ + enqueueAndPlay(audioData) { + this.playbackQueue.push(audioData); + // Will no-op if it's already running. + void this.processPlaybackQueue(); + } + /** + * Stops all current and pending audio playback and clears the queue. This is + * called when the server indicates the model's speech was interrupted with + * `LiveServerContent.modelTurn.interrupted`. + */ + interruptPlayback() { + // Stop all sources that have been scheduled. The onended event will fire for each, + // which will clean up the scheduledSources array. + [...this.scheduledSources].forEach(source => source.stop(0)); + // Clear the internal buffer of unprocessed audio chunks. + this.playbackQueue.length = 0; + // Reset the playback clock to start fresh. + this.nextStartTime = this.deps.audioContext.currentTime; + } + /** + * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence. 
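+ *
+ * Scheduling sketch (see the code below): each chunk starts at
+ * `max(audioContext.currentTime, nextStartTime)`, and `nextStartTime` then advances by the
+ * chunk's `audioBuffer.duration`, so consecutive chunks are queued back to back.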
+ */ + async processPlaybackQueue() { + if (this.isPlaybackLoopRunning) { + return; + } + this.isPlaybackLoopRunning = true; + while (this.playbackQueue.length > 0 && !this.isStopped) { + const pcmRawBuffer = this.playbackQueue.shift(); + try { + const pcm16 = new Int16Array(pcmRawBuffer); + const frameCount = pcm16.length; + const audioBuffer = this.deps.audioContext.createBuffer(1, frameCount, SERVER_OUTPUT_SAMPLE_RATE); + // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API. + const channelData = audioBuffer.getChannelData(0); + for (let i = 0; i < frameCount; i++) { + channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0] + } + const source = this.deps.audioContext.createBufferSource(); + source.buffer = audioBuffer; + source.connect(this.deps.audioContext.destination); + // Track the source and set up a handler to remove it from tracking when it finishes. + this.scheduledSources.push(source); + source.onended = () => { + this.scheduledSources = this.scheduledSources.filter(s => s !== source); + }; + // To prevent gaps, schedule the next chunk to start either now (if we're catching up) + // or exactly when the previous chunk is scheduled to end. + this.nextStartTime = Math.max(this.deps.audioContext.currentTime, this.nextStartTime); + source.start(this.nextStartTime); + // Update the schedule for the *next* chunk. + this.nextStartTime += audioBuffer.duration; + } + catch (e) { + logger.error('Error playing audio:', e); + } + } + this.isPlaybackLoopRunning = false; + } + /** + * The main loop that listens for and processes messages from the server. + */ + async runReceiveLoop() { + const messageGenerator = this.liveSession.receive(); + while (!this.isStopped) { + const result = await Promise.race([ + messageGenerator.next(), + this.stopDeferred.promise + ]); + if (this.isStopped || !result || result.done) { + break; + } + const message = result.value; + if (message.type === 'serverContent') { + const serverContent = message; + if (serverContent.interrupted) { + this.interruptPlayback(); + } + const audioPart = serverContent.modelTurn?.parts.find(part => part.inlineData?.mimeType.startsWith('audio/')); + if (audioPart?.inlineData) { + const audioData = Uint8Array.from(atob(audioPart.inlineData.data), c => c.charCodeAt(0)).buffer; + this.enqueueAndPlay(audioData); + } + } + else if (message.type === 'toolCall') { + if (!this.options.functionCallingHandler) { + logger.warn('Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. Ignoring tool call.'); + } + else { + try { + const functionResponse = await this.options.functionCallingHandler(message.functionCalls); + if (!this.isStopped) { + void this.liveSession.sendFunctionResponses([functionResponse]); + } + } + catch (e) { + throw new AIError(AIErrorCode.ERROR, `Function calling handler failed: ${e.message}`); + } + } + } + } + } +} +/** + * Starts a real-time, bidirectional audio conversation with the model. This helper function manages + * the complexities of microphone access, audio recording, playback, and interruptions. + * + * @remarks Important: This function must be called in response to a user gesture + * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}. 
+ * + * @example + * ```javascript + * const liveSession = await model.connect(); + * let conversationController; + * + * // This function must be called from within a click handler. + * async function startConversation() { + * try { + * conversationController = await startAudioConversation(liveSession); + * } catch (e) { + * // Handle AI-specific errors + * if (e instanceof AIError) { + * console.error("AI Error:", e.message); + * } + * // Handle microphone permission and hardware errors + * else if (e instanceof DOMException) { + * console.error("Microphone Error:", e.message); + * } + * // Handle other unexpected errors + * else { + * console.error("An unexpected error occurred:", e); + * } + * } + * } + * + * // Later, to stop the conversation: + * // if (conversationController) { + * // await conversationController.stop(); + * // } + * ``` + * + * @param liveSession - An active {@link LiveSession} instance. + * @param options - Configuration options for the audio conversation. + * @returns A `Promise` that resolves with an {@link AudioConversationController}. + * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`). + * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions. + * + * @beta + */ +async function startAudioConversation(liveSession, options = {}) { + if (liveSession.isClosed) { + throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot start audio conversation on a closed LiveSession.'); + } + if (liveSession.inConversation) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'An audio conversation is already in progress for this session.'); + } + // Check for necessary Web API support. + if (typeof AudioWorkletNode === 'undefined' || + typeof AudioContext === 'undefined' || + typeof navigator === 'undefined' || + !navigator.mediaDevices) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'); + } + let audioContext; + try { + // 1. Set up the audio context. This must be in response to a user gesture. + // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy + audioContext = new AudioContext(); + if (audioContext.state === 'suspended') { + await audioContext.resume(); + } + // 2. Prompt for microphone access and get the media stream. + // This can throw a variety of permission or hardware-related errors. + const mediaStream = await navigator.mediaDevices.getUserMedia({ + audio: true + }); + // 3. Load the AudioWorklet processor. + // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet + const workletBlob = new Blob([audioProcessorWorkletString], { + type: 'application/javascript' + }); + const workletURL = URL.createObjectURL(workletBlob); + await audioContext.audioWorklet.addModule(workletURL); + // 4. 
Create the audio graph: Microphone -> Source Node -> Worklet Node + const sourceNode = audioContext.createMediaStreamSource(mediaStream); + const workletNode = new AudioWorkletNode(audioContext, AUDIO_PROCESSOR_NAME, { + processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE } + }); + sourceNode.connect(workletNode); + // 5. Instantiate and return the runner which manages the conversation. + const runner = new AudioConversationRunner(liveSession, options, { + audioContext, + mediaStream, + sourceNode, + workletNode + }); + return { stop: () => runner.stop() }; + } + catch (e) { + // Ensure the audio context is closed on any setup error. + if (audioContext && audioContext.state !== 'closed') { + void audioContext.close(); + } + // Re-throw specific, known error types directly. The user may want to handle `DOMException` + // errors differently (for example, if permission to access audio device was denied). + if (e instanceof AIError || e instanceof DOMException) { + throw e; + } + // Wrap any other unexpected errors in a standard AIError. + throw new AIError(AIErrorCode.ERROR, `Failed to initialize audio recording: ${e.message}`); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Returns the default {@link AI} instance that is associated with the provided + * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the + * default settings. + * + * @example + * ```javascript + * const ai = getAI(app); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Gemini Developer API (via Google AI). + * const ai = getAI(app, { backend: new GoogleAIBackend() }); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Vertex AI Gemini API. + * const ai = getAI(app, { backend: new VertexAIBackend() }); + * ``` + * + * @param app - The {@link @firebase/app#FirebaseApp} to use. + * @param options - {@link AIOptions} that configure the AI instance. + * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}. + * + * @public + */ +function getAI(app$1 = app.getApp(), options) { + app$1 = util.getModularInstance(app$1); + // Dependencies + const AIProvider = app._getProvider(app$1, AI_TYPE); + const backend = options?.backend ?? new GoogleAIBackend(); + const finalOptions = { + useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false + }; + const identifier = encodeInstanceIdentifier(backend); + const aiInstance = AIProvider.getImmediate({ + identifier + }); + aiInstance.options = finalOptions; + return aiInstance; +} +/** + * Returns a {@link GenerativeModel} class with methods for inference + * and other functionality. + * + * @public + */ +function getGenerativeModel(ai, modelParams, requestOptions) { + // Uses the existence of HybridParams.mode to clarify the type of the modelParams input. 
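+ // Roughly: params with a `mode` are treated as HybridParams (with `inCloudParams` defaulting
+ // to DEFAULT_HYBRID_IN_CLOUD_MODEL), while params without a `mode` are treated as plain
+ // in-cloud ModelParams.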
+ const hybridParams = modelParams; + let inCloudParams; + if (hybridParams.mode) { + inCloudParams = hybridParams.inCloudParams || { + model: DEFAULT_HYBRID_IN_CLOUD_MODEL + }; + } + else { + inCloudParams = modelParams; + } + if (!inCloudParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`); + } + /** + * An AIService registered by index.node.ts will not have a + * chromeAdapterFactory() method. + */ + const chromeAdapter = ai.chromeAdapterFactory?.(hybridParams.mode, typeof window === 'undefined' ? undefined : window, hybridParams.onDeviceParams); + return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter); +} +/** + * Returns an {@link ImagenModel} class with methods for using Imagen. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when making Imagen requests. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @public + */ +function getImagenModel(ai, modelParams, requestOptions) { + if (!modelParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`); + } + return new ImagenModel(ai, modelParams, requestOptions); +} +/** + * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication. + * + * The Live API is only supported in modern browser windows and Node >= 22. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when setting up a {@link LiveSession}. + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @beta + */ +function getLiveGenerativeModel(ai, modelParams) { + if (!modelParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`); + } + const webSocketHandler = new WebSocketHandlerImpl(); + return new LiveGenerativeModel(ai, modelParams, webSocketHandler); +} + +/** + * The Firebase AI Web SDK. 
+ * + * @packageDocumentation + */ +function registerAI() { + app._registerComponent(new component.Component(AI_TYPE, (container, { instanceIdentifier }) => { + if (!instanceIdentifier) { + throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.'); + } + const backend = decodeInstanceIdentifier(instanceIdentifier); + // getImmediate for FirebaseApp will always succeed + const app = container.getProvider('app').getImmediate(); + const auth = container.getProvider('auth-internal'); + const appCheckProvider = container.getProvider('app-check-internal'); + return new AIService(app, backend, auth, appCheckProvider); + }, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true)); + app.registerVersion(name, version, 'node'); + // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation + app.registerVersion(name, version, 'cjs2020'); +} +registerAI(); + +exports.AIError = AIError; +exports.AIErrorCode = AIErrorCode; +exports.AIModel = AIModel; +exports.AnyOfSchema = AnyOfSchema; +exports.ArraySchema = ArraySchema; +exports.Backend = Backend; +exports.BackendType = BackendType; +exports.BlockReason = BlockReason; +exports.BooleanSchema = BooleanSchema; +exports.ChatSession = ChatSession; +exports.FinishReason = FinishReason; +exports.FunctionCallingMode = FunctionCallingMode; +exports.GenerativeModel = GenerativeModel; +exports.GoogleAIBackend = GoogleAIBackend; +exports.HarmBlockMethod = HarmBlockMethod; +exports.HarmBlockThreshold = HarmBlockThreshold; +exports.HarmCategory = HarmCategory; +exports.HarmProbability = HarmProbability; +exports.HarmSeverity = HarmSeverity; +exports.ImagenAspectRatio = ImagenAspectRatio; +exports.ImagenImageFormat = ImagenImageFormat; +exports.ImagenModel = ImagenModel; +exports.ImagenPersonFilterLevel = ImagenPersonFilterLevel; +exports.ImagenSafetyFilterLevel = ImagenSafetyFilterLevel; +exports.InferenceMode = InferenceMode; +exports.InferenceSource = InferenceSource; +exports.IntegerSchema = IntegerSchema; +exports.Language = Language; +exports.LiveGenerativeModel = LiveGenerativeModel; +exports.LiveResponseType = LiveResponseType; +exports.LiveSession = LiveSession; +exports.Modality = Modality; +exports.NumberSchema = NumberSchema; +exports.ObjectSchema = ObjectSchema; +exports.Outcome = Outcome; +exports.POSSIBLE_ROLES = POSSIBLE_ROLES; +exports.ResponseModality = ResponseModality; +exports.Schema = Schema; +exports.SchemaType = SchemaType; +exports.StringSchema = StringSchema; +exports.URLRetrievalStatus = URLRetrievalStatus; +exports.VertexAIBackend = VertexAIBackend; +exports.getAI = getAI; +exports.getGenerativeModel = getGenerativeModel; +exports.getImagenModel = getImagenModel; +exports.getLiveGenerativeModel = getLiveGenerativeModel; +exports.startAudioConversation = startAudioConversation; +//# sourceMappingURL=index.node.cjs.js.map diff --git a/frontend-old/node_modules/@firebase/ai/dist/index.node.cjs.js.map b/frontend-old/node_modules/@firebase/ai/dist/index.node.cjs.js.map new file mode 100644 index 0000000..e97e3fd --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/index.node.cjs.js.map @@ -0,0 +1 @@ 
+{"version":3,"file":"index.node.cjs.js","sources":["../src/constants.ts","../src/types/enums.ts","../src/types/responses.ts","../src/types/error.ts","../src/types/schema.ts","../src/types/imagen/requests.ts","../src/public-types.ts","../src/backend.ts","../src/service.ts","../src/errors.ts","../src/helpers.ts","../src/models/ai-model.ts","../src/logger.ts","../src/requests/request.ts","../src/requests/response-helpers.ts","../src/googleai-mappers.ts","../src/requests/stream-reader.ts","../src/requests/hybrid-helpers.ts","../src/methods/generate-content.ts","../src/requests/request-helpers.ts","../src/methods/chat-session-helpers.ts","../src/methods/chat-session.ts","../src/methods/count-tokens.ts","../src/models/generative-model.ts","../src/methods/live-session.ts","../src/models/live-generative-model.ts","../src/models/imagen-model.ts","../src/websocket.ts","../src/requests/schema-builder.ts","../src/requests/imagen-image-format.ts","../src/methods/live-session-helpers.ts","../src/api.ts","../src/index.node.ts"],"sourcesContent":["/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { version } from '../package.json';\n\nexport const AI_TYPE = 'AI';\n\nexport const DEFAULT_LOCATION = 'us-central1';\n\nexport const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com';\n\nexport const DEFAULT_API_VERSION = 'v1beta';\n\nexport const PACKAGE_VERSION = version;\n\nexport const LANGUAGE_TAG = 'gl-js';\n\nexport const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;\n\n/**\n * Defines the name of the default in-cloud model to use for hybrid inference.\n */\nexport const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Role is the producer of the content.\n * @public\n */\nexport type Role = (typeof POSSIBLE_ROLES)[number];\n\n/**\n * Possible roles.\n * @public\n */\nexport const POSSIBLE_ROLES = ['user', 'model', 'function', 'system'] as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport const HarmCategory = {\n HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'\n} as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be 
blocked.\n * @public\n */\nexport type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport const HarmBlockThreshold = {\n /**\n * Content with `NEGLIGIBLE` will be allowed.\n */\n BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE` and `LOW` will be allowed.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.\n */\n BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',\n /**\n * All content will be allowed.\n */\n BLOCK_NONE: 'BLOCK_NONE',\n /**\n * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding\n * to the {@link (HarmCategory:type)} will not be present in the response.\n */\n OFF: 'OFF'\n} as const;\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport type HarmBlockThreshold =\n (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport const HarmBlockMethod = {\n /**\n * The harm block method uses both probability and severity scores.\n */\n SEVERITY: 'SEVERITY',\n /**\n * The harm block method uses the probability score.\n */\n PROBABILITY: 'PROBABILITY'\n} as const;\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport type HarmBlockMethod =\n (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport const HarmProbability = {\n /**\n * Content has a negligible chance of being unsafe.\n */\n NEGLIGIBLE: 'NEGLIGIBLE',\n /**\n * Content has a low chance of being unsafe.\n */\n LOW: 'LOW',\n /**\n * Content has a medium chance of being unsafe.\n */\n MEDIUM: 'MEDIUM',\n /**\n * Content has a high chance of being unsafe.\n */\n HIGH: 'HIGH'\n} as const;\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport type HarmProbability =\n (typeof HarmProbability)[keyof typeof HarmProbability];\n\n/**\n * Harm severity levels.\n * @public\n */\nexport const HarmSeverity = {\n /**\n * Negligible level of harm severity.\n */\n HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE',\n /**\n * Low level of harm severity.\n */\n HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW',\n /**\n * Medium level of harm severity.\n */\n HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM',\n /**\n * High level of harm severity.\n */\n HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH',\n /**\n * Harm severity is not supported.\n *\n * @remarks\n * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.\n */\n HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED'\n} as const;\n\n/**\n * Harm severity levels.\n * @public\n */\nexport type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport const BlockReason = {\n /**\n * Content was blocked by safety settings.\n */\n SAFETY: 'SAFETY',\n /**\n * Content was blocked, but the reason is uncategorized.\n */\n OTHER: 'OTHER',\n /**\n * Content was blocked because it contained terms from the terminology blocklist.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * Content was blocked due to prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT'\n} as 
const;\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport const FinishReason = {\n /**\n * Natural stop point of the model or provided stop sequence.\n */\n STOP: 'STOP',\n /**\n * The maximum number of tokens as specified in the request was reached.\n */\n MAX_TOKENS: 'MAX_TOKENS',\n /**\n * The candidate content was flagged for safety reasons.\n */\n SAFETY: 'SAFETY',\n /**\n * The candidate content was flagged for recitation reasons.\n */\n RECITATION: 'RECITATION',\n /**\n * Unknown reason.\n */\n OTHER: 'OTHER',\n /**\n * The candidate content contained forbidden terms.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * The candidate content potentially contained prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT',\n /**\n * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).\n */\n SPII: 'SPII',\n /**\n * The function call generated by the model was invalid.\n */\n MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL'\n} as const;\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];\n\n/**\n * @public\n */\nexport const FunctionCallingMode = {\n /**\n * Default model behavior; model decides to predict either a function call\n * or a natural language response.\n */\n AUTO: 'AUTO',\n /**\n * Model is constrained to always predicting a function call only.\n * If `allowed_function_names` is set, the predicted function call will be\n * limited to any one of `allowed_function_names`, else the predicted\n * function call will be any one of the provided `function_declarations`.\n */\n ANY: 'ANY',\n /**\n * Model will not predict any function call. Model behavior is same as when\n * not passing any function declarations.\n */\n NONE: 'NONE'\n} as const;\n\n/**\n * @public\n */\nexport type FunctionCallingMode =\n (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];\n\n/**\n * Content part modality.\n * @public\n */\nexport const Modality = {\n /**\n * Unspecified modality.\n */\n MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED',\n /**\n * Plain text.\n */\n TEXT: 'TEXT',\n /**\n * Image.\n */\n IMAGE: 'IMAGE',\n /**\n * Video.\n */\n VIDEO: 'VIDEO',\n /**\n * Audio.\n */\n AUDIO: 'AUDIO',\n /**\n * Document (for example, PDF).\n */\n DOCUMENT: 'DOCUMENT'\n} as const;\n\n/**\n * Content part modality.\n * @public\n */\nexport type Modality = (typeof Modality)[keyof typeof Modality];\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport const ResponseModality = {\n /**\n * Text.\n * @beta\n */\n TEXT: 'TEXT',\n /**\n * Image.\n * @beta\n */\n IMAGE: 'IMAGE',\n /**\n * Audio.\n * @beta\n */\n AUDIO: 'AUDIO'\n} as const;\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport type ResponseModality =\n (typeof ResponseModality)[keyof typeof ResponseModality];\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @remarks\n * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an\n * on-device model. If on-device inference is not available, the SDK\n * will fall back to using a cloud-hosted model.\n * <br/>\n * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an\n * on-device model. 
The SDK will not fall back to a cloud-hosted model.\n * If on-device inference is not available, inference methods will throw.\n * <br/>\n * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a\n * cloud-hosted model. The SDK will not fall back to an on-device model.\n * <br/>\n * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a\n * cloud-hosted model. If not available, the SDK will fall back to an\n * on-device model.\n *\n * @beta\n */\nexport const InferenceMode = {\n 'PREFER_ON_DEVICE': 'prefer_on_device',\n 'ONLY_ON_DEVICE': 'only_on_device',\n 'ONLY_IN_CLOUD': 'only_in_cloud',\n 'PREFER_IN_CLOUD': 'prefer_in_cloud'\n} as const;\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport const InferenceSource = {\n 'ON_DEVICE': 'on_device',\n 'IN_CLOUD': 'in_cloud'\n} as const;\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceSource =\n (typeof InferenceSource)[keyof typeof InferenceSource];\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport const Outcome = {\n UNSPECIFIED: 'OUTCOME_UNSPECIFIED',\n OK: 'OUTCOME_OK',\n FAILED: 'OUTCOME_FAILED',\n DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'\n};\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport type Outcome = (typeof Outcome)[keyof typeof Outcome];\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport const Language = {\n UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',\n PYTHON: 'PYTHON'\n};\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport type Language = (typeof Language)[keyof typeof Language];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, FunctionCall, InlineDataPart } from './content';\nimport {\n BlockReason,\n FinishReason,\n HarmCategory,\n HarmProbability,\n HarmSeverity,\n InferenceSource,\n Modality\n} from './enums';\n\n/**\n * Result object returned from {@link GenerativeModel.generateContent} call.\n *\n * @public\n */\nexport interface GenerateContentResult {\n response: EnhancedGenerateContentResponse;\n}\n\n/**\n * Result object returned from {@link GenerativeModel.generateContentStream} call.\n * Iterate over `stream` to get chunks as they come in and/or\n * use the `response` promise to get the aggregated response when\n * the stream is done.\n *\n * @public\n */\nexport interface GenerateContentStreamResult {\n stream: AsyncGenerator<EnhancedGenerateContentResponse>;\n response: Promise<EnhancedGenerateContentResponse>;\n}\n\n/**\n * Response object wrapped with helper methods.\n *\n * @public\n */\nexport interface EnhancedGenerateContentResponse\n extends GenerateContentResponse {\n /**\n * Returns the text string from the 
response, if available.\n * Throws if the prompt or candidate was blocked.\n */\n text: () => string;\n /**\n * Aggregates and returns every {@link InlineDataPart} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n inlineDataParts: () => InlineDataPart[] | undefined;\n /**\n * Aggregates and returns every {@link FunctionCall} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n functionCalls: () => FunctionCall[] | undefined;\n /**\n * Aggregates and returns every {@link TextPart} with their `thought` property set\n * to `true` from the first candidate of {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n *\n * @remarks\n * Thought summaries provide a brief overview of the model's internal thinking process,\n * offering insight into how it arrived at the final answer. This can be useful for\n * debugging, understanding the model's reasoning, and verifying its accuracy.\n *\n * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is\n * set to `true`.\n */\n thoughtSummary: () => string | undefined;\n /**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\n inferenceSource?: InferenceSource;\n}\n\n/**\n * Individual response from {@link GenerativeModel.generateContent} and\n * {@link GenerativeModel.generateContentStream}.\n * `generateContentStream()` will return one in each chunk until\n * the stream is done.\n * @public\n */\nexport interface GenerateContentResponse {\n candidates?: GenerateContentCandidate[];\n promptFeedback?: PromptFeedback;\n usageMetadata?: UsageMetadata;\n}\n\n/**\n * Usage metadata about a {@link GenerateContentResponse}.\n *\n * @public\n */\nexport interface UsageMetadata {\n promptTokenCount: number;\n candidatesTokenCount: number;\n /**\n * The number of tokens used by the model's internal \"thinking\" process.\n */\n thoughtsTokenCount?: number;\n totalTokenCount: number;\n /**\n * The number of tokens used by tools.\n */\n toolUsePromptTokenCount?: number;\n promptTokensDetails?: ModalityTokenCount[];\n candidatesTokensDetails?: ModalityTokenCount[];\n /**\n * A list of tokens used by tools, broken down by modality.\n */\n toolUsePromptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * Represents token counting info for a single modality.\n *\n * @public\n */\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality: Modality;\n /** The number of tokens counted. 
*/\n tokenCount: number;\n}\n\n/**\n * If the prompt was blocked, this will be populated with `blockReason` and\n * the relevant `safetyRatings`.\n * @public\n */\nexport interface PromptFeedback {\n blockReason?: BlockReason;\n safetyRatings: SafetyRating[];\n /**\n * A human-readable description of the `blockReason`.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n blockReasonMessage?: string;\n}\n\n/**\n * A candidate returned as part of a {@link GenerateContentResponse}.\n * @public\n */\nexport interface GenerateContentCandidate {\n index: number;\n content: Content;\n finishReason?: FinishReason;\n finishMessage?: string;\n safetyRatings?: SafetyRating[];\n citationMetadata?: CitationMetadata;\n groundingMetadata?: GroundingMetadata;\n urlContextMetadata?: URLContextMetadata;\n}\n\n/**\n * Citation metadata that may be found on a {@link GenerateContentCandidate}.\n * @public\n */\nexport interface CitationMetadata {\n citations: Citation[];\n}\n\n/**\n * A single citation.\n * @public\n */\nexport interface Citation {\n startIndex?: number;\n endIndex?: number;\n uri?: string;\n license?: string;\n /**\n * The title of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n title?: string;\n /**\n * The publication date of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n publicationDate?: Date;\n}\n\n/**\n * Metadata returned when grounding is enabled.\n *\n * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * \"Grounding with Google Search\" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}\n * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}\n * section within the Service Specific Terms).\n *\n * @public\n */\nexport interface GroundingMetadata {\n /**\n * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be\n * embedded in an app to display a Google Search entry point for follow-up web searches related to\n * a model's \"Grounded Response\".\n */\n searchEntryPoint?: SearchEntrypoint;\n /**\n * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content\n * (for example, from a web page). that the model used to ground its response.\n */\n groundingChunks?: GroundingChunk[];\n /**\n * A list of {@link GroundingSupport} objects. Each object details how specific segments of the\n * model's response are supported by the `groundingChunks`.\n */\n groundingSupports?: GroundingSupport[];\n /**\n * A list of web search queries that the model performed to gather the grounding information.\n * These can be used to allow users to explore the search results themselves.\n */\n webSearchQueries?: string[];\n /**\n * @deprecated Use {@link GroundingSupport} instead.\n */\n retrievalQueries?: string[];\n}\n\n/**\n * Google search entry point.\n *\n * @public\n */\nexport interface SearchEntrypoint {\n /**\n * HTML/CSS snippet that must be embedded in a web page. 
The snippet is designed to avoid\n * undesired interaction with the rest of the page's CSS.\n *\n * To ensure proper rendering and prevent CSS conflicts, it is recommended\n * to encapsulate this `renderedContent` within a shadow DOM when embedding it\n * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.\n *\n * @example\n * ```javascript\n * const container = document.createElement('div');\n * document.body.appendChild(container);\n * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;\n * ```\n */\n renderedContent?: string;\n}\n\n/**\n * Represents a chunk of retrieved data that supports a claim in the model's response. This is part\n * of the grounding information provided when grounding is enabled.\n *\n * @public\n */\nexport interface GroundingChunk {\n /**\n * Contains details if the grounding chunk is from a web source.\n */\n web?: WebGroundingChunk;\n}\n\n/**\n * A grounding chunk from the web.\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for \"Grounding with Google Search\".\n *\n * @public\n */\nexport interface WebGroundingChunk {\n /**\n * The URI of the retrieved web page.\n */\n uri?: string;\n /**\n * The title of the retrieved web page.\n */\n title?: string;\n /**\n * The domain of the original URI from which the content was retrieved.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be\n * `undefined`.\n */\n domain?: string;\n}\n\n/**\n * Provides information about how a specific segment of the model's response is supported by the\n * retrieved grounding chunks.\n *\n * @public\n */\nexport interface GroundingSupport {\n /**\n * Specifies the segment of the model's response content that this grounding support pertains to.\n */\n segment?: Segment;\n /**\n * A list of indices that refer to specific {@link GroundingChunk} objects within the\n * {@link GroundingMetadata.groundingChunks} array. These referenced chunks\n * are the sources that support the claim made in the associated `segment` of the response.\n * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,\n * and `groundingChunks[4]` are the retrieved content supporting this part of the response.\n */\n groundingChunkIndices?: number[];\n}\n\n/**\n * Represents a specific segment within a {@link Content} object, often used to\n * pinpoint the exact location of text or data that grounding information refers to.\n *\n * @public\n */\nexport interface Segment {\n /**\n * The zero-based index of the {@link Part} object within the `parts` array\n * of its parent {@link Content} object. This identifies which part of the\n * content the segment belongs to.\n */\n partIndex: number;\n /**\n * The zero-based start index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the\n * beginning of the part's content (e.g., `Part.text`).\n */\n startIndex: number;\n /**\n * The zero-based end index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. 
This offset is exclusive, meaning the character\n * at this index is not included in the segment.\n */\n endIndex: number;\n /**\n * The text corresponding to the segment from the response.\n */\n text: string;\n}\n\n/**\n * Metadata related to {@link URLContextTool}.\n *\n * @beta\n */\nexport interface URLContextMetadata {\n /**\n * List of URL metadata used to provide context to the Gemini model.\n */\n urlMetadata: URLMetadata[];\n}\n\n/**\n * Metadata for a single URL retrieved by the {@link URLContextTool} tool.\n *\n * @beta\n */\nexport interface URLMetadata {\n /**\n * The retrieved URL.\n */\n retrievedUrl?: string;\n /**\n * The status of the URL retrieval.\n */\n urlRetrievalStatus?: URLRetrievalStatus;\n}\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport const URLRetrievalStatus = {\n /**\n * Unspecified retrieval status.\n */\n URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',\n /**\n * The URL retrieval was successful.\n */\n URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',\n /**\n * The URL retrieval failed.\n */\n URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',\n /**\n * The URL retrieval failed because the content is behind a paywall.\n */\n URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',\n /**\n * The URL retrieval failed because the content is unsafe.\n */\n URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'\n};\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport type URLRetrievalStatus =\n (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];\n\n/**\n * @public\n */\nexport interface WebAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * @public\n */\nexport interface RetrievedContextAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * Protobuf google.type.Date\n * @public\n */\nexport interface Date {\n year: number;\n month: number;\n day: number;\n}\n\n/**\n * A safety rating associated with a {@link GenerateContentCandidate}\n * @public\n */\nexport interface SafetyRating {\n category: HarmCategory;\n probability: HarmProbability;\n /**\n * The harm severity level.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.\n */\n severity: HarmSeverity;\n /**\n * The probability score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link 
VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n probabilityScore: number;\n /**\n * The severity score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n severityScore: number;\n blocked: boolean;\n}\n\n/**\n * Response from calling {@link GenerativeModel.countTokens}.\n * @public\n */\nexport interface CountTokensResponse {\n /**\n * The total number of tokens counted across all instances from the request.\n */\n totalTokens: number;\n /**\n * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.\n *\n * The total number of billable characters counted across all instances\n * from the request.\n */\n totalBillableCharacters?: number;\n /**\n * The breakdown, by modality, of how many tokens are consumed by the prompt.\n */\n promptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * An incremental content update from the model.\n *\n * @beta\n */\nexport interface LiveServerContent {\n type: 'serverContent';\n /**\n * The content that the model has generated as part of the current conversation with the user.\n */\n modelTurn?: Content;\n /**\n * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.\n */\n turnComplete?: boolean;\n /**\n * Indicates whether the model was interrupted by the client. An interruption occurs when\n * the client sends a message before the model finishes it's turn. This is `undefined` if the\n * model was not interrupted.\n */\n interrupted?: boolean;\n /**\n * Transcription of the audio that was input to the model.\n */\n inputTranscription?: Transcription;\n /**\n * Transcription of the audio output from the model.\n */\n outputTranscription?: Transcription;\n}\n\n/**\n * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription\n * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on\n * the {@link LiveGenerationConfig}.\n *\n * @beta\n */\n\nexport interface Transcription {\n /**\n * The text transcription of the audio.\n */\n text?: string;\n}\n\n/**\n * A request from the model for the client to execute one or more functions.\n *\n * @beta\n */\nexport interface LiveServerToolCall {\n type: 'toolCall';\n /**\n * An array of function calls to run.\n */\n functionCalls: FunctionCall[];\n}\n\n/**\n * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.\n *\n * @beta\n */\nexport interface LiveServerToolCallCancellation {\n type: 'toolCallCancellation';\n /**\n * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.\n */\n functionIds: string[];\n}\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n *\n * @beta\n */\nexport const LiveResponseType = {\n SERVER_CONTENT: 'serverContent',\n TOOL_CALL: 'toolCall',\n TOOL_CALL_CANCELLATION: 'toolCallCancellation'\n};\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n * This is a property on all messages that can be used for type narrowing. 
This property is not\n * returned by the server, it is assigned to a server message object once it's parsed.\n *\n * @beta\n */\nexport type LiveResponseType =\n (typeof LiveResponseType)[keyof typeof LiveResponseType];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GenerateContentResponse } from './responses';\n\n/**\n * Details object that may be included in an error response.\n *\n * @public\n */\nexport interface ErrorDetails {\n '@type'?: string;\n\n /** The reason for the error. */\n reason?: string;\n\n /** The domain where the error occurred. */\n domain?: string;\n\n /** Additional metadata about the error. */\n metadata?: Record<string, unknown>;\n\n /** Any other relevant information about the error. */\n [key: string]: unknown;\n}\n\n/**\n * Details object that contains data originating from a bad HTTP response.\n *\n * @public\n */\nexport interface CustomErrorData {\n /** HTTP status code of the error response. */\n status?: number;\n\n /** HTTP status text of the error response. */\n statusText?: string;\n\n /** Response from a {@link GenerateContentRequest} */\n response?: GenerateContentResponse;\n\n /** Optional additional details about the error. */\n errorDetails?: ErrorDetails[];\n}\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport const AIErrorCode = {\n /** A generic error occurred. */\n ERROR: 'error',\n\n /** An error occurred in a request. */\n REQUEST_ERROR: 'request-error',\n\n /** An error occurred in a response. */\n RESPONSE_ERROR: 'response-error',\n\n /** An error occurred while performing a fetch. */\n FETCH_ERROR: 'fetch-error',\n\n /** An error occurred because an operation was attempted on a closed session. */\n SESSION_CLOSED: 'session-closed',\n\n /** An error associated with a Content object. */\n INVALID_CONTENT: 'invalid-content',\n\n /** An error due to the Firebase API not being enabled in the Console. */\n API_NOT_ENABLED: 'api-not-enabled',\n\n /** An error due to invalid Schema input. */\n INVALID_SCHEMA: 'invalid-schema',\n\n /** An error occurred due to a missing Firebase API key. */\n NO_API_KEY: 'no-api-key',\n\n /** An error occurred due to a missing Firebase app ID. */\n NO_APP_ID: 'no-app-id',\n\n /** An error occurred due to a model name not being specified during initialization. */\n NO_MODEL: 'no-model',\n\n /** An error occurred due to a missing project ID. */\n NO_PROJECT_ID: 'no-project-id',\n\n /** An error occurred while parsing. */\n PARSE_FAILED: 'parse-failed',\n\n /** An error occurred due an attempt to use an unsupported feature. 
*/\n UNSUPPORTED: 'unsupported'\n} as const;\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport const SchemaType = {\n /** String type. */\n STRING: 'string',\n /** Number type. */\n NUMBER: 'number',\n /** Integer type. */\n INTEGER: 'integer',\n /** Boolean type. */\n BOOLEAN: 'boolean',\n /** Array type. */\n ARRAY: 'array',\n /** Object type. */\n OBJECT: 'object'\n} as const;\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];\n\n/**\n * Basic {@link Schema} properties shared across several Schema-related\n * types.\n * @public\n */\nexport interface SchemaShared<T> {\n /**\n * An array of {@link Schema}. The generated data must be valid against any of the schemas\n * listed in this array. This allows specifying multiple possible structures or types for a\n * single field.\n */\n anyOf?: T[];\n /** Optional. The format of the property.\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or\n * `'date-time'`, otherwise requests will fail.\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /**\n * The title of the property. This helps document the schema's purpose but does not typically\n * constrain the generated value. It can subtly guide the model by clarifying the intent of a\n * field.\n */\n title?: string;\n /** Optional. The items of the property. */\n items?: T;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Map of `Schema` objects. */\n properties?: {\n [k: string]: T;\n };\n /** A hint suggesting the order in which the keys should appear in the generated JSON string. */\n propertyOrdering?: string[];\n /** Optional. The enum of the property. */\n enum?: string[];\n /** Optional. The example of the property. */\n example?: unknown;\n /** Optional. Whether the property is nullable. */\n nullable?: boolean;\n /** The minimum value of a numeric type. */\n minimum?: number;\n /** The maximum value of a numeric type. 
*/\n maximum?: number;\n [key: string]: unknown;\n}\n\n/**\n * Params passed to {@link Schema} static methods to create specific\n * {@link Schema} classes.\n * @public\n */\nexport interface SchemaParams extends SchemaShared<SchemaInterface> {}\n\n/**\n * Final format for {@link Schema} params passed to backend requests.\n * @public\n */\nexport interface SchemaRequest extends SchemaShared<SchemaRequest> {\n /**\n * The type of the property. this can only be undefined when using `anyOf` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }.\n */\n type?: SchemaType;\n /** Optional. Array of required property. */\n required?: string[];\n}\n\n/**\n * Interface for {@link Schema} class.\n * @public\n */\nexport interface SchemaInterface extends SchemaShared<SchemaInterface> {\n /**\n * The type of the property. this can only be undefined when using `anyof` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.\n */\n type?: SchemaType;\n}\n\n/**\n * Interface for JSON parameters in a schema of {@link (SchemaType:type)}\n * \"object\" when not using the `Schema.object()` helper.\n * @public\n */\nexport interface ObjectSchemaRequest extends SchemaRequest {\n type: 'object';\n /**\n * This is not a property accepted in the final request to the backend, but is\n * a client-side convenience property that is only usable by constructing\n * a schema through the `Schema.object()` helper method. Populating this\n * property will cause response errors if the object is not wrapped with\n * `Schema.object()`.\n */\n optionalProperties?: never;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ImagenImageFormat } from '../../requests/imagen-image-format';\n\n/**\n * Parameters for configuring an {@link ImagenModel}.\n *\n * @public\n */\nexport interface ImagenModelParams {\n /**\n * The Imagen model to use for generating images.\n * For example: `imagen-3.0-generate-002`.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}\n * for a full list of supported Imagen 3 models.\n */\n model: string;\n /**\n * Configuration options for generating images with Imagen.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering potentially inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n}\n\n/**\n * Configuration options for generating images with Imagen.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for\n * more details.\n *\n * @public\n */\nexport interface ImagenGenerationConfig {\n /**\n * A description of what should be omitted from the generated images.\n *\n * Support for negative prompts depends on 
the Imagen model.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.\n *\n * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions\n * greater than `imagen-3.0-generate-002`.\n */\n negativePrompt?: string;\n /**\n * The number of images to generate. The default value is 1.\n *\n * The number of sample images that may be generated in each request depends on the model\n * (typically up to 4); see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">sampleCount</a>\n * documentation for more details.\n */\n numberOfImages?: number;\n /**\n * The aspect ratio of the generated images. The default value is square 1:1.\n * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}\n * for more details.\n */\n aspectRatio?: ImagenAspectRatio;\n /**\n * The image format of the generated images. The default is PNG.\n *\n * See {@link ImagenImageFormat} for more details.\n */\n imageFormat?: ImagenImageFormat;\n /**\n * Whether to add an invisible watermark to generated images.\n *\n * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate\n * that they are AI generated. If set to `false`, watermarking will be disabled.\n *\n * For Imagen 3 models, the default value is `true`; see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">addWatermark</a>\n * documentation for more details.\n *\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,\n * and cannot be turned off.\n */\n addWatermark?: boolean;\n}\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport const ImagenSafetyFilterLevel = {\n /**\n * The most aggressive filtering level; most strict blocking.\n */\n BLOCK_LOW_AND_ABOVE: 'block_low_and_above',\n /**\n * Blocks some sensitive prompts and responses.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',\n /**\n * Blocks few sensitive prompts and responses.\n */\n BLOCK_ONLY_HIGH: 'block_only_high',\n /**\n * The least aggressive filtering level; blocks very few sensitive prompts and responses.\n *\n * Access to this feature is restricted and may require your case to be reviewed and approved by\n * Cloud support.\n */\n BLOCK_NONE: 'block_none'\n} as const;\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. 
See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport type ImagenSafetyFilterLevel =\n (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport const ImagenPersonFilterLevel = {\n /**\n * Disallow generation of images containing people or faces; images of people are filtered out.\n */\n BLOCK_ALL: 'dont_allow',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ADULT: 'allow_adult',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ALL: 'allow_all'\n} as const;\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport type ImagenPersonFilterLevel =\n (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];\n\n/**\n * Settings for controlling the aggressiveness of filtering out sensitive content.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details.\n *\n * @public\n */\nexport interface ImagenSafetySettings {\n /**\n * A filter level controlling how aggressive to filter out sensitive content from generated\n * images.\n */\n safetyFilterLevel?: ImagenSafetyFilterLevel;\n /**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n */\n personFilterLevel?: ImagenPersonFilterLevel;\n}\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport const ImagenAspectRatio = {\n /**\n * Square (1:1) aspect ratio.\n */\n 'SQUARE': '1:1',\n /**\n * Landscape (3:4) aspect ratio.\n */\n 'LANDSCAPE_3x4': '3:4',\n /**\n * Portrait (4:3) aspect ratio.\n */\n 'PORTRAIT_4x3': '4:3',\n /**\n * Landscape (16:9) aspect ratio.\n */\n 'LANDSCAPE_16x9': '16:9',\n /**\n * Portrait (9:16) aspect ratio.\n */\n 'PORTRAIT_9x16': '9:16'\n} as const;\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for 
generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport type ImagenAspectRatio =\n (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp } from '@firebase/app';\nimport { Backend } from './backend';\n\nexport * from './types';\n\n/**\n * An instance of the Firebase AI SDK.\n *\n * Do not create this instance directly. Instead, use {@link getAI | getAI()}.\n *\n * @public\n */\nexport interface AI {\n /**\n * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.\n */\n app: FirebaseApp;\n /**\n * A {@link Backend} instance that specifies the configuration for the target backend,\n * either the Gemini Developer API (using {@link GoogleAIBackend}) or the\n * Vertex AI Gemini API (using {@link VertexAIBackend}).\n */\n backend: Backend;\n /**\n * Options applied to this {@link AI} instance.\n */\n options?: AIOptions;\n /**\n * @deprecated use `AI.backend.location` instead.\n *\n * The location configured for this AI service instance, relevant for Vertex AI backends.\n */\n location: string;\n}\n\n/**\n * An enum-like object containing constants that represent the supported backends\n * for the Firebase AI SDK.\n * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)\n * the SDK will communicate with.\n *\n * These values are assigned to the `backendType` property within the specific backend\n * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify\n * which service to target.\n *\n * @public\n */\nexport const BackendType = {\n /**\n * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.\n * Use this constant when creating a {@link VertexAIBackend} configuration.\n */\n VERTEX_AI: 'VERTEX_AI',\n\n /**\n * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).\n * Use this constant when creating a {@link GoogleAIBackend} configuration.\n */\n GOOGLE_AI: 'GOOGLE_AI'\n} as const; // Using 'as const' makes the string values literal types\n\n/**\n * Type alias representing valid backend types.\n * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.\n *\n * @public\n */\nexport type BackendType = (typeof BackendType)[keyof typeof BackendType];\n\n/**\n * Options for initializing the AI service using {@link getAI | getAI()}.\n * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)\n * and configuring its specific options (like location for Vertex AI).\n *\n * @public\n */\nexport interface AIOptions {\n /**\n * The backend configuration to use for the AI service instance.\n * Defaults to the Gemini Developer API backend 
({@link GoogleAIBackend}).\n */\n backend?: Backend;\n /**\n * Whether to use App Check limited use tokens. Defaults to false.\n */\n useLimitedUseAppCheckTokens?: boolean;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { DEFAULT_LOCATION } from './constants';\nimport { BackendType } from './public-types';\n\n/**\n * Abstract base class representing the configuration for an AI service backend.\n * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for\n * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and\n * {@link VertexAIBackend} for the Vertex AI Gemini API.\n *\n * @public\n */\nexport abstract class Backend {\n /**\n * Specifies the backend type.\n */\n readonly backendType: BackendType;\n\n /**\n * Protected constructor for use by subclasses.\n * @param type - The backend type.\n */\n protected constructor(type: BackendType) {\n this.backendType = type;\n }\n}\n\n/**\n * Configuration class for the Gemini Developer API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.\n *\n * @public\n */\nexport class GoogleAIBackend extends Backend {\n /**\n * Creates a configuration object for the Gemini Developer API backend.\n */\n constructor() {\n super(BackendType.GOOGLE_AI);\n }\n}\n\n/**\n * Configuration class for the Vertex AI Gemini API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.\n *\n * @public\n */\nexport class VertexAIBackend extends Backend {\n /**\n * The region identifier.\n * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n readonly location: string;\n\n /**\n * Creates a configuration object for the Vertex AI backend.\n *\n * @param location - The region identifier, defaulting to `us-central1`;\n * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n constructor(location: string = DEFAULT_LOCATION) {\n super(BackendType.VERTEX_AI);\n if (!location) {\n this.location = DEFAULT_LOCATION;\n } else {\n this.location = location;\n }\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * 
limitations under the License.\n */\n\nimport { FirebaseApp, _FirebaseService } from '@firebase/app';\nimport { AI, AIOptions, InferenceMode, OnDeviceParams } from './public-types';\nimport {\n AppCheckInternalComponentName,\n FirebaseAppCheckInternal\n} from '@firebase/app-check-interop-types';\nimport { Provider } from '@firebase/component';\nimport {\n FirebaseAuthInternal,\n FirebaseAuthInternalName\n} from '@firebase/auth-interop-types';\nimport { Backend, VertexAIBackend } from './backend';\nimport { ChromeAdapterImpl } from './methods/chrome-adapter';\n\nexport class AIService implements AI, _FirebaseService {\n auth: FirebaseAuthInternal | null;\n appCheck: FirebaseAppCheckInternal | null;\n _options?: Omit<AIOptions, 'backend'>;\n location: string; // This is here for backwards-compatibility\n\n constructor(\n public app: FirebaseApp,\n public backend: Backend,\n authProvider?: Provider<FirebaseAuthInternalName>,\n appCheckProvider?: Provider<AppCheckInternalComponentName>,\n public chromeAdapterFactory?: (\n mode: InferenceMode,\n window?: Window,\n params?: OnDeviceParams\n ) => ChromeAdapterImpl | undefined\n ) {\n const appCheck = appCheckProvider?.getImmediate({ optional: true });\n const auth = authProvider?.getImmediate({ optional: true });\n this.auth = auth || null;\n this.appCheck = appCheck || null;\n\n if (backend instanceof VertexAIBackend) {\n this.location = backend.location;\n } else {\n this.location = '';\n }\n }\n\n _delete(): Promise<void> {\n return Promise.resolve();\n }\n\n set options(optionsToSet: AIOptions) {\n this._options = optionsToSet;\n }\n\n get options(): AIOptions | undefined {\n return this._options;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseError } from '@firebase/util';\nimport { AIErrorCode, CustomErrorData } from './types';\nimport { AI_TYPE } from './constants';\n\n/**\n * Error class for the Firebase AI SDK.\n *\n * @public\n */\nexport class AIError extends FirebaseError {\n /**\n * Constructs a new instance of the `AIError` class.\n *\n * @param code - The error code from {@link (AIErrorCode:type)}.\n * @param message - A human-readable message describing the error.\n * @param customErrorData - Optional error data.\n */\n constructor(\n readonly code: AIErrorCode,\n message: string,\n readonly customErrorData?: CustomErrorData\n ) {\n // Match error format used by FirebaseError from ErrorFactory\n const service = AI_TYPE;\n const fullCode = `${service}/${code}`;\n const fullMessage = `${service}: ${message} (${fullCode})`;\n super(code, fullMessage);\n\n // FirebaseError initializes a stack trace, but it assumes the error is created from the error\n // factory. 
Since we break this assumption, we set the stack trace to be originating from this\n // constructor.\n // This is only supported in V8.\n if (Error.captureStackTrace) {\n // Allows us to initialize the stack trace without including the constructor itself at the\n // top level of the stack trace.\n Error.captureStackTrace(this, AIError);\n }\n\n // Allows instanceof AIError in ES5/ES6\n // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work\n // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget\n // which we can now use since we no longer target ES5.\n Object.setPrototypeOf(this, AIError.prototype);\n\n // Since Error is an interface, we don't inherit toString and so we define it ourselves.\n this.toString = () => fullMessage;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AI_TYPE } from './constants';\nimport { AIError } from './errors';\nimport { AIErrorCode } from './types';\nimport { Backend, GoogleAIBackend, VertexAIBackend } from './backend';\n\n/**\n * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}\n * instances by backend type.\n *\n * @internal\n */\nexport function encodeInstanceIdentifier(backend: Backend): string {\n if (backend instanceof GoogleAIBackend) {\n return `${AI_TYPE}/googleai`;\n } else if (backend instanceof VertexAIBackend) {\n return `${AI_TYPE}/vertexai/${backend.location}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(backend.backendType)}`\n );\n }\n}\n\n/**\n * Decodes an instance identifier string into a {@link Backend}.\n *\n * @internal\n */\nexport function decodeInstanceIdentifier(instanceIdentifier: string): Backend {\n const identifierParts = instanceIdentifier.split('/');\n if (identifierParts[0] !== AI_TYPE) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`\n );\n }\n const backendType = identifierParts[1];\n switch (backendType) {\n case 'vertexai':\n const location: string | undefined = identifierParts[2];\n if (!location) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown location '${instanceIdentifier}'`\n );\n }\n return new VertexAIBackend(location);\n case 'googleai':\n return new GoogleAIBackend();\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier string: '${instanceIdentifier}'`\n );\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * 
distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode, AI, BackendType } from '../public-types';\nimport { AIService } from '../service';\nimport { ApiSettings } from '../types/internal';\nimport { _isFirebaseServerApp } from '@firebase/app';\n\n/**\n * Base class for Firebase AI model APIs.\n *\n * Instances of this class are associated with a specific Firebase AI {@link Backend}\n * and provide methods for interacting with the configured generative model.\n *\n * @public\n */\nexport abstract class AIModel {\n /**\n * The fully qualified model resource name to use for generating images\n * (for example, `publishers/google/models/imagen-3.0-generate-002`).\n */\n readonly model: string;\n\n /**\n * @internal\n */\n _apiSettings: ApiSettings;\n\n /**\n * Constructs a new instance of the {@link AIModel} class.\n *\n * This constructor should only be called from subclasses that provide\n * a model API.\n *\n * @param ai - an {@link AI} instance.\n * @param modelName - The name of the model being used. It can be in one of the following formats:\n * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)\n * - `models/my-model` (will resolve to `publishers/google/models/my-model`)\n * - `publishers/my-publisher/models/my-model` (fully qualified model name)\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @internal\n */\n protected constructor(ai: AI, modelName: string) {\n if (!ai.app?.options?.apiKey) {\n throw new AIError(\n AIErrorCode.NO_API_KEY,\n `The \"apiKey\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`\n );\n } else if (!ai.app?.options?.projectId) {\n throw new AIError(\n AIErrorCode.NO_PROJECT_ID,\n `The \"projectId\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`\n );\n } else if (!ai.app?.options?.appId) {\n throw new AIError(\n AIErrorCode.NO_APP_ID,\n `The \"appId\" field is empty in the local Firebase config. 
Firebase AI requires this field to contain a valid app ID.`\n );\n } else {\n this._apiSettings = {\n apiKey: ai.app.options.apiKey,\n project: ai.app.options.projectId,\n appId: ai.app.options.appId,\n automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,\n location: ai.location,\n backend: ai.backend\n };\n\n if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {\n const token = ai.app.settings.appCheckToken;\n this._apiSettings.getAppCheckToken = () => {\n return Promise.resolve({ token });\n };\n } else if ((ai as AIService).appCheck) {\n if (ai.options?.useLimitedUseAppCheckTokens) {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getLimitedUseToken();\n } else {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getToken();\n }\n }\n\n if ((ai as AIService).auth) {\n this._apiSettings.getAuthToken = () =>\n (ai as AIService).auth!.getToken();\n }\n\n this.model = AIModel.normalizeModelName(\n modelName,\n this._apiSettings.backend.backendType\n );\n }\n }\n\n /**\n * Normalizes the given model name to a fully qualified model resource name.\n *\n * @param modelName - The model name to normalize.\n * @returns The fully qualified model resource name.\n *\n * @internal\n */\n static normalizeModelName(\n modelName: string,\n backendType: BackendType\n ): string {\n if (backendType === BackendType.GOOGLE_AI) {\n return AIModel.normalizeGoogleAIModelName(modelName);\n } else {\n return AIModel.normalizeVertexAIModelName(modelName);\n }\n }\n\n /**\n * @internal\n */\n private static normalizeGoogleAIModelName(modelName: string): string {\n return `models/${modelName}`;\n }\n\n /**\n * @internal\n */\n private static normalizeVertexAIModelName(modelName: string): string {\n let model: string;\n if (modelName.includes('/')) {\n if (modelName.startsWith('models/')) {\n // Add 'publishers/google' if the user is only passing in 'models/model-name'.\n model = `publishers/google/${modelName}`;\n } else {\n // Any other custom format (e.g. 
tuned models) must be passed in correctly.\n model = modelName;\n }\n } else {\n // If path is not included, assume it's a non-tuned model.\n model = `publishers/google/models/${modelName}`;\n }\n\n return model;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Logger } from '@firebase/logger';\n\nexport const logger = new Logger('@firebase/vertexai');\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ErrorDetails, RequestOptions, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ApiSettings } from '../types/internal';\nimport {\n DEFAULT_API_VERSION,\n DEFAULT_DOMAIN,\n DEFAULT_FETCH_TIMEOUT_MS,\n LANGUAGE_TAG,\n PACKAGE_VERSION\n} from '../constants';\nimport { logger } from '../logger';\nimport { GoogleAIBackend, VertexAIBackend } from '../backend';\nimport { BackendType } from '../public-types';\n\nexport enum Task {\n GENERATE_CONTENT = 'generateContent',\n STREAM_GENERATE_CONTENT = 'streamGenerateContent',\n COUNT_TOKENS = 'countTokens',\n PREDICT = 'predict'\n}\n\nexport class RequestUrl {\n constructor(\n public model: string,\n public task: Task,\n public apiSettings: ApiSettings,\n public stream: boolean,\n public requestOptions?: RequestOptions\n ) {}\n toString(): string {\n const url = new URL(this.baseUrl); // Throws if the URL is invalid\n url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`;\n url.search = this.queryParams.toString();\n return url.toString();\n }\n\n private get baseUrl(): string {\n return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`;\n }\n\n private get apiVersion(): string {\n return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available\n }\n\n private get modelPath(): string {\n if (this.apiSettings.backend instanceof GoogleAIBackend) {\n return `projects/${this.apiSettings.project}/${this.model}`;\n } else if (this.apiSettings.backend instanceof VertexAIBackend) {\n return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`\n );\n }\n }\n\n private get queryParams(): URLSearchParams {\n const params = new URLSearchParams();\n if (this.stream) {\n params.set('alt', 'sse');\n }\n\n return params;\n }\n}\n\nexport class WebSocketUrl {\n 
constructor(public apiSettings: ApiSettings) {}\n toString(): string {\n const url = new URL(`wss://${DEFAULT_DOMAIN}`);\n url.pathname = this.pathname;\n\n const queryParams = new URLSearchParams();\n queryParams.set('key', this.apiSettings.apiKey);\n url.search = queryParams.toString();\n\n return url.toString();\n }\n\n private get pathname(): string {\n if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent';\n } else {\n return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`;\n }\n }\n}\n\n/**\n * Log language and \"fire/version\" to x-goog-api-client\n */\nfunction getClientHeaders(): string {\n const loggingTags = [];\n loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`);\n loggingTags.push(`fire/${PACKAGE_VERSION}`);\n return loggingTags.join(' ');\n}\n\nexport async function getHeaders(url: RequestUrl): Promise<Headers> {\n const headers = new Headers();\n headers.append('Content-Type', 'application/json');\n headers.append('x-goog-api-client', getClientHeaders());\n headers.append('x-goog-api-key', url.apiSettings.apiKey);\n if (url.apiSettings.automaticDataCollectionEnabled) {\n headers.append('X-Firebase-Appid', url.apiSettings.appId);\n }\n if (url.apiSettings.getAppCheckToken) {\n const appCheckToken = await url.apiSettings.getAppCheckToken();\n if (appCheckToken) {\n headers.append('X-Firebase-AppCheck', appCheckToken.token);\n if (appCheckToken.error) {\n logger.warn(\n `Unable to obtain a valid App Check token: ${appCheckToken.error.message}`\n );\n }\n }\n }\n\n if (url.apiSettings.getAuthToken) {\n const authToken = await url.apiSettings.getAuthToken();\n if (authToken) {\n headers.append('Authorization', `Firebase ${authToken.accessToken}`);\n }\n }\n\n return headers;\n}\n\nexport async function constructRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<{ url: string; fetchOptions: RequestInit }> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n return {\n url: url.toString(),\n fetchOptions: {\n method: 'POST',\n headers: await getHeaders(url),\n body\n }\n };\n}\n\nexport async function makeRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<Response> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n let response;\n let fetchTimeoutId: string | number | NodeJS.Timeout | undefined;\n try {\n const request = await constructRequest(\n model,\n task,\n apiSettings,\n stream,\n body,\n requestOptions\n );\n // Timeout is 180s by default\n const timeoutMillis =\n requestOptions?.timeout != null && requestOptions.timeout >= 0\n ? 
requestOptions.timeout\n : DEFAULT_FETCH_TIMEOUT_MS;\n const abortController = new AbortController();\n fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis);\n request.fetchOptions.signal = abortController.signal;\n\n response = await fetch(request.url, request.fetchOptions);\n if (!response.ok) {\n let message = '';\n let errorDetails;\n try {\n const json = await response.json();\n message = json.error.message;\n if (json.error.details) {\n message += ` ${JSON.stringify(json.error.details)}`;\n errorDetails = json.error.details;\n }\n } catch (e) {\n // ignored\n }\n if (\n response.status === 403 &&\n errorDetails &&\n errorDetails.some(\n (detail: ErrorDetails) => detail.reason === 'SERVICE_DISABLED'\n ) &&\n errorDetails.some((detail: ErrorDetails) =>\n (\n detail.links as Array<Record<string, string>>\n )?.[0]?.description.includes(\n 'Google developers console API activation'\n )\n )\n ) {\n throw new AIError(\n AIErrorCode.API_NOT_ENABLED,\n `The Firebase AI SDK requires the Firebase AI ` +\n `API ('firebasevertexai.googleapis.com') to be enabled in your ` +\n `Firebase project. Enable this API by visiting the Firebase Console ` +\n `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` +\n `and clicking \"Get started\". If you enabled this API recently, ` +\n `wait a few minutes for the action to propagate to our systems and ` +\n `then retry.`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n throw new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n } catch (e) {\n let err = e as Error;\n if (\n (e as AIError).code !== AIErrorCode.FETCH_ERROR &&\n (e as AIError).code !== AIErrorCode.API_NOT_ENABLED &&\n e instanceof Error\n ) {\n err = new AIError(\n AIErrorCode.ERROR,\n `Error fetching from ${url.toString()}: ${e.message}`\n );\n err.stack = e.stack;\n }\n\n throw err;\n } finally {\n if (fetchTimeoutId) {\n clearTimeout(fetchTimeoutId);\n }\n }\n return response;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n FinishReason,\n FunctionCall,\n GenerateContentCandidate,\n GenerateContentResponse,\n ImagenGCSImage,\n ImagenInlineImage,\n AIErrorCode,\n InlineDataPart,\n Part,\n InferenceSource\n} from '../types';\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport { ImagenResponseInternal } from '../types/internal';\n\n/**\n * Check that at least one candidate exists and does not have a bad\n * finish reason. 
Warns if multiple candidates exist.\n */\nfunction hasValidCandidates(response: GenerateContentResponse): boolean {\n if (response.candidates && response.candidates.length > 0) {\n if (response.candidates.length > 1) {\n logger.warn(\n `This response had ${response.candidates.length} ` +\n `candidates. Returning text from the first candidate only. ` +\n `Access response.candidates directly to use the other candidates.`\n );\n }\n if (hadBadFinishReason(response.candidates[0])) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Response error: ${formatBlockErrorMessage(\n response\n )}. Response body stored in error.response`,\n {\n response\n }\n );\n }\n return true;\n } else {\n return false;\n }\n}\n\n/**\n * Creates an EnhancedGenerateContentResponse object that has helper functions and\n * other modifications that improve usability.\n */\nexport function createEnhancedContentResponse(\n response: GenerateContentResponse,\n inferenceSource: InferenceSource = InferenceSource.IN_CLOUD\n): EnhancedGenerateContentResponse {\n /**\n * The Vertex AI backend omits default values.\n * This causes the `index` property to be omitted from the first candidate in the\n * response, since it has index 0, and 0 is a default value.\n * See: https://github.com/firebase/firebase-js-sdk/issues/8566\n */\n if (response.candidates && !response.candidates[0].hasOwnProperty('index')) {\n response.candidates[0].index = 0;\n }\n\n const responseWithHelpers = addHelpers(response);\n responseWithHelpers.inferenceSource = inferenceSource;\n return responseWithHelpers;\n}\n\n/**\n * Adds convenience helper methods to a response object, including stream\n * chunks (as long as each chunk is a complete GenerateContentResponse JSON).\n */\nexport function addHelpers(\n response: GenerateContentResponse\n): EnhancedGenerateContentResponse {\n (response as EnhancedGenerateContentResponse).text = () => {\n if (hasValidCandidates(response)) {\n return getText(response, part => !part.thought);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Text not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return '';\n };\n (response as EnhancedGenerateContentResponse).thoughtSummary = () => {\n if (hasValidCandidates(response)) {\n const result = getText(response, part => !!part.thought);\n return result === '' ? undefined : result;\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Thought summary not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).inlineDataParts = ():\n | InlineDataPart[]\n | undefined => {\n if (hasValidCandidates(response)) {\n return getInlineDataParts(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Data not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).functionCalls = () => {\n if (hasValidCandidates(response)) {\n return getFunctionCalls(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Function call not available. 
${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n return response as EnhancedGenerateContentResponse;\n}\n\n/**\n * Returns all text from the first candidate's parts, filtering by whether\n * `partFilter()` returns true.\n *\n * @param response - The `GenerateContentResponse` from which to extract text.\n * @param partFilter - Only return `Part`s for which this returns true\n */\nexport function getText(\n response: GenerateContentResponse,\n partFilter: (part: Part) => boolean\n): string {\n const textStrings = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.text && partFilter(part)) {\n textStrings.push(part.text);\n }\n }\n }\n if (textStrings.length > 0) {\n return textStrings.join('');\n } else {\n return '';\n }\n}\n\n/**\n * Returns every {@link FunctionCall} associated with first candidate.\n */\nexport function getFunctionCalls(\n response: GenerateContentResponse\n): FunctionCall[] | undefined {\n const functionCalls: FunctionCall[] = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.functionCall) {\n functionCalls.push(part.functionCall);\n }\n }\n }\n if (functionCalls.length > 0) {\n return functionCalls;\n } else {\n return undefined;\n }\n}\n\n/**\n * Returns every {@link InlineDataPart} in the first candidate if present.\n *\n * @internal\n */\nexport function getInlineDataParts(\n response: GenerateContentResponse\n): InlineDataPart[] | undefined {\n const data: InlineDataPart[] = [];\n\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.inlineData) {\n data.push(part);\n }\n }\n }\n\n if (data.length > 0) {\n return data;\n } else {\n return undefined;\n }\n}\n\nconst badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY];\n\nfunction hadBadFinishReason(candidate: GenerateContentCandidate): boolean {\n return (\n !!candidate.finishReason &&\n badFinishReasons.some(reason => reason === candidate.finishReason)\n );\n}\n\nexport function formatBlockErrorMessage(\n response: GenerateContentResponse\n): string {\n let message = '';\n if (\n (!response.candidates || response.candidates.length === 0) &&\n response.promptFeedback\n ) {\n message += 'Response was blocked';\n if (response.promptFeedback?.blockReason) {\n message += ` due to ${response.promptFeedback.blockReason}`;\n }\n if (response.promptFeedback?.blockReasonMessage) {\n message += `: ${response.promptFeedback.blockReasonMessage}`;\n }\n } else if (response.candidates?.[0]) {\n const firstCandidate = response.candidates[0];\n if (hadBadFinishReason(firstCandidate)) {\n message += `Candidate was blocked due to ${firstCandidate.finishReason}`;\n if (firstCandidate.finishMessage) {\n message += `: ${firstCandidate.finishMessage}`;\n }\n }\n }\n return message;\n}\n\n/**\n * Convert a generic successful fetch response body to an Imagen response object\n * that can be returned to the user. 
This converts the REST APIs response format to our\n * APIs representation of a response.\n *\n * @internal\n */\nexport async function handlePredictResponse<\n T extends ImagenInlineImage | ImagenGCSImage\n>(response: Response): Promise<{ images: T[]; filteredReason?: string }> {\n const responseJson: ImagenResponseInternal = await response.json();\n\n const images: T[] = [];\n let filteredReason: string | undefined = undefined;\n\n // The backend should always send a non-empty array of predictions if the response was successful.\n if (!responseJson.predictions || responseJson.predictions?.length === 0) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'\n );\n }\n\n for (const prediction of responseJson.predictions) {\n if (prediction.raiFilteredReason) {\n filteredReason = prediction.raiFilteredReason;\n } else if (prediction.mimeType && prediction.bytesBase64Encoded) {\n images.push({\n mimeType: prediction.mimeType,\n bytesBase64Encoded: prediction.bytesBase64Encoded\n } as T);\n } else if (prediction.mimeType && prediction.gcsUri) {\n images.push({\n mimeType: prediction.mimeType,\n gcsURI: prediction.gcsUri\n } as T);\n } else if (prediction.safetyAttributes) {\n // Ignore safetyAttributes \"prediction\" to avoid throwing an error below.\n } else {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Unexpected element in 'predictions' array in response: '${JSON.stringify(\n prediction\n )}'`\n );\n }\n }\n\n return { images, filteredReason };\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport {\n CitationMetadata,\n CountTokensRequest,\n GenerateContentCandidate,\n GenerateContentRequest,\n GenerateContentResponse,\n HarmSeverity,\n InlineDataPart,\n PromptFeedback,\n SafetyRating,\n AIErrorCode\n} from './types';\nimport {\n GoogleAIGenerateContentResponse,\n GoogleAIGenerateContentCandidate,\n GoogleAICountTokensRequest\n} from './types/googleai';\n\n/**\n * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).\n * The public API prioritizes the format used by the Vertex AI Gemini API.\n * We avoid having two sets of types by translating requests and responses between the two API formats.\n * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API\n * with minimal code changes.\n *\n * In here are functions that map requests and responses between the two API formats.\n * Requests in the Vertex AI format are mapped to the Google AI format before being sent.\n * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.\n */\n\n/**\n * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google 
AI.\n *\n * @param generateContentRequest The {@link GenerateContentRequest} to map.\n * @returns A {@link GenerateContentResponse} that conforms to the Google AI format.\n *\n * @throws If the request contains properties that are unsupported by Google AI.\n *\n * @internal\n */\nexport function mapGenerateContentRequest(\n generateContentRequest: GenerateContentRequest\n): GenerateContentRequest {\n generateContentRequest.safetySettings?.forEach(safetySetting => {\n if (safetySetting.method) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'SafetySetting.method is not supported in the the Gemini Developer API. Please remove this property.'\n );\n }\n });\n\n if (generateContentRequest.generationConfig?.topK) {\n const roundedTopK = Math.round(\n generateContentRequest.generationConfig.topK\n );\n\n if (roundedTopK !== generateContentRequest.generationConfig.topK) {\n logger.warn(\n 'topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.'\n );\n generateContentRequest.generationConfig.topK = roundedTopK;\n }\n }\n\n return generateContentRequest;\n}\n\n/**\n * Maps a {@link GenerateContentResponse} from Google AI to the format of the\n * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.\n *\n * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.\n * @returns A {@link GenerateContentResponse} that conforms to the public API's format.\n *\n * @internal\n */\nexport function mapGenerateContentResponse(\n googleAIResponse: GoogleAIGenerateContentResponse\n): GenerateContentResponse {\n const generateContentResponse = {\n candidates: googleAIResponse.candidates\n ? mapGenerateContentCandidates(googleAIResponse.candidates)\n : undefined,\n prompt: googleAIResponse.promptFeedback\n ? 
mapPromptFeedback(googleAIResponse.promptFeedback)\n : undefined,\n usageMetadata: googleAIResponse.usageMetadata\n };\n\n return generateContentResponse;\n}\n\n/**\n * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.\n *\n * @param countTokensRequest The {@link CountTokensRequest} to map.\n * @param model The model to count tokens with.\n * @returns A {@link CountTokensRequest} that conforms to the Google AI format.\n *\n * @internal\n */\nexport function mapCountTokensRequest(\n countTokensRequest: CountTokensRequest,\n model: string\n): GoogleAICountTokensRequest {\n const mappedCountTokensRequest: GoogleAICountTokensRequest = {\n generateContentRequest: {\n model,\n ...countTokensRequest\n }\n };\n\n return mappedCountTokensRequest;\n}\n\n/**\n * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms\n * to the Vertex AI API format.\n *\n * @param candidates The {@link GoogleAIGenerateContentCandidate} to map.\n * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format.\n *\n * @throws If any {@link Part} in the candidates has a `videoMetadata` property.\n *\n * @internal\n */\nexport function mapGenerateContentCandidates(\n candidates: GoogleAIGenerateContentCandidate[]\n): GenerateContentCandidate[] {\n const mappedCandidates: GenerateContentCandidate[] = [];\n let mappedSafetyRatings: SafetyRating[];\n if (mappedCandidates) {\n candidates.forEach(candidate => {\n // Map citationSources to citations.\n let citationMetadata: CitationMetadata | undefined;\n if (candidate.citationMetadata) {\n citationMetadata = {\n citations: candidate.citationMetadata.citationSources\n };\n }\n\n // Assign missing candidate SafetyRatings properties to their defaults if undefined.\n if (candidate.safetyRatings) {\n mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => {\n return {\n ...safetyRating,\n severity:\n safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 0\n };\n });\n }\n\n // videoMetadata is not supported.\n // Throw early since developers may send a long video as input and only expect to pay\n // for inference on a small portion of the video.\n if (\n candidate.content?.parts?.some(\n part => (part as InlineDataPart)?.videoMetadata\n )\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'\n );\n }\n\n const mappedCandidate = {\n index: candidate.index,\n content: candidate.content,\n finishReason: candidate.finishReason,\n finishMessage: candidate.finishMessage,\n safetyRatings: mappedSafetyRatings,\n citationMetadata,\n groundingMetadata: candidate.groundingMetadata,\n urlContextMetadata: candidate.urlContextMetadata\n };\n mappedCandidates.push(mappedCandidate);\n });\n }\n\n return mappedCandidates;\n}\n\nexport function mapPromptFeedback(\n promptFeedback: PromptFeedback\n): PromptFeedback {\n // Assign missing SafetyRating properties to their defaults if undefined.\n const mappedSafetyRatings: SafetyRating[] = [];\n promptFeedback.safetyRatings.forEach(safetyRating => {\n mappedSafetyRatings.push({\n category: safetyRating.category,\n probability: safetyRating.probability,\n severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 
0,\n blocked: safetyRating.blocked\n });\n });\n\n const mappedPromptFeedback: PromptFeedback = {\n blockReason: promptFeedback.blockReason,\n safetyRatings: mappedSafetyRatings,\n blockReasonMessage: promptFeedback.blockReasonMessage\n };\n return mappedPromptFeedback;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n GenerateContentCandidate,\n GenerateContentResponse,\n GenerateContentStreamResult,\n Part,\n AIErrorCode\n} from '../types';\nimport { AIError } from '../errors';\nimport { createEnhancedContentResponse } from './response-helpers';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { GoogleAIGenerateContentResponse } from '../types/googleai';\nimport { ApiSettings } from '../types/internal';\nimport {\n BackendType,\n InferenceSource,\n URLContextMetadata\n} from '../public-types';\n\nconst responseLineRE = /^data\\: (.*)(?:\\n\\n|\\r\\r|\\r\\n\\r\\n)/;\n\n/**\n * Process a response.body stream from the backend and return an\n * iterator that provides one complete GenerateContentResponse at a time\n * and a promise that resolves with a single aggregated\n * GenerateContentResponse.\n *\n * @param response - Response from a fetch call\n */\nexport function processStream(\n response: Response,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): GenerateContentStreamResult {\n const inputStream = response.body!.pipeThrough(\n new TextDecoderStream('utf8', { fatal: true })\n );\n const responseStream =\n getResponseStream<GenerateContentResponse>(inputStream);\n const [stream1, stream2] = responseStream.tee();\n return {\n stream: generateResponseSequence(stream1, apiSettings, inferenceSource),\n response: getResponsePromise(stream2, apiSettings, inferenceSource)\n };\n}\n\nasync function getResponsePromise(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): Promise<EnhancedGenerateContentResponse> {\n const allResponses: GenerateContentResponse[] = [];\n const reader = stream.getReader();\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n let generateContentResponse = aggregateResponses(allResponses);\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n generateContentResponse = GoogleAIMapper.mapGenerateContentResponse(\n generateContentResponse as GoogleAIGenerateContentResponse\n );\n }\n return createEnhancedContentResponse(\n generateContentResponse,\n inferenceSource\n );\n }\n\n allResponses.push(value);\n }\n}\n\nasync function* generateResponseSequence(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): AsyncGenerator<EnhancedGenerateContentResponse> {\n const reader = stream.getReader();\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n break;\n }\n\n let enhancedResponse: EnhancedGenerateContentResponse;\n if 
(apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n enhancedResponse = createEnhancedContentResponse(\n GoogleAIMapper.mapGenerateContentResponse(\n value as GoogleAIGenerateContentResponse\n ),\n inferenceSource\n );\n } else {\n enhancedResponse = createEnhancedContentResponse(value, inferenceSource);\n }\n\n const firstCandidate = enhancedResponse.candidates?.[0];\n // Don't yield a response with no useful data for the developer.\n if (\n !firstCandidate?.content?.parts &&\n !firstCandidate?.finishReason &&\n !firstCandidate?.citationMetadata &&\n !firstCandidate?.urlContextMetadata\n ) {\n continue;\n }\n\n yield enhancedResponse;\n }\n}\n\n/**\n * Reads a raw stream from the fetch response and join incomplete\n * chunks, returning a new stream that provides a single complete\n * GenerateContentResponse in each iteration.\n */\nexport function getResponseStream<T>(\n inputStream: ReadableStream<string>\n): ReadableStream<T> {\n const reader = inputStream.getReader();\n const stream = new ReadableStream<T>({\n start(controller) {\n let currentText = '';\n return pump();\n function pump(): Promise<(() => Promise<void>) | undefined> {\n return reader.read().then(({ value, done }) => {\n if (done) {\n if (currentText.trim()) {\n controller.error(\n new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')\n );\n return;\n }\n controller.close();\n return;\n }\n\n currentText += value;\n let match = currentText.match(responseLineRE);\n let parsedResponse: T;\n while (match) {\n try {\n parsedResponse = JSON.parse(match[1]);\n } catch (e) {\n controller.error(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing JSON response: \"${match[1]}`\n )\n );\n return;\n }\n controller.enqueue(parsedResponse);\n currentText = currentText.substring(match[0].length);\n match = currentText.match(responseLineRE);\n }\n return pump();\n });\n }\n }\n });\n return stream;\n}\n\n/**\n * Aggregates an array of `GenerateContentResponse`s into a single\n * GenerateContentResponse.\n */\nexport function aggregateResponses(\n responses: GenerateContentResponse[]\n): GenerateContentResponse {\n const lastResponse = responses[responses.length - 1];\n const aggregatedResponse: GenerateContentResponse = {\n promptFeedback: lastResponse?.promptFeedback\n };\n for (const response of responses) {\n if (response.candidates) {\n for (const candidate of response.candidates) {\n // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined.\n // See: https://github.com/firebase/firebase-js-sdk/issues/8566\n const i = candidate.index || 0;\n if (!aggregatedResponse.candidates) {\n aggregatedResponse.candidates = [];\n }\n if (!aggregatedResponse.candidates[i]) {\n aggregatedResponse.candidates[i] = {\n index: candidate.index\n } as GenerateContentCandidate;\n }\n // Keep overwriting, the last one will be final\n aggregatedResponse.candidates[i].citationMetadata =\n candidate.citationMetadata;\n aggregatedResponse.candidates[i].finishReason = candidate.finishReason;\n aggregatedResponse.candidates[i].finishMessage =\n candidate.finishMessage;\n aggregatedResponse.candidates[i].safetyRatings =\n candidate.safetyRatings;\n aggregatedResponse.candidates[i].groundingMetadata =\n candidate.groundingMetadata;\n\n // The urlContextMetadata object is defined in the first chunk of the response stream.\n // In all subsequent chunks, the urlContextMetadata object will be undefined. 
We need to\n // make sure that we don't overwrite the first value urlContextMetadata object with undefined.\n // FIXME: What happens if we receive a second, valid urlContextMetadata object?\n const urlContextMetadata = candidate.urlContextMetadata as unknown;\n if (\n typeof urlContextMetadata === 'object' &&\n urlContextMetadata !== null &&\n Object.keys(urlContextMetadata).length > 0\n ) {\n aggregatedResponse.candidates[i].urlContextMetadata =\n urlContextMetadata as URLContextMetadata;\n }\n\n /**\n * Candidates should always have content and parts, but this handles\n * possible malformed responses.\n */\n if (candidate.content) {\n // Skip a candidate without parts.\n if (!candidate.content.parts) {\n continue;\n }\n if (!aggregatedResponse.candidates[i].content) {\n aggregatedResponse.candidates[i].content = {\n role: candidate.content.role || 'user',\n parts: []\n };\n }\n for (const part of candidate.content.parts) {\n const newPart: Part = { ...part };\n // The backend can send empty text parts. If these are sent back\n // (e.g. in chat history), the backend will respond with an error.\n // To prevent this, ignore empty text parts.\n if (part.text === '') {\n continue;\n }\n if (Object.keys(newPart).length > 0) {\n aggregatedResponse.candidates[i].content.parts.push(\n newPart as Part\n );\n }\n }\n }\n }\n }\n }\n return aggregatedResponse;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n GenerateContentRequest,\n InferenceMode,\n AIErrorCode,\n ChromeAdapter,\n InferenceSource\n} from '../types';\nimport { ChromeAdapterImpl } from '../methods/chrome-adapter';\n\nconst errorsCausingFallback: AIErrorCode[] = [\n // most network errors\n AIErrorCode.FETCH_ERROR,\n // fallback code for all other errors in makeRequest\n AIErrorCode.ERROR,\n // error due to API not being enabled in project\n AIErrorCode.API_NOT_ENABLED\n];\n\ninterface CallResult<Response> {\n response: Response;\n inferenceSource: InferenceSource;\n}\n\n/**\n * Dispatches a request to the appropriate backend (on-device or in-cloud)\n * based on the inference mode.\n *\n * @param request - The request to be sent.\n * @param chromeAdapter - The on-device model adapter.\n * @param onDeviceCall - The function to call for on-device inference.\n * @param inCloudCall - The function to call for in-cloud inference.\n * @returns The response from the backend.\n */\nexport async function callCloudOrDevice<Response>(\n request: GenerateContentRequest,\n chromeAdapter: ChromeAdapter | undefined,\n onDeviceCall: () => Promise<Response>,\n inCloudCall: () => Promise<Response>\n): Promise<CallResult<Response>> {\n if (!chromeAdapter) {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n }\n switch ((chromeAdapter as ChromeAdapterImpl).mode) {\n case InferenceMode.ONLY_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await 
onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.'\n );\n case InferenceMode.ONLY_IN_CLOUD:\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n case InferenceMode.PREFER_IN_CLOUD:\n try {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n } catch (e) {\n if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw e;\n }\n case InferenceMode.PREFER_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Unexpected infererence mode: ${\n (chromeAdapter as ChromeAdapterImpl).mode\n }`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n GenerateContentRequest,\n GenerateContentResponse,\n GenerateContentResult,\n GenerateContentStreamResult,\n RequestOptions\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createEnhancedContentResponse } from '../requests/response-helpers';\nimport { processStream } from '../requests/stream-reader';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { callCloudOrDevice } from '../requests/hybrid-helpers';\n\nasync function generateContentStreamOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.STREAM_GENERATE_CONTENT,\n apiSettings,\n /* stream */ true,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContentStream(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentStreamResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContentStream(params),\n () =>\n generateContentStreamOnCloud(apiSettings, model, params, requestOptions)\n );\n return processStream(callResult.response, apiSettings); // TODO: Map streaming responses\n}\n\nasync function generateContentOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n 
if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.GENERATE_CONTENT,\n apiSettings,\n /* stream */ false,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContent(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContent(params),\n () => generateContentOnCloud(apiSettings, model, params, requestOptions)\n );\n const generateContentResponse = await processGenerateContentResponse(\n callResult.response,\n apiSettings\n );\n const enhancedResponse = createEnhancedContentResponse(\n generateContentResponse,\n callResult.inferenceSource\n );\n return {\n response: enhancedResponse\n };\n}\n\nasync function processGenerateContentResponse(\n response: Response,\n apiSettings: ApiSettings\n): Promise<GenerateContentResponse> {\n const responseJson = await response.json();\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return GoogleAIMapper.mapGenerateContentResponse(responseJson);\n } else {\n return responseJson;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, GenerateContentRequest, Part, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ImagenGenerationParams, PredictRequestBody } from '../types/internal';\n\nexport function formatSystemInstruction(\n input?: string | Part | Content\n): Content | undefined {\n // null or undefined\n if (input == null) {\n return undefined;\n } else if (typeof input === 'string') {\n return { role: 'system', parts: [{ text: input }] } as Content;\n } else if ((input as Part).text) {\n return { role: 'system', parts: [input as Part] };\n } else if ((input as Content).parts) {\n if (!(input as Content).role) {\n return { role: 'system', parts: (input as Content).parts };\n } else {\n return input as Content;\n }\n }\n}\n\nexport function formatNewContent(\n request: string | Array<string | Part>\n): Content {\n let newParts: Part[] = [];\n if (typeof request === 'string') {\n newParts = [{ text: request }];\n } else {\n for (const partOrString of request) {\n if (typeof partOrString === 'string') {\n newParts.push({ text: partOrString });\n } else {\n newParts.push(partOrString);\n }\n }\n }\n return assignRoleToPartsAndValidateSendMessageRequest(newParts);\n}\n\n/**\n * When multiple Part types (i.e. FunctionResponsePart and TextPart) are\n * passed in a single Part array, we may need to assign different roles to each\n * part. 
Currently only FunctionResponsePart requires a role other than 'user'.\n * @private\n * @param parts Array of parts to pass to the model\n * @returns Array of content items\n */\nfunction assignRoleToPartsAndValidateSendMessageRequest(\n parts: Part[]\n): Content {\n const userContent: Content = { role: 'user', parts: [] };\n const functionContent: Content = { role: 'function', parts: [] };\n let hasUserContent = false;\n let hasFunctionContent = false;\n for (const part of parts) {\n if ('functionResponse' in part) {\n functionContent.parts.push(part);\n hasFunctionContent = true;\n } else {\n userContent.parts.push(part);\n hasUserContent = true;\n }\n }\n\n if (hasUserContent && hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'\n );\n }\n\n if (!hasUserContent && !hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'No Content is provided for sending chat message.'\n );\n }\n\n if (hasUserContent) {\n return userContent;\n }\n\n return functionContent;\n}\n\nexport function formatGenerateContentInput(\n params: GenerateContentRequest | string | Array<string | Part>\n): GenerateContentRequest {\n let formattedRequest: GenerateContentRequest;\n if ((params as GenerateContentRequest).contents) {\n formattedRequest = params as GenerateContentRequest;\n } else {\n // Array or string\n const content = formatNewContent(params as string | Array<string | Part>);\n formattedRequest = { contents: [content] };\n }\n if ((params as GenerateContentRequest).systemInstruction) {\n formattedRequest.systemInstruction = formatSystemInstruction(\n (params as GenerateContentRequest).systemInstruction\n );\n }\n return formattedRequest;\n}\n\n/**\n * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format\n * that is expected from the REST API.\n *\n * @internal\n */\nexport function createPredictRequestBody(\n prompt: string,\n {\n gcsURI,\n imageFormat,\n addWatermark,\n numberOfImages = 1,\n negativePrompt,\n aspectRatio,\n safetyFilterLevel,\n personFilterLevel\n }: ImagenGenerationParams\n): PredictRequestBody {\n // Properties that are undefined will be omitted from the JSON string that is sent in the request.\n const body: PredictRequestBody = {\n instances: [\n {\n prompt\n }\n ],\n parameters: {\n storageUri: gcsURI,\n negativePrompt,\n sampleCount: numberOfImages,\n aspectRatio,\n outputOptions: imageFormat,\n addWatermark,\n safetyFilterLevel,\n personGeneration: personFilterLevel,\n includeRaiReason: true,\n includeSafetyAttributes: true\n }\n };\n return body;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, POSSIBLE_ROLES, Part, Role, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\n\n// https://ai.google.dev/api/rest/v1beta/Content#part\n\nconst VALID_PART_FIELDS: Array<keyof Part> = 
[\n 'text',\n 'inlineData',\n 'functionCall',\n 'functionResponse',\n 'thought',\n 'thoughtSignature'\n];\n\nconst VALID_PARTS_PER_ROLE: { [key in Role]: Array<keyof Part> } = {\n user: ['text', 'inlineData'],\n function: ['functionResponse'],\n model: ['text', 'functionCall', 'thought', 'thoughtSignature'],\n // System instructions shouldn't be in history anyway.\n system: ['text']\n};\n\nconst VALID_PREVIOUS_CONTENT_ROLES: { [key in Role]: Role[] } = {\n user: ['model'],\n function: ['model'],\n model: ['user', 'function'],\n // System instructions shouldn't be in history.\n system: []\n};\n\nexport function validateChatHistory(history: Content[]): void {\n let prevContent: Content | null = null;\n for (const currContent of history) {\n const { role, parts } = currContent;\n if (!prevContent && role !== 'user') {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `First Content should be with role 'user', got ${role}`\n );\n }\n if (!POSSIBLE_ROLES.includes(role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(\n POSSIBLE_ROLES\n )}`\n );\n }\n\n if (!Array.isArray(parts)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content should have 'parts' property with an array of Parts`\n );\n }\n\n if (parts.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each Content should have at least one part`\n );\n }\n\n const countFields: Record<keyof Part, number> = {\n text: 0,\n inlineData: 0,\n functionCall: 0,\n functionResponse: 0,\n thought: 0,\n thoughtSignature: 0,\n executableCode: 0,\n codeExecutionResult: 0\n };\n\n for (const part of parts) {\n for (const key of VALID_PART_FIELDS) {\n if (key in part) {\n countFields[key] += 1;\n }\n }\n }\n const validParts = VALID_PARTS_PER_ROLE[role];\n for (const key of VALID_PART_FIELDS) {\n if (!validParts.includes(key) && countFields[key] > 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't contain '${key}' part`\n );\n }\n }\n\n if (prevContent) {\n const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role];\n if (!validPreviousContentRoles.includes(prevContent.role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't follow '${\n prevContent.role\n }'. 
Valid previous roles: ${JSON.stringify(\n VALID_PREVIOUS_CONTENT_ROLES\n )}`\n );\n }\n }\n prevContent = currContent;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n Content,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n Part,\n RequestOptions,\n StartChatParams\n} from '../types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { formatBlockErrorMessage } from '../requests/response-helpers';\nimport { validateChatHistory } from './chat-session-helpers';\nimport { generateContent, generateContentStream } from './generate-content';\nimport { ApiSettings } from '../types/internal';\nimport { logger } from '../logger';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Do not log a message for this error.\n */\nconst SILENT_ERROR = 'SILENT_ERROR';\n\n/**\n * ChatSession class that enables sending chat messages and stores\n * history of sent and received messages so far.\n *\n * @public\n */\nexport class ChatSession {\n private _apiSettings: ApiSettings;\n private _history: Content[] = [];\n private _sendPromise: Promise<void> = Promise.resolve();\n\n constructor(\n apiSettings: ApiSettings,\n public model: string,\n private chromeAdapter?: ChromeAdapter,\n public params?: StartChatParams,\n public requestOptions?: RequestOptions\n ) {\n this._apiSettings = apiSettings;\n if (params?.history) {\n validateChatHistory(params.history);\n this._history = params.history;\n }\n }\n\n /**\n * Gets the chat history so far. 
Blocked prompts are not added to history.\n * Neither blocked candidates nor the prompts that generated them are added\n * to history.\n */\n async getHistory(): Promise<Content[]> {\n await this._sendPromise;\n return this._history;\n }\n\n /**\n * Sends a chat message and receives a non-streaming\n * {@link GenerateContentResult}\n */\n async sendMessage(\n request: string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n let finalResult = {} as GenerateContentResult;\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() =>\n generateContent(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n )\n )\n .then(result => {\n if (\n result.response.candidates &&\n result.response.candidates.length > 0\n ) {\n this._history.push(newContent);\n const responseContent: Content = {\n parts: result.response.candidates?.[0].content.parts || [],\n // Response seems to come back without a role set.\n role: result.response.candidates?.[0].content.role || 'model'\n };\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(result.response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`\n );\n }\n }\n finalResult = result;\n });\n await this._sendPromise;\n return finalResult;\n }\n\n /**\n * Sends a chat message and receives the response as a\n * {@link GenerateContentStreamResult} containing an iterable stream\n * and a response promise.\n */\n async sendMessageStream(\n request: string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n const streamPromise = generateContentStream(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n );\n\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() => streamPromise)\n // This must be handled to avoid unhandled rejection, but jump\n // to the final catch block with a label to not log this error.\n .catch(_ignored => {\n throw new Error(SILENT_ERROR);\n })\n .then(streamResult => streamResult.response)\n .then(response => {\n if (response.candidates && response.candidates.length > 0) {\n this._history.push(newContent);\n const responseContent = { ...response.candidates[0].content };\n // Response seems to come back without a role set.\n if (!responseContent.role) {\n responseContent.role = 'model';\n }\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessageStream() was unsuccessful. ${blockErrorMessage}. 
Inspect response object for details.`\n );\n }\n }\n })\n .catch(e => {\n // Errors in streamPromise are already catchable by the user as\n // streamPromise is returned.\n // Avoid duplicating the error message in logs.\n if (e.message !== SILENT_ERROR) {\n // Users do not have access to _sendPromise to catch errors\n // downstream from streamPromise, so they should not throw.\n logger.error(e);\n }\n });\n return streamPromise;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n CountTokensRequest,\n CountTokensResponse,\n InferenceMode,\n RequestOptions,\n AIErrorCode\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { ChromeAdapterImpl } from './chrome-adapter';\n\nexport async function countTokensOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n let body: string = '';\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n const mappedParams = GoogleAIMapper.mapCountTokensRequest(params, model);\n body = JSON.stringify(mappedParams);\n } else {\n body = JSON.stringify(params);\n }\n const response = await makeRequest(\n model,\n Task.COUNT_TOKENS,\n apiSettings,\n false,\n body,\n requestOptions\n );\n return response.json();\n}\n\nexport async function countTokens(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n if (\n (chromeAdapter as ChromeAdapterImpl)?.mode === InferenceMode.ONLY_ON_DEVICE\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'countTokens() is not supported for on-device models.'\n );\n }\n return countTokensOnCloud(apiSettings, model, params, requestOptions);\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n generateContent,\n generateContentStream\n} from '../methods/generate-content';\nimport {\n Content,\n CountTokensRequest,\n CountTokensResponse,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n GenerationConfig,\n ModelParams,\n 
Part,\n RequestOptions,\n SafetySetting,\n StartChatParams,\n Tool,\n ToolConfig\n} from '../types';\nimport { ChatSession } from '../methods/chat-session';\nimport { countTokens } from '../methods/count-tokens';\nimport {\n formatGenerateContentInput,\n formatSystemInstruction\n} from '../requests/request-helpers';\nimport { AI } from '../public-types';\nimport { AIModel } from './ai-model';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Class for generative model APIs.\n * @public\n */\nexport class GenerativeModel extends AIModel {\n generationConfig: GenerationConfig;\n safetySettings: SafetySetting[];\n requestOptions?: RequestOptions;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n constructor(\n ai: AI,\n modelParams: ModelParams,\n requestOptions?: RequestOptions,\n private chromeAdapter?: ChromeAdapter\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.safetySettings = modelParams.safetySettings || [];\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n this.requestOptions = requestOptions || {};\n }\n\n /**\n * Makes a single non-streaming call to the model\n * and returns an object containing a single {@link GenerateContentResponse}.\n */\n async generateContent(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContent(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Makes a single streaming call to the model\n * and returns an object containing an iterable stream that iterates\n * over all chunks in the streaming response as well as\n * a promise that returns the final aggregated response.\n */\n async generateContentStream(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContentStream(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Gets a new {@link ChatSession} instance which can be used for\n * multi-turn chats.\n */\n startChat(startChatParams?: StartChatParams): ChatSession {\n return new ChatSession(\n this._apiSettings,\n this.model,\n this.chromeAdapter,\n {\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n /**\n * Overrides params inherited from GenerativeModel with those explicitly set in the\n * StartChatParams. 
For example, if startChatParams.generationConfig is set, it'll override\n * this.generationConfig.\n */\n ...startChatParams\n },\n this.requestOptions\n );\n }\n\n /**\n * Counts the tokens in the provided request.\n */\n async countTokens(\n request: CountTokensRequest | string | Array<string | Part>\n ): Promise<CountTokensResponse> {\n const formattedParams = formatGenerateContentInput(request);\n return countTokens(\n this._apiSettings,\n this.model,\n formattedParams,\n this.chromeAdapter\n );\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n AIErrorCode,\n FunctionResponse,\n GenerativeContentBlob,\n LiveResponseType,\n LiveServerContent,\n LiveServerToolCall,\n LiveServerToolCallCancellation,\n Part\n} from '../public-types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { AIError } from '../errors';\nimport { WebSocketHandler } from '../websocket';\nimport { logger } from '../logger';\nimport {\n _LiveClientContent,\n _LiveClientRealtimeInput,\n _LiveClientToolResponse\n} from '../types/live-responses';\n\n/**\n * Represents an active, real-time, bidirectional conversation with the model.\n *\n * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.\n *\n * @beta\n */\nexport class LiveSession {\n /**\n * Indicates whether this Live session is closed.\n *\n * @beta\n */\n isClosed = false;\n /**\n * Indicates whether this Live session is being controlled by an `AudioConversationController`.\n *\n * @beta\n */\n inConversation = false;\n\n /**\n * @internal\n */\n constructor(\n private webSocketHandler: WebSocketHandler,\n private serverMessages: AsyncGenerator<unknown>\n ) {}\n\n /**\n * Sends content to the server.\n *\n * @param request - The message to send to the model.\n * @param turnComplete - Indicates if the turn is complete. 
Defaults to false.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async send(\n request: string | Array<string | Part>,\n turnComplete = true\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const newContent = formatNewContent(request);\n\n const message: _LiveClientContent = {\n clientContent: {\n turns: [newContent],\n turnComplete\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends text to the server in realtime.\n *\n * @example\n * ```javascript\n * liveSession.sendTextRealtime(\"Hello, how are you?\");\n * ```\n *\n * @param text - The text data to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendTextRealtime(text: string): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n text\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends audio data to the server in realtime.\n *\n * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz\n * little-endian.\n *\n * @example\n * ```javascript\n * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.\n * const blob = { mimeType: \"audio/pcm\", data: pcmData };\n * liveSession.sendAudioRealtime(blob);\n * ```\n *\n * @param blob - The base64-encoded PCM data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendAudioRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n audio: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends video data to the server in realtime.\n *\n * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It\n * is recommended to set `mimeType` to `image/jpeg`.\n *\n * @example\n * ```javascript\n * // const videoFrame = ... 
base64-encoded JPEG data\n * const blob = { mimeType: \"image/jpeg\", data: videoFrame };\n * liveSession.sendVideoRealtime(blob);\n * ```\n * @param blob - The base64-encoded video data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendVideoRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n video: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends function responses to the server.\n *\n * @param functionResponses - The function responses to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendFunctionResponses(\n functionResponses: FunctionResponse[]\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientToolResponse = {\n toolResponse: {\n functionResponses\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Yields messages received from the server.\n * This can only be used by one consumer at a time.\n *\n * @returns An `AsyncGenerator` that yields server messages as they arrive.\n * @throws If the session is already closed, or if we receive a response that we don't support.\n *\n * @beta\n */\n async *receive(): AsyncGenerator<\n LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation\n > {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot read from a Live session that is closed. Try starting a new Live session.'\n );\n }\n for await (const message of this.serverMessages) {\n if (message && typeof message === 'object') {\n if (LiveResponseType.SERVER_CONTENT in message) {\n yield {\n type: 'serverContent',\n ...(message as { serverContent: Omit<LiveServerContent, 'type'> })\n .serverContent\n } as LiveServerContent;\n } else if (LiveResponseType.TOOL_CALL in message) {\n yield {\n type: 'toolCall',\n ...(message as { toolCall: Omit<LiveServerToolCall, 'type'> })\n .toolCall\n } as LiveServerToolCall;\n } else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) {\n yield {\n type: 'toolCallCancellation',\n ...(\n message as {\n toolCallCancellation: Omit<\n LiveServerToolCallCancellation,\n 'type'\n >;\n }\n ).toolCallCancellation\n } as LiveServerToolCallCancellation;\n } else {\n logger.warn(\n `Received an unknown message type from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n } else {\n logger.warn(\n `Received an invalid message from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n }\n }\n\n /**\n * Closes this session.\n * All methods on this session will throw an error once this resolves.\n *\n * @beta\n */\n async close(): Promise<void> {\n if (!this.isClosed) {\n this.isClosed = true;\n await this.webSocketHandler.close(1000, 'Client closed session.');\n }\n }\n\n /**\n * Sends realtime input to the server.\n *\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * @param mediaChunks - The media chunks to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed 
and cannot be used.'\n );\n }\n\n // The backend does not support sending more than one mediaChunk in one message.\n // Work around this limitation by sending mediaChunks in separate messages.\n mediaChunks.forEach(mediaChunk => {\n const message: _LiveClientRealtimeInput = {\n realtimeInput: { mediaChunks: [mediaChunk] }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n });\n }\n\n /**\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * Sends a stream of {@link GenerativeContentBlob}.\n *\n * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaStream(\n mediaChunkStream: ReadableStream<GenerativeContentBlob>\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const reader = mediaChunkStream.getReader();\n while (true) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n break;\n } else if (!value) {\n throw new Error('Missing chunk in reader, but reader is not done.');\n }\n\n await this.sendMediaChunks([value]);\n } catch (e) {\n // Re-throw any errors that occur during stream consumption or sending.\n const message =\n e instanceof Error ? e.message : 'Error processing media stream.';\n throw new AIError(AIErrorCode.REQUEST_ERROR, message);\n }\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIModel } from './ai-model';\nimport { LiveSession } from '../methods/live-session';\nimport { AIError } from '../errors';\nimport {\n AI,\n AIErrorCode,\n BackendType,\n Content,\n LiveGenerationConfig,\n LiveModelParams,\n Tool,\n ToolConfig\n} from '../public-types';\nimport { WebSocketHandler } from '../websocket';\nimport { WebSocketUrl } from '../requests/request';\nimport { formatSystemInstruction } from '../requests/request-helpers';\nimport { _LiveClientSetup } from '../types/live-responses';\n\n/**\n * Class for Live generative model APIs. 
The Live API enables low-latency, two-way multimodal\n * interactions with Gemini.\n *\n * This class should only be instantiated with {@link getLiveGenerativeModel}.\n *\n * @beta\n */\nexport class LiveGenerativeModel extends AIModel {\n generationConfig: LiveGenerationConfig;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n /**\n * @internal\n */\n constructor(\n ai: AI,\n modelParams: LiveModelParams,\n /**\n * @internal\n */\n private _webSocketHandler: WebSocketHandler\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n }\n\n /**\n * Starts a {@link LiveSession}.\n *\n * @returns A {@link LiveSession}.\n * @throws If the connection failed to be established with the server.\n *\n * @beta\n */\n async connect(): Promise<LiveSession> {\n const url = new WebSocketUrl(this._apiSettings);\n await this._webSocketHandler.connect(url.toString());\n\n let fullModelPath: string;\n if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n fullModelPath = `projects/${this._apiSettings.project}/${this.model}`;\n } else {\n fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`;\n }\n\n // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API,\n // but the backend expects them to be in the `setup` message.\n const {\n inputAudioTranscription,\n outputAudioTranscription,\n ...generationConfig\n } = this.generationConfig;\n\n const setupMessage: _LiveClientSetup = {\n setup: {\n model: fullModelPath,\n generationConfig,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n inputAudioTranscription,\n outputAudioTranscription\n }\n };\n\n try {\n // Begin listening for server messages, and begin the handshake by sending the 'setupMessage'\n const serverMessages = this._webSocketHandler.listen();\n this._webSocketHandler.send(JSON.stringify(setupMessage));\n\n // Verify we received the handshake response 'setupComplete'\n const firstMessage = (await serverMessages.next()).value;\n if (\n !firstMessage ||\n !(typeof firstMessage === 'object') ||\n !('setupComplete' in firstMessage)\n ) {\n await this._webSocketHandler.close(1011, 'Handshake failure');\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'Server connection handshake failed. 
The server did not respond with a setupComplete message.'\n );\n }\n\n return new LiveSession(this._webSocketHandler, serverMessages);\n } catch (e) {\n // Ensure connection is closed on any setup error\n await this._webSocketHandler.close();\n throw e;\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AI } from '../public-types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createPredictRequestBody } from '../requests/request-helpers';\nimport { handlePredictResponse } from '../requests/response-helpers';\nimport {\n ImagenGCSImage,\n ImagenGenerationConfig,\n ImagenInlineImage,\n RequestOptions,\n ImagenModelParams,\n ImagenGenerationResponse,\n ImagenSafetySettings\n} from '../types';\nimport { AIModel } from './ai-model';\n\n/**\n * Class for Imagen model APIs.\n *\n * This class provides methods for generating images using the Imagen model.\n *\n * @example\n * ```javascript\n * const imagen = new ImagenModel(\n * ai,\n * {\n * model: 'imagen-3.0-generate-002'\n * }\n * );\n *\n * const response = await imagen.generateImages('A photo of a cat');\n * if (response.images.length > 0) {\n * console.log(response.images[0].bytesBase64Encoded);\n * }\n * ```\n *\n * @public\n */\nexport class ImagenModel extends AIModel {\n /**\n * The Imagen generation configuration.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n\n /**\n * Constructs a new instance of the {@link ImagenModel} class.\n *\n * @param ai - an {@link AI} instance.\n * @param modelParams - Parameters to use when making requests to Imagen.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n */\n constructor(\n ai: AI,\n modelParams: ImagenModelParams,\n public requestOptions?: RequestOptions\n ) {\n const { model, generationConfig, safetySettings } = modelParams;\n super(ai, model);\n this.generationConfig = generationConfig;\n this.safetySettings = safetySettings;\n }\n\n /**\n * Generates images using the Imagen model and returns them as\n * base64-encoded strings.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the generated images.\n *\n * @throws If the request to generate images fails. 
This happens if the\n * prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n *\n * @public\n */\n async generateImages(\n prompt: string\n ): Promise<ImagenGenerationResponse<ImagenInlineImage>> {\n const body = createPredictRequestBody(prompt, {\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenInlineImage>(response);\n }\n\n /**\n * Generates images to Cloud Storage for Firebase using the Imagen model.\n *\n * @internal This method is temporarily internal.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket.\n * This should be a directory. For example, `gs://my-bucket/my-directory/`.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the URLs of the generated images.\n *\n * @throws If the request fails to generate images fails. This happens if\n * the prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n */\n async generateImagesGCS(\n prompt: string,\n gcsURI: string\n ): Promise<ImagenGenerationResponse<ImagenGCSImage>> {\n const body = createPredictRequestBody(prompt, {\n gcsURI,\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenGCSImage>(response);\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport { AIErrorCode } from './types';\n\n/**\n * A standardized interface for interacting with a WebSocket connection.\n * This abstraction allows the SDK to use the appropriate WebSocket implementation\n * for the current JS environment (Browser vs. 
Node) without\n * changing the core logic of the `LiveSession`.\n * @internal\n */\n\nexport interface WebSocketHandler {\n /**\n * Establishes a connection to the given URL.\n *\n * @param url The WebSocket URL (e.g., wss://...).\n * @returns A promise that resolves on successful connection or rejects on failure.\n */\n connect(url: string): Promise<void>;\n\n /**\n * Sends data over the WebSocket.\n *\n * @param data The string or binary data to send.\n */\n send(data: string | ArrayBuffer): void;\n\n /**\n * Returns an async generator that yields parsed JSON objects from the server.\n * The yielded type is `unknown` because the handler cannot guarantee the shape of the data.\n * The consumer is responsible for type validation.\n * The generator terminates when the connection is closed.\n *\n * @returns A generator that allows consumers to pull messages using a `for await...of` loop.\n */\n listen(): AsyncGenerator<unknown>;\n\n /**\n * Closes the WebSocket connection.\n *\n * @param code - A numeric status code explaining why the connection is closing.\n * @param reason - A human-readable string explaining why the connection is closing.\n */\n close(code?: number, reason?: string): Promise<void>;\n}\n\n/**\n * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.\n *\n * @internal\n */\nexport class WebSocketHandlerImpl implements WebSocketHandler {\n private ws?: WebSocket;\n\n constructor() {\n if (typeof WebSocket === 'undefined') {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'The WebSocket API is not available in this environment. ' +\n 'The \"Live\" feature is not supported here. It is supported in ' +\n 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'\n );\n }\n }\n\n connect(url: string): Promise<void> {\n return new Promise((resolve, reject) => {\n this.ws = new WebSocket(url);\n this.ws.binaryType = 'blob'; // Only important to set in Node\n this.ws.addEventListener('open', () => resolve(), { once: true });\n this.ws.addEventListener(\n 'error',\n () =>\n reject(\n new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error event raised on WebSocket`\n )\n ),\n { once: true }\n );\n this.ws!.addEventListener('close', (closeEvent: CloseEvent) => {\n if (closeEvent.reason) {\n logger.warn(\n `WebSocket connection closed by server. Reason: '${closeEvent.reason}'`\n );\n }\n });\n });\n }\n\n send(data: string | ArrayBuffer): void {\n if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {\n throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.');\n }\n this.ws.send(data);\n }\n\n async *listen(): AsyncGenerator<unknown> {\n if (!this.ws) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'WebSocket is not connected.'\n );\n }\n\n const messageQueue: unknown[] = [];\n const errorQueue: Error[] = [];\n let resolvePromise: (() => void) | null = null;\n let isClosed = false;\n\n const messageListener = async (event: MessageEvent): Promise<void> => {\n let data: string;\n if (event.data instanceof Blob) {\n data = await event.data.text();\n } else if (typeof event.data === 'string') {\n data = event.data;\n } else {\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`\n )\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n return;\n }\n\n try {\n const obj = JSON.parse(data) as unknown;\n messageQueue.push(obj);\n } catch (e) {\n const err = e as Error;\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing WebSocket message to JSON: ${err.message}`\n )\n );\n }\n\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const errorListener = (): void => {\n errorQueue.push(\n new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const closeListener = (event: CloseEvent): void => {\n if (event.reason) {\n logger.warn(\n `WebSocket connection closed by the server with reason: ${event.reason}`\n );\n }\n isClosed = true;\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n // Clean up listeners to prevent memory leaks\n this.ws?.removeEventListener('message', messageListener);\n this.ws?.removeEventListener('close', closeListener);\n this.ws?.removeEventListener('error', errorListener);\n };\n\n this.ws.addEventListener('message', messageListener);\n this.ws.addEventListener('close', closeListener);\n this.ws.addEventListener('error', errorListener);\n\n while (!isClosed) {\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n if (messageQueue.length > 0) {\n yield messageQueue.shift()!;\n } else {\n await new Promise<void>(resolve => {\n resolvePromise = resolve;\n });\n }\n }\n\n // If the loop terminated because isClosed is true, check for any final errors\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n }\n\n close(code?: number, reason?: string): Promise<void> {\n return new Promise(resolve => {\n if (!this.ws) {\n return resolve();\n }\n\n this.ws.addEventListener('close', () => resolve(), { once: true });\n // Calling 'close' during these states results in an error.\n if (\n this.ws.readyState === WebSocket.CLOSED ||\n this.ws.readyState === WebSocket.CONNECTING\n ) {\n return resolve();\n }\n\n if (this.ws.readyState !== WebSocket.CLOSING) {\n this.ws.close(code, reason);\n }\n });\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode } from '../types';\nimport {\n SchemaInterface,\n SchemaType,\n SchemaParams,\n SchemaRequest\n} from '../types/schema';\n\n/**\n * Parent class encompassing all Schema types, with static methods that\n * allow building specific Schema types. This class can be converted with\n * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.\n * (This string conversion is automatically done when calling SDK methods.)\n * @public\n */\nexport abstract class Schema implements SchemaInterface {\n /**\n * Optional. 
The type of the property.\n * This can only be undefined when using `anyOf` schemas, which do not have an\n * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.\n */\n type?: SchemaType;\n /** Optional. The format of the property.\n * Supported formats:<br/>\n * <ul>\n * <li>for NUMBER type: \"float\", \"double\"</li>\n * <li>for INTEGER type: \"int32\", \"int64\"</li>\n * <li>for STRING type: \"email\", \"byte\", etc</li>\n * </ul>\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /** Optional. The items of the property. */\n items?: SchemaInterface;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Whether the property is nullable. Defaults to false. */\n nullable: boolean;\n /** Optional. The example of the property. */\n example?: unknown;\n /**\n * Allows user to add other schema properties that have not yet\n * been officially added to the SDK.\n */\n [key: string]: unknown;\n\n constructor(schemaParams: SchemaInterface) {\n // TODO(dlarocque): Enforce this with union types\n if (!schemaParams.type && !schemaParams.anyOf) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"A schema must have either a 'type' or an 'anyOf' array of sub-schemas.\"\n );\n }\n // eslint-disable-next-line guard-for-in\n for (const paramKey in schemaParams) {\n this[paramKey] = schemaParams[paramKey];\n }\n // Ensure these are explicitly set to avoid TS errors.\n this.type = schemaParams.type;\n this.format = schemaParams.hasOwnProperty('format')\n ? schemaParams.format\n : undefined;\n this.nullable = schemaParams.hasOwnProperty('nullable')\n ? 
!!schemaParams.nullable\n : false;\n }\n\n /**\n * Defines how this Schema should be serialized as JSON.\n * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj: { type?: SchemaType; [key: string]: unknown } = {\n type: this.type\n };\n for (const prop in this) {\n if (this.hasOwnProperty(prop) && this[prop] !== undefined) {\n if (prop !== 'required' || this.type === SchemaType.OBJECT) {\n obj[prop] = this[prop];\n }\n }\n }\n return obj as SchemaRequest;\n }\n\n static array(arrayParams: SchemaParams & { items: Schema }): ArraySchema {\n return new ArraySchema(arrayParams, arrayParams.items);\n }\n\n static object(\n objectParams: SchemaParams & {\n properties: {\n [k: string]: Schema;\n };\n optionalProperties?: string[];\n }\n ): ObjectSchema {\n return new ObjectSchema(\n objectParams,\n objectParams.properties,\n objectParams.optionalProperties\n );\n }\n\n // eslint-disable-next-line id-blacklist\n static string(stringParams?: SchemaParams): StringSchema {\n return new StringSchema(stringParams);\n }\n\n static enumString(\n stringParams: SchemaParams & { enum: string[] }\n ): StringSchema {\n return new StringSchema(stringParams, stringParams.enum);\n }\n\n static integer(integerParams?: SchemaParams): IntegerSchema {\n return new IntegerSchema(integerParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static number(numberParams?: SchemaParams): NumberSchema {\n return new NumberSchema(numberParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static boolean(booleanParams?: SchemaParams): BooleanSchema {\n return new BooleanSchema(booleanParams);\n }\n\n static anyOf(\n anyOfParams: SchemaParams & { anyOf: TypedSchema[] }\n ): AnyOfSchema {\n return new AnyOfSchema(anyOfParams);\n }\n}\n\n/**\n * A type that includes all specific Schema types.\n * @public\n */\nexport type TypedSchema =\n | IntegerSchema\n | NumberSchema\n | StringSchema\n | BooleanSchema\n | ObjectSchema\n | ArraySchema\n | AnyOfSchema;\n\n/**\n * Schema class for \"integer\" types.\n * @public\n */\nexport class IntegerSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.INTEGER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"number\" types.\n * @public\n */\nexport class NumberSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.NUMBER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"boolean\" types.\n * @public\n */\nexport class BooleanSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.BOOLEAN,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"string\" types. 
Can be used with or without\n * enum values.\n * @public\n */\nexport class StringSchema extends Schema {\n enum?: string[];\n constructor(schemaParams?: SchemaParams, enumValues?: string[]) {\n super({\n type: SchemaType.STRING,\n ...schemaParams\n });\n this.enum = enumValues;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n if (this.enum) {\n obj['enum'] = this.enum;\n }\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class for \"array\" types.\n * The `items` param should refer to the type of item that can be a member\n * of the array.\n * @public\n */\nexport class ArraySchema extends Schema {\n constructor(schemaParams: SchemaParams, public items: TypedSchema) {\n super({\n type: SchemaType.ARRAY,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.items = this.items.toJSON();\n return obj;\n }\n}\n\n/**\n * Schema class for \"object\" types.\n * The `properties` param must be a map of `Schema` objects.\n * @public\n */\nexport class ObjectSchema extends Schema {\n constructor(\n schemaParams: SchemaParams,\n public properties: {\n [k: string]: TypedSchema;\n },\n public optionalProperties: string[] = []\n ) {\n super({\n type: SchemaType.OBJECT,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.properties = { ...this.properties };\n const required = [];\n if (this.optionalProperties) {\n for (const propertyKey of this.optionalProperties) {\n if (!this.properties.hasOwnProperty(propertyKey)) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n `Property \"${propertyKey}\" specified in \"optionalProperties\" does not exist.`\n );\n }\n }\n }\n for (const propertyKey in this.properties) {\n if (this.properties.hasOwnProperty(propertyKey)) {\n obj.properties[propertyKey] = this.properties[\n propertyKey\n ].toJSON() as SchemaRequest;\n if (!this.optionalProperties.includes(propertyKey)) {\n required.push(propertyKey);\n }\n }\n }\n if (required.length > 0) {\n obj.required = required;\n }\n delete obj.optionalProperties;\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class representing a value that can conform to any of the provided sub-schemas. 
This is\n * useful when a field can accept multiple distinct types or structures.\n * @public\n */\nexport class AnyOfSchema extends Schema {\n anyOf: TypedSchema[]; // Re-define field to narrow to required type\n constructor(schemaParams: SchemaParams & { anyOf: TypedSchema[] }) {\n if (schemaParams.anyOf.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"The 'anyOf' array must not be empty.\"\n );\n }\n super({\n ...schemaParams,\n type: undefined // anyOf schemas do not have an explicit type\n });\n this.anyOf = schemaParams.anyOf;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n // Ensure the 'anyOf' property contains serialized SchemaRequest objects.\n if (this.anyOf && Array.isArray(this.anyOf)) {\n obj.anyOf = (this.anyOf as TypedSchema[]).map(s => s.toJSON());\n }\n return obj;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { logger } from '../logger';\n\n/**\n * Defines the image format for images generated by Imagen.\n *\n * Use this class to specify the desired format (JPEG or PNG) and compression quality\n * for images generated by Imagen. This is typically included as part of\n * {@link ImagenModelParams}.\n *\n * @example\n * ```javascript\n * const imagenModelParams = {\n * // ... 
other ImagenModelParams\n * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.\n * }\n * ```\n *\n * @public\n */\nexport class ImagenImageFormat {\n /**\n * The MIME type.\n */\n mimeType: string;\n /**\n * The level of compression (a number between 0 and 100).\n */\n compressionQuality?: number;\n\n private constructor() {\n this.mimeType = 'image/png';\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a JPEG image.\n *\n * @param compressionQuality - The level of compression (a number between 0 and 100).\n * @returns An {@link ImagenImageFormat} object for a JPEG image.\n *\n * @public\n */\n static jpeg(compressionQuality?: number): ImagenImageFormat {\n if (\n compressionQuality &&\n (compressionQuality < 0 || compressionQuality > 100)\n ) {\n logger.warn(\n `Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`\n );\n }\n return { mimeType: 'image/jpeg', compressionQuality };\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a PNG image.\n *\n * @returns An {@link ImagenImageFormat} object for a PNG image.\n *\n * @public\n */\n static png(): ImagenImageFormat {\n return { mimeType: 'image/png' };\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport {\n AIErrorCode,\n FunctionCall,\n FunctionResponse,\n GenerativeContentBlob,\n LiveServerContent\n} from '../types';\nimport { LiveSession } from './live-session';\nimport { Deferred } from '@firebase/util';\n\nconst SERVER_INPUT_SAMPLE_RATE = 16_000;\nconst SERVER_OUTPUT_SAMPLE_RATE = 24_000;\n\nconst AUDIO_PROCESSOR_NAME = 'audio-processor';\n\n/**\n * The JS for an `AudioWorkletProcessor`.\n * This processor is responsible for taking raw audio from the microphone,\n * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread.\n *\n * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor\n *\n * It is defined as a string here so that it can be converted into a `Blob`\n * and loaded at runtime.\n */\nconst audioProcessorWorkletString = `\n class AudioProcessor extends AudioWorkletProcessor {\n constructor(options) {\n super();\n this.targetSampleRate = options.processorOptions.targetSampleRate;\n // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope,\n // representing the native sample rate of the AudioContext.\n this.inputSampleRate = sampleRate;\n }\n\n /**\n * This method is called by the browser's audio engine for each block of audio data.\n * Input is a single input, with a single channel (input[0][0]).\n */\n process(inputs) {\n const input = inputs[0];\n if (input && input.length > 0 && input[0].length > 0) {\n const pcmData = input[0]; // Float32Array of raw audio samples.\n \n // Simple linear interpolation for resampling.\n const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate 
/ this.inputSampleRate));\n const ratio = pcmData.length / resampled.length;\n for (let i = 0; i < resampled.length; i++) {\n resampled[i] = pcmData[Math.floor(i * ratio)];\n }\n\n // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767)\n const resampledInt16 = new Int16Array(resampled.length);\n for (let i = 0; i < resampled.length; i++) {\n const sample = Math.max(-1, Math.min(1, resampled[i]));\n if (sample < 0) {\n resampledInt16[i] = sample * 32768;\n } else {\n resampledInt16[i] = sample * 32767;\n }\n }\n \n this.port.postMessage(resampledInt16);\n }\n // Return true to keep the processor alive and processing the next audio block.\n return true;\n }\n }\n\n // Register the processor with a name that can be used to instantiate it from the main thread.\n registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor);\n`;\n\n/**\n * A controller for managing an active audio conversation.\n *\n * @beta\n */\nexport interface AudioConversationController {\n /**\n * Stops the audio conversation, closes the microphone connection, and\n * cleans up resources. Returns a promise that resolves when cleanup is complete.\n */\n stop: () => Promise<void>;\n}\n\n/**\n * Options for {@link startAudioConversation}.\n *\n * @beta\n */\nexport interface StartAudioConversationOptions {\n /**\n * An async handler that is called when the model requests a function to be executed.\n * The handler should perform the function call and return the result as a `Part`,\n * which will then be sent back to the model.\n */\n functionCallingHandler?: (\n functionCalls: FunctionCall[]\n ) => Promise<FunctionResponse>;\n}\n\n/**\n * Dependencies needed by the {@link AudioConversationRunner}.\n *\n * @internal\n */\ninterface RunnerDependencies {\n audioContext: AudioContext;\n mediaStream: MediaStream;\n sourceNode: MediaStreamAudioSourceNode;\n workletNode: AudioWorkletNode;\n}\n\n/**\n * Encapsulates the core logic of an audio conversation.\n *\n * @internal\n */\nexport class AudioConversationRunner {\n /** A flag to indicate if the conversation has been stopped. */\n private isStopped = false;\n /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */\n private readonly stopDeferred = new Deferred<void>();\n /** A promise that tracks the lifecycle of the main `runReceiveLoop`. */\n private readonly receiveLoopPromise: Promise<void>;\n\n /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */\n private readonly playbackQueue: ArrayBuffer[] = [];\n /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */\n private scheduledSources: AudioBufferSourceNode[] = [];\n /** A high-precision timeline pointer for scheduling gapless audio playback. */\n private nextStartTime = 0;\n /** A mutex to prevent the playback processing loop from running multiple times concurrently. 
*/\n private isPlaybackLoopRunning = false;\n\n constructor(\n private readonly liveSession: LiveSession,\n private readonly options: StartAudioConversationOptions,\n private readonly deps: RunnerDependencies\n ) {\n this.liveSession.inConversation = true;\n\n // Start listening for messages from the server.\n this.receiveLoopPromise = this.runReceiveLoop().finally(() =>\n this.cleanup()\n );\n\n // Set up the handler for receiving processed audio data from the worklet.\n // Message data has been resampled to 16kHz 16-bit PCM.\n this.deps.workletNode.port.onmessage = event => {\n if (this.isStopped) {\n return;\n }\n\n const pcm16 = event.data as Int16Array;\n const base64 = btoa(\n String.fromCharCode.apply(\n null,\n Array.from(new Uint8Array(pcm16.buffer))\n )\n );\n\n const chunk: GenerativeContentBlob = {\n mimeType: 'audio/pcm',\n data: base64\n };\n void this.liveSession.sendAudioRealtime(chunk);\n };\n }\n\n /**\n * Stops the conversation and unblocks the main receive loop.\n */\n async stop(): Promise<void> {\n if (this.isStopped) {\n return;\n }\n this.isStopped = true;\n this.stopDeferred.resolve(); // Unblock the receive loop\n await this.receiveLoopPromise; // Wait for the loop and cleanup to finish\n }\n\n /**\n * Cleans up all audio resources (nodes, stream tracks, context) and marks the\n * session as no longer in a conversation.\n */\n private cleanup(): void {\n this.interruptPlayback(); // Ensure all audio is stopped on final cleanup.\n this.deps.workletNode.port.onmessage = null;\n this.deps.workletNode.disconnect();\n this.deps.sourceNode.disconnect();\n this.deps.mediaStream.getTracks().forEach(track => track.stop());\n if (this.deps.audioContext.state !== 'closed') {\n void this.deps.audioContext.close();\n }\n this.liveSession.inConversation = false;\n }\n\n /**\n * Adds audio data to the queue and ensures the playback loop is running.\n */\n private enqueueAndPlay(audioData: ArrayBuffer): void {\n this.playbackQueue.push(audioData);\n // Will no-op if it's already running.\n void this.processPlaybackQueue();\n }\n\n /**\n * Stops all current and pending audio playback and clears the queue. This is\n * called when the server indicates the model's speech was interrupted with\n * `LiveServerContent.modelTurn.interrupted`.\n */\n private interruptPlayback(): void {\n // Stop all sources that have been scheduled. 
The onended event will fire for each,\n // which will clean up the scheduledSources array.\n [...this.scheduledSources].forEach(source => source.stop(0));\n\n // Clear the internal buffer of unprocessed audio chunks.\n this.playbackQueue.length = 0;\n\n // Reset the playback clock to start fresh.\n this.nextStartTime = this.deps.audioContext.currentTime;\n }\n\n /**\n * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.\n */\n private async processPlaybackQueue(): Promise<void> {\n if (this.isPlaybackLoopRunning) {\n return;\n }\n this.isPlaybackLoopRunning = true;\n\n while (this.playbackQueue.length > 0 && !this.isStopped) {\n const pcmRawBuffer = this.playbackQueue.shift()!;\n try {\n const pcm16 = new Int16Array(pcmRawBuffer);\n const frameCount = pcm16.length;\n\n const audioBuffer = this.deps.audioContext.createBuffer(\n 1,\n frameCount,\n SERVER_OUTPUT_SAMPLE_RATE\n );\n\n // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API.\n const channelData = audioBuffer.getChannelData(0);\n for (let i = 0; i < frameCount; i++) {\n channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0]\n }\n\n const source = this.deps.audioContext.createBufferSource();\n source.buffer = audioBuffer;\n source.connect(this.deps.audioContext.destination);\n\n // Track the source and set up a handler to remove it from tracking when it finishes.\n this.scheduledSources.push(source);\n source.onended = () => {\n this.scheduledSources = this.scheduledSources.filter(\n s => s !== source\n );\n };\n\n // To prevent gaps, schedule the next chunk to start either now (if we're catching up)\n // or exactly when the previous chunk is scheduled to end.\n this.nextStartTime = Math.max(\n this.deps.audioContext.currentTime,\n this.nextStartTime\n );\n source.start(this.nextStartTime);\n\n // Update the schedule for the *next* chunk.\n this.nextStartTime += audioBuffer.duration;\n } catch (e) {\n logger.error('Error playing audio:', e);\n }\n }\n\n this.isPlaybackLoopRunning = false;\n }\n\n /**\n * The main loop that listens for and processes messages from the server.\n */\n private async runReceiveLoop(): Promise<void> {\n const messageGenerator = this.liveSession.receive();\n while (!this.isStopped) {\n const result = await Promise.race([\n messageGenerator.next(),\n this.stopDeferred.promise\n ]);\n\n if (this.isStopped || !result || result.done) {\n break;\n }\n\n const message = result.value;\n if (message.type === 'serverContent') {\n const serverContent = message as LiveServerContent;\n if (serverContent.interrupted) {\n this.interruptPlayback();\n }\n\n const audioPart = serverContent.modelTurn?.parts.find(part =>\n part.inlineData?.mimeType.startsWith('audio/')\n );\n if (audioPart?.inlineData) {\n const audioData = Uint8Array.from(\n atob(audioPart.inlineData.data),\n c => c.charCodeAt(0)\n ).buffer;\n this.enqueueAndPlay(audioData);\n }\n } else if (message.type === 'toolCall') {\n if (!this.options.functionCallingHandler) {\n logger.warn(\n 'Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. 
Ignoring tool call.'\n );\n } else {\n try {\n const functionResponse = await this.options.functionCallingHandler(\n message.functionCalls\n );\n if (!this.isStopped) {\n void this.liveSession.sendFunctionResponses([functionResponse]);\n }\n } catch (e) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Function calling handler failed: ${(e as Error).message}`\n );\n }\n }\n }\n }\n }\n}\n\n/**\n * Starts a real-time, bidirectional audio conversation with the model. This helper function manages\n * the complexities of microphone access, audio recording, playback, and interruptions.\n *\n * @remarks Important: This function must be called in response to a user gesture\n * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.\n *\n * @example\n * ```javascript\n * const liveSession = await model.connect();\n * let conversationController;\n *\n * // This function must be called from within a click handler.\n * async function startConversation() {\n * try {\n * conversationController = await startAudioConversation(liveSession);\n * } catch (e) {\n * // Handle AI-specific errors\n * if (e instanceof AIError) {\n * console.error(\"AI Error:\", e.message);\n * }\n * // Handle microphone permission and hardware errors\n * else if (e instanceof DOMException) {\n * console.error(\"Microphone Error:\", e.message);\n * }\n * // Handle other unexpected errors\n * else {\n * console.error(\"An unexpected error occurred:\", e);\n * }\n * }\n * }\n *\n * // Later, to stop the conversation:\n * // if (conversationController) {\n * // await conversationController.stop();\n * // }\n * ```\n *\n * @param liveSession - An active {@link LiveSession} instance.\n * @param options - Configuration options for the audio conversation.\n * @returns A `Promise` that resolves with an {@link AudioConversationController}.\n * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).\n * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.\n *\n * @beta\n */\nexport async function startAudioConversation(\n liveSession: LiveSession,\n options: StartAudioConversationOptions = {}\n): Promise<AudioConversationController> {\n if (liveSession.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot start audio conversation on a closed LiveSession.'\n );\n }\n\n if (liveSession.inConversation) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'An audio conversation is already in progress for this session.'\n );\n }\n\n // Check for necessary Web API support.\n if (\n typeof AudioWorkletNode === 'undefined' ||\n typeof AudioContext === 'undefined' ||\n typeof navigator === 'undefined' ||\n !navigator.mediaDevices\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'\n );\n }\n\n let audioContext: AudioContext | undefined;\n try {\n // 1. Set up the audio context. 
This must be in response to a user gesture.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy\n audioContext = new AudioContext();\n if (audioContext.state === 'suspended') {\n await audioContext.resume();\n }\n\n // 2. Prompt for microphone access and get the media stream.\n // This can throw a variety of permission or hardware-related errors.\n const mediaStream = await navigator.mediaDevices.getUserMedia({\n audio: true\n });\n\n // 3. Load the AudioWorklet processor.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet\n const workletBlob = new Blob([audioProcessorWorkletString], {\n type: 'application/javascript'\n });\n const workletURL = URL.createObjectURL(workletBlob);\n await audioContext.audioWorklet.addModule(workletURL);\n\n // 4. Create the audio graph: Microphone -> Source Node -> Worklet Node\n const sourceNode = audioContext.createMediaStreamSource(mediaStream);\n const workletNode = new AudioWorkletNode(\n audioContext,\n AUDIO_PROCESSOR_NAME,\n {\n processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE }\n }\n );\n sourceNode.connect(workletNode);\n\n // 5. Instantiate and return the runner which manages the conversation.\n const runner = new AudioConversationRunner(liveSession, options, {\n audioContext,\n mediaStream,\n sourceNode,\n workletNode\n });\n\n return { stop: () => runner.stop() };\n } catch (e) {\n // Ensure the audio context is closed on any setup error.\n if (audioContext && audioContext.state !== 'closed') {\n void audioContext.close();\n }\n\n // Re-throw specific, known error types directly. The user may want to handle `DOMException`\n // errors differently (for example, if permission to access audio device was denied).\n if (e instanceof AIError || e instanceof DOMException) {\n throw e;\n }\n\n // Wrap any other unexpected errors in a standard AIError.\n throw new AIError(\n AIErrorCode.ERROR,\n `Failed to initialize audio recording: ${(e as Error).message}`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp, getApp, _getProvider } from '@firebase/app';\nimport { Provider } from '@firebase/component';\nimport { getModularInstance } from '@firebase/util';\nimport { AI_TYPE, DEFAULT_HYBRID_IN_CLOUD_MODEL } from './constants';\nimport { AIService } from './service';\nimport { AI, AIOptions } from './public-types';\nimport {\n ImagenModelParams,\n HybridParams,\n ModelParams,\n RequestOptions,\n AIErrorCode,\n LiveModelParams\n} from './types';\nimport { AIError } from './errors';\nimport {\n AIModel,\n GenerativeModel,\n LiveGenerativeModel,\n ImagenModel\n} from './models';\nimport { encodeInstanceIdentifier } from './helpers';\nimport { GoogleAIBackend } from './backend';\nimport { WebSocketHandlerImpl } from './websocket';\n\nexport { ChatSession } from './methods/chat-session';\nexport { LiveSession } from './methods/live-session';\nexport * from 
'./requests/schema-builder';\nexport { ImagenImageFormat } from './requests/imagen-image-format';\nexport { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, AIError };\nexport { Backend, VertexAIBackend, GoogleAIBackend } from './backend';\nexport {\n startAudioConversation,\n AudioConversationController,\n StartAudioConversationOptions\n} from './methods/live-session-helpers';\n\ndeclare module '@firebase/component' {\n interface NameServiceMapping {\n [AI_TYPE]: AIService;\n }\n}\n\n/**\n * Returns the default {@link AI} instance that is associated with the provided\n * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the\n * default settings.\n *\n * @example\n * ```javascript\n * const ai = getAI(app);\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Gemini Developer API (via Google AI).\n * const ai = getAI(app, { backend: new GoogleAIBackend() });\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Vertex AI Gemini API.\n * const ai = getAI(app, { backend: new VertexAIBackend() });\n * ```\n *\n * @param app - The {@link @firebase/app#FirebaseApp} to use.\n * @param options - {@link AIOptions} that configure the AI instance.\n * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.\n *\n * @public\n */\nexport function getAI(app: FirebaseApp = getApp(), options?: AIOptions): AI {\n app = getModularInstance(app);\n // Dependencies\n const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE);\n\n const backend = options?.backend ?? new GoogleAIBackend();\n\n const finalOptions: Omit<AIOptions, 'backend'> = {\n useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false\n };\n\n const identifier = encodeInstanceIdentifier(backend);\n const aiInstance = AIProvider.getImmediate({\n identifier\n });\n\n aiInstance.options = finalOptions;\n\n return aiInstance;\n}\n\n/**\n * Returns a {@link GenerativeModel} class with methods for inference\n * and other functionality.\n *\n * @public\n */\nexport function getGenerativeModel(\n ai: AI,\n modelParams: ModelParams | HybridParams,\n requestOptions?: RequestOptions\n): GenerativeModel {\n // Uses the existence of HybridParams.mode to clarify the type of the modelParams input.\n const hybridParams = modelParams as HybridParams;\n let inCloudParams: ModelParams;\n if (hybridParams.mode) {\n inCloudParams = hybridParams.inCloudParams || {\n model: DEFAULT_HYBRID_IN_CLOUD_MODEL\n };\n } else {\n inCloudParams = modelParams as ModelParams;\n }\n\n if (!inCloudParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`\n );\n }\n\n /**\n * An AIService registered by index.node.ts will not have a\n * chromeAdapterFactory() method.\n */\n const chromeAdapter = (ai as AIService).chromeAdapterFactory?.(\n hybridParams.mode,\n typeof window === 'undefined' ? 
undefined : window,\n hybridParams.onDeviceParams\n );\n\n return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);\n}\n\n/**\n * Returns an {@link ImagenModel} class with methods for using Imagen.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when making Imagen requests.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @public\n */\nexport function getImagenModel(\n ai: AI,\n modelParams: ImagenModelParams,\n requestOptions?: RequestOptions\n): ImagenModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`\n );\n }\n return new ImagenModel(ai, modelParams, requestOptions);\n}\n\n/**\n * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.\n *\n * The Live API is only supported in modern browser windows and Node >= 22.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when setting up a {@link LiveSession}.\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @beta\n */\nexport function getLiveGenerativeModel(\n ai: AI,\n modelParams: LiveModelParams\n): LiveGenerativeModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`\n );\n }\n const webSocketHandler = new WebSocketHandlerImpl();\n return new LiveGenerativeModel(ai, modelParams, webSocketHandler);\n}\n","/**\n * The Firebase AI Web SDK.\n *\n * @packageDocumentation\n */\n\n/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { registerVersion, _registerComponent } from '@firebase/app';\nimport { AIService } from './service';\nimport { AI_TYPE } from './constants';\nimport { Component, ComponentType } from '@firebase/component';\nimport { name, version } from '../package.json';\nimport { decodeInstanceIdentifier } from './helpers';\nimport { AIError } from './errors';\nimport { AIErrorCode } from './public-types';\n\nfunction registerAI(): void {\n _registerComponent(\n new Component(\n AI_TYPE,\n (container, { instanceIdentifier }) => {\n if (!instanceIdentifier) {\n throw new AIError(\n AIErrorCode.ERROR,\n 'AIService instance identifier is undefined.'\n );\n }\n\n const backend = decodeInstanceIdentifier(instanceIdentifier);\n\n // getImmediate for FirebaseApp will always succeed\n const app = container.getProvider('app').getImmediate();\n const auth = container.getProvider('auth-internal');\n const appCheckProvider = container.getProvider('app-check-internal');\n return new AIService(app, backend, auth, appCheckProvider);\n },\n 
ComponentType.PUBLIC\n ).setMultipleInstances(true)\n );\n\n registerVersion(name, version, 'node');\n // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation\n registerVersion(name, version, '__BUILD_TARGET__');\n}\n\nregisterAI();\n\nexport * from './api';\nexport * from './public-types';\n"],"names":["FirebaseError","_isFirebaseServerApp","Logger","GoogleAIMapper.mapGenerateContentResponse","GoogleAIMapper.mapGenerateContentRequest","GoogleAIMapper.mapCountTokensRequest","Deferred","app","getApp","getModularInstance","_getProvider","_registerComponent","Component","registerVersion"],"mappings":";;;;;;;;;;;;AAAA;;;;;;;;;;;;;;;AAeG;AAII,MAAM,OAAO,GAAG,IAAI,CAAC;AAErB,MAAM,gBAAgB,GAAG,aAAa,CAAC;AAEvC,MAAM,cAAc,GAAG,iCAAiC,CAAC;AAEzD,MAAM,mBAAmB,GAAG,QAAQ,CAAC;AAErC,MAAM,eAAe,GAAG,OAAO,CAAC;AAEhC,MAAM,YAAY,GAAG,OAAO,CAAC;AAE7B,MAAM,wBAAwB,GAAG,GAAG,GAAG,IAAI,CAAC;AAEnD;;AAEG;AACI,MAAM,6BAA6B,GAAG,uBAAuB;;ACpCpE;;;;;;;;;;;;;;;AAeG;AAQH;;;AAGG;AACI,MAAM,cAAc,GAAG,CAAC,MAAM,EAAE,OAAO,EAAE,UAAU,EAAE,QAAQ,EAAW;AAE/E;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B,IAAA,yBAAyB,EAAE,2BAA2B;AACtD,IAAA,+BAA+B,EAAE,iCAAiC;AAClE,IAAA,wBAAwB,EAAE,0BAA0B;AACpD,IAAA,+BAA+B,EAAE,iCAAiC;EACzD;AAQX;;;AAGG;AACU,MAAA,kBAAkB,GAAG;AAChC;;AAEG;AACH,IAAA,mBAAmB,EAAE,qBAAqB;AAC1C;;AAEG;AACH,IAAA,sBAAsB,EAAE,wBAAwB;AAChD;;AAEG;AACH,IAAA,eAAe,EAAE,iBAAiB;AAClC;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;;AAGG;AACH,IAAA,GAAG,EAAE,KAAK;EACD;AASX;;;;AAIG;AACU,MAAA,eAAe,GAAG;AAC7B;;AAEG;AACH,IAAA,QAAQ,EAAE,UAAU;AACpB;;AAEG;AACH,IAAA,WAAW,EAAE,aAAa;EACjB;AAUX;;;AAGG;AACU,MAAA,eAAe,GAAG;AAC7B;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,GAAG,EAAE,KAAK;AACV;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;EACH;AASX;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B;;AAEG;AACH,IAAA,wBAAwB,EAAE,0BAA0B;AACpD;;AAEG;AACH,IAAA,iBAAiB,EAAE,mBAAmB;AACtC;;AAEG;AACH,IAAA,oBAAoB,EAAE,sBAAsB;AAC5C;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;AACxC;;;;;AAKG;AACH,IAAA,yBAAyB,EAAE,2BAA2B;EAC7C;AAQX;;;AAGG;AACU,MAAA,WAAW,GAAG;AACzB;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,SAAS,EAAE,WAAW;AACtB;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;EAC/B;AAQX;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,SAAS,EAAE,WAAW;AACtB;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;AACxC;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,uBAAuB,EAAE,yBAAyB;EACzC;AAQX;;AAEG;AACU,MAAA,mBAAmB,GAAG;AACjC;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;;;;AAKG;AACH,IAAA,GAAG,EAAE,KAAK;AACV;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;EACH;AAQX;;;AAGG;AACU,MAAA,QAAQ,GAAG;AACtB;;AAEG;AACH,IAAA,oBAAoB,EAAE,sBAAsB;AAC5C;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,QAAQ,EAAE,UAAU;EACX;AAQX;;;;AAIG;AACU,MAAA,gBAAgB,GAAG;AAC9B;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;;AAGG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;;AAGG;AACH,IAAA,KAAK,EAAE,OAAO;EACL;AAUX;;;;;;;;;;;;;;;;;;;;AAoBG;AACU,MAAA,aAAa,GAAG;AAC3B,IAAA,kBAAkB,EAAE,kBAAkB;AACtC,IAAA,gBAAgB,EAAE,gBAAgB;AAClC,IAAA,eAAe,EAAE,eAAe;AAChC,IAAA,iBAAiB,EAAE,iBAAiB;EAC3B;AASX;;;;AAIG;AACU,MAAA,eAAe,GAAG;AAC7B,IAAA,WAAW,EAAE,WAAW;AACxB,IAAA,UAAU,EAAE,UAAU;EACb;AAUX;;;;AAIG;AACU,MAAA,OAAO,GAAG;AACrB,IAAA,WAAW,EAAE,qBAAqB;AAClC,IAAA,EAAE,EAAE,YAAY;AAChB,IAAA,MAAM,EAAE,gBAAgB;AACxB,IAAA,iBAAiB,EAAE,2BAA2B;EAC9C;AASF;;;;AAIG;AACU,MAAA,QAAQ,
GAAG;AACtB,IAAA,WAAW,EAAE,sBAAsB;AACnC,IAAA,MAAM,EAAE,QAAQ;;;ACzalB;;;;;;;;;;;;;;;AAeG;AA4XH;;;;;;;;;;;;;;;;AAgBG;AACU,MAAA,kBAAkB,GAAG;AAChC;;AAEG;AACH,IAAA,gCAAgC,EAAE,kCAAkC;AACpE;;AAEG;AACH,IAAA,4BAA4B,EAAE,8BAA8B;AAC5D;;AAEG;AACH,IAAA,0BAA0B,EAAE,4BAA4B;AACxD;;AAEG;AACH,IAAA,4BAA4B,EAAE,8BAA8B;AAC5D;;AAEG;AACH,IAAA,2BAA2B,EAAE,6BAA6B;EAC1D;AA6KF;;;;AAIG;AACU,MAAA,gBAAgB,GAAG;AAC9B,IAAA,cAAc,EAAE,eAAe;AAC/B,IAAA,SAAS,EAAE,UAAU;AACrB,IAAA,sBAAsB,EAAE,sBAAsB;;;ACtmBhD;;;;;;;;;;;;;;;AAeG;AA4CH;;;;AAIG;AACU,MAAA,WAAW,GAAG;;AAEzB,IAAA,KAAK,EAAE,OAAO;;AAGd,IAAA,aAAa,EAAE,eAAe;;AAG9B,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,WAAW,EAAE,aAAa;;AAG1B,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,eAAe,EAAE,iBAAiB;;AAGlC,IAAA,eAAe,EAAE,iBAAiB;;AAGlC,IAAA,cAAc,EAAE,gBAAgB;;AAGhC,IAAA,UAAU,EAAE,YAAY;;AAGxB,IAAA,SAAS,EAAE,WAAW;;AAGtB,IAAA,QAAQ,EAAE,UAAU;;AAGpB,IAAA,aAAa,EAAE,eAAe;;AAG9B,IAAA,YAAY,EAAE,cAAc;;AAG5B,IAAA,WAAW,EAAE,aAAa;;;ACzG5B;;;;;;;;;;;;;;;AAeG;AAEH;;;;;AAKG;AACU,MAAA,UAAU,GAAG;;AAExB,IAAA,MAAM,EAAE,QAAQ;;AAEhB,IAAA,MAAM,EAAE,QAAQ;;AAEhB,IAAA,OAAO,EAAE,SAAS;;AAElB,IAAA,OAAO,EAAE,SAAS;;AAElB,IAAA,KAAK,EAAE,OAAO;;AAEd,IAAA,MAAM,EAAE,QAAQ;;;ACnClB;;;;;;;;;;;;;;;AAeG;AAqFH;;;;;;;;;;;AAWG;AACU,MAAA,uBAAuB,GAAG;AACrC;;AAEG;AACH,IAAA,mBAAmB,EAAE,qBAAqB;AAC1C;;AAEG;AACH,IAAA,sBAAsB,EAAE,wBAAwB;AAChD;;AAEG;AACH,IAAA,eAAe,EAAE,iBAAiB;AAClC;;;;;AAKG;AACH,IAAA,UAAU,EAAE,YAAY;EACf;AAiBX;;;;;;;AAOG;AACU,MAAA,uBAAuB,GAAG;AACrC;;AAEG;AACH,IAAA,SAAS,EAAE,YAAY;AACvB;;;;;;AAMG;AACH,IAAA,WAAW,EAAE,aAAa;AAC1B;;;;;;AAMG;AACH,IAAA,SAAS,EAAE,WAAW;EACb;AAiCX;;;;;;;;;;AAUG;AACU,MAAA,iBAAiB,GAAG;AAC/B;;AAEG;AACH,IAAA,QAAQ,EAAE,KAAK;AACf;;AAEG;AACH,IAAA,eAAe,EAAE,KAAK;AACtB;;AAEG;AACH,IAAA,cAAc,EAAE,KAAK;AACrB;;AAEG;AACH,IAAA,gBAAgB,EAAE,MAAM;AACxB;;AAEG;AACH,IAAA,eAAe,EAAE,MAAM;;;AClPzB;;;;;;;;;;;;;;;AAeG;AAqCH;;;;;;;;;;;AAWG;AACU,MAAA,WAAW,GAAG;AACzB;;;AAGG;AACH,IAAA,SAAS,EAAE,WAAW;AAEtB;;;AAGG;AACH,IAAA,SAAS,EAAE,WAAW;AACd,EAAC;;AC5EX;;;;;;;;;;;;;;;AAeG;AAKH;;;;;;;AAOG;MACmB,OAAO,CAAA;AAM3B;;;AAGG;AACH,IAAA,WAAA,CAAsB,IAAiB,EAAA;AACrC,QAAA,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC;KACzB;AACF,CAAA;AAED;;;;;;;AAOG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAC1C;;AAEG;AACH,IAAA,WAAA,GAAA;AACE,QAAA,KAAK,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC;KAC9B;AACF,CAAA;AAED;;;;;;;AAOG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAQ1C;;;;;;AAMG;AACH,IAAA,WAAA,CAAY,WAAmB,gBAAgB,EAAA;AAC7C,QAAA,KAAK,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC;QAC7B,IAAI,CAAC,QAAQ,EAAE;AACb,YAAA,IAAI,CAAC,QAAQ,GAAG,gBAAgB,CAAC;SAClC;aAAM;AACL,YAAA,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;SAC1B;KACF;AACF;;AC3FD;;;;;;;;;;;;;;;AAeG;MAgBU,SAAS,CAAA;IAMpB,WACS,CAAA,GAAgB,EAChB,OAAgB,EACvB,YAAiD,EACjD,gBAA0D,EACnD,oBAI2B,EAAA;QAR3B,IAAG,CAAA,GAAA,GAAH,GAAG,CAAa;QAChB,IAAO,CAAA,OAAA,GAAP,OAAO,CAAS;QAGhB,IAAoB,CAAA,oBAAA,GAApB,oBAAoB,CAIO;AAElC,QAAA,MAAM,QAAQ,GAAG,gBAAgB,EAAE,YAAY,CAAC,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,CAAC;AACpE,QAAA,MAAM,IAAI,GAAG,YAAY,EAAE,YAAY,CAAC,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,CAAC;AAC5D,QAAA,IAAI,CAAC,IAAI,GAAG,IAAI,IAAI,IAAI,CAAC;AACzB,QAAA,IAAI,CAAC,QAAQ,GAAG,QAAQ,IAAI,IAAI,CAAC;AAEjC,QAAA,IAAI,OAAO,YAAY,eAAe,EAAE;AACtC,YAAA,IAAI,CAAC,QAAQ,GAAG,OAAO,CAAC,QAAQ,CAAC;SAClC;aAAM;AACL,YAAA,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAC;SACpB;KACF;IAED,OAAO,GAAA;AACL,QAAA,OAAO,OAAO,CAAC,OAAO,EAAE,CAAC;KAC1B;IAED,IAAI,OAAO,CAAC,YAAuB,EAAA;AACjC,QAAA,IAAI,CAAC,QAAQ,GAAG,YAAY,CAAC;KAC9B;AAED,IAAA,IAAI,OAAO,GAAA;QACT,OAAO,IAAI,CAAC,QAAQ,CAAC;KACtB;AACF;;ACvED;;;;;;;;;;;;;;;AAeG;AAMH;;;;AAIG;AACG,MAAO,OAAQ,SAAQA,kBAAa,CAAA;AACxC;;;;;;AAMG;AACH,IAAA,WAAA,CACW,IAAiB,EAC1B,OAAe,EACN,eAAiC,EAAA;;QAG1C,MAAM,OAAO,GAAG,OAAO,CAAC;AACxB,QAAA,MAAM,QAAQ,GAAG,CA
AA,EAAG,OAAO,CAAI,CAAA,EAAA,IAAI,EAAE,CAAC;QACtC,MAAM,WAAW,GAAG,CAAG,EAAA,OAAO,KAAK,OAAO,CAAA,EAAA,EAAK,QAAQ,CAAA,CAAA,CAAG,CAAC;AAC3D,QAAA,KAAK,CAAC,IAAI,EAAE,WAAW,CAAC,CAAC;QARhB,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAa;QAEjB,IAAe,CAAA,eAAA,GAAf,eAAe,CAAkB;;;;;AAY1C,QAAA,IAAI,KAAK,CAAC,iBAAiB,EAAE;;;AAG3B,YAAA,KAAK,CAAC,iBAAiB,CAAC,IAAI,EAAE,OAAO,CAAC,CAAC;SACxC;;;;;QAMD,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,OAAO,CAAC,SAAS,CAAC,CAAC;;AAG/C,QAAA,IAAI,CAAC,QAAQ,GAAG,MAAM,WAAW,CAAC;KACnC;AACF;;AChED;;;;;;;;;;;;;;;AAeG;AAOH;;;;;AAKG;AACG,SAAU,wBAAwB,CAAC,OAAgB,EAAA;AACvD,IAAA,IAAI,OAAO,YAAY,eAAe,EAAE;QACtC,OAAO,CAAA,EAAG,OAAO,CAAA,SAAA,CAAW,CAAC;KAC9B;AAAM,SAAA,IAAI,OAAO,YAAY,eAAe,EAAE;AAC7C,QAAA,OAAO,GAAG,OAAO,CAAA,UAAA,EAAa,OAAO,CAAC,QAAQ,EAAE,CAAC;KAClD;SAAM;AACL,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAoB,iBAAA,EAAA,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,WAAW,CAAC,CAAA,CAAE,CAC1D,CAAC;KACH;AACH,CAAC;AAED;;;;AAIG;AACG,SAAU,wBAAwB,CAAC,kBAA0B,EAAA;IACjE,MAAM,eAAe,GAAG,kBAAkB,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;AACtD,IAAA,IAAI,eAAe,CAAC,CAAC,CAAC,KAAK,OAAO,EAAE;AAClC,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAgD,6CAAA,EAAA,eAAe,CAAC,CAAC,CAAC,CAAA,CAAA,CAAG,CACtE,CAAC;KACH;AACD,IAAA,MAAM,WAAW,GAAG,eAAe,CAAC,CAAC,CAAC,CAAC;IACvC,QAAQ,WAAW;AACjB,QAAA,KAAK,UAAU;AACb,YAAA,MAAM,QAAQ,GAAuB,eAAe,CAAC,CAAC,CAAC,CAAC;YACxD,IAAI,CAAC,QAAQ,EAAE;gBACb,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAkD,+CAAA,EAAA,kBAAkB,CAAG,CAAA,CAAA,CACxE,CAAC;aACH;AACD,YAAA,OAAO,IAAI,eAAe,CAAC,QAAQ,CAAC,CAAC;AACvC,QAAA,KAAK,UAAU;YACb,OAAO,IAAI,eAAe,EAAE,CAAC;AAC/B,QAAA;YACE,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAwC,qCAAA,EAAA,kBAAkB,CAAG,CAAA,CAAA,CAC9D,CAAC;KACL;AACH;;ACzEA;;;;;;;;;;;;;;;AAeG;AAQH;;;;;;;AAOG;MACmB,OAAO,CAAA;AAY3B;;;;;;;;;;;;;;;;AAgBG;IACH,WAAsB,CAAA,EAAM,EAAE,SAAiB,EAAA;QAC7C,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,MAAM,EAAE;YAC5B,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,UAAU,EACtB,CAAuH,qHAAA,CAAA,CACxH,CAAC;SACH;aAAM,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,SAAS,EAAE;YACtC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,CAA6H,2HAAA,CAAA,CAC9H,CAAC;SACH;aAAM,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE;YAClC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,SAAS,EACrB,CAAqH,mHAAA,CAAA,CACtH,CAAC;SACH;aAAM;YACL,IAAI,CAAC,YAAY,GAAG;AAClB,gBAAA,MAAM,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM;AAC7B,gBAAA,OAAO,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,SAAS;AACjC,gBAAA,KAAK,EAAE,EAAE,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK;AAC3B,gBAAA,8BAA8B,EAAE,EAAE,CAAC,GAAG,CAAC,8BAA8B;gBACrE,QAAQ,EAAE,EAAE,CAAC,QAAQ;gBACrB,OAAO,EAAE,EAAE,CAAC,OAAO;aACpB,CAAC;AAEF,YAAA,IAAIC,wBAAoB,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,aAAa,EAAE;gBACjE,MAAM,KAAK,GAAG,EAAE,CAAC,GAAG,CAAC,QAAQ,CAAC,aAAa,CAAC;AAC5C,gBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAAK;oBACxC,OAAO,OAAO,CAAC,OAAO,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC;AACpC,iBAAC,CAAC;aACH;AAAM,iBAAA,IAAK,EAAgB,CAAC,QAAQ,EAAE;AACrC,gBAAA,IAAI,EAAE,CAAC,OAAO,EAAE,2BAA2B,EAAE;AAC3C,oBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAClC,EAAgB,CAAC,QAAS,CAAC,kBAAkB,EAAE,CAAC;iBACpD;qBAAM;AACL,oBAAA,IAAI,CAAC,YAAY,CAAC,gBAAgB,GAAG,MAClC,EAAgB,CAAC,QAAS,CAAC,QAAQ,EAAE,CAAC;iBAC1C;aACF;AAED,YAAA,IAAK,EAAgB,CAAC,IAAI,EAAE;AAC1B,gBAAA,IAAI,CAAC,YAAY,CAAC,YAAY,GAAG,MAC9B,EAAgB,CAAC,IAAK,CAAC,QAAQ,EAAE,CAAC;aACtC;AAED,YAAA,IAAI,CAAC,KAAK,GAAG,OAAO,CAAC,kBAAkB,CACrC,SAAS,EACT,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,WAAW,CACtC,CAAC;SACH;KACF;AAED;;;;;;;AAOG;AACH,IAAA,OAAO,kBAAkB,CACvB,SAAiB,EACjB,WAAwB,EAAA;AAExB,QAAA,IAAI,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AACzC,YAAA,OAAO,OAAO,CAAC,0BAA0B,CAAC,SAAS,CAAC,CAAC;SACtD;aAAM;AACL,YAAA,OAAO,OAAO,CAAC,0BAA0B,CAAC,SAAS,CAAC,CAAC;SACtD;KAC
F;AAED;;AAEG;IACK,OAAO,0BAA0B,CAAC,SAAiB,EAAA;QACzD,OAAO,CAAA,OAAA,EAAU,SAAS,CAAA,CAAE,CAAC;KAC9B;AAED;;AAEG;IACK,OAAO,0BAA0B,CAAC,SAAiB,EAAA;AACzD,QAAA,IAAI,KAAa,CAAC;AAClB,QAAA,IAAI,SAAS,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE;AAC3B,YAAA,IAAI,SAAS,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE;;AAEnC,gBAAA,KAAK,GAAG,CAAA,kBAAA,EAAqB,SAAS,CAAA,CAAE,CAAC;aAC1C;iBAAM;;gBAEL,KAAK,GAAG,SAAS,CAAC;aACnB;SACF;aAAM;;AAEL,YAAA,KAAK,GAAG,CAAA,yBAAA,EAA4B,SAAS,CAAA,CAAE,CAAC;SACjD;AAED,QAAA,OAAO,KAAK,CAAC;KACd;AACF;;AC/JD;;;;;;;;;;;;;;;AAeG;AAII,MAAM,MAAM,GAAG,IAAIC,eAAM,CAAC,oBAAoB,CAAC;;ACnBtD;;;;;;;;;;;;;;;AAeG;AAgBH,IAAY,IAKX,CAAA;AALD,CAAA,UAAY,IAAI,EAAA;AACd,IAAA,IAAA,CAAA,kBAAA,CAAA,GAAA,iBAAoC,CAAA;AACpC,IAAA,IAAA,CAAA,yBAAA,CAAA,GAAA,uBAAiD,CAAA;AACjD,IAAA,IAAA,CAAA,cAAA,CAAA,GAAA,aAA4B,CAAA;AAC5B,IAAA,IAAA,CAAA,SAAA,CAAA,GAAA,SAAmB,CAAA;AACrB,CAAC,EALW,IAAI,KAAJ,IAAI,GAKf,EAAA,CAAA,CAAA,CAAA;MAEY,UAAU,CAAA;IACrB,WACS,CAAA,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,cAA+B,EAAA;QAJ/B,IAAK,CAAA,KAAA,GAAL,KAAK,CAAQ;QACb,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAM;QACV,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;QACxB,IAAM,CAAA,MAAA,GAAN,MAAM,CAAS;QACf,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;KACpC;IACJ,QAAQ,GAAA;QACN,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;AAClC,QAAA,GAAG,CAAC,QAAQ,GAAG,CAAI,CAAA,EAAA,IAAI,CAAC,UAAU,CAAA,CAAA,EAAI,IAAI,CAAC,SAAS,CAAI,CAAA,EAAA,IAAI,CAAC,IAAI,EAAE,CAAC;QACpE,GAAG,CAAC,MAAM,GAAG,IAAI,CAAC,WAAW,CAAC,QAAQ,EAAE,CAAC;AACzC,QAAA,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;KACvB;AAED,IAAA,IAAY,OAAO,GAAA;QACjB,OAAO,IAAI,CAAC,cAAc,EAAE,OAAO,IAAI,CAAA,QAAA,EAAW,cAAc,CAAA,CAAE,CAAC;KACpE;AAED,IAAA,IAAY,UAAU,GAAA;QACpB,OAAO,mBAAmB,CAAC;KAC5B;AAED,IAAA,IAAY,SAAS,GAAA;QACnB,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,YAAY,eAAe,EAAE;YACvD,OAAO,CAAA,SAAA,EAAY,IAAI,CAAC,WAAW,CAAC,OAAO,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,CAAA,CAAE,CAAC;SAC7D;aAAM,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,YAAY,eAAe,EAAE;AAC9D,YAAA,OAAO,YAAY,IAAI,CAAC,WAAW,CAAC,OAAO,cAAc,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,QAAQ,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,EAAE,CAAC;SAC5G;aAAM;YACL,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,oBAAoB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,CAAA,CAAE,CAC/D,CAAC;SACH;KACF;AAED,IAAA,IAAY,WAAW,GAAA;AACrB,QAAA,MAAM,MAAM,GAAG,IAAI,eAAe,EAAE,CAAC;AACrC,QAAA,IAAI,IAAI,CAAC,MAAM,EAAE;AACf,YAAA,MAAM,CAAC,GAAG,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;SAC1B;AAED,QAAA,OAAO,MAAM,CAAC;KACf;AACF,CAAA;MAEY,YAAY,CAAA;AACvB,IAAA,WAAA,CAAmB,WAAwB,EAAA;QAAxB,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;KAAI;IAC/C,QAAQ,GAAA;QACN,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,CAAS,MAAA,EAAA,cAAc,CAAE,CAAA,CAAC,CAAC;AAC/C,QAAA,GAAG,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,CAAC;AAE7B,QAAA,MAAM,WAAW,GAAG,IAAI,eAAe,EAAE,CAAC;QAC1C,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;AAChD,QAAA,GAAG,CAAC,MAAM,GAAG,WAAW,CAAC,QAAQ,EAAE,CAAC;AAEpC,QAAA,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;KACvB;AAED,IAAA,IAAY,QAAQ,GAAA;AAClB,QAAA,IAAI,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAClE,YAAA,OAAO,0EAA0E,CAAC;SACnF;aAAM;AACL,YAAA,OAAO,mFAAmF,IAAI,CAAC,WAAW,CAAC,QAAQ,EAAE,CAAC;SACvH;KACF;AACF,CAAA;AAED;;AAEG;AACH,SAAS,gBAAgB,GAAA;IACvB,MAAM,WAAW,GAAG,EAAE,CAAC;IACvB,WAAW,CAAC,IAAI,CAAC,CAAA,EAAG,YAAY,CAAI,CAAA,EAAA,eAAe,CAAE,CAAA,CAAC,CAAC;AACvD,IAAA,WAAW,CAAC,IAAI,CAAC,QAAQ,eAAe,CAAA,CAAE,CAAC,CAAC;AAC5C,IAAA,OAAO,WAAW,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;AAC/B,CAAC;AAEM,eAAe,UAAU,CAAC,GAAe,EAAA;AAC9C,IAAA,MAAM,OAAO,GAAG,IAAI,OAAO,EAAE,CAAC;AAC9B,IAAA,OAAO,CAAC,MAAM,CAAC,cAAc,EAAE,kBAAkB,CAAC,CAAC;IACnD,OAAO,CAAC,MAAM,CAAC,mBAAmB,EAAE,gBAAgB,EAAE,CAAC,CAAC;IACxD,OAAO,CAAC,MAAM,CAAC,gBAAgB,EAAE,GAAG,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;AACzD,IAAA,IAAI,GAAG,CA
AC,WAAW,CAAC,8BAA8B,EAAE;QAClD,OAAO,CAAC,MAAM,CAAC,kBAAkB,EAAE,GAAG,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC;KAC3D;AACD,IAAA,IAAI,GAAG,CAAC,WAAW,CAAC,gBAAgB,EAAE;QACpC,MAAM,aAAa,GAAG,MAAM,GAAG,CAAC,WAAW,CAAC,gBAAgB,EAAE,CAAC;QAC/D,IAAI,aAAa,EAAE;YACjB,OAAO,CAAC,MAAM,CAAC,qBAAqB,EAAE,aAAa,CAAC,KAAK,CAAC,CAAC;AAC3D,YAAA,IAAI,aAAa,CAAC,KAAK,EAAE;gBACvB,MAAM,CAAC,IAAI,CACT,CAA6C,0CAAA,EAAA,aAAa,CAAC,KAAK,CAAC,OAAO,CAAE,CAAA,CAC3E,CAAC;aACH;SACF;KACF;AAED,IAAA,IAAI,GAAG,CAAC,WAAW,CAAC,YAAY,EAAE;QAChC,MAAM,SAAS,GAAG,MAAM,GAAG,CAAC,WAAW,CAAC,YAAY,EAAE,CAAC;QACvD,IAAI,SAAS,EAAE;YACb,OAAO,CAAC,MAAM,CAAC,eAAe,EAAE,CAAY,SAAA,EAAA,SAAS,CAAC,WAAW,CAAE,CAAA,CAAC,CAAC;SACtE;KACF;AAED,IAAA,OAAO,OAAO,CAAC;AACjB,CAAC;AAEM,eAAe,gBAAgB,CACpC,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,IAAY,EACZ,cAA+B,EAAA;AAE/B,IAAA,MAAM,GAAG,GAAG,IAAI,UAAU,CAAC,KAAK,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;IAC7E,OAAO;AACL,QAAA,GAAG,EAAE,GAAG,CAAC,QAAQ,EAAE;AACnB,QAAA,YAAY,EAAE;AACZ,YAAA,MAAM,EAAE,MAAM;AACd,YAAA,OAAO,EAAE,MAAM,UAAU,CAAC,GAAG,CAAC;YAC9B,IAAI;AACL,SAAA;KACF,CAAC;AACJ,CAAC;AAEM,eAAe,WAAW,CAC/B,KAAa,EACb,IAAU,EACV,WAAwB,EACxB,MAAe,EACf,IAAY,EACZ,cAA+B,EAAA;AAE/B,IAAA,MAAM,GAAG,GAAG,IAAI,UAAU,CAAC,KAAK,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;AAC7E,IAAA,IAAI,QAAQ,CAAC;AACb,IAAA,IAAI,cAA4D,CAAC;AACjE,IAAA,IAAI;AACF,QAAA,MAAM,OAAO,GAAG,MAAM,gBAAgB,CACpC,KAAK,EACL,IAAI,EACJ,WAAW,EACX,MAAM,EACN,IAAI,EACJ,cAAc,CACf,CAAC;;AAEF,QAAA,MAAM,aAAa,GACjB,cAAc,EAAE,OAAO,IAAI,IAAI,IAAI,cAAc,CAAC,OAAO,IAAI,CAAC;cAC1D,cAAc,CAAC,OAAO;cACtB,wBAAwB,CAAC;AAC/B,QAAA,MAAM,eAAe,GAAG,IAAI,eAAe,EAAE,CAAC;AAC9C,QAAA,cAAc,GAAG,UAAU,CAAC,MAAM,eAAe,CAAC,KAAK,EAAE,EAAE,aAAa,CAAC,CAAC;QAC1E,OAAO,CAAC,YAAY,CAAC,MAAM,GAAG,eAAe,CAAC,MAAM,CAAC;AAErD,QAAA,QAAQ,GAAG,MAAM,KAAK,CAAC,OAAO,CAAC,GAAG,EAAE,OAAO,CAAC,YAAY,CAAC,CAAC;AAC1D,QAAA,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE;YAChB,IAAI,OAAO,GAAG,EAAE,CAAC;AACjB,YAAA,IAAI,YAAY,CAAC;AACjB,YAAA,IAAI;AACF,gBAAA,MAAM,IAAI,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;AACnC,gBAAA,OAAO,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC;AAC7B,gBAAA,IAAI,IAAI,CAAC,KAAK,CAAC,OAAO,EAAE;AACtB,oBAAA,OAAO,IAAI,CAAA,CAAA,EAAI,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,EAAE,CAAC;AACpD,oBAAA,YAAY,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC;iBACnC;aACF;YAAC,OAAO,CAAC,EAAE;;aAEX;AACD,YAAA,IACE,QAAQ,CAAC,MAAM,KAAK,GAAG;gBACvB,YAAY;AACZ,gBAAA,YAAY,CAAC,IAAI,CACf,CAAC,MAAoB,KAAK,MAAM,CAAC,MAAM,KAAK,kBAAkB,CAC/D;gBACD,YAAY,CAAC,IAAI,CAAC,CAAC,MAAoB,KAEnC,MAAM,CAAC,KACR,GAAG,CAAC,CAAC,EAAE,WAAW,CAAC,QAAQ,CAC1B,0CAA0C,CAC3C,CACF,EACD;AACA,gBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA+C,6CAAA,CAAA;oBAC7C,CAAgE,8DAAA,CAAA;oBAChE,CAAqE,mEAAA,CAAA;AACrE,oBAAA,CAAA,+CAAA,EAAkD,GAAG,CAAC,WAAW,CAAC,OAAO,CAAU,QAAA,CAAA;oBACnF,CAAgE,8DAAA,CAAA;oBAChE,CAAoE,kEAAA,CAAA;AACpE,oBAAA,CAAA,WAAA,CAAa,EACf;oBACE,MAAM,EAAE,QAAQ,CAAC,MAAM;oBACvB,UAAU,EAAE,QAAQ,CAAC,UAAU;oBAC/B,YAAY;AACb,iBAAA,CACF,CAAC;aACH;YACD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,uBAAuB,GAAG,CAAA,GAAA,EAAM,QAAQ,CAAC,MAAM,IAAI,QAAQ,CAAC,UAAU,CAAK,EAAA,EAAA,OAAO,EAAE,EACpF;gBACE,MAAM,EAAE,QAAQ,CAAC,MAAM;gBACvB,UAAU,EAAE,QAAQ,CAAC,UAAU;gBAC/B,YAAY;AACb,aAAA,CACF,CAAC;SACH;KACF;IAAC,OAAO,CAAC,EAAE;QACV,IAAI,GAAG,GAAG,CAAU,CAAC;AACrB,QAAA,IACG,CAAa,CAAC,IAAI,KAAK,WAAW,CAAC,WAAW;AAC9C,YAAA,CAAa,CAAC,IAAI,KAAK,WAAW,CAAC,eAAe;YACnD,CAAC,YAAY,KAAK,EAClB;AACA,YAAA,GAAG,GAAG,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,oBAAA,EAAuB,GAAG,CAAC,QAAQ,EAAE,CAAK,EAAA,EAAA,CAAC,CAAC,OAAO,CAAA,CAAE,CACtD,CAAC;AACF,YAAA,GAAG,CAAC,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC;SACrB;AAED,QAAA,MAAM,GAAG,CAAC;KACX;YAAS;QACR,IAAI,cAAc,EAAE;YAClB,
YAAY,CAAC,cAAc,CAAC,CAAC;SAC9B;KACF;AACD,IAAA,OAAO,QAAQ,CAAC;AAClB;;AC7QA;;;;;;;;;;;;;;;AAeG;AAmBH;;;AAGG;AACH,SAAS,kBAAkB,CAAC,QAAiC,EAAA;AAC3D,IAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;QACzD,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;YAClC,MAAM,CAAC,IAAI,CACT,CAAA,kBAAA,EAAqB,QAAQ,CAAC,UAAU,CAAC,MAAM,CAAG,CAAA,CAAA;gBAChD,CAA4D,0DAAA,CAAA;AAC5D,gBAAA,CAAA,gEAAA,CAAkE,CACrE,CAAC;SACH;QACD,IAAI,kBAAkB,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,EAAE;AAC9C,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,gBAAA,EAAmB,uBAAuB,CACxC,QAAQ,CACT,0CAA0C,EAC3C;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,IAAI,CAAC;KACb;SAAM;AACL,QAAA,OAAO,KAAK,CAAC;KACd;AACH,CAAC;AAED;;;AAGG;AACG,SAAU,6BAA6B,CAC3C,QAAiC,EACjC,eAAmC,GAAA,eAAe,CAAC,QAAQ,EAAA;AAE3D;;;;;AAKG;AACH,IAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,OAAO,CAAC,EAAE;QAC1E,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC;KAClC;AAED,IAAA,MAAM,mBAAmB,GAAG,UAAU,CAAC,QAAQ,CAAC,CAAC;AACjD,IAAA,mBAAmB,CAAC,eAAe,GAAG,eAAe,CAAC;AACtD,IAAA,OAAO,mBAAmB,CAAC;AAC7B,CAAC;AAED;;;AAGG;AACG,SAAU,UAAU,CACxB,QAAiC,EAAA;AAEhC,IAAA,QAA4C,CAAC,IAAI,GAAG,MAAK;AACxD,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;SACjD;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,oBAAA,EAAuB,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EAC1D;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,EAAE,CAAC;AACZ,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,cAAc,GAAG,MAAK;AAClE,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,MAAM,MAAM,GAAG,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YACzD,OAAO,MAAM,KAAK,EAAE,GAAG,SAAS,GAAG,MAAM,CAAC;SAC3C;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,+BAAA,EAAkC,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EACrE;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,eAAe,GAAG,MAEhD;AACd,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,kBAAkB,CAAC,QAAQ,CAAC,CAAC;SACrC;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,oBAAA,EAAuB,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EAC1D;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACD,IAAA,QAA4C,CAAC,aAAa,GAAG,MAAK;AACjE,QAAA,IAAI,kBAAkB,CAAC,QAAQ,CAAC,EAAE;AAChC,YAAA,OAAO,gBAAgB,CAAC,QAAQ,CAAC,CAAC;SACnC;AAAM,aAAA,IAAI,QAAQ,CAAC,cAAc,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,6BAAA,EAAgC,uBAAuB,CAAC,QAAQ,CAAC,EAAE,EACnE;gBACE,QAAQ;AACT,aAAA,CACF,CAAC;SACH;AACD,QAAA,OAAO,SAAS,CAAC;AACnB,KAAC,CAAC;AACF,IAAA,OAAO,QAA2C,CAAC;AACrD,CAAC;AAED;;;;;;AAMG;AACa,SAAA,OAAO,CACrB,QAAiC,EACjC,UAAmC,EAAA;IAEnC,MAAM,WAAW,GAAG,EAAE,CAAC;AACvB,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;YAC1D,IAAI,IAAI,CAAC,IAAI,IAAI,UAAU,CAAC,IAAI,CAAC,EAAE;AACjC,gBAAA,WAAW,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;aAC7B;SACF;KACF;AACD,IAAA,IAAI,WAAW,CAAC,MAAM,GAAG,CAAC,EAAE;AAC1B,QAAA,OAAO,WAAW,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;KAC7B;SAAM;AACL,QAAA,OAAO,EAAE,CAAC;KACX;AACH,CAAC;AAED;;AAEG;AACG,SAAU,gBAAgB,CAC9B,QAAiC,EAAA;IAEjC,MAAM,aAAa,GAAmB,EAAE,CAAC;AACzC,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC1D,YAAA,IAAI,IAAI,CAAC,YAAY,EAAE;AACrB,gBAAA,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;aACvC;SACF;KACF;AACD,IAAA,IAAI,aAA
a,CAAC,MAAM,GAAG,CAAC,EAAE;AAC5B,QAAA,OAAO,aAAa,CAAC;KACtB;SAAM;AACL,QAAA,OAAO,SAAS,CAAC;KAClB;AACH,CAAC;AAED;;;;AAIG;AACG,SAAU,kBAAkB,CAChC,QAAiC,EAAA;IAEjC,MAAM,IAAI,GAAqB,EAAE,CAAC;AAElC,IAAA,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC3C,QAAA,KAAK,MAAM,IAAI,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,EAAE,KAAK,EAAE;AAC1D,YAAA,IAAI,IAAI,CAAC,UAAU,EAAE;AACnB,gBAAA,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;aACjB;SACF;KACF;AAED,IAAA,IAAI,IAAI,CAAC,MAAM,GAAG,CAAC,EAAE;AACnB,QAAA,OAAO,IAAI,CAAC;KACb;SAAM;AACL,QAAA,OAAO,SAAS,CAAC;KAClB;AACH,CAAC;AAED,MAAM,gBAAgB,GAAG,CAAC,YAAY,CAAC,UAAU,EAAE,YAAY,CAAC,MAAM,CAAC,CAAC;AAExE,SAAS,kBAAkB,CAAC,SAAmC,EAAA;AAC7D,IAAA,QACE,CAAC,CAAC,SAAS,CAAC,YAAY;AACxB,QAAA,gBAAgB,CAAC,IAAI,CAAC,MAAM,IAAI,MAAM,KAAK,SAAS,CAAC,YAAY,CAAC,EAClE;AACJ,CAAC;AAEK,SAAU,uBAAuB,CACrC,QAAiC,EAAA;IAEjC,IAAI,OAAO,GAAG,EAAE,CAAC;AACjB,IAAA,IACE,CAAC,CAAC,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,KAAK,CAAC;QACzD,QAAQ,CAAC,cAAc,EACvB;QACA,OAAO,IAAI,sBAAsB,CAAC;AAClC,QAAA,IAAI,QAAQ,CAAC,cAAc,EAAE,WAAW,EAAE;YACxC,OAAO,IAAI,WAAW,QAAQ,CAAC,cAAc,CAAC,WAAW,EAAE,CAAC;SAC7D;AACD,QAAA,IAAI,QAAQ,CAAC,cAAc,EAAE,kBAAkB,EAAE;YAC/C,OAAO,IAAI,KAAK,QAAQ,CAAC,cAAc,CAAC,kBAAkB,EAAE,CAAC;SAC9D;KACF;SAAM,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,EAAE;QACnC,MAAM,cAAc,GAAG,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC;AAC9C,QAAA,IAAI,kBAAkB,CAAC,cAAc,CAAC,EAAE;AACtC,YAAA,OAAO,IAAI,CAAgC,6BAAA,EAAA,cAAc,CAAC,YAAY,EAAE,CAAC;AACzE,YAAA,IAAI,cAAc,CAAC,aAAa,EAAE;AAChC,gBAAA,OAAO,IAAI,CAAK,EAAA,EAAA,cAAc,CAAC,aAAa,EAAE,CAAC;aAChD;SACF;KACF;AACD,IAAA,OAAO,OAAO,CAAC;AACjB,CAAC;AAED;;;;;;AAMG;AACI,eAAe,qBAAqB,CAEzC,QAAkB,EAAA;AAClB,IAAA,MAAM,YAAY,GAA2B,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;IAEnE,MAAM,MAAM,GAAQ,EAAE,CAAC;IACvB,IAAI,cAAc,GAAuB,SAAS,CAAC;;AAGnD,IAAA,IAAI,CAAC,YAAY,CAAC,WAAW,IAAI,YAAY,CAAC,WAAW,EAAE,MAAM,KAAK,CAAC,EAAE;QACvE,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,wKAAwK,CACzK,CAAC;KACH;AAED,IAAA,KAAK,MAAM,UAAU,IAAI,YAAY,CAAC,WAAW,EAAE;AACjD,QAAA,IAAI,UAAU,CAAC,iBAAiB,EAAE;AAChC,YAAA,cAAc,GAAG,UAAU,CAAC,iBAAiB,CAAC;SAC/C;aAAM,IAAI,UAAU,CAAC,QAAQ,IAAI,UAAU,CAAC,kBAAkB,EAAE;YAC/D,MAAM,CAAC,IAAI,CAAC;gBACV,QAAQ,EAAE,UAAU,CAAC,QAAQ;gBAC7B,kBAAkB,EAAE,UAAU,CAAC,kBAAkB;AAC7C,aAAA,CAAC,CAAC;SACT;aAAM,IAAI,UAAU,CAAC,QAAQ,IAAI,UAAU,CAAC,MAAM,EAAE;YACnD,MAAM,CAAC,IAAI,CAAC;gBACV,QAAQ,EAAE,UAAU,CAAC,QAAQ;gBAC7B,MAAM,EAAE,UAAU,CAAC,MAAM;AACrB,aAAA,CAAC,CAAC;SACT;AAAM,aAAA,IAAI,UAAU,CAAC,gBAAgB,EAAE,CAEvC;aAAM;AACL,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAA,wDAAA,EAA2D,IAAI,CAAC,SAAS,CACvE,UAAU,CACX,CAAA,CAAA,CAAG,CACL,CAAC;SACH;KACF;AAED,IAAA,OAAO,EAAE,MAAM,EAAE,cAAc,EAAE,CAAC;AACpC;;ACzTA;;;;;;;;;;;;;;;AAeG;AAsBH;;;;;;;;;;AAUG;AAEH;;;;;;;;;AASG;AACG,SAAU,yBAAyB,CACvC,sBAA8C,EAAA;AAE9C,IAAA,sBAAsB,CAAC,cAAc,EAAE,OAAO,CAAC,aAAa,IAAG;AAC7D,QAAA,IAAI,aAAa,CAAC,MAAM,EAAE;YACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,qGAAqG,CACtG,CAAC;SACH;AACH,KAAC,CAAC,CAAC;AAEH,IAAA,IAAI,sBAAsB,CAAC,gBAAgB,EAAE,IAAI,EAAE;AACjD,QAAA,MAAM,WAAW,GAAG,IAAI,CAAC,KAAK,CAC5B,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,CAC7C,CAAC;QAEF,IAAI,WAAW,KAAK,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,EAAE;AAChE,YAAA,MAAM,CAAC,IAAI,CACT,gIAAgI,CACjI,CAAC;AACF,YAAA,sBAAsB,CAAC,gBAAgB,CAAC,IAAI,GAAG,WAAW,CAAC;SAC5D;KACF;AAED,IAAA,OAAO,sBAAsB,CAAC;AAChC,CAAC;AAED;;;;;;;;AAQG;AACG,SAAU,0BAA0B,CACxC,gBAAiD,EAAA;AAEjD,IAAA,MAAM,uBAAuB,GAAG;QAC9B,UAAU,EAAE,gBAAgB,CAAC,UAAU;AACrC,cAAE,4BAA4B,CAAC,gBAAgB,CAAC,UAAU,CAAC;AAC3D,cAAE,SAAS;QACb,MAAM,EAAE,gBAAgB,CAAC,cAAc;AACrC,cAAE,iBAAiB,CAAC,gBAAgB,CAAC,cAAc,CAAC;AACpD,cAAE,SAAS;QACb,aAAa,EAAE,gBAAgB,CAAC,aAAa;KAC9C,CAAC;AAEF,IAAA,OAAO,uBAA
uB,CAAC;AACjC,CAAC;AAED;;;;;;;;AAQG;AACa,SAAA,qBAAqB,CACnC,kBAAsC,EACtC,KAAa,EAAA;AAEb,IAAA,MAAM,wBAAwB,GAA+B;AAC3D,QAAA,sBAAsB,EAAE;YACtB,KAAK;AACL,YAAA,GAAG,kBAAkB;AACtB,SAAA;KACF,CAAC;AAEF,IAAA,OAAO,wBAAwB,CAAC;AAClC,CAAC;AAED;;;;;;;;;;AAUG;AACG,SAAU,4BAA4B,CAC1C,UAA8C,EAAA;IAE9C,MAAM,gBAAgB,GAA+B,EAAE,CAAC;AACxD,IAAA,IAAI,mBAAmC,CAAC;IACxC,IAAI,gBAAgB,EAAE;AACpB,QAAA,UAAU,CAAC,OAAO,CAAC,SAAS,IAAG;;AAE7B,YAAA,IAAI,gBAA8C,CAAC;AACnD,YAAA,IAAI,SAAS,CAAC,gBAAgB,EAAE;AAC9B,gBAAA,gBAAgB,GAAG;AACjB,oBAAA,SAAS,EAAE,SAAS,CAAC,gBAAgB,CAAC,eAAe;iBACtD,CAAC;aACH;;AAGD,YAAA,IAAI,SAAS,CAAC,aAAa,EAAE;gBAC3B,mBAAmB,GAAG,SAAS,CAAC,aAAa,CAAC,GAAG,CAAC,YAAY,IAAG;oBAC/D,OAAO;AACL,wBAAA,GAAG,YAAY;AACf,wBAAA,QAAQ,EACN,YAAY,CAAC,QAAQ,IAAI,YAAY,CAAC,yBAAyB;AACjE,wBAAA,gBAAgB,EAAE,YAAY,CAAC,gBAAgB,IAAI,CAAC;AACpD,wBAAA,aAAa,EAAE,YAAY,CAAC,aAAa,IAAI,CAAC;qBAC/C,CAAC;AACJ,iBAAC,CAAC,CAAC;aACJ;;;;AAKD,YAAA,IACE,SAAS,CAAC,OAAO,EAAE,KAAK,EAAE,IAAI,CAC5B,IAAI,IAAK,IAAuB,EAAE,aAAa,CAChD,EACD;gBACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,+FAA+F,CAChG,CAAC;aACH;AAED,YAAA,MAAM,eAAe,GAAG;gBACtB,KAAK,EAAE,SAAS,CAAC,KAAK;gBACtB,OAAO,EAAE,SAAS,CAAC,OAAO;gBAC1B,YAAY,EAAE,SAAS,CAAC,YAAY;gBACpC,aAAa,EAAE,SAAS,CAAC,aAAa;AACtC,gBAAA,aAAa,EAAE,mBAAmB;gBAClC,gBAAgB;gBAChB,iBAAiB,EAAE,SAAS,CAAC,iBAAiB;gBAC9C,kBAAkB,EAAE,SAAS,CAAC,kBAAkB;aACjD,CAAC;AACF,YAAA,gBAAgB,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;AACzC,SAAC,CAAC,CAAC;KACJ;AAED,IAAA,OAAO,gBAAgB,CAAC;AAC1B,CAAC;AAEK,SAAU,iBAAiB,CAC/B,cAA8B,EAAA;;IAG9B,MAAM,mBAAmB,GAAmB,EAAE,CAAC;AAC/C,IAAA,cAAc,CAAC,aAAa,CAAC,OAAO,CAAC,YAAY,IAAG;QAClD,mBAAmB,CAAC,IAAI,CAAC;YACvB,QAAQ,EAAE,YAAY,CAAC,QAAQ;YAC/B,WAAW,EAAE,YAAY,CAAC,WAAW;AACrC,YAAA,QAAQ,EAAE,YAAY,CAAC,QAAQ,IAAI,YAAY,CAAC,yBAAyB;AACzE,YAAA,gBAAgB,EAAE,YAAY,CAAC,gBAAgB,IAAI,CAAC;AACpD,YAAA,aAAa,EAAE,YAAY,CAAC,aAAa,IAAI,CAAC;YAC9C,OAAO,EAAE,YAAY,CAAC,OAAO;AAC9B,SAAA,CAAC,CAAC;AACL,KAAC,CAAC,CAAC;AAEH,IAAA,MAAM,oBAAoB,GAAmB;QAC3C,WAAW,EAAE,cAAc,CAAC,WAAW;AACvC,QAAA,aAAa,EAAE,mBAAmB;QAClC,kBAAkB,EAAE,cAAc,CAAC,kBAAkB;KACtD,CAAC;AACF,IAAA,OAAO,oBAAoB,CAAC;AAC9B;;ACnOA;;;;;;;;;;;;;;;AAeG;AAqBH,MAAM,cAAc,GAAG,oCAAoC,CAAC;AAE5D;;;;;;;AAOG;SACa,aAAa,CAC3B,QAAkB,EAClB,WAAwB,EACxB,eAAiC,EAAA;IAEjC,MAAM,WAAW,GAAG,QAAQ,CAAC,IAAK,CAAC,WAAW,CAC5C,IAAI,iBAAiB,CAAC,MAAM,EAAE,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAC/C,CAAC;AACF,IAAA,MAAM,cAAc,GAClB,iBAAiB,CAA0B,WAAW,CAAC,CAAC;IAC1D,MAAM,CAAC,OAAO,EAAE,OAAO,CAAC,GAAG,cAAc,CAAC,GAAG,EAAE,CAAC;IAChD,OAAO;QACL,MAAM,EAAE,wBAAwB,CAAC,OAAO,EAAE,WAAW,EAAE,eAAe,CAAC;QACvE,QAAQ,EAAE,kBAAkB,CAAC,OAAO,EAAE,WAAW,EAAE,eAAe,CAAC;KACpE,CAAC;AACJ,CAAC;AAED,eAAe,kBAAkB,CAC/B,MAA+C,EAC/C,WAAwB,EACxB,eAAiC,EAAA;IAEjC,MAAM,YAAY,GAA8B,EAAE,CAAC;AACnD,IAAA,MAAM,MAAM,GAAG,MAAM,CAAC,SAAS,EAAE,CAAC;IAClC,OAAO,IAAI,EAAE;QACX,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QAC5C,IAAI,IAAI,EAAE;AACR,YAAA,IAAI,uBAAuB,GAAG,kBAAkB,CAAC,YAAY,CAAC,CAAC;YAC/D,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,gBAAA,uBAAuB,GAAGC,0BAAyC,CACjE,uBAA0D,CAC3D,CAAC;aACH;AACD,YAAA,OAAO,6BAA6B,CAClC,uBAAuB,EACvB,eAAe,CAChB,CAAC;SACH;AAED,QAAA,YAAY,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;KAC1B;AACH,CAAC;AAED,gBAAgB,wBAAwB,CACtC,MAA+C,EAC/C,WAAwB,EACxB,eAAiC,EAAA;AAEjC,IAAA,MAAM,MAAM,GAAG,MAAM,CAAC,SAAS,EAAE,CAAC;IAClC,OAAO,IAAI,EAAE;QACX,MAAM,EAAE,KAAK,EAAE,IAAI,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QAC5C,IAAI,IAAI,EAAE;YACR,MAAM;SACP;AAED,QAAA,IAAI,gBAAiD,CAAC;QACtD,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,YAAA,gBAAgB,GAAG,6BAA6B,CAC9CA,0BAAyC,CACvC,KAAwC,CACzC,EACD,eAAe,CAChB,CAAC;SACH;aAAM;AACL,YAAA,gBAAgB,GAAG,6BAA6B,
CAAC,KAAK,EAAE,eAAe,CAAC,CAAC;SAC1E;QAED,MAAM,cAAc,GAAG,gBAAgB,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;;AAExD,QAAA,IACE,CAAC,cAAc,EAAE,OAAO,EAAE,KAAK;YAC/B,CAAC,cAAc,EAAE,YAAY;YAC7B,CAAC,cAAc,EAAE,gBAAgB;AACjC,YAAA,CAAC,cAAc,EAAE,kBAAkB,EACnC;YACA,SAAS;SACV;AAED,QAAA,MAAM,gBAAgB,CAAC;KACxB;AACH,CAAC;AAED;;;;AAIG;AACG,SAAU,iBAAiB,CAC/B,WAAmC,EAAA;AAEnC,IAAA,MAAM,MAAM,GAAG,WAAW,CAAC,SAAS,EAAE,CAAC;AACvC,IAAA,MAAM,MAAM,GAAG,IAAI,cAAc,CAAI;AACnC,QAAA,KAAK,CAAC,UAAU,EAAA;YACd,IAAI,WAAW,GAAG,EAAE,CAAC;YACrB,OAAO,IAAI,EAAE,CAAC;AACd,YAAA,SAAS,IAAI,GAAA;AACX,gBAAA,OAAO,MAAM,CAAC,IAAI,EAAE,CAAC,IAAI,CAAC,CAAC,EAAE,KAAK,EAAE,IAAI,EAAE,KAAI;oBAC5C,IAAI,IAAI,EAAE;AACR,wBAAA,IAAI,WAAW,CAAC,IAAI,EAAE,EAAE;AACtB,4BAAA,UAAU,CAAC,KAAK,CACd,IAAI,OAAO,CAAC,WAAW,CAAC,YAAY,EAAE,wBAAwB,CAAC,CAChE,CAAC;4BACF,OAAO;yBACR;wBACD,UAAU,CAAC,KAAK,EAAE,CAAC;wBACnB,OAAO;qBACR;oBAED,WAAW,IAAI,KAAK,CAAC;oBACrB,IAAI,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC;AAC9C,oBAAA,IAAI,cAAiB,CAAC;oBACtB,OAAO,KAAK,EAAE;AACZ,wBAAA,IAAI;4BACF,cAAc,GAAG,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;yBACvC;wBAAC,OAAO,CAAC,EAAE;AACV,4BAAA,UAAU,CAAC,KAAK,CACd,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,CAAA,8BAAA,EAAiC,KAAK,CAAC,CAAC,CAAC,CAAE,CAAA,CAC5C,CACF,CAAC;4BACF,OAAO;yBACR;AACD,wBAAA,UAAU,CAAC,OAAO,CAAC,cAAc,CAAC,CAAC;AACnC,wBAAA,WAAW,GAAG,WAAW,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;AACrD,wBAAA,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC;qBAC3C;oBACD,OAAO,IAAI,EAAE,CAAC;AAChB,iBAAC,CAAC,CAAC;aACJ;SACF;AACF,KAAA,CAAC,CAAC;AACH,IAAA,OAAO,MAAM,CAAC;AAChB,CAAC;AAED;;;AAGG;AACG,SAAU,kBAAkB,CAChC,SAAoC,EAAA;IAEpC,MAAM,YAAY,GAAG,SAAS,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;AACrD,IAAA,MAAM,kBAAkB,GAA4B;QAClD,cAAc,EAAE,YAAY,EAAE,cAAc;KAC7C,CAAC;AACF,IAAA,KAAK,MAAM,QAAQ,IAAI,SAAS,EAAE;AAChC,QAAA,IAAI,QAAQ,CAAC,UAAU,EAAE;AACvB,YAAA,KAAK,MAAM,SAAS,IAAI,QAAQ,CAAC,UAAU,EAAE;;;AAG3C,gBAAA,MAAM,CAAC,GAAG,SAAS,CAAC,KAAK,IAAI,CAAC,CAAC;AAC/B,gBAAA,IAAI,CAAC,kBAAkB,CAAC,UAAU,EAAE;AAClC,oBAAA,kBAAkB,CAAC,UAAU,GAAG,EAAE,CAAC;iBACpC;gBACD,IAAI,CAAC,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE;AACrC,oBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,GAAG;wBACjC,KAAK,EAAE,SAAS,CAAC,KAAK;qBACK,CAAC;iBAC/B;;AAED,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,gBAAgB;oBAC/C,SAAS,CAAC,gBAAgB,CAAC;gBAC7B,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,YAAY,GAAG,SAAS,CAAC,YAAY,CAAC;AACvE,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,aAAa;oBAC5C,SAAS,CAAC,aAAa,CAAC;AAC1B,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,aAAa;oBAC5C,SAAS,CAAC,aAAa,CAAC;AAC1B,gBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,iBAAiB;oBAChD,SAAS,CAAC,iBAAiB,CAAC;;;;;AAM9B,gBAAA,MAAM,kBAAkB,GAAG,SAAS,CAAC,kBAA6B,CAAC;gBACnE,IACE,OAAO,kBAAkB,KAAK,QAAQ;AACtC,oBAAA,kBAAkB,KAAK,IAAI;oBAC3B,MAAM,CAAC,IAAI,CAAC,kBAAkB,CAAC,CAAC,MAAM,GAAG,CAAC,EAC1C;AACA,oBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,kBAAkB;AACjD,wBAAA,kBAAwC,CAAC;iBAC5C;AAED;;;AAGG;AACH,gBAAA,IAAI,SAAS,CAAC,OAAO,EAAE;;AAErB,oBAAA,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,KAAK,EAAE;wBAC5B,SAAS;qBACV;oBACD,IAAI,CAAC,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,EAAE;AAC7C,wBAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,GAAG;AACzC,4BAAA,IAAI,EAAE,SAAS,CAAC,OAAO,CAAC,IAAI,IAAI,MAAM;AACtC,4BAAA,KAAK,EAAE,EAAE;yBACV,CAAC;qBACH;oBACD,KAAK,MAAM,IAAI,IAAI,SAAS,CAAC,OAAO,CAAC,KAAK,EAAE;AAC1C,wBAAA,MAAM,OAAO,GAAS,EAAE,GAAG,IAAI,EAAE,CAAC;;;;AAIlC,wBAAA,IAAI,IAAI,CAAC,IAAI,KAAK,EAAE,EAAE;4BACpB,SAAS;yBACV;wBACD,IAAI,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;AACnC,4BAAA,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CACjD,OAAe,CAChB,CAAC;yBACH;qBACF;iBACF;aACF;SACF;KACF;AACD,IAA
A,OAAO,kBAAkB,CAAC;AAC5B;;ACzQA;;;;;;;;;;;;;;;AAeG;AAYH,MAAM,qBAAqB,GAAkB;;AAE3C,IAAA,WAAW,CAAC,WAAW;;AAEvB,IAAA,WAAW,CAAC,KAAK;;AAEjB,IAAA,WAAW,CAAC,eAAe;CAC5B,CAAC;AAOF;;;;;;;;;AASG;AACI,eAAe,iBAAiB,CACrC,OAA+B,EAC/B,aAAwC,EACxC,YAAqC,EACrC,WAAoC,EAAA;IAEpC,IAAI,CAAC,aAAa,EAAE;QAClB,OAAO;YACL,QAAQ,EAAE,MAAM,WAAW,EAAE;YAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;SAC1C,CAAC;KACH;AACD,IAAA,QAAS,aAAmC,CAAC,IAAI;QAC/C,KAAK,aAAa,CAAC,cAAc;YAC/B,IAAI,MAAM,aAAa,CAAC,WAAW,CAAC,OAAO,CAAC,EAAE;gBAC5C,OAAO;oBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;oBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;iBAC3C,CAAC;aACH;YACD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,4EAA4E,CAC7E,CAAC;QACJ,KAAK,aAAa,CAAC,aAAa;YAC9B,OAAO;gBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;gBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;aAC1C,CAAC;QACJ,KAAK,aAAa,CAAC,eAAe;AAChC,YAAA,IAAI;gBACF,OAAO;oBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;oBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;iBAC1C,CAAC;aACH;YAAC,OAAO,CAAC,EAAE;AACV,gBAAA,IAAI,CAAC,YAAY,OAAO,IAAI,qBAAqB,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE;oBAClE,OAAO;wBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;wBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;qBAC3C,CAAC;iBACH;AACD,gBAAA,MAAM,CAAC,CAAC;aACT;QACH,KAAK,aAAa,CAAC,gBAAgB;YACjC,IAAI,MAAM,aAAa,CAAC,WAAW,CAAC,OAAO,CAAC,EAAE;gBAC5C,OAAO;oBACL,QAAQ,EAAE,MAAM,YAAY,EAAE;oBAC9B,eAAe,EAAE,eAAe,CAAC,SAAS;iBAC3C,CAAC;aACH;YACD,OAAO;gBACL,QAAQ,EAAE,MAAM,WAAW,EAAE;gBAC7B,eAAe,EAAE,eAAe,CAAC,QAAQ;aAC1C,CAAC;AACJ,QAAA;AACE,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,6BAAA,EACG,aAAmC,CAAC,IACvC,CAAA,CAAE,CACH,CAAC;KACL;AACH;;AClHA;;;;;;;;;;;;;;;AAeG;AAkBH,eAAe,4BAA4B,CACzC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,cAA+B,EAAA;IAE/B,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,MAAM,GAAGC,yBAAwC,CAAC,MAAM,CAAC,CAAC;KAC3D;IACD,OAAO,WAAW,CAChB,KAAK,EACL,IAAI,CAAC,uBAAuB,EAC5B,WAAW;AACX,iBAAa,IAAI,EACjB,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,EACtB,cAAc,CACf,CAAC;AACJ,CAAC;AAEM,eAAe,qBAAqB,CACzC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,aAA6B,EAC7B,cAA+B,EAAA;AAE/B,IAAA,MAAM,UAAU,GAAG,MAAM,iBAAiB,CACxC,MAAM,EACN,aAAa,EACb,MAAM,aAAc,CAAC,qBAAqB,CAAC,MAAM,CAAC,EAClD,MACE,4BAA4B,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CAC3E,CAAC;IACF,OAAO,aAAa,CAAC,UAAU,CAAC,QAAQ,EAAE,WAAW,CAAC,CAAC;AACzD,CAAC;AAED,eAAe,sBAAsB,CACnC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,cAA+B,EAAA;IAE/B,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,MAAM,GAAGA,yBAAwC,CAAC,MAAM,CAAC,CAAC;KAC3D;IACD,OAAO,WAAW,CAChB,KAAK,EACL,IAAI,CAAC,gBAAgB,EACrB,WAAW;AACX,iBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,EACtB,cAAc,CACf,CAAC;AACJ,CAAC;AAEM,eAAe,eAAe,CACnC,WAAwB,EACxB,KAAa,EACb,MAA8B,EAC9B,aAA6B,EAC7B,cAA+B,EAAA;AAE/B,IAAA,MAAM,UAAU,GAAG,MAAM,iBAAiB,CACxC,MAAM,EACN,aAAa,EACb,MAAM,aAAc,CAAC,eAAe,CAAC,MAAM,CAAC,EAC5C,MAAM,sBAAsB,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CACzE,CAAC;IACF,MAAM,uBAAuB,GAAG,MAAM,8BAA8B,CAClE,UAAU,CAAC,QAAQ,EACnB,WAAW,CACZ,CAAC;IACF,MAAM,gBAAgB,GAAG,6BAA6B,CACpD,uBAAuB,EACvB,UAAU,CAAC,eAAe,CAC3B,CAAC;IACF,OAAO;AACL,QAAA,QAAQ,EAAE,gBAAgB;KAC3B,CAAC;AACJ,CAAC;AAED,eAAe,8BAA8B,CAC3C,QAAkB,EAClB,WAAwB,EAAA;AAExB,IAAA,MAAM,YAAY,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;IAC3C,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AAC7D,QAAA,OAAOD,0BAAyC,CAAC,YAAY,CAAC,CAAC;KAChE;SAAM;AACL,QAAA,OAAO,YAAY,CAAC;KACrB;AACH;;AC5HA;;;;;;;;;;;;;;;AAeG;AAMG,SAAU,uBAAuB,CACrC,KAA+B,EAAA;;AAG/B,IAAA,IAAI,KAAK,IAAI,IAAI,EAAE;AACjB,QAAA,OAAO,SAAS,CAAC;KAClB;AAAM,SAAA,IAAI,OAAO,KAAK,KAAK,QAAQ,EAAE;AACpC,QAAA,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,CAAC,EAAa,CAAC;KAChE;AAAM,SAAA,IAAK,KAAc,CAAC,IAAI,EAAE;QAC/B,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC,KAAa,C
AAC,EAAE,CAAC;KACnD;AAAM,SAAA,IAAK,KAAiB,CAAC,KAAK,EAAE;AACnC,QAAA,IAAI,CAAE,KAAiB,CAAC,IAAI,EAAE;YAC5B,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAG,KAAiB,CAAC,KAAK,EAAE,CAAC;SAC5D;aAAM;AACL,YAAA,OAAO,KAAgB,CAAC;SACzB;KACF;AACH,CAAC;AAEK,SAAU,gBAAgB,CAC9B,OAAsC,EAAA;IAEtC,IAAI,QAAQ,GAAW,EAAE,CAAC;AAC1B,IAAA,IAAI,OAAO,OAAO,KAAK,QAAQ,EAAE;QAC/B,QAAQ,GAAG,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC;KAChC;SAAM;AACL,QAAA,KAAK,MAAM,YAAY,IAAI,OAAO,EAAE;AAClC,YAAA,IAAI,OAAO,YAAY,KAAK,QAAQ,EAAE;gBACpC,QAAQ,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;aACvC;iBAAM;AACL,gBAAA,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;aAC7B;SACF;KACF;AACD,IAAA,OAAO,8CAA8C,CAAC,QAAQ,CAAC,CAAC;AAClE,CAAC;AAED;;;;;;;AAOG;AACH,SAAS,8CAA8C,CACrD,KAAa,EAAA;IAEb,MAAM,WAAW,GAAY,EAAE,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,EAAE,EAAE,CAAC;IACzD,MAAM,eAAe,GAAY,EAAE,IAAI,EAAE,UAAU,EAAE,KAAK,EAAE,EAAE,EAAE,CAAC;IACjE,IAAI,cAAc,GAAG,KAAK,CAAC;IAC3B,IAAI,kBAAkB,GAAG,KAAK,CAAC;AAC/B,IAAA,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;AACxB,QAAA,IAAI,kBAAkB,IAAI,IAAI,EAAE;AAC9B,YAAA,eAAe,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YACjC,kBAAkB,GAAG,IAAI,CAAC;SAC3B;aAAM;AACL,YAAA,WAAW,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YAC7B,cAAc,GAAG,IAAI,CAAC;SACvB;KACF;AAED,IAAA,IAAI,cAAc,IAAI,kBAAkB,EAAE;QACxC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,4HAA4H,CAC7H,CAAC;KACH;AAED,IAAA,IAAI,CAAC,cAAc,IAAI,CAAC,kBAAkB,EAAE;QAC1C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,kDAAkD,CACnD,CAAC;KACH;IAED,IAAI,cAAc,EAAE;AAClB,QAAA,OAAO,WAAW,CAAC;KACpB;AAED,IAAA,OAAO,eAAe,CAAC;AACzB,CAAC;AAEK,SAAU,0BAA0B,CACxC,MAA8D,EAAA;AAE9D,IAAA,IAAI,gBAAwC,CAAC;AAC7C,IAAA,IAAK,MAAiC,CAAC,QAAQ,EAAE;QAC/C,gBAAgB,GAAG,MAAgC,CAAC;KACrD;SAAM;;AAEL,QAAA,MAAM,OAAO,GAAG,gBAAgB,CAAC,MAAuC,CAAC,CAAC;QAC1E,gBAAgB,GAAG,EAAE,QAAQ,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC;KAC5C;AACD,IAAA,IAAK,MAAiC,CAAC,iBAAiB,EAAE;QACxD,gBAAgB,CAAC,iBAAiB,GAAG,uBAAuB,CACzD,MAAiC,CAAC,iBAAiB,CACrD,CAAC;KACH;AACD,IAAA,OAAO,gBAAgB,CAAC;AAC1B,CAAC;AAED;;;;;AAKG;AACG,SAAU,wBAAwB,CACtC,MAAc,EACd,EACE,MAAM,EACN,WAAW,EACX,YAAY,EACZ,cAAc,GAAG,CAAC,EAClB,cAAc,EACd,WAAW,EACX,iBAAiB,EACjB,iBAAiB,EACM,EAAA;;AAGzB,IAAA,MAAM,IAAI,GAAuB;AAC/B,QAAA,SAAS,EAAE;AACT,YAAA;gBACE,MAAM;AACP,aAAA;AACF,SAAA;AACD,QAAA,UAAU,EAAE;AACV,YAAA,UAAU,EAAE,MAAM;YAClB,cAAc;AACd,YAAA,WAAW,EAAE,cAAc;YAC3B,WAAW;AACX,YAAA,aAAa,EAAE,WAAW;YAC1B,YAAY;YACZ,iBAAiB;AACjB,YAAA,gBAAgB,EAAE,iBAAiB;AACnC,YAAA,gBAAgB,EAAE,IAAI;AACtB,YAAA,uBAAuB,EAAE,IAAI;AAC9B,SAAA;KACF,CAAC;AACF,IAAA,OAAO,IAAI,CAAC;AACd;;ACnKA;;;;;;;;;;;;;;;AAeG;AAKH;AAEA,MAAM,iBAAiB,GAAsB;IAC3C,MAAM;IACN,YAAY;IACZ,cAAc;IACd,kBAAkB;IAClB,SAAS;IACT,kBAAkB;CACnB,CAAC;AAEF,MAAM,oBAAoB,GAAyC;AACjE,IAAA,IAAI,EAAE,CAAC,MAAM,EAAE,YAAY,CAAC;IAC5B,QAAQ,EAAE,CAAC,kBAAkB,CAAC;IAC9B,KAAK,EAAE,CAAC,MAAM,EAAE,cAAc,EAAE,SAAS,EAAE,kBAAkB,CAAC;;IAE9D,MAAM,EAAE,CAAC,MAAM,CAAC;CACjB,CAAC;AAEF,MAAM,4BAA4B,GAA8B;IAC9D,IAAI,EAAE,CAAC,OAAO,CAAC;IACf,QAAQ,EAAE,CAAC,OAAO,CAAC;AACnB,IAAA,KAAK,EAAE,CAAC,MAAM,EAAE,UAAU,CAAC;;AAE3B,IAAA,MAAM,EAAE,EAAE;CACX,CAAC;AAEI,SAAU,mBAAmB,CAAC,OAAkB,EAAA;IACpD,IAAI,WAAW,GAAmB,IAAI,CAAC;AACvC,IAAA,KAAK,MAAM,WAAW,IAAI,OAAO,EAAE;AACjC,QAAA,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,WAAW,CAAC;AACpC,QAAA,IAAI,CAAC,WAAW,IAAI,IAAI,KAAK,MAAM,EAAE;YACnC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAiD,8CAAA,EAAA,IAAI,CAAE,CAAA,CACxD,CAAC;SACH;QACD,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,IAAI,CAAC,EAAE;AAClC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA4C,yCAAA,EAAA,IAAI,CAAyB,sBAAA,EAAA,IAAI,CAAC,SAAS,CACrF,cAAc,CACf,CAAA,CAAE,CACJ,CAAC;SACH;QAED,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE;YACzB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA6D,2DAAA,CA
AA,CAC9D,CAAC;SACH;AAED,QAAA,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;YACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAA4C,0CAAA,CAAA,CAC7C,CAAC;SACH;AAED,QAAA,MAAM,WAAW,GAA+B;AAC9C,YAAA,IAAI,EAAE,CAAC;AACP,YAAA,UAAU,EAAE,CAAC;AACb,YAAA,YAAY,EAAE,CAAC;AACf,YAAA,gBAAgB,EAAE,CAAC;AACnB,YAAA,OAAO,EAAE,CAAC;AACV,YAAA,gBAAgB,EAAE,CAAC;AACnB,YAAA,cAAc,EAAE,CAAC;AACjB,YAAA,mBAAmB,EAAE,CAAC;SACvB,CAAC;AAEF,QAAA,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;AACxB,YAAA,KAAK,MAAM,GAAG,IAAI,iBAAiB,EAAE;AACnC,gBAAA,IAAI,GAAG,IAAI,IAAI,EAAE;AACf,oBAAA,WAAW,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;iBACvB;aACF;SACF;AACD,QAAA,MAAM,UAAU,GAAG,oBAAoB,CAAC,IAAI,CAAC,CAAC;AAC9C,QAAA,KAAK,MAAM,GAAG,IAAI,iBAAiB,EAAE;AACnC,YAAA,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;AACrD,gBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAA,mBAAA,EAAsB,IAAI,CAAA,iBAAA,EAAoB,GAAG,CAAA,MAAA,CAAQ,CAC1D,CAAC;aACH;SACF;QAED,IAAI,WAAW,EAAE;AACf,YAAA,MAAM,yBAAyB,GAAG,4BAA4B,CAAC,IAAI,CAAC,CAAC;YACrE,IAAI,CAAC,yBAAyB,CAAC,QAAQ,CAAC,WAAW,CAAC,IAAI,CAAC,EAAE;gBACzD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,eAAe,EAC3B,CAAsB,mBAAA,EAAA,IAAI,CACxB,gBAAA,EAAA,WAAW,CAAC,IACd,CAAA,yBAAA,EAA4B,IAAI,CAAC,SAAS,CACxC,4BAA4B,CAC7B,CAAE,CAAA,CACJ,CAAC;aACH;SACF;QACD,WAAW,GAAG,WAAW,CAAC;KAC3B;AACH;;AC3HA;;;;;;;;;;;;;;;AAeG;AAmBH;;AAEG;AACH,MAAM,YAAY,GAAG,cAAc,CAAC;AAEpC;;;;;AAKG;MACU,WAAW,CAAA;IAKtB,WACE,CAAA,WAAwB,EACjB,KAAa,EACZ,aAA6B,EAC9B,MAAwB,EACxB,cAA+B,EAAA;QAH/B,IAAK,CAAA,KAAA,GAAL,KAAK,CAAQ;QACZ,IAAa,CAAA,aAAA,GAAb,aAAa,CAAgB;QAC9B,IAAM,CAAA,MAAA,GAAN,MAAM,CAAkB;QACxB,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;QARhC,IAAQ,CAAA,QAAA,GAAc,EAAE,CAAC;AACzB,QAAA,IAAA,CAAA,YAAY,GAAkB,OAAO,CAAC,OAAO,EAAE,CAAC;AAStD,QAAA,IAAI,CAAC,YAAY,GAAG,WAAW,CAAC;AAChC,QAAA,IAAI,MAAM,EAAE,OAAO,EAAE;AACnB,YAAA,mBAAmB,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;AACpC,YAAA,IAAI,CAAC,QAAQ,GAAG,MAAM,CAAC,OAAO,CAAC;SAChC;KACF;AAED;;;;AAIG;AACH,IAAA,MAAM,UAAU,GAAA;QACd,MAAM,IAAI,CAAC,YAAY,CAAC;QACxB,OAAO,IAAI,CAAC,QAAQ,CAAC;KACtB;AAED;;;AAGG;IACH,MAAM,WAAW,CACf,OAAsC,EAAA;QAEtC,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAC7C,QAAA,MAAM,sBAAsB,GAA2B;AACrD,YAAA,cAAc,EAAE,IAAI,CAAC,MAAM,EAAE,cAAc;AAC3C,YAAA,gBAAgB,EAAE,IAAI,CAAC,MAAM,EAAE,gBAAgB;AAC/C,YAAA,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,KAAK;AACzB,YAAA,UAAU,EAAE,IAAI,CAAC,MAAM,EAAE,UAAU;AACnC,YAAA,iBAAiB,EAAE,IAAI,CAAC,MAAM,EAAE,iBAAiB;YACjD,QAAQ,EAAE,CAAC,GAAG,IAAI,CAAC,QAAQ,EAAE,UAAU,CAAC;SACzC,CAAC;QACF,IAAI,WAAW,GAAG,EAA2B,CAAC;;AAE9C,QAAA,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY;aAClC,IAAI,CAAC,MACJ,eAAe,CACb,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,sBAAsB,EACtB,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CACF;aACA,IAAI,CAAC,MAAM,IAAG;AACb,YAAA,IACE,MAAM,CAAC,QAAQ,CAAC,UAAU;gBAC1B,MAAM,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EACrC;AACA,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;AAC/B,gBAAA,MAAM,eAAe,GAAY;AAC/B,oBAAA,KAAK,EAAE,MAAM,CAAC,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,KAAK,IAAI,EAAE;;AAE1D,oBAAA,IAAI,EAAE,MAAM,CAAC,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,IAAI,OAAO;iBAC9D,CAAC;AACF,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;aACrC;iBAAM;gBACL,MAAM,iBAAiB,GAAG,uBAAuB,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC;gBACnE,IAAI,iBAAiB,EAAE;AACrB,oBAAA,MAAM,CAAC,IAAI,CACT,mCAAmC,iBAAiB,CAAA,sCAAA,CAAwC,CAC7F,CAAC;iBACH;aACF;YACD,WAAW,GAAG,MAAM,CAAC;AACvB,SAAC,CAAC,CAAC;QACL,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,OAAO,WAAW,CAAC;KACpB;AAED;;;;AAIG;IACH,MAAM,iBAAiB,CACrB,OAAsC,EAAA;QAEtC,MAAM,IAAI,CAAC,YAAY,CAAC;AACxB,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAC7C,QAAA,MAAM,sBAAsB,GAA2B;AACrD,YAAA,cAAc,EAAE,IAAI
,CAAC,MAAM,EAAE,cAAc;AAC3C,YAAA,gBAAgB,EAAE,IAAI,CAAC,MAAM,EAAE,gBAAgB;AAC/C,YAAA,KAAK,EAAE,IAAI,CAAC,MAAM,EAAE,KAAK;AACzB,YAAA,UAAU,EAAE,IAAI,CAAC,MAAM,EAAE,UAAU;AACnC,YAAA,iBAAiB,EAAE,IAAI,CAAC,MAAM,EAAE,iBAAiB;YACjD,QAAQ,EAAE,CAAC,GAAG,IAAI,CAAC,QAAQ,EAAE,UAAU,CAAC;SACzC,CAAC;QACF,MAAM,aAAa,GAAG,qBAAqB,CACzC,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,sBAAsB,EACtB,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;;AAGF,QAAA,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY;AAClC,aAAA,IAAI,CAAC,MAAM,aAAa,CAAC;;;aAGzB,KAAK,CAAC,QAAQ,IAAG;AAChB,YAAA,MAAM,IAAI,KAAK,CAAC,YAAY,CAAC,CAAC;AAChC,SAAC,CAAC;aACD,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,QAAQ,CAAC;aAC3C,IAAI,CAAC,QAAQ,IAAG;AACf,YAAA,IAAI,QAAQ,CAAC,UAAU,IAAI,QAAQ,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzD,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;AAC/B,gBAAA,MAAM,eAAe,GAAG,EAAE,GAAG,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,OAAO,EAAE,CAAC;;AAE9D,gBAAA,IAAI,CAAC,eAAe,CAAC,IAAI,EAAE;AACzB,oBAAA,eAAe,CAAC,IAAI,GAAG,OAAO,CAAC;iBAChC;AACD,gBAAA,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;aACrC;iBAAM;AACL,gBAAA,MAAM,iBAAiB,GAAG,uBAAuB,CAAC,QAAQ,CAAC,CAAC;gBAC5D,IAAI,iBAAiB,EAAE;AACrB,oBAAA,MAAM,CAAC,IAAI,CACT,yCAAyC,iBAAiB,CAAA,sCAAA,CAAwC,CACnG,CAAC;iBACH;aACF;AACH,SAAC,CAAC;aACD,KAAK,CAAC,CAAC,IAAG;;;;AAIT,YAAA,IAAI,CAAC,CAAC,OAAO,KAAK,YAAY,EAAE;;;AAG9B,gBAAA,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;aACjB;AACH,SAAC,CAAC,CAAC;AACL,QAAA,OAAO,aAAa,CAAC;KACtB;AACF;;AClMD;;;;;;;;;;;;;;;AAeG;AAiBI,eAAe,kBAAkB,CACtC,WAAwB,EACxB,KAAa,EACb,MAA0B,EAC1B,cAA+B,EAAA;IAE/B,IAAI,IAAI,GAAW,EAAE,CAAC;IACtB,IAAI,WAAW,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;QAC7D,MAAM,YAAY,GAAGE,qBAAoC,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;AACzE,QAAA,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,YAAY,CAAC,CAAC;KACrC;SAAM;AACL,QAAA,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC;KAC/B;AACD,IAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,KAAK,EACL,IAAI,CAAC,YAAY,EACjB,WAAW,EACX,KAAK,EACL,IAAI,EACJ,cAAc,CACf,CAAC;AACF,IAAA,OAAO,QAAQ,CAAC,IAAI,EAAE,CAAC;AACzB,CAAC;AAEM,eAAe,WAAW,CAC/B,WAAwB,EACxB,KAAa,EACb,MAA0B,EAC1B,aAA6B,EAC7B,cAA+B,EAAA;IAE/B,IACG,aAAmC,EAAE,IAAI,KAAK,aAAa,CAAC,cAAc,EAC3E;QACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,sDAAsD,CACvD,CAAC;KACH;IACD,OAAO,kBAAkB,CAAC,WAAW,EAAE,KAAK,EAAE,MAAM,EAAE,cAAc,CAAC,CAAC;AACxE;;ACxEA;;;;;;;;;;;;;;;AAeG;AAgCH;;;AAGG;AACG,MAAO,eAAgB,SAAQ,OAAO,CAAA;AAQ1C,IAAA,WAAA,CACE,EAAM,EACN,WAAwB,EACxB,cAA+B,EACvB,aAA6B,EAAA;AAErC,QAAA,KAAK,CAAC,EAAE,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;QAFrB,IAAa,CAAA,aAAA,GAAb,aAAa,CAAgB;QAGrC,IAAI,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,IAAI,EAAE,CAAC;QAC3D,IAAI,CAAC,cAAc,GAAG,WAAW,CAAC,cAAc,IAAI,EAAE,CAAC;AACvD,QAAA,IAAI,CAAC,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC;AAC/B,QAAA,IAAI,CAAC,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QACzC,IAAI,CAAC,iBAAiB,GAAG,uBAAuB,CAC9C,WAAW,CAAC,iBAAiB,CAC9B,CAAC;AACF,QAAA,IAAI,CAAC,cAAc,GAAG,cAAc,IAAI,EAAE,CAAC;KAC5C;AAED;;;AAGG;IACH,MAAM,eAAe,CACnB,OAA+D,EAAA;AAE/D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;QAC5D,OAAO,eAAe,CACpB,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV;YACE,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;YACnC,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;AACzC,YAAA,GAAG,eAAe;SACnB,EACD,IAAI,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;;;;AAKG;IACH,MAAM,qBAAqB,CACzB,OAA+D,EAAA;AAE/D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;QAC5D,OAAO,qBAAqB,CAC1B,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV;YACE,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;YACnC,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;AACzC,YAAA,GAAG,eAAe;SACnB,EACD,IAA
I,CAAC,aAAa,EAClB,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;;AAGG;AACH,IAAA,SAAS,CAAC,eAAiC,EAAA;AACzC,QAAA,OAAO,IAAI,WAAW,CACpB,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,aAAa,EAClB;YACE,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU,EAAE,IAAI,CAAC,UAAU;YAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;YACzC,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,cAAc,EAAE,IAAI,CAAC,cAAc;AACnC;;;;AAIG;AACH,YAAA,GAAG,eAAe;AACnB,SAAA,EACD,IAAI,CAAC,cAAc,CACpB,CAAC;KACH;AAED;;AAEG;IACH,MAAM,WAAW,CACf,OAA2D,EAAA;AAE3D,QAAA,MAAM,eAAe,GAAG,0BAA0B,CAAC,OAAO,CAAC,CAAC;AAC5D,QAAA,OAAO,WAAW,CAChB,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,KAAK,EACV,eAAe,EACf,IAAI,CAAC,aAAa,CACnB,CAAC;KACH;AACF;;ACtKD;;;;;;;;;;;;;;;AAeG;AAsBH;;;;;;AAMG;MACU,WAAW,CAAA;AActB;;AAEG;IACH,WACU,CAAA,gBAAkC,EAClC,cAAuC,EAAA;QADvC,IAAgB,CAAA,gBAAA,GAAhB,gBAAgB,CAAkB;QAClC,IAAc,CAAA,cAAA,GAAd,cAAc,CAAyB;AAlBjD;;;;AAIG;QACH,IAAQ,CAAA,QAAA,GAAG,KAAK,CAAC;AACjB;;;;AAIG;QACH,IAAc,CAAA,cAAA,GAAG,KAAK,CAAC;KAQnB;AAEJ;;;;;;;;AAQG;AACH,IAAA,MAAM,IAAI,CACR,OAAsC,EACtC,YAAY,GAAG,IAAI,EAAA;AAEnB,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;AAE7C,QAAA,MAAM,OAAO,GAAuB;AAClC,YAAA,aAAa,EAAE;gBACb,KAAK,EAAE,CAAC,UAAU,CAAC;gBACnB,YAAY;AACb,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;AAYG;IACH,MAAM,gBAAgB,CAAC,IAAY,EAAA;AACjC,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;gBACb,IAAI;AACL,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;;;;;;AAiBG;IACH,MAAM,iBAAiB,CAAC,IAA2B,EAAA;AACjD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;AACb,gBAAA,KAAK,EAAE,IAAI;AACZ,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;;;;;;;;;AAgBG;IACH,MAAM,iBAAiB,CAAC,IAA2B,EAAA;AACjD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA6B;AACxC,YAAA,aAAa,EAAE;AACb,gBAAA,KAAK,EAAE,IAAI;AACZ,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;AAOG;IACH,MAAM,qBAAqB,CACzB,iBAAqC,EAAA;AAErC,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAA4B;AACvC,YAAA,YAAY,EAAE;gBACZ,iBAAiB;AAClB,aAAA;SACF,CAAC;AACF,QAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;KACrD;AAED;;;;;;;;AAQG;IACH,OAAO,OAAO,GAAA;AAGZ,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,kFAAkF,CACnF,CAAC;SACH;QACD,WAAW,MAAM,OAAO,IAAI,IAAI,CAAC,cAAc,EAAE;AAC/C,YAAA,IAAI,OAAO,IAAI,OAAO,OAAO,KAAK,QAAQ,EAAE;AAC1C,gBAAA,IAAI,gBAAgB,CAAC,cAAc,IAAI,OAAO,EAAE;oBAC9C,MAAM;AACJ,wBAAA,IAAI,EAAE,eAAe;AACrB,wBAAA,GAAI,OAA8D;6BAC/D,aAAa;qBACI,CAAC;iBACxB;AAAM,qBAAA,IAAI,gBAAgB,CAAC,SAAS,IAAI,OAAO,EAAE;oBAChD,MAAM;AACJ,wBAAA,IAAI,EAAE,UAAU;AAChB,wBAAA,GAAI,OAA0D;6BAC3D,QAAQ;qBACU,CAAC;iBACzB;AAAM,qBAAA,IAAI,gBAAgB,CAAC,sBAAsB,IAAI,OAAO,EAAE;oBAC7D,MAAM;AACJ,wBAAA,IAAI,EAAE,sBAAsB;wBAC5B,GACE,OAMD,CAAC,oBAAoB;qBACW,CAAC;iBACrC;qBAAM;AACL,oBAAA,MAAM,CAAC,IAAI,CACT,CAAA,kDAAA,EAAqD,IAAI,CAAC,SAAS,CACjE,OAAO,CACR,CAAE,CAAA,CACJ,CAAC;iBACH;aACF;iBAAM;AACL,gBAAA,MAAM,CAAC,IAAI,CACT,CAAA,6CAAA,EAAgD,IAAI,CAAC,SAAS,CAC5D,OAAO,
CACR,CAAE,CAAA,CACJ,CAAC;aACH;SACF;KACF;AAED;;;;;AAKG;AACH,IAAA,MAAM,KAAK,GAAA;AACT,QAAA,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;AAClB,YAAA,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC;YACrB,MAAM,IAAI,CAAC,gBAAgB,CAAC,KAAK,CAAC,IAAI,EAAE,wBAAwB,CAAC,CAAC;SACnE;KACF;AAED;;;;;;;;;AASG;IACH,MAAM,eAAe,CAAC,WAAoC,EAAA;AACxD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;;;AAID,QAAA,WAAW,CAAC,OAAO,CAAC,UAAU,IAAG;AAC/B,YAAA,MAAM,OAAO,GAA6B;AACxC,gBAAA,aAAa,EAAE,EAAE,WAAW,EAAE,CAAC,UAAU,CAAC,EAAE;aAC7C,CAAC;AACF,YAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;AACtD,SAAC,CAAC,CAAC;KACJ;AAED;;;;;;;;;AASG;IACH,MAAM,eAAe,CACnB,gBAAuD,EAAA;AAEvD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,MAAM,GAAG,gBAAgB,CAAC,SAAS,EAAE,CAAC;QAC5C,OAAO,IAAI,EAAE;AACX,YAAA,IAAI;gBACF,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;gBAE5C,IAAI,IAAI,EAAE;oBACR,MAAM;iBACP;qBAAM,IAAI,CAAC,KAAK,EAAE;AACjB,oBAAA,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC,CAAC;iBACrE;gBAED,MAAM,IAAI,CAAC,eAAe,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;aACrC;YAAC,OAAO,CAAC,EAAE;;AAEV,gBAAA,MAAM,OAAO,GACX,CAAC,YAAY,KAAK,GAAG,CAAC,CAAC,OAAO,GAAG,gCAAgC,CAAC;gBACpE,MAAM,IAAI,OAAO,CAAC,WAAW,CAAC,aAAa,EAAE,OAAO,CAAC,CAAC;aACvD;SACF;KACF;AACF;;ACzWD;;;;;;;;;;;;;;;AAeG;AAoBH;;;;;;;AAOG;AACG,MAAO,mBAAoB,SAAQ,OAAO,CAAA;AAM9C;;AAEG;IACH,WACE,CAAA,EAAM,EACN,WAA4B;AAC5B;;AAEG;IACK,iBAAmC,EAAA;AAE3C,QAAA,KAAK,CAAC,EAAE,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;QAFrB,IAAiB,CAAA,iBAAA,GAAjB,iBAAiB,CAAkB;QAG3C,IAAI,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,IAAI,EAAE,CAAC;AAC3D,QAAA,IAAI,CAAC,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC;AAC/B,QAAA,IAAI,CAAC,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QACzC,IAAI,CAAC,iBAAiB,GAAG,uBAAuB,CAC9C,WAAW,CAAC,iBAAiB,CAC9B,CAAC;KACH;AAED;;;;;;;AAOG;AACH,IAAA,MAAM,OAAO,GAAA;QACX,MAAM,GAAG,GAAG,IAAI,YAAY,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAChD,MAAM,IAAI,CAAC,iBAAiB,CAAC,OAAO,CAAC,GAAG,CAAC,QAAQ,EAAE,CAAC,CAAC;AAErD,QAAA,IAAI,aAAqB,CAAC;AAC1B,QAAA,IAAI,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AACnE,YAAA,aAAa,GAAG,CAAA,SAAA,EAAY,IAAI,CAAC,YAAY,CAAC,OAAO,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,CAAA,CAAE,CAAC;SACvE;aAAM;AACL,YAAA,aAAa,GAAG,CAAY,SAAA,EAAA,IAAI,CAAC,YAAY,CAAC,OAAO,CAAc,WAAA,EAAA,IAAI,CAAC,YAAY,CAAC,QAAQ,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,EAAE,CAAC;SAC/G;;;AAID,QAAA,MAAM,EACJ,uBAAuB,EACvB,wBAAwB,EACxB,GAAG,gBAAgB,EACpB,GAAG,IAAI,CAAC,gBAAgB,CAAC;AAE1B,QAAA,MAAM,YAAY,GAAqB;AACrC,YAAA,KAAK,EAAE;AACL,gBAAA,KAAK,EAAE,aAAa;gBACpB,gBAAgB;gBAChB,KAAK,EAAE,IAAI,CAAC,KAAK;gBACjB,UAAU,EAAE,IAAI,CAAC,UAAU;gBAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;gBACzC,uBAAuB;gBACvB,wBAAwB;AACzB,aAAA;SACF,CAAC;AAEF,QAAA,IAAI;;YAEF,MAAM,cAAc,GAAG,IAAI,CAAC,iBAAiB,CAAC,MAAM,EAAE,CAAC;AACvD,YAAA,IAAI,CAAC,iBAAiB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,YAAY,CAAC,CAAC,CAAC;;YAG1D,MAAM,YAAY,GAAG,CAAC,MAAM,cAAc,CAAC,IAAI,EAAE,EAAE,KAAK,CAAC;AACzD,YAAA,IACE,CAAC,YAAY;AACb,gBAAA,EAAE,OAAO,YAAY,KAAK,QAAQ,CAAC;AACnC,gBAAA,EAAE,eAAe,IAAI,YAAY,CAAC,EAClC;gBACA,MAAM,IAAI,CAAC,iBAAiB,CAAC,KAAK,CAAC,IAAI,EAAE,mBAAmB,CAAC,CAAC;gBAC9D,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,8FAA8F,CAC/F,CAAC;aACH;YAED,OAAO,IAAI,WAAW,CAAC,IAAI,CAAC,iBAAiB,EAAE,cAAc,CAAC,CAAC;SAChE;QAAC,OAAO,CAAC,EAAE;;AAEV,YAAA,MAAM,IAAI,CAAC,iBAAiB,CAAC,KAAK,EAAE,CAAC;AACrC,YAAA,MAAM,CAAC,CAAC;SACT;KACF;AACF;;ACtID;;;;;;;;;;;;;;;AAeG;AAiBH;;;;;;;;;;;;;;;;;;;;;AAqBG;AACG,MAAO,WAAY,SAAQ,OAAO,CAAA;AAUtC;;;;;;;;;AASG;AACH,IAAA,WAAA,CACE,EAAM,EACN,WAA8B,EACvB,cAA+B,EAAA;QAEtC,MAAM,EAAE,KAAK,EAAE,gBAAgB,EAAE,cAAc,EAAE,GAAG,WAAW,CAAC;AAC
hE,QAAA,KAAK,CAAC,EAAE,EAAE,KAAK,CAAC,CAAC;QAHV,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;AAItC,QAAA,IAAI,CAAC,gBAAgB,GAAG,gBAAgB,CAAC;AACzC,QAAA,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;KACtC;AAED;;;;;;;;;;;;;;;;;AAiBG;IACH,MAAM,cAAc,CAClB,MAAc,EAAA;AAEd,QAAA,MAAM,IAAI,GAAG,wBAAwB,CAAC,MAAM,EAAE;YAC5C,GAAG,IAAI,CAAC,gBAAgB;YACxB,GAAG,IAAI,CAAC,cAAc;AACvB,SAAA,CAAC,CAAC;AACH,QAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,OAAO,EACZ,IAAI,CAAC,YAAY;AACjB,qBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EACpB,IAAI,CAAC,cAAc,CACpB,CAAC;AACF,QAAA,OAAO,qBAAqB,CAAoB,QAAQ,CAAC,CAAC;KAC3D;AAED;;;;;;;;;;;;;;;;;;AAkBG;AACH,IAAA,MAAM,iBAAiB,CACrB,MAAc,EACd,MAAc,EAAA;AAEd,QAAA,MAAM,IAAI,GAAG,wBAAwB,CAAC,MAAM,EAAE;YAC5C,MAAM;YACN,GAAG,IAAI,CAAC,gBAAgB;YACxB,GAAG,IAAI,CAAC,cAAc;AACvB,SAAA,CAAC,CAAC;AACH,QAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,OAAO,EACZ,IAAI,CAAC,YAAY;AACjB,qBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EACpB,IAAI,CAAC,cAAc,CACpB,CAAC;AACF,QAAA,OAAO,qBAAqB,CAAiB,QAAQ,CAAC,CAAC;KACxD;AACF;;AC/JD;;;;;;;;;;;;;;;AAeG;AAiDH;;;;AAIG;MACU,oBAAoB,CAAA;AAG/B,IAAA,WAAA,GAAA;AACE,QAAA,IAAI,OAAO,SAAS,KAAK,WAAW,EAAE;AACpC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,0DAA0D;gBACxD,+DAA+D;AAC/D,gBAAA,6EAA6E,CAChF,CAAC;SACH;KACF;AAED,IAAA,OAAO,CAAC,GAAW,EAAA;QACjB,OAAO,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,MAAM,KAAI;YACrC,IAAI,CAAC,EAAE,GAAG,IAAI,SAAS,CAAC,GAAG,CAAC,CAAC;YAC7B,IAAI,CAAC,EAAE,CAAC,UAAU,GAAG,MAAM,CAAC;AAC5B,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,MAAM,EAAE,MAAM,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;AAClE,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CACtB,OAAO,EACP,MACE,MAAM,CACJ,IAAI,OAAO,CACT,WAAW,CAAC,WAAW,EACvB,CAAA,+BAAA,CAAiC,CAClC,CACF,EACH,EAAE,IAAI,EAAE,IAAI,EAAE,CACf,CAAC;YACF,IAAI,CAAC,EAAG,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,UAAsB,KAAI;AAC5D,gBAAA,IAAI,UAAU,CAAC,MAAM,EAAE;oBACrB,MAAM,CAAC,IAAI,CACT,CAAA,gDAAA,EAAmD,UAAU,CAAC,MAAM,CAAG,CAAA,CAAA,CACxE,CAAC;iBACH;AACH,aAAC,CAAC,CAAC;AACL,SAAC,CAAC,CAAC;KACJ;AAED,IAAA,IAAI,CAAC,IAA0B,EAAA;AAC7B,QAAA,IAAI,CAAC,IAAI,CAAC,EAAE,IAAI,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,IAAI,EAAE;YACrD,MAAM,IAAI,OAAO,CAAC,WAAW,CAAC,aAAa,EAAE,wBAAwB,CAAC,CAAC;SACxE;AACD,QAAA,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;KACpB;IAED,OAAO,MAAM,GAAA;AACX,QAAA,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE;YACZ,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,6BAA6B,CAC9B,CAAC;SACH;QAED,MAAM,YAAY,GAAc,EAAE,CAAC;QACnC,MAAM,UAAU,GAAY,EAAE,CAAC;QAC/B,IAAI,cAAc,GAAwB,IAAI,CAAC;QAC/C,IAAI,QAAQ,GAAG,KAAK,CAAC;AAErB,QAAA,MAAM,eAAe,GAAG,OAAO,KAAmB,KAAmB;AACnE,YAAA,IAAI,IAAY,CAAC;AACjB,YAAA,IAAI,KAAK,CAAC,IAAI,YAAY,IAAI,EAAE;gBAC9B,IAAI,GAAG,MAAM,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC;aAChC;AAAM,iBAAA,IAAI,OAAO,KAAK,CAAC,IAAI,KAAK,QAAQ,EAAE;AACzC,gBAAA,IAAI,GAAG,KAAK,CAAC,IAAI,CAAC;aACnB;iBAAM;AACL,gBAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,CAAA,kFAAA,EAAqF,OAAO,KAAK,CAAC,IAAI,CAAG,CAAA,CAAA,CAC1G,CACF,CAAC;gBACF,IAAI,cAAc,EAAE;AAClB,oBAAA,cAAc,EAAE,CAAC;oBACjB,cAAc,GAAG,IAAI,CAAC;iBACvB;gBACD,OAAO;aACR;AAED,YAAA,IAAI;gBACF,MAAM,GAAG,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAY,CAAC;AACxC,gBAAA,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;aACxB;YAAC,OAAO,CAAC,EAAE;gBACV,MAAM,GAAG,GAAG,CAAU,CAAC;AACvB,gBAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,4CAA4C,GAAG,CAAC,OAAO,CAAE,CAAA,CAC1D,CACF,CAAC;aACH;YAED,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;AACH,SAAC,CAAC;QAEF,MAAM,aAAa,GAAG,MAAW;AAC/B,YAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CAAC,WAAW,CAAC,WAAW,EAAE,6BAA6B,CAAC,CACpE,CAAC;YACF,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;
aACvB;AACH,SAAC,CAAC;AAEF,QAAA,MAAM,aAAa,GAAG,CAAC,KAAiB,KAAU;AAChD,YAAA,IAAI,KAAK,CAAC,MAAM,EAAE;gBAChB,MAAM,CAAC,IAAI,CACT,CAAA,uDAAA,EAA0D,KAAK,CAAC,MAAM,CAAE,CAAA,CACzE,CAAC;aACH;YACD,QAAQ,GAAG,IAAI,CAAC;YAChB,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;;YAED,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,SAAS,EAAE,eAAe,CAAC,CAAC;YACzD,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;YACrD,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;AACvD,SAAC,CAAC;QAEF,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,SAAS,EAAE,eAAe,CAAC,CAAC;QACrD,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;QACjD,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;QAEjD,OAAO,CAAC,QAAQ,EAAE;AAChB,YAAA,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzB,gBAAA,MAAM,KAAK,GAAG,UAAU,CAAC,KAAK,EAAG,CAAC;AAClC,gBAAA,MAAM,KAAK,CAAC;aACb;AACD,YAAA,IAAI,YAAY,CAAC,MAAM,GAAG,CAAC,EAAE;AAC3B,gBAAA,MAAM,YAAY,CAAC,KAAK,EAAG,CAAC;aAC7B;iBAAM;AACL,gBAAA,MAAM,IAAI,OAAO,CAAO,OAAO,IAAG;oBAChC,cAAc,GAAG,OAAO,CAAC;AAC3B,iBAAC,CAAC,CAAC;aACJ;SACF;;AAGD,QAAA,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzB,YAAA,MAAM,KAAK,GAAG,UAAU,CAAC,KAAK,EAAG,CAAC;AAClC,YAAA,MAAM,KAAK,CAAC;SACb;KACF;IAED,KAAK,CAAC,IAAa,EAAE,MAAe,EAAA;AAClC,QAAA,OAAO,IAAI,OAAO,CAAC,OAAO,IAAG;AAC3B,YAAA,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE;gBACZ,OAAO,OAAO,EAAE,CAAC;aAClB;AAED,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;;YAEnE,IACE,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,MAAM;gBACvC,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,UAAU,EAC3C;gBACA,OAAO,OAAO,EAAE,CAAC;aAClB;YAED,IAAI,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,OAAO,EAAE;gBAC5C,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;aAC7B;AACH,SAAC,CAAC,CAAC;KACJ;AACF;;AChPD;;;;;;;;;;;;;;;AAeG;AAWH;;;;;;AAMG;MACmB,MAAM,CAAA;AAkC1B,IAAA,WAAA,CAAY,YAA6B,EAAA;;QAEvC,IAAI,CAAC,YAAY,CAAC,IAAI,IAAI,CAAC,YAAY,CAAC,KAAK,EAAE;YAC7C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,wEAAwE,CACzE,CAAC;SACH;;AAED,QAAA,KAAK,MAAM,QAAQ,IAAI,YAAY,EAAE;YACnC,IAAI,CAAC,QAAQ,CAAC,GAAG,YAAY,CAAC,QAAQ,CAAC,CAAC;SACzC;;AAED,QAAA,IAAI,CAAC,IAAI,GAAG,YAAY,CAAC,IAAI,CAAC;QAC9B,IAAI,CAAC,MAAM,GAAG,YAAY,CAAC,cAAc,CAAC,QAAQ,CAAC;cAC/C,YAAY,CAAC,MAAM;cACnB,SAAS,CAAC;QACd,IAAI,CAAC,QAAQ,GAAG,YAAY,CAAC,cAAc,CAAC,UAAU,CAAC;AACrD,cAAE,CAAC,CAAC,YAAY,CAAC,QAAQ;cACvB,KAAK,CAAC;KACX;AAED;;;;AAIG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAkD;YACzD,IAAI,EAAE,IAAI,CAAC,IAAI;SAChB,CAAC;AACF,QAAA,KAAK,MAAM,IAAI,IAAI,IAAI,EAAE;AACvB,YAAA,IAAI,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK,SAAS,EAAE;AACzD,gBAAA,IAAI,IAAI,KAAK,UAAU,IAAI,IAAI,CAAC,IAAI,KAAK,UAAU,CAAC,MAAM,EAAE;oBAC1D,GAAG,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,CAAC;iBACxB;aACF;SACF;AACD,QAAA,OAAO,GAAoB,CAAC;KAC7B;IAED,OAAO,KAAK,CAAC,WAA6C,EAAA;QACxD,OAAO,IAAI,WAAW,CAAC,WAAW,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;KACxD;IAED,OAAO,MAAM,CACX,YAKC,EAAA;AAED,QAAA,OAAO,IAAI,YAAY,CACrB,YAAY,EACZ,YAAY,CAAC,UAAU,EACvB,YAAY,CAAC,kBAAkB,CAChC,CAAC;KACH;;IAGD,OAAO,MAAM,CAAC,YAA2B,EAAA;AACvC,QAAA,OAAO,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;KACvC;IAED,OAAO,UAAU,CACf,YAA+C,EAAA;QAE/C,OAAO,IAAI,YAAY,CAAC,YAAY,EAAE,YAAY,CAAC,IAAI,CAAC,CAAC;KAC1D;IAED,OAAO,OAAO,CAAC,aAA4B,EAAA;AACzC,QAAA,OAAO,IAAI,aAAa,CAAC,aAAa,CAAC,CAAC;KACzC;;IAGD,OAAO,MAAM,CAAC,YAA2B,EAAA;AACvC,QAAA,OAAO,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;KACvC;;IAGD,OAAO,OAAO,CAAC,aAA4B,EAAA;AACzC,QAAA,OAAO,IAAI,aAAa,CAAC,aAAa,CAAC,CAAC;KACzC;IAED,OAAO,KAAK,CACV,WAAoD,EAAA;AAEpD,QAAA,OAAO,IAAI,WAAW,CAAC,WAAW,CAAC,CAAC;KACrC;AACF,CAAA;AAeD;;;AAGG;AACG,MAAO,aAAc,SAAQ,MAAM,CAAA;AACvC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK
,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,OAAO;AACxB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;AAGG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;AACtC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;AAGG;AACG,MAAO,aAAc,SAAQ,MAAM,CAAA;AACvC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,OAAO;AACxB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;IAEtC,WAAY,CAAA,YAA2B,EAAE,UAAqB,EAAA;AAC5D,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;AACH,QAAA,IAAI,CAAC,IAAI,GAAG,UAAU,CAAC;KACxB;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;AAC3B,QAAA,IAAI,IAAI,CAAC,IAAI,EAAE;AACb,YAAA,GAAG,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC;SACzB;AACD,QAAA,OAAO,GAAoB,CAAC;KAC7B;AACF,CAAA;AAED;;;;;AAKG;AACG,MAAO,WAAY,SAAQ,MAAM,CAAA;IACrC,WAAY,CAAA,YAA0B,EAAS,KAAkB,EAAA;AAC/D,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,KAAK;AACtB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;QAJ0C,IAAK,CAAA,KAAA,GAAL,KAAK,CAAa;KAKhE;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAC3B,GAAG,CAAC,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,EAAE,CAAC;AAChC,QAAA,OAAO,GAAG,CAAC;KACZ;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;AACtC,IAAA,WAAA,CACE,YAA0B,EACnB,UAEN,EACM,qBAA+B,EAAE,EAAA;AAExC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;QARI,IAAU,CAAA,UAAA,GAAV,UAAU,CAEhB;QACM,IAAkB,CAAA,kBAAA,GAAlB,kBAAkB,CAAe;KAMzC;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAC3B,GAAG,CAAC,UAAU,GAAG,EAAE,GAAG,IAAI,CAAC,UAAU,EAAE,CAAC;QACxC,MAAM,QAAQ,GAAG,EAAE,CAAC;AACpB,QAAA,IAAI,IAAI,CAAC,kBAAkB,EAAE;AAC3B,YAAA,KAAK,MAAM,WAAW,IAAI,IAAI,CAAC,kBAAkB,EAAE;gBACjD,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,WAAW,CAAC,EAAE;oBAChD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAa,UAAA,EAAA,WAAW,CAAqD,mDAAA,CAAA,CAC9E,CAAC;iBACH;aACF;SACF;AACD,QAAA,KAAK,MAAM,WAAW,IAAI,IAAI,CAAC,UAAU,EAAE;YACzC,IAAI,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,WAAW,CAAC,EAAE;AAC/C,gBAAA,GAAG,CAAC,UAAU,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,UAAU,CAC3C,WAAW,CACZ,CAAC,MAAM,EAAmB,CAAC;gBAC5B,IAAI,CAAC,IAAI,CAAC,kBAAkB,CAAC,QAAQ,CAAC,WAAW,CAAC,EAAE;AAClD,oBAAA,QAAQ,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;iBAC5B;aACF;SACF;AACD,QAAA,IAAI,QAAQ,CAAC,MAAM,GAAG,CAAC,EAAE;AACvB,YAAA,GAAG,CAAC,QAAQ,GAAG,QAAQ,CAAC;SACzB;QACD,OAAO,GAAG,CAAC,kBAAkB,CAAC;AAC9B,QAAA,OAAO,GAAoB,CAAC;KAC7B;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,WAAY,SAAQ,MAAM,CAAA;AAErC,IAAA,WAAA,CAAY,YAAqD,EAAA;QAC/D,IAAI,YAAY,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;YACnC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,sCAAsC,CACvC,CAAC;SACH;AACD,QAAA,KAAK,CAAC;AACJ,YAAA,GAAG,YAAY;YACf,IAAI,EAAE,SAAS;AAChB,SAAA,CAAC,CAAC;AACH,QAAA,IAAI,CAAC,KAAK,GAAG,YAAY,CAAC,KAAK,CAAC;KACjC;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;;AAE3B,QAAA,IAAI,IAAI,CAAC,KAAK,IAAI,KAAK,CAAC,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE;AAC3C,YAAA,GAAG,CAAC,KAAK,GAAI,IAAI,CAAC,KAAuB,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;SAChE;AACD,QAAA,OAAO,GAAG,CAAC;KACZ;AACF;;AC5VD;;;;;;;;;;;;;;;AAeG;AAIH;;;;;;;;;;;;;;;;AAgBG;MACU,iBAAiB,CAAA;AAU5B,IAAA,WAAA,GAAA;AACE,QAAA,IAAI,CAAC,QAAQ,GAAG,WAAW,CAAC;KAC7B;AAED;;;;;;;AAOG;IACH,OAAO,IAAI,CAAC,kBAA2B,EAAA;AACrC,QAAA,IACE,kBAAkB;aACjB,kBAAkB,GAAG,CAAC,IAAI,kBAAkB,GAAG,GAAG,CAAC,EACpD;AACA,YAAA,MAAM,CAAC,IAAI,CACT,uCAAuC,kBAAkB,CAAA,4CAAA,CAA8C,CACxG,CAAC;SACH;AACD,QAAA,OAAO,EAAE,QAAQ,EAAE,YAAY,EAAE,kBAAkB,EAAE,CAAC;KACvD;AAED;;;;;;AAMG;AACH,
IAAA,OAAO,GAAG,GAAA;AACR,QAAA,OAAO,EAAE,QAAQ,EAAE,WAAW,EAAE,CAAC;KAClC;AACF;;AChFD;;;;;;;;;;;;;;;AAeG;AAcH,MAAM,wBAAwB,GAAG,KAAM,CAAC;AACxC,MAAM,yBAAyB,GAAG,KAAM,CAAC;AAEzC,MAAM,oBAAoB,GAAG,iBAAiB,CAAC;AAE/C;;;;;;;;;AASG;AACH,MAAM,2BAA2B,GAAG,CAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;uBA6Cb,oBAAoB,CAAA;CAC1C,CAAC;AA2CF;;;;AAIG;MACU,uBAAuB,CAAA;AAiBlC,IAAA,WAAA,CACmB,WAAwB,EACxB,OAAsC,EACtC,IAAwB,EAAA;QAFxB,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;QACxB,IAAO,CAAA,OAAA,GAAP,OAAO,CAA+B;QACtC,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAoB;;QAlBnC,IAAS,CAAA,SAAA,GAAG,KAAK,CAAC;;AAET,QAAA,IAAA,CAAA,YAAY,GAAG,IAAIC,aAAQ,EAAQ,CAAC;;QAKpC,IAAa,CAAA,aAAA,GAAkB,EAAE,CAAC;;QAE3C,IAAgB,CAAA,gBAAA,GAA4B,EAAE,CAAC;;QAE/C,IAAa,CAAA,aAAA,GAAG,CAAC,CAAC;;QAElB,IAAqB,CAAA,qBAAA,GAAG,KAAK,CAAC;AAOpC,QAAA,IAAI,CAAC,WAAW,CAAC,cAAc,GAAG,IAAI,CAAC;;AAGvC,QAAA,IAAI,CAAC,kBAAkB,GAAG,IAAI,CAAC,cAAc,EAAE,CAAC,OAAO,CAAC,MACtD,IAAI,CAAC,OAAO,EAAE,CACf,CAAC;;;QAIF,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,SAAS,GAAG,KAAK,IAAG;AAC7C,YAAA,IAAI,IAAI,CAAC,SAAS,EAAE;gBAClB,OAAO;aACR;AAED,YAAA,MAAM,KAAK,GAAG,KAAK,CAAC,IAAkB,CAAC;YACvC,MAAM,MAAM,GAAG,IAAI,CACjB,MAAM,CAAC,YAAY,CAAC,KAAK,CACvB,IAAI,EACJ,KAAK,CAAC,IAAI,CAAC,IAAI,UAAU,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CACzC,CACF,CAAC;AAEF,YAAA,MAAM,KAAK,GAA0B;AACnC,gBAAA,QAAQ,EAAE,WAAW;AACrB,gBAAA,IAAI,EAAE,MAAM;aACb,CAAC;YACF,KAAK,IAAI,CAAC,WAAW,CAAC,iBAAiB,CAAC,KAAK,CAAC,CAAC;AACjD,SAAC,CAAC;KACH;AAED;;AAEG;AACH,IAAA,MAAM,IAAI,GAAA;AACR,QAAA,IAAI,IAAI,CAAC,SAAS,EAAE;YAClB,OAAO;SACR;AACD,QAAA,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC;AACtB,QAAA,IAAI,CAAC,YAAY,CAAC,OAAO,EAAE,CAAC;AAC5B,QAAA,MAAM,IAAI,CAAC,kBAAkB,CAAC;KAC/B;AAED;;;AAGG;IACK,OAAO,GAAA;AACb,QAAA,IAAI,CAAC,iBAAiB,EAAE,CAAC;QACzB,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC;AAC5C,QAAA,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,UAAU,EAAE,CAAC;AACnC,QAAA,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,CAAC;AAClC,QAAA,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,SAAS,EAAE,CAAC,OAAO,CAAC,KAAK,IAAI,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC;QACjE,IAAI,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,KAAK,KAAK,QAAQ,EAAE;YAC7C,KAAK,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,KAAK,EAAE,CAAC;SACrC;AACD,QAAA,IAAI,CAAC,WAAW,CAAC,cAAc,GAAG,KAAK,CAAC;KACzC;AAED;;AAEG;AACK,IAAA,cAAc,CAAC,SAAsB,EAAA;AAC3C,QAAA,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;;AAEnC,QAAA,KAAK,IAAI,CAAC,oBAAoB,EAAE,CAAC;KAClC;AAED;;;;AAIG;IACK,iBAAiB,GAAA;;;AAGvB,QAAA,CAAC,GAAG,IAAI,CAAC,gBAAgB,CAAC,CAAC,OAAO,CAAC,MAAM,IAAI,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;;AAG7D,QAAA,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,CAAC,CAAC;;QAG9B,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,CAAC;KACzD;AAED;;AAEG;AACK,IAAA,MAAM,oBAAoB,GAAA;AAChC,QAAA,IAAI,IAAI,CAAC,qBAAqB,EAAE;YAC9B,OAAO;SACR;AACD,QAAA,IAAI,CAAC,qBAAqB,GAAG,IAAI,CAAC;AAElC,QAAA,OAAO,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE;YACvD,MAAM,YAAY,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,EAAG,CAAC;AACjD,YAAA,IAAI;AACF,gBAAA,MAAM,KAAK,GAAG,IAAI,UAAU,CAAC,YAAY,CAAC,CAAC;AAC3C,gBAAA,MAAM,UAAU,GAAG,KAAK,CAAC,MAAM,CAAC;AAEhC,gBAAA,MAAM,WAAW,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,YAAY,CACrD,CAAC,EACD,UAAU,EACV,yBAAyB,CAC1B,CAAC;;gBAGF,MAAM,WAAW,GAAG,WAAW,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC;AAClD,gBAAA,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,CAAC,EAAE,EAAE;AACnC,oBAAA,WAAW,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC;iBACnC;gBAED,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,kBAAkB,EAAE,CAAC;AAC3D,gBAAA,MAAM,CAAC,MAAM,GAAG,WAAW,CAAC;gBAC5B,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,CAAC,CAAC;;AAGnD,gBAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;AACnC,gBAAA,MAAM,CAAC,OAAO,GAAG,MAAK;AACpB,
oBAAA,IAAI,CAAC,gBAAgB,GAAG,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAClD,CAAC,IAAI,CAAC,KAAK,MAAM,CAClB,CAAC;AACJ,iBAAC,CAAC;;;AAIF,gBAAA,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,GAAG,CAC3B,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,EAClC,IAAI,CAAC,aAAa,CACnB,CAAC;AACF,gBAAA,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC;;AAGjC,gBAAA,IAAI,CAAC,aAAa,IAAI,WAAW,CAAC,QAAQ,CAAC;aAC5C;YAAC,OAAO,CAAC,EAAE;AACV,gBAAA,MAAM,CAAC,KAAK,CAAC,sBAAsB,EAAE,CAAC,CAAC,CAAC;aACzC;SACF;AAED,QAAA,IAAI,CAAC,qBAAqB,GAAG,KAAK,CAAC;KACpC;AAED;;AAEG;AACK,IAAA,MAAM,cAAc,GAAA;QAC1B,MAAM,gBAAgB,GAAG,IAAI,CAAC,WAAW,CAAC,OAAO,EAAE,CAAC;AACpD,QAAA,OAAO,CAAC,IAAI,CAAC,SAAS,EAAE;AACtB,YAAA,MAAM,MAAM,GAAG,MAAM,OAAO,CAAC,IAAI,CAAC;gBAChC,gBAAgB,CAAC,IAAI,EAAE;gBACvB,IAAI,CAAC,YAAY,CAAC,OAAO;AAC1B,aAAA,CAAC,CAAC;YAEH,IAAI,IAAI,CAAC,SAAS,IAAI,CAAC,MAAM,IAAI,MAAM,CAAC,IAAI,EAAE;gBAC5C,MAAM;aACP;AAED,YAAA,MAAM,OAAO,GAAG,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAA,IAAI,OAAO,CAAC,IAAI,KAAK,eAAe,EAAE;gBACpC,MAAM,aAAa,GAAG,OAA4B,CAAC;AACnD,gBAAA,IAAI,aAAa,CAAC,WAAW,EAAE;oBAC7B,IAAI,CAAC,iBAAiB,EAAE,CAAC;iBAC1B;gBAED,MAAM,SAAS,GAAG,aAAa,CAAC,SAAS,EAAE,KAAK,CAAC,IAAI,CAAC,IAAI,IACxD,IAAI,CAAC,UAAU,EAAE,QAAQ,CAAC,UAAU,CAAC,QAAQ,CAAC,CAC/C,CAAC;AACF,gBAAA,IAAI,SAAS,EAAE,UAAU,EAAE;AACzB,oBAAA,MAAM,SAAS,GAAG,UAAU,CAAC,IAAI,CAC/B,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,IAAI,CAAC,EAC/B,CAAC,IAAI,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CACrB,CAAC,MAAM,CAAC;AACT,oBAAA,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,CAAC;iBAChC;aACF;AAAM,iBAAA,IAAI,OAAO,CAAC,IAAI,KAAK,UAAU,EAAE;AACtC,gBAAA,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,sBAAsB,EAAE;AACxC,oBAAA,MAAM,CAAC,IAAI,CACT,wHAAwH,CACzH,CAAC;iBACH;qBAAM;AACL,oBAAA,IAAI;AACF,wBAAA,MAAM,gBAAgB,GAAG,MAAM,IAAI,CAAC,OAAO,CAAC,sBAAsB,CAChE,OAAO,CAAC,aAAa,CACtB,CAAC;AACF,wBAAA,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE;4BACnB,KAAK,IAAI,CAAC,WAAW,CAAC,qBAAqB,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC;yBACjE;qBACF;oBAAC,OAAO,CAAC,EAAE;AACV,wBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,iCAAA,EAAqC,CAAW,CAAC,OAAO,CAAA,CAAE,CAC3D,CAAC;qBACH;iBACF;aACF;SACF;KACF;AACF,CAAA;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA6CG;AACI,eAAe,sBAAsB,CAC1C,WAAwB,EACxB,UAAyC,EAAE,EAAA;AAE3C,IAAA,IAAI,WAAW,CAAC,QAAQ,EAAE;QACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,0DAA0D,CAC3D,CAAC;KACH;AAED,IAAA,IAAI,WAAW,CAAC,cAAc,EAAE;QAC9B,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,gEAAgE,CACjE,CAAC;KACH;;IAGD,IACE,OAAO,gBAAgB,KAAK,WAAW;QACvC,OAAO,YAAY,KAAK,WAAW;QACnC,OAAO,SAAS,KAAK,WAAW;AAChC,QAAA,CAAC,SAAS,CAAC,YAAY,EACvB;QACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,kHAAkH,CACnH,CAAC;KACH;AAED,IAAA,IAAI,YAAsC,CAAC;AAC3C,IAAA,IAAI;;;AAGF,QAAA,YAAY,GAAG,IAAI,YAAY,EAAE,CAAC;AAClC,QAAA,IAAI,YAAY,CAAC,KAAK,KAAK,WAAW,EAAE;AACtC,YAAA,MAAM,YAAY,CAAC,MAAM,EAAE,CAAC;SAC7B;;;QAID,MAAM,WAAW,GAAG,MAAM,SAAS,CAAC,YAAY,CAAC,YAAY,CAAC;AAC5D,YAAA,KAAK,EAAE,IAAI;AACZ,SAAA,CAAC,CAAC;;;QAIH,MAAM,WAAW,GAAG,IAAI,IAAI,CAAC,CAAC,2BAA2B,CAAC,EAAE;AAC1D,YAAA,IAAI,EAAE,wBAAwB;AAC/B,SAAA,CAAC,CAAC;QACH,MAAM,UAAU,GAAG,GAAG,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC;QACpD,MAAM,YAAY,CAAC,YAAY,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC;;QAGtD,MAAM,UAAU,GAAG,YAAY,CAAC,uBAAuB,CAAC,WAAW,CAAC,CAAC;QACrE,MAAM,WAAW,GAAG,IAAI,gBAAgB,CACtC,YAAY,EACZ,oBAAoB,EACpB;AACE,YAAA,gBAAgB,EAAE,EAAE,gBAAgB,EAAE,wBAAwB,EAAE;AACjE,SAAA,CACF,CAAC;AACF,QAAA,UAAU,CAAC,OAAO,CAAC,WAAW,CAAC,CAAC;;QAGhC,MAAM,MAAM,GAAG,IAAI,uBAAuB,CAAC,WAAW,EAAE,OAAO,EAAE;YAC/D,YAAY;YACZ,WAAW;YACX,UAAU;YACV,WAAW;AACZ,SAAA,CAAC,CAAC;QAEH,OAAO,EAAE,IAAI,EAAE,MAAM,MAAM,CAAC,IAAI,EAAE,EAAE,CAAC;KACtC;IAAC,OAAO,CAAC,EAAE;;QAEV,IAAI,YAAY,IAAI,YAAY,CAAC,KAAK,KAAK,QAAQ,EAAE;AACnD,YAAA,KAAK,YAAY,CAAC,KAAK,EAAE,CAAC;SAC3B;;;QAID,IAAI,CAAC,YAAY,OAAO,I
AAI,CAAC,YAAY,YAAY,EAAE;AACrD,YAAA,MAAM,CAAC,CAAC;SACT;;AAGD,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,sCAAA,EAA0C,CAAW,CAAC,OAAO,CAAA,CAAE,CAChE,CAAC;KACH;AACH;;AChfA;;;;;;;;;;;;;;;AAeG;AA6CH;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2BG;SACa,KAAK,CAACC,QAAmBC,UAAM,EAAE,EAAE,OAAmB,EAAA;AACpE,IAAAD,KAAG,GAAGE,uBAAkB,CAACF,KAAG,CAAC,CAAC;;IAE9B,MAAM,UAAU,GAAmBG,gBAAY,CAACH,KAAG,EAAE,OAAO,CAAC,CAAC;IAE9D,MAAM,OAAO,GAAG,OAAO,EAAE,OAAO,IAAI,IAAI,eAAe,EAAE,CAAC;AAE1D,IAAA,MAAM,YAAY,GAA+B;AAC/C,QAAA,2BAA2B,EAAE,OAAO,EAAE,2BAA2B,IAAI,KAAK;KAC3E,CAAC;AAEF,IAAA,MAAM,UAAU,GAAG,wBAAwB,CAAC,OAAO,CAAC,CAAC;AACrD,IAAA,MAAM,UAAU,GAAG,UAAU,CAAC,YAAY,CAAC;QACzC,UAAU;AACX,KAAA,CAAC,CAAC;AAEH,IAAA,UAAU,CAAC,OAAO,GAAG,YAAY,CAAC;AAElC,IAAA,OAAO,UAAU,CAAC;AACpB,CAAC;AAED;;;;;AAKG;SACa,kBAAkB,CAChC,EAAM,EACN,WAAuC,EACvC,cAA+B,EAAA;;IAG/B,MAAM,YAAY,GAAG,WAA2B,CAAC;AACjD,IAAA,IAAI,aAA0B,CAAC;AAC/B,IAAA,IAAI,YAAY,CAAC,IAAI,EAAE;AACrB,QAAA,aAAa,GAAG,YAAY,CAAC,aAAa,IAAI;AAC5C,YAAA,KAAK,EAAE,6BAA6B;SACrC,CAAC;KACH;SAAM;QACL,aAAa,GAAG,WAA0B,CAAC;KAC5C;AAED,IAAA,IAAI,CAAC,aAAa,CAAC,KAAK,EAAE;QACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAoF,kFAAA,CAAA,CACrF,CAAC;KACH;AAED;;;AAGG;AACH,IAAA,MAAM,aAAa,GAAI,EAAgB,CAAC,oBAAoB,GAC1D,YAAY,CAAC,IAAI,EACjB,OAAO,MAAM,KAAK,WAAW,GAAG,SAAS,GAAG,MAAM,EAClD,YAAY,CAAC,cAAc,CAC5B,CAAC;IAEF,OAAO,IAAI,eAAe,CAAC,EAAE,EAAE,aAAa,EAAE,cAAc,EAAE,aAAa,CAAC,CAAC;AAC/E,CAAC;AAED;;;;;;;;;;;;;AAaG;SACa,cAAc,CAC5B,EAAM,EACN,WAA8B,EAC9B,cAA+B,EAAA;AAE/B,IAAA,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE;QACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAgF,8EAAA,CAAA,CACjF,CAAC;KACH;IACD,OAAO,IAAI,WAAW,CAAC,EAAE,EAAE,WAAW,EAAE,cAAc,CAAC,CAAC;AAC1D,CAAC;AAED;;;;;;;;;;;AAWG;AACa,SAAA,sBAAsB,CACpC,EAAM,EACN,WAA4B,EAAA;AAE5B,IAAA,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE;QACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAuH,qHAAA,CAAA,CACxH,CAAC;KACH;AACD,IAAA,MAAM,gBAAgB,GAAG,IAAI,oBAAoB,EAAE,CAAC;IACpD,OAAO,IAAI,mBAAmB,CAAC,EAAE,EAAE,WAAW,EAAE,gBAAgB,CAAC,CAAC;AACpE;;AC3MA;;;;AAIG;AA4BH,SAAS,UAAU,GAAA;AACjB,IAAAI,sBAAkB,CAChB,IAAIC,mBAAS,CACX,OAAO,EACP,CAAC,SAAS,EAAE,EAAE,kBAAkB,EAAE,KAAI;QACpC,IAAI,CAAC,kBAAkB,EAAE;YACvB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,6CAA6C,CAC9C,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAAG,wBAAwB,CAAC,kBAAkB,CAAC,CAAC;;QAG7D,MAAM,GAAG,GAAG,SAAS,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC,YAAY,EAAE,CAAC;QACxD,MAAM,IAAI,GAAG,SAAS,CAAC,WAAW,CAAC,eAAe,CAAC,CAAC;QACpD,MAAM,gBAAgB,GAAG,SAAS,CAAC,WAAW,CAAC,oBAAoB,CAAC,CAAC;QACrE,OAAO,IAAI,SAAS,CAAC,GAAG,EAAE,OAAO,EAAE,IAAI,EAAE,gBAAgB,CAAC,CAAC;AAC7D,KAAC,sCAEF,CAAC,oBAAoB,CAAC,IAAI,CAAC,CAC7B,CAAC;AAEF,IAAAC,mBAAe,CAAC,IAAI,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;;AAEvC,IAAAA,mBAAe,CAAC,IAAI,EAAE,OAAO,EAAE,SAAkB,CAAC,CAAC;AACrD,CAAC;AAED,UAAU,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}
\ No newline at end of file diff --git a/frontend-old/node_modules/@firebase/ai/dist/index.node.mjs b/frontend-old/node_modules/@firebase/ai/dist/index.node.mjs new file mode 100644 index 0000000..46a24b4 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/index.node.mjs @@ -0,0 +1,3921 @@ +import { _isFirebaseServerApp, _getProvider, getApp, _registerComponent, registerVersion } from '@firebase/app'; +import { Component } from '@firebase/component'; +import { FirebaseError, Deferred, getModularInstance } from '@firebase/util'; +import { Logger } from '@firebase/logger'; + +var name = "@firebase/ai"; +var version = "2.5.0"; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const AI_TYPE = 'AI'; +const DEFAULT_LOCATION = 'us-central1'; +const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com'; +const DEFAULT_API_VERSION = 'v1beta'; +const PACKAGE_VERSION = version; +const LANGUAGE_TAG = 'gl-js'; +const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000; +/** + * Defines the name of the default in-cloud model to use for hybrid inference. + */ +const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite'; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Possible roles. + * @public + */ +const POSSIBLE_ROLES = ['user', 'model', 'function', 'system']; +/** + * Harm categories that would cause prompts or candidates to be blocked. + * @public + */ +const HarmCategory = { + HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH', + HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', + HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT', + HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT' +}; +/** + * Threshold above which a prompt or candidate will be blocked. + * @public + */ +const HarmBlockThreshold = { + /** + * Content with `NEGLIGIBLE` will be allowed. + */ + BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE', + /** + * Content with `NEGLIGIBLE` and `LOW` will be allowed. + */ + BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE', + /** + * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed. + */ + BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH', + /** + * All content will be allowed. + */ + BLOCK_NONE: 'BLOCK_NONE', + /** + * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding + * to the {@link (HarmCategory:type)} will not be present in the response. 
+ */ + OFF: 'OFF' +}; +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). + * + * @public + */ +const HarmBlockMethod = { + /** + * The harm block method uses both probability and severity scores. + */ + SEVERITY: 'SEVERITY', + /** + * The harm block method uses the probability score. + */ + PROBABILITY: 'PROBABILITY' +}; +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +const HarmProbability = { + /** + * Content has a negligible chance of being unsafe. + */ + NEGLIGIBLE: 'NEGLIGIBLE', + /** + * Content has a low chance of being unsafe. + */ + LOW: 'LOW', + /** + * Content has a medium chance of being unsafe. + */ + MEDIUM: 'MEDIUM', + /** + * Content has a high chance of being unsafe. + */ + HIGH: 'HIGH' +}; +/** + * Harm severity levels. + * @public + */ +const HarmSeverity = { + /** + * Negligible level of harm severity. + */ + HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE', + /** + * Low level of harm severity. + */ + HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW', + /** + * Medium level of harm severity. + */ + HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM', + /** + * High level of harm severity. + */ + HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH', + /** + * Harm severity is not supported. + * + * @remarks + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED' +}; +/** + * Reason that a prompt was blocked. + * @public + */ +const BlockReason = { + /** + * Content was blocked by safety settings. + */ + SAFETY: 'SAFETY', + /** + * Content was blocked, but the reason is uncategorized. + */ + OTHER: 'OTHER', + /** + * Content was blocked because it contained terms from the terminology blocklist. + */ + BLOCKLIST: 'BLOCKLIST', + /** + * Content was blocked due to prohibited content. + */ + PROHIBITED_CONTENT: 'PROHIBITED_CONTENT' +}; +/** + * Reason that a candidate finished. + * @public + */ +const FinishReason = { + /** + * Natural stop point of the model or provided stop sequence. + */ + STOP: 'STOP', + /** + * The maximum number of tokens as specified in the request was reached. + */ + MAX_TOKENS: 'MAX_TOKENS', + /** + * The candidate content was flagged for safety reasons. + */ + SAFETY: 'SAFETY', + /** + * The candidate content was flagged for recitation reasons. + */ + RECITATION: 'RECITATION', + /** + * Unknown reason. + */ + OTHER: 'OTHER', + /** + * The candidate content contained forbidden terms. + */ + BLOCKLIST: 'BLOCKLIST', + /** + * The candidate content potentially contained prohibited content. + */ + PROHIBITED_CONTENT: 'PROHIBITED_CONTENT', + /** + * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII). + */ + SPII: 'SPII', + /** + * The function call generated by the model was invalid. + */ + MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL' +}; +/** + * @public + */ +const FunctionCallingMode = { + /** + * Default model behavior; model decides to predict either a function call + * or a natural language response. + */ + AUTO: 'AUTO', + /** + * Model is constrained to always predicting a function call only. + * If `allowed_function_names` is set, the predicted function call will be + * limited to any one of `allowed_function_names`, else the predicted + * function call will be any one of the provided `function_declarations`. + */ + ANY: 'ANY', + /** + * Model will not predict any function call. 
Model behavior is same as when + * not passing any function declarations. + */ + NONE: 'NONE' +}; +/** + * Content part modality. + * @public + */ +const Modality = { + /** + * Unspecified modality. + */ + MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED', + /** + * Plain text. + */ + TEXT: 'TEXT', + /** + * Image. + */ + IMAGE: 'IMAGE', + /** + * Video. + */ + VIDEO: 'VIDEO', + /** + * Audio. + */ + AUDIO: 'AUDIO', + /** + * Document (for example, PDF). + */ + DOCUMENT: 'DOCUMENT' +}; +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +const ResponseModality = { + /** + * Text. + * @beta + */ + TEXT: 'TEXT', + /** + * Image. + * @beta + */ + IMAGE: 'IMAGE', + /** + * Audio. + * @beta + */ + AUDIO: 'AUDIO' +}; +/** + * Determines whether inference happens on-device or in-cloud. + * + * @remarks + * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an + * on-device model. If on-device inference is not available, the SDK + * will fall back to using a cloud-hosted model. + * <br/> + * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an + * on-device model. The SDK will not fall back to a cloud-hosted model. + * If on-device inference is not available, inference methods will throw. + * <br/> + * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a + * cloud-hosted model. The SDK will not fall back to an on-device model. + * <br/> + * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a + * cloud-hosted model. If not available, the SDK will fall back to an + * on-device model. + * + * @beta + */ +const InferenceMode = { + 'PREFER_ON_DEVICE': 'prefer_on_device', + 'ONLY_ON_DEVICE': 'only_on_device', + 'ONLY_IN_CLOUD': 'only_in_cloud', + 'PREFER_IN_CLOUD': 'prefer_in_cloud' +}; +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +const InferenceSource = { + 'ON_DEVICE': 'on_device', + 'IN_CLOUD': 'in_cloud' +}; +/** + * Represents the result of the code execution. + * + * @beta + */ +const Outcome = { + UNSPECIFIED: 'OUTCOME_UNSPECIFIED', + OK: 'OUTCOME_OK', + FAILED: 'OUTCOME_FAILED', + DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED' +}; +/** + * The programming language of the code. + * + * @beta + */ +const Language = { + UNSPECIFIED: 'LANGUAGE_UNSPECIFIED', + PYTHON: 'PYTHON' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. 
+ * <br/> + * + * @beta + */ +const URLRetrievalStatus = { + /** + * Unspecified retrieval status. + */ + URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED', + /** + * The URL retrieval was successful. + */ + URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS', + /** + * The URL retrieval failed. + */ + URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR', + /** + * The URL retrieval failed because the content is behind a paywall. + */ + URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL', + /** + * The URL retrieval failed because the content is unsafe. + */ + URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE' +}; +/** + * The types of responses that can be returned by {@link LiveSession.receive}. + * + * @beta + */ +const LiveResponseType = { + SERVER_CONTENT: 'serverContent', + TOOL_CALL: 'toolCall', + TOOL_CALL_CANCELLATION: 'toolCallCancellation' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +const AIErrorCode = { + /** A generic error occurred. */ + ERROR: 'error', + /** An error occurred in a request. */ + REQUEST_ERROR: 'request-error', + /** An error occurred in a response. */ + RESPONSE_ERROR: 'response-error', + /** An error occurred while performing a fetch. */ + FETCH_ERROR: 'fetch-error', + /** An error occurred because an operation was attempted on a closed session. */ + SESSION_CLOSED: 'session-closed', + /** An error associated with a Content object. */ + INVALID_CONTENT: 'invalid-content', + /** An error due to the Firebase API not being enabled in the Console. */ + API_NOT_ENABLED: 'api-not-enabled', + /** An error due to invalid Schema input. */ + INVALID_SCHEMA: 'invalid-schema', + /** An error occurred due to a missing Firebase API key. */ + NO_API_KEY: 'no-api-key', + /** An error occurred due to a missing Firebase app ID. */ + NO_APP_ID: 'no-app-id', + /** An error occurred due to a model name not being specified during initialization. */ + NO_MODEL: 'no-model', + /** An error occurred due to a missing project ID. */ + NO_PROJECT_ID: 'no-project-id', + /** An error occurred while parsing. */ + PARSE_FAILED: 'parse-failed', + /** An error occurred due an attempt to use an unsupported feature. */ + UNSUPPORTED: 'unsupported' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+/**
+ * Contains the list of OpenAPI data types
+ * as defined by the
+ * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
+ * @public
+ */
+const SchemaType = {
+ /** String type. */
+ STRING: 'string',
+ /** Number type. */
+ NUMBER: 'number',
+ /** Integer type. */
+ INTEGER: 'integer',
+ /** Boolean type. */
+ BOOLEAN: 'boolean',
+ /** Array type. */
+ ARRAY: 'array',
+ /** Object type. */
+ OBJECT: 'object'
+};
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * A filter level controlling how aggressively to filter sensitive content.
+ *
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
+ * for more details.
+ *
+ * @public
+ */
+const ImagenSafetyFilterLevel = {
+ /**
+ * The most aggressive filtering level; most strict blocking.
+ */
+ BLOCK_LOW_AND_ABOVE: 'block_low_and_above',
+ /**
+ * Blocks some sensitive prompts and responses.
+ */
+ BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',
+ /**
+ * Blocks few sensitive prompts and responses.
+ */
+ BLOCK_ONLY_HIGH: 'block_only_high',
+ /**
+ * The least aggressive filtering level; blocks very few sensitive prompts and responses.
+ *
+ * Access to this feature is restricted and may require your case to be reviewed and approved by
+ * Cloud support.
+ */
+ BLOCK_NONE: 'block_none'
+};
+/**
+ * A filter level controlling whether generation of images containing people or faces is allowed.
+ *
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
+ * documentation for more details.
+ *
+ * @public
+ */
+const ImagenPersonFilterLevel = {
+ /**
+ * Disallow generation of images containing people or faces; images of people are filtered out.
+ */
+ BLOCK_ALL: 'dont_allow',
+ /**
+ * Allow generation of images containing adults only; images of children are filtered out.
+ *
+ * Generation of images containing people or faces may require your use case to be
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
+ * for more details.
+ */
+ ALLOW_ADULT: 'allow_adult',
+ /**
+ * Allow generation of images containing people of all ages.
+ * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + ALLOW_ALL: 'allow_all' +}; +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +const ImagenAspectRatio = { + /** + * Square (1:1) aspect ratio. + */ + 'SQUARE': '1:1', + /** + * Landscape (3:4) aspect ratio. + */ + 'LANDSCAPE_3x4': '3:4', + /** + * Portrait (4:3) aspect ratio. + */ + 'PORTRAIT_4x3': '4:3', + /** + * Landscape (16:9) aspect ratio. + */ + 'LANDSCAPE_16x9': '16:9', + /** + * Portrait (9:16) aspect ratio. + */ + 'PORTRAIT_9x16': '9:16' +}; + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase AI SDK. + * This determines which backend service (Vertex AI Gemini API or Gemini Developer API) + * the SDK will communicate with. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +const BackendType = { + /** + * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + VERTEX_AI: 'VERTEX_AI', + /** + * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}). + * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + GOOGLE_AI: 'GOOGLE_AI' +}; // Using 'as const' makes the string values literal types + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Abstract base class representing the configuration for an AI service backend. + * This class should not be instantiated directly. 
Use its subclasses; {@link GoogleAIBackend} for + * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and + * {@link VertexAIBackend} for the Vertex AI Gemini API. + * + * @public + */ +class Backend { + /** + * Protected constructor for use by subclasses. + * @param type - The backend type. + */ + constructor(type) { + this.backendType = type; + } +} +/** + * Configuration class for the Gemini Developer API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Gemini Developer API as the backend. + * + * @public + */ +class GoogleAIBackend extends Backend { + /** + * Creates a configuration object for the Gemini Developer API backend. + */ + constructor() { + super(BackendType.GOOGLE_AI); + } +} +/** + * Configuration class for the Vertex AI Gemini API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend. + * + * @public + */ +class VertexAIBackend extends Backend { + /** + * Creates a configuration object for the Vertex AI backend. + * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + constructor(location = DEFAULT_LOCATION) { + super(BackendType.VERTEX_AI); + if (!location) { + this.location = DEFAULT_LOCATION; + } + else { + this.location = location; + } + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class AIService { + constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) { + this.app = app; + this.backend = backend; + this.chromeAdapterFactory = chromeAdapterFactory; + const appCheck = appCheckProvider?.getImmediate({ optional: true }); + const auth = authProvider?.getImmediate({ optional: true }); + this.auth = auth || null; + this.appCheck = appCheck || null; + if (backend instanceof VertexAIBackend) { + this.location = backend.location; + } + else { + this.location = ''; + } + } + _delete() { + return Promise.resolve(); + } + set options(optionsToSet) { + this._options = optionsToSet; + } + get options() { + return this._options; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Error class for the Firebase AI SDK. 
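+ *
+ * @example
+ * // Minimal handling sketch (illustrative only; assumes a configured GenerativeModel named `model`):
+ * try {
+ *   await model.generateContent('Hello');
+ * } catch (e) {
+ *   if (e instanceof AIError && e.code === AIErrorCode.FETCH_ERROR) {
+ *     console.warn(`Network-level failure: ${e.message}`);
+ *   } else {
+ *     throw e;
+ *   }
+ * }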
+ * + * @public + */ +class AIError extends FirebaseError { + /** + * Constructs a new instance of the `AIError` class. + * + * @param code - The error code from {@link (AIErrorCode:type)}. + * @param message - A human-readable message describing the error. + * @param customErrorData - Optional error data. + */ + constructor(code, message, customErrorData) { + // Match error format used by FirebaseError from ErrorFactory + const service = AI_TYPE; + const fullCode = `${service}/${code}`; + const fullMessage = `${service}: ${message} (${fullCode})`; + super(code, fullMessage); + this.code = code; + this.customErrorData = customErrorData; + // FirebaseError initializes a stack trace, but it assumes the error is created from the error + // factory. Since we break this assumption, we set the stack trace to be originating from this + // constructor. + // This is only supported in V8. + if (Error.captureStackTrace) { + // Allows us to initialize the stack trace without including the constructor itself at the + // top level of the stack trace. + Error.captureStackTrace(this, AIError); + } + // Allows instanceof AIError in ES5/ES6 + // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work + // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget + // which we can now use since we no longer target ES5. + Object.setPrototypeOf(this, AIError.prototype); + // Since Error is an interface, we don't inherit toString and so we define it ourselves. + this.toString = () => fullMessage; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI} + * instances by backend type. + * + * @internal + */ +function encodeInstanceIdentifier(backend) { + if (backend instanceof GoogleAIBackend) { + return `${AI_TYPE}/googleai`; + } + else if (backend instanceof VertexAIBackend) { + return `${AI_TYPE}/vertexai/${backend.location}`; + } + else { + throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(backend.backendType)}`); + } +} +/** + * Decodes an instance identifier string into a {@link Backend}. 
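+ *
+ * @example
+ * // Illustrative round trip (the exact identifier prefix comes from the AI_TYPE constant):
+ * const id = encodeInstanceIdentifier(new VertexAIBackend('us-central1'));
+ * const backend = decodeInstanceIdentifier(id); // VertexAIBackend with location 'us-central1'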
+ * + * @internal + */ +function decodeInstanceIdentifier(instanceIdentifier) { + const identifierParts = instanceIdentifier.split('/'); + if (identifierParts[0] !== AI_TYPE) { + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`); + } + const backendType = identifierParts[1]; + switch (backendType) { + case 'vertexai': + const location = identifierParts[2]; + if (!location) { + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown location '${instanceIdentifier}'`); + } + return new VertexAIBackend(location); + case 'googleai': + return new GoogleAIBackend(); + default: + throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier string: '${instanceIdentifier}'`); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Base class for Firebase AI model APIs. + * + * Instances of this class are associated with a specific Firebase AI {@link Backend} + * and provide methods for interacting with the configured generative model. + * + * @public + */ +class AIModel { + /** + * Constructs a new instance of the {@link AIModel} class. + * + * This constructor should only be called from subclasses that provide + * a model API. + * + * @param ai - an {@link AI} instance. + * @param modelName - The name of the model being used. It can be in one of the following formats: + * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) + * - `models/my-model` (will resolve to `publishers/google/models/my-model`) + * - `publishers/my-publisher/models/my-model` (fully qualified model name) + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @internal + */ + constructor(ai, modelName) { + if (!ai.app?.options?.apiKey) { + throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`); + } + else if (!ai.app?.options?.projectId) { + throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`); + } + else if (!ai.app?.options?.appId) { + throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. 
Firebase AI requires this field to contain a valid app ID.`); + } + else { + this._apiSettings = { + apiKey: ai.app.options.apiKey, + project: ai.app.options.projectId, + appId: ai.app.options.appId, + automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled, + location: ai.location, + backend: ai.backend + }; + if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) { + const token = ai.app.settings.appCheckToken; + this._apiSettings.getAppCheckToken = () => { + return Promise.resolve({ token }); + }; + } + else if (ai.appCheck) { + if (ai.options?.useLimitedUseAppCheckTokens) { + this._apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken(); + } + else { + this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken(); + } + } + if (ai.auth) { + this._apiSettings.getAuthToken = () => ai.auth.getToken(); + } + this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType); + } + } + /** + * Normalizes the given model name to a fully qualified model resource name. + * + * @param modelName - The model name to normalize. + * @returns The fully qualified model resource name. + * + * @internal + */ + static normalizeModelName(modelName, backendType) { + if (backendType === BackendType.GOOGLE_AI) { + return AIModel.normalizeGoogleAIModelName(modelName); + } + else { + return AIModel.normalizeVertexAIModelName(modelName); + } + } + /** + * @internal + */ + static normalizeGoogleAIModelName(modelName) { + return `models/${modelName}`; + } + /** + * @internal + */ + static normalizeVertexAIModelName(modelName) { + let model; + if (modelName.includes('/')) { + if (modelName.startsWith('models/')) { + // Add 'publishers/google' if the user is only passing in 'models/model-name'. + model = `publishers/google/${modelName}`; + } + else { + // Any other custom format (e.g. tuned models) must be passed in correctly. + model = modelName; + } + } + else { + // If path is not included, assume it's a non-tuned model. + model = `publishers/google/models/${modelName}`; + } + return model; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const logger = new Logger('@firebase/vertexai'); + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +var Task; +(function (Task) { + Task["GENERATE_CONTENT"] = "generateContent"; + Task["STREAM_GENERATE_CONTENT"] = "streamGenerateContent"; + Task["COUNT_TOKENS"] = "countTokens"; + Task["PREDICT"] = "predict"; +})(Task || (Task = {})); +class RequestUrl { + constructor(model, task, apiSettings, stream, requestOptions) { + this.model = model; + this.task = task; + this.apiSettings = apiSettings; + this.stream = stream; + this.requestOptions = requestOptions; + } + toString() { + const url = new URL(this.baseUrl); // Throws if the URL is invalid + url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`; + url.search = this.queryParams.toString(); + return url.toString(); + } + get baseUrl() { + return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`; + } + get apiVersion() { + return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available + } + get modelPath() { + if (this.apiSettings.backend instanceof GoogleAIBackend) { + return `projects/${this.apiSettings.project}/${this.model}`; + } + else if (this.apiSettings.backend instanceof VertexAIBackend) { + return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`; + } + else { + throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`); + } + } + get queryParams() { + const params = new URLSearchParams(); + if (this.stream) { + params.set('alt', 'sse'); + } + return params; + } +} +class WebSocketUrl { + constructor(apiSettings) { + this.apiSettings = apiSettings; + } + toString() { + const url = new URL(`wss://${DEFAULT_DOMAIN}`); + url.pathname = this.pathname; + const queryParams = new URLSearchParams(); + queryParams.set('key', this.apiSettings.apiKey); + url.search = queryParams.toString(); + return url.toString(); + } + get pathname() { + if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent'; + } + else { + return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`; + } + } +} +/** + * Log language and "fire/version" to x-goog-api-client + */ +function getClientHeaders() { + const loggingTags = []; + loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`); + loggingTags.push(`fire/${PACKAGE_VERSION}`); + return loggingTags.join(' '); +} +async function getHeaders(url) { + const headers = new Headers(); + headers.append('Content-Type', 'application/json'); + headers.append('x-goog-api-client', getClientHeaders()); + headers.append('x-goog-api-key', url.apiSettings.apiKey); + if (url.apiSettings.automaticDataCollectionEnabled) { + headers.append('X-Firebase-Appid', url.apiSettings.appId); + } + if (url.apiSettings.getAppCheckToken) { + const appCheckToken = await url.apiSettings.getAppCheckToken(); + if (appCheckToken) { + headers.append('X-Firebase-AppCheck', appCheckToken.token); + if (appCheckToken.error) { + logger.warn(`Unable to obtain a valid App Check token: ${appCheckToken.error.message}`); + } + } + } + if (url.apiSettings.getAuthToken) { + const authToken = await url.apiSettings.getAuthToken(); + if (authToken) { + headers.append('Authorization', `Firebase ${authToken.accessToken}`); + } + } + return headers; +} +async function constructRequest(model, task, apiSettings, stream, body, requestOptions) { + const url = new RequestUrl(model, task, apiSettings, stream, requestOptions); + return { + url: url.toString(), + 
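+ // Options passed straight through to fetch(); makeRequest() attaches an AbortController
+ // signal to these options before dispatching so the request can be timed out.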
fetchOptions: { + method: 'POST', + headers: await getHeaders(url), + body + } + }; +} +async function makeRequest(model, task, apiSettings, stream, body, requestOptions) { + const url = new RequestUrl(model, task, apiSettings, stream, requestOptions); + let response; + let fetchTimeoutId; + try { + const request = await constructRequest(model, task, apiSettings, stream, body, requestOptions); + // Timeout is 180s by default + const timeoutMillis = requestOptions?.timeout != null && requestOptions.timeout >= 0 + ? requestOptions.timeout + : DEFAULT_FETCH_TIMEOUT_MS; + const abortController = new AbortController(); + fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis); + request.fetchOptions.signal = abortController.signal; + response = await fetch(request.url, request.fetchOptions); + if (!response.ok) { + let message = ''; + let errorDetails; + try { + const json = await response.json(); + message = json.error.message; + if (json.error.details) { + message += ` ${JSON.stringify(json.error.details)}`; + errorDetails = json.error.details; + } + } + catch (e) { + // ignored + } + if (response.status === 403 && + errorDetails && + errorDetails.some((detail) => detail.reason === 'SERVICE_DISABLED') && + errorDetails.some((detail) => detail.links?.[0]?.description.includes('Google developers console API activation'))) { + throw new AIError(AIErrorCode.API_NOT_ENABLED, `The Firebase AI SDK requires the Firebase AI ` + + `API ('firebasevertexai.googleapis.com') to be enabled in your ` + + `Firebase project. Enable this API by visiting the Firebase Console ` + + `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` + + `and clicking "Get started". If you enabled this API recently, ` + + `wait a few minutes for the action to propagate to our systems and ` + + `then retry.`, { + status: response.status, + statusText: response.statusText, + errorDetails + }); + } + throw new AIError(AIErrorCode.FETCH_ERROR, `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`, { + status: response.status, + statusText: response.statusText, + errorDetails + }); + } + } + catch (e) { + let err = e; + if (e.code !== AIErrorCode.FETCH_ERROR && + e.code !== AIErrorCode.API_NOT_ENABLED && + e instanceof Error) { + err = new AIError(AIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}`); + err.stack = e.stack; + } + throw err; + } + finally { + if (fetchTimeoutId) { + clearTimeout(fetchTimeoutId); + } + } + return response; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Check that at least one candidate exists and does not have a bad + * finish reason. Warns if multiple candidates exist. + */ +function hasValidCandidates(response) { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + logger.warn(`This response had ${response.candidates.length} ` + + `candidates. 
Returning text from the first candidate only. ` + + `Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, { + response + }); + } + return true; + } + else { + return false; + } +} +/** + * Creates an EnhancedGenerateContentResponse object that has helper functions and + * other modifications that improve usability. + */ +function createEnhancedContentResponse(response, inferenceSource = InferenceSource.IN_CLOUD) { + /** + * The Vertex AI backend omits default values. + * This causes the `index` property to be omitted from the first candidate in the + * response, since it has index 0, and 0 is a default value. + * See: https://github.com/firebase/firebase-js-sdk/issues/8566 + */ + if (response.candidates && !response.candidates[0].hasOwnProperty('index')) { + response.candidates[0].index = 0; + } + const responseWithHelpers = addHelpers(response); + responseWithHelpers.inferenceSource = inferenceSource; + return responseWithHelpers; +} +/** + * Adds convenience helper methods to a response object, including stream + * chunks (as long as each chunk is a complete GenerateContentResponse JSON). + */ +function addHelpers(response) { + response.text = () => { + if (hasValidCandidates(response)) { + return getText(response, part => !part.thought); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Text not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return ''; + }; + response.thoughtSummary = () => { + if (hasValidCandidates(response)) { + const result = getText(response, part => !!part.thought); + return result === '' ? undefined : result; + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Thought summary not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + response.inlineDataParts = () => { + if (hasValidCandidates(response)) { + return getInlineDataParts(response); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Data not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + response.functionCalls = () => { + if (hasValidCandidates(response)) { + return getFunctionCalls(response); + } + else if (response.promptFeedback) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Function call not available. ${formatBlockErrorMessage(response)}`, { + response + }); + } + return undefined; + }; + return response; +} +/** + * Returns all text from the first candidate's parts, filtering by whether + * `partFilter()` returns true. + * + * @param response - The `GenerateContentResponse` from which to extract text. + * @param partFilter - Only return `Part`s for which this returns true + */ +function getText(response, partFilter) { + const textStrings = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.text && partFilter(part)) { + textStrings.push(part.text); + } + } + } + if (textStrings.length > 0) { + return textStrings.join(''); + } + else { + return ''; + } +} +/** + * Returns every {@link FunctionCall} associated with first candidate. 
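+ *
+ * @example
+ * // Illustrative: consumers normally reach this through the enhanced response helper
+ * // wired up in addHelpers() (assumes a configured GenerativeModel named `model`):
+ * const { response } = await model.generateContent('What is the weather in Paris?');
+ * const calls = response.functionCalls(); // FunctionCall[] | undefined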
+ */ +function getFunctionCalls(response) { + const functionCalls = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.functionCall) { + functionCalls.push(part.functionCall); + } + } + } + if (functionCalls.length > 0) { + return functionCalls; + } + else { + return undefined; + } +} +/** + * Returns every {@link InlineDataPart} in the first candidate if present. + * + * @internal + */ +function getInlineDataParts(response) { + const data = []; + if (response.candidates?.[0].content?.parts) { + for (const part of response.candidates?.[0].content?.parts) { + if (part.inlineData) { + data.push(part); + } + } + } + if (data.length > 0) { + return data; + } + else { + return undefined; + } +} +const badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY]; +function hadBadFinishReason(candidate) { + return (!!candidate.finishReason && + badFinishReasons.some(reason => reason === candidate.finishReason)); +} +function formatBlockErrorMessage(response) { + let message = ''; + if ((!response.candidates || response.candidates.length === 0) && + response.promptFeedback) { + message += 'Response was blocked'; + if (response.promptFeedback?.blockReason) { + message += ` due to ${response.promptFeedback.blockReason}`; + } + if (response.promptFeedback?.blockReasonMessage) { + message += `: ${response.promptFeedback.blockReasonMessage}`; + } + } + else if (response.candidates?.[0]) { + const firstCandidate = response.candidates[0]; + if (hadBadFinishReason(firstCandidate)) { + message += `Candidate was blocked due to ${firstCandidate.finishReason}`; + if (firstCandidate.finishMessage) { + message += `: ${firstCandidate.finishMessage}`; + } + } + } + return message; +} +/** + * Convert a generic successful fetch response body to an Imagen response object + * that can be returned to the user. This converts the REST APIs response format to our + * APIs representation of a response. + * + * @internal + */ +async function handlePredictResponse(response) { + const responseJson = await response.json(); + const images = []; + let filteredReason = undefined; + // The backend should always send a non-empty array of predictions if the response was successful. + if (!responseJson.predictions || responseJson.predictions?.length === 0) { + throw new AIError(AIErrorCode.RESPONSE_ERROR, 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'); + } + for (const prediction of responseJson.predictions) { + if (prediction.raiFilteredReason) { + filteredReason = prediction.raiFilteredReason; + } + else if (prediction.mimeType && prediction.bytesBase64Encoded) { + images.push({ + mimeType: prediction.mimeType, + bytesBase64Encoded: prediction.bytesBase64Encoded + }); + } + else if (prediction.mimeType && prediction.gcsUri) { + images.push({ + mimeType: prediction.mimeType, + gcsURI: prediction.gcsUri + }); + } + else if (prediction.safetyAttributes) ; + else { + throw new AIError(AIErrorCode.RESPONSE_ERROR, `Unexpected element in 'predictions' array in response: '${JSON.stringify(prediction)}'`); + } + } + return { images, filteredReason }; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).
+ * The public API prioritizes the format used by the Vertex AI Gemini API.
+ * We avoid having two sets of types by translating requests and responses between the two API formats.
+ * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API
+ * with minimal code changes.
+ *
+ * This file contains the functions that map requests and responses between the two API formats.
+ * Requests in the Vertex AI format are mapped to the Google AI format before being sent.
+ * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.
+ */
+/**
+ * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI.
+ *
+ * @param generateContentRequest The {@link GenerateContentRequest} to map.
+ * @returns A {@link GenerateContentRequest} that conforms to the Google AI format.
+ *
+ * @throws If the request contains properties that are unsupported by Google AI.
+ *
+ * @internal
+ */
+function mapGenerateContentRequest(generateContentRequest) {
+ generateContentRequest.safetySettings?.forEach(safetySetting => {
+ if (safetySetting.method) {
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'SafetySetting.method is not supported in the Gemini Developer API. Please remove this property.');
+ }
+ });
+ if (generateContentRequest.generationConfig?.topK) {
+ const roundedTopK = Math.round(generateContentRequest.generationConfig.topK);
+ if (roundedTopK !== generateContentRequest.generationConfig.topK) {
+ logger.warn('topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.');
+ generateContentRequest.generationConfig.topK = roundedTopK;
+ }
+ }
+ return generateContentRequest;
+}
+/**
+ * Maps a {@link GenerateContentResponse} from Google AI to the format of the
+ * {@link GenerateContentResponse} that we get from Vertex AI, which is exposed in the public API.
+ *
+ * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.
+ * @returns A {@link GenerateContentResponse} that conforms to the public API's format.
+ *
+ * @internal
+ */
+function mapGenerateContentResponse(googleAIResponse) {
+ const generateContentResponse = {
+ candidates: googleAIResponse.candidates
+ ? mapGenerateContentCandidates(googleAIResponse.candidates)
+ : undefined,
+ promptFeedback: googleAIResponse.promptFeedback
+ ? mapPromptFeedback(googleAIResponse.promptFeedback)
+ : undefined,
+ usageMetadata: googleAIResponse.usageMetadata
+ };
+ return generateContentResponse;
+}
+/**
+ * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.
+ *
+ * @param countTokensRequest The {@link CountTokensRequest} to map.
+ * @param model The model to count tokens with.
+ * @returns A {@link CountTokensRequest} that conforms to the Google AI format.
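+ *
+ * @example
+ * // Illustrative shape of the mapping (the model name is just an example):
+ * // mapCountTokensRequest({ contents }, 'models/gemini-pro')
+ * // => { generateContentRequest: { model: 'models/gemini-pro', contents } }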
+ * + * @internal + */ +function mapCountTokensRequest(countTokensRequest, model) { + const mappedCountTokensRequest = { + generateContentRequest: { + model, + ...countTokensRequest + } + }; + return mappedCountTokensRequest; +} +/** + * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms + * to the Vertex AI API format. + * + * @param candidates The {@link GoogleAIGenerateContentCandidate} to map. + * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format. + * + * @throws If any {@link Part} in the candidates has a `videoMetadata` property. + * + * @internal + */ +function mapGenerateContentCandidates(candidates) { + const mappedCandidates = []; + let mappedSafetyRatings; + if (mappedCandidates) { + candidates.forEach(candidate => { + // Map citationSources to citations. + let citationMetadata; + if (candidate.citationMetadata) { + citationMetadata = { + citations: candidate.citationMetadata.citationSources + }; + } + // Assign missing candidate SafetyRatings properties to their defaults if undefined. + if (candidate.safetyRatings) { + mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => { + return { + ...safetyRating, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0 + }; + }); + } + // videoMetadata is not supported. + // Throw early since developers may send a long video as input and only expect to pay + // for inference on a small portion of the video. + if (candidate.content?.parts?.some(part => part?.videoMetadata)) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'); + } + const mappedCandidate = { + index: candidate.index, + content: candidate.content, + finishReason: candidate.finishReason, + finishMessage: candidate.finishMessage, + safetyRatings: mappedSafetyRatings, + citationMetadata, + groundingMetadata: candidate.groundingMetadata, + urlContextMetadata: candidate.urlContextMetadata + }; + mappedCandidates.push(mappedCandidate); + }); + } + return mappedCandidates; +} +function mapPromptFeedback(promptFeedback) { + // Assign missing SafetyRating properties to their defaults if undefined. + const mappedSafetyRatings = []; + promptFeedback.safetyRatings.forEach(safetyRating => { + mappedSafetyRatings.push({ + category: safetyRating.category, + probability: safetyRating.probability, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0, + blocked: safetyRating.blocked + }); + }); + const mappedPromptFeedback = { + blockReason: promptFeedback.blockReason, + safetyRatings: mappedSafetyRatings, + blockReasonMessage: promptFeedback.blockReasonMessage + }; + return mappedPromptFeedback; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/; +/** + * Process a response.body stream from the backend and return an + * iterator that provides one complete GenerateContentResponse at a time + * and a promise that resolves with a single aggregated + * GenerateContentResponse. + * + * @param response - Response from a fetch call + */ +function processStream(response, apiSettings, inferenceSource) { + const inputStream = response.body.pipeThrough(new TextDecoderStream('utf8', { fatal: true })); + const responseStream = getResponseStream(inputStream); + const [stream1, stream2] = responseStream.tee(); + return { + stream: generateResponseSequence(stream1, apiSettings, inferenceSource), + response: getResponsePromise(stream2, apiSettings, inferenceSource) + }; +} +async function getResponsePromise(stream, apiSettings, inferenceSource) { + const allResponses = []; + const reader = stream.getReader(); + while (true) { + const { done, value } = await reader.read(); + if (done) { + let generateContentResponse = aggregateResponses(allResponses); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + generateContentResponse = mapGenerateContentResponse(generateContentResponse); + } + return createEnhancedContentResponse(generateContentResponse, inferenceSource); + } + allResponses.push(value); + } +} +async function* generateResponseSequence(stream, apiSettings, inferenceSource) { + const reader = stream.getReader(); + while (true) { + const { value, done } = await reader.read(); + if (done) { + break; + } + let enhancedResponse; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + enhancedResponse = createEnhancedContentResponse(mapGenerateContentResponse(value), inferenceSource); + } + else { + enhancedResponse = createEnhancedContentResponse(value, inferenceSource); + } + const firstCandidate = enhancedResponse.candidates?.[0]; + // Don't yield a response with no useful data for the developer. + if (!firstCandidate?.content?.parts && + !firstCandidate?.finishReason && + !firstCandidate?.citationMetadata && + !firstCandidate?.urlContextMetadata) { + continue; + } + yield enhancedResponse; + } +} +/** + * Reads a raw stream from the fetch response and join incomplete + * chunks, returning a new stream that provides a single complete + * GenerateContentResponse in each iteration. + */ +function getResponseStream(inputStream) { + const reader = inputStream.getReader(); + const stream = new ReadableStream({ + start(controller) { + let currentText = ''; + return pump(); + function pump() { + return reader.read().then(({ value, done }) => { + if (done) { + if (currentText.trim()) { + controller.error(new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')); + return; + } + controller.close(); + return; + } + currentText += value; + let match = currentText.match(responseLineRE); + let parsedResponse; + while (match) { + try { + parsedResponse = JSON.parse(match[1]); + } + catch (e) { + controller.error(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing JSON response: "${match[1]}`)); + return; + } + controller.enqueue(parsedResponse); + currentText = currentText.substring(match[0].length); + match = currentText.match(responseLineRE); + } + return pump(); + }); + } + } + }); + return stream; +} +/** + * Aggregates an array of `GenerateContentResponse`s into a single + * GenerateContentResponse. 
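+ *
+ * (Illustrative summary of the behavior below: for each candidate index, scalar fields such as
+ * `finishReason` and `safetyRatings` are overwritten by later chunks, while non-empty content
+ * parts are appended, so the final candidate holds the concatenated streamed output.)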
+ */ +function aggregateResponses(responses) { + const lastResponse = responses[responses.length - 1]; + const aggregatedResponse = { + promptFeedback: lastResponse?.promptFeedback + }; + for (const response of responses) { + if (response.candidates) { + for (const candidate of response.candidates) { + // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined. + // See: https://github.com/firebase/firebase-js-sdk/issues/8566 + const i = candidate.index || 0; + if (!aggregatedResponse.candidates) { + aggregatedResponse.candidates = []; + } + if (!aggregatedResponse.candidates[i]) { + aggregatedResponse.candidates[i] = { + index: candidate.index + }; + } + // Keep overwriting, the last one will be final + aggregatedResponse.candidates[i].citationMetadata = + candidate.citationMetadata; + aggregatedResponse.candidates[i].finishReason = candidate.finishReason; + aggregatedResponse.candidates[i].finishMessage = + candidate.finishMessage; + aggregatedResponse.candidates[i].safetyRatings = + candidate.safetyRatings; + aggregatedResponse.candidates[i].groundingMetadata = + candidate.groundingMetadata; + // The urlContextMetadata object is defined in the first chunk of the response stream. + // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to + // make sure that we don't overwrite the first value urlContextMetadata object with undefined. + // FIXME: What happens if we receive a second, valid urlContextMetadata object? + const urlContextMetadata = candidate.urlContextMetadata; + if (typeof urlContextMetadata === 'object' && + urlContextMetadata !== null && + Object.keys(urlContextMetadata).length > 0) { + aggregatedResponse.candidates[i].urlContextMetadata = + urlContextMetadata; + } + /** + * Candidates should always have content and parts, but this handles + * possible malformed responses. + */ + if (candidate.content) { + // Skip a candidate without parts. + if (!candidate.content.parts) { + continue; + } + if (!aggregatedResponse.candidates[i].content) { + aggregatedResponse.candidates[i].content = { + role: candidate.content.role || 'user', + parts: [] + }; + } + for (const part of candidate.content.parts) { + const newPart = { ...part }; + // The backend can send empty text parts. If these are sent back + // (e.g. in chat history), the backend will respond with an error. + // To prevent this, ignore empty text parts. + if (part.text === '') { + continue; + } + if (Object.keys(newPart).length > 0) { + aggregatedResponse.candidates[i].content.parts.push(newPart); + } + } + } + } + } + } + return aggregatedResponse; +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +const errorsCausingFallback = [ + // most network errors + AIErrorCode.FETCH_ERROR, + // fallback code for all other errors in makeRequest + AIErrorCode.ERROR, + // error due to API not being enabled in project + AIErrorCode.API_NOT_ENABLED +]; +/** + * Dispatches a request to the appropriate backend (on-device or in-cloud) + * based on the inference mode. + * + * @param request - The request to be sent. + * @param chromeAdapter - The on-device model adapter. + * @param onDeviceCall - The function to call for on-device inference. + * @param inCloudCall - The function to call for in-cloud inference. + * @returns The response from the backend. + */ +async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) { + if (!chromeAdapter) { + return { + response: await inCloudCall(), + inferenceSource: InferenceSource.IN_CLOUD + }; + } + switch (chromeAdapter.mode) { + case InferenceMode.ONLY_ON_DEVICE: + if (await chromeAdapter.isAvailable(request)) { + return { + response: await onDeviceCall(), + inferenceSource: InferenceSource.ON_DEVICE + }; + } + throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.'); + case InferenceMode.ONLY_IN_CLOUD: + return { + response: await inCloudCall(), + inferenceSource: InferenceSource.IN_CLOUD + }; + case InferenceMode.PREFER_IN_CLOUD: + try { + return { + response: await inCloudCall(), + inferenceSource: InferenceSource.IN_CLOUD + }; + } + catch (e) { + if (e instanceof AIError && errorsCausingFallback.includes(e.code)) { + return { + response: await onDeviceCall(), + inferenceSource: InferenceSource.ON_DEVICE + }; + } + throw e; + } + case InferenceMode.PREFER_ON_DEVICE: + if (await chromeAdapter.isAvailable(request)) { + return { + response: await onDeviceCall(), + inferenceSource: InferenceSource.ON_DEVICE + }; + } + return { + response: await inCloudCall(), + inferenceSource: InferenceSource.IN_CLOUD + }; + default: + throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = mapGenerateContentRequest(params); + } + return makeRequest(model, Task.STREAM_GENERATE_CONTENT, apiSettings, + /* stream */ true, JSON.stringify(params), requestOptions); +} +async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) { + const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions)); + return processStream(callResult.response, apiSettings); // TODO: Map streaming responses +} +async function generateContentOnCloud(apiSettings, model, params, requestOptions) { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = mapGenerateContentRequest(params); + } + return makeRequest(model, Task.GENERATE_CONTENT, apiSettings, + /* stream */ false, JSON.stringify(params), requestOptions); +} +async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) { + const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions)); + const generateContentResponse = await processGenerateContentResponse(callResult.response, apiSettings); + const enhancedResponse = createEnhancedContentResponse(generateContentResponse, callResult.inferenceSource); + return { + response: enhancedResponse + }; +} +async function processGenerateContentResponse(response, apiSettings) { + const responseJson = await response.json(); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return mapGenerateContentResponse(responseJson); + } + else { + return responseJson; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +function formatSystemInstruction(input) { + // null or undefined + if (input == null) { + return undefined; + } + else if (typeof input === 'string') { + return { role: 'system', parts: [{ text: input }] }; + } + else if (input.text) { + return { role: 'system', parts: [input] }; + } + else if (input.parts) { + if (!input.role) { + return { role: 'system', parts: input.parts }; + } + else { + return input; + } + } +} +function formatNewContent(request) { + let newParts = []; + if (typeof request === 'string') { + newParts = [{ text: request }]; + } + else { + for (const partOrString of request) { + if (typeof partOrString === 'string') { + newParts.push({ text: partOrString }); + } + else { + newParts.push(partOrString); + } + } + } + return assignRoleToPartsAndValidateSendMessageRequest(newParts); +} +/** + * When multiple Part types (i.e. FunctionResponsePart and TextPart) are + * passed in a single Part array, we may need to assign different roles to each + * part. 
Currently only FunctionResponsePart requires a role other than 'user'. + * @private + * @param parts Array of parts to pass to the model + * @returns Array of content items + */ +function assignRoleToPartsAndValidateSendMessageRequest(parts) { + const userContent = { role: 'user', parts: [] }; + const functionContent = { role: 'function', parts: [] }; + let hasUserContent = false; + let hasFunctionContent = false; + for (const part of parts) { + if ('functionResponse' in part) { + functionContent.parts.push(part); + hasFunctionContent = true; + } + else { + userContent.parts.push(part); + hasUserContent = true; + } + } + if (hasUserContent && hasFunctionContent) { + throw new AIError(AIErrorCode.INVALID_CONTENT, 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'); + } + if (!hasUserContent && !hasFunctionContent) { + throw new AIError(AIErrorCode.INVALID_CONTENT, 'No Content is provided for sending chat message.'); + } + if (hasUserContent) { + return userContent; + } + return functionContent; +} +function formatGenerateContentInput(params) { + let formattedRequest; + if (params.contents) { + formattedRequest = params; + } + else { + // Array or string + const content = formatNewContent(params); + formattedRequest = { contents: [content] }; + } + if (params.systemInstruction) { + formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction); + } + return formattedRequest; +} +/** + * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format + * that is expected from the REST API. + * + * @internal + */ +function createPredictRequestBody(prompt, { gcsURI, imageFormat, addWatermark, numberOfImages = 1, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }) { + // Properties that are undefined will be omitted from the JSON string that is sent in the request. + const body = { + instances: [ + { + prompt + } + ], + parameters: { + storageUri: gcsURI, + negativePrompt, + sampleCount: numberOfImages, + aspectRatio, + outputOptions: imageFormat, + addWatermark, + safetyFilterLevel, + personGeneration: personFilterLevel, + includeRaiReason: true, + includeSafetyAttributes: true + } + }; + return body; +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// https://ai.google.dev/api/rest/v1beta/Content#part +const VALID_PART_FIELDS = [ + 'text', + 'inlineData', + 'functionCall', + 'functionResponse', + 'thought', + 'thoughtSignature' +]; +const VALID_PARTS_PER_ROLE = { + user: ['text', 'inlineData'], + function: ['functionResponse'], + model: ['text', 'functionCall', 'thought', 'thoughtSignature'], + // System instructions shouldn't be in history anyway. + system: ['text'] +}; +const VALID_PREVIOUS_CONTENT_ROLES = { + user: ['model'], + function: ['model'], + model: ['user', 'function'], + // System instructions shouldn't be in history. 
+ system: [] +}; +function validateChatHistory(history) { + let prevContent = null; + for (const currContent of history) { + const { role, parts } = currContent; + if (!prevContent && role !== 'user') { + throw new AIError(AIErrorCode.INVALID_CONTENT, `First Content should be with role 'user', got ${role}`); + } + if (!POSSIBLE_ROLES.includes(role)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`); + } + if (!Array.isArray(parts)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content should have 'parts' property with an array of Parts`); + } + if (parts.length === 0) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Each Content should have at least one part`); + } + const countFields = { + text: 0, + inlineData: 0, + functionCall: 0, + functionResponse: 0, + thought: 0, + thoughtSignature: 0, + executableCode: 0, + codeExecutionResult: 0 + }; + for (const part of parts) { + for (const key of VALID_PART_FIELDS) { + if (key in part) { + countFields[key] += 1; + } + } + } + const validParts = VALID_PARTS_PER_ROLE[role]; + for (const key of VALID_PART_FIELDS) { + if (!validParts.includes(key) && countFields[key] > 0) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't contain '${key}' part`); + } + } + if (prevContent) { + const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role]; + if (!validPreviousContentRoles.includes(prevContent.role)) { + throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't follow '${prevContent.role}'. Valid previous roles: ${JSON.stringify(VALID_PREVIOUS_CONTENT_ROLES)}`); + } + } + prevContent = currContent; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Do not log a message for this error. + */ +const SILENT_ERROR = 'SILENT_ERROR'; +/** + * ChatSession class that enables sending chat messages and stores + * history of sent and received messages so far. + * + * @public + */ +class ChatSession { + constructor(apiSettings, model, chromeAdapter, params, requestOptions) { + this.model = model; + this.chromeAdapter = chromeAdapter; + this.params = params; + this.requestOptions = requestOptions; + this._history = []; + this._sendPromise = Promise.resolve(); + this._apiSettings = apiSettings; + if (params?.history) { + validateChatHistory(params.history); + this._history = params.history; + } + } + /** + * Gets the chat history so far. Blocked prompts are not added to history. + * Neither blocked candidates nor the prompts that generated them are added + * to history. 
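A short sketch of reading history back after a turn, assuming an existing `chat`; `getHistory()` awaits the internal send promise, so it resolves only after any in-flight `sendMessage()` call has settled:

```javascript
await chat.sendMessage('Name three prime numbers.');
const history = await chat.getHistory();
// history now ends with the 'user' turn followed by the 'model' reply.
console.log(history[history.length - 1].role); // 'model'
```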
+ */ + async getHistory() { + await this._sendPromise; + return this._history; + } + /** + * Sends a chat message and receives a non-streaming + * {@link GenerateContentResult} + */ + async sendMessage(request) { + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: this.params?.safetySettings, + generationConfig: this.params?.generationConfig, + tools: this.params?.tools, + toolConfig: this.params?.toolConfig, + systemInstruction: this.params?.systemInstruction, + contents: [...this._history, newContent] + }; + let finalResult = {}; + // Add onto the chain. + this._sendPromise = this._sendPromise + .then(() => generateContent(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions)) + .then(result => { + if (result.response.candidates && + result.response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = { + parts: result.response.candidates?.[0].content.parts || [], + // Response seems to come back without a role set. + role: result.response.candidates?.[0].content.role || 'model' + }; + this._history.push(responseContent); + } + else { + const blockErrorMessage = formatBlockErrorMessage(result.response); + if (blockErrorMessage) { + logger.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + finalResult = result; + }); + await this._sendPromise; + return finalResult; + } + /** + * Sends a chat message and receives the response as a + * {@link GenerateContentStreamResult} containing an iterable stream + * and a response promise. + */ + async sendMessageStream(request) { + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: this.params?.safetySettings, + generationConfig: this.params?.generationConfig, + tools: this.params?.tools, + toolConfig: this.params?.toolConfig, + systemInstruction: this.params?.systemInstruction, + contents: [...this._history, newContent] + }; + const streamPromise = generateContentStream(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions); + // Add onto the chain. + this._sendPromise = this._sendPromise + .then(() => streamPromise) + // This must be handled to avoid unhandled rejection, but jump + // to the final catch block with a label to not log this error. + .catch(_ignored => { + throw new Error(SILENT_ERROR); + }) + .then(streamResult => streamResult.response) + .then(response => { + if (response.candidates && response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = { ...response.candidates[0].content }; + // Response seems to come back without a role set. + if (!responseContent.role) { + responseContent.role = 'model'; + } + this._history.push(responseContent); + } + else { + const blockErrorMessage = formatBlockErrorMessage(response); + if (blockErrorMessage) { + logger.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + }) + .catch(e => { + // Errors in streamPromise are already catchable by the user as + // streamPromise is returned. + // Avoid duplicating the error message in logs. + if (e.message !== SILENT_ERROR) { + // Users do not have access to _sendPromise to catch errors + // downstream from streamPromise, so they should not throw. 
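A sketch of consuming `sendMessageStream()`, assuming the returned object has the `stream` async iterable and the aggregated `response` promise produced by `processStream()` earlier in this module:

```javascript
const { stream, response } = await chat.sendMessageStream('Tell me a short story.');
for await (const chunk of stream) {
  console.log(chunk.text()); // incremental text as it arrives
}
const finalResult = await response; // aggregated response; history is updated afterwards
```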
+ logger.error(e); + } + }); + return streamPromise; + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +async function countTokensOnCloud(apiSettings, model, params, requestOptions) { + let body = ''; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + const mappedParams = mapCountTokensRequest(params, model); + body = JSON.stringify(mappedParams); + } + else { + body = JSON.stringify(params); + } + const response = await makeRequest(model, Task.COUNT_TOKENS, apiSettings, false, body, requestOptions); + return response.json(); +} +async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) { + if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.'); + } + return countTokensOnCloud(apiSettings, model, params, requestOptions); +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for generative model APIs. + * @public + */ +class GenerativeModel extends AIModel { + constructor(ai, modelParams, requestOptions, chromeAdapter) { + super(ai, modelParams.model); + this.chromeAdapter = chromeAdapter; + this.generationConfig = modelParams.generationConfig || {}; + this.safetySettings = modelParams.safetySettings || []; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + this.requestOptions = requestOptions || {}; + } + /** + * Makes a single non-streaming call to the model + * and returns an object containing a single {@link GenerateContentResponse}. + */ + async generateContent(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContent(this._apiSettings, this.model, { + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + ...formattedParams + }, this.chromeAdapter, this.requestOptions); + } + /** + * Makes a single streaming call to the model + * and returns an object containing an iterable stream that iterates + * over all chunks in the streaming response as well as + * a promise that returns the final aggregated response. 
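A minimal sketch of the non-streaming path on `GenerativeModel`, assuming `ai` from `getAI()`, an illustrative model name, and the standard `totalTokens` field on the count response; `generateContent()` merges the model-level config with the per-request input before calling the backend:

```javascript
const model = getGenerativeModel(ai, { model: 'gemini-2.0-flash' }); // model name is illustrative
const { response } = await model.generateContent('Summarize the plot of Hamlet in two sentences.');
console.log(response.text());

// Token accounting reuses the same request formatting, but is cloud-only.
const { totalTokens } = await model.countTokens('Summarize the plot of Hamlet in two sentences.');
```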
+ */ + async generateContentStream(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContentStream(this._apiSettings, this.model, { + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + ...formattedParams + }, this.chromeAdapter, this.requestOptions); + } + /** + * Gets a new {@link ChatSession} instance which can be used for + * multi-turn chats. + */ + startChat(startChatParams) { + return new ChatSession(this._apiSettings, this.model, this.chromeAdapter, { + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + generationConfig: this.generationConfig, + safetySettings: this.safetySettings, + /** + * Overrides params inherited from GenerativeModel with those explicitly set in the + * StartChatParams. For example, if startChatParams.generationConfig is set, it'll override + * this.generationConfig. + */ + ...startChatParams + }, this.requestOptions); + } + /** + * Counts the tokens in the provided request. + */ + async countTokens(request) { + const formattedParams = formatGenerateContentInput(request); + return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Represents an active, real-time, bidirectional conversation with the model. + * + * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}. + * + * @beta + */ +class LiveSession { + /** + * @internal + */ + constructor(webSocketHandler, serverMessages) { + this.webSocketHandler = webSocketHandler; + this.serverMessages = serverMessages; + /** + * Indicates whether this Live session is closed. + * + * @beta + */ + this.isClosed = false; + /** + * Indicates whether this Live session is being controlled by an `AudioConversationController`. + * + * @beta + */ + this.inConversation = false; + } + /** + * Sends content to the server. + * + * @param request - The message to send to the model. + * @param turnComplete - Indicates if the turn is complete. Defaults to false. + * @throws If this session has been closed. + * + * @beta + */ + async send(request, turnComplete = true) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const newContent = formatNewContent(request); + const message = { + clientContent: { + turns: [newContent], + turnComplete + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends text to the server in realtime. + * + * @example + * ```javascript + * liveSession.sendTextRealtime("Hello, how are you?"); + * ``` + * + * @param text - The text data to send. + * @throws If this session has been closed. 
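A sketch of sending a complete client turn over an open session, assuming `liveSession` came from `LiveGenerativeModel.connect()`; note that the `turnComplete` flag defaults to `true` in the signature above:

```javascript
await liveSession.send(
  [{ text: 'Describe what you can hear right now.' }],
  /* turnComplete */ true
);
```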
+ * + * @beta + */ + async sendTextRealtime(text) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + text + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends audio data to the server in realtime. + * + * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz + * little-endian. + * + * @example + * ```javascript + * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian. + * const blob = { mimeType: "audio/pcm", data: pcmData }; + * liveSession.sendAudioRealtime(blob); + * ``` + * + * @param blob - The base64-encoded PCM data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + async sendAudioRealtime(blob) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + audio: blob + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends video data to the server in realtime. + * + * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It + * is recommended to set `mimeType` to `image/jpeg`. + * + * @example + * ```javascript + * // const videoFrame = ... base64-encoded JPEG data + * const blob = { mimeType: "image/jpeg", data: videoFrame }; + * liveSession.sendVideoRealtime(blob); + * ``` + * @param blob - The base64-encoded video data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + async sendVideoRealtime(blob) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + realtimeInput: { + video: blob + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Sends function responses to the server. + * + * @param functionResponses - The function responses to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendFunctionResponses(functionResponses) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const message = { + toolResponse: { + functionResponses + } + }; + this.webSocketHandler.send(JSON.stringify(message)); + } + /** + * Yields messages received from the server. + * This can only be used by one consumer at a time. + * + * @returns An `AsyncGenerator` that yields server messages as they arrive. + * @throws If the session is already closed, or if we receive a response that we don't support. + * + * @beta + */ + async *receive() { + if (this.isClosed) { + throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot read from a Live session that is closed. 
Try starting a new Live session.'); + } + for await (const message of this.serverMessages) { + if (message && typeof message === 'object') { + if (LiveResponseType.SERVER_CONTENT in message) { + yield { + type: 'serverContent', + ...message + .serverContent + }; + } + else if (LiveResponseType.TOOL_CALL in message) { + yield { + type: 'toolCall', + ...message + .toolCall + }; + } + else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) { + yield { + type: 'toolCallCancellation', + ...message.toolCallCancellation + }; + } + else { + logger.warn(`Received an unknown message type from the server: ${JSON.stringify(message)}`); + } + } + else { + logger.warn(`Received an invalid message from the server: ${JSON.stringify(message)}`); + } + } + } + /** + * Closes this session. + * All methods on this session will throw an error once this resolves. + * + * @beta + */ + async close() { + if (!this.isClosed) { + this.isClosed = true; + await this.webSocketHandler.close(1000, 'Client closed session.'); + } + } + /** + * Sends realtime input to the server. + * + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * @param mediaChunks - The media chunks to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendMediaChunks(mediaChunks) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + // The backend does not support sending more than one mediaChunk in one message. + // Work around this limitation by sending mediaChunks in separate messages. + mediaChunks.forEach(mediaChunk => { + const message = { + realtimeInput: { mediaChunks: [mediaChunk] } + }; + this.webSocketHandler.send(JSON.stringify(message)); + }); + } + /** + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * Sends a stream of {@link GenerativeContentBlob}. + * + * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send. + * @throws If this session has been closed. + * + * @beta + */ + async sendMediaStream(mediaChunkStream) { + if (this.isClosed) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.'); + } + const reader = mediaChunkStream.getReader(); + while (true) { + try { + const { done, value } = await reader.read(); + if (done) { + break; + } + else if (!value) { + throw new Error('Missing chunk in reader, but reader is not done.'); + } + await this.sendMediaChunks([value]); + } + catch (e) { + // Re-throw any errors that occur during stream consumption or sending. + const message = e instanceof Error ? e.message : 'Error processing media stream.'; + throw new AIError(AIErrorCode.REQUEST_ERROR, message); + } + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for Live generative model APIs. 
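A sketch of a full Live round trip, assuming `ai` from `getAI()` and an illustrative model name; `connect()` performs the setup handshake described below, and `receive()` yields typed server messages until the session closes:

```javascript
const liveModel = getLiveGenerativeModel(ai, { model: 'gemini-live-model' }); // name is illustrative
const session = await liveModel.connect();
await session.send('Hello!');
for await (const message of session.receive()) {
  if (message.type === 'serverContent') {
    // Model output, transcriptions, interruption signals, and turn boundaries arrive here.
  } else if (message.type === 'toolCall') {
    // Answer with session.sendFunctionResponses([...]) built from message.functionCalls.
  }
}
```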
The Live API enables low-latency, two-way multimodal + * interactions with Gemini. + * + * This class should only be instantiated with {@link getLiveGenerativeModel}. + * + * @beta + */ +class LiveGenerativeModel extends AIModel { + /** + * @internal + */ + constructor(ai, modelParams, + /** + * @internal + */ + _webSocketHandler) { + super(ai, modelParams.model); + this._webSocketHandler = _webSocketHandler; + this.generationConfig = modelParams.generationConfig || {}; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + } + /** + * Starts a {@link LiveSession}. + * + * @returns A {@link LiveSession}. + * @throws If the connection failed to be established with the server. + * + * @beta + */ + async connect() { + const url = new WebSocketUrl(this._apiSettings); + await this._webSocketHandler.connect(url.toString()); + let fullModelPath; + if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + fullModelPath = `projects/${this._apiSettings.project}/${this.model}`; + } + else { + fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`; + } + // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API, + // but the backend expects them to be in the `setup` message. + const { inputAudioTranscription, outputAudioTranscription, ...generationConfig } = this.generationConfig; + const setupMessage = { + setup: { + model: fullModelPath, + generationConfig, + tools: this.tools, + toolConfig: this.toolConfig, + systemInstruction: this.systemInstruction, + inputAudioTranscription, + outputAudioTranscription + } + }; + try { + // Begin listening for server messages, and begin the handshake by sending the 'setupMessage' + const serverMessages = this._webSocketHandler.listen(); + this._webSocketHandler.send(JSON.stringify(setupMessage)); + // Verify we received the handshake response 'setupComplete' + const firstMessage = (await serverMessages.next()).value; + if (!firstMessage || + !(typeof firstMessage === 'object') || + !('setupComplete' in firstMessage)) { + await this._webSocketHandler.close(1011, 'Handshake failure'); + throw new AIError(AIErrorCode.RESPONSE_ERROR, 'Server connection handshake failed. The server did not respond with a setupComplete message.'); + } + return new LiveSession(this._webSocketHandler, serverMessages); + } + catch (e) { + // Ensure connection is closed on any setup error + await this._webSocketHandler.close(); + throw e; + } + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Class for Imagen model APIs. + * + * This class provides methods for generating images using the Imagen model. 
+ * + * @example + * ```javascript + * const imagen = new ImagenModel( + * ai, + * { + * model: 'imagen-3.0-generate-002' + * } + * ); + * + * const response = await imagen.generateImages('A photo of a cat'); + * if (response.images.length > 0) { + * console.log(response.images[0].bytesBase64Encoded); + * } + * ``` + * + * @public + */ +class ImagenModel extends AIModel { + /** + * Constructs a new instance of the {@link ImagenModel} class. + * + * @param ai - an {@link AI} instance. + * @param modelParams - Parameters to use when making requests to Imagen. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + */ + constructor(ai, modelParams, requestOptions) { + const { model, generationConfig, safetySettings } = modelParams; + super(ai, model); + this.requestOptions = requestOptions; + this.generationConfig = generationConfig; + this.safetySettings = safetySettings; + } + /** + * Generates images using the Imagen model and returns them as + * base64-encoded strings. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the generated images. + * + * @throws If the request to generate images fails. This happens if the + * prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + * + * @public + */ + async generateImages(prompt) { + const body = createPredictRequestBody(prompt, { + ...this.generationConfig, + ...this.safetySettings + }); + const response = await makeRequest(this.model, Task.PREDICT, this._apiSettings, + /* stream */ false, JSON.stringify(body), this.requestOptions); + return handlePredictResponse(response); + } + /** + * Generates images to Cloud Storage for Firebase using the Imagen model. + * + * @internal This method is temporarily internal. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket. + * This should be a directory. For example, `gs://my-bucket/my-directory/`. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the URLs of the generated images. + * + * @throws If the request fails to generate images fails. This happens if + * the prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + */ + async generateImagesGCS(prompt, gcsURI) { + const body = createPredictRequestBody(prompt, { + gcsURI, + ...this.generationConfig, + ...this.safetySettings + }); + const response = await makeRequest(this.model, Task.PREDICT, this._apiSettings, + /* stream */ false, JSON.stringify(body), this.requestOptions); + return handlePredictResponse(response); + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
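A sketch of handling partially filtered Imagen output, assuming an `imagenModel` constructed as in the class example above (or via `getImagenModel()`); per the remarks on `generateImages()`, `filteredReason` is set when some images were filtered and `images` may be empty:

```javascript
const result = await imagenModel.generateImages('A watercolor painting of a lighthouse at dusk');
if (result.filteredReason) {
  console.warn(`Some images were filtered: ${result.filteredReason}`);
}
for (const image of result.images) {
  console.log(image.bytesBase64Encoded.slice(0, 32), '...');
}
```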
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22. + * + * @internal + */ +class WebSocketHandlerImpl { + constructor() { + if (typeof WebSocket === 'undefined') { + throw new AIError(AIErrorCode.UNSUPPORTED, 'The WebSocket API is not available in this environment. ' + + 'The "Live" feature is not supported here. It is supported in ' + + 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'); + } + } + connect(url) { + return new Promise((resolve, reject) => { + this.ws = new WebSocket(url); + this.ws.binaryType = 'blob'; // Only important to set in Node + this.ws.addEventListener('open', () => resolve(), { once: true }); + this.ws.addEventListener('error', () => reject(new AIError(AIErrorCode.FETCH_ERROR, `Error event raised on WebSocket`)), { once: true }); + this.ws.addEventListener('close', (closeEvent) => { + if (closeEvent.reason) { + logger.warn(`WebSocket connection closed by server. Reason: '${closeEvent.reason}'`); + } + }); + }); + } + send(data) { + if (!this.ws || this.ws.readyState !== WebSocket.OPEN) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.'); + } + this.ws.send(data); + } + async *listen() { + if (!this.ws) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not connected.'); + } + const messageQueue = []; + const errorQueue = []; + let resolvePromise = null; + let isClosed = false; + const messageListener = async (event) => { + let data; + if (event.data instanceof Blob) { + data = await event.data.text(); + } + else if (typeof event.data === 'string') { + data = event.data; + } + else { + errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`)); + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + return; + } + try { + const obj = JSON.parse(data); + messageQueue.push(obj); + } + catch (e) { + const err = e; + errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing WebSocket message to JSON: ${err.message}`)); + } + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + }; + const errorListener = () => { + errorQueue.push(new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')); + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + }; + const closeListener = (event) => { + if (event.reason) { + logger.warn(`WebSocket connection closed by the server with reason: ${event.reason}`); + } + isClosed = true; + if (resolvePromise) { + resolvePromise(); + resolvePromise = null; + } + // Clean up listeners to prevent memory leaks + this.ws?.removeEventListener('message', messageListener); + this.ws?.removeEventListener('close', closeListener); + this.ws?.removeEventListener('error', errorListener); + }; + this.ws.addEventListener('message', messageListener); + this.ws.addEventListener('close', closeListener); + this.ws.addEventListener('error', errorListener); + while (!isClosed) { + if (errorQueue.length > 0) { + const error = errorQueue.shift(); + throw error; + } + if (messageQueue.length > 0) { + yield messageQueue.shift(); + } + else { + await new Promise(resolve => { + resolvePromise = resolve; + }); + } + } + // If the loop terminated because isClosed is true, check for any final errors + if (errorQueue.length > 0) { + const error = errorQueue.shift(); + throw error; + } + } + close(code, reason) { + return new Promise(resolve => { + if (!this.ws) { + return resolve(); + } + this.ws.addEventListener('close', () => resolve(), { once: true }); + // Calling 'close' during these states results in an error. + if (this.ws.readyState === WebSocket.CLOSED || + this.ws.readyState === WebSocket.CONNECTING) { + return resolve(); + } + if (this.ws.readyState !== WebSocket.CLOSING) { + this.ws.close(code, reason); + } + }); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Parent class encompassing all Schema types, with static methods that + * allow building specific Schema types. This class can be converted with + * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints. + * (This string conversion is automatically done when calling SDK methods.) 
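A sketch of building a response schema with the static helpers defined below and wiring it into a request, assuming `ai` from `getAI()` and assuming the SDK's JSON-output generation-config fields (`responseMimeType`, `responseSchema`):

```javascript
const recipeSchema = Schema.object({
  properties: {
    name: Schema.string(),
    servings: Schema.integer(),
    vegetarian: Schema.boolean()
  },
  optionalProperties: ['vegetarian']
});

const jsonModel = getGenerativeModel(ai, {
  model: 'gemini-2.0-flash', // illustrative
  generationConfig: {
    responseMimeType: 'application/json',
    responseSchema: recipeSchema // serialized via toJSON() when the request body is stringified
  }
});
```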
+ * @public + */ +class Schema { + constructor(schemaParams) { + // TODO(dlarocque): Enforce this with union types + if (!schemaParams.type && !schemaParams.anyOf) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, "A schema must have either a 'type' or an 'anyOf' array of sub-schemas."); + } + // eslint-disable-next-line guard-for-in + for (const paramKey in schemaParams) { + this[paramKey] = schemaParams[paramKey]; + } + // Ensure these are explicitly set to avoid TS errors. + this.type = schemaParams.type; + this.format = schemaParams.hasOwnProperty('format') + ? schemaParams.format + : undefined; + this.nullable = schemaParams.hasOwnProperty('nullable') + ? !!schemaParams.nullable + : false; + } + /** + * Defines how this Schema should be serialized as JSON. + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior + * @internal + */ + toJSON() { + const obj = { + type: this.type + }; + for (const prop in this) { + if (this.hasOwnProperty(prop) && this[prop] !== undefined) { + if (prop !== 'required' || this.type === SchemaType.OBJECT) { + obj[prop] = this[prop]; + } + } + } + return obj; + } + static array(arrayParams) { + return new ArraySchema(arrayParams, arrayParams.items); + } + static object(objectParams) { + return new ObjectSchema(objectParams, objectParams.properties, objectParams.optionalProperties); + } + // eslint-disable-next-line id-blacklist + static string(stringParams) { + return new StringSchema(stringParams); + } + static enumString(stringParams) { + return new StringSchema(stringParams, stringParams.enum); + } + static integer(integerParams) { + return new IntegerSchema(integerParams); + } + // eslint-disable-next-line id-blacklist + static number(numberParams) { + return new NumberSchema(numberParams); + } + // eslint-disable-next-line id-blacklist + static boolean(booleanParams) { + return new BooleanSchema(booleanParams); + } + static anyOf(anyOfParams) { + return new AnyOfSchema(anyOfParams); + } +} +/** + * Schema class for "integer" types. + * @public + */ +class IntegerSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.INTEGER, + ...schemaParams + }); + } +} +/** + * Schema class for "number" types. + * @public + */ +class NumberSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.NUMBER, + ...schemaParams + }); + } +} +/** + * Schema class for "boolean" types. + * @public + */ +class BooleanSchema extends Schema { + constructor(schemaParams) { + super({ + type: SchemaType.BOOLEAN, + ...schemaParams + }); + } +} +/** + * Schema class for "string" types. Can be used with or without + * enum values. + * @public + */ +class StringSchema extends Schema { + constructor(schemaParams, enumValues) { + super({ + type: SchemaType.STRING, + ...schemaParams + }); + this.enum = enumValues; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + if (this.enum) { + obj['enum'] = this.enum; + } + return obj; + } +} +/** + * Schema class for "array" types. + * The `items` param should refer to the type of item that can be a member + * of the array. + * @public + */ +class ArraySchema extends Schema { + constructor(schemaParams, items) { + super({ + type: SchemaType.ARRAY, + ...schemaParams + }); + this.items = items; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + obj.items = this.items.toJSON(); + return obj; + } +} +/** + * Schema class for "object" types. 
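Two more small sketches of the helpers defined above: an enumerated string and an array whose `items` is itself a `Schema`:

```javascript
const moodSchema = Schema.enumString({ enum: ['happy', 'sad', 'neutral'] });
const tagsSchema = Schema.array({ items: Schema.string() });
// Both serialize through toJSON() when the request body is stringified.
```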
+ * The `properties` param must be a map of `Schema` objects. + * @public + */ +class ObjectSchema extends Schema { + constructor(schemaParams, properties, optionalProperties = []) { + super({ + type: SchemaType.OBJECT, + ...schemaParams + }); + this.properties = properties; + this.optionalProperties = optionalProperties; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + obj.properties = { ...this.properties }; + const required = []; + if (this.optionalProperties) { + for (const propertyKey of this.optionalProperties) { + if (!this.properties.hasOwnProperty(propertyKey)) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, `Property "${propertyKey}" specified in "optionalProperties" does not exist.`); + } + } + } + for (const propertyKey in this.properties) { + if (this.properties.hasOwnProperty(propertyKey)) { + obj.properties[propertyKey] = this.properties[propertyKey].toJSON(); + if (!this.optionalProperties.includes(propertyKey)) { + required.push(propertyKey); + } + } + } + if (required.length > 0) { + obj.required = required; + } + delete obj.optionalProperties; + return obj; + } +} +/** + * Schema class representing a value that can conform to any of the provided sub-schemas. This is + * useful when a field can accept multiple distinct types or structures. + * @public + */ +class AnyOfSchema extends Schema { + constructor(schemaParams) { + if (schemaParams.anyOf.length === 0) { + throw new AIError(AIErrorCode.INVALID_SCHEMA, "The 'anyOf' array must not be empty."); + } + super({ + ...schemaParams, + type: undefined // anyOf schemas do not have an explicit type + }); + this.anyOf = schemaParams.anyOf; + } + /** + * @internal + */ + toJSON() { + const obj = super.toJSON(); + // Ensure the 'anyOf' property contains serialized SchemaRequest objects. + if (this.anyOf && Array.isArray(this.anyOf)) { + obj.anyOf = this.anyOf.map(s => s.toJSON()); + } + return obj; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Defines the image format for images generated by Imagen. + * + * Use this class to specify the desired format (JPEG or PNG) and compression quality + * for images generated by Imagen. This is typically included as part of + * {@link ImagenModelParams}. + * + * @example + * ```javascript + * const imagenModelParams = { + * // ... other ImagenModelParams + * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75. + * } + * ``` + * + * @public + */ +class ImagenImageFormat { + constructor() { + this.mimeType = 'image/png'; + } + /** + * Creates an {@link ImagenImageFormat} for a JPEG image. + * + * @param compressionQuality - The level of compression (a number between 0 and 100). + * @returns An {@link ImagenImageFormat} object for a JPEG image. 
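A sketch of `Schema.anyOf()` from the schema helpers above, which accepts a value matching any of the listed sub-schemas; the serialized form carries `anyOf` and omits a top-level `type`:

```javascript
const idSchema = Schema.anyOf({
  anyOf: [Schema.string(), Schema.integer()]
});
// idSchema.toJSON() contains an 'anyOf' array of serialized sub-schemas.
```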
+ * + * @public + */ + static jpeg(compressionQuality) { + if (compressionQuality && + (compressionQuality < 0 || compressionQuality > 100)) { + logger.warn(`Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`); + } + return { mimeType: 'image/jpeg', compressionQuality }; + } + /** + * Creates an {@link ImagenImageFormat} for a PNG image. + * + * @returns An {@link ImagenImageFormat} object for a PNG image. + * + * @public + */ + static png() { + return { mimeType: 'image/png' }; + } +} + +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +const SERVER_INPUT_SAMPLE_RATE = 16000; +const SERVER_OUTPUT_SAMPLE_RATE = 24000; +const AUDIO_PROCESSOR_NAME = 'audio-processor'; +/** + * The JS for an `AudioWorkletProcessor`. + * This processor is responsible for taking raw audio from the microphone, + * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread. + * + * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor + * + * It is defined as a string here so that it can be converted into a `Blob` + * and loaded at runtime. + */ +const audioProcessorWorkletString = ` + class AudioProcessor extends AudioWorkletProcessor { + constructor(options) { + super(); + this.targetSampleRate = options.processorOptions.targetSampleRate; + // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope, + // representing the native sample rate of the AudioContext. + this.inputSampleRate = sampleRate; + } + + /** + * This method is called by the browser's audio engine for each block of audio data. + * Input is a single input, with a single channel (input[0][0]). + */ + process(inputs) { + const input = inputs[0]; + if (input && input.length > 0 && input[0].length > 0) { + const pcmData = input[0]; // Float32Array of raw audio samples. + + // Simple linear interpolation for resampling. + const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate / this.inputSampleRate)); + const ratio = pcmData.length / resampled.length; + for (let i = 0; i < resampled.length; i++) { + resampled[i] = pcmData[Math.floor(i * ratio)]; + } + + // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767) + const resampledInt16 = new Int16Array(resampled.length); + for (let i = 0; i < resampled.length; i++) { + const sample = Math.max(-1, Math.min(1, resampled[i])); + if (sample < 0) { + resampledInt16[i] = sample * 32768; + } else { + resampledInt16[i] = sample * 32767; + } + } + + this.port.postMessage(resampledInt16); + } + // Return true to keep the processor alive and processing the next audio block. + return true; + } + } + + // Register the processor with a name that can be used to instantiate it from the main thread. + registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor); +`; +/** + * Encapsulates the core logic of an audio conversation. 
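A worked sketch of the sample conversion performed by the worklet above, assuming the same asymmetric scaling (negative samples by 32768, positive by 32767) before the values are written into an `Int16Array`:

```javascript
function floatToInt16(sample) {
  const s = Math.max(-1, Math.min(1, sample)); // clamp to [-1, 1]
  return s < 0 ? s * 32768 : s * 32767;
}
floatToInt16(-1);   // -32768
floatToInt16(1);    //  32767
floatToInt16(0.25); //  8191.75 (truncated to 8191 when stored in an Int16Array)
```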
+ * + * @internal + */ +class AudioConversationRunner { + constructor(liveSession, options, deps) { + this.liveSession = liveSession; + this.options = options; + this.deps = deps; + /** A flag to indicate if the conversation has been stopped. */ + this.isStopped = false; + /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */ + this.stopDeferred = new Deferred(); + /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */ + this.playbackQueue = []; + /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */ + this.scheduledSources = []; + /** A high-precision timeline pointer for scheduling gapless audio playback. */ + this.nextStartTime = 0; + /** A mutex to prevent the playback processing loop from running multiple times concurrently. */ + this.isPlaybackLoopRunning = false; + this.liveSession.inConversation = true; + // Start listening for messages from the server. + this.receiveLoopPromise = this.runReceiveLoop().finally(() => this.cleanup()); + // Set up the handler for receiving processed audio data from the worklet. + // Message data has been resampled to 16kHz 16-bit PCM. + this.deps.workletNode.port.onmessage = event => { + if (this.isStopped) { + return; + } + const pcm16 = event.data; + const base64 = btoa(String.fromCharCode.apply(null, Array.from(new Uint8Array(pcm16.buffer)))); + const chunk = { + mimeType: 'audio/pcm', + data: base64 + }; + void this.liveSession.sendAudioRealtime(chunk); + }; + } + /** + * Stops the conversation and unblocks the main receive loop. + */ + async stop() { + if (this.isStopped) { + return; + } + this.isStopped = true; + this.stopDeferred.resolve(); // Unblock the receive loop + await this.receiveLoopPromise; // Wait for the loop and cleanup to finish + } + /** + * Cleans up all audio resources (nodes, stream tracks, context) and marks the + * session as no longer in a conversation. + */ + cleanup() { + this.interruptPlayback(); // Ensure all audio is stopped on final cleanup. + this.deps.workletNode.port.onmessage = null; + this.deps.workletNode.disconnect(); + this.deps.sourceNode.disconnect(); + this.deps.mediaStream.getTracks().forEach(track => track.stop()); + if (this.deps.audioContext.state !== 'closed') { + void this.deps.audioContext.close(); + } + this.liveSession.inConversation = false; + } + /** + * Adds audio data to the queue and ensures the playback loop is running. + */ + enqueueAndPlay(audioData) { + this.playbackQueue.push(audioData); + // Will no-op if it's already running. + void this.processPlaybackQueue(); + } + /** + * Stops all current and pending audio playback and clears the queue. This is + * called when the server indicates the model's speech was interrupted with + * `LiveServerContent.modelTurn.interrupted`. + */ + interruptPlayback() { + // Stop all sources that have been scheduled. The onended event will fire for each, + // which will clean up the scheduledSources array. + [...this.scheduledSources].forEach(source => source.stop(0)); + // Clear the internal buffer of unprocessed audio chunks. + this.playbackQueue.length = 0; + // Reset the playback clock to start fresh. + this.nextStartTime = this.deps.audioContext.currentTime; + } + /** + * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence. 
+ */ + async processPlaybackQueue() { + if (this.isPlaybackLoopRunning) { + return; + } + this.isPlaybackLoopRunning = true; + while (this.playbackQueue.length > 0 && !this.isStopped) { + const pcmRawBuffer = this.playbackQueue.shift(); + try { + const pcm16 = new Int16Array(pcmRawBuffer); + const frameCount = pcm16.length; + const audioBuffer = this.deps.audioContext.createBuffer(1, frameCount, SERVER_OUTPUT_SAMPLE_RATE); + // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API. + const channelData = audioBuffer.getChannelData(0); + for (let i = 0; i < frameCount; i++) { + channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0] + } + const source = this.deps.audioContext.createBufferSource(); + source.buffer = audioBuffer; + source.connect(this.deps.audioContext.destination); + // Track the source and set up a handler to remove it from tracking when it finishes. + this.scheduledSources.push(source); + source.onended = () => { + this.scheduledSources = this.scheduledSources.filter(s => s !== source); + }; + // To prevent gaps, schedule the next chunk to start either now (if we're catching up) + // or exactly when the previous chunk is scheduled to end. + this.nextStartTime = Math.max(this.deps.audioContext.currentTime, this.nextStartTime); + source.start(this.nextStartTime); + // Update the schedule for the *next* chunk. + this.nextStartTime += audioBuffer.duration; + } + catch (e) { + logger.error('Error playing audio:', e); + } + } + this.isPlaybackLoopRunning = false; + } + /** + * The main loop that listens for and processes messages from the server. + */ + async runReceiveLoop() { + const messageGenerator = this.liveSession.receive(); + while (!this.isStopped) { + const result = await Promise.race([ + messageGenerator.next(), + this.stopDeferred.promise + ]); + if (this.isStopped || !result || result.done) { + break; + } + const message = result.value; + if (message.type === 'serverContent') { + const serverContent = message; + if (serverContent.interrupted) { + this.interruptPlayback(); + } + const audioPart = serverContent.modelTurn?.parts.find(part => part.inlineData?.mimeType.startsWith('audio/')); + if (audioPart?.inlineData) { + const audioData = Uint8Array.from(atob(audioPart.inlineData.data), c => c.charCodeAt(0)).buffer; + this.enqueueAndPlay(audioData); + } + } + else if (message.type === 'toolCall') { + if (!this.options.functionCallingHandler) { + logger.warn('Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. Ignoring tool call.'); + } + else { + try { + const functionResponse = await this.options.functionCallingHandler(message.functionCalls); + if (!this.isStopped) { + void this.liveSession.sendFunctionResponses([functionResponse]); + } + } + catch (e) { + throw new AIError(AIErrorCode.ERROR, `Function calling handler failed: ${e.message}`); + } + } + } + } + } +} +/** + * Starts a real-time, bidirectional audio conversation with the model. This helper function manages + * the complexities of microphone access, audio recording, playback, and interruptions. + * + * @remarks Important: This function must be called in response to a user gesture + * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}. 
+ * + * @example + * ```javascript + * const liveSession = await model.connect(); + * let conversationController; + * + * // This function must be called from within a click handler. + * async function startConversation() { + * try { + * conversationController = await startAudioConversation(liveSession); + * } catch (e) { + * // Handle AI-specific errors + * if (e instanceof AIError) { + * console.error("AI Error:", e.message); + * } + * // Handle microphone permission and hardware errors + * else if (e instanceof DOMException) { + * console.error("Microphone Error:", e.message); + * } + * // Handle other unexpected errors + * else { + * console.error("An unexpected error occurred:", e); + * } + * } + * } + * + * // Later, to stop the conversation: + * // if (conversationController) { + * // await conversationController.stop(); + * // } + * ``` + * + * @param liveSession - An active {@link LiveSession} instance. + * @param options - Configuration options for the audio conversation. + * @returns A `Promise` that resolves with an {@link AudioConversationController}. + * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`). + * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions. + * + * @beta + */ +async function startAudioConversation(liveSession, options = {}) { + if (liveSession.isClosed) { + throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot start audio conversation on a closed LiveSession.'); + } + if (liveSession.inConversation) { + throw new AIError(AIErrorCode.REQUEST_ERROR, 'An audio conversation is already in progress for this session.'); + } + // Check for necessary Web API support. + if (typeof AudioWorkletNode === 'undefined' || + typeof AudioContext === 'undefined' || + typeof navigator === 'undefined' || + !navigator.mediaDevices) { + throw new AIError(AIErrorCode.UNSUPPORTED, 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'); + } + let audioContext; + try { + // 1. Set up the audio context. This must be in response to a user gesture. + // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy + audioContext = new AudioContext(); + if (audioContext.state === 'suspended') { + await audioContext.resume(); + } + // 2. Prompt for microphone access and get the media stream. + // This can throw a variety of permission or hardware-related errors. + const mediaStream = await navigator.mediaDevices.getUserMedia({ + audio: true + }); + // 3. Load the AudioWorklet processor. + // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet + const workletBlob = new Blob([audioProcessorWorkletString], { + type: 'application/javascript' + }); + const workletURL = URL.createObjectURL(workletBlob); + await audioContext.audioWorklet.addModule(workletURL); + // 4. 
Create the audio graph: Microphone -> Source Node -> Worklet Node + const sourceNode = audioContext.createMediaStreamSource(mediaStream); + const workletNode = new AudioWorkletNode(audioContext, AUDIO_PROCESSOR_NAME, { + processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE } + }); + sourceNode.connect(workletNode); + // 5. Instantiate and return the runner which manages the conversation. + const runner = new AudioConversationRunner(liveSession, options, { + audioContext, + mediaStream, + sourceNode, + workletNode + }); + return { stop: () => runner.stop() }; + } + catch (e) { + // Ensure the audio context is closed on any setup error. + if (audioContext && audioContext.state !== 'closed') { + void audioContext.close(); + } + // Re-throw specific, known error types directly. The user may want to handle `DOMException` + // errors differently (for example, if permission to access audio device was denied). + if (e instanceof AIError || e instanceof DOMException) { + throw e; + } + // Wrap any other unexpected errors in a standard AIError. + throw new AIError(AIErrorCode.ERROR, `Failed to initialize audio recording: ${e.message}`); + } +} + +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Returns the default {@link AI} instance that is associated with the provided + * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the + * default settings. + * + * @example + * ```javascript + * const ai = getAI(app); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Gemini Developer API (via Google AI). + * const ai = getAI(app, { backend: new GoogleAIBackend() }); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Vertex AI Gemini API. + * const ai = getAI(app, { backend: new VertexAIBackend() }); + * ``` + * + * @param app - The {@link @firebase/app#FirebaseApp} to use. + * @param options - {@link AIOptions} that configure the AI instance. + * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}. + * + * @public + */ +function getAI(app = getApp(), options) { + app = getModularInstance(app); + // Dependencies + const AIProvider = _getProvider(app, AI_TYPE); + const backend = options?.backend ?? new GoogleAIBackend(); + const finalOptions = { + useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false + }; + const identifier = encodeInstanceIdentifier(backend); + const aiInstance = AIProvider.getImmediate({ + identifier + }); + aiInstance.options = finalOptions; + return aiInstance; +} +/** + * Returns a {@link GenerativeModel} class with methods for inference + * and other functionality. + * + * @public + */ +function getGenerativeModel(ai, modelParams, requestOptions) { + // Uses the existence of HybridParams.mode to clarify the type of the modelParams input. 
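A sketch of the hybrid-inference branch handled below, assuming the SDK exposes a `PREFER_ON_DEVICE` inference mode; when `inCloudParams` is omitted, the default in-cloud model (`gemini-2.0-flash-lite`) is used:

```javascript
const hybridModel = getGenerativeModel(ai, {
  mode: InferenceMode.PREFER_ON_DEVICE,
  // inCloudParams is optional; it falls back to DEFAULT_HYBRID_IN_CLOUD_MODEL.
  inCloudParams: { model: 'gemini-2.0-flash-lite' }
});
const { response } = await hybridModel.generateContent('Write a haiku about the sea.');
```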
+ const hybridParams = modelParams; + let inCloudParams; + if (hybridParams.mode) { + inCloudParams = hybridParams.inCloudParams || { + model: DEFAULT_HYBRID_IN_CLOUD_MODEL + }; + } + else { + inCloudParams = modelParams; + } + if (!inCloudParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`); + } + /** + * An AIService registered by index.node.ts will not have a + * chromeAdapterFactory() method. + */ + const chromeAdapter = ai.chromeAdapterFactory?.(hybridParams.mode, typeof window === 'undefined' ? undefined : window, hybridParams.onDeviceParams); + return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter); +} +/** + * Returns an {@link ImagenModel} class with methods for using Imagen. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when making Imagen requests. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @public + */ +function getImagenModel(ai, modelParams, requestOptions) { + if (!modelParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`); + } + return new ImagenModel(ai, modelParams, requestOptions); +} +/** + * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication. + * + * The Live API is only supported in modern browser windows and Node >= 22. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when setting up a {@link LiveSession}. + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @beta + */ +function getLiveGenerativeModel(ai, modelParams) { + if (!modelParams.model) { + throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`); + } + const webSocketHandler = new WebSocketHandlerImpl(); + return new LiveGenerativeModel(ai, modelParams, webSocketHandler); +} + +/** + * The Firebase AI Web SDK. 
+ * + * @packageDocumentation + */ +function registerAI() { + _registerComponent(new Component(AI_TYPE, (container, { instanceIdentifier }) => { + if (!instanceIdentifier) { + throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.'); + } + const backend = decodeInstanceIdentifier(instanceIdentifier); + // getImmediate for FirebaseApp will always succeed + const app = container.getProvider('app').getImmediate(); + const auth = container.getProvider('auth-internal'); + const appCheckProvider = container.getProvider('app-check-internal'); + return new AIService(app, backend, auth, appCheckProvider); + }, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true)); + registerVersion(name, version, 'node'); + // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation + registerVersion(name, version, 'esm2020'); +} +registerAI(); + +export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, InferenceSource, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation }; +//# sourceMappingURL=index.node.mjs.map diff --git a/frontend-old/node_modules/@firebase/ai/dist/index.node.mjs.map b/frontend-old/node_modules/@firebase/ai/dist/index.node.mjs.map new file mode 100644 index 0000000..bdb4150 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/index.node.mjs.map @@ -0,0 +1 @@ +{"version":3,"file":"index.node.mjs","sources":["../src/constants.ts","../src/types/enums.ts","../src/types/responses.ts","../src/types/error.ts","../src/types/schema.ts","../src/types/imagen/requests.ts","../src/public-types.ts","../src/backend.ts","../src/service.ts","../src/errors.ts","../src/helpers.ts","../src/models/ai-model.ts","../src/logger.ts","../src/requests/request.ts","../src/requests/response-helpers.ts","../src/googleai-mappers.ts","../src/requests/stream-reader.ts","../src/requests/hybrid-helpers.ts","../src/methods/generate-content.ts","../src/requests/request-helpers.ts","../src/methods/chat-session-helpers.ts","../src/methods/chat-session.ts","../src/methods/count-tokens.ts","../src/models/generative-model.ts","../src/methods/live-session.ts","../src/models/live-generative-model.ts","../src/models/imagen-model.ts","../src/websocket.ts","../src/requests/schema-builder.ts","../src/requests/imagen-image-format.ts","../src/methods/live-session-helpers.ts","../src/api.ts","../src/index.node.ts"],"sourcesContent":["/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific 
language governing permissions and\n * limitations under the License.\n */\n\nimport { version } from '../package.json';\n\nexport const AI_TYPE = 'AI';\n\nexport const DEFAULT_LOCATION = 'us-central1';\n\nexport const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com';\n\nexport const DEFAULT_API_VERSION = 'v1beta';\n\nexport const PACKAGE_VERSION = version;\n\nexport const LANGUAGE_TAG = 'gl-js';\n\nexport const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;\n\n/**\n * Defines the name of the default in-cloud model to use for hybrid inference.\n */\nexport const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Role is the producer of the content.\n * @public\n */\nexport type Role = (typeof POSSIBLE_ROLES)[number];\n\n/**\n * Possible roles.\n * @public\n */\nexport const POSSIBLE_ROLES = ['user', 'model', 'function', 'system'] as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport const HarmCategory = {\n HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'\n} as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport const HarmBlockThreshold = {\n /**\n * Content with `NEGLIGIBLE` will be allowed.\n */\n BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE` and `LOW` will be allowed.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.\n */\n BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',\n /**\n * All content will be allowed.\n */\n BLOCK_NONE: 'BLOCK_NONE',\n /**\n * All content will be allowed. 
This is the same as `BLOCK_NONE`, but the metadata corresponding\n * to the {@link (HarmCategory:type)} will not be present in the response.\n */\n OFF: 'OFF'\n} as const;\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport type HarmBlockThreshold =\n (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport const HarmBlockMethod = {\n /**\n * The harm block method uses both probability and severity scores.\n */\n SEVERITY: 'SEVERITY',\n /**\n * The harm block method uses the probability score.\n */\n PROBABILITY: 'PROBABILITY'\n} as const;\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport type HarmBlockMethod =\n (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport const HarmProbability = {\n /**\n * Content has a negligible chance of being unsafe.\n */\n NEGLIGIBLE: 'NEGLIGIBLE',\n /**\n * Content has a low chance of being unsafe.\n */\n LOW: 'LOW',\n /**\n * Content has a medium chance of being unsafe.\n */\n MEDIUM: 'MEDIUM',\n /**\n * Content has a high chance of being unsafe.\n */\n HIGH: 'HIGH'\n} as const;\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport type HarmProbability =\n (typeof HarmProbability)[keyof typeof HarmProbability];\n\n/**\n * Harm severity levels.\n * @public\n */\nexport const HarmSeverity = {\n /**\n * Negligible level of harm severity.\n */\n HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE',\n /**\n * Low level of harm severity.\n */\n HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW',\n /**\n * Medium level of harm severity.\n */\n HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM',\n /**\n * High level of harm severity.\n */\n HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH',\n /**\n * Harm severity is not supported.\n *\n * @remarks\n * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.\n */\n HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED'\n} as const;\n\n/**\n * Harm severity levels.\n * @public\n */\nexport type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport const BlockReason = {\n /**\n * Content was blocked by safety settings.\n */\n SAFETY: 'SAFETY',\n /**\n * Content was blocked, but the reason is uncategorized.\n */\n OTHER: 'OTHER',\n /**\n * Content was blocked because it contained terms from the terminology blocklist.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * Content was blocked due to prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT'\n} as const;\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport const FinishReason = {\n /**\n * Natural stop point of the model or provided stop sequence.\n */\n STOP: 'STOP',\n /**\n * The maximum number of tokens as specified in the request was reached.\n */\n MAX_TOKENS: 'MAX_TOKENS',\n /**\n * The candidate content was flagged for safety reasons.\n */\n SAFETY: 'SAFETY',\n /**\n * The candidate content was flagged for recitation reasons.\n */\n RECITATION: 'RECITATION',\n /**\n * Unknown reason.\n */\n OTHER: 'OTHER',\n /**\n * The 
candidate content contained forbidden terms.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * The candidate content potentially contained prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT',\n /**\n * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).\n */\n SPII: 'SPII',\n /**\n * The function call generated by the model was invalid.\n */\n MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL'\n} as const;\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];\n\n/**\n * @public\n */\nexport const FunctionCallingMode = {\n /**\n * Default model behavior; model decides to predict either a function call\n * or a natural language response.\n */\n AUTO: 'AUTO',\n /**\n * Model is constrained to always predicting a function call only.\n * If `allowed_function_names` is set, the predicted function call will be\n * limited to any one of `allowed_function_names`, else the predicted\n * function call will be any one of the provided `function_declarations`.\n */\n ANY: 'ANY',\n /**\n * Model will not predict any function call. Model behavior is same as when\n * not passing any function declarations.\n */\n NONE: 'NONE'\n} as const;\n\n/**\n * @public\n */\nexport type FunctionCallingMode =\n (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];\n\n/**\n * Content part modality.\n * @public\n */\nexport const Modality = {\n /**\n * Unspecified modality.\n */\n MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED',\n /**\n * Plain text.\n */\n TEXT: 'TEXT',\n /**\n * Image.\n */\n IMAGE: 'IMAGE',\n /**\n * Video.\n */\n VIDEO: 'VIDEO',\n /**\n * Audio.\n */\n AUDIO: 'AUDIO',\n /**\n * Document (for example, PDF).\n */\n DOCUMENT: 'DOCUMENT'\n} as const;\n\n/**\n * Content part modality.\n * @public\n */\nexport type Modality = (typeof Modality)[keyof typeof Modality];\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport const ResponseModality = {\n /**\n * Text.\n * @beta\n */\n TEXT: 'TEXT',\n /**\n * Image.\n * @beta\n */\n IMAGE: 'IMAGE',\n /**\n * Audio.\n * @beta\n */\n AUDIO: 'AUDIO'\n} as const;\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport type ResponseModality =\n (typeof ResponseModality)[keyof typeof ResponseModality];\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @remarks\n * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an\n * on-device model. If on-device inference is not available, the SDK\n * will fall back to using a cloud-hosted model.\n * <br/>\n * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an\n * on-device model. The SDK will not fall back to a cloud-hosted model.\n * If on-device inference is not available, inference methods will throw.\n * <br/>\n * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a\n * cloud-hosted model. The SDK will not fall back to an on-device model.\n * <br/>\n * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a\n * cloud-hosted model. 
If not available, the SDK will fall back to an\n * on-device model.\n *\n * @beta\n */\nexport const InferenceMode = {\n 'PREFER_ON_DEVICE': 'prefer_on_device',\n 'ONLY_ON_DEVICE': 'only_on_device',\n 'ONLY_IN_CLOUD': 'only_in_cloud',\n 'PREFER_IN_CLOUD': 'prefer_in_cloud'\n} as const;\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport const InferenceSource = {\n 'ON_DEVICE': 'on_device',\n 'IN_CLOUD': 'in_cloud'\n} as const;\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceSource =\n (typeof InferenceSource)[keyof typeof InferenceSource];\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport const Outcome = {\n UNSPECIFIED: 'OUTCOME_UNSPECIFIED',\n OK: 'OUTCOME_OK',\n FAILED: 'OUTCOME_FAILED',\n DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'\n};\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport type Outcome = (typeof Outcome)[keyof typeof Outcome];\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport const Language = {\n UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',\n PYTHON: 'PYTHON'\n};\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport type Language = (typeof Language)[keyof typeof Language];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, FunctionCall, InlineDataPart } from './content';\nimport {\n BlockReason,\n FinishReason,\n HarmCategory,\n HarmProbability,\n HarmSeverity,\n InferenceSource,\n Modality\n} from './enums';\n\n/**\n * Result object returned from {@link GenerativeModel.generateContent} call.\n *\n * @public\n */\nexport interface GenerateContentResult {\n response: EnhancedGenerateContentResponse;\n}\n\n/**\n * Result object returned from {@link GenerativeModel.generateContentStream} call.\n * Iterate over `stream` to get chunks as they come in and/or\n * use the `response` promise to get the aggregated response when\n * the stream is done.\n *\n * @public\n */\nexport interface GenerateContentStreamResult {\n stream: AsyncGenerator<EnhancedGenerateContentResponse>;\n response: Promise<EnhancedGenerateContentResponse>;\n}\n\n/**\n * Response object wrapped with helper methods.\n *\n * @public\n */\nexport interface EnhancedGenerateContentResponse\n extends GenerateContentResponse {\n /**\n * Returns the text string from the response, if available.\n * Throws if the prompt or candidate was blocked.\n */\n text: () => string;\n /**\n * Aggregates and returns every {@link InlineDataPart} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n inlineDataParts: () => InlineDataPart[] | undefined;\n /**\n * Aggregates and returns every 
{@link FunctionCall} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n functionCalls: () => FunctionCall[] | undefined;\n /**\n * Aggregates and returns every {@link TextPart} with their `thought` property set\n * to `true` from the first candidate of {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n *\n * @remarks\n * Thought summaries provide a brief overview of the model's internal thinking process,\n * offering insight into how it arrived at the final answer. This can be useful for\n * debugging, understanding the model's reasoning, and verifying its accuracy.\n *\n * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is\n * set to `true`.\n */\n thoughtSummary: () => string | undefined;\n /**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\n inferenceSource?: InferenceSource;\n}\n\n/**\n * Individual response from {@link GenerativeModel.generateContent} and\n * {@link GenerativeModel.generateContentStream}.\n * `generateContentStream()` will return one in each chunk until\n * the stream is done.\n * @public\n */\nexport interface GenerateContentResponse {\n candidates?: GenerateContentCandidate[];\n promptFeedback?: PromptFeedback;\n usageMetadata?: UsageMetadata;\n}\n\n/**\n * Usage metadata about a {@link GenerateContentResponse}.\n *\n * @public\n */\nexport interface UsageMetadata {\n promptTokenCount: number;\n candidatesTokenCount: number;\n /**\n * The number of tokens used by the model's internal \"thinking\" process.\n */\n thoughtsTokenCount?: number;\n totalTokenCount: number;\n /**\n * The number of tokens used by tools.\n */\n toolUsePromptTokenCount?: number;\n promptTokensDetails?: ModalityTokenCount[];\n candidatesTokensDetails?: ModalityTokenCount[];\n /**\n * A list of tokens used by tools, broken down by modality.\n */\n toolUsePromptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * Represents token counting info for a single modality.\n *\n * @public\n */\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality: Modality;\n /** The number of tokens counted. 
*/\n tokenCount: number;\n}\n\n/**\n * If the prompt was blocked, this will be populated with `blockReason` and\n * the relevant `safetyRatings`.\n * @public\n */\nexport interface PromptFeedback {\n blockReason?: BlockReason;\n safetyRatings: SafetyRating[];\n /**\n * A human-readable description of the `blockReason`.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n blockReasonMessage?: string;\n}\n\n/**\n * A candidate returned as part of a {@link GenerateContentResponse}.\n * @public\n */\nexport interface GenerateContentCandidate {\n index: number;\n content: Content;\n finishReason?: FinishReason;\n finishMessage?: string;\n safetyRatings?: SafetyRating[];\n citationMetadata?: CitationMetadata;\n groundingMetadata?: GroundingMetadata;\n urlContextMetadata?: URLContextMetadata;\n}\n\n/**\n * Citation metadata that may be found on a {@link GenerateContentCandidate}.\n * @public\n */\nexport interface CitationMetadata {\n citations: Citation[];\n}\n\n/**\n * A single citation.\n * @public\n */\nexport interface Citation {\n startIndex?: number;\n endIndex?: number;\n uri?: string;\n license?: string;\n /**\n * The title of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n title?: string;\n /**\n * The publication date of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n publicationDate?: Date;\n}\n\n/**\n * Metadata returned when grounding is enabled.\n *\n * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * \"Grounding with Google Search\" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}\n * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}\n * section within the Service Specific Terms).\n *\n * @public\n */\nexport interface GroundingMetadata {\n /**\n * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be\n * embedded in an app to display a Google Search entry point for follow-up web searches related to\n * a model's \"Grounded Response\".\n */\n searchEntryPoint?: SearchEntrypoint;\n /**\n * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content\n * (for example, from a web page). that the model used to ground its response.\n */\n groundingChunks?: GroundingChunk[];\n /**\n * A list of {@link GroundingSupport} objects. Each object details how specific segments of the\n * model's response are supported by the `groundingChunks`.\n */\n groundingSupports?: GroundingSupport[];\n /**\n * A list of web search queries that the model performed to gather the grounding information.\n * These can be used to allow users to explore the search results themselves.\n */\n webSearchQueries?: string[];\n /**\n * @deprecated Use {@link GroundingSupport} instead.\n */\n retrievalQueries?: string[];\n}\n\n/**\n * Google search entry point.\n *\n * @public\n */\nexport interface SearchEntrypoint {\n /**\n * HTML/CSS snippet that must be embedded in a web page. 
The snippet is designed to avoid\n * undesired interaction with the rest of the page's CSS.\n *\n * To ensure proper rendering and prevent CSS conflicts, it is recommended\n * to encapsulate this `renderedContent` within a shadow DOM when embedding it\n * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.\n *\n * @example\n * ```javascript\n * const container = document.createElement('div');\n * document.body.appendChild(container);\n * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;\n * ```\n */\n renderedContent?: string;\n}\n\n/**\n * Represents a chunk of retrieved data that supports a claim in the model's response. This is part\n * of the grounding information provided when grounding is enabled.\n *\n * @public\n */\nexport interface GroundingChunk {\n /**\n * Contains details if the grounding chunk is from a web source.\n */\n web?: WebGroundingChunk;\n}\n\n/**\n * A grounding chunk from the web.\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for \"Grounding with Google Search\".\n *\n * @public\n */\nexport interface WebGroundingChunk {\n /**\n * The URI of the retrieved web page.\n */\n uri?: string;\n /**\n * The title of the retrieved web page.\n */\n title?: string;\n /**\n * The domain of the original URI from which the content was retrieved.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be\n * `undefined`.\n */\n domain?: string;\n}\n\n/**\n * Provides information about how a specific segment of the model's response is supported by the\n * retrieved grounding chunks.\n *\n * @public\n */\nexport interface GroundingSupport {\n /**\n * Specifies the segment of the model's response content that this grounding support pertains to.\n */\n segment?: Segment;\n /**\n * A list of indices that refer to specific {@link GroundingChunk} objects within the\n * {@link GroundingMetadata.groundingChunks} array. These referenced chunks\n * are the sources that support the claim made in the associated `segment` of the response.\n * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,\n * and `groundingChunks[4]` are the retrieved content supporting this part of the response.\n */\n groundingChunkIndices?: number[];\n}\n\n/**\n * Represents a specific segment within a {@link Content} object, often used to\n * pinpoint the exact location of text or data that grounding information refers to.\n *\n * @public\n */\nexport interface Segment {\n /**\n * The zero-based index of the {@link Part} object within the `parts` array\n * of its parent {@link Content} object. This identifies which part of the\n * content the segment belongs to.\n */\n partIndex: number;\n /**\n * The zero-based start index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the\n * beginning of the part's content (e.g., `Part.text`).\n */\n startIndex: number;\n /**\n * The zero-based end index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. 
This offset is exclusive, meaning the character\n * at this index is not included in the segment.\n */\n endIndex: number;\n /**\n * The text corresponding to the segment from the response.\n */\n text: string;\n}\n\n/**\n * Metadata related to {@link URLContextTool}.\n *\n * @beta\n */\nexport interface URLContextMetadata {\n /**\n * List of URL metadata used to provide context to the Gemini model.\n */\n urlMetadata: URLMetadata[];\n}\n\n/**\n * Metadata for a single URL retrieved by the {@link URLContextTool} tool.\n *\n * @beta\n */\nexport interface URLMetadata {\n /**\n * The retrieved URL.\n */\n retrievedUrl?: string;\n /**\n * The status of the URL retrieval.\n */\n urlRetrievalStatus?: URLRetrievalStatus;\n}\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport const URLRetrievalStatus = {\n /**\n * Unspecified retrieval status.\n */\n URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',\n /**\n * The URL retrieval was successful.\n */\n URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',\n /**\n * The URL retrieval failed.\n */\n URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',\n /**\n * The URL retrieval failed because the content is behind a paywall.\n */\n URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',\n /**\n * The URL retrieval failed because the content is unsafe.\n */\n URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'\n};\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport type URLRetrievalStatus =\n (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];\n\n/**\n * @public\n */\nexport interface WebAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * @public\n */\nexport interface RetrievedContextAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * Protobuf google.type.Date\n * @public\n */\nexport interface Date {\n year: number;\n month: number;\n day: number;\n}\n\n/**\n * A safety rating associated with a {@link GenerateContentCandidate}\n * @public\n */\nexport interface SafetyRating {\n category: HarmCategory;\n probability: HarmProbability;\n /**\n * The harm severity level.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.\n */\n severity: HarmSeverity;\n /**\n * The probability score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link 
VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n probabilityScore: number;\n /**\n * The severity score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n severityScore: number;\n blocked: boolean;\n}\n\n/**\n * Response from calling {@link GenerativeModel.countTokens}.\n * @public\n */\nexport interface CountTokensResponse {\n /**\n * The total number of tokens counted across all instances from the request.\n */\n totalTokens: number;\n /**\n * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.\n *\n * The total number of billable characters counted across all instances\n * from the request.\n */\n totalBillableCharacters?: number;\n /**\n * The breakdown, by modality, of how many tokens are consumed by the prompt.\n */\n promptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * An incremental content update from the model.\n *\n * @beta\n */\nexport interface LiveServerContent {\n type: 'serverContent';\n /**\n * The content that the model has generated as part of the current conversation with the user.\n */\n modelTurn?: Content;\n /**\n * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.\n */\n turnComplete?: boolean;\n /**\n * Indicates whether the model was interrupted by the client. An interruption occurs when\n * the client sends a message before the model finishes it's turn. This is `undefined` if the\n * model was not interrupted.\n */\n interrupted?: boolean;\n /**\n * Transcription of the audio that was input to the model.\n */\n inputTranscription?: Transcription;\n /**\n * Transcription of the audio output from the model.\n */\n outputTranscription?: Transcription;\n}\n\n/**\n * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription\n * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on\n * the {@link LiveGenerationConfig}.\n *\n * @beta\n */\n\nexport interface Transcription {\n /**\n * The text transcription of the audio.\n */\n text?: string;\n}\n\n/**\n * A request from the model for the client to execute one or more functions.\n *\n * @beta\n */\nexport interface LiveServerToolCall {\n type: 'toolCall';\n /**\n * An array of function calls to run.\n */\n functionCalls: FunctionCall[];\n}\n\n/**\n * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.\n *\n * @beta\n */\nexport interface LiveServerToolCallCancellation {\n type: 'toolCallCancellation';\n /**\n * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.\n */\n functionIds: string[];\n}\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n *\n * @beta\n */\nexport const LiveResponseType = {\n SERVER_CONTENT: 'serverContent',\n TOOL_CALL: 'toolCall',\n TOOL_CALL_CANCELLATION: 'toolCallCancellation'\n};\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n * This is a property on all messages that can be used for type narrowing. 
This property is not\n * returned by the server, it is assigned to a server message object once it's parsed.\n *\n * @beta\n */\nexport type LiveResponseType =\n (typeof LiveResponseType)[keyof typeof LiveResponseType];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GenerateContentResponse } from './responses';\n\n/**\n * Details object that may be included in an error response.\n *\n * @public\n */\nexport interface ErrorDetails {\n '@type'?: string;\n\n /** The reason for the error. */\n reason?: string;\n\n /** The domain where the error occurred. */\n domain?: string;\n\n /** Additional metadata about the error. */\n metadata?: Record<string, unknown>;\n\n /** Any other relevant information about the error. */\n [key: string]: unknown;\n}\n\n/**\n * Details object that contains data originating from a bad HTTP response.\n *\n * @public\n */\nexport interface CustomErrorData {\n /** HTTP status code of the error response. */\n status?: number;\n\n /** HTTP status text of the error response. */\n statusText?: string;\n\n /** Response from a {@link GenerateContentRequest} */\n response?: GenerateContentResponse;\n\n /** Optional additional details about the error. */\n errorDetails?: ErrorDetails[];\n}\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport const AIErrorCode = {\n /** A generic error occurred. */\n ERROR: 'error',\n\n /** An error occurred in a request. */\n REQUEST_ERROR: 'request-error',\n\n /** An error occurred in a response. */\n RESPONSE_ERROR: 'response-error',\n\n /** An error occurred while performing a fetch. */\n FETCH_ERROR: 'fetch-error',\n\n /** An error occurred because an operation was attempted on a closed session. */\n SESSION_CLOSED: 'session-closed',\n\n /** An error associated with a Content object. */\n INVALID_CONTENT: 'invalid-content',\n\n /** An error due to the Firebase API not being enabled in the Console. */\n API_NOT_ENABLED: 'api-not-enabled',\n\n /** An error due to invalid Schema input. */\n INVALID_SCHEMA: 'invalid-schema',\n\n /** An error occurred due to a missing Firebase API key. */\n NO_API_KEY: 'no-api-key',\n\n /** An error occurred due to a missing Firebase app ID. */\n NO_APP_ID: 'no-app-id',\n\n /** An error occurred due to a model name not being specified during initialization. */\n NO_MODEL: 'no-model',\n\n /** An error occurred due to a missing project ID. */\n NO_PROJECT_ID: 'no-project-id',\n\n /** An error occurred while parsing. */\n PARSE_FAILED: 'parse-failed',\n\n /** An error occurred due an attempt to use an unsupported feature. 
*/\n UNSUPPORTED: 'unsupported'\n} as const;\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport const SchemaType = {\n /** String type. */\n STRING: 'string',\n /** Number type. */\n NUMBER: 'number',\n /** Integer type. */\n INTEGER: 'integer',\n /** Boolean type. */\n BOOLEAN: 'boolean',\n /** Array type. */\n ARRAY: 'array',\n /** Object type. */\n OBJECT: 'object'\n} as const;\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];\n\n/**\n * Basic {@link Schema} properties shared across several Schema-related\n * types.\n * @public\n */\nexport interface SchemaShared<T> {\n /**\n * An array of {@link Schema}. The generated data must be valid against any of the schemas\n * listed in this array. This allows specifying multiple possible structures or types for a\n * single field.\n */\n anyOf?: T[];\n /** Optional. The format of the property.\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or\n * `'date-time'`, otherwise requests will fail.\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /**\n * The title of the property. This helps document the schema's purpose but does not typically\n * constrain the generated value. It can subtly guide the model by clarifying the intent of a\n * field.\n */\n title?: string;\n /** Optional. The items of the property. */\n items?: T;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Map of `Schema` objects. */\n properties?: {\n [k: string]: T;\n };\n /** A hint suggesting the order in which the keys should appear in the generated JSON string. */\n propertyOrdering?: string[];\n /** Optional. The enum of the property. */\n enum?: string[];\n /** Optional. The example of the property. */\n example?: unknown;\n /** Optional. Whether the property is nullable. */\n nullable?: boolean;\n /** The minimum value of a numeric type. */\n minimum?: number;\n /** The maximum value of a numeric type. 
*/\n maximum?: number;\n [key: string]: unknown;\n}\n\n/**\n * Params passed to {@link Schema} static methods to create specific\n * {@link Schema} classes.\n * @public\n */\nexport interface SchemaParams extends SchemaShared<SchemaInterface> {}\n\n/**\n * Final format for {@link Schema} params passed to backend requests.\n * @public\n */\nexport interface SchemaRequest extends SchemaShared<SchemaRequest> {\n /**\n * The type of the property. this can only be undefined when using `anyOf` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }.\n */\n type?: SchemaType;\n /** Optional. Array of required property. */\n required?: string[];\n}\n\n/**\n * Interface for {@link Schema} class.\n * @public\n */\nexport interface SchemaInterface extends SchemaShared<SchemaInterface> {\n /**\n * The type of the property. this can only be undefined when using `anyof` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.\n */\n type?: SchemaType;\n}\n\n/**\n * Interface for JSON parameters in a schema of {@link (SchemaType:type)}\n * \"object\" when not using the `Schema.object()` helper.\n * @public\n */\nexport interface ObjectSchemaRequest extends SchemaRequest {\n type: 'object';\n /**\n * This is not a property accepted in the final request to the backend, but is\n * a client-side convenience property that is only usable by constructing\n * a schema through the `Schema.object()` helper method. Populating this\n * property will cause response errors if the object is not wrapped with\n * `Schema.object()`.\n */\n optionalProperties?: never;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ImagenImageFormat } from '../../requests/imagen-image-format';\n\n/**\n * Parameters for configuring an {@link ImagenModel}.\n *\n * @public\n */\nexport interface ImagenModelParams {\n /**\n * The Imagen model to use for generating images.\n * For example: `imagen-3.0-generate-002`.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}\n * for a full list of supported Imagen 3 models.\n */\n model: string;\n /**\n * Configuration options for generating images with Imagen.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering potentially inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n}\n\n/**\n * Configuration options for generating images with Imagen.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for\n * more details.\n *\n * @public\n */\nexport interface ImagenGenerationConfig {\n /**\n * A description of what should be omitted from the generated images.\n *\n * Support for negative prompts depends on 
the Imagen model.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.\n *\n * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions\n * greater than `imagen-3.0-generate-002`.\n */\n negativePrompt?: string;\n /**\n * The number of images to generate. The default value is 1.\n *\n * The number of sample images that may be generated in each request depends on the model\n * (typically up to 4); see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">sampleCount</a>\n * documentation for more details.\n */\n numberOfImages?: number;\n /**\n * The aspect ratio of the generated images. The default value is square 1:1.\n * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}\n * for more details.\n */\n aspectRatio?: ImagenAspectRatio;\n /**\n * The image format of the generated images. The default is PNG.\n *\n * See {@link ImagenImageFormat} for more details.\n */\n imageFormat?: ImagenImageFormat;\n /**\n * Whether to add an invisible watermark to generated images.\n *\n * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate\n * that they are AI generated. If set to `false`, watermarking will be disabled.\n *\n * For Imagen 3 models, the default value is `true`; see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">addWatermark</a>\n * documentation for more details.\n *\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,\n * and cannot be turned off.\n */\n addWatermark?: boolean;\n}\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport const ImagenSafetyFilterLevel = {\n /**\n * The most aggressive filtering level; most strict blocking.\n */\n BLOCK_LOW_AND_ABOVE: 'block_low_and_above',\n /**\n * Blocks some sensitive prompts and responses.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',\n /**\n * Blocks few sensitive prompts and responses.\n */\n BLOCK_ONLY_HIGH: 'block_only_high',\n /**\n * The least aggressive filtering level; blocks very few sensitive prompts and responses.\n *\n * Access to this feature is restricted and may require your case to be reviewed and approved by\n * Cloud support.\n */\n BLOCK_NONE: 'block_none'\n} as const;\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. 
See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport type ImagenSafetyFilterLevel =\n (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport const ImagenPersonFilterLevel = {\n /**\n * Disallow generation of images containing people or faces; images of people are filtered out.\n */\n BLOCK_ALL: 'dont_allow',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ADULT: 'allow_adult',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ALL: 'allow_all'\n} as const;\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport type ImagenPersonFilterLevel =\n (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];\n\n/**\n * Settings for controlling the aggressiveness of filtering out sensitive content.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details.\n *\n * @public\n */\nexport interface ImagenSafetySettings {\n /**\n * A filter level controlling how aggressive to filter out sensitive content from generated\n * images.\n */\n safetyFilterLevel?: ImagenSafetyFilterLevel;\n /**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n */\n personFilterLevel?: ImagenPersonFilterLevel;\n}\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport const ImagenAspectRatio = {\n /**\n * Square (1:1) aspect ratio.\n */\n 'SQUARE': '1:1',\n /**\n * Landscape (3:4) aspect ratio.\n */\n 'LANDSCAPE_3x4': '3:4',\n /**\n * Portrait (4:3) aspect ratio.\n */\n 'PORTRAIT_4x3': '4:3',\n /**\n * Landscape (16:9) aspect ratio.\n */\n 'LANDSCAPE_16x9': '16:9',\n /**\n * Portrait (9:16) aspect ratio.\n */\n 'PORTRAIT_9x16': '9:16'\n} as const;\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for 
generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport type ImagenAspectRatio =\n (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp } from '@firebase/app';\nimport { Backend } from './backend';\n\nexport * from './types';\n\n/**\n * An instance of the Firebase AI SDK.\n *\n * Do not create this instance directly. Instead, use {@link getAI | getAI()}.\n *\n * @public\n */\nexport interface AI {\n /**\n * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.\n */\n app: FirebaseApp;\n /**\n * A {@link Backend} instance that specifies the configuration for the target backend,\n * either the Gemini Developer API (using {@link GoogleAIBackend}) or the\n * Vertex AI Gemini API (using {@link VertexAIBackend}).\n */\n backend: Backend;\n /**\n * Options applied to this {@link AI} instance.\n */\n options?: AIOptions;\n /**\n * @deprecated use `AI.backend.location` instead.\n *\n * The location configured for this AI service instance, relevant for Vertex AI backends.\n */\n location: string;\n}\n\n/**\n * An enum-like object containing constants that represent the supported backends\n * for the Firebase AI SDK.\n * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)\n * the SDK will communicate with.\n *\n * These values are assigned to the `backendType` property within the specific backend\n * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify\n * which service to target.\n *\n * @public\n */\nexport const BackendType = {\n /**\n * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.\n * Use this constant when creating a {@link VertexAIBackend} configuration.\n */\n VERTEX_AI: 'VERTEX_AI',\n\n /**\n * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).\n * Use this constant when creating a {@link GoogleAIBackend} configuration.\n */\n GOOGLE_AI: 'GOOGLE_AI'\n} as const; // Using 'as const' makes the string values literal types\n\n/**\n * Type alias representing valid backend types.\n * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.\n *\n * @public\n */\nexport type BackendType = (typeof BackendType)[keyof typeof BackendType];\n\n/**\n * Options for initializing the AI service using {@link getAI | getAI()}.\n * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)\n * and configuring its specific options (like location for Vertex AI).\n *\n * @public\n */\nexport interface AIOptions {\n /**\n * The backend configuration to use for the AI service instance.\n * Defaults to the Gemini Developer API backend 
({@link GoogleAIBackend}).\n */\n backend?: Backend;\n /**\n * Whether to use App Check limited use tokens. Defaults to false.\n */\n useLimitedUseAppCheckTokens?: boolean;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { DEFAULT_LOCATION } from './constants';\nimport { BackendType } from './public-types';\n\n/**\n * Abstract base class representing the configuration for an AI service backend.\n * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for\n * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and\n * {@link VertexAIBackend} for the Vertex AI Gemini API.\n *\n * @public\n */\nexport abstract class Backend {\n /**\n * Specifies the backend type.\n */\n readonly backendType: BackendType;\n\n /**\n * Protected constructor for use by subclasses.\n * @param type - The backend type.\n */\n protected constructor(type: BackendType) {\n this.backendType = type;\n }\n}\n\n/**\n * Configuration class for the Gemini Developer API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.\n *\n * @public\n */\nexport class GoogleAIBackend extends Backend {\n /**\n * Creates a configuration object for the Gemini Developer API backend.\n */\n constructor() {\n super(BackendType.GOOGLE_AI);\n }\n}\n\n/**\n * Configuration class for the Vertex AI Gemini API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.\n *\n * @public\n */\nexport class VertexAIBackend extends Backend {\n /**\n * The region identifier.\n * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n readonly location: string;\n\n /**\n * Creates a configuration object for the Vertex AI backend.\n *\n * @param location - The region identifier, defaulting to `us-central1`;\n * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n constructor(location: string = DEFAULT_LOCATION) {\n super(BackendType.VERTEX_AI);\n if (!location) {\n this.location = DEFAULT_LOCATION;\n } else {\n this.location = location;\n }\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * 
limitations under the License.\n */\n\nimport { FirebaseApp, _FirebaseService } from '@firebase/app';\nimport { AI, AIOptions, InferenceMode, OnDeviceParams } from './public-types';\nimport {\n AppCheckInternalComponentName,\n FirebaseAppCheckInternal\n} from '@firebase/app-check-interop-types';\nimport { Provider } from '@firebase/component';\nimport {\n FirebaseAuthInternal,\n FirebaseAuthInternalName\n} from '@firebase/auth-interop-types';\nimport { Backend, VertexAIBackend } from './backend';\nimport { ChromeAdapterImpl } from './methods/chrome-adapter';\n\nexport class AIService implements AI, _FirebaseService {\n auth: FirebaseAuthInternal | null;\n appCheck: FirebaseAppCheckInternal | null;\n _options?: Omit<AIOptions, 'backend'>;\n location: string; // This is here for backwards-compatibility\n\n constructor(\n public app: FirebaseApp,\n public backend: Backend,\n authProvider?: Provider<FirebaseAuthInternalName>,\n appCheckProvider?: Provider<AppCheckInternalComponentName>,\n public chromeAdapterFactory?: (\n mode: InferenceMode,\n window?: Window,\n params?: OnDeviceParams\n ) => ChromeAdapterImpl | undefined\n ) {\n const appCheck = appCheckProvider?.getImmediate({ optional: true });\n const auth = authProvider?.getImmediate({ optional: true });\n this.auth = auth || null;\n this.appCheck = appCheck || null;\n\n if (backend instanceof VertexAIBackend) {\n this.location = backend.location;\n } else {\n this.location = '';\n }\n }\n\n _delete(): Promise<void> {\n return Promise.resolve();\n }\n\n set options(optionsToSet: AIOptions) {\n this._options = optionsToSet;\n }\n\n get options(): AIOptions | undefined {\n return this._options;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseError } from '@firebase/util';\nimport { AIErrorCode, CustomErrorData } from './types';\nimport { AI_TYPE } from './constants';\n\n/**\n * Error class for the Firebase AI SDK.\n *\n * @public\n */\nexport class AIError extends FirebaseError {\n /**\n * Constructs a new instance of the `AIError` class.\n *\n * @param code - The error code from {@link (AIErrorCode:type)}.\n * @param message - A human-readable message describing the error.\n * @param customErrorData - Optional error data.\n */\n constructor(\n readonly code: AIErrorCode,\n message: string,\n readonly customErrorData?: CustomErrorData\n ) {\n // Match error format used by FirebaseError from ErrorFactory\n const service = AI_TYPE;\n const fullCode = `${service}/${code}`;\n const fullMessage = `${service}: ${message} (${fullCode})`;\n super(code, fullMessage);\n\n // FirebaseError initializes a stack trace, but it assumes the error is created from the error\n // factory. 
Since we break this assumption, we set the stack trace to be originating from this\n // constructor.\n // This is only supported in V8.\n if (Error.captureStackTrace) {\n // Allows us to initialize the stack trace without including the constructor itself at the\n // top level of the stack trace.\n Error.captureStackTrace(this, AIError);\n }\n\n // Allows instanceof AIError in ES5/ES6\n // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work\n // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget\n // which we can now use since we no longer target ES5.\n Object.setPrototypeOf(this, AIError.prototype);\n\n // Since Error is an interface, we don't inherit toString and so we define it ourselves.\n this.toString = () => fullMessage;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AI_TYPE } from './constants';\nimport { AIError } from './errors';\nimport { AIErrorCode } from './types';\nimport { Backend, GoogleAIBackend, VertexAIBackend } from './backend';\n\n/**\n * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}\n * instances by backend type.\n *\n * @internal\n */\nexport function encodeInstanceIdentifier(backend: Backend): string {\n if (backend instanceof GoogleAIBackend) {\n return `${AI_TYPE}/googleai`;\n } else if (backend instanceof VertexAIBackend) {\n return `${AI_TYPE}/vertexai/${backend.location}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(backend.backendType)}`\n );\n }\n}\n\n/**\n * Decodes an instance identifier string into a {@link Backend}.\n *\n * @internal\n */\nexport function decodeInstanceIdentifier(instanceIdentifier: string): Backend {\n const identifierParts = instanceIdentifier.split('/');\n if (identifierParts[0] !== AI_TYPE) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`\n );\n }\n const backendType = identifierParts[1];\n switch (backendType) {\n case 'vertexai':\n const location: string | undefined = identifierParts[2];\n if (!location) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown location '${instanceIdentifier}'`\n );\n }\n return new VertexAIBackend(location);\n case 'googleai':\n return new GoogleAIBackend();\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier string: '${instanceIdentifier}'`\n );\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * 
distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode, AI, BackendType } from '../public-types';\nimport { AIService } from '../service';\nimport { ApiSettings } from '../types/internal';\nimport { _isFirebaseServerApp } from '@firebase/app';\n\n/**\n * Base class for Firebase AI model APIs.\n *\n * Instances of this class are associated with a specific Firebase AI {@link Backend}\n * and provide methods for interacting with the configured generative model.\n *\n * @public\n */\nexport abstract class AIModel {\n /**\n * The fully qualified model resource name to use for generating images\n * (for example, `publishers/google/models/imagen-3.0-generate-002`).\n */\n readonly model: string;\n\n /**\n * @internal\n */\n _apiSettings: ApiSettings;\n\n /**\n * Constructs a new instance of the {@link AIModel} class.\n *\n * This constructor should only be called from subclasses that provide\n * a model API.\n *\n * @param ai - an {@link AI} instance.\n * @param modelName - The name of the model being used. It can be in one of the following formats:\n * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)\n * - `models/my-model` (will resolve to `publishers/google/models/my-model`)\n * - `publishers/my-publisher/models/my-model` (fully qualified model name)\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @internal\n */\n protected constructor(ai: AI, modelName: string) {\n if (!ai.app?.options?.apiKey) {\n throw new AIError(\n AIErrorCode.NO_API_KEY,\n `The \"apiKey\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`\n );\n } else if (!ai.app?.options?.projectId) {\n throw new AIError(\n AIErrorCode.NO_PROJECT_ID,\n `The \"projectId\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`\n );\n } else if (!ai.app?.options?.appId) {\n throw new AIError(\n AIErrorCode.NO_APP_ID,\n `The \"appId\" field is empty in the local Firebase config. 
Firebase AI requires this field to contain a valid app ID.`\n );\n } else {\n this._apiSettings = {\n apiKey: ai.app.options.apiKey,\n project: ai.app.options.projectId,\n appId: ai.app.options.appId,\n automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,\n location: ai.location,\n backend: ai.backend\n };\n\n if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {\n const token = ai.app.settings.appCheckToken;\n this._apiSettings.getAppCheckToken = () => {\n return Promise.resolve({ token });\n };\n } else if ((ai as AIService).appCheck) {\n if (ai.options?.useLimitedUseAppCheckTokens) {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getLimitedUseToken();\n } else {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getToken();\n }\n }\n\n if ((ai as AIService).auth) {\n this._apiSettings.getAuthToken = () =>\n (ai as AIService).auth!.getToken();\n }\n\n this.model = AIModel.normalizeModelName(\n modelName,\n this._apiSettings.backend.backendType\n );\n }\n }\n\n /**\n * Normalizes the given model name to a fully qualified model resource name.\n *\n * @param modelName - The model name to normalize.\n * @returns The fully qualified model resource name.\n *\n * @internal\n */\n static normalizeModelName(\n modelName: string,\n backendType: BackendType\n ): string {\n if (backendType === BackendType.GOOGLE_AI) {\n return AIModel.normalizeGoogleAIModelName(modelName);\n } else {\n return AIModel.normalizeVertexAIModelName(modelName);\n }\n }\n\n /**\n * @internal\n */\n private static normalizeGoogleAIModelName(modelName: string): string {\n return `models/${modelName}`;\n }\n\n /**\n * @internal\n */\n private static normalizeVertexAIModelName(modelName: string): string {\n let model: string;\n if (modelName.includes('/')) {\n if (modelName.startsWith('models/')) {\n // Add 'publishers/google' if the user is only passing in 'models/model-name'.\n model = `publishers/google/${modelName}`;\n } else {\n // Any other custom format (e.g. 
tuned models) must be passed in correctly.\n model = modelName;\n }\n } else {\n // If path is not included, assume it's a non-tuned model.\n model = `publishers/google/models/${modelName}`;\n }\n\n return model;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Logger } from '@firebase/logger';\n\nexport const logger = new Logger('@firebase/vertexai');\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ErrorDetails, RequestOptions, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ApiSettings } from '../types/internal';\nimport {\n DEFAULT_API_VERSION,\n DEFAULT_DOMAIN,\n DEFAULT_FETCH_TIMEOUT_MS,\n LANGUAGE_TAG,\n PACKAGE_VERSION\n} from '../constants';\nimport { logger } from '../logger';\nimport { GoogleAIBackend, VertexAIBackend } from '../backend';\nimport { BackendType } from '../public-types';\n\nexport enum Task {\n GENERATE_CONTENT = 'generateContent',\n STREAM_GENERATE_CONTENT = 'streamGenerateContent',\n COUNT_TOKENS = 'countTokens',\n PREDICT = 'predict'\n}\n\nexport class RequestUrl {\n constructor(\n public model: string,\n public task: Task,\n public apiSettings: ApiSettings,\n public stream: boolean,\n public requestOptions?: RequestOptions\n ) {}\n toString(): string {\n const url = new URL(this.baseUrl); // Throws if the URL is invalid\n url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`;\n url.search = this.queryParams.toString();\n return url.toString();\n }\n\n private get baseUrl(): string {\n return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`;\n }\n\n private get apiVersion(): string {\n return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available\n }\n\n private get modelPath(): string {\n if (this.apiSettings.backend instanceof GoogleAIBackend) {\n return `projects/${this.apiSettings.project}/${this.model}`;\n } else if (this.apiSettings.backend instanceof VertexAIBackend) {\n return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`\n );\n }\n }\n\n private get queryParams(): URLSearchParams {\n const params = new URLSearchParams();\n if (this.stream) {\n params.set('alt', 'sse');\n }\n\n return params;\n }\n}\n\nexport class WebSocketUrl {\n 
constructor(public apiSettings: ApiSettings) {}\n toString(): string {\n const url = new URL(`wss://${DEFAULT_DOMAIN}`);\n url.pathname = this.pathname;\n\n const queryParams = new URLSearchParams();\n queryParams.set('key', this.apiSettings.apiKey);\n url.search = queryParams.toString();\n\n return url.toString();\n }\n\n private get pathname(): string {\n if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent';\n } else {\n return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`;\n }\n }\n}\n\n/**\n * Log language and \"fire/version\" to x-goog-api-client\n */\nfunction getClientHeaders(): string {\n const loggingTags = [];\n loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`);\n loggingTags.push(`fire/${PACKAGE_VERSION}`);\n return loggingTags.join(' ');\n}\n\nexport async function getHeaders(url: RequestUrl): Promise<Headers> {\n const headers = new Headers();\n headers.append('Content-Type', 'application/json');\n headers.append('x-goog-api-client', getClientHeaders());\n headers.append('x-goog-api-key', url.apiSettings.apiKey);\n if (url.apiSettings.automaticDataCollectionEnabled) {\n headers.append('X-Firebase-Appid', url.apiSettings.appId);\n }\n if (url.apiSettings.getAppCheckToken) {\n const appCheckToken = await url.apiSettings.getAppCheckToken();\n if (appCheckToken) {\n headers.append('X-Firebase-AppCheck', appCheckToken.token);\n if (appCheckToken.error) {\n logger.warn(\n `Unable to obtain a valid App Check token: ${appCheckToken.error.message}`\n );\n }\n }\n }\n\n if (url.apiSettings.getAuthToken) {\n const authToken = await url.apiSettings.getAuthToken();\n if (authToken) {\n headers.append('Authorization', `Firebase ${authToken.accessToken}`);\n }\n }\n\n return headers;\n}\n\nexport async function constructRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<{ url: string; fetchOptions: RequestInit }> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n return {\n url: url.toString(),\n fetchOptions: {\n method: 'POST',\n headers: await getHeaders(url),\n body\n }\n };\n}\n\nexport async function makeRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<Response> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n let response;\n let fetchTimeoutId: string | number | NodeJS.Timeout | undefined;\n try {\n const request = await constructRequest(\n model,\n task,\n apiSettings,\n stream,\n body,\n requestOptions\n );\n // Timeout is 180s by default\n const timeoutMillis =\n requestOptions?.timeout != null && requestOptions.timeout >= 0\n ? 
requestOptions.timeout\n : DEFAULT_FETCH_TIMEOUT_MS;\n const abortController = new AbortController();\n fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis);\n request.fetchOptions.signal = abortController.signal;\n\n response = await fetch(request.url, request.fetchOptions);\n if (!response.ok) {\n let message = '';\n let errorDetails;\n try {\n const json = await response.json();\n message = json.error.message;\n if (json.error.details) {\n message += ` ${JSON.stringify(json.error.details)}`;\n errorDetails = json.error.details;\n }\n } catch (e) {\n // ignored\n }\n if (\n response.status === 403 &&\n errorDetails &&\n errorDetails.some(\n (detail: ErrorDetails) => detail.reason === 'SERVICE_DISABLED'\n ) &&\n errorDetails.some((detail: ErrorDetails) =>\n (\n detail.links as Array<Record<string, string>>\n )?.[0]?.description.includes(\n 'Google developers console API activation'\n )\n )\n ) {\n throw new AIError(\n AIErrorCode.API_NOT_ENABLED,\n `The Firebase AI SDK requires the Firebase AI ` +\n `API ('firebasevertexai.googleapis.com') to be enabled in your ` +\n `Firebase project. Enable this API by visiting the Firebase Console ` +\n `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` +\n `and clicking \"Get started\". If you enabled this API recently, ` +\n `wait a few minutes for the action to propagate to our systems and ` +\n `then retry.`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n throw new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n } catch (e) {\n let err = e as Error;\n if (\n (e as AIError).code !== AIErrorCode.FETCH_ERROR &&\n (e as AIError).code !== AIErrorCode.API_NOT_ENABLED &&\n e instanceof Error\n ) {\n err = new AIError(\n AIErrorCode.ERROR,\n `Error fetching from ${url.toString()}: ${e.message}`\n );\n err.stack = e.stack;\n }\n\n throw err;\n } finally {\n if (fetchTimeoutId) {\n clearTimeout(fetchTimeoutId);\n }\n }\n return response;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n FinishReason,\n FunctionCall,\n GenerateContentCandidate,\n GenerateContentResponse,\n ImagenGCSImage,\n ImagenInlineImage,\n AIErrorCode,\n InlineDataPart,\n Part,\n InferenceSource\n} from '../types';\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport { ImagenResponseInternal } from '../types/internal';\n\n/**\n * Check that at least one candidate exists and does not have a bad\n * finish reason. 
Warns if multiple candidates exist.\n */\nfunction hasValidCandidates(response: GenerateContentResponse): boolean {\n if (response.candidates && response.candidates.length > 0) {\n if (response.candidates.length > 1) {\n logger.warn(\n `This response had ${response.candidates.length} ` +\n `candidates. Returning text from the first candidate only. ` +\n `Access response.candidates directly to use the other candidates.`\n );\n }\n if (hadBadFinishReason(response.candidates[0])) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Response error: ${formatBlockErrorMessage(\n response\n )}. Response body stored in error.response`,\n {\n response\n }\n );\n }\n return true;\n } else {\n return false;\n }\n}\n\n/**\n * Creates an EnhancedGenerateContentResponse object that has helper functions and\n * other modifications that improve usability.\n */\nexport function createEnhancedContentResponse(\n response: GenerateContentResponse,\n inferenceSource: InferenceSource = InferenceSource.IN_CLOUD\n): EnhancedGenerateContentResponse {\n /**\n * The Vertex AI backend omits default values.\n * This causes the `index` property to be omitted from the first candidate in the\n * response, since it has index 0, and 0 is a default value.\n * See: https://github.com/firebase/firebase-js-sdk/issues/8566\n */\n if (response.candidates && !response.candidates[0].hasOwnProperty('index')) {\n response.candidates[0].index = 0;\n }\n\n const responseWithHelpers = addHelpers(response);\n responseWithHelpers.inferenceSource = inferenceSource;\n return responseWithHelpers;\n}\n\n/**\n * Adds convenience helper methods to a response object, including stream\n * chunks (as long as each chunk is a complete GenerateContentResponse JSON).\n */\nexport function addHelpers(\n response: GenerateContentResponse\n): EnhancedGenerateContentResponse {\n (response as EnhancedGenerateContentResponse).text = () => {\n if (hasValidCandidates(response)) {\n return getText(response, part => !part.thought);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Text not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return '';\n };\n (response as EnhancedGenerateContentResponse).thoughtSummary = () => {\n if (hasValidCandidates(response)) {\n const result = getText(response, part => !!part.thought);\n return result === '' ? undefined : result;\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Thought summary not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).inlineDataParts = ():\n | InlineDataPart[]\n | undefined => {\n if (hasValidCandidates(response)) {\n return getInlineDataParts(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Data not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).functionCalls = () => {\n if (hasValidCandidates(response)) {\n return getFunctionCalls(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Function call not available. 
${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n return response as EnhancedGenerateContentResponse;\n}\n\n/**\n * Returns all text from the first candidate's parts, filtering by whether\n * `partFilter()` returns true.\n *\n * @param response - The `GenerateContentResponse` from which to extract text.\n * @param partFilter - Only return `Part`s for which this returns true\n */\nexport function getText(\n response: GenerateContentResponse,\n partFilter: (part: Part) => boolean\n): string {\n const textStrings = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.text && partFilter(part)) {\n textStrings.push(part.text);\n }\n }\n }\n if (textStrings.length > 0) {\n return textStrings.join('');\n } else {\n return '';\n }\n}\n\n/**\n * Returns every {@link FunctionCall} associated with first candidate.\n */\nexport function getFunctionCalls(\n response: GenerateContentResponse\n): FunctionCall[] | undefined {\n const functionCalls: FunctionCall[] = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.functionCall) {\n functionCalls.push(part.functionCall);\n }\n }\n }\n if (functionCalls.length > 0) {\n return functionCalls;\n } else {\n return undefined;\n }\n}\n\n/**\n * Returns every {@link InlineDataPart} in the first candidate if present.\n *\n * @internal\n */\nexport function getInlineDataParts(\n response: GenerateContentResponse\n): InlineDataPart[] | undefined {\n const data: InlineDataPart[] = [];\n\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.inlineData) {\n data.push(part);\n }\n }\n }\n\n if (data.length > 0) {\n return data;\n } else {\n return undefined;\n }\n}\n\nconst badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY];\n\nfunction hadBadFinishReason(candidate: GenerateContentCandidate): boolean {\n return (\n !!candidate.finishReason &&\n badFinishReasons.some(reason => reason === candidate.finishReason)\n );\n}\n\nexport function formatBlockErrorMessage(\n response: GenerateContentResponse\n): string {\n let message = '';\n if (\n (!response.candidates || response.candidates.length === 0) &&\n response.promptFeedback\n ) {\n message += 'Response was blocked';\n if (response.promptFeedback?.blockReason) {\n message += ` due to ${response.promptFeedback.blockReason}`;\n }\n if (response.promptFeedback?.blockReasonMessage) {\n message += `: ${response.promptFeedback.blockReasonMessage}`;\n }\n } else if (response.candidates?.[0]) {\n const firstCandidate = response.candidates[0];\n if (hadBadFinishReason(firstCandidate)) {\n message += `Candidate was blocked due to ${firstCandidate.finishReason}`;\n if (firstCandidate.finishMessage) {\n message += `: ${firstCandidate.finishMessage}`;\n }\n }\n }\n return message;\n}\n\n/**\n * Convert a generic successful fetch response body to an Imagen response object\n * that can be returned to the user. 
This converts the REST APIs response format to our\n * APIs representation of a response.\n *\n * @internal\n */\nexport async function handlePredictResponse<\n T extends ImagenInlineImage | ImagenGCSImage\n>(response: Response): Promise<{ images: T[]; filteredReason?: string }> {\n const responseJson: ImagenResponseInternal = await response.json();\n\n const images: T[] = [];\n let filteredReason: string | undefined = undefined;\n\n // The backend should always send a non-empty array of predictions if the response was successful.\n if (!responseJson.predictions || responseJson.predictions?.length === 0) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'\n );\n }\n\n for (const prediction of responseJson.predictions) {\n if (prediction.raiFilteredReason) {\n filteredReason = prediction.raiFilteredReason;\n } else if (prediction.mimeType && prediction.bytesBase64Encoded) {\n images.push({\n mimeType: prediction.mimeType,\n bytesBase64Encoded: prediction.bytesBase64Encoded\n } as T);\n } else if (prediction.mimeType && prediction.gcsUri) {\n images.push({\n mimeType: prediction.mimeType,\n gcsURI: prediction.gcsUri\n } as T);\n } else if (prediction.safetyAttributes) {\n // Ignore safetyAttributes \"prediction\" to avoid throwing an error below.\n } else {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Unexpected element in 'predictions' array in response: '${JSON.stringify(\n prediction\n )}'`\n );\n }\n }\n\n return { images, filteredReason };\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport {\n CitationMetadata,\n CountTokensRequest,\n GenerateContentCandidate,\n GenerateContentRequest,\n GenerateContentResponse,\n HarmSeverity,\n InlineDataPart,\n PromptFeedback,\n SafetyRating,\n AIErrorCode\n} from './types';\nimport {\n GoogleAIGenerateContentResponse,\n GoogleAIGenerateContentCandidate,\n GoogleAICountTokensRequest\n} from './types/googleai';\n\n/**\n * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).\n * The public API prioritizes the format used by the Vertex AI Gemini API.\n * We avoid having two sets of types by translating requests and responses between the two API formats.\n * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API\n * with minimal code changes.\n *\n * In here are functions that map requests and responses between the two API formats.\n * Requests in the Vertex AI format are mapped to the Google AI format before being sent.\n * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.\n */\n\n/**\n * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google 
AI.\n *\n * @param generateContentRequest The {@link GenerateContentRequest} to map.\n * @returns A {@link GenerateContentResponse} that conforms to the Google AI format.\n *\n * @throws If the request contains properties that are unsupported by Google AI.\n *\n * @internal\n */\nexport function mapGenerateContentRequest(\n generateContentRequest: GenerateContentRequest\n): GenerateContentRequest {\n generateContentRequest.safetySettings?.forEach(safetySetting => {\n if (safetySetting.method) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'SafetySetting.method is not supported in the Gemini Developer API. Please remove this property.'\n );\n }\n });\n\n if (generateContentRequest.generationConfig?.topK) {\n const roundedTopK = Math.round(\n generateContentRequest.generationConfig.topK\n );\n\n if (roundedTopK !== generateContentRequest.generationConfig.topK) {\n logger.warn(\n 'topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.'\n );\n generateContentRequest.generationConfig.topK = roundedTopK;\n }\n }\n\n return generateContentRequest;\n}\n\n/**\n * Maps a {@link GenerateContentResponse} from Google AI to the format of the\n * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.\n *\n * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.\n * @returns A {@link GenerateContentResponse} that conforms to the public API's format.\n *\n * @internal\n */\nexport function mapGenerateContentResponse(\n googleAIResponse: GoogleAIGenerateContentResponse\n): GenerateContentResponse {\n const generateContentResponse = {\n candidates: googleAIResponse.candidates\n ? mapGenerateContentCandidates(googleAIResponse.candidates)\n : undefined,\n prompt: googleAIResponse.promptFeedback\n ? 
mapPromptFeedback(googleAIResponse.promptFeedback)\n : undefined,\n usageMetadata: googleAIResponse.usageMetadata\n };\n\n return generateContentResponse;\n}\n\n/**\n * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.\n *\n * @param countTokensRequest The {@link CountTokensRequest} to map.\n * @param model The model to count tokens with.\n * @returns A {@link CountTokensRequest} that conforms to the Google AI format.\n *\n * @internal\n */\nexport function mapCountTokensRequest(\n countTokensRequest: CountTokensRequest,\n model: string\n): GoogleAICountTokensRequest {\n const mappedCountTokensRequest: GoogleAICountTokensRequest = {\n generateContentRequest: {\n model,\n ...countTokensRequest\n }\n };\n\n return mappedCountTokensRequest;\n}\n\n/**\n * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms\n * to the Vertex AI API format.\n *\n * @param candidates The {@link GoogleAIGenerateContentCandidate} to map.\n * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format.\n *\n * @throws If any {@link Part} in the candidates has a `videoMetadata` property.\n *\n * @internal\n */\nexport function mapGenerateContentCandidates(\n candidates: GoogleAIGenerateContentCandidate[]\n): GenerateContentCandidate[] {\n const mappedCandidates: GenerateContentCandidate[] = [];\n let mappedSafetyRatings: SafetyRating[];\n if (mappedCandidates) {\n candidates.forEach(candidate => {\n // Map citationSources to citations.\n let citationMetadata: CitationMetadata | undefined;\n if (candidate.citationMetadata) {\n citationMetadata = {\n citations: candidate.citationMetadata.citationSources\n };\n }\n\n // Assign missing candidate SafetyRatings properties to their defaults if undefined.\n if (candidate.safetyRatings) {\n mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => {\n return {\n ...safetyRating,\n severity:\n safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 0\n };\n });\n }\n\n // videoMetadata is not supported.\n // Throw early since developers may send a long video as input and only expect to pay\n // for inference on a small portion of the video.\n if (\n candidate.content?.parts?.some(\n part => (part as InlineDataPart)?.videoMetadata\n )\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'\n );\n }\n\n const mappedCandidate = {\n index: candidate.index,\n content: candidate.content,\n finishReason: candidate.finishReason,\n finishMessage: candidate.finishMessage,\n safetyRatings: mappedSafetyRatings,\n citationMetadata,\n groundingMetadata: candidate.groundingMetadata,\n urlContextMetadata: candidate.urlContextMetadata\n };\n mappedCandidates.push(mappedCandidate);\n });\n }\n\n return mappedCandidates;\n}\n\nexport function mapPromptFeedback(\n promptFeedback: PromptFeedback\n): PromptFeedback {\n // Assign missing SafetyRating properties to their defaults if undefined.\n const mappedSafetyRatings: SafetyRating[] = [];\n promptFeedback.safetyRatings.forEach(safetyRating => {\n mappedSafetyRatings.push({\n category: safetyRating.category,\n probability: safetyRating.probability,\n severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 
0,\n blocked: safetyRating.blocked\n });\n });\n\n const mappedPromptFeedback: PromptFeedback = {\n blockReason: promptFeedback.blockReason,\n safetyRatings: mappedSafetyRatings,\n blockReasonMessage: promptFeedback.blockReasonMessage\n };\n return mappedPromptFeedback;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n GenerateContentCandidate,\n GenerateContentResponse,\n GenerateContentStreamResult,\n Part,\n AIErrorCode\n} from '../types';\nimport { AIError } from '../errors';\nimport { createEnhancedContentResponse } from './response-helpers';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { GoogleAIGenerateContentResponse } from '../types/googleai';\nimport { ApiSettings } from '../types/internal';\nimport {\n BackendType,\n InferenceSource,\n URLContextMetadata\n} from '../public-types';\n\nconst responseLineRE = /^data\\: (.*)(?:\\n\\n|\\r\\r|\\r\\n\\r\\n)/;\n\n/**\n * Process a response.body stream from the backend and return an\n * iterator that provides one complete GenerateContentResponse at a time\n * and a promise that resolves with a single aggregated\n * GenerateContentResponse.\n *\n * @param response - Response from a fetch call\n */\nexport function processStream(\n response: Response,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): GenerateContentStreamResult {\n const inputStream = response.body!.pipeThrough(\n new TextDecoderStream('utf8', { fatal: true })\n );\n const responseStream =\n getResponseStream<GenerateContentResponse>(inputStream);\n const [stream1, stream2] = responseStream.tee();\n return {\n stream: generateResponseSequence(stream1, apiSettings, inferenceSource),\n response: getResponsePromise(stream2, apiSettings, inferenceSource)\n };\n}\n\nasync function getResponsePromise(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): Promise<EnhancedGenerateContentResponse> {\n const allResponses: GenerateContentResponse[] = [];\n const reader = stream.getReader();\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n let generateContentResponse = aggregateResponses(allResponses);\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n generateContentResponse = GoogleAIMapper.mapGenerateContentResponse(\n generateContentResponse as GoogleAIGenerateContentResponse\n );\n }\n return createEnhancedContentResponse(\n generateContentResponse,\n inferenceSource\n );\n }\n\n allResponses.push(value);\n }\n}\n\nasync function* generateResponseSequence(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): AsyncGenerator<EnhancedGenerateContentResponse> {\n const reader = stream.getReader();\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n break;\n }\n\n let enhancedResponse: EnhancedGenerateContentResponse;\n if 
(apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n enhancedResponse = createEnhancedContentResponse(\n GoogleAIMapper.mapGenerateContentResponse(\n value as GoogleAIGenerateContentResponse\n ),\n inferenceSource\n );\n } else {\n enhancedResponse = createEnhancedContentResponse(value, inferenceSource);\n }\n\n const firstCandidate = enhancedResponse.candidates?.[0];\n // Don't yield a response with no useful data for the developer.\n if (\n !firstCandidate?.content?.parts &&\n !firstCandidate?.finishReason &&\n !firstCandidate?.citationMetadata &&\n !firstCandidate?.urlContextMetadata\n ) {\n continue;\n }\n\n yield enhancedResponse;\n }\n}\n\n/**\n * Reads a raw stream from the fetch response and join incomplete\n * chunks, returning a new stream that provides a single complete\n * GenerateContentResponse in each iteration.\n */\nexport function getResponseStream<T>(\n inputStream: ReadableStream<string>\n): ReadableStream<T> {\n const reader = inputStream.getReader();\n const stream = new ReadableStream<T>({\n start(controller) {\n let currentText = '';\n return pump();\n function pump(): Promise<(() => Promise<void>) | undefined> {\n return reader.read().then(({ value, done }) => {\n if (done) {\n if (currentText.trim()) {\n controller.error(\n new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')\n );\n return;\n }\n controller.close();\n return;\n }\n\n currentText += value;\n let match = currentText.match(responseLineRE);\n let parsedResponse: T;\n while (match) {\n try {\n parsedResponse = JSON.parse(match[1]);\n } catch (e) {\n controller.error(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing JSON response: \"${match[1]}`\n )\n );\n return;\n }\n controller.enqueue(parsedResponse);\n currentText = currentText.substring(match[0].length);\n match = currentText.match(responseLineRE);\n }\n return pump();\n });\n }\n }\n });\n return stream;\n}\n\n/**\n * Aggregates an array of `GenerateContentResponse`s into a single\n * GenerateContentResponse.\n */\nexport function aggregateResponses(\n responses: GenerateContentResponse[]\n): GenerateContentResponse {\n const lastResponse = responses[responses.length - 1];\n const aggregatedResponse: GenerateContentResponse = {\n promptFeedback: lastResponse?.promptFeedback\n };\n for (const response of responses) {\n if (response.candidates) {\n for (const candidate of response.candidates) {\n // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined.\n // See: https://github.com/firebase/firebase-js-sdk/issues/8566\n const i = candidate.index || 0;\n if (!aggregatedResponse.candidates) {\n aggregatedResponse.candidates = [];\n }\n if (!aggregatedResponse.candidates[i]) {\n aggregatedResponse.candidates[i] = {\n index: candidate.index\n } as GenerateContentCandidate;\n }\n // Keep overwriting, the last one will be final\n aggregatedResponse.candidates[i].citationMetadata =\n candidate.citationMetadata;\n aggregatedResponse.candidates[i].finishReason = candidate.finishReason;\n aggregatedResponse.candidates[i].finishMessage =\n candidate.finishMessage;\n aggregatedResponse.candidates[i].safetyRatings =\n candidate.safetyRatings;\n aggregatedResponse.candidates[i].groundingMetadata =\n candidate.groundingMetadata;\n\n // The urlContextMetadata object is defined in the first chunk of the response stream.\n // In all subsequent chunks, the urlContextMetadata object will be undefined. 
We need to\n // make sure that we don't overwrite the first value urlContextMetadata object with undefined.\n // FIXME: What happens if we receive a second, valid urlContextMetadata object?\n const urlContextMetadata = candidate.urlContextMetadata as unknown;\n if (\n typeof urlContextMetadata === 'object' &&\n urlContextMetadata !== null &&\n Object.keys(urlContextMetadata).length > 0\n ) {\n aggregatedResponse.candidates[i].urlContextMetadata =\n urlContextMetadata as URLContextMetadata;\n }\n\n /**\n * Candidates should always have content and parts, but this handles\n * possible malformed responses.\n */\n if (candidate.content) {\n // Skip a candidate without parts.\n if (!candidate.content.parts) {\n continue;\n }\n if (!aggregatedResponse.candidates[i].content) {\n aggregatedResponse.candidates[i].content = {\n role: candidate.content.role || 'user',\n parts: []\n };\n }\n for (const part of candidate.content.parts) {\n const newPart: Part = { ...part };\n // The backend can send empty text parts. If these are sent back\n // (e.g. in chat history), the backend will respond with an error.\n // To prevent this, ignore empty text parts.\n if (part.text === '') {\n continue;\n }\n if (Object.keys(newPart).length > 0) {\n aggregatedResponse.candidates[i].content.parts.push(\n newPart as Part\n );\n }\n }\n }\n }\n }\n }\n return aggregatedResponse;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n GenerateContentRequest,\n InferenceMode,\n AIErrorCode,\n ChromeAdapter,\n InferenceSource\n} from '../types';\nimport { ChromeAdapterImpl } from '../methods/chrome-adapter';\n\nconst errorsCausingFallback: AIErrorCode[] = [\n // most network errors\n AIErrorCode.FETCH_ERROR,\n // fallback code for all other errors in makeRequest\n AIErrorCode.ERROR,\n // error due to API not being enabled in project\n AIErrorCode.API_NOT_ENABLED\n];\n\ninterface CallResult<Response> {\n response: Response;\n inferenceSource: InferenceSource;\n}\n\n/**\n * Dispatches a request to the appropriate backend (on-device or in-cloud)\n * based on the inference mode.\n *\n * @param request - The request to be sent.\n * @param chromeAdapter - The on-device model adapter.\n * @param onDeviceCall - The function to call for on-device inference.\n * @param inCloudCall - The function to call for in-cloud inference.\n * @returns The response from the backend.\n */\nexport async function callCloudOrDevice<Response>(\n request: GenerateContentRequest,\n chromeAdapter: ChromeAdapter | undefined,\n onDeviceCall: () => Promise<Response>,\n inCloudCall: () => Promise<Response>\n): Promise<CallResult<Response>> {\n if (!chromeAdapter) {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n }\n switch ((chromeAdapter as ChromeAdapterImpl).mode) {\n case InferenceMode.ONLY_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await 
onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.'\n );\n case InferenceMode.ONLY_IN_CLOUD:\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n case InferenceMode.PREFER_IN_CLOUD:\n try {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n } catch (e) {\n if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw e;\n }\n case InferenceMode.PREFER_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Unexpected inference mode: ${\n (chromeAdapter as ChromeAdapterImpl).mode\n }`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n GenerateContentRequest,\n GenerateContentResponse,\n GenerateContentResult,\n GenerateContentStreamResult,\n RequestOptions\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createEnhancedContentResponse } from '../requests/response-helpers';\nimport { processStream } from '../requests/stream-reader';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { callCloudOrDevice } from '../requests/hybrid-helpers';\n\nasync function generateContentStreamOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.STREAM_GENERATE_CONTENT,\n apiSettings,\n /* stream */ true,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContentStream(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentStreamResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContentStream(params),\n () =>\n generateContentStreamOnCloud(apiSettings, model, params, requestOptions)\n );\n return processStream(callResult.response, apiSettings); // TODO: Map streaming responses\n}\n\nasync function generateContentOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n 
if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.GENERATE_CONTENT,\n apiSettings,\n /* stream */ false,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContent(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContent(params),\n () => generateContentOnCloud(apiSettings, model, params, requestOptions)\n );\n const generateContentResponse = await processGenerateContentResponse(\n callResult.response,\n apiSettings\n );\n const enhancedResponse = createEnhancedContentResponse(\n generateContentResponse,\n callResult.inferenceSource\n );\n return {\n response: enhancedResponse\n };\n}\n\nasync function processGenerateContentResponse(\n response: Response,\n apiSettings: ApiSettings\n): Promise<GenerateContentResponse> {\n const responseJson = await response.json();\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return GoogleAIMapper.mapGenerateContentResponse(responseJson);\n } else {\n return responseJson;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, GenerateContentRequest, Part, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ImagenGenerationParams, PredictRequestBody } from '../types/internal';\n\nexport function formatSystemInstruction(\n input?: string | Part | Content\n): Content | undefined {\n // null or undefined\n if (input == null) {\n return undefined;\n } else if (typeof input === 'string') {\n return { role: 'system', parts: [{ text: input }] } as Content;\n } else if ((input as Part).text) {\n return { role: 'system', parts: [input as Part] };\n } else if ((input as Content).parts) {\n if (!(input as Content).role) {\n return { role: 'system', parts: (input as Content).parts };\n } else {\n return input as Content;\n }\n }\n}\n\nexport function formatNewContent(\n request: string | Array<string | Part>\n): Content {\n let newParts: Part[] = [];\n if (typeof request === 'string') {\n newParts = [{ text: request }];\n } else {\n for (const partOrString of request) {\n if (typeof partOrString === 'string') {\n newParts.push({ text: partOrString });\n } else {\n newParts.push(partOrString);\n }\n }\n }\n return assignRoleToPartsAndValidateSendMessageRequest(newParts);\n}\n\n/**\n * When multiple Part types (i.e. FunctionResponsePart and TextPart) are\n * passed in a single Part array, we may need to assign different roles to each\n * part. 
Currently only FunctionResponsePart requires a role other than 'user'.\n * @private\n * @param parts Array of parts to pass to the model\n * @returns Array of content items\n */\nfunction assignRoleToPartsAndValidateSendMessageRequest(\n parts: Part[]\n): Content {\n const userContent: Content = { role: 'user', parts: [] };\n const functionContent: Content = { role: 'function', parts: [] };\n let hasUserContent = false;\n let hasFunctionContent = false;\n for (const part of parts) {\n if ('functionResponse' in part) {\n functionContent.parts.push(part);\n hasFunctionContent = true;\n } else {\n userContent.parts.push(part);\n hasUserContent = true;\n }\n }\n\n if (hasUserContent && hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'\n );\n }\n\n if (!hasUserContent && !hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'No Content is provided for sending chat message.'\n );\n }\n\n if (hasUserContent) {\n return userContent;\n }\n\n return functionContent;\n}\n\nexport function formatGenerateContentInput(\n params: GenerateContentRequest | string | Array<string | Part>\n): GenerateContentRequest {\n let formattedRequest: GenerateContentRequest;\n if ((params as GenerateContentRequest).contents) {\n formattedRequest = params as GenerateContentRequest;\n } else {\n // Array or string\n const content = formatNewContent(params as string | Array<string | Part>);\n formattedRequest = { contents: [content] };\n }\n if ((params as GenerateContentRequest).systemInstruction) {\n formattedRequest.systemInstruction = formatSystemInstruction(\n (params as GenerateContentRequest).systemInstruction\n );\n }\n return formattedRequest;\n}\n\n/**\n * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format\n * that is expected from the REST API.\n *\n * @internal\n */\nexport function createPredictRequestBody(\n prompt: string,\n {\n gcsURI,\n imageFormat,\n addWatermark,\n numberOfImages = 1,\n negativePrompt,\n aspectRatio,\n safetyFilterLevel,\n personFilterLevel\n }: ImagenGenerationParams\n): PredictRequestBody {\n // Properties that are undefined will be omitted from the JSON string that is sent in the request.\n const body: PredictRequestBody = {\n instances: [\n {\n prompt\n }\n ],\n parameters: {\n storageUri: gcsURI,\n negativePrompt,\n sampleCount: numberOfImages,\n aspectRatio,\n outputOptions: imageFormat,\n addWatermark,\n safetyFilterLevel,\n personGeneration: personFilterLevel,\n includeRaiReason: true,\n includeSafetyAttributes: true\n }\n };\n return body;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, POSSIBLE_ROLES, Part, Role, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\n\n// https://ai.google.dev/api/rest/v1beta/Content#part\n\nconst VALID_PART_FIELDS: Array<keyof Part> = 
[\n 'text',\n 'inlineData',\n 'functionCall',\n 'functionResponse',\n 'thought',\n 'thoughtSignature'\n];\n\nconst VALID_PARTS_PER_ROLE: { [key in Role]: Array<keyof Part> } = {\n user: ['text', 'inlineData'],\n function: ['functionResponse'],\n model: ['text', 'functionCall', 'thought', 'thoughtSignature'],\n // System instructions shouldn't be in history anyway.\n system: ['text']\n};\n\nconst VALID_PREVIOUS_CONTENT_ROLES: { [key in Role]: Role[] } = {\n user: ['model'],\n function: ['model'],\n model: ['user', 'function'],\n // System instructions shouldn't be in history.\n system: []\n};\n\nexport function validateChatHistory(history: Content[]): void {\n let prevContent: Content | null = null;\n for (const currContent of history) {\n const { role, parts } = currContent;\n if (!prevContent && role !== 'user') {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `First Content should be with role 'user', got ${role}`\n );\n }\n if (!POSSIBLE_ROLES.includes(role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(\n POSSIBLE_ROLES\n )}`\n );\n }\n\n if (!Array.isArray(parts)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content should have 'parts' property with an array of Parts`\n );\n }\n\n if (parts.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each Content should have at least one part`\n );\n }\n\n const countFields: Record<keyof Part, number> = {\n text: 0,\n inlineData: 0,\n functionCall: 0,\n functionResponse: 0,\n thought: 0,\n thoughtSignature: 0,\n executableCode: 0,\n codeExecutionResult: 0\n };\n\n for (const part of parts) {\n for (const key of VALID_PART_FIELDS) {\n if (key in part) {\n countFields[key] += 1;\n }\n }\n }\n const validParts = VALID_PARTS_PER_ROLE[role];\n for (const key of VALID_PART_FIELDS) {\n if (!validParts.includes(key) && countFields[key] > 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't contain '${key}' part`\n );\n }\n }\n\n if (prevContent) {\n const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role];\n if (!validPreviousContentRoles.includes(prevContent.role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't follow '${\n prevContent.role\n }'. 
Valid previous roles: ${JSON.stringify(\n VALID_PREVIOUS_CONTENT_ROLES\n )}`\n );\n }\n }\n prevContent = currContent;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n Content,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n Part,\n RequestOptions,\n StartChatParams\n} from '../types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { formatBlockErrorMessage } from '../requests/response-helpers';\nimport { validateChatHistory } from './chat-session-helpers';\nimport { generateContent, generateContentStream } from './generate-content';\nimport { ApiSettings } from '../types/internal';\nimport { logger } from '../logger';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Do not log a message for this error.\n */\nconst SILENT_ERROR = 'SILENT_ERROR';\n\n/**\n * ChatSession class that enables sending chat messages and stores\n * history of sent and received messages so far.\n *\n * @public\n */\nexport class ChatSession {\n private _apiSettings: ApiSettings;\n private _history: Content[] = [];\n private _sendPromise: Promise<void> = Promise.resolve();\n\n constructor(\n apiSettings: ApiSettings,\n public model: string,\n private chromeAdapter?: ChromeAdapter,\n public params?: StartChatParams,\n public requestOptions?: RequestOptions\n ) {\n this._apiSettings = apiSettings;\n if (params?.history) {\n validateChatHistory(params.history);\n this._history = params.history;\n }\n }\n\n /**\n * Gets the chat history so far. 
Blocked prompts are not added to history.\n * Neither blocked candidates nor the prompts that generated them are added\n * to history.\n */\n async getHistory(): Promise<Content[]> {\n await this._sendPromise;\n return this._history;\n }\n\n /**\n * Sends a chat message and receives a non-streaming\n * {@link GenerateContentResult}\n */\n async sendMessage(\n request: string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n let finalResult = {} as GenerateContentResult;\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() =>\n generateContent(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n )\n )\n .then(result => {\n if (\n result.response.candidates &&\n result.response.candidates.length > 0\n ) {\n this._history.push(newContent);\n const responseContent: Content = {\n parts: result.response.candidates?.[0].content.parts || [],\n // Response seems to come back without a role set.\n role: result.response.candidates?.[0].content.role || 'model'\n };\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(result.response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`\n );\n }\n }\n finalResult = result;\n });\n await this._sendPromise;\n return finalResult;\n }\n\n /**\n * Sends a chat message and receives the response as a\n * {@link GenerateContentStreamResult} containing an iterable stream\n * and a response promise.\n */\n async sendMessageStream(\n request: string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n const streamPromise = generateContentStream(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n );\n\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() => streamPromise)\n // This must be handled to avoid unhandled rejection, but jump\n // to the final catch block with a label to not log this error.\n .catch(_ignored => {\n throw new Error(SILENT_ERROR);\n })\n .then(streamResult => streamResult.response)\n .then(response => {\n if (response.candidates && response.candidates.length > 0) {\n this._history.push(newContent);\n const responseContent = { ...response.candidates[0].content };\n // Response seems to come back without a role set.\n if (!responseContent.role) {\n responseContent.role = 'model';\n }\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessageStream() was unsuccessful. ${blockErrorMessage}. 
Inspect response object for details.`\n );\n }\n }\n })\n .catch(e => {\n // Errors in streamPromise are already catchable by the user as\n // streamPromise is returned.\n // Avoid duplicating the error message in logs.\n if (e.message !== SILENT_ERROR) {\n // Users do not have access to _sendPromise to catch errors\n // downstream from streamPromise, so they should not throw.\n logger.error(e);\n }\n });\n return streamPromise;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n CountTokensRequest,\n CountTokensResponse,\n InferenceMode,\n RequestOptions,\n AIErrorCode\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { ChromeAdapterImpl } from './chrome-adapter';\n\nexport async function countTokensOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n let body: string = '';\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n const mappedParams = GoogleAIMapper.mapCountTokensRequest(params, model);\n body = JSON.stringify(mappedParams);\n } else {\n body = JSON.stringify(params);\n }\n const response = await makeRequest(\n model,\n Task.COUNT_TOKENS,\n apiSettings,\n false,\n body,\n requestOptions\n );\n return response.json();\n}\n\nexport async function countTokens(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n if (\n (chromeAdapter as ChromeAdapterImpl)?.mode === InferenceMode.ONLY_ON_DEVICE\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'countTokens() is not supported for on-device models.'\n );\n }\n return countTokensOnCloud(apiSettings, model, params, requestOptions);\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n generateContent,\n generateContentStream\n} from '../methods/generate-content';\nimport {\n Content,\n CountTokensRequest,\n CountTokensResponse,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n GenerationConfig,\n ModelParams,\n 
Part,\n RequestOptions,\n SafetySetting,\n StartChatParams,\n Tool,\n ToolConfig\n} from '../types';\nimport { ChatSession } from '../methods/chat-session';\nimport { countTokens } from '../methods/count-tokens';\nimport {\n formatGenerateContentInput,\n formatSystemInstruction\n} from '../requests/request-helpers';\nimport { AI } from '../public-types';\nimport { AIModel } from './ai-model';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Class for generative model APIs.\n * @public\n */\nexport class GenerativeModel extends AIModel {\n generationConfig: GenerationConfig;\n safetySettings: SafetySetting[];\n requestOptions?: RequestOptions;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n constructor(\n ai: AI,\n modelParams: ModelParams,\n requestOptions?: RequestOptions,\n private chromeAdapter?: ChromeAdapter\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.safetySettings = modelParams.safetySettings || [];\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n this.requestOptions = requestOptions || {};\n }\n\n /**\n * Makes a single non-streaming call to the model\n * and returns an object containing a single {@link GenerateContentResponse}.\n */\n async generateContent(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContent(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Makes a single streaming call to the model\n * and returns an object containing an iterable stream that iterates\n * over all chunks in the streaming response as well as\n * a promise that returns the final aggregated response.\n */\n async generateContentStream(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContentStream(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Gets a new {@link ChatSession} instance which can be used for\n * multi-turn chats.\n */\n startChat(startChatParams?: StartChatParams): ChatSession {\n return new ChatSession(\n this._apiSettings,\n this.model,\n this.chromeAdapter,\n {\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n /**\n * Overrides params inherited from GenerativeModel with those explicitly set in the\n * StartChatParams. 
For example, if startChatParams.generationConfig is set, it'll override\n * this.generationConfig.\n */\n ...startChatParams\n },\n this.requestOptions\n );\n }\n\n /**\n * Counts the tokens in the provided request.\n */\n async countTokens(\n request: CountTokensRequest | string | Array<string | Part>\n ): Promise<CountTokensResponse> {\n const formattedParams = formatGenerateContentInput(request);\n return countTokens(\n this._apiSettings,\n this.model,\n formattedParams,\n this.chromeAdapter\n );\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n AIErrorCode,\n FunctionResponse,\n GenerativeContentBlob,\n LiveResponseType,\n LiveServerContent,\n LiveServerToolCall,\n LiveServerToolCallCancellation,\n Part\n} from '../public-types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { AIError } from '../errors';\nimport { WebSocketHandler } from '../websocket';\nimport { logger } from '../logger';\nimport {\n _LiveClientContent,\n _LiveClientRealtimeInput,\n _LiveClientToolResponse\n} from '../types/live-responses';\n\n/**\n * Represents an active, real-time, bidirectional conversation with the model.\n *\n * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.\n *\n * @beta\n */\nexport class LiveSession {\n /**\n * Indicates whether this Live session is closed.\n *\n * @beta\n */\n isClosed = false;\n /**\n * Indicates whether this Live session is being controlled by an `AudioConversationController`.\n *\n * @beta\n */\n inConversation = false;\n\n /**\n * @internal\n */\n constructor(\n private webSocketHandler: WebSocketHandler,\n private serverMessages: AsyncGenerator<unknown>\n ) {}\n\n /**\n * Sends content to the server.\n *\n * @param request - The message to send to the model.\n * @param turnComplete - Indicates if the turn is complete. 
Defaults to false.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async send(\n request: string | Array<string | Part>,\n turnComplete = true\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const newContent = formatNewContent(request);\n\n const message: _LiveClientContent = {\n clientContent: {\n turns: [newContent],\n turnComplete\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends text to the server in realtime.\n *\n * @example\n * ```javascript\n * liveSession.sendTextRealtime(\"Hello, how are you?\");\n * ```\n *\n * @param text - The text data to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendTextRealtime(text: string): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n text\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends audio data to the server in realtime.\n *\n * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz\n * little-endian.\n *\n * @example\n * ```javascript\n * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.\n * const blob = { mimeType: \"audio/pcm\", data: pcmData };\n * liveSession.sendAudioRealtime(blob);\n * ```\n *\n * @param blob - The base64-encoded PCM data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendAudioRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n audio: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends video data to the server in realtime.\n *\n * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It\n * is recommended to set `mimeType` to `image/jpeg`.\n *\n * @example\n * ```javascript\n * // const videoFrame = ... 
base64-encoded JPEG data\n * const blob = { mimeType: \"image/jpeg\", data: videoFrame };\n * liveSession.sendVideoRealtime(blob);\n * ```\n * @param blob - The base64-encoded video data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendVideoRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n video: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends function responses to the server.\n *\n * @param functionResponses - The function responses to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendFunctionResponses(\n functionResponses: FunctionResponse[]\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientToolResponse = {\n toolResponse: {\n functionResponses\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Yields messages received from the server.\n * This can only be used by one consumer at a time.\n *\n * @returns An `AsyncGenerator` that yields server messages as they arrive.\n * @throws If the session is already closed, or if we receive a response that we don't support.\n *\n * @beta\n */\n async *receive(): AsyncGenerator<\n LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation\n > {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot read from a Live session that is closed. Try starting a new Live session.'\n );\n }\n for await (const message of this.serverMessages) {\n if (message && typeof message === 'object') {\n if (LiveResponseType.SERVER_CONTENT in message) {\n yield {\n type: 'serverContent',\n ...(message as { serverContent: Omit<LiveServerContent, 'type'> })\n .serverContent\n } as LiveServerContent;\n } else if (LiveResponseType.TOOL_CALL in message) {\n yield {\n type: 'toolCall',\n ...(message as { toolCall: Omit<LiveServerToolCall, 'type'> })\n .toolCall\n } as LiveServerToolCall;\n } else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) {\n yield {\n type: 'toolCallCancellation',\n ...(\n message as {\n toolCallCancellation: Omit<\n LiveServerToolCallCancellation,\n 'type'\n >;\n }\n ).toolCallCancellation\n } as LiveServerToolCallCancellation;\n } else {\n logger.warn(\n `Received an unknown message type from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n } else {\n logger.warn(\n `Received an invalid message from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n }\n }\n\n /**\n * Closes this session.\n * All methods on this session will throw an error once this resolves.\n *\n * @beta\n */\n async close(): Promise<void> {\n if (!this.isClosed) {\n this.isClosed = true;\n await this.webSocketHandler.close(1000, 'Client closed session.');\n }\n }\n\n /**\n * Sends realtime input to the server.\n *\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * @param mediaChunks - The media chunks to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed 
and cannot be used.'\n );\n }\n\n // The backend does not support sending more than one mediaChunk in one message.\n // Work around this limitation by sending mediaChunks in separate messages.\n mediaChunks.forEach(mediaChunk => {\n const message: _LiveClientRealtimeInput = {\n realtimeInput: { mediaChunks: [mediaChunk] }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n });\n }\n\n /**\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * Sends a stream of {@link GenerativeContentBlob}.\n *\n * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaStream(\n mediaChunkStream: ReadableStream<GenerativeContentBlob>\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const reader = mediaChunkStream.getReader();\n while (true) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n break;\n } else if (!value) {\n throw new Error('Missing chunk in reader, but reader is not done.');\n }\n\n await this.sendMediaChunks([value]);\n } catch (e) {\n // Re-throw any errors that occur during stream consumption or sending.\n const message =\n e instanceof Error ? e.message : 'Error processing media stream.';\n throw new AIError(AIErrorCode.REQUEST_ERROR, message);\n }\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIModel } from './ai-model';\nimport { LiveSession } from '../methods/live-session';\nimport { AIError } from '../errors';\nimport {\n AI,\n AIErrorCode,\n BackendType,\n Content,\n LiveGenerationConfig,\n LiveModelParams,\n Tool,\n ToolConfig\n} from '../public-types';\nimport { WebSocketHandler } from '../websocket';\nimport { WebSocketUrl } from '../requests/request';\nimport { formatSystemInstruction } from '../requests/request-helpers';\nimport { _LiveClientSetup } from '../types/live-responses';\n\n/**\n * Class for Live generative model APIs. 
The Live API enables low-latency, two-way multimodal\n * interactions with Gemini.\n *\n * This class should only be instantiated with {@link getLiveGenerativeModel}.\n *\n * @beta\n */\nexport class LiveGenerativeModel extends AIModel {\n generationConfig: LiveGenerationConfig;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n /**\n * @internal\n */\n constructor(\n ai: AI,\n modelParams: LiveModelParams,\n /**\n * @internal\n */\n private _webSocketHandler: WebSocketHandler\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n }\n\n /**\n * Starts a {@link LiveSession}.\n *\n * @returns A {@link LiveSession}.\n * @throws If the connection failed to be established with the server.\n *\n * @beta\n */\n async connect(): Promise<LiveSession> {\n const url = new WebSocketUrl(this._apiSettings);\n await this._webSocketHandler.connect(url.toString());\n\n let fullModelPath: string;\n if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n fullModelPath = `projects/${this._apiSettings.project}/${this.model}`;\n } else {\n fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`;\n }\n\n // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API,\n // but the backend expects them to be in the `setup` message.\n const {\n inputAudioTranscription,\n outputAudioTranscription,\n ...generationConfig\n } = this.generationConfig;\n\n const setupMessage: _LiveClientSetup = {\n setup: {\n model: fullModelPath,\n generationConfig,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n inputAudioTranscription,\n outputAudioTranscription\n }\n };\n\n try {\n // Begin listening for server messages, and begin the handshake by sending the 'setupMessage'\n const serverMessages = this._webSocketHandler.listen();\n this._webSocketHandler.send(JSON.stringify(setupMessage));\n\n // Verify we received the handshake response 'setupComplete'\n const firstMessage = (await serverMessages.next()).value;\n if (\n !firstMessage ||\n !(typeof firstMessage === 'object') ||\n !('setupComplete' in firstMessage)\n ) {\n await this._webSocketHandler.close(1011, 'Handshake failure');\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'Server connection handshake failed. 
The server did not respond with a setupComplete message.'\n );\n }\n\n return new LiveSession(this._webSocketHandler, serverMessages);\n } catch (e) {\n // Ensure connection is closed on any setup error\n await this._webSocketHandler.close();\n throw e;\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AI } from '../public-types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createPredictRequestBody } from '../requests/request-helpers';\nimport { handlePredictResponse } from '../requests/response-helpers';\nimport {\n ImagenGCSImage,\n ImagenGenerationConfig,\n ImagenInlineImage,\n RequestOptions,\n ImagenModelParams,\n ImagenGenerationResponse,\n ImagenSafetySettings\n} from '../types';\nimport { AIModel } from './ai-model';\n\n/**\n * Class for Imagen model APIs.\n *\n * This class provides methods for generating images using the Imagen model.\n *\n * @example\n * ```javascript\n * const imagen = new ImagenModel(\n * ai,\n * {\n * model: 'imagen-3.0-generate-002'\n * }\n * );\n *\n * const response = await imagen.generateImages('A photo of a cat');\n * if (response.images.length > 0) {\n * console.log(response.images[0].bytesBase64Encoded);\n * }\n * ```\n *\n * @public\n */\nexport class ImagenModel extends AIModel {\n /**\n * The Imagen generation configuration.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n\n /**\n * Constructs a new instance of the {@link ImagenModel} class.\n *\n * @param ai - an {@link AI} instance.\n * @param modelParams - Parameters to use when making requests to Imagen.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n */\n constructor(\n ai: AI,\n modelParams: ImagenModelParams,\n public requestOptions?: RequestOptions\n ) {\n const { model, generationConfig, safetySettings } = modelParams;\n super(ai, model);\n this.generationConfig = generationConfig;\n this.safetySettings = safetySettings;\n }\n\n /**\n * Generates images using the Imagen model and returns them as\n * base64-encoded strings.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the generated images.\n *\n * @throws If the request to generate images fails. 
This happens if the\n * prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n *\n * @public\n */\n async generateImages(\n prompt: string\n ): Promise<ImagenGenerationResponse<ImagenInlineImage>> {\n const body = createPredictRequestBody(prompt, {\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenInlineImage>(response);\n }\n\n /**\n * Generates images to Cloud Storage for Firebase using the Imagen model.\n *\n * @internal This method is temporarily internal.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket.\n * This should be a directory. For example, `gs://my-bucket/my-directory/`.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the URLs of the generated images.\n *\n * @throws If the request fails to generate images fails. This happens if\n * the prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n */\n async generateImagesGCS(\n prompt: string,\n gcsURI: string\n ): Promise<ImagenGenerationResponse<ImagenGCSImage>> {\n const body = createPredictRequestBody(prompt, {\n gcsURI,\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenGCSImage>(response);\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport { AIErrorCode } from './types';\n\n/**\n * A standardized interface for interacting with a WebSocket connection.\n * This abstraction allows the SDK to use the appropriate WebSocket implementation\n * for the current JS environment (Browser vs. 
Node) without\n * changing the core logic of the `LiveSession`.\n * @internal\n */\n\nexport interface WebSocketHandler {\n /**\n * Establishes a connection to the given URL.\n *\n * @param url The WebSocket URL (e.g., wss://...).\n * @returns A promise that resolves on successful connection or rejects on failure.\n */\n connect(url: string): Promise<void>;\n\n /**\n * Sends data over the WebSocket.\n *\n * @param data The string or binary data to send.\n */\n send(data: string | ArrayBuffer): void;\n\n /**\n * Returns an async generator that yields parsed JSON objects from the server.\n * The yielded type is `unknown` because the handler cannot guarantee the shape of the data.\n * The consumer is responsible for type validation.\n * The generator terminates when the connection is closed.\n *\n * @returns A generator that allows consumers to pull messages using a `for await...of` loop.\n */\n listen(): AsyncGenerator<unknown>;\n\n /**\n * Closes the WebSocket connection.\n *\n * @param code - A numeric status code explaining why the connection is closing.\n * @param reason - A human-readable string explaining why the connection is closing.\n */\n close(code?: number, reason?: string): Promise<void>;\n}\n\n/**\n * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.\n *\n * @internal\n */\nexport class WebSocketHandlerImpl implements WebSocketHandler {\n private ws?: WebSocket;\n\n constructor() {\n if (typeof WebSocket === 'undefined') {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'The WebSocket API is not available in this environment. ' +\n 'The \"Live\" feature is not supported here. It is supported in ' +\n 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'\n );\n }\n }\n\n connect(url: string): Promise<void> {\n return new Promise((resolve, reject) => {\n this.ws = new WebSocket(url);\n this.ws.binaryType = 'blob'; // Only important to set in Node\n this.ws.addEventListener('open', () => resolve(), { once: true });\n this.ws.addEventListener(\n 'error',\n () =>\n reject(\n new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error event raised on WebSocket`\n )\n ),\n { once: true }\n );\n this.ws!.addEventListener('close', (closeEvent: CloseEvent) => {\n if (closeEvent.reason) {\n logger.warn(\n `WebSocket connection closed by server. Reason: '${closeEvent.reason}'`\n );\n }\n });\n });\n }\n\n send(data: string | ArrayBuffer): void {\n if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {\n throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.');\n }\n this.ws.send(data);\n }\n\n async *listen(): AsyncGenerator<unknown> {\n if (!this.ws) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'WebSocket is not connected.'\n );\n }\n\n const messageQueue: unknown[] = [];\n const errorQueue: Error[] = [];\n let resolvePromise: (() => void) | null = null;\n let isClosed = false;\n\n const messageListener = async (event: MessageEvent): Promise<void> => {\n let data: string;\n if (event.data instanceof Blob) {\n data = await event.data.text();\n } else if (typeof event.data === 'string') {\n data = event.data;\n } else {\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`\n )\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n return;\n }\n\n try {\n const obj = JSON.parse(data) as unknown;\n messageQueue.push(obj);\n } catch (e) {\n const err = e as Error;\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing WebSocket message to JSON: ${err.message}`\n )\n );\n }\n\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const errorListener = (): void => {\n errorQueue.push(\n new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const closeListener = (event: CloseEvent): void => {\n if (event.reason) {\n logger.warn(\n `WebSocket connection closed by the server with reason: ${event.reason}`\n );\n }\n isClosed = true;\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n // Clean up listeners to prevent memory leaks\n this.ws?.removeEventListener('message', messageListener);\n this.ws?.removeEventListener('close', closeListener);\n this.ws?.removeEventListener('error', errorListener);\n };\n\n this.ws.addEventListener('message', messageListener);\n this.ws.addEventListener('close', closeListener);\n this.ws.addEventListener('error', errorListener);\n\n while (!isClosed) {\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n if (messageQueue.length > 0) {\n yield messageQueue.shift()!;\n } else {\n await new Promise<void>(resolve => {\n resolvePromise = resolve;\n });\n }\n }\n\n // If the loop terminated because isClosed is true, check for any final errors\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n }\n\n close(code?: number, reason?: string): Promise<void> {\n return new Promise(resolve => {\n if (!this.ws) {\n return resolve();\n }\n\n this.ws.addEventListener('close', () => resolve(), { once: true });\n // Calling 'close' during these states results in an error.\n if (\n this.ws.readyState === WebSocket.CLOSED ||\n this.ws.readyState === WebSocket.CONNECTING\n ) {\n return resolve();\n }\n\n if (this.ws.readyState !== WebSocket.CLOSING) {\n this.ws.close(code, reason);\n }\n });\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode } from '../types';\nimport {\n SchemaInterface,\n SchemaType,\n SchemaParams,\n SchemaRequest\n} from '../types/schema';\n\n/**\n * Parent class encompassing all Schema types, with static methods that\n * allow building specific Schema types. This class can be converted with\n * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.\n * (This string conversion is automatically done when calling SDK methods.)\n * @public\n */\nexport abstract class Schema implements SchemaInterface {\n /**\n * Optional. 
The type of the property.\n * This can only be undefined when using `anyOf` schemas, which do not have an\n * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.\n */\n type?: SchemaType;\n /** Optional. The format of the property.\n * Supported formats:<br/>\n * <ul>\n * <li>for NUMBER type: \"float\", \"double\"</li>\n * <li>for INTEGER type: \"int32\", \"int64\"</li>\n * <li>for STRING type: \"email\", \"byte\", etc</li>\n * </ul>\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /** Optional. The items of the property. */\n items?: SchemaInterface;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Whether the property is nullable. Defaults to false. */\n nullable: boolean;\n /** Optional. The example of the property. */\n example?: unknown;\n /**\n * Allows user to add other schema properties that have not yet\n * been officially added to the SDK.\n */\n [key: string]: unknown;\n\n constructor(schemaParams: SchemaInterface) {\n // TODO(dlarocque): Enforce this with union types\n if (!schemaParams.type && !schemaParams.anyOf) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"A schema must have either a 'type' or an 'anyOf' array of sub-schemas.\"\n );\n }\n // eslint-disable-next-line guard-for-in\n for (const paramKey in schemaParams) {\n this[paramKey] = schemaParams[paramKey];\n }\n // Ensure these are explicitly set to avoid TS errors.\n this.type = schemaParams.type;\n this.format = schemaParams.hasOwnProperty('format')\n ? schemaParams.format\n : undefined;\n this.nullable = schemaParams.hasOwnProperty('nullable')\n ? 
!!schemaParams.nullable\n : false;\n }\n\n /**\n * Defines how this Schema should be serialized as JSON.\n * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj: { type?: SchemaType; [key: string]: unknown } = {\n type: this.type\n };\n for (const prop in this) {\n if (this.hasOwnProperty(prop) && this[prop] !== undefined) {\n if (prop !== 'required' || this.type === SchemaType.OBJECT) {\n obj[prop] = this[prop];\n }\n }\n }\n return obj as SchemaRequest;\n }\n\n static array(arrayParams: SchemaParams & { items: Schema }): ArraySchema {\n return new ArraySchema(arrayParams, arrayParams.items);\n }\n\n static object(\n objectParams: SchemaParams & {\n properties: {\n [k: string]: Schema;\n };\n optionalProperties?: string[];\n }\n ): ObjectSchema {\n return new ObjectSchema(\n objectParams,\n objectParams.properties,\n objectParams.optionalProperties\n );\n }\n\n // eslint-disable-next-line id-blacklist\n static string(stringParams?: SchemaParams): StringSchema {\n return new StringSchema(stringParams);\n }\n\n static enumString(\n stringParams: SchemaParams & { enum: string[] }\n ): StringSchema {\n return new StringSchema(stringParams, stringParams.enum);\n }\n\n static integer(integerParams?: SchemaParams): IntegerSchema {\n return new IntegerSchema(integerParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static number(numberParams?: SchemaParams): NumberSchema {\n return new NumberSchema(numberParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static boolean(booleanParams?: SchemaParams): BooleanSchema {\n return new BooleanSchema(booleanParams);\n }\n\n static anyOf(\n anyOfParams: SchemaParams & { anyOf: TypedSchema[] }\n ): AnyOfSchema {\n return new AnyOfSchema(anyOfParams);\n }\n}\n\n/**\n * A type that includes all specific Schema types.\n * @public\n */\nexport type TypedSchema =\n | IntegerSchema\n | NumberSchema\n | StringSchema\n | BooleanSchema\n | ObjectSchema\n | ArraySchema\n | AnyOfSchema;\n\n/**\n * Schema class for \"integer\" types.\n * @public\n */\nexport class IntegerSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.INTEGER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"number\" types.\n * @public\n */\nexport class NumberSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.NUMBER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"boolean\" types.\n * @public\n */\nexport class BooleanSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.BOOLEAN,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"string\" types. 
Can be used with or without\n * enum values.\n * @public\n */\nexport class StringSchema extends Schema {\n enum?: string[];\n constructor(schemaParams?: SchemaParams, enumValues?: string[]) {\n super({\n type: SchemaType.STRING,\n ...schemaParams\n });\n this.enum = enumValues;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n if (this.enum) {\n obj['enum'] = this.enum;\n }\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class for \"array\" types.\n * The `items` param should refer to the type of item that can be a member\n * of the array.\n * @public\n */\nexport class ArraySchema extends Schema {\n constructor(schemaParams: SchemaParams, public items: TypedSchema) {\n super({\n type: SchemaType.ARRAY,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.items = this.items.toJSON();\n return obj;\n }\n}\n\n/**\n * Schema class for \"object\" types.\n * The `properties` param must be a map of `Schema` objects.\n * @public\n */\nexport class ObjectSchema extends Schema {\n constructor(\n schemaParams: SchemaParams,\n public properties: {\n [k: string]: TypedSchema;\n },\n public optionalProperties: string[] = []\n ) {\n super({\n type: SchemaType.OBJECT,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.properties = { ...this.properties };\n const required = [];\n if (this.optionalProperties) {\n for (const propertyKey of this.optionalProperties) {\n if (!this.properties.hasOwnProperty(propertyKey)) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n `Property \"${propertyKey}\" specified in \"optionalProperties\" does not exist.`\n );\n }\n }\n }\n for (const propertyKey in this.properties) {\n if (this.properties.hasOwnProperty(propertyKey)) {\n obj.properties[propertyKey] = this.properties[\n propertyKey\n ].toJSON() as SchemaRequest;\n if (!this.optionalProperties.includes(propertyKey)) {\n required.push(propertyKey);\n }\n }\n }\n if (required.length > 0) {\n obj.required = required;\n }\n delete obj.optionalProperties;\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class representing a value that can conform to any of the provided sub-schemas. 
This is\n * useful when a field can accept multiple distinct types or structures.\n * @public\n */\nexport class AnyOfSchema extends Schema {\n anyOf: TypedSchema[]; // Re-define field to narrow to required type\n constructor(schemaParams: SchemaParams & { anyOf: TypedSchema[] }) {\n if (schemaParams.anyOf.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"The 'anyOf' array must not be empty.\"\n );\n }\n super({\n ...schemaParams,\n type: undefined // anyOf schemas do not have an explicit type\n });\n this.anyOf = schemaParams.anyOf;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n // Ensure the 'anyOf' property contains serialized SchemaRequest objects.\n if (this.anyOf && Array.isArray(this.anyOf)) {\n obj.anyOf = (this.anyOf as TypedSchema[]).map(s => s.toJSON());\n }\n return obj;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { logger } from '../logger';\n\n/**\n * Defines the image format for images generated by Imagen.\n *\n * Use this class to specify the desired format (JPEG or PNG) and compression quality\n * for images generated by Imagen. This is typically included as part of\n * {@link ImagenModelParams}.\n *\n * @example\n * ```javascript\n * const imagenModelParams = {\n * // ... 
other ImagenModelParams\n * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.\n * }\n * ```\n *\n * @public\n */\nexport class ImagenImageFormat {\n /**\n * The MIME type.\n */\n mimeType: string;\n /**\n * The level of compression (a number between 0 and 100).\n */\n compressionQuality?: number;\n\n private constructor() {\n this.mimeType = 'image/png';\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a JPEG image.\n *\n * @param compressionQuality - The level of compression (a number between 0 and 100).\n * @returns An {@link ImagenImageFormat} object for a JPEG image.\n *\n * @public\n */\n static jpeg(compressionQuality?: number): ImagenImageFormat {\n if (\n compressionQuality &&\n (compressionQuality < 0 || compressionQuality > 100)\n ) {\n logger.warn(\n `Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`\n );\n }\n return { mimeType: 'image/jpeg', compressionQuality };\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a PNG image.\n *\n * @returns An {@link ImagenImageFormat} object for a PNG image.\n *\n * @public\n */\n static png(): ImagenImageFormat {\n return { mimeType: 'image/png' };\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport {\n AIErrorCode,\n FunctionCall,\n FunctionResponse,\n GenerativeContentBlob,\n LiveServerContent\n} from '../types';\nimport { LiveSession } from './live-session';\nimport { Deferred } from '@firebase/util';\n\nconst SERVER_INPUT_SAMPLE_RATE = 16_000;\nconst SERVER_OUTPUT_SAMPLE_RATE = 24_000;\n\nconst AUDIO_PROCESSOR_NAME = 'audio-processor';\n\n/**\n * The JS for an `AudioWorkletProcessor`.\n * This processor is responsible for taking raw audio from the microphone,\n * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread.\n *\n * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor\n *\n * It is defined as a string here so that it can be converted into a `Blob`\n * and loaded at runtime.\n */\nconst audioProcessorWorkletString = `\n class AudioProcessor extends AudioWorkletProcessor {\n constructor(options) {\n super();\n this.targetSampleRate = options.processorOptions.targetSampleRate;\n // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope,\n // representing the native sample rate of the AudioContext.\n this.inputSampleRate = sampleRate;\n }\n\n /**\n * This method is called by the browser's audio engine for each block of audio data.\n * Input is a single input, with a single channel (input[0][0]).\n */\n process(inputs) {\n const input = inputs[0];\n if (input && input.length > 0 && input[0].length > 0) {\n const pcmData = input[0]; // Float32Array of raw audio samples.\n \n // Simple linear interpolation for resampling.\n const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate 
/ this.inputSampleRate));\n const ratio = pcmData.length / resampled.length;\n for (let i = 0; i < resampled.length; i++) {\n resampled[i] = pcmData[Math.floor(i * ratio)];\n }\n\n // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767)\n const resampledInt16 = new Int16Array(resampled.length);\n for (let i = 0; i < resampled.length; i++) {\n const sample = Math.max(-1, Math.min(1, resampled[i]));\n if (sample < 0) {\n resampledInt16[i] = sample * 32768;\n } else {\n resampledInt16[i] = sample * 32767;\n }\n }\n \n this.port.postMessage(resampledInt16);\n }\n // Return true to keep the processor alive and processing the next audio block.\n return true;\n }\n }\n\n // Register the processor with a name that can be used to instantiate it from the main thread.\n registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor);\n`;\n\n/**\n * A controller for managing an active audio conversation.\n *\n * @beta\n */\nexport interface AudioConversationController {\n /**\n * Stops the audio conversation, closes the microphone connection, and\n * cleans up resources. Returns a promise that resolves when cleanup is complete.\n */\n stop: () => Promise<void>;\n}\n\n/**\n * Options for {@link startAudioConversation}.\n *\n * @beta\n */\nexport interface StartAudioConversationOptions {\n /**\n * An async handler that is called when the model requests a function to be executed.\n * The handler should perform the function call and return the result as a `Part`,\n * which will then be sent back to the model.\n */\n functionCallingHandler?: (\n functionCalls: FunctionCall[]\n ) => Promise<FunctionResponse>;\n}\n\n/**\n * Dependencies needed by the {@link AudioConversationRunner}.\n *\n * @internal\n */\ninterface RunnerDependencies {\n audioContext: AudioContext;\n mediaStream: MediaStream;\n sourceNode: MediaStreamAudioSourceNode;\n workletNode: AudioWorkletNode;\n}\n\n/**\n * Encapsulates the core logic of an audio conversation.\n *\n * @internal\n */\nexport class AudioConversationRunner {\n /** A flag to indicate if the conversation has been stopped. */\n private isStopped = false;\n /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */\n private readonly stopDeferred = new Deferred<void>();\n /** A promise that tracks the lifecycle of the main `runReceiveLoop`. */\n private readonly receiveLoopPromise: Promise<void>;\n\n /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */\n private readonly playbackQueue: ArrayBuffer[] = [];\n /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */\n private scheduledSources: AudioBufferSourceNode[] = [];\n /** A high-precision timeline pointer for scheduling gapless audio playback. */\n private nextStartTime = 0;\n /** A mutex to prevent the playback processing loop from running multiple times concurrently. 
*/\n private isPlaybackLoopRunning = false;\n\n constructor(\n private readonly liveSession: LiveSession,\n private readonly options: StartAudioConversationOptions,\n private readonly deps: RunnerDependencies\n ) {\n this.liveSession.inConversation = true;\n\n // Start listening for messages from the server.\n this.receiveLoopPromise = this.runReceiveLoop().finally(() =>\n this.cleanup()\n );\n\n // Set up the handler for receiving processed audio data from the worklet.\n // Message data has been resampled to 16kHz 16-bit PCM.\n this.deps.workletNode.port.onmessage = event => {\n if (this.isStopped) {\n return;\n }\n\n const pcm16 = event.data as Int16Array;\n const base64 = btoa(\n String.fromCharCode.apply(\n null,\n Array.from(new Uint8Array(pcm16.buffer))\n )\n );\n\n const chunk: GenerativeContentBlob = {\n mimeType: 'audio/pcm',\n data: base64\n };\n void this.liveSession.sendAudioRealtime(chunk);\n };\n }\n\n /**\n * Stops the conversation and unblocks the main receive loop.\n */\n async stop(): Promise<void> {\n if (this.isStopped) {\n return;\n }\n this.isStopped = true;\n this.stopDeferred.resolve(); // Unblock the receive loop\n await this.receiveLoopPromise; // Wait for the loop and cleanup to finish\n }\n\n /**\n * Cleans up all audio resources (nodes, stream tracks, context) and marks the\n * session as no longer in a conversation.\n */\n private cleanup(): void {\n this.interruptPlayback(); // Ensure all audio is stopped on final cleanup.\n this.deps.workletNode.port.onmessage = null;\n this.deps.workletNode.disconnect();\n this.deps.sourceNode.disconnect();\n this.deps.mediaStream.getTracks().forEach(track => track.stop());\n if (this.deps.audioContext.state !== 'closed') {\n void this.deps.audioContext.close();\n }\n this.liveSession.inConversation = false;\n }\n\n /**\n * Adds audio data to the queue and ensures the playback loop is running.\n */\n private enqueueAndPlay(audioData: ArrayBuffer): void {\n this.playbackQueue.push(audioData);\n // Will no-op if it's already running.\n void this.processPlaybackQueue();\n }\n\n /**\n * Stops all current and pending audio playback and clears the queue. This is\n * called when the server indicates the model's speech was interrupted with\n * `LiveServerContent.modelTurn.interrupted`.\n */\n private interruptPlayback(): void {\n // Stop all sources that have been scheduled. 
The onended event will fire for each,\n // which will clean up the scheduledSources array.\n [...this.scheduledSources].forEach(source => source.stop(0));\n\n // Clear the internal buffer of unprocessed audio chunks.\n this.playbackQueue.length = 0;\n\n // Reset the playback clock to start fresh.\n this.nextStartTime = this.deps.audioContext.currentTime;\n }\n\n /**\n * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.\n */\n private async processPlaybackQueue(): Promise<void> {\n if (this.isPlaybackLoopRunning) {\n return;\n }\n this.isPlaybackLoopRunning = true;\n\n while (this.playbackQueue.length > 0 && !this.isStopped) {\n const pcmRawBuffer = this.playbackQueue.shift()!;\n try {\n const pcm16 = new Int16Array(pcmRawBuffer);\n const frameCount = pcm16.length;\n\n const audioBuffer = this.deps.audioContext.createBuffer(\n 1,\n frameCount,\n SERVER_OUTPUT_SAMPLE_RATE\n );\n\n // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API.\n const channelData = audioBuffer.getChannelData(0);\n for (let i = 0; i < frameCount; i++) {\n channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0]\n }\n\n const source = this.deps.audioContext.createBufferSource();\n source.buffer = audioBuffer;\n source.connect(this.deps.audioContext.destination);\n\n // Track the source and set up a handler to remove it from tracking when it finishes.\n this.scheduledSources.push(source);\n source.onended = () => {\n this.scheduledSources = this.scheduledSources.filter(\n s => s !== source\n );\n };\n\n // To prevent gaps, schedule the next chunk to start either now (if we're catching up)\n // or exactly when the previous chunk is scheduled to end.\n this.nextStartTime = Math.max(\n this.deps.audioContext.currentTime,\n this.nextStartTime\n );\n source.start(this.nextStartTime);\n\n // Update the schedule for the *next* chunk.\n this.nextStartTime += audioBuffer.duration;\n } catch (e) {\n logger.error('Error playing audio:', e);\n }\n }\n\n this.isPlaybackLoopRunning = false;\n }\n\n /**\n * The main loop that listens for and processes messages from the server.\n */\n private async runReceiveLoop(): Promise<void> {\n const messageGenerator = this.liveSession.receive();\n while (!this.isStopped) {\n const result = await Promise.race([\n messageGenerator.next(),\n this.stopDeferred.promise\n ]);\n\n if (this.isStopped || !result || result.done) {\n break;\n }\n\n const message = result.value;\n if (message.type === 'serverContent') {\n const serverContent = message as LiveServerContent;\n if (serverContent.interrupted) {\n this.interruptPlayback();\n }\n\n const audioPart = serverContent.modelTurn?.parts.find(part =>\n part.inlineData?.mimeType.startsWith('audio/')\n );\n if (audioPart?.inlineData) {\n const audioData = Uint8Array.from(\n atob(audioPart.inlineData.data),\n c => c.charCodeAt(0)\n ).buffer;\n this.enqueueAndPlay(audioData);\n }\n } else if (message.type === 'toolCall') {\n if (!this.options.functionCallingHandler) {\n logger.warn(\n 'Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. 
Ignoring tool call.'\n );\n } else {\n try {\n const functionResponse = await this.options.functionCallingHandler(\n message.functionCalls\n );\n if (!this.isStopped) {\n void this.liveSession.sendFunctionResponses([functionResponse]);\n }\n } catch (e) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Function calling handler failed: ${(e as Error).message}`\n );\n }\n }\n }\n }\n }\n}\n\n/**\n * Starts a real-time, bidirectional audio conversation with the model. This helper function manages\n * the complexities of microphone access, audio recording, playback, and interruptions.\n *\n * @remarks Important: This function must be called in response to a user gesture\n * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.\n *\n * @example\n * ```javascript\n * const liveSession = await model.connect();\n * let conversationController;\n *\n * // This function must be called from within a click handler.\n * async function startConversation() {\n * try {\n * conversationController = await startAudioConversation(liveSession);\n * } catch (e) {\n * // Handle AI-specific errors\n * if (e instanceof AIError) {\n * console.error(\"AI Error:\", e.message);\n * }\n * // Handle microphone permission and hardware errors\n * else if (e instanceof DOMException) {\n * console.error(\"Microphone Error:\", e.message);\n * }\n * // Handle other unexpected errors\n * else {\n * console.error(\"An unexpected error occurred:\", e);\n * }\n * }\n * }\n *\n * // Later, to stop the conversation:\n * // if (conversationController) {\n * // await conversationController.stop();\n * // }\n * ```\n *\n * @param liveSession - An active {@link LiveSession} instance.\n * @param options - Configuration options for the audio conversation.\n * @returns A `Promise` that resolves with an {@link AudioConversationController}.\n * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).\n * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.\n *\n * @beta\n */\nexport async function startAudioConversation(\n liveSession: LiveSession,\n options: StartAudioConversationOptions = {}\n): Promise<AudioConversationController> {\n if (liveSession.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot start audio conversation on a closed LiveSession.'\n );\n }\n\n if (liveSession.inConversation) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'An audio conversation is already in progress for this session.'\n );\n }\n\n // Check for necessary Web API support.\n if (\n typeof AudioWorkletNode === 'undefined' ||\n typeof AudioContext === 'undefined' ||\n typeof navigator === 'undefined' ||\n !navigator.mediaDevices\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'\n );\n }\n\n let audioContext: AudioContext | undefined;\n try {\n // 1. Set up the audio context. 
This must be in response to a user gesture.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy\n audioContext = new AudioContext();\n if (audioContext.state === 'suspended') {\n await audioContext.resume();\n }\n\n // 2. Prompt for microphone access and get the media stream.\n // This can throw a variety of permission or hardware-related errors.\n const mediaStream = await navigator.mediaDevices.getUserMedia({\n audio: true\n });\n\n // 3. Load the AudioWorklet processor.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet\n const workletBlob = new Blob([audioProcessorWorkletString], {\n type: 'application/javascript'\n });\n const workletURL = URL.createObjectURL(workletBlob);\n await audioContext.audioWorklet.addModule(workletURL);\n\n // 4. Create the audio graph: Microphone -> Source Node -> Worklet Node\n const sourceNode = audioContext.createMediaStreamSource(mediaStream);\n const workletNode = new AudioWorkletNode(\n audioContext,\n AUDIO_PROCESSOR_NAME,\n {\n processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE }\n }\n );\n sourceNode.connect(workletNode);\n\n // 5. Instantiate and return the runner which manages the conversation.\n const runner = new AudioConversationRunner(liveSession, options, {\n audioContext,\n mediaStream,\n sourceNode,\n workletNode\n });\n\n return { stop: () => runner.stop() };\n } catch (e) {\n // Ensure the audio context is closed on any setup error.\n if (audioContext && audioContext.state !== 'closed') {\n void audioContext.close();\n }\n\n // Re-throw specific, known error types directly. The user may want to handle `DOMException`\n // errors differently (for example, if permission to access audio device was denied).\n if (e instanceof AIError || e instanceof DOMException) {\n throw e;\n }\n\n // Wrap any other unexpected errors in a standard AIError.\n throw new AIError(\n AIErrorCode.ERROR,\n `Failed to initialize audio recording: ${(e as Error).message}`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp, getApp, _getProvider } from '@firebase/app';\nimport { Provider } from '@firebase/component';\nimport { getModularInstance } from '@firebase/util';\nimport { AI_TYPE, DEFAULT_HYBRID_IN_CLOUD_MODEL } from './constants';\nimport { AIService } from './service';\nimport { AI, AIOptions } from './public-types';\nimport {\n ImagenModelParams,\n HybridParams,\n ModelParams,\n RequestOptions,\n AIErrorCode,\n LiveModelParams\n} from './types';\nimport { AIError } from './errors';\nimport {\n AIModel,\n GenerativeModel,\n LiveGenerativeModel,\n ImagenModel\n} from './models';\nimport { encodeInstanceIdentifier } from './helpers';\nimport { GoogleAIBackend } from './backend';\nimport { WebSocketHandlerImpl } from './websocket';\n\nexport { ChatSession } from './methods/chat-session';\nexport { LiveSession } from './methods/live-session';\nexport * from 
'./requests/schema-builder';\nexport { ImagenImageFormat } from './requests/imagen-image-format';\nexport { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, AIError };\nexport { Backend, VertexAIBackend, GoogleAIBackend } from './backend';\nexport {\n startAudioConversation,\n AudioConversationController,\n StartAudioConversationOptions\n} from './methods/live-session-helpers';\n\ndeclare module '@firebase/component' {\n interface NameServiceMapping {\n [AI_TYPE]: AIService;\n }\n}\n\n/**\n * Returns the default {@link AI} instance that is associated with the provided\n * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the\n * default settings.\n *\n * @example\n * ```javascript\n * const ai = getAI(app);\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Gemini Developer API (via Google AI).\n * const ai = getAI(app, { backend: new GoogleAIBackend() });\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Vertex AI Gemini API.\n * const ai = getAI(app, { backend: new VertexAIBackend() });\n * ```\n *\n * @param app - The {@link @firebase/app#FirebaseApp} to use.\n * @param options - {@link AIOptions} that configure the AI instance.\n * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.\n *\n * @public\n */\nexport function getAI(app: FirebaseApp = getApp(), options?: AIOptions): AI {\n app = getModularInstance(app);\n // Dependencies\n const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE);\n\n const backend = options?.backend ?? new GoogleAIBackend();\n\n const finalOptions: Omit<AIOptions, 'backend'> = {\n useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false\n };\n\n const identifier = encodeInstanceIdentifier(backend);\n const aiInstance = AIProvider.getImmediate({\n identifier\n });\n\n aiInstance.options = finalOptions;\n\n return aiInstance;\n}\n\n/**\n * Returns a {@link GenerativeModel} class with methods for inference\n * and other functionality.\n *\n * @public\n */\nexport function getGenerativeModel(\n ai: AI,\n modelParams: ModelParams | HybridParams,\n requestOptions?: RequestOptions\n): GenerativeModel {\n // Uses the existence of HybridParams.mode to clarify the type of the modelParams input.\n const hybridParams = modelParams as HybridParams;\n let inCloudParams: ModelParams;\n if (hybridParams.mode) {\n inCloudParams = hybridParams.inCloudParams || {\n model: DEFAULT_HYBRID_IN_CLOUD_MODEL\n };\n } else {\n inCloudParams = modelParams as ModelParams;\n }\n\n if (!inCloudParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`\n );\n }\n\n /**\n * An AIService registered by index.node.ts will not have a\n * chromeAdapterFactory() method.\n */\n const chromeAdapter = (ai as AIService).chromeAdapterFactory?.(\n hybridParams.mode,\n typeof window === 'undefined' ? 
undefined : window,\n hybridParams.onDeviceParams\n );\n\n return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);\n}\n\n/**\n * Returns an {@link ImagenModel} class with methods for using Imagen.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when making Imagen requests.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @public\n */\nexport function getImagenModel(\n ai: AI,\n modelParams: ImagenModelParams,\n requestOptions?: RequestOptions\n): ImagenModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`\n );\n }\n return new ImagenModel(ai, modelParams, requestOptions);\n}\n\n/**\n * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.\n *\n * The Live API is only supported in modern browser windows and Node >= 22.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when setting up a {@link LiveSession}.\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @beta\n */\nexport function getLiveGenerativeModel(\n ai: AI,\n modelParams: LiveModelParams\n): LiveGenerativeModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`\n );\n }\n const webSocketHandler = new WebSocketHandlerImpl();\n return new LiveGenerativeModel(ai, modelParams, webSocketHandler);\n}\n","/**\n * The Firebase AI Web SDK.\n *\n * @packageDocumentation\n */\n\n/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { registerVersion, _registerComponent } from '@firebase/app';\nimport { AIService } from './service';\nimport { AI_TYPE } from './constants';\nimport { Component, ComponentType } from '@firebase/component';\nimport { name, version } from '../package.json';\nimport { decodeInstanceIdentifier } from './helpers';\nimport { AIError } from './errors';\nimport { AIErrorCode } from './public-types';\n\nfunction registerAI(): void {\n _registerComponent(\n new Component(\n AI_TYPE,\n (container, { instanceIdentifier }) => {\n if (!instanceIdentifier) {\n throw new AIError(\n AIErrorCode.ERROR,\n 'AIService instance identifier is undefined.'\n );\n }\n\n const backend = decodeInstanceIdentifier(instanceIdentifier);\n\n // getImmediate for FirebaseApp will always succeed\n const app = container.getProvider('app').getImmediate();\n const auth = container.getProvider('auth-internal');\n const appCheckProvider = container.getProvider('app-check-internal');\n return new AIService(app, backend, auth, appCheckProvider);\n },\n 
ComponentType.PUBLIC\n ).setMultipleInstances(true)\n );\n\n registerVersion(name, version, 'node');\n // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation\n registerVersion(name, version, '__BUILD_TARGET__');\n}\n\nregisterAI();\n\nexport * from './api';\nexport * from './public-types';\n"],"names":["GoogleAIMapper.mapGenerateContentResponse","GoogleAIMapper.mapGenerateContentRequest","GoogleAIMapper.mapCountTokensRequest"],"mappings":";;;;;;;;AAAA;;;;;;;;;;;;;;;AAeG;AAII,MAAM,OAAO,GAAG,IAAI,CAAC;AAErB,MAAM,gBAAgB,GAAG,aAAa,CAAC;AAEvC,MAAM,cAAc,GAAG,iCAAiC,CAAC;AAEzD,MAAM,mBAAmB,GAAG,QAAQ,CAAC;AAErC,MAAM,eAAe,GAAG,OAAO,CAAC;AAEhC,MAAM,YAAY,GAAG,OAAO,CAAC;AAE7B,MAAM,wBAAwB,GAAG,GAAG,GAAG,IAAI,CAAC;AAEnD;;AAEG;AACI,MAAM,6BAA6B,GAAG,uBAAuB;;ACpCpE;;;;;;;;;;;;;;;AAeG;AAQH;;;AAGG;AACI,MAAM,cAAc,GAAG,CAAC,MAAM,EAAE,OAAO,EAAE,UAAU,EAAE,QAAQ,EAAW;AAE/E;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B,IAAA,yBAAyB,EAAE,2BAA2B;AACtD,IAAA,+BAA+B,EAAE,iCAAiC;AAClE,IAAA,wBAAwB,EAAE,0BAA0B;AACpD,IAAA,+BAA+B,EAAE,iCAAiC;EACzD;AAQX;;;AAGG;AACU,MAAA,kBAAkB,GAAG;AAChC;;AAEG;AACH,IAAA,mBAAmB,EAAE,qBAAqB;AAC1C;;AAEG;AACH,IAAA,sBAAsB,EAAE,wBAAwB;AAChD;;AAEG;AACH,IAAA,eAAe,EAAE,iBAAiB;AAClC;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;;AAGG;AACH,IAAA,GAAG,EAAE,KAAK;EACD;AASX;;;;AAIG;AACU,MAAA,eAAe,GAAG;AAC7B;;AAEG;AACH,IAAA,QAAQ,EAAE,UAAU;AACpB;;AAEG;AACH,IAAA,WAAW,EAAE,aAAa;EACjB;AAUX;;;AAGG;AACU,MAAA,eAAe,GAAG;AAC7B;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,GAAG,EAAE,KAAK;AACV;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;EACH;AASX;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B;;AAEG;AACH,IAAA,wBAAwB,EAAE,0BAA0B;AACpD;;AAEG;AACH,IAAA,iBAAiB,EAAE,mBAAmB;AACtC;;AAEG;AACH,IAAA,oBAAoB,EAAE,sBAAsB;AAC5C;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;AACxC;;;;;AAKG;AACH,IAAA,yBAAyB,EAAE,2BAA2B;EAC7C;AAQX;;;AAGG;AACU,MAAA,WAAW,GAAG;AACzB;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,SAAS,EAAE,WAAW;AACtB;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;EAC/B;AAQX;;;AAGG;AACU,MAAA,YAAY,GAAG;AAC1B;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,MAAM,EAAE,QAAQ;AAChB;;AAEG;AACH,IAAA,UAAU,EAAE,YAAY;AACxB;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,SAAS,EAAE,WAAW;AACtB;;AAEG;AACH,IAAA,kBAAkB,EAAE,oBAAoB;AACxC;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,uBAAuB,EAAE,yBAAyB;EACzC;AAQX;;AAEG;AACU,MAAA,mBAAmB,GAAG;AACjC;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;;;;AAKG;AACH,IAAA,GAAG,EAAE,KAAK;AACV;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;EACH;AAQX;;;AAGG;AACU,MAAA,QAAQ,GAAG;AACtB;;AAEG;AACH,IAAA,oBAAoB,EAAE,sBAAsB;AAC5C;;AAEG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;AAEG;AACH,IAAA,QAAQ,EAAE,UAAU;EACX;AAQX;;;;AAIG;AACU,MAAA,gBAAgB,GAAG;AAC9B;;;AAGG;AACH,IAAA,IAAI,EAAE,MAAM;AACZ;;;AAGG;AACH,IAAA,KAAK,EAAE,OAAO;AACd;;;AAGG;AACH,IAAA,KAAK,EAAE,OAAO;EACL;AAUX;;;;;;;;;;;;;;;;;;;;AAoBG;AACU,MAAA,aAAa,GAAG;AAC3B,IAAA,kBAAkB,EAAE,kBAAkB;AACtC,IAAA,gBAAgB,EAAE,gBAAgB;AAClC,IAAA,eAAe,EAAE,eAAe;AAChC,IAAA,iBAAiB,EAAE,iBAAiB;EAC3B;AASX;;;;AAIG;AACU,MAAA,eAAe,GAAG;AAC7B,IAAA,WAAW,EAAE,WAAW;AACxB,IAAA,UAAU,EAAE,UAAU;EACb;AAUX;;;;AAIG;AACU,MAAA,OAAO,GAAG;AACrB,IAAA,WAAW,EAAE,qBAAqB;AAClC,IAAA,EAAE,EAAE,YAAY;AAChB,IAAA,MAAM,EAAE,gBAAgB;AACxB,IAAA,iBAAiB,EAAE,2BAA2B;EAC9C;AASF;;;;AAIG;AACU,MAAA,QAAQ,GAAG;AACtB,IAAA,WAAW,EAAE,sBAAsB;AACnC,IAAA,MAAM,EAAE,QAAQ;;;ACzalB;;;;;;;;;;;;;;;AAeG;AA4XH;;;;;;;;;;;;;;;;AAgBG;AACU,MAAA,kBAAkB,GAAG;AAChC;;AAEG;AACH,IAAA,gCAAgC,
M,IAAI,CAAC,gBAAgB,CAAC,KAAK,CAAC,IAAI,EAAE,wBAAwB,CAAC,CAAC;SACnE;KACF;AAED;;;;;;;;;AASG;IACH,MAAM,eAAe,CAAC,WAAoC,EAAA;AACxD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;;;AAID,QAAA,WAAW,CAAC,OAAO,CAAC,UAAU,IAAG;AAC/B,YAAA,MAAM,OAAO,GAA6B;AACxC,gBAAA,aAAa,EAAE,EAAE,WAAW,EAAE,CAAC,UAAU,CAAC,EAAE;aAC7C,CAAC;AACF,YAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC;AACtD,SAAC,CAAC,CAAC;KACJ;AAED;;;;;;;;;AASG;IACH,MAAM,eAAe,CACnB,gBAAuD,EAAA;AAEvD,QAAA,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,sDAAsD,CACvD,CAAC;SACH;AAED,QAAA,MAAM,MAAM,GAAG,gBAAgB,CAAC,SAAS,EAAE,CAAC;QAC5C,OAAO,IAAI,EAAE;AACX,YAAA,IAAI;gBACF,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;gBAE5C,IAAI,IAAI,EAAE;oBACR,MAAM;iBACP;qBAAM,IAAI,CAAC,KAAK,EAAE;AACjB,oBAAA,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC,CAAC;iBACrE;gBAED,MAAM,IAAI,CAAC,eAAe,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;aACrC;YAAC,OAAO,CAAC,EAAE;;AAEV,gBAAA,MAAM,OAAO,GACX,CAAC,YAAY,KAAK,GAAG,CAAC,CAAC,OAAO,GAAG,gCAAgC,CAAC;gBACpE,MAAM,IAAI,OAAO,CAAC,WAAW,CAAC,aAAa,EAAE,OAAO,CAAC,CAAC;aACvD;SACF;KACF;AACF;;ACzWD;;;;;;;;;;;;;;;AAeG;AAoBH;;;;;;;AAOG;AACG,MAAO,mBAAoB,SAAQ,OAAO,CAAA;AAM9C;;AAEG;IACH,WACE,CAAA,EAAM,EACN,WAA4B;AAC5B;;AAEG;IACK,iBAAmC,EAAA;AAE3C,QAAA,KAAK,CAAC,EAAE,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;QAFrB,IAAiB,CAAA,iBAAA,GAAjB,iBAAiB,CAAkB;QAG3C,IAAI,CAAC,gBAAgB,GAAG,WAAW,CAAC,gBAAgB,IAAI,EAAE,CAAC;AAC3D,QAAA,IAAI,CAAC,KAAK,GAAG,WAAW,CAAC,KAAK,CAAC;AAC/B,QAAA,IAAI,CAAC,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QACzC,IAAI,CAAC,iBAAiB,GAAG,uBAAuB,CAC9C,WAAW,CAAC,iBAAiB,CAC9B,CAAC;KACH;AAED;;;;;;;AAOG;AACH,IAAA,MAAM,OAAO,GAAA;QACX,MAAM,GAAG,GAAG,IAAI,YAAY,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAChD,MAAM,IAAI,CAAC,iBAAiB,CAAC,OAAO,CAAC,GAAG,CAAC,QAAQ,EAAE,CAAC,CAAC;AAErD,QAAA,IAAI,aAAqB,CAAC;AAC1B,QAAA,IAAI,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,WAAW,KAAK,WAAW,CAAC,SAAS,EAAE;AACnE,YAAA,aAAa,GAAG,CAAA,SAAA,EAAY,IAAI,CAAC,YAAY,CAAC,OAAO,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,CAAA,CAAE,CAAC;SACvE;aAAM;AACL,YAAA,aAAa,GAAG,CAAY,SAAA,EAAA,IAAI,CAAC,YAAY,CAAC,OAAO,CAAc,WAAA,EAAA,IAAI,CAAC,YAAY,CAAC,QAAQ,CAAA,CAAA,EAAI,IAAI,CAAC,KAAK,EAAE,CAAC;SAC/G;;;AAID,QAAA,MAAM,EACJ,uBAAuB,EACvB,wBAAwB,EACxB,GAAG,gBAAgB,EACpB,GAAG,IAAI,CAAC,gBAAgB,CAAC;AAE1B,QAAA,MAAM,YAAY,GAAqB;AACrC,YAAA,KAAK,EAAE;AACL,gBAAA,KAAK,EAAE,aAAa;gBACpB,gBAAgB;gBAChB,KAAK,EAAE,IAAI,CAAC,KAAK;gBACjB,UAAU,EAAE,IAAI,CAAC,UAAU;gBAC3B,iBAAiB,EAAE,IAAI,CAAC,iBAAiB;gBACzC,uBAAuB;gBACvB,wBAAwB;AACzB,aAAA;SACF,CAAC;AAEF,QAAA,IAAI;;YAEF,MAAM,cAAc,GAAG,IAAI,CAAC,iBAAiB,CAAC,MAAM,EAAE,CAAC;AACvD,YAAA,IAAI,CAAC,iBAAiB,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,YAAY,CAAC,CAAC,CAAC;;YAG1D,MAAM,YAAY,GAAG,CAAC,MAAM,cAAc,CAAC,IAAI,EAAE,EAAE,KAAK,CAAC;AACzD,YAAA,IACE,CAAC,YAAY;AACb,gBAAA,EAAE,OAAO,YAAY,KAAK,QAAQ,CAAC;AACnC,gBAAA,EAAE,eAAe,IAAI,YAAY,CAAC,EAClC;gBACA,MAAM,IAAI,CAAC,iBAAiB,CAAC,KAAK,CAAC,IAAI,EAAE,mBAAmB,CAAC,CAAC;gBAC9D,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,8FAA8F,CAC/F,CAAC;aACH;YAED,OAAO,IAAI,WAAW,CAAC,IAAI,CAAC,iBAAiB,EAAE,cAAc,CAAC,CAAC;SAChE;QAAC,OAAO,CAAC,EAAE;;AAEV,YAAA,MAAM,IAAI,CAAC,iBAAiB,CAAC,KAAK,EAAE,CAAC;AACrC,YAAA,MAAM,CAAC,CAAC;SACT;KACF;AACF;;ACtID;;;;;;;;;;;;;;;AAeG;AAiBH;;;;;;;;;;;;;;;;;;;;;AAqBG;AACG,MAAO,WAAY,SAAQ,OAAO,CAAA;AAUtC;;;;;;;;;AASG;AACH,IAAA,WAAA,CACE,EAAM,EACN,WAA8B,EACvB,cAA+B,EAAA;QAEtC,MAAM,EAAE,KAAK,EAAE,gBAAgB,EAAE,cAAc,EAAE,GAAG,WAAW,CAAC;AAChE,QAAA,KAAK,CAAC,EAAE,EAAE,KAAK,CAAC,CAAC;QAHV,IAAc,CAAA,cAAA,GAAd,cAAc,CAAiB;AAItC,QAAA,IAAI,CAAC,gBAAgB,GAAG,gBAAgB,CAAC;AACzC,QAAA,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;KACt
C;AAED;;;;;;;;;;;;;;;;;AAiBG;IACH,MAAM,cAAc,CAClB,MAAc,EAAA;AAEd,QAAA,MAAM,IAAI,GAAG,wBAAwB,CAAC,MAAM,EAAE;YAC5C,GAAG,IAAI,CAAC,gBAAgB;YACxB,GAAG,IAAI,CAAC,cAAc;AACvB,SAAA,CAAC,CAAC;AACH,QAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,OAAO,EACZ,IAAI,CAAC,YAAY;AACjB,qBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EACpB,IAAI,CAAC,cAAc,CACpB,CAAC;AACF,QAAA,OAAO,qBAAqB,CAAoB,QAAQ,CAAC,CAAC;KAC3D;AAED;;;;;;;;;;;;;;;;;;AAkBG;AACH,IAAA,MAAM,iBAAiB,CACrB,MAAc,EACd,MAAc,EAAA;AAEd,QAAA,MAAM,IAAI,GAAG,wBAAwB,CAAC,MAAM,EAAE;YAC5C,MAAM;YACN,GAAG,IAAI,CAAC,gBAAgB;YACxB,GAAG,IAAI,CAAC,cAAc;AACvB,SAAA,CAAC,CAAC;AACH,QAAA,MAAM,QAAQ,GAAG,MAAM,WAAW,CAChC,IAAI,CAAC,KAAK,EACV,IAAI,CAAC,OAAO,EACZ,IAAI,CAAC,YAAY;AACjB,qBAAa,KAAK,EAClB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EACpB,IAAI,CAAC,cAAc,CACpB,CAAC;AACF,QAAA,OAAO,qBAAqB,CAAiB,QAAQ,CAAC,CAAC;KACxD;AACF;;AC/JD;;;;;;;;;;;;;;;AAeG;AAiDH;;;;AAIG;MACU,oBAAoB,CAAA;AAG/B,IAAA,WAAA,GAAA;AACE,QAAA,IAAI,OAAO,SAAS,KAAK,WAAW,EAAE;AACpC,YAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,0DAA0D;gBACxD,+DAA+D;AAC/D,gBAAA,6EAA6E,CAChF,CAAC;SACH;KACF;AAED,IAAA,OAAO,CAAC,GAAW,EAAA;QACjB,OAAO,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,MAAM,KAAI;YACrC,IAAI,CAAC,EAAE,GAAG,IAAI,SAAS,CAAC,GAAG,CAAC,CAAC;YAC7B,IAAI,CAAC,EAAE,CAAC,UAAU,GAAG,MAAM,CAAC;AAC5B,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,MAAM,EAAE,MAAM,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;AAClE,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CACtB,OAAO,EACP,MACE,MAAM,CACJ,IAAI,OAAO,CACT,WAAW,CAAC,WAAW,EACvB,CAAA,+BAAA,CAAiC,CAClC,CACF,EACH,EAAE,IAAI,EAAE,IAAI,EAAE,CACf,CAAC;YACF,IAAI,CAAC,EAAG,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,UAAsB,KAAI;AAC5D,gBAAA,IAAI,UAAU,CAAC,MAAM,EAAE;oBACrB,MAAM,CAAC,IAAI,CACT,CAAA,gDAAA,EAAmD,UAAU,CAAC,MAAM,CAAG,CAAA,CAAA,CACxE,CAAC;iBACH;AACH,aAAC,CAAC,CAAC;AACL,SAAC,CAAC,CAAC;KACJ;AAED,IAAA,IAAI,CAAC,IAA0B,EAAA;AAC7B,QAAA,IAAI,CAAC,IAAI,CAAC,EAAE,IAAI,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,IAAI,EAAE;YACrD,MAAM,IAAI,OAAO,CAAC,WAAW,CAAC,aAAa,EAAE,wBAAwB,CAAC,CAAC;SACxE;AACD,QAAA,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;KACpB;IAED,OAAO,MAAM,GAAA;AACX,QAAA,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE;YACZ,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,6BAA6B,CAC9B,CAAC;SACH;QAED,MAAM,YAAY,GAAc,EAAE,CAAC;QACnC,MAAM,UAAU,GAAY,EAAE,CAAC;QAC/B,IAAI,cAAc,GAAwB,IAAI,CAAC;QAC/C,IAAI,QAAQ,GAAG,KAAK,CAAC;AAErB,QAAA,MAAM,eAAe,GAAG,OAAO,KAAmB,KAAmB;AACnE,YAAA,IAAI,IAAY,CAAC;AACjB,YAAA,IAAI,KAAK,CAAC,IAAI,YAAY,IAAI,EAAE;gBAC9B,IAAI,GAAG,MAAM,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC;aAChC;AAAM,iBAAA,IAAI,OAAO,KAAK,CAAC,IAAI,KAAK,QAAQ,EAAE;AACzC,gBAAA,IAAI,GAAG,KAAK,CAAC,IAAI,CAAC;aACnB;iBAAM;AACL,gBAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,CAAA,kFAAA,EAAqF,OAAO,KAAK,CAAC,IAAI,CAAG,CAAA,CAAA,CAC1G,CACF,CAAC;gBACF,IAAI,cAAc,EAAE;AAClB,oBAAA,cAAc,EAAE,CAAC;oBACjB,cAAc,GAAG,IAAI,CAAC;iBACvB;gBACD,OAAO;aACR;AAED,YAAA,IAAI;gBACF,MAAM,GAAG,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAY,CAAC;AACxC,gBAAA,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;aACxB;YAAC,OAAO,CAAC,EAAE;gBACV,MAAM,GAAG,GAAG,CAAU,CAAC;AACvB,gBAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CACT,WAAW,CAAC,YAAY,EACxB,4CAA4C,GAAG,CAAC,OAAO,CAAE,CAAA,CAC1D,CACF,CAAC;aACH;YAED,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;AACH,SAAC,CAAC;QAEF,MAAM,aAAa,GAAG,MAAW;AAC/B,YAAA,UAAU,CAAC,IAAI,CACb,IAAI,OAAO,CAAC,WAAW,CAAC,WAAW,EAAE,6BAA6B,CAAC,CACpE,CAAC;YACF,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;AACH,SAAC,CAAC;AAEF,QAAA,MAAM,aAAa,GAAG,CAAC,KAAiB,KAAU;AAChD,YAAA,IAAI,KAAK,CAAC,MAAM,EAAE;gBAChB,MAAM,CAAC,IAAI,CACT,CAAA,uDAAA,EAA0D,KAAK,CAAC,MAAM,CAAE,CAAA,CA
CzE,CAAC;aACH;YACD,QAAQ,GAAG,IAAI,CAAC;YAChB,IAAI,cAAc,EAAE;AAClB,gBAAA,cAAc,EAAE,CAAC;gBACjB,cAAc,GAAG,IAAI,CAAC;aACvB;;YAED,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,SAAS,EAAE,eAAe,CAAC,CAAC;YACzD,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;YACrD,IAAI,CAAC,EAAE,EAAE,mBAAmB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;AACvD,SAAC,CAAC;QAEF,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,SAAS,EAAE,eAAe,CAAC,CAAC;QACrD,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;QACjD,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,CAAC,CAAC;QAEjD,OAAO,CAAC,QAAQ,EAAE;AAChB,YAAA,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzB,gBAAA,MAAM,KAAK,GAAG,UAAU,CAAC,KAAK,EAAG,CAAC;AAClC,gBAAA,MAAM,KAAK,CAAC;aACb;AACD,YAAA,IAAI,YAAY,CAAC,MAAM,GAAG,CAAC,EAAE;AAC3B,gBAAA,MAAM,YAAY,CAAC,KAAK,EAAG,CAAC;aAC7B;iBAAM;AACL,gBAAA,MAAM,IAAI,OAAO,CAAO,OAAO,IAAG;oBAChC,cAAc,GAAG,OAAO,CAAC;AAC3B,iBAAC,CAAC,CAAC;aACJ;SACF;;AAGD,QAAA,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE;AACzB,YAAA,MAAM,KAAK,GAAG,UAAU,CAAC,KAAK,EAAG,CAAC;AAClC,YAAA,MAAM,KAAK,CAAC;SACb;KACF;IAED,KAAK,CAAC,IAAa,EAAE,MAAe,EAAA;AAClC,QAAA,OAAO,IAAI,OAAO,CAAC,OAAO,IAAG;AAC3B,YAAA,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE;gBACZ,OAAO,OAAO,EAAE,CAAC;aAClB;AAED,YAAA,IAAI,CAAC,EAAE,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;;YAEnE,IACE,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,MAAM;gBACvC,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,UAAU,EAC3C;gBACA,OAAO,OAAO,EAAE,CAAC;aAClB;YAED,IAAI,IAAI,CAAC,EAAE,CAAC,UAAU,KAAK,SAAS,CAAC,OAAO,EAAE;gBAC5C,IAAI,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;aAC7B;AACH,SAAC,CAAC,CAAC;KACJ;AACF;;AChPD;;;;;;;;;;;;;;;AAeG;AAWH;;;;;;AAMG;MACmB,MAAM,CAAA;AAkC1B,IAAA,WAAA,CAAY,YAA6B,EAAA;;QAEvC,IAAI,CAAC,YAAY,CAAC,IAAI,IAAI,CAAC,YAAY,CAAC,KAAK,EAAE;YAC7C,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,wEAAwE,CACzE,CAAC;SACH;;AAED,QAAA,KAAK,MAAM,QAAQ,IAAI,YAAY,EAAE;YACnC,IAAI,CAAC,QAAQ,CAAC,GAAG,YAAY,CAAC,QAAQ,CAAC,CAAC;SACzC;;AAED,QAAA,IAAI,CAAC,IAAI,GAAG,YAAY,CAAC,IAAI,CAAC;QAC9B,IAAI,CAAC,MAAM,GAAG,YAAY,CAAC,cAAc,CAAC,QAAQ,CAAC;cAC/C,YAAY,CAAC,MAAM;cACnB,SAAS,CAAC;QACd,IAAI,CAAC,QAAQ,GAAG,YAAY,CAAC,cAAc,CAAC,UAAU,CAAC;AACrD,cAAE,CAAC,CAAC,YAAY,CAAC,QAAQ;cACvB,KAAK,CAAC;KACX;AAED;;;;AAIG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAkD;YACzD,IAAI,EAAE,IAAI,CAAC,IAAI;SAChB,CAAC;AACF,QAAA,KAAK,MAAM,IAAI,IAAI,IAAI,EAAE;AACvB,YAAA,IAAI,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK,SAAS,EAAE;AACzD,gBAAA,IAAI,IAAI,KAAK,UAAU,IAAI,IAAI,CAAC,IAAI,KAAK,UAAU,CAAC,MAAM,EAAE;oBAC1D,GAAG,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,CAAC;iBACxB;aACF;SACF;AACD,QAAA,OAAO,GAAoB,CAAC;KAC7B;IAED,OAAO,KAAK,CAAC,WAA6C,EAAA;QACxD,OAAO,IAAI,WAAW,CAAC,WAAW,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;KACxD;IAED,OAAO,MAAM,CACX,YAKC,EAAA;AAED,QAAA,OAAO,IAAI,YAAY,CACrB,YAAY,EACZ,YAAY,CAAC,UAAU,EACvB,YAAY,CAAC,kBAAkB,CAChC,CAAC;KACH;;IAGD,OAAO,MAAM,CAAC,YAA2B,EAAA;AACvC,QAAA,OAAO,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;KACvC;IAED,OAAO,UAAU,CACf,YAA+C,EAAA;QAE/C,OAAO,IAAI,YAAY,CAAC,YAAY,EAAE,YAAY,CAAC,IAAI,CAAC,CAAC;KAC1D;IAED,OAAO,OAAO,CAAC,aAA4B,EAAA;AACzC,QAAA,OAAO,IAAI,aAAa,CAAC,aAAa,CAAC,CAAC;KACzC;;IAGD,OAAO,MAAM,CAAC,YAA2B,EAAA;AACvC,QAAA,OAAO,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;KACvC;;IAGD,OAAO,OAAO,CAAC,aAA4B,EAAA;AACzC,QAAA,OAAO,IAAI,aAAa,CAAC,aAAa,CAAC,CAAC;KACzC;IAED,OAAO,KAAK,CACV,WAAoD,EAAA;AAEpD,QAAA,OAAO,IAAI,WAAW,CAAC,WAAW,CAAC,CAAC;KACrC;AACF,CAAA;AAeD;;;AAGG;AACG,MAAO,aAAc,SAAQ,MAAM,CAAA;AACvC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,OAAO;AACxB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;AAGG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;AACtC,IAAA,WAAA,CAAY,YAA2B,EAAA;AA
CrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;AAGG;AACG,MAAO,aAAc,SAAQ,MAAM,CAAA;AACvC,IAAA,WAAA,CAAY,YAA2B,EAAA;AACrC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,OAAO;AACxB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;KACJ;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;IAEtC,WAAY,CAAA,YAA2B,EAAE,UAAqB,EAAA;AAC5D,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;AACH,QAAA,IAAI,CAAC,IAAI,GAAG,UAAU,CAAC;KACxB;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;AAC3B,QAAA,IAAI,IAAI,CAAC,IAAI,EAAE;AACb,YAAA,GAAG,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC;SACzB;AACD,QAAA,OAAO,GAAoB,CAAC;KAC7B;AACF,CAAA;AAED;;;;;AAKG;AACG,MAAO,WAAY,SAAQ,MAAM,CAAA;IACrC,WAAY,CAAA,YAA0B,EAAS,KAAkB,EAAA;AAC/D,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,KAAK;AACtB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;QAJ0C,IAAK,CAAA,KAAA,GAAL,KAAK,CAAa;KAKhE;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAC3B,GAAG,CAAC,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,EAAE,CAAC;AAChC,QAAA,OAAO,GAAG,CAAC;KACZ;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,YAAa,SAAQ,MAAM,CAAA;AACtC,IAAA,WAAA,CACE,YAA0B,EACnB,UAEN,EACM,qBAA+B,EAAE,EAAA;AAExC,QAAA,KAAK,CAAC;YACJ,IAAI,EAAE,UAAU,CAAC,MAAM;AACvB,YAAA,GAAG,YAAY;AAChB,SAAA,CAAC,CAAC;QARI,IAAU,CAAA,UAAA,GAAV,UAAU,CAEhB;QACM,IAAkB,CAAA,kBAAA,GAAlB,kBAAkB,CAAe;KAMzC;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAC3B,GAAG,CAAC,UAAU,GAAG,EAAE,GAAG,IAAI,CAAC,UAAU,EAAE,CAAC;QACxC,MAAM,QAAQ,GAAG,EAAE,CAAC;AACpB,QAAA,IAAI,IAAI,CAAC,kBAAkB,EAAE;AAC3B,YAAA,KAAK,MAAM,WAAW,IAAI,IAAI,CAAC,kBAAkB,EAAE;gBACjD,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,WAAW,CAAC,EAAE;oBAChD,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,CAAa,UAAA,EAAA,WAAW,CAAqD,mDAAA,CAAA,CAC9E,CAAC;iBACH;aACF;SACF;AACD,QAAA,KAAK,MAAM,WAAW,IAAI,IAAI,CAAC,UAAU,EAAE;YACzC,IAAI,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,WAAW,CAAC,EAAE;AAC/C,gBAAA,GAAG,CAAC,UAAU,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,UAAU,CAC3C,WAAW,CACZ,CAAC,MAAM,EAAmB,CAAC;gBAC5B,IAAI,CAAC,IAAI,CAAC,kBAAkB,CAAC,QAAQ,CAAC,WAAW,CAAC,EAAE;AAClD,oBAAA,QAAQ,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;iBAC5B;aACF;SACF;AACD,QAAA,IAAI,QAAQ,CAAC,MAAM,GAAG,CAAC,EAAE;AACvB,YAAA,GAAG,CAAC,QAAQ,GAAG,QAAQ,CAAC;SACzB;QACD,OAAO,GAAG,CAAC,kBAAkB,CAAC;AAC9B,QAAA,OAAO,GAAoB,CAAC;KAC7B;AACF,CAAA;AAED;;;;AAIG;AACG,MAAO,WAAY,SAAQ,MAAM,CAAA;AAErC,IAAA,WAAA,CAAY,YAAqD,EAAA;QAC/D,IAAI,YAAY,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;YACnC,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,sCAAsC,CACvC,CAAC;SACH;AACD,QAAA,KAAK,CAAC;AACJ,YAAA,GAAG,YAAY;YACf,IAAI,EAAE,SAAS;AAChB,SAAA,CAAC,CAAC;AACH,QAAA,IAAI,CAAC,KAAK,GAAG,YAAY,CAAC,KAAK,CAAC;KACjC;AAED;;AAEG;IACH,MAAM,GAAA;AACJ,QAAA,MAAM,GAAG,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;;AAE3B,QAAA,IAAI,IAAI,CAAC,KAAK,IAAI,KAAK,CAAC,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,EAAE;AAC3C,YAAA,GAAG,CAAC,KAAK,GAAI,IAAI,CAAC,KAAuB,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;SAChE;AACD,QAAA,OAAO,GAAG,CAAC;KACZ;AACF;;AC5VD;;;;;;;;;;;;;;;AAeG;AAIH;;;;;;;;;;;;;;;;AAgBG;MACU,iBAAiB,CAAA;AAU5B,IAAA,WAAA,GAAA;AACE,QAAA,IAAI,CAAC,QAAQ,GAAG,WAAW,CAAC;KAC7B;AAED;;;;;;;AAOG;IACH,OAAO,IAAI,CAAC,kBAA2B,EAAA;AACrC,QAAA,IACE,kBAAkB;aACjB,kBAAkB,GAAG,CAAC,IAAI,kBAAkB,GAAG,GAAG,CAAC,EACpD;AACA,YAAA,MAAM,CAAC,IAAI,CACT,uCAAuC,kBAAkB,CAAA,4CAAA,CAA8C,CACxG,CAAC;SACH;AACD,QAAA,OAAO,EAAE,QAAQ,EAAE,YAAY,EAAE,kBAAkB,EAAE,CAAC;KACvD;AAED;;;;;;AAMG;AACH,IAAA,OAAO,GAAG,GAAA;AACR,QAAA,OAAO,EAAE,QAAQ,EAAE,WAAW,EAAE,CAAC;KAClC;AACF;;AChFD;;;;;;;;;;;;;;;AAeG;AAcH,MAAM,wBAAwB,GAAG,KAAM,CAAC;AACxC,MAAM,yBAAyB,GAAG,KAAM,CAAC;AA
EzC,MAAM,oBAAoB,GAAG,iBAAiB,CAAC;AAE/C;;;;;;;;;AASG;AACH,MAAM,2BAA2B,GAAG,CAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;uBA6Cb,oBAAoB,CAAA;CAC1C,CAAC;AA2CF;;;;AAIG;MACU,uBAAuB,CAAA;AAiBlC,IAAA,WAAA,CACmB,WAAwB,EACxB,OAAsC,EACtC,IAAwB,EAAA;QAFxB,IAAW,CAAA,WAAA,GAAX,WAAW,CAAa;QACxB,IAAO,CAAA,OAAA,GAAP,OAAO,CAA+B;QACtC,IAAI,CAAA,IAAA,GAAJ,IAAI,CAAoB;;QAlBnC,IAAS,CAAA,SAAA,GAAG,KAAK,CAAC;;AAET,QAAA,IAAA,CAAA,YAAY,GAAG,IAAI,QAAQ,EAAQ,CAAC;;QAKpC,IAAa,CAAA,aAAA,GAAkB,EAAE,CAAC;;QAE3C,IAAgB,CAAA,gBAAA,GAA4B,EAAE,CAAC;;QAE/C,IAAa,CAAA,aAAA,GAAG,CAAC,CAAC;;QAElB,IAAqB,CAAA,qBAAA,GAAG,KAAK,CAAC;AAOpC,QAAA,IAAI,CAAC,WAAW,CAAC,cAAc,GAAG,IAAI,CAAC;;AAGvC,QAAA,IAAI,CAAC,kBAAkB,GAAG,IAAI,CAAC,cAAc,EAAE,CAAC,OAAO,CAAC,MACtD,IAAI,CAAC,OAAO,EAAE,CACf,CAAC;;;QAIF,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,SAAS,GAAG,KAAK,IAAG;AAC7C,YAAA,IAAI,IAAI,CAAC,SAAS,EAAE;gBAClB,OAAO;aACR;AAED,YAAA,MAAM,KAAK,GAAG,KAAK,CAAC,IAAkB,CAAC;YACvC,MAAM,MAAM,GAAG,IAAI,CACjB,MAAM,CAAC,YAAY,CAAC,KAAK,CACvB,IAAI,EACJ,KAAK,CAAC,IAAI,CAAC,IAAI,UAAU,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CACzC,CACF,CAAC;AAEF,YAAA,MAAM,KAAK,GAA0B;AACnC,gBAAA,QAAQ,EAAE,WAAW;AACrB,gBAAA,IAAI,EAAE,MAAM;aACb,CAAC;YACF,KAAK,IAAI,CAAC,WAAW,CAAC,iBAAiB,CAAC,KAAK,CAAC,CAAC;AACjD,SAAC,CAAC;KACH;AAED;;AAEG;AACH,IAAA,MAAM,IAAI,GAAA;AACR,QAAA,IAAI,IAAI,CAAC,SAAS,EAAE;YAClB,OAAO;SACR;AACD,QAAA,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC;AACtB,QAAA,IAAI,CAAC,YAAY,CAAC,OAAO,EAAE,CAAC;AAC5B,QAAA,MAAM,IAAI,CAAC,kBAAkB,CAAC;KAC/B;AAED;;;AAGG;IACK,OAAO,GAAA;AACb,QAAA,IAAI,CAAC,iBAAiB,EAAE,CAAC;QACzB,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC;AAC5C,QAAA,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,UAAU,EAAE,CAAC;AACnC,QAAA,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,CAAC;AAClC,QAAA,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,SAAS,EAAE,CAAC,OAAO,CAAC,KAAK,IAAI,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC;QACjE,IAAI,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,KAAK,KAAK,QAAQ,EAAE;YAC7C,KAAK,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,KAAK,EAAE,CAAC;SACrC;AACD,QAAA,IAAI,CAAC,WAAW,CAAC,cAAc,GAAG,KAAK,CAAC;KACzC;AAED;;AAEG;AACK,IAAA,cAAc,CAAC,SAAsB,EAAA;AAC3C,QAAA,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;;AAEnC,QAAA,KAAK,IAAI,CAAC,oBAAoB,EAAE,CAAC;KAClC;AAED;;;;AAIG;IACK,iBAAiB,GAAA;;;AAGvB,QAAA,CAAC,GAAG,IAAI,CAAC,gBAAgB,CAAC,CAAC,OAAO,CAAC,MAAM,IAAI,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;;AAG7D,QAAA,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,CAAC,CAAC;;QAG9B,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,CAAC;KACzD;AAED;;AAEG;AACK,IAAA,MAAM,oBAAoB,GAAA;AAChC,QAAA,IAAI,IAAI,CAAC,qBAAqB,EAAE;YAC9B,OAAO;SACR;AACD,QAAA,IAAI,CAAC,qBAAqB,GAAG,IAAI,CAAC;AAElC,QAAA,OAAO,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE;YACvD,MAAM,YAAY,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,EAAG,CAAC;AACjD,YAAA,IAAI;AACF,gBAAA,MAAM,KAAK,GAAG,IAAI,UAAU,CAAC,YAAY,CAAC,CAAC;AAC3C,gBAAA,MAAM,UAAU,GAAG,KAAK,CAAC,MAAM,CAAC;AAEhC,gBAAA,MAAM,WAAW,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,YAAY,CACrD,CAAC,EACD,UAAU,EACV,yBAAyB,CAC1B,CAAC;;gBAGF,MAAM,WAAW,GAAG,WAAW,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC;AAClD,gBAAA,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,CAAC,EAAE,EAAE;AACnC,oBAAA,WAAW,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC;iBACnC;gBAED,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,kBAAkB,EAAE,CAAC;AAC3D,gBAAA,MAAM,CAAC,MAAM,GAAG,WAAW,CAAC;gBAC5B,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,CAAC,CAAC;;AAGnD,gBAAA,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;AACnC,gBAAA,MAAM,CAAC,OAAO,GAAG,MAAK;AACpB,oBAAA,IAAI,CAAC,gBAAgB,GAAG,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAClD,CAAC,IAAI,CAAC,KAAK,MAAM,CAClB,CAAC;AACJ,iBAAC,CAAC;;;AAIF,gBAAA,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,GAAG,CAC3B,IAA
I,CAAC,IAAI,CAAC,YAAY,CAAC,WAAW,EAClC,IAAI,CAAC,aAAa,CACnB,CAAC;AACF,gBAAA,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC;;AAGjC,gBAAA,IAAI,CAAC,aAAa,IAAI,WAAW,CAAC,QAAQ,CAAC;aAC5C;YAAC,OAAO,CAAC,EAAE;AACV,gBAAA,MAAM,CAAC,KAAK,CAAC,sBAAsB,EAAE,CAAC,CAAC,CAAC;aACzC;SACF;AAED,QAAA,IAAI,CAAC,qBAAqB,GAAG,KAAK,CAAC;KACpC;AAED;;AAEG;AACK,IAAA,MAAM,cAAc,GAAA;QAC1B,MAAM,gBAAgB,GAAG,IAAI,CAAC,WAAW,CAAC,OAAO,EAAE,CAAC;AACpD,QAAA,OAAO,CAAC,IAAI,CAAC,SAAS,EAAE;AACtB,YAAA,MAAM,MAAM,GAAG,MAAM,OAAO,CAAC,IAAI,CAAC;gBAChC,gBAAgB,CAAC,IAAI,EAAE;gBACvB,IAAI,CAAC,YAAY,CAAC,OAAO;AAC1B,aAAA,CAAC,CAAC;YAEH,IAAI,IAAI,CAAC,SAAS,IAAI,CAAC,MAAM,IAAI,MAAM,CAAC,IAAI,EAAE;gBAC5C,MAAM;aACP;AAED,YAAA,MAAM,OAAO,GAAG,MAAM,CAAC,KAAK,CAAC;AAC7B,YAAA,IAAI,OAAO,CAAC,IAAI,KAAK,eAAe,EAAE;gBACpC,MAAM,aAAa,GAAG,OAA4B,CAAC;AACnD,gBAAA,IAAI,aAAa,CAAC,WAAW,EAAE;oBAC7B,IAAI,CAAC,iBAAiB,EAAE,CAAC;iBAC1B;gBAED,MAAM,SAAS,GAAG,aAAa,CAAC,SAAS,EAAE,KAAK,CAAC,IAAI,CAAC,IAAI,IACxD,IAAI,CAAC,UAAU,EAAE,QAAQ,CAAC,UAAU,CAAC,QAAQ,CAAC,CAC/C,CAAC;AACF,gBAAA,IAAI,SAAS,EAAE,UAAU,EAAE;AACzB,oBAAA,MAAM,SAAS,GAAG,UAAU,CAAC,IAAI,CAC/B,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,IAAI,CAAC,EAC/B,CAAC,IAAI,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,CACrB,CAAC,MAAM,CAAC;AACT,oBAAA,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,CAAC;iBAChC;aACF;AAAM,iBAAA,IAAI,OAAO,CAAC,IAAI,KAAK,UAAU,EAAE;AACtC,gBAAA,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,sBAAsB,EAAE;AACxC,oBAAA,MAAM,CAAC,IAAI,CACT,wHAAwH,CACzH,CAAC;iBACH;qBAAM;AACL,oBAAA,IAAI;AACF,wBAAA,MAAM,gBAAgB,GAAG,MAAM,IAAI,CAAC,OAAO,CAAC,sBAAsB,CAChE,OAAO,CAAC,aAAa,CACtB,CAAC;AACF,wBAAA,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE;4BACnB,KAAK,IAAI,CAAC,WAAW,CAAC,qBAAqB,CAAC,CAAC,gBAAgB,CAAC,CAAC,CAAC;yBACjE;qBACF;oBAAC,OAAO,CAAC,EAAE;AACV,wBAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,iCAAA,EAAqC,CAAW,CAAC,OAAO,CAAA,CAAE,CAC3D,CAAC;qBACH;iBACF;aACF;SACF;KACF;AACF,CAAA;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA6CG;AACI,eAAe,sBAAsB,CAC1C,WAAwB,EACxB,UAAyC,EAAE,EAAA;AAE3C,IAAA,IAAI,WAAW,CAAC,QAAQ,EAAE;QACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,cAAc,EAC1B,0DAA0D,CAC3D,CAAC;KACH;AAED,IAAA,IAAI,WAAW,CAAC,cAAc,EAAE;QAC9B,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,aAAa,EACzB,gEAAgE,CACjE,CAAC;KACH;;IAGD,IACE,OAAO,gBAAgB,KAAK,WAAW;QACvC,OAAO,YAAY,KAAK,WAAW;QACnC,OAAO,SAAS,KAAK,WAAW;AAChC,QAAA,CAAC,SAAS,CAAC,YAAY,EACvB;QACA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,WAAW,EACvB,kHAAkH,CACnH,CAAC;KACH;AAED,IAAA,IAAI,YAAsC,CAAC;AAC3C,IAAA,IAAI;;;AAGF,QAAA,YAAY,GAAG,IAAI,YAAY,EAAE,CAAC;AAClC,QAAA,IAAI,YAAY,CAAC,KAAK,KAAK,WAAW,EAAE;AACtC,YAAA,MAAM,YAAY,CAAC,MAAM,EAAE,CAAC;SAC7B;;;QAID,MAAM,WAAW,GAAG,MAAM,SAAS,CAAC,YAAY,CAAC,YAAY,CAAC;AAC5D,YAAA,KAAK,EAAE,IAAI;AACZ,SAAA,CAAC,CAAC;;;QAIH,MAAM,WAAW,GAAG,IAAI,IAAI,CAAC,CAAC,2BAA2B,CAAC,EAAE;AAC1D,YAAA,IAAI,EAAE,wBAAwB;AAC/B,SAAA,CAAC,CAAC;QACH,MAAM,UAAU,GAAG,GAAG,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC;QACpD,MAAM,YAAY,CAAC,YAAY,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC;;QAGtD,MAAM,UAAU,GAAG,YAAY,CAAC,uBAAuB,CAAC,WAAW,CAAC,CAAC;QACrE,MAAM,WAAW,GAAG,IAAI,gBAAgB,CACtC,YAAY,EACZ,oBAAoB,EACpB;AACE,YAAA,gBAAgB,EAAE,EAAE,gBAAgB,EAAE,wBAAwB,EAAE;AACjE,SAAA,CACF,CAAC;AACF,QAAA,UAAU,CAAC,OAAO,CAAC,WAAW,CAAC,CAAC;;QAGhC,MAAM,MAAM,GAAG,IAAI,uBAAuB,CAAC,WAAW,EAAE,OAAO,EAAE;YAC/D,YAAY;YACZ,WAAW;YACX,UAAU;YACV,WAAW;AACZ,SAAA,CAAC,CAAC;QAEH,OAAO,EAAE,IAAI,EAAE,MAAM,MAAM,CAAC,IAAI,EAAE,EAAE,CAAC;KACtC;IAAC,OAAO,CAAC,EAAE;;QAEV,IAAI,YAAY,IAAI,YAAY,CAAC,KAAK,KAAK,QAAQ,EAAE;AACnD,YAAA,KAAK,YAAY,CAAC,KAAK,EAAE,CAAC;SAC3B;;;QAID,IAAI,CAAC,YAAY,OAAO,IAAI,CAAC,YAAY,YAAY,EAAE;AACrD,YAAA,MAAM,CAAC,CAAC;SACT;;AAGD,QAAA,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,CAAA,sCAAA,EAA0C,CAAW,CAAC,OAAO,CAAA,CAAE,CAChE,CAAC;KACH;AACH;
;AChfA;;;;;;;;;;;;;;;AAeG;AA6CH;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2BG;SACa,KAAK,CAAC,MAAmB,MAAM,EAAE,EAAE,OAAmB,EAAA;AACpE,IAAA,GAAG,GAAG,kBAAkB,CAAC,GAAG,CAAC,CAAC;;IAE9B,MAAM,UAAU,GAAmB,YAAY,CAAC,GAAG,EAAE,OAAO,CAAC,CAAC;IAE9D,MAAM,OAAO,GAAG,OAAO,EAAE,OAAO,IAAI,IAAI,eAAe,EAAE,CAAC;AAE1D,IAAA,MAAM,YAAY,GAA+B;AAC/C,QAAA,2BAA2B,EAAE,OAAO,EAAE,2BAA2B,IAAI,KAAK;KAC3E,CAAC;AAEF,IAAA,MAAM,UAAU,GAAG,wBAAwB,CAAC,OAAO,CAAC,CAAC;AACrD,IAAA,MAAM,UAAU,GAAG,UAAU,CAAC,YAAY,CAAC;QACzC,UAAU;AACX,KAAA,CAAC,CAAC;AAEH,IAAA,UAAU,CAAC,OAAO,GAAG,YAAY,CAAC;AAElC,IAAA,OAAO,UAAU,CAAC;AACpB,CAAC;AAED;;;;;AAKG;SACa,kBAAkB,CAChC,EAAM,EACN,WAAuC,EACvC,cAA+B,EAAA;;IAG/B,MAAM,YAAY,GAAG,WAA2B,CAAC;AACjD,IAAA,IAAI,aAA0B,CAAC;AAC/B,IAAA,IAAI,YAAY,CAAC,IAAI,EAAE;AACrB,QAAA,aAAa,GAAG,YAAY,CAAC,aAAa,IAAI;AAC5C,YAAA,KAAK,EAAE,6BAA6B;SACrC,CAAC;KACH;SAAM;QACL,aAAa,GAAG,WAA0B,CAAC;KAC5C;AAED,IAAA,IAAI,CAAC,aAAa,CAAC,KAAK,EAAE;QACxB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAoF,kFAAA,CAAA,CACrF,CAAC;KACH;AAED;;;AAGG;AACH,IAAA,MAAM,aAAa,GAAI,EAAgB,CAAC,oBAAoB,GAC1D,YAAY,CAAC,IAAI,EACjB,OAAO,MAAM,KAAK,WAAW,GAAG,SAAS,GAAG,MAAM,EAClD,YAAY,CAAC,cAAc,CAC5B,CAAC;IAEF,OAAO,IAAI,eAAe,CAAC,EAAE,EAAE,aAAa,EAAE,cAAc,EAAE,aAAa,CAAC,CAAC;AAC/E,CAAC;AAED;;;;;;;;;;;;;AAaG;SACa,cAAc,CAC5B,EAAM,EACN,WAA8B,EAC9B,cAA+B,EAAA;AAE/B,IAAA,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE;QACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAgF,8EAAA,CAAA,CACjF,CAAC;KACH;IACD,OAAO,IAAI,WAAW,CAAC,EAAE,EAAE,WAAW,EAAE,cAAc,CAAC,CAAC;AAC1D,CAAC;AAED;;;;;;;;;;;AAWG;AACa,SAAA,sBAAsB,CACpC,EAAM,EACN,WAA4B,EAAA;AAE5B,IAAA,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE;QACtB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,QAAQ,EACpB,CAAuH,qHAAA,CAAA,CACxH,CAAC;KACH;AACD,IAAA,MAAM,gBAAgB,GAAG,IAAI,oBAAoB,EAAE,CAAC;IACpD,OAAO,IAAI,mBAAmB,CAAC,EAAE,EAAE,WAAW,EAAE,gBAAgB,CAAC,CAAC;AACpE;;AC3MA;;;;AAIG;AA4BH,SAAS,UAAU,GAAA;AACjB,IAAA,kBAAkB,CAChB,IAAI,SAAS,CACX,OAAO,EACP,CAAC,SAAS,EAAE,EAAE,kBAAkB,EAAE,KAAI;QACpC,IAAI,CAAC,kBAAkB,EAAE;YACvB,MAAM,IAAI,OAAO,CACf,WAAW,CAAC,KAAK,EACjB,6CAA6C,CAC9C,CAAC;SACH;AAED,QAAA,MAAM,OAAO,GAAG,wBAAwB,CAAC,kBAAkB,CAAC,CAAC;;QAG7D,MAAM,GAAG,GAAG,SAAS,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC,YAAY,EAAE,CAAC;QACxD,MAAM,IAAI,GAAG,SAAS,CAAC,WAAW,CAAC,eAAe,CAAC,CAAC;QACpD,MAAM,gBAAgB,GAAG,SAAS,CAAC,WAAW,CAAC,oBAAoB,CAAC,CAAC;QACrE,OAAO,IAAI,SAAS,CAAC,GAAG,EAAE,OAAO,EAAE,IAAI,EAAE,gBAAgB,CAAC,CAAC;AAC7D,KAAC,sCAEF,CAAC,oBAAoB,CAAC,IAAI,CAAC,CAC7B,CAAC;AAEF,IAAA,eAAe,CAAC,IAAI,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;;AAEvC,IAAA,eAAe,CAAC,IAAI,EAAE,OAAO,EAAE,SAAkB,CAAC,CAAC;AACrD,CAAC;AAED,UAAU,EAAE;;;;"}
\ No newline at end of file diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/api.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/api.d.ts new file mode 100644 index 0000000..491268b --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/api.d.ts @@ -0,0 +1,99 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { FirebaseApp } from '@firebase/app'; +import { AI_TYPE } from './constants'; +import { AIService } from './service'; +import { AI, AIOptions } from './public-types'; +import { ImagenModelParams, HybridParams, ModelParams, RequestOptions, LiveModelParams } from './types'; +import { AIError } from './errors'; +import { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel } from './models'; +export { ChatSession } from './methods/chat-session'; +export { LiveSession } from './methods/live-session'; +export * from './requests/schema-builder'; +export { ImagenImageFormat } from './requests/imagen-image-format'; +export { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, AIError }; +export { Backend, VertexAIBackend, GoogleAIBackend } from './backend'; +export { startAudioConversation, AudioConversationController, StartAudioConversationOptions } from './methods/live-session-helpers'; +declare module '@firebase/component' { + interface NameServiceMapping { + [AI_TYPE]: AIService; + } +} +/** + * Returns the default {@link AI} instance that is associated with the provided + * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the + * default settings. + * + * @example + * ```javascript + * const ai = getAI(app); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Gemini Developer API (via Google AI). + * const ai = getAI(app, { backend: new GoogleAIBackend() }); + * ``` + * + * @example + * ```javascript + * // Get an AI instance configured to use the Vertex AI Gemini API. + * const ai = getAI(app, { backend: new VertexAIBackend() }); + * ``` + * + * @param app - The {@link @firebase/app#FirebaseApp} to use. + * @param options - {@link AIOptions} that configure the AI instance. + * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}. + * + * @public + */ +export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI; +/** + * Returns a {@link GenerativeModel} class with methods for inference + * and other functionality. + * + * @public + */ +export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel; +/** + * Returns an {@link ImagenModel} class with methods for using Imagen. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when making Imagen requests. + * @param requestOptions - Additional options to use when making requests. 
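+ *
+ * @example
+ * A minimal usage sketch, not a definitive recipe; `app` is assumed to be an initialized
+ * FirebaseApp, and the model name `imagen-3.0-generate-002` is an illustrative stand-in for
+ * any supported `imagen-3.0-*` model.
+ * ```javascript
+ * const ai = getAI(app, { backend: new GoogleAIBackend() });
+ * // Create an Imagen model instance using an Imagen 3 model name.
+ * const imagenModel = getImagenModel(ai, { model: 'imagen-3.0-generate-002' });
+ * // Request images for a text prompt.
+ * const response = await imagenModel.generateImages('A watercolor of a lighthouse at dusk');
+ * ```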
+ * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @public + */ +export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; +/** + * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication. + * + * The Live API is only supported in modern browser windows and Node >= 22. + * + * @param ai - An {@link AI} instance. + * @param modelParams - Parameters to use when setting up a {@link LiveSession}. + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @beta + */ +export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/backend.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/backend.d.ts new file mode 100644 index 0000000..2a1e9e6 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/backend.d.ts @@ -0,0 +1,74 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { BackendType } from './public-types'; +/** + * Abstract base class representing the configuration for an AI service backend. + * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for + * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and + * {@link VertexAIBackend} for the Vertex AI Gemini API. + * + * @public + */ +export declare abstract class Backend { + /** + * Specifies the backend type. + */ + readonly backendType: BackendType; + /** + * Protected constructor for use by subclasses. + * @param type - The backend type. + */ + protected constructor(type: BackendType); +} +/** + * Configuration class for the Gemini Developer API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Gemini Developer API as the backend. + * + * @public + */ +export declare class GoogleAIBackend extends Backend { + /** + * Creates a configuration object for the Gemini Developer API backend. + */ + constructor(); +} +/** + * Configuration class for the Vertex AI Gemini API. + * + * Use this with {@link AIOptions} when initializing the AI service via + * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend. + * + * @public + */ +export declare class VertexAIBackend extends Backend { + /** + * The region identifier. + * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + readonly location: string; + /** + * Creates a configuration object for the Vertex AI backend. + * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations} + * for a list of supported locations. 
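+ *
+ * @example
+ * A short sketch of selecting this backend at initialization; the region and model name
+ * shown are illustrative, and omitting the region falls back to the `us-central1` default.
+ * ```javascript
+ * const ai = getAI(app, { backend: new VertexAIBackend('europe-west1') });
+ * const model = getGenerativeModel(ai, { model: 'gemini-2.0-flash' });
+ * ```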
+ */ + constructor(location?: string); +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/constants.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/constants.d.ts new file mode 100644 index 0000000..9d89d40 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/constants.d.ts @@ -0,0 +1,27 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +export declare const AI_TYPE = "AI"; +export declare const DEFAULT_LOCATION = "us-central1"; +export declare const DEFAULT_DOMAIN = "firebasevertexai.googleapis.com"; +export declare const DEFAULT_API_VERSION = "v1beta"; +export declare const PACKAGE_VERSION: string; +export declare const LANGUAGE_TAG = "gl-js"; +export declare const DEFAULT_FETCH_TIMEOUT_MS: number; +/** + * Defines the name of the default in-cloud model to use for hybrid inference. + */ +export declare const DEFAULT_HYBRID_IN_CLOUD_MODEL = "gemini-2.0-flash-lite"; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/errors.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/errors.d.ts new file mode 100644 index 0000000..cb0a0fe --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/errors.d.ts @@ -0,0 +1,35 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { FirebaseError } from '@firebase/util'; +import { AIErrorCode, CustomErrorData } from './types'; +/** + * Error class for the Firebase AI SDK. + * + * @public + */ +export declare class AIError extends FirebaseError { + readonly code: AIErrorCode; + readonly customErrorData?: CustomErrorData | undefined; + /** + * Constructs a new instance of the `AIError` class. + * + * @param code - The error code from {@link (AIErrorCode:type)}. + * @param message - A human-readable message describing the error. + * @param customErrorData - Optional error data. + */ + constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/factory-browser.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/factory-browser.d.ts new file mode 100644 index 0000000..4dd134a --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/factory-browser.d.ts @@ -0,0 +1,19 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { ComponentContainer, InstanceFactoryOptions } from '@firebase/component'; +import { AIService } from './service'; +export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/googleai-mappers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/googleai-mappers.d.ts new file mode 100644 index 0000000..ae6a19d --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/googleai-mappers.d.ts @@ -0,0 +1,73 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { CountTokensRequest, GenerateContentCandidate, GenerateContentRequest, GenerateContentResponse, PromptFeedback } from './types'; +import { GoogleAIGenerateContentResponse, GoogleAIGenerateContentCandidate, GoogleAICountTokensRequest } from './types/googleai'; +/** + * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI). + * The public API prioritizes the format used by the Vertex AI Gemini API. + * We avoid having two sets of types by translating requests and responses between the two API formats. + * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API + * with minimal code changes. + * + * In here are functions that map requests and responses between the two API formats. + * Requests in the Vertex AI format are mapped to the Google AI format before being sent. + * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user. + */ +/** + * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI. + * + * @param generateContentRequest The {@link GenerateContentRequest} to map. + * @returns A {@link GenerateContentResponse} that conforms to the Google AI format. + * + * @throws If the request contains properties that are unsupported by Google AI. + * + * @internal + */ +export declare function mapGenerateContentRequest(generateContentRequest: GenerateContentRequest): GenerateContentRequest; +/** + * Maps a {@link GenerateContentResponse} from Google AI to the format of the + * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API. + * + * @param googleAIResponse The {@link GenerateContentResponse} from Google AI. + * @returns A {@link GenerateContentResponse} that conforms to the public API's format. 
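+ *
+ * @example
+ * Because of this mapping, the same calling code can target either backend; a sketch in which
+ * only the backend passed to `getAI()` changes (the model name is an illustrative placeholder):
+ * ```javascript
+ * // Requests are translated to the Google AI format before being sent, and responses are
+ * // translated back to the Vertex AI format before being returned to the caller.
+ * const ai = getAI(app, { backend: new GoogleAIBackend() });
+ * const model = getGenerativeModel(ai, { model: 'gemini-2.0-flash' });
+ * const result = await model.generateContent('Summarize this sentence.');
+ * ```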
+ * + * @internal + */ +export declare function mapGenerateContentResponse(googleAIResponse: GoogleAIGenerateContentResponse): GenerateContentResponse; +/** + * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI. + * + * @param countTokensRequest The {@link CountTokensRequest} to map. + * @param model The model to count tokens with. + * @returns A {@link CountTokensRequest} that conforms to the Google AI format. + * + * @internal + */ +export declare function mapCountTokensRequest(countTokensRequest: CountTokensRequest, model: string): GoogleAICountTokensRequest; +/** + * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms + * to the Vertex AI API format. + * + * @param candidates The {@link GoogleAIGenerateContentCandidate} to map. + * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format. + * + * @throws If any {@link Part} in the candidates has a `videoMetadata` property. + * + * @internal + */ +export declare function mapGenerateContentCandidates(candidates: GoogleAIGenerateContentCandidate[]): GenerateContentCandidate[]; +export declare function mapPromptFeedback(promptFeedback: PromptFeedback): PromptFeedback; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/helpers.d.ts new file mode 100644 index 0000000..705ffec --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/helpers.d.ts @@ -0,0 +1,30 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Backend } from './backend'; +/** + * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI} + * instances by backend type. + * + * @internal + */ +export declare function encodeInstanceIdentifier(backend: Backend): string; +/** + * Decodes an instance identifier string into a {@link Backend}. + * + * @internal + */ +export declare function decodeInstanceIdentifier(instanceIdentifier: string): Backend; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/index.d.ts new file mode 100644 index 0000000..a377500 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/index.d.ts @@ -0,0 +1,13 @@ +/** + * The Firebase AI Web SDK. + * + * @packageDocumentation + */ +import { LanguageModel } from './types/language-model'; +declare global { + interface Window { + LanguageModel: LanguageModel; + } +} +export * from './api'; +export * from './public-types'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/index.node.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/index.node.d.ts new file mode 100644 index 0000000..e96f4c5 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/index.node.d.ts @@ -0,0 +1,7 @@ +/** + * The Firebase AI Web SDK. 
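+ *
+ * @example
+ * A minimal sketch of importing the public entry points; the specifier assumes this package
+ * is consumed directly as `@firebase/ai` (apps using the umbrella SDK typically import from
+ * `firebase/ai` instead).
+ * ```javascript
+ * import { getAI, getGenerativeModel, GoogleAIBackend } from '@firebase/ai';
+ * ```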
+ * + * @packageDocumentation + */ +export * from './api'; +export * from './public-types'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/logger.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/logger.d.ts new file mode 100644 index 0000000..5991ed1 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/logger.d.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Logger } from '@firebase/logger'; +export declare const logger: Logger; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/methods/chat-session-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/methods/chat-session-helpers.d.ts new file mode 100644 index 0000000..65e4eef --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/methods/chat-session-helpers.d.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content } from '../types'; +export declare function validateChatHistory(history: Content[]): void; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/methods/chat-session.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/methods/chat-session.d.ts new file mode 100644 index 0000000..2f2557a --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/methods/chat-session.d.ts @@ -0,0 +1,52 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, GenerateContentResult, GenerateContentStreamResult, Part, RequestOptions, StartChatParams } from '../types'; +import { ApiSettings } from '../types/internal'; +import { ChromeAdapter } from '../types/chrome-adapter'; +/** + * ChatSession class that enables sending chat messages and stores + * history of sent and received messages so far. 
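+ *
+ * @example
+ * A rough multi-turn sketch; `ai` is assumed to come from `getAI()`, the session is created
+ * via the model's `startChat()` method, and the model name is an illustrative placeholder.
+ * ```javascript
+ * const model = getGenerativeModel(ai, { model: 'gemini-2.0-flash' });
+ * const chat = model.startChat();
+ * const result = await chat.sendMessage('Hello, how are you?');
+ * console.log(result.response.text());
+ * // Blocked prompts and blocked candidates are not added to the stored history.
+ * const history = await chat.getHistory();
+ * ```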
+ * + * @public + */ +export declare class ChatSession { + model: string; + private chromeAdapter?; + params?: StartChatParams | undefined; + requestOptions?: RequestOptions | undefined; + private _apiSettings; + private _history; + private _sendPromise; + constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); + /** + * Gets the chat history so far. Blocked prompts are not added to history. + * Neither blocked candidates nor the prompts that generated them are added + * to history. + */ + getHistory(): Promise<Content[]>; + /** + * Sends a chat message and receives a non-streaming + * {@link GenerateContentResult} + */ + sendMessage(request: string | Array<string | Part>): Promise<GenerateContentResult>; + /** + * Sends a chat message and receives the response as a + * {@link GenerateContentStreamResult} containing an iterable stream + * and a response promise. + */ + sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/methods/chrome-adapter.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/methods/chrome-adapter.d.ts new file mode 100644 index 0000000..5bd0a99 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/methods/chrome-adapter.d.ts @@ -0,0 +1,124 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { CountTokensRequest, GenerateContentRequest, InferenceMode, OnDeviceParams } from '../types'; +import { ChromeAdapter } from '../types/chrome-adapter'; +import { LanguageModel } from '../types/language-model'; +/** + * Defines an inference "backend" that uses Chrome's on-device model, + * and encapsulates logic for detecting when on-device inference is + * possible. + */ +export declare class ChromeAdapterImpl implements ChromeAdapter { + languageModelProvider: LanguageModel; + mode: InferenceMode; + static SUPPORTED_MIME_TYPES: string[]; + private isDownloading; + private downloadPromise; + private oldSession; + onDeviceParams: OnDeviceParams; + constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams); + /** + * Checks if a given request can be made on-device. + * + * Encapsulates a few concerns: + * the mode + * API existence + * prompt formatting + * model availability, including triggering download if necessary + * + * + * Pros: callers needn't be concerned with details of on-device availability.</p> + * Cons: this method spans a few concerns and splits request validation from usage. + * If instance variables weren't already part of the API, we could consider a better + * separation of concerns. + */ + isAvailable(request: GenerateContentRequest): Promise<boolean>; + /** + * Generates content on device. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContent} for generating content in + * Cloud. 
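+ *
+ * @example
+ * A rough sketch of how a caller might drive the adapter; the inference mode string and the
+ * request shape are assumptions for illustration only.
+ * ```javascript
+ * const adapter = chromeAdapterFactory('prefer_on_device', window);
+ * const request = { contents: [{ role: 'user', parts: [{ text: 'Hello' }] }] };
+ * if (adapter && (await adapter.isAvailable(request))) {
+ *   // The result is a standard Response, so common formatting can be reused.
+ *   const response = await adapter.generateContent(request);
+ *   console.log(await response.json());
+ * }
+ * ```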
+ * @param request - a standard Firebase AI {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + generateContent(request: GenerateContentRequest): Promise<Response>; + /** + * Generates content stream on device. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in + * Cloud. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + generateContentStream(request: GenerateContentRequest): Promise<Response>; + countTokens(_request: CountTokensRequest): Promise<Response>; + /** + * Asserts inference for the given request can be performed by an on-device model. + */ + private static isOnDeviceRequest; + /** + * Encapsulates logic to get availability and download a model if one is downloadable. + */ + private downloadIfAvailable; + /** + * Triggers out-of-band download of an on-device model. + * + * Chrome only downloads models as needed. Chrome knows a model is needed when code calls + * LanguageModel.create. + * + * Since Chrome manages the download, the SDK can only avoid redundant download requests by + * tracking if a download has previously been requested. + */ + private download; + /** + * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object. + */ + private static toLanguageModelMessage; + /** + * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object. + */ + private static toLanguageModelMessageContent; + /** + * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string. + */ + private static toLanguageModelMessageRole; + /** + * Abstracts Chrome session creation. + * + * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all + * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all + * inference. + * + * Chrome will remove a model from memory if it's no longer in use, so this method ensures a + * new session is created before an old session is destroyed. + */ + private createSession; + /** + * Formats string returned by Chrome as a {@link Response} returned by Firebase AI. + */ + private static toResponse; + /** + * Formats string stream returned by Chrome as SSE returned by Firebase AI. + */ + private static toStreamResponse; +} +/** + * Creates a ChromeAdapterImpl on demand. + */ +export declare function chromeAdapterFactory(mode: InferenceMode, window?: Window, params?: OnDeviceParams): ChromeAdapterImpl | undefined; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/methods/count-tokens.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/methods/count-tokens.d.ts new file mode 100644 index 0000000..9f94f86 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/methods/count-tokens.d.ts @@ -0,0 +1,21 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { CountTokensRequest, CountTokensResponse, RequestOptions } from '../types'; +import { ApiSettings } from '../types/internal'; +import { ChromeAdapter } from '../types/chrome-adapter'; +export declare function countTokensOnCloud(apiSettings: ApiSettings, model: string, params: CountTokensRequest, requestOptions?: RequestOptions): Promise<CountTokensResponse>; +export declare function countTokens(apiSettings: ApiSettings, model: string, params: CountTokensRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<CountTokensResponse>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/methods/generate-content.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/methods/generate-content.d.ts new file mode 100644 index 0000000..96493bd --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/methods/generate-content.d.ts @@ -0,0 +1,21 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { GenerateContentRequest, GenerateContentResult, GenerateContentStreamResult, RequestOptions } from '../types'; +import { ApiSettings } from '../types/internal'; +import { ChromeAdapter } from '../types/chrome-adapter'; +export declare function generateContentStream(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<GenerateContentStreamResult>; +export declare function generateContent(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<GenerateContentResult>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/methods/live-session-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/methods/live-session-helpers.d.ts new file mode 100644 index 0000000..c6f8dea --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/methods/live-session-helpers.d.ts @@ -0,0 +1,154 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { FunctionCall, FunctionResponse } from '../types'; +import { LiveSession } from './live-session'; +/** + * A controller for managing an active audio conversation. 
+ * + * @beta + */ +export interface AudioConversationController { + /** + * Stops the audio conversation, closes the microphone connection, and + * cleans up resources. Returns a promise that resolves when cleanup is complete. + */ + stop: () => Promise<void>; +} +/** + * Options for {@link startAudioConversation}. + * + * @beta + */ +export interface StartAudioConversationOptions { + /** + * An async handler that is called when the model requests a function to be executed. + * The handler should perform the function call and return the result as a `Part`, + * which will then be sent back to the model. + */ + functionCallingHandler?: (functionCalls: FunctionCall[]) => Promise<FunctionResponse>; +} +/** + * Dependencies needed by the {@link AudioConversationRunner}. + * + * @internal + */ +interface RunnerDependencies { + audioContext: AudioContext; + mediaStream: MediaStream; + sourceNode: MediaStreamAudioSourceNode; + workletNode: AudioWorkletNode; +} +/** + * Encapsulates the core logic of an audio conversation. + * + * @internal + */ +export declare class AudioConversationRunner { + private readonly liveSession; + private readonly options; + private readonly deps; + /** A flag to indicate if the conversation has been stopped. */ + private isStopped; + /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */ + private readonly stopDeferred; + /** A promise that tracks the lifecycle of the main `runReceiveLoop`. */ + private readonly receiveLoopPromise; + /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */ + private readonly playbackQueue; + /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */ + private scheduledSources; + /** A high-precision timeline pointer for scheduling gapless audio playback. */ + private nextStartTime; + /** A mutex to prevent the playback processing loop from running multiple times concurrently. */ + private isPlaybackLoopRunning; + constructor(liveSession: LiveSession, options: StartAudioConversationOptions, deps: RunnerDependencies); + /** + * Stops the conversation and unblocks the main receive loop. + */ + stop(): Promise<void>; + /** + * Cleans up all audio resources (nodes, stream tracks, context) and marks the + * session as no longer in a conversation. + */ + private cleanup; + /** + * Adds audio data to the queue and ensures the playback loop is running. + */ + private enqueueAndPlay; + /** + * Stops all current and pending audio playback and clears the queue. This is + * called when the server indicates the model's speech was interrupted with + * `LiveServerContent.modelTurn.interrupted`. + */ + private interruptPlayback; + /** + * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence. + */ + private processPlaybackQueue; + /** + * The main loop that listens for and processes messages from the server. + */ + private runReceiveLoop; +} +/** + * Starts a real-time, bidirectional audio conversation with the model. This helper function manages + * the complexities of microphone access, audio recording, playback, and interruptions. + * + * @remarks Important: This function must be called in response to a user gesture + * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}. 
+ * + * @example + * ```javascript + * const liveSession = await model.connect(); + * let conversationController; + * + * // This function must be called from within a click handler. + * async function startConversation() { + * try { + * conversationController = await startAudioConversation(liveSession); + * } catch (e) { + * // Handle AI-specific errors + * if (e instanceof AIError) { + * console.error("AI Error:", e.message); + * } + * // Handle microphone permission and hardware errors + * else if (e instanceof DOMException) { + * console.error("Microphone Error:", e.message); + * } + * // Handle other unexpected errors + * else { + * console.error("An unexpected error occurred:", e); + * } + * } + * } + * + * // Later, to stop the conversation: + * // if (conversationController) { + * // await conversationController.stop(); + * // } + * ``` + * + * @param liveSession - An active {@link LiveSession} instance. + * @param options - Configuration options for the audio conversation. + * @returns A `Promise` that resolves with an {@link AudioConversationController}. + * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`). + * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions. + * + * @beta + */ +export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>; +export {}; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/methods/live-session.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/methods/live-session.d.ts new file mode 100644 index 0000000..92ecbe5 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/methods/live-session.d.ts @@ -0,0 +1,154 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { FunctionResponse, GenerativeContentBlob, LiveServerContent, LiveServerToolCall, LiveServerToolCallCancellation, Part } from '../public-types'; +import { WebSocketHandler } from '../websocket'; +/** + * Represents an active, real-time, bidirectional conversation with the model. + * + * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}. + * + * @beta + */ +export declare class LiveSession { + private webSocketHandler; + private serverMessages; + /** + * Indicates whether this Live session is closed. + * + * @beta + */ + isClosed: boolean; + /** + * Indicates whether this Live session is being controlled by an `AudioConversationController`. 
+ * + * @beta + */ + inConversation: boolean; + /** + * @internal + */ + constructor(webSocketHandler: WebSocketHandler, serverMessages: AsyncGenerator<unknown>); + /** + * Sends content to the server. + * + * @param request - The message to send to the model. + * @param turnComplete - Indicates if the turn is complete. Defaults to false. + * @throws If this session has been closed. + * + * @beta + */ + send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>; + /** + * Sends text to the server in realtime. + * + * @example + * ```javascript + * liveSession.sendTextRealtime("Hello, how are you?"); + * ``` + * + * @param text - The text data to send. + * @throws If this session has been closed. + * + * @beta + */ + sendTextRealtime(text: string): Promise<void>; + /** + * Sends audio data to the server in realtime. + * + * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz + * little-endian. + * + * @example + * ```javascript + * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian. + * const blob = { mimeType: "audio/pcm", data: pcmData }; + * liveSession.sendAudioRealtime(blob); + * ``` + * + * @param blob - The base64-encoded PCM data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + sendAudioRealtime(blob: GenerativeContentBlob): Promise<void>; + /** + * Sends video data to the server in realtime. + * + * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It + * is recommended to set `mimeType` to `image/jpeg`. + * + * @example + * ```javascript + * // const videoFrame = ... base64-encoded JPEG data + * const blob = { mimeType: "image/jpeg", data: videoFrame }; + * liveSession.sendVideoRealtime(blob); + * ``` + * @param blob - The base64-encoded video data to send to the server in realtime. + * @throws If this session has been closed. + * + * @beta + */ + sendVideoRealtime(blob: GenerativeContentBlob): Promise<void>; + /** + * Sends function responses to the server. + * + * @param functionResponses - The function responses to send. + * @throws If this session has been closed. + * + * @beta + */ + sendFunctionResponses(functionResponses: FunctionResponse[]): Promise<void>; + /** + * Yields messages received from the server. + * This can only be used by one consumer at a time. + * + * @returns An `AsyncGenerator` that yields server messages as they arrive. + * @throws If the session is already closed, or if we receive a response that we don't support. + * + * @beta + */ + receive(): AsyncGenerator<LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation>; + /** + * Closes this session. + * All methods on this session will throw an error once this resolves. + * + * @beta + */ + close(): Promise<void>; + /** + * Sends realtime input to the server. + * + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * @param mediaChunks - The media chunks to send. + * @throws If this session has been closed. + * + * @beta + */ + sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>; + /** + * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead. + * + * Sends a stream of {@link GenerativeContentBlob}. + * + * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send. + * @throws If this session has been closed. 
+ * + * @beta + */ + sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/models/ai-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/models/ai-model.d.ts new file mode 100644 index 0000000..2d5462b --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/models/ai-model.d.ts @@ -0,0 +1,72 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { AI, BackendType } from '../public-types'; +import { ApiSettings } from '../types/internal'; +/** + * Base class for Firebase AI model APIs. + * + * Instances of this class are associated with a specific Firebase AI {@link Backend} + * and provide methods for interacting with the configured generative model. + * + * @public + */ +export declare abstract class AIModel { + /** + * The fully qualified model resource name to use for generating images + * (for example, `publishers/google/models/imagen-3.0-generate-002`). + */ + readonly model: string; + /** + * @internal + */ + _apiSettings: ApiSettings; + /** + * Constructs a new instance of the {@link AIModel} class. + * + * This constructor should only be called from subclasses that provide + * a model API. + * + * @param ai - an {@link AI} instance. + * @param modelName - The name of the model being used. It can be in one of the following formats: + * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) + * - `models/my-model` (will resolve to `publishers/google/models/my-model`) + * - `publishers/my-publisher/models/my-model` (fully qualified model name) + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + * + * @internal + */ + protected constructor(ai: AI, modelName: string); + /** + * Normalizes the given model name to a fully qualified model resource name. + * + * @param modelName - The model name to normalize. + * @returns The fully qualified model resource name. + * + * @internal + */ + static normalizeModelName(modelName: string, backendType: BackendType): string; + /** + * @internal + */ + private static normalizeGoogleAIModelName; + /** + * @internal + */ + private static normalizeVertexAIModelName; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/models/generative-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/models/generative-model.d.ts new file mode 100644 index 0000000..87fd067 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/models/generative-model.d.ts @@ -0,0 +1,56 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, CountTokensRequest, CountTokensResponse, GenerateContentRequest, GenerateContentResult, GenerateContentStreamResult, GenerationConfig, ModelParams, Part, RequestOptions, SafetySetting, StartChatParams, Tool, ToolConfig } from '../types'; +import { ChatSession } from '../methods/chat-session'; +import { AI } from '../public-types'; +import { AIModel } from './ai-model'; +import { ChromeAdapter } from '../types/chrome-adapter'; +/** + * Class for generative model APIs. + * @public + */ +export declare class GenerativeModel extends AIModel { + private chromeAdapter?; + generationConfig: GenerationConfig; + safetySettings: SafetySetting[]; + requestOptions?: RequestOptions; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: Content; + constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined); + /** + * Makes a single non-streaming call to the model + * and returns an object containing a single {@link GenerateContentResponse}. + */ + generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>; + /** + * Makes a single streaming call to the model + * and returns an object containing an iterable stream that iterates + * over all chunks in the streaming response as well as + * a promise that returns the final aggregated response. + */ + generateContentStream(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentStreamResult>; + /** + * Gets a new {@link ChatSession} instance which can be used for + * multi-turn chats. + */ + startChat(startChatParams?: StartChatParams): ChatSession; + /** + * Counts the tokens in the provided request. + */ + countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/models/imagen-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/models/imagen-model.d.ts new file mode 100644 index 0000000..699f2a2 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/models/imagen-model.d.ts @@ -0,0 +1,102 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { AI } from '../public-types'; +import { ImagenGCSImage, ImagenGenerationConfig, ImagenInlineImage, RequestOptions, ImagenModelParams, ImagenGenerationResponse, ImagenSafetySettings } from '../types'; +import { AIModel } from './ai-model'; +/** + * Class for Imagen model APIs. 
+ * + * This class provides methods for generating images using the Imagen model. + * + * @example + * ```javascript + * const imagen = new ImagenModel( + * ai, + * { + * model: 'imagen-3.0-generate-002' + * } + * ); + * + * const response = await imagen.generateImages('A photo of a cat'); + * if (response.images.length > 0) { + * console.log(response.images[0].bytesBase64Encoded); + * } + * ``` + * + * @public + */ +export declare class ImagenModel extends AIModel { + requestOptions?: RequestOptions | undefined; + /** + * The Imagen generation configuration. + */ + generationConfig?: ImagenGenerationConfig; + /** + * Safety settings for filtering inappropriate content. + */ + safetySettings?: ImagenSafetySettings; + /** + * Constructs a new instance of the {@link ImagenModel} class. + * + * @param ai - an {@link AI} instance. + * @param modelParams - Parameters to use when making requests to Imagen. + * @param requestOptions - Additional options to use when making requests. + * + * @throws If the `apiKey` or `projectId` fields are missing in your + * Firebase config. + */ + constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); + /** + * Generates images using the Imagen model and returns them as + * base64-encoded strings. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the generated images. + * + * @throws If the request to generate images fails. This happens if the + * prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + * + * @public + */ + generateImages(prompt: string): Promise<ImagenGenerationResponse<ImagenInlineImage>>; + /** + * Generates images to Cloud Storage for Firebase using the Imagen model. + * + * @internal This method is temporarily internal. + * + * @param prompt - A text prompt describing the image(s) to generate. + * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket. + * This should be a directory. For example, `gs://my-bucket/my-directory/`. + * @returns A promise that resolves to an {@link ImagenGenerationResponse} + * object containing the URLs of the generated images. + * + * @throws If the request fails to generate images fails. This happens if + * the prompt is blocked. + * + * @remarks + * If the prompt was not blocked, but one or more of the generated images were filtered, the + * returned object will have a `filteredReason` property. + * If all images are filtered, the `images` array will be empty. + */ + generateImagesGCS(prompt: string, gcsURI: string): Promise<ImagenGenerationResponse<ImagenGCSImage>>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/models/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/models/index.d.ts new file mode 100644 index 0000000..3d79da7 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/models/index.d.ts @@ -0,0 +1,20 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
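A minimal usage sketch for the `GenerativeModel` class declared earlier in this hunk (the `ImagenModel` docs above already carry their own example). The model name is illustrative, the `ai` argument is assumed to come from `getAI()`, and `ChatSession.sendMessage` is declared in `chat-session.d.ts`, outside this hunk.

```ts
import { getGenerativeModel, AI } from '@firebase/ai';

// Sketch only; the model name is an example and `ai` is created elsewhere via getAI().
async function demo(ai: AI): Promise<void> {
  const model = getGenerativeModel(ai, { model: 'gemini-2.0-flash' });

  // Single non-streaming call; result.response exposes convenience helpers like text().
  const result = await model.generateContent('Write a haiku about TypeScript.');
  console.log(result.response.text());

  // Multi-turn chat on the same underlying model.
  const chat = model.startChat();
  const reply = await chat.sendMessage('Now one about JavaScript.');
  console.log(reply.response.text());

  // Token accounting for a prospective request.
  const { totalTokens } = await model.countTokens('How long is this prompt?');
  console.log(`prompt would use ${totalTokens} tokens`);
}
```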
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +export * from './ai-model'; +export * from './generative-model'; +export * from './live-generative-model'; +export * from './imagen-model'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/models/live-generative-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/models/live-generative-model.d.ts new file mode 100644 index 0000000..cf0b896 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/models/live-generative-model.d.ts @@ -0,0 +1,55 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { AIModel } from './ai-model'; +import { LiveSession } from '../methods/live-session'; +import { AI, Content, LiveGenerationConfig, LiveModelParams, Tool, ToolConfig } from '../public-types'; +import { WebSocketHandler } from '../websocket'; +/** + * Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal + * interactions with Gemini. + * + * This class should only be instantiated with {@link getLiveGenerativeModel}. + * + * @beta + */ +export declare class LiveGenerativeModel extends AIModel { + /** + * @internal + */ + private _webSocketHandler; + generationConfig: LiveGenerationConfig; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: Content; + /** + * @internal + */ + constructor(ai: AI, modelParams: LiveModelParams, + /** + * @internal + */ + _webSocketHandler: WebSocketHandler); + /** + * Starts a {@link LiveSession}. + * + * @returns A {@link LiveSession}. + * @throws If the connection failed to be established with the server. + * + * @beta + */ + connect(): Promise<LiveSession>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/public-types.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/public-types.d.ts new file mode 100644 index 0000000..21620ed --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/public-types.d.ts @@ -0,0 +1,97 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
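Because `LiveGenerativeModel.connect()` above is the only supported way to obtain a `LiveSession`, a short lifecycle sketch may help tie the two classes together. The `getLiveGenerativeModel` entry point is the one named in the class docs; the model name and the `turnComplete` check used to end the loop are assumptions based on the Live API types rather than on this hunk.

```ts
import { getLiveGenerativeModel, AI } from '@firebase/ai';

// Sketch of a single text turn over the Live API; the model name is illustrative.
async function liveTextTurn(ai: AI): Promise<void> {
  const liveModel = getLiveGenerativeModel(ai, { model: 'gemini-2.0-flash-live-preview' });
  const session = await liveModel.connect();

  // Send one message and mark the turn as complete.
  await session.send('Give me a one-line greeting.', true);

  // receive() can only be consumed by one reader at a time; it yields
  // LiveServerContent, LiveServerToolCall, and LiveServerToolCallCancellation messages.
  for await (const message of session.receive()) {
    console.log(message);
    // Assumed: LiveServerContent carries an optional turnComplete flag.
    if ('turnComplete' in message && message.turnComplete) {
      break;
    }
  }

  await session.close();
}
```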
+ */ +import { FirebaseApp } from '@firebase/app'; +import { Backend } from './backend'; +export * from './types'; +/** + * An instance of the Firebase AI SDK. + * + * Do not create this instance directly. Instead, use {@link getAI | getAI()}. + * + * @public + */ +export interface AI { + /** + * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with. + */ + app: FirebaseApp; + /** + * A {@link Backend} instance that specifies the configuration for the target backend, + * either the Gemini Developer API (using {@link GoogleAIBackend}) or the + * Vertex AI Gemini API (using {@link VertexAIBackend}). + */ + backend: Backend; + /** + * Options applied to this {@link AI} instance. + */ + options?: AIOptions; + /** + * @deprecated use `AI.backend.location` instead. + * + * The location configured for this AI service instance, relevant for Vertex AI backends. + */ + location: string; +} +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase AI SDK. + * This determines which backend service (Vertex AI Gemini API or Gemini Developer API) + * the SDK will communicate with. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +export declare const BackendType: { + /** + * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + readonly VERTEX_AI: "VERTEX_AI"; + /** + * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}). + * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + readonly GOOGLE_AI: "GOOGLE_AI"; +}; +/** + * Type alias representing valid backend types. + * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`. + * + * @public + */ +export type BackendType = (typeof BackendType)[keyof typeof BackendType]; +/** + * Options for initializing the AI service using {@link getAI | getAI()}. + * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API) + * and configuring its specific options (like location for Vertex AI). + * + * @public + */ +export interface AIOptions { + /** + * The backend configuration to use for the AI service instance. + * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}). + */ + backend?: Backend; + /** + * Whether to use App Check limited use tokens. Defaults to false. + */ + useLimitedUseAppCheckTokens?: boolean; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/requests/hybrid-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/requests/hybrid-helpers.d.ts new file mode 100644 index 0000000..b52e6bf --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/requests/hybrid-helpers.d.ts @@ -0,0 +1,33 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
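The `BackendType` constants and `AIOptions` above determine which service the SDK calls. A small initialization sketch follows; the Firebase config values are placeholders, and the `GoogleAIBackend`/`VertexAIBackend` constructors are declared in `backend.d.ts`, outside this hunk.

```ts
import { initializeApp } from '@firebase/app';
import { getAI, GoogleAIBackend, VertexAIBackend } from '@firebase/ai';

const app = initializeApp({ /* your Firebase config */ });

// Default backend: the Gemini Developer API.
const ai = getAI(app, { backend: new GoogleAIBackend() });

// Or target the Vertex AI Gemini API in a specific location.
const vertexAI = getAI(app, { backend: new VertexAIBackend('us-central1') });
```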
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { GenerateContentRequest, ChromeAdapter, InferenceSource } from '../types'; +interface CallResult<Response> { + response: Response; + inferenceSource: InferenceSource; +} +/** + * Dispatches a request to the appropriate backend (on-device or in-cloud) + * based on the inference mode. + * + * @param request - The request to be sent. + * @param chromeAdapter - The on-device model adapter. + * @param onDeviceCall - The function to call for on-device inference. + * @param inCloudCall - The function to call for in-cloud inference. + * @returns The response from the backend. + */ +export declare function callCloudOrDevice<Response>(request: GenerateContentRequest, chromeAdapter: ChromeAdapter | undefined, onDeviceCall: () => Promise<Response>, inCloudCall: () => Promise<Response>): Promise<CallResult<Response>>; +export {}; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/requests/imagen-image-format.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/requests/imagen-image-format.d.ts new file mode 100644 index 0000000..2f3eddb --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/requests/imagen-image-format.d.ts @@ -0,0 +1,61 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Defines the image format for images generated by Imagen. + * + * Use this class to specify the desired format (JPEG or PNG) and compression quality + * for images generated by Imagen. This is typically included as part of + * {@link ImagenModelParams}. + * + * @example + * ```javascript + * const imagenModelParams = { + * // ... other ImagenModelParams + * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75. + * } + * ``` + * + * @public + */ +export declare class ImagenImageFormat { + /** + * The MIME type. + */ + mimeType: string; + /** + * The level of compression (a number between 0 and 100). + */ + compressionQuality?: number; + private constructor(); + /** + * Creates an {@link ImagenImageFormat} for a JPEG image. + * + * @param compressionQuality - The level of compression (a number between 0 and 100). + * @returns An {@link ImagenImageFormat} object for a JPEG image. + * + * @public + */ + static jpeg(compressionQuality?: number): ImagenImageFormat; + /** + * Creates an {@link ImagenImageFormat} for a PNG image. + * + * @returns An {@link ImagenImageFormat} object for a PNG image. 
+ * + * @public + */ + static png(): ImagenImageFormat; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/requests/request-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/requests/request-helpers.d.ts new file mode 100644 index 0000000..fa79626 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/requests/request-helpers.d.ts @@ -0,0 +1,28 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, GenerateContentRequest, Part } from '../types'; +import { ImagenGenerationParams, PredictRequestBody } from '../types/internal'; +export declare function formatSystemInstruction(input?: string | Part | Content): Content | undefined; +export declare function formatNewContent(request: string | Array<string | Part>): Content; +export declare function formatGenerateContentInput(params: GenerateContentRequest | string | Array<string | Part>): GenerateContentRequest; +/** + * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format + * that is expected from the REST API. + * + * @internal + */ +export declare function createPredictRequestBody(prompt: string, { gcsURI, imageFormat, addWatermark, numberOfImages, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }: ImagenGenerationParams): PredictRequestBody; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/requests/request.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/requests/request.d.ts new file mode 100644 index 0000000..b0aed14 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/requests/request.d.ts @@ -0,0 +1,49 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
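`formatSystemInstruction` above accepts a plain string, a `Part`, or a `Content` object, which is why a system instruction can be supplied in any of those forms when constructing a model. A short sketch under that reading; the model name and the `ai` instance are assumptions.

```ts
import { getGenerativeModel, AI } from '@firebase/ai';

// The string form below is normalized internally into a Content object
// (see formatSystemInstruction) before the request is sent.
function makeTerseModel(ai: AI) {
  return getGenerativeModel(ai, {
    model: 'gemini-2.0-flash',
    systemInstruction: 'You are a terse assistant. Answer in one sentence.',
  });
}
```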
+ */ +import { RequestOptions } from '../types'; +import { ApiSettings } from '../types/internal'; +export declare enum Task { + GENERATE_CONTENT = "generateContent", + STREAM_GENERATE_CONTENT = "streamGenerateContent", + COUNT_TOKENS = "countTokens", + PREDICT = "predict" +} +export declare class RequestUrl { + model: string; + task: Task; + apiSettings: ApiSettings; + stream: boolean; + requestOptions?: RequestOptions | undefined; + constructor(model: string, task: Task, apiSettings: ApiSettings, stream: boolean, requestOptions?: RequestOptions | undefined); + toString(): string; + private get baseUrl(); + private get apiVersion(); + private get modelPath(); + private get queryParams(); +} +export declare class WebSocketUrl { + apiSettings: ApiSettings; + constructor(apiSettings: ApiSettings); + toString(): string; + private get pathname(); +} +export declare function getHeaders(url: RequestUrl): Promise<Headers>; +export declare function constructRequest(model: string, task: Task, apiSettings: ApiSettings, stream: boolean, body: string, requestOptions?: RequestOptions): Promise<{ + url: string; + fetchOptions: RequestInit; +}>; +export declare function makeRequest(model: string, task: Task, apiSettings: ApiSettings, stream: boolean, body: string, requestOptions?: RequestOptions): Promise<Response>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/requests/response-helpers.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/requests/response-helpers.d.ts new file mode 100644 index 0000000..d0aded1 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/requests/response-helpers.d.ts @@ -0,0 +1,57 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { EnhancedGenerateContentResponse, FunctionCall, GenerateContentResponse, ImagenGCSImage, ImagenInlineImage, InlineDataPart, Part, InferenceSource } from '../types'; +/** + * Creates an EnhancedGenerateContentResponse object that has helper functions and + * other modifications that improve usability. + */ +export declare function createEnhancedContentResponse(response: GenerateContentResponse, inferenceSource?: InferenceSource): EnhancedGenerateContentResponse; +/** + * Adds convenience helper methods to a response object, including stream + * chunks (as long as each chunk is a complete GenerateContentResponse JSON). + */ +export declare function addHelpers(response: GenerateContentResponse): EnhancedGenerateContentResponse; +/** + * Returns all text from the first candidate's parts, filtering by whether + * `partFilter()` returns true. + * + * @param response - The `GenerateContentResponse` from which to extract text. + * @param partFilter - Only return `Part`s for which this returns true + */ +export declare function getText(response: GenerateContentResponse, partFilter: (part: Part) => boolean): string; +/** + * Returns every {@link FunctionCall} associated with first candidate. 
+ */ +export declare function getFunctionCalls(response: GenerateContentResponse): FunctionCall[] | undefined; +/** + * Returns every {@link InlineDataPart} in the first candidate if present. + * + * @internal + */ +export declare function getInlineDataParts(response: GenerateContentResponse): InlineDataPart[] | undefined; +export declare function formatBlockErrorMessage(response: GenerateContentResponse): string; +/** + * Convert a generic successful fetch response body to an Imagen response object + * that can be returned to the user. This converts the REST APIs response format to our + * APIs representation of a response. + * + * @internal + */ +export declare function handlePredictResponse<T extends ImagenInlineImage | ImagenGCSImage>(response: Response): Promise<{ + images: T[]; + filteredReason?: string; +}>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/requests/schema-builder.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/requests/schema-builder.d.ts new file mode 100644 index 0000000..e23e74f --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/requests/schema-builder.d.ts @@ -0,0 +1,170 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { SchemaInterface, SchemaType, SchemaParams, SchemaRequest } from '../types/schema'; +/** + * Parent class encompassing all Schema types, with static methods that + * allow building specific Schema types. This class can be converted with + * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints. + * (This string conversion is automatically done when calling SDK methods.) + * @public + */ +export declare abstract class Schema implements SchemaInterface { + /** + * Optional. The type of the property. + * This can only be undefined when using `anyOf` schemas, which do not have an + * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}. + */ + type?: SchemaType; + /** Optional. The format of the property. + * Supported formats:<br/> + * <ul> + * <li>for NUMBER type: "float", "double"</li> + * <li>for INTEGER type: "int32", "int64"</li> + * <li>for STRING type: "email", "byte", etc</li> + * </ul> + */ + format?: string; + /** Optional. The description of the property. */ + description?: string; + /** Optional. The items of the property. */ + items?: SchemaInterface; + /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + minItems?: number; + /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + maxItems?: number; + /** Optional. Whether the property is nullable. Defaults to false. */ + nullable: boolean; + /** Optional. The example of the property. */ + example?: unknown; + /** + * Allows user to add other schema properties that have not yet + * been officially added to the SDK. 
+ */ + [key: string]: unknown; + constructor(schemaParams: SchemaInterface); + /** + * Defines how this Schema should be serialized as JSON. + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior + * @internal + */ + toJSON(): SchemaRequest; + static array(arrayParams: SchemaParams & { + items: Schema; + }): ArraySchema; + static object(objectParams: SchemaParams & { + properties: { + [k: string]: Schema; + }; + optionalProperties?: string[]; + }): ObjectSchema; + static string(stringParams?: SchemaParams): StringSchema; + static enumString(stringParams: SchemaParams & { + enum: string[]; + }): StringSchema; + static integer(integerParams?: SchemaParams): IntegerSchema; + static number(numberParams?: SchemaParams): NumberSchema; + static boolean(booleanParams?: SchemaParams): BooleanSchema; + static anyOf(anyOfParams: SchemaParams & { + anyOf: TypedSchema[]; + }): AnyOfSchema; +} +/** + * A type that includes all specific Schema types. + * @public + */ +export type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema | AnyOfSchema; +/** + * Schema class for "integer" types. + * @public + */ +export declare class IntegerSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} +/** + * Schema class for "number" types. + * @public + */ +export declare class NumberSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} +/** + * Schema class for "boolean" types. + * @public + */ +export declare class BooleanSchema extends Schema { + constructor(schemaParams?: SchemaParams); +} +/** + * Schema class for "string" types. Can be used with or without + * enum values. + * @public + */ +export declare class StringSchema extends Schema { + enum?: string[]; + constructor(schemaParams?: SchemaParams, enumValues?: string[]); + /** + * @internal + */ + toJSON(): SchemaRequest; +} +/** + * Schema class for "array" types. + * The `items` param should refer to the type of item that can be a member + * of the array. + * @public + */ +export declare class ArraySchema extends Schema { + items: TypedSchema; + constructor(schemaParams: SchemaParams, items: TypedSchema); + /** + * @internal + */ + toJSON(): SchemaRequest; +} +/** + * Schema class for "object" types. + * The `properties` param must be a map of `Schema` objects. + * @public + */ +export declare class ObjectSchema extends Schema { + properties: { + [k: string]: TypedSchema; + }; + optionalProperties: string[]; + constructor(schemaParams: SchemaParams, properties: { + [k: string]: TypedSchema; + }, optionalProperties?: string[]); + /** + * @internal + */ + toJSON(): SchemaRequest; +} +/** + * Schema class representing a value that can conform to any of the provided sub-schemas. This is + * useful when a field can accept multiple distinct types or structures. 
+ * @public + */ +export declare class AnyOfSchema extends Schema { + anyOf: TypedSchema[]; + constructor(schemaParams: SchemaParams & { + anyOf: TypedSchema[]; + }); + /** + * @internal + */ + toJSON(): SchemaRequest; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/requests/stream-reader.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/requests/stream-reader.d.ts new file mode 100644 index 0000000..4ffb0da --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/requests/stream-reader.d.ts @@ -0,0 +1,39 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { GenerateContentResponse, GenerateContentStreamResult } from '../types'; +import { ApiSettings } from '../types/internal'; +import { InferenceSource } from '../public-types'; +/** + * Process a response.body stream from the backend and return an + * iterator that provides one complete GenerateContentResponse at a time + * and a promise that resolves with a single aggregated + * GenerateContentResponse. + * + * @param response - Response from a fetch call + */ +export declare function processStream(response: Response, apiSettings: ApiSettings, inferenceSource?: InferenceSource): GenerateContentStreamResult; +/** + * Reads a raw stream from the fetch response and join incomplete + * chunks, returning a new stream that provides a single complete + * GenerateContentResponse in each iteration. + */ +export declare function getResponseStream<T>(inputStream: ReadableStream<string>): ReadableStream<T>; +/** + * Aggregates an array of `GenerateContentResponse`s into a single + * GenerateContentResponse. + */ +export declare function aggregateResponses(responses: GenerateContentResponse[]): GenerateContentResponse; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/service.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/service.d.ts new file mode 100644 index 0000000..b0da890 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/service.d.ts @@ -0,0 +1,36 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
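The `Schema` builders above are typically consumed through the `responseSchema` field of `GenerationConfig`, declared elsewhere in this package. A sketch under those assumptions, with an illustrative model name and an `ai` instance assumed to exist:

```ts
import { getGenerativeModel, Schema, AI } from '@firebase/ai';

// Constrain the model to emit JSON matching this schema.
const recipeSchema = Schema.object({
  properties: {
    name: Schema.string(),
    servings: Schema.integer(),
    difficulty: Schema.enumString({ enum: ['easy', 'medium', 'hard'] }),
    ingredients: Schema.array({ items: Schema.string() }),
  },
  optionalProperties: ['difficulty'],
});

async function getRecipe(ai: AI): Promise<unknown> {
  const model = getGenerativeModel(ai, {
    model: 'gemini-2.0-flash',
    generationConfig: {
      responseMimeType: 'application/json',
      responseSchema: recipeSchema,
    },
  });
  const result = await model.generateContent('Invent a simple pasta recipe.');
  // The schema is serialized via its toJSON() method when the request body is built.
  return JSON.parse(result.response.text());
}
```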
+ */ +import { FirebaseApp, _FirebaseService } from '@firebase/app'; +import { AI, AIOptions, InferenceMode, OnDeviceParams } from './public-types'; +import { AppCheckInternalComponentName, FirebaseAppCheckInternal } from '@firebase/app-check-interop-types'; +import { Provider } from '@firebase/component'; +import { FirebaseAuthInternal, FirebaseAuthInternalName } from '@firebase/auth-interop-types'; +import { Backend } from './backend'; +import { ChromeAdapterImpl } from './methods/chrome-adapter'; +export declare class AIService implements AI, _FirebaseService { + app: FirebaseApp; + backend: Backend; + chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined; + auth: FirebaseAuthInternal | null; + appCheck: FirebaseAppCheckInternal | null; + _options?: Omit<AIOptions, 'backend'>; + location: string; + constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>, chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined); + _delete(): Promise<void>; + set options(optionsToSet: AIOptions); + get options(): AIOptions | undefined; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/tsdoc-metadata.json b/frontend-old/node_modules/@firebase/ai/dist/src/tsdoc-metadata.json new file mode 100644 index 0000000..6af1f6a --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/tsdoc-metadata.json @@ -0,0 +1,11 @@ +// This file is read by tools that parse documentation comments conforming to the TSDoc standard. +// It should be published with your NPM package. It should not be tracked by Git. +{ + "tsdocVersion": "0.12", + "toolPackages": [ + { + "packageName": "@microsoft/api-extractor", + "packageVersion": "0.1.2" + } + ] +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/chrome-adapter.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/chrome-adapter.d.ts new file mode 100644 index 0000000..6092353 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/chrome-adapter.d.ts @@ -0,0 +1,56 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { CountTokensRequest, GenerateContentRequest } from './requests'; +/** + * Defines an inference "backend" that uses Chrome's on-device model, + * and encapsulates logic for detecting when on-device inference is + * possible. + * + * These methods should not be called directly by the user. + * + * @beta + */ +export interface ChromeAdapter { + /** + * Checks if the on-device model is capable of handling a given + * request. + * @param request - A potential request to be passed to the model. + */ + isAvailable(request: GenerateContentRequest): Promise<boolean>; + /** + * Generates content using on-device inference. 
+ * + * @remarks + * This is comparable to {@link GenerativeModel.generateContent} for generating + * content using in-cloud inference. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + */ + generateContent(request: GenerateContentRequest): Promise<Response>; + /** + * Generates a content stream using on-device inference. + * + * @remarks + * This is comparable to {@link GenerativeModel.generateContentStream} for generating + * a content stream using in-cloud inference. + * @param request - a standard Firebase AI {@link GenerateContentRequest} + */ + generateContentStream(request: GenerateContentRequest): Promise<Response>; + /** + * @internal + */ + countTokens(request: CountTokensRequest): Promise<Response>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/content.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/content.d.ts new file mode 100644 index 0000000..a760547 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/content.d.ts @@ -0,0 +1,265 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Language, Outcome, Role } from './enums'; +/** + * Content type for both prompts and response candidates. + * @public + */ +export interface Content { + role: Role; + parts: Part[]; +} +/** + * Content part - includes text, image/video, or function call/response + * part types. + * @public + */ +export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart; +/** + * Content part interface if the part represents a text string. + * @public + */ +export interface TextPart { + text: string; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: string; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Content part interface if the part represents an image. + * @public + */ +export interface InlineDataPart { + text?: never; + inlineData: GenerativeContentBlob; + functionCall?: never; + functionResponse?: never; + /** + * Applicable if `inlineData` is a video. + */ + videoMetadata?: VideoMetadata; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Describes the input video content. + * @public + */ +export interface VideoMetadata { + /** + * The start offset of the video in + * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format. + */ + startOffset: string; + /** + * The end offset of the video in + * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format. + */ + endOffset: string; +} +/** + * Content part interface if the part represents a {@link FunctionCall}. 
+ * @public + */ +export interface FunctionCallPart { + text?: never; + inlineData?: never; + functionCall: FunctionCall; + functionResponse?: never; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Content part interface if the part represents {@link FunctionResponse}. + * @public + */ +export interface FunctionResponsePart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse: FunctionResponse; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Content part interface if the part represents {@link FileData} + * @public + */ +export interface FileDataPart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: FileData; + thought?: boolean; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: never; +} +/** + * Represents the code that is executed by the model. + * + * @beta + */ +export interface ExecutableCodePart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: never; + thought?: never; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: ExecutableCode; + codeExecutionResult?: never; +} +/** + * Represents the code execution result from the model. + * + * @beta + */ +export interface CodeExecutionResultPart { + text?: never; + inlineData?: never; + functionCall?: never; + functionResponse?: never; + fileData: never; + thought?: never; + /** + * @internal + */ + thoughtSignature?: never; + executableCode?: never; + codeExecutionResult?: CodeExecutionResult; +} +/** + * An interface for executable code returned by the model. + * + * @beta + */ +export interface ExecutableCode { + /** + * The programming language of the code. + */ + language?: Language; + /** + * The source code to be executed. + */ + code?: string; +} +/** + * The results of code execution run by the model. + * + * @beta + */ +export interface CodeExecutionResult { + /** + * The result of the code execution. + */ + outcome?: Outcome; + /** + * The output from the code execution, or an error message + * if it failed. + */ + output?: string; +} +/** + * A predicted {@link FunctionCall} returned from the model + * that contains a string representing the {@link FunctionDeclaration.name} + * and a structured JSON object containing the parameters and their values. + * @public + */ +export interface FunctionCall { + /** + * The id of the function call. This must be sent back in the associated {@link FunctionResponse}. + * + * + * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + id?: string; + name: string; + args: object; +} +/** + * The result output from a {@link FunctionCall} that contains a string + * representing the {@link FunctionDeclaration.name} + * and a structured JSON object containing any output + * from the function is used as context to the model. + * This should contain the result of a {@link FunctionCall} + * made based on model prediction. + * @public + */ +export interface FunctionResponse { + /** + * The id of the {@link FunctionCall}. + * + * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}). 
+     * When using the Vertex AI Gemini API ({@link VertexAIBackend}), this property will be
+     * `undefined`.
+     */
+    id?: string;
+    name: string;
+    response: object;
+}
+/**
+ * Interface for sending an image.
+ * @public
+ */
+export interface GenerativeContentBlob {
+    mimeType: string;
+    /**
+     * Image as a base64 string.
+     */
+    data: string;
+}
+/**
+ * Data pointing to a file uploaded on Google Cloud Storage.
+ * @public
+ */
+export interface FileData {
+    mimeType: string;
+    fileUri: string;
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/enums.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/enums.d.ts
new file mode 100644
index 0000000..170a299
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/enums.d.ts
@@ -0,0 +1,398 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Role is the producer of the content.
+ * @public
+ */
+export type Role = (typeof POSSIBLE_ROLES)[number];
+/**
+ * Possible roles.
+ * @public
+ */
+export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"];
+/**
+ * Harm categories that would cause prompts or candidates to be blocked.
+ * @public
+ */
+export declare const HarmCategory: {
+    readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
+    readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
+    readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT";
+    readonly HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT";
+};
+/**
+ * Harm categories that would cause prompts or candidates to be blocked.
+ * @public
+ */
+export type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];
+/**
+ * Threshold above which a prompt or candidate will be blocked.
+ * @public
+ */
+export declare const HarmBlockThreshold: {
+    /**
+     * Content with `NEGLIGIBLE` will be allowed.
+     */
+    readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
+    /**
+     * Content with `NEGLIGIBLE` and `LOW` will be allowed.
+     */
+    readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
+    /**
+     * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.
+     */
+    readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
+    /**
+     * All content will be allowed.
+     */
+    readonly BLOCK_NONE: "BLOCK_NONE";
+    /**
+     * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
+     * to the {@link (HarmCategory:type)} will not be present in the response.
+     */
+    readonly OFF: "OFF";
+};
+/**
+ * Threshold above which a prompt or candidate will be blocked.
+ * @public
+ */
+export type HarmBlockThreshold = (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];
+/**
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
+ *
+ * @public
+ */
+export declare const HarmBlockMethod: {
+    /**
+     * The harm block method uses both probability and severity scores.
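+     *
+     * @example
+     * An illustrative safety setting using this method (Vertex AI Gemini API only; the
+     * chosen category and threshold are assumptions, not recommendations):
+     * ```ts
+     * const safetySettings = [{
+     *   category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+     *   threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+     *   method: HarmBlockMethod.SEVERITY
+     * }];
+     * ```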
+ */ + readonly SEVERITY: "SEVERITY"; + /** + * The harm block method uses the probability score. + */ + readonly PROBABILITY: "PROBABILITY"; +}; +/** + * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}). + * + * @public + */ +export type HarmBlockMethod = (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod]; +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +export declare const HarmProbability: { + /** + * Content has a negligible chance of being unsafe. + */ + readonly NEGLIGIBLE: "NEGLIGIBLE"; + /** + * Content has a low chance of being unsafe. + */ + readonly LOW: "LOW"; + /** + * Content has a medium chance of being unsafe. + */ + readonly MEDIUM: "MEDIUM"; + /** + * Content has a high chance of being unsafe. + */ + readonly HIGH: "HIGH"; +}; +/** + * Probability that a prompt or candidate matches a harm category. + * @public + */ +export type HarmProbability = (typeof HarmProbability)[keyof typeof HarmProbability]; +/** + * Harm severity levels. + * @public + */ +export declare const HarmSeverity: { + /** + * Negligible level of harm severity. + */ + readonly HARM_SEVERITY_NEGLIGIBLE: "HARM_SEVERITY_NEGLIGIBLE"; + /** + * Low level of harm severity. + */ + readonly HARM_SEVERITY_LOW: "HARM_SEVERITY_LOW"; + /** + * Medium level of harm severity. + */ + readonly HARM_SEVERITY_MEDIUM: "HARM_SEVERITY_MEDIUM"; + /** + * High level of harm severity. + */ + readonly HARM_SEVERITY_HIGH: "HARM_SEVERITY_HIGH"; + /** + * Harm severity is not supported. + * + * @remarks + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + readonly HARM_SEVERITY_UNSUPPORTED: "HARM_SEVERITY_UNSUPPORTED"; +}; +/** + * Harm severity levels. + * @public + */ +export type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity]; +/** + * Reason that a prompt was blocked. + * @public + */ +export declare const BlockReason: { + /** + * Content was blocked by safety settings. + */ + readonly SAFETY: "SAFETY"; + /** + * Content was blocked, but the reason is uncategorized. + */ + readonly OTHER: "OTHER"; + /** + * Content was blocked because it contained terms from the terminology blocklist. + */ + readonly BLOCKLIST: "BLOCKLIST"; + /** + * Content was blocked due to prohibited content. + */ + readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT"; +}; +/** + * Reason that a prompt was blocked. + * @public + */ +export type BlockReason = (typeof BlockReason)[keyof typeof BlockReason]; +/** + * Reason that a candidate finished. + * @public + */ +export declare const FinishReason: { + /** + * Natural stop point of the model or provided stop sequence. + */ + readonly STOP: "STOP"; + /** + * The maximum number of tokens as specified in the request was reached. + */ + readonly MAX_TOKENS: "MAX_TOKENS"; + /** + * The candidate content was flagged for safety reasons. + */ + readonly SAFETY: "SAFETY"; + /** + * The candidate content was flagged for recitation reasons. + */ + readonly RECITATION: "RECITATION"; + /** + * Unknown reason. + */ + readonly OTHER: "OTHER"; + /** + * The candidate content contained forbidden terms. + */ + readonly BLOCKLIST: "BLOCKLIST"; + /** + * The candidate content potentially contained prohibited content. + */ + readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT"; + /** + * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII). + */ + readonly SPII: "SPII"; + /** + * The function call generated by the model was invalid. 
+ */ + readonly MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL"; +}; +/** + * Reason that a candidate finished. + * @public + */ +export type FinishReason = (typeof FinishReason)[keyof typeof FinishReason]; +/** + * @public + */ +export declare const FunctionCallingMode: { + /** + * Default model behavior; model decides to predict either a function call + * or a natural language response. + */ + readonly AUTO: "AUTO"; + /** + * Model is constrained to always predicting a function call only. + * If `allowed_function_names` is set, the predicted function call will be + * limited to any one of `allowed_function_names`, else the predicted + * function call will be any one of the provided `function_declarations`. + */ + readonly ANY: "ANY"; + /** + * Model will not predict any function call. Model behavior is same as when + * not passing any function declarations. + */ + readonly NONE: "NONE"; +}; +/** + * @public + */ +export type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode]; +/** + * Content part modality. + * @public + */ +export declare const Modality: { + /** + * Unspecified modality. + */ + readonly MODALITY_UNSPECIFIED: "MODALITY_UNSPECIFIED"; + /** + * Plain text. + */ + readonly TEXT: "TEXT"; + /** + * Image. + */ + readonly IMAGE: "IMAGE"; + /** + * Video. + */ + readonly VIDEO: "VIDEO"; + /** + * Audio. + */ + readonly AUDIO: "AUDIO"; + /** + * Document (for example, PDF). + */ + readonly DOCUMENT: "DOCUMENT"; +}; +/** + * Content part modality. + * @public + */ +export type Modality = (typeof Modality)[keyof typeof Modality]; +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +export declare const ResponseModality: { + /** + * Text. + * @beta + */ + readonly TEXT: "TEXT"; + /** + * Image. + * @beta + */ + readonly IMAGE: "IMAGE"; + /** + * Audio. + * @beta + */ + readonly AUDIO: "AUDIO"; +}; +/** + * Generation modalities to be returned in generation responses. + * + * @beta + */ +export type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality]; +/** + * Determines whether inference happens on-device or in-cloud. + * + * @remarks + * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an + * on-device model. If on-device inference is not available, the SDK + * will fall back to using a cloud-hosted model. + * <br/> + * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an + * on-device model. The SDK will not fall back to a cloud-hosted model. + * If on-device inference is not available, inference methods will throw. + * <br/> + * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a + * cloud-hosted model. The SDK will not fall back to an on-device model. + * <br/> + * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a + * cloud-hosted model. If not available, the SDK will fall back to an + * on-device model. + * + * @beta + */ +export declare const InferenceMode: { + readonly PREFER_ON_DEVICE: "prefer_on_device"; + readonly ONLY_ON_DEVICE: "only_on_device"; + readonly ONLY_IN_CLOUD: "only_in_cloud"; + readonly PREFER_IN_CLOUD: "prefer_in_cloud"; +}; +/** + * Determines whether inference happens on-device or in-cloud. + * + * @beta + */ +export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode]; +/** + * Indicates whether inference happened on-device or in-cloud. 
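+ *
+ * @example
+ * An illustrative sketch (the `ai` and `model` names are assumptions) of checking where
+ * inference ran for a model configured with {@link HybridParams}:
+ * ```ts
+ * const model = getGenerativeModel(ai, { mode: InferenceMode.PREFER_ON_DEVICE });
+ * const result = await model.generateContent('Hello!');
+ * if (result.response.inferenceSource === InferenceSource.ON_DEVICE) {
+ *   console.log('This response was generated on-device.');
+ * }
+ * ```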
+ * + * @beta + */ +export declare const InferenceSource: { + readonly ON_DEVICE: "on_device"; + readonly IN_CLOUD: "in_cloud"; +}; +/** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ +export type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource]; +/** + * Represents the result of the code execution. + * + * @beta + */ +export declare const Outcome: { + UNSPECIFIED: string; + OK: string; + FAILED: string; + DEADLINE_EXCEEDED: string; +}; +/** + * Represents the result of the code execution. + * + * @beta + */ +export type Outcome = (typeof Outcome)[keyof typeof Outcome]; +/** + * The programming language of the code. + * + * @beta + */ +export declare const Language: { + UNSPECIFIED: string; + PYTHON: string; +}; +/** + * The programming language of the code. + * + * @beta + */ +export type Language = (typeof Language)[keyof typeof Language]; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/error.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/error.d.ts new file mode 100644 index 0000000..82e6bb4 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/error.d.ts @@ -0,0 +1,89 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { GenerateContentResponse } from './responses'; +/** + * Details object that may be included in an error response. + * + * @public + */ +export interface ErrorDetails { + '@type'?: string; + /** The reason for the error. */ + reason?: string; + /** The domain where the error occurred. */ + domain?: string; + /** Additional metadata about the error. */ + metadata?: Record<string, unknown>; + /** Any other relevant information about the error. */ + [key: string]: unknown; +} +/** + * Details object that contains data originating from a bad HTTP response. + * + * @public + */ +export interface CustomErrorData { + /** HTTP status code of the error response. */ + status?: number; + /** HTTP status text of the error response. */ + statusText?: string; + /** Response from a {@link GenerateContentRequest} */ + response?: GenerateContentResponse; + /** Optional additional details about the error. */ + errorDetails?: ErrorDetails[]; +} +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +export declare const AIErrorCode: { + /** A generic error occurred. */ + readonly ERROR: "error"; + /** An error occurred in a request. */ + readonly REQUEST_ERROR: "request-error"; + /** An error occurred in a response. */ + readonly RESPONSE_ERROR: "response-error"; + /** An error occurred while performing a fetch. */ + readonly FETCH_ERROR: "fetch-error"; + /** An error occurred because an operation was attempted on a closed session. */ + readonly SESSION_CLOSED: "session-closed"; + /** An error associated with a Content object. */ + readonly INVALID_CONTENT: "invalid-content"; + /** An error due to the Firebase API not being enabled in the Console. 
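+     *
+     * @example
+     * An illustrative sketch of detecting this code on a caught {@link AIError}
+     * (`model` is an assumed {@link GenerativeModel} instance):
+     * ```ts
+     * try {
+     *   await model.generateContent('Hello!');
+     * } catch (e) {
+     *   if (e instanceof AIError && e.code === AIErrorCode.API_NOT_ENABLED) {
+     *     console.warn('Enable the Firebase AI Logic API for this project.');
+     *   }
+     * }
+     * ```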
*/ + readonly API_NOT_ENABLED: "api-not-enabled"; + /** An error due to invalid Schema input. */ + readonly INVALID_SCHEMA: "invalid-schema"; + /** An error occurred due to a missing Firebase API key. */ + readonly NO_API_KEY: "no-api-key"; + /** An error occurred due to a missing Firebase app ID. */ + readonly NO_APP_ID: "no-app-id"; + /** An error occurred due to a model name not being specified during initialization. */ + readonly NO_MODEL: "no-model"; + /** An error occurred due to a missing project ID. */ + readonly NO_PROJECT_ID: "no-project-id"; + /** An error occurred while parsing. */ + readonly PARSE_FAILED: "parse-failed"; + /** An error occurred due an attempt to use an unsupported feature. */ + readonly UNSUPPORTED: "unsupported"; +}; +/** + * Standardized error codes that {@link AIError} can have. + * + * @public + */ +export type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode]; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/googleai.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/googleai.d.ts new file mode 100644 index 0000000..7060f48 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/googleai.d.ts @@ -0,0 +1,57 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Tool, GenerationConfig, Citation, FinishReason, GroundingMetadata, PromptFeedback, SafetyRating, UsageMetadata, URLContextMetadata } from '../public-types'; +import { Content, Part } from './content'; +/** + * @internal + */ +export interface GoogleAICountTokensRequest { + generateContentRequest: { + model: string; + contents: Content[]; + systemInstruction?: string | Part | Content; + tools?: Tool[]; + generationConfig?: GenerationConfig; + }; +} +/** + * @internal + */ +export interface GoogleAIGenerateContentResponse { + candidates?: GoogleAIGenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} +/** + * @internal + */ +export interface GoogleAIGenerateContentCandidate { + index: number; + content: Content; + finishReason?: FinishReason; + finishMessage?: string; + safetyRatings?: SafetyRating[]; + citationMetadata?: GoogleAICitationMetadata; + groundingMetadata?: GroundingMetadata; + urlContextMetadata?: URLContextMetadata; +} +/** + * @internal + */ +export interface GoogleAICitationMetadata { + citationSources: Citation[]; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/index.d.ts new file mode 100644 index 0000000..c56c5bc --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/index.d.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +export * from './requests'; +export * from './responses'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/internal.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/internal.d.ts new file mode 100644 index 0000000..7d5824d --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/internal.d.ts @@ -0,0 +1,134 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { ImagenGenerationConfig, ImagenSafetySettings } from './requests'; +/** + * A response from the REST API is expected to look like this in the success case: + * { + * "predictions": [ + * { + * "mimeType": "image/png", + * "bytesBase64Encoded": "iVBORw0KG..." + * }, + * { + * "mimeType": "image/png", + * "bytesBase64Encoded": "i4BOtw0KG..." + * } + * ] + * } + * + * And like this in the failure case: + * { + * "predictions": [ + * { + * "raiFilteredReason": "..." + * } + * ] + * } + * + * @internal + */ +export interface ImagenResponseInternal { + predictions?: Array<{ + /** + * The MIME type of the generated image. + */ + mimeType?: string; + /** + * The image data encoded as a base64 string. + */ + bytesBase64Encoded?: string; + /** + * The GCS URI where the image was stored. + */ + gcsUri?: string; + /** + * The reason why the image was filtered. + */ + raiFilteredReason?: string; + /** + * The safety attributes. + * + * This type is currently unused in the SDK. It is sent back because our requests set + * `includeSafetyAttributes`. This property is currently only used to avoid throwing an error + * when encountering this unsupported prediction type. + */ + safetyAttributes?: unknown; + }>; +} +/** + * The parameters to be sent in the request body of the HTTP call + * to the Vertex AI backend. + * + * We need a seperate internal-only interface for this because the REST + * API expects different parameter names than what we show to our users. + * + * Sample request body JSON: + * { + * "instances": [ + * { + * "prompt": "Portrait of a golden retriever on a beach." 
+ * } + * ], + * "parameters": { + * "mimeType": "image/png", + * "safetyFilterLevel": "block_low_and_above", + * "personGeneration": "allow_all", + * "sampleCount": 2, + * "includeRaiReason": true, + * "includeSafetyAttributes": true, + * "aspectRatio": "9:16" + * } + * } + * + * See the Google Cloud docs: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#-drest + * + * @internal + */ +export interface PredictRequestBody { + instances: [ + { + prompt: string; + } + ]; + parameters: { + sampleCount: number; + aspectRatio?: string; + outputOptions?: { + mimeType: string; + compressionQuality?: number; + }; + negativePrompt?: string; + storageUri?: string; + addWatermark?: boolean; + safetyFilterLevel?: string; + personGeneration?: string; + includeRaiReason: boolean; + includeSafetyAttributes: boolean; + }; +} +/** + * Contains all possible REST API paramaters that are provided by the caller. + * + * @internal + */ +export type ImagenGenerationParams = { + /** + * The Cloud Storage for Firebase bucket URI where the images should be stored + * (for GCS requests only). + */ + gcsURI?: string; +} & ImagenGenerationConfig & ImagenSafetySettings; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/requests.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/requests.d.ts new file mode 100644 index 0000000..31083fa --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/requests.d.ts @@ -0,0 +1,245 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { ImagenImageFormat } from '../../requests/imagen-image-format'; +/** + * Parameters for configuring an {@link ImagenModel}. + * + * @public + */ +export interface ImagenModelParams { + /** + * The Imagen model to use for generating images. + * For example: `imagen-3.0-generate-002`. + * + * Only Imagen 3 models (named `imagen-3.0-*`) are supported. + * + * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions} + * for a full list of supported Imagen 3 models. + */ + model: string; + /** + * Configuration options for generating images with Imagen. + */ + generationConfig?: ImagenGenerationConfig; + /** + * Safety settings for filtering potentially inappropriate content. + */ + safetySettings?: ImagenSafetySettings; +} +/** + * Configuration options for generating images with Imagen. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for + * more details. + * + * @public + */ +export interface ImagenGenerationConfig { + /** + * A description of what should be omitted from the generated images. + * + * Support for negative prompts depends on the Imagen model. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details. 
+ * + * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions + * greater than `imagen-3.0-generate-002`. + */ + negativePrompt?: string; + /** + * The number of images to generate. The default value is 1. + * + * The number of sample images that may be generated in each request depends on the model + * (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a> + * documentation for more details. + */ + numberOfImages?: number; + /** + * The aspect ratio of the generated images. The default value is square 1:1. + * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)} + * for more details. + */ + aspectRatio?: ImagenAspectRatio; + /** + * The image format of the generated images. The default is PNG. + * + * See {@link ImagenImageFormat} for more details. + */ + imageFormat?: ImagenImageFormat; + /** + * Whether to add an invisible watermark to generated images. + * + * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate + * that they are AI generated. If set to `false`, watermarking will be disabled. + * + * For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a> + * documentation for more details. + * + * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true, + * and cannot be turned off. + */ + addWatermark?: boolean; +} +/** + * A filter level controlling how aggressively to filter sensitive content. + * + * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI + * are assessed against a list of safety filters, which include 'harmful categories' (for example, + * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to + * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines} + * for more details. + * + * @public + */ +export declare const ImagenSafetyFilterLevel: { + /** + * The most aggressive filtering level; most strict blocking. + */ + readonly BLOCK_LOW_AND_ABOVE: "block_low_and_above"; + /** + * Blocks some sensitive prompts and responses. + */ + readonly BLOCK_MEDIUM_AND_ABOVE: "block_medium_and_above"; + /** + * Blocks few sensitive prompts and responses. + */ + readonly BLOCK_ONLY_HIGH: "block_only_high"; + /** + * The least aggressive filtering level; blocks very few sensitive prompts and responses. + * + * Access to this feature is restricted and may require your case to be reviewed and approved by + * Cloud support. + */ + readonly BLOCK_NONE: "block_none"; +}; +/** + * A filter level controlling how aggressively to filter sensitive content. + * + * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI + * are assessed against a list of safety filters, which include 'harmful categories' (for example, + * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to + * filter out potentially harmful content from responses. 
See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines} + * for more details. + * + * @public + */ +export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel]; +/** + * A filter level controlling whether generation of images containing people or faces is allowed. + * + * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a> + * documentation for more details. + * + * @public + */ +export declare const ImagenPersonFilterLevel: { + /** + * Disallow generation of images containing people or faces; images of people are filtered out. + */ + readonly BLOCK_ALL: "dont_allow"; + /** + * Allow generation of images containing adults only; images of children are filtered out. + * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + readonly ALLOW_ADULT: "allow_adult"; + /** + * Allow generation of images containing adults only; images of children are filtered out. + * + * Generation of images containing people or faces may require your use case to be + * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines} + * for more details. + */ + readonly ALLOW_ALL: "allow_all"; +}; +/** + * A filter level controlling whether generation of images containing people or faces is allowed. + * + * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a> + * documentation for more details. + * + * @public + */ +export type ImagenPersonFilterLevel = (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel]; +/** + * Settings for controlling the aggressiveness of filtering out sensitive content. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details. + * + * @public + */ +export interface ImagenSafetySettings { + /** + * A filter level controlling how aggressive to filter out sensitive content from generated + * images. + */ + safetyFilterLevel?: ImagenSafetyFilterLevel; + /** + * A filter level controlling whether generation of images containing people or faces is allowed. + */ + personFilterLevel?: ImagenPersonFilterLevel; +} +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +export declare const ImagenAspectRatio: { + /** + * Square (1:1) aspect ratio. + */ + readonly SQUARE: "1:1"; + /** + * Landscape (3:4) aspect ratio. + */ + readonly LANDSCAPE_3x4: "3:4"; + /** + * Portrait (4:3) aspect ratio. + */ + readonly PORTRAIT_4x3: "4:3"; + /** + * Landscape (16:9) aspect ratio. + */ + readonly LANDSCAPE_16x9: "16:9"; + /** + * Portrait (9:16) aspect ratio. 
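+     *
+     * @example
+     * An illustrative config sketch using this ratio (the model name is an assumption):
+     * ```ts
+     * const imagenModel = getImagenModel(ai, {
+     *   model: 'imagen-3.0-generate-002',
+     *   generationConfig: { aspectRatio: ImagenAspectRatio.PORTRAIT_9x16 }
+     * });
+     * ```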
+ */ + readonly PORTRAIT_9x16: "9:16"; +}; +/** + * Aspect ratios for Imagen images. + * + * To specify an aspect ratio for generated images, set the `aspectRatio` property in your + * {@link ImagenGenerationConfig}. + * + * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation } + * for more details and examples of the supported aspect ratios. + * + * @public + */ +export type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio]; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/responses.d.ts new file mode 100644 index 0000000..f5dfc0f --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/imagen/responses.d.ts @@ -0,0 +1,79 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * An image generated by Imagen, represented as inline data. + * + * @public + */ +export interface ImagenInlineImage { + /** + * The MIME type of the image; either `"image/png"` or `"image/jpeg"`. + * + * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}. + */ + mimeType: string; + /** + * The base64-encoded image data. + */ + bytesBase64Encoded: string; +} +/** + * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket. + * + * This feature is not available yet. + * @public + */ +export interface ImagenGCSImage { + /** + * The MIME type of the image; either `"image/png"` or `"image/jpeg"`. + * + * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}. + */ + mimeType: string; + /** + * The URI of the file stored in a Cloud Storage for Firebase bucket. + * + * @example `"gs://bucket-name/path/sample_0.jpg"`. + */ + gcsURI: string; +} +/** + * The response from a request to generate images with Imagen. + * + * @public + */ +export interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> { + /** + * The images generated by Imagen. + * + * The number of images generated may be fewer than the number requested if one or more were + * filtered out; see `filteredReason`. + */ + images: T[]; + /** + * The reason that images were filtered out. This property will only be defined if one + * or more images were filtered. + * + * Images may be filtered out due to the {@link (ImagenSafetyFilterLevel:type)}, + * {@link (ImagenPersonFilterLevel:type)}, or filtering included in the model. + * The filter levels may be adjusted in your {@link ImagenSafetySettings}. + * + * See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen} + * for more details. 
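+     *
+     * @example
+     * An illustrative sketch (assumes `imagenModel` was created with `getImagenModel()`):
+     * ```ts
+     * const { images, filteredReason } = await imagenModel.generateImages('A watercolor fox');
+     * if (filteredReason) {
+     *   console.warn(`Some images were filtered out: ${filteredReason}`);
+     * }
+     * ```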
+ */ + filteredReason?: string; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/index.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/index.d.ts new file mode 100644 index 0000000..a8508d4 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/index.d.ts @@ -0,0 +1,26 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +export * from './content'; +export * from './enums'; +export * from './requests'; +export * from './responses'; +export * from './error'; +export * from './schema'; +export * from './imagen'; +export * from './googleai'; +export { LanguageModelCreateOptions, LanguageModelCreateCoreOptions, LanguageModelExpected, LanguageModelMessage, LanguageModelMessageContent, LanguageModelMessageContentValue, LanguageModelMessageRole, LanguageModelMessageType, LanguageModelPromptOptions } from './language-model'; +export * from './chrome-adapter'; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/internal.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/internal.d.ts new file mode 100644 index 0000000..3c16979 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/internal.d.ts @@ -0,0 +1,33 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { AppCheckTokenResult } from '@firebase/app-check-interop-types'; +import { FirebaseAuthTokenData } from '@firebase/auth-interop-types'; +import { Backend } from '../backend'; +export * from './imagen/internal'; +export interface ApiSettings { + apiKey: string; + project: string; + appId: string; + automaticDataCollectionEnabled?: boolean; + /** + * @deprecated Use `backend.location` instead. + */ + location: string; + backend: Backend; + getAuthToken?: () => Promise<FirebaseAuthTokenData | null>; + getAppCheckToken?: () => Promise<AppCheckTokenResult>; +} diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/language-model.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/language-model.d.ts new file mode 100644 index 0000000..9361a1f --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/language-model.d.ts @@ -0,0 +1,107 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * The subset of the Prompt API + * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl } + * required for hybrid functionality. + * + * @internal + */ +export interface LanguageModel extends EventTarget { + create(options?: LanguageModelCreateOptions): Promise<LanguageModel>; + availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>; + prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>; + promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream; + measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>; + destroy(): undefined; +} +/** + * @internal + */ +export declare enum Availability { + 'UNAVAILABLE' = "unavailable", + 'DOWNLOADABLE' = "downloadable", + 'DOWNLOADING' = "downloading", + 'AVAILABLE' = "available" +} +/** + * Configures the creation of an on-device language model session. + * @beta + */ +export interface LanguageModelCreateCoreOptions { + topK?: number; + temperature?: number; + expectedInputs?: LanguageModelExpected[]; +} +/** + * Configures the creation of an on-device language model session. + * @beta + */ +export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions { + signal?: AbortSignal; + initialPrompts?: LanguageModelMessage[]; +} +/** + * Options for an on-device language model prompt. + * @beta + */ +export interface LanguageModelPromptOptions { + responseConstraint?: object; +} +/** + * Options for the expected inputs for an on-device language model. + * @beta + */ export interface LanguageModelExpected { + type: LanguageModelMessageType; + languages?: string[]; +} +/** + * An on-device language model prompt. + * @beta + */ +export type LanguageModelPrompt = LanguageModelMessage[]; +/** + * An on-device language model message. + * @beta + */ +export interface LanguageModelMessage { + role: LanguageModelMessageRole; + content: LanguageModelMessageContent[]; +} +/** + * An on-device language model content object. + * @beta + */ +export interface LanguageModelMessageContent { + type: LanguageModelMessageType; + value: LanguageModelMessageContentValue; +} +/** + * Allowable roles for on-device language model usage. + * @beta + */ +export type LanguageModelMessageRole = 'system' | 'user' | 'assistant'; +/** + * Allowable types for on-device language model messages. + * @beta + */ +export type LanguageModelMessageType = 'text' | 'image' | 'audio'; +/** + * Content formats that can be provided as on-device message content. 
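+ *
+ * @example
+ * An illustrative prompt built from these message types (all values are assumptions):
+ * ```ts
+ * const initialPrompts: LanguageModelMessage[] = [
+ *   { role: 'system', content: [{ type: 'text', value: 'Answer in one sentence.' }] },
+ *   { role: 'user', content: [{ type: 'text', value: 'What is Firebase?' }] }
+ * ];
+ * ```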
+ * @beta + */ +export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/live-responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/live-responses.d.ts new file mode 100644 index 0000000..8270db9 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/live-responses.d.ts @@ -0,0 +1,79 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Content, FunctionResponse, GenerativeContentBlob, Part } from './content'; +import { AudioTranscriptionConfig, LiveGenerationConfig, Tool, ToolConfig } from './requests'; +import { Transcription } from './responses'; +/** + * User input that is sent to the model. + * + * @internal + */ +export interface _LiveClientContent { + clientContent: { + turns: [Content]; + turnComplete: boolean; + inputTranscription?: Transcription; + outputTranscription?: Transcription; + }; +} +/** + * User input that is sent to the model in real time. + * + * @internal + */ +export interface _LiveClientRealtimeInput { + realtimeInput: { + text?: string; + audio?: GenerativeContentBlob; + video?: GenerativeContentBlob; + /** + * @deprecated Use `text`, `audio`, and `video` instead. + */ + mediaChunks?: GenerativeContentBlob[]; + }; +} +/** + * Function responses that are sent to the model in real time. + */ +export interface _LiveClientToolResponse { + toolResponse: { + functionResponses: FunctionResponse[]; + }; +} +/** + * The first message in a Live session, used to configure generation options. + * + * @internal + */ +export interface _LiveClientSetup { + setup: { + model: string; + generationConfig?: _LiveGenerationConfig; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; + inputAudioTranscription?: AudioTranscriptionConfig; + outputAudioTranscription?: AudioTranscriptionConfig; + }; +} +/** + * The Live Generation Config. + * + * The public API ({@link LiveGenerationConfig}) has `inputAudioTranscription` and `outputAudioTranscription`, + * but the server expects these fields to be in the top-level `setup` message. This was a conscious API decision. + */ +export type _LiveGenerationConfig = Omit<LiveGenerationConfig, 'inputAudioTranscription' | 'outputAudioTranscription'>; diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/requests.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/requests.d.ts new file mode 100644 index 0000000..6df8be1 --- /dev/null +++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/requests.d.ts @@ -0,0 +1,464 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { ObjectSchema, TypedSchema } from '../requests/schema-builder'; +import { Content, Part } from './content'; +import { LanguageModelCreateOptions, LanguageModelPromptOptions } from './language-model'; +import { FunctionCallingMode, HarmBlockMethod, HarmBlockThreshold, HarmCategory, InferenceMode, ResponseModality } from './enums'; +import { ObjectSchemaRequest, SchemaRequest } from './schema'; +/** + * Base parameters for a number of methods. + * @public + */ +export interface BaseParams { + safetySettings?: SafetySetting[]; + generationConfig?: GenerationConfig; +} +/** + * Params passed to {@link getGenerativeModel}. + * @public + */ +export interface ModelParams extends BaseParams { + model: string; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} +/** + * Params passed to {@link getLiveGenerativeModel}. + * @beta + */ +export interface LiveModelParams { + model: string; + generationConfig?: LiveGenerationConfig; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} +/** + * Request sent through {@link GenerativeModel.generateContent} + * @public + */ +export interface GenerateContentRequest extends BaseParams { + contents: Content[]; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} +/** + * Safety setting that can be sent as part of request parameters. + * @public + */ +export interface SafetySetting { + category: HarmCategory; + threshold: HarmBlockThreshold; + /** + * The harm block method. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be + * thrown if this property is defined. + */ + method?: HarmBlockMethod; +} +/** + * Config options for content-related requests + * @public + */ +export interface GenerationConfig { + candidateCount?: number; + stopSequences?: string[]; + maxOutputTokens?: number; + temperature?: number; + topP?: number; + topK?: number; + presencePenalty?: number; + frequencyPenalty?: number; + /** + * Output response MIME type of the generated candidate text. + * Supported MIME types are `text/plain` (default, text output), + * `application/json` (JSON response in the candidates), and + * `text/x.enum`. + */ + responseMimeType?: string; + /** + * Output response schema of the generated candidate text. This + * value can be a class generated with a {@link Schema} static method + * like `Schema.string()` or `Schema.object()` or it can be a plain + * JS object matching the {@link SchemaRequest} interface. + * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently + * this is limited to `application/json` and `text/x.enum`. + */ + responseSchema?: TypedSchema | SchemaRequest; + /** + * Generation modalities to be returned in generation responses. 
+ * + * @remarks + * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}. + * - Only image generation (`ResponseModality.IMAGE`) is supported. + * + * @beta + */ + responseModalities?: ResponseModality[]; + /** + * Configuration for "thinking" behavior of compatible Gemini models. + */ + thinkingConfig?: ThinkingConfig; +} +/** + * Configuration parameters used by {@link LiveGenerativeModel} to control live content generation. + * + * @beta + */ +export interface LiveGenerationConfig { + /** + * Configuration for speech synthesis. + */ + speechConfig?: SpeechConfig; + /** + * Specifies the maximum number of tokens that can be generated in the response. The number of + * tokens per word varies depending on the language outputted. Is unbounded by default. + */ + maxOutputTokens?: number; + /** + * Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest + * probability tokens are always selected. In this case, responses for a given prompt are mostly + * deterministic, but a small amount of variation is still possible. + */ + temperature?: number; + /** + * Changes how the model selects tokens for output. Tokens are + * selected from the most to least probable until the sum of their probabilities equals the `topP` + * value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively + * and the `topP` value is 0.5, then the model will select either A or B as the next token by using + * the `temperature` and exclude C as a candidate. Defaults to 0.95 if unset. + */ + topP?: number; + /** + * Changes how the model selects token for output. A `topK` value of 1 means the select token is + * the most probable among all tokens in the model's vocabulary, while a `topK` value 3 means that + * the next token is selected from among the 3 most probably using probabilities sampled. Tokens + * are then further filtered with the highest selected `temperature` sampling. Defaults to 40 + * if unspecified. + */ + topK?: number; + /** + * Positive penalties. + */ + presencePenalty?: number; + /** + * Frequency penalties. + */ + frequencyPenalty?: number; + /** + * The modalities of the response. + */ + responseModalities?: ResponseModality[]; + /** + * Enables transcription of audio input. + * + * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscriptions` property + * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across + * messages, so you may only receive small amounts of text per message. For example, if you ask the model + * "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?". + */ + inputAudioTranscription?: AudioTranscriptionConfig; + /** + * Enables transcription of audio input. + * + * When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property + * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across + * messages, so you may only receive small amounts of text per message. For example, if the model says + * "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?". + */ + outputAudioTranscription?: AudioTranscriptionConfig; +} +/** + * Params for {@link GenerativeModel.startChat}. 
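+ *
+ * @example
+ * An illustrative sketch (assumes `model` was created with `getGenerativeModel()`):
+ * ```ts
+ * const chat = model.startChat({
+ *   history: [
+ *     { role: 'user', parts: [{ text: 'Hello.' }] },
+ *     { role: 'model', parts: [{ text: 'Hi! How can I help you today?' }] }
+ *   ]
+ * });
+ * const result = await chat.sendMessage('Summarize our conversation so far.');
+ * ```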
+ * @public + */ +export interface StartChatParams extends BaseParams { + history?: Content[]; + tools?: Tool[]; + toolConfig?: ToolConfig; + systemInstruction?: string | Part | Content; +} +/** + * Params for calling {@link GenerativeModel.countTokens} + * @public + */ +export interface CountTokensRequest { + contents: Content[]; + /** + * Instructions that direct the model to behave a certain way. + */ + systemInstruction?: string | Part | Content; + /** + * {@link Tool} configuration. + */ + tools?: Tool[]; + /** + * Configuration options that control how the model generates a response. + */ + generationConfig?: GenerationConfig; +} +/** + * Params passed to {@link getGenerativeModel}. + * @public + */ +export interface RequestOptions { + /** + * Request timeout in milliseconds. Defaults to 180 seconds (180000ms). + */ + timeout?: number; + /** + * Base url for endpoint. Defaults to + * https://firebasevertexai.googleapis.com, which is the + * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API} + * (used regardless of your chosen Gemini API provider). + */ + baseUrl?: string; +} +/** + * Defines a tool that model can call to access external knowledge. + * @public + */ +export type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool; +/** + * Structured representation of a function declaration as defined by the + * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}. + * Included + * in this declaration are the function name and parameters. This + * `FunctionDeclaration` is a representation of a block of code that can be used + * as a Tool by the model and executed by the client. + * @public + */ +export interface FunctionDeclaration { + /** + * The name of the function to call. Must start with a letter or an + * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with + * a max length of 64. + */ + name: string; + /** + * Description and purpose of the function. Model uses it to decide + * how and whether to call the function. + */ + description: string; + /** + * Optional. Describes the parameters to this function in JSON Schema Object + * format. Reflects the Open API 3.03 Parameter Object. Parameter names are + * case-sensitive. For a function with no parameters, this can be left unset. + */ + parameters?: ObjectSchema | ObjectSchemaRequest; +} +/** + * A tool that allows a Gemini model to connect to Google Search to access and incorporate + * up-to-date information from the web into its responses. + * + * Important: If using Grounding with Google Search, you are required to comply with the + * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API} + * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms} + * section within the Service Specific Terms). + * + * @public + */ +export interface GoogleSearchTool { + /** + * Specifies the Google Search configuration. + * Currently, this is an empty object, but it's reserved for future configuration options. 
+     *
+     * When using this feature, you are required to comply with the "Grounding with Google Search"
+     * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
+     * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
+     * section within the Service Specific Terms).
+     */
+    googleSearch: GoogleSearch;
+}
+/**
+ * A tool that enables the model to use code execution.
+ *
+ * @beta
+ */
+export interface CodeExecutionTool {
+    /**
+     * Specifies the code execution configuration.
+     * Currently, this is an empty object, but it's reserved for future configuration options.
+     */
+    codeExecution: {};
+}
+/**
+ * Specifies the Google Search configuration.
+ *
+ * @remarks Currently, this is an empty object, but it's reserved for future configuration options.
+ *
+ * @public
+ */
+export interface GoogleSearch {
+}
+/**
+ * A tool that allows you to provide additional context to the models in the form of public web
+ * URLs. By including URLs in your request, the Gemini model will access the content from those
+ * pages to inform and enhance its response.
+ *
+ * @beta
+ */
+export interface URLContextTool {
+    /**
+     * Specifies the URL Context configuration.
+     */
+    urlContext: URLContext;
+}
+/**
+ * Specifies the URL Context configuration.
+ *
+ * @beta
+ */
+export interface URLContext {
+}
+/**
+ * A `FunctionDeclarationsTool` is a piece of code that enables the system to
+ * interact with external systems to perform an action, or set of actions,
+ * outside of knowledge and scope of the model.
+ * @public
+ */
+export interface FunctionDeclarationsTool {
+    /**
+     * Optional. One or more function declarations
+     * to be passed to the model along with the current user query. Model may
+     * decide to call a subset of these functions by populating
+     * {@link FunctionCall} in the response. User should
+     * provide a {@link FunctionResponse} for each
+     * function call in the next turn. Based on the function responses, the model will
+     * generate the final response back to the user. Maximum 64 function
+     * declarations can be provided.
+     */
+    functionDeclarations?: FunctionDeclaration[];
+}
+/**
+ * Tool config. This config is shared for all tools provided in the request.
+ * @public
+ */
+export interface ToolConfig {
+    functionCallingConfig?: FunctionCallingConfig;
+}
+/**
+ * @public
+ */
+export interface FunctionCallingConfig {
+    mode?: FunctionCallingMode;
+    allowedFunctionNames?: string[];
+}
+/**
+ * Encapsulates configuration for on-device inference.
+ *
+ * @beta
+ */
+export interface OnDeviceParams {
+    createOptions?: LanguageModelCreateOptions;
+    promptOptions?: LanguageModelPromptOptions;
+}
+/**
+ * Configures hybrid inference.
+ * @beta
+ */
+export interface HybridParams {
+    /**
+     * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+     */
+    mode: InferenceMode;
+    /**
+     * Optional. Specifies advanced params for on-device inference.
+     */
+    onDeviceParams?: OnDeviceParams;
+    /**
+     * Optional. Specifies advanced params for in-cloud inference.
+     */
+    inCloudParams?: ModelParams;
+}
+/**
+ * Configuration for "thinking" behavior of compatible Gemini models.
+ *
+ * Certain models utilize a thinking process before generating a response. This allows them to
+ * reason through complex problems and plan a more coherent and accurate answer.
+ *
+ * @public
+ */
+export interface ThinkingConfig {
+    /**
+     * The thinking budget, in tokens.
+ *
+ * This parameter sets an upper limit on the number of tokens the model can use for its internal
+ * "thinking" process. A higher budget may result in higher quality responses for complex tasks
+ * but can also increase latency and cost.
+ *
+ * If you don't specify a budget, the model will determine the appropriate amount
+ * of thinking based on the complexity of the prompt.
+ *
+ * An error will be thrown if you set a thinking budget for a model that does not support this
+ * feature or if the specified budget is not within the model's supported range.
+ */
+ thinkingBudget?: number;
+ /**
+ * Whether to include "thought summaries" in the model's response.
+ *
+ * @remarks
+ * Thought summaries provide a brief overview of the model's internal thinking process,
+ * offering insight into how it arrived at the final answer. This can be useful for
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
+ */
+ includeThoughts?: boolean;
+}
+/**
+ * Configuration for a pre-built voice.
+ *
+ * @beta
+ */
+export interface PrebuiltVoiceConfig {
+ /**
+ * The voice name to use for speech synthesis.
+ *
+ * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}.
+ */
+ voiceName?: string;
+}
+/**
+ * Configuration for the voice to be used in speech synthesis.
+ *
+ * @beta
+ */
+export interface VoiceConfig {
+ /**
+ * Configures the voice using a pre-built voice configuration.
+ */
+ prebuiltVoiceConfig?: PrebuiltVoiceConfig;
+}
+/**
+ * Configures speech synthesis.
+ *
+ * @beta
+ */
+export interface SpeechConfig {
+ /**
+ * Configures the voice to be used in speech synthesis.
+ */
+ voiceConfig?: VoiceConfig;
+}
+/**
+ * The audio transcription configuration.
+ */
+export interface AudioTranscriptionConfig {
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/responses.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/responses.d.ts
new file mode 100644
index 0000000..8896455
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/responses.d.ts
@@ -0,0 +1,582 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { Content, FunctionCall, InlineDataPart } from './content';
+import { BlockReason, FinishReason, HarmCategory, HarmProbability, HarmSeverity, InferenceSource, Modality } from './enums';
+/**
+ * Result object returned from {@link GenerativeModel.generateContent} call.
+ *
+ * @public
+ */
+export interface GenerateContentResult {
+ response: EnhancedGenerateContentResponse;
+}
+/**
+ * Result object returned from {@link GenerativeModel.generateContentStream} call.
+ * Iterate over `stream` to get chunks as they come in and/or
+ * use the `response` promise to get the aggregated response when
+ * the stream is done.
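+ *
+ * @example
+ * A minimal consumption sketch; the `model` instance and the prompt below are assumptions used
+ * only for illustration:
+ * ```javascript
+ * // Stream chunks as they arrive, then read the aggregated response.
+ * const { stream, response } = await model.generateContentStream('Write a short poem.');
+ * for await (const chunk of stream) {
+ *   console.log(chunk.text());
+ * }
+ * const aggregated = await response;
+ * console.log(aggregated.text());
+ * ```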
+ * + * @public + */ +export interface GenerateContentStreamResult { + stream: AsyncGenerator<EnhancedGenerateContentResponse>; + response: Promise<EnhancedGenerateContentResponse>; +} +/** + * Response object wrapped with helper methods. + * + * @public + */ +export interface EnhancedGenerateContentResponse extends GenerateContentResponse { + /** + * Returns the text string from the response, if available. + * Throws if the prompt or candidate was blocked. + */ + text: () => string; + /** + * Aggregates and returns every {@link InlineDataPart} from the first candidate of + * {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + */ + inlineDataParts: () => InlineDataPart[] | undefined; + /** + * Aggregates and returns every {@link FunctionCall} from the first candidate of + * {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + */ + functionCalls: () => FunctionCall[] | undefined; + /** + * Aggregates and returns every {@link TextPart} with their `thought` property set + * to `true` from the first candidate of {@link GenerateContentResponse}. + * + * @throws If the prompt or candidate was blocked. + * + * @remarks + * Thought summaries provide a brief overview of the model's internal thinking process, + * offering insight into how it arrived at the final answer. This can be useful for + * debugging, understanding the model's reasoning, and verifying its accuracy. + * + * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is + * set to `true`. + */ + thoughtSummary: () => string | undefined; + /** + * Indicates whether inference happened on-device or in-cloud. + * + * @beta + */ + inferenceSource?: InferenceSource; +} +/** + * Individual response from {@link GenerativeModel.generateContent} and + * {@link GenerativeModel.generateContentStream}. + * `generateContentStream()` will return one in each chunk until + * the stream is done. + * @public + */ +export interface GenerateContentResponse { + candidates?: GenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} +/** + * Usage metadata about a {@link GenerateContentResponse}. + * + * @public + */ +export interface UsageMetadata { + promptTokenCount: number; + candidatesTokenCount: number; + /** + * The number of tokens used by the model's internal "thinking" process. + */ + thoughtsTokenCount?: number; + totalTokenCount: number; + /** + * The number of tokens used by tools. + */ + toolUsePromptTokenCount?: number; + promptTokensDetails?: ModalityTokenCount[]; + candidatesTokensDetails?: ModalityTokenCount[]; + /** + * A list of tokens used by tools, broken down by modality. + */ + toolUsePromptTokensDetails?: ModalityTokenCount[]; +} +/** + * Represents token counting info for a single modality. + * + * @public + */ +export interface ModalityTokenCount { + /** The modality associated with this token count. */ + modality: Modality; + /** The number of tokens counted. */ + tokenCount: number; +} +/** + * If the prompt was blocked, this will be populated with `blockReason` and + * the relevant `safetyRatings`. + * @public + */ +export interface PromptFeedback { + blockReason?: BlockReason; + safetyRatings: SafetyRating[]; + /** + * A human-readable description of the `blockReason`. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + */ + blockReasonMessage?: string; +} +/** + * A candidate returned as part of a {@link GenerateContentResponse}. 
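+ *
+ * For illustration, a hedged sketch of inspecting the first candidate of a
+ * {@link GenerateContentResult}; the `model` instance and the prompt are assumptions:
+ * ```javascript
+ * // Grounding metadata, citations, and safety ratings hang off each candidate.
+ * const { response } = await model.generateContent('How do solar panels work?');
+ * const candidate = response.candidates?.[0];
+ * if (candidate?.groundingMetadata) {
+ *   console.log(candidate.groundingMetadata.webSearchQueries);
+ * }
+ * ```
+ *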
+ * @public
+ */
+export interface GenerateContentCandidate {
+ index: number;
+ content: Content;
+ finishReason?: FinishReason;
+ finishMessage?: string;
+ safetyRatings?: SafetyRating[];
+ citationMetadata?: CitationMetadata;
+ groundingMetadata?: GroundingMetadata;
+ urlContextMetadata?: URLContextMetadata;
+}
+/**
+ * Citation metadata that may be found on a {@link GenerateContentCandidate}.
+ * @public
+ */
+export interface CitationMetadata {
+ citations: Citation[];
+}
+/**
+ * A single citation.
+ * @public
+ */
+export interface Citation {
+ startIndex?: number;
+ endIndex?: number;
+ uri?: string;
+ license?: string;
+ /**
+ * The title of the cited source, if available.
+ *
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
+ */
+ title?: string;
+ /**
+ * The publication date of the cited source, if available.
+ *
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
+ */
+ publicationDate?: Date;
+}
+/**
+ * Metadata returned when grounding is enabled.
+ *
+ * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).
+ *
+ * Important: If using Grounding with Google Search, you are required to comply with the
+ * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
+ * section within the Service Specific Terms).
+ *
+ * @public
+ */
+export interface GroundingMetadata {
+ /**
+ * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be
+ * embedded in an app to display a Google Search entry point for follow-up web searches related to
+ * a model's "Grounded Response".
+ */
+ searchEntryPoint?: SearchEntrypoint;
+ /**
+ * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content
+ * (for example, from a web page) that the model used to ground its response.
+ */
+ groundingChunks?: GroundingChunk[];
+ /**
+ * A list of {@link GroundingSupport} objects. Each object details how specific segments of the
+ * model's response are supported by the `groundingChunks`.
+ */
+ groundingSupports?: GroundingSupport[];
+ /**
+ * A list of web search queries that the model performed to gather the grounding information.
+ * These can be used to allow users to explore the search results themselves.
+ */
+ webSearchQueries?: string[];
+ /**
+ * @deprecated Use {@link GroundingSupport} instead.
+ */
+ retrievalQueries?: string[];
+}
+/**
+ * Google search entry point.
+ *
+ * @public
+ */
+export interface SearchEntrypoint {
+ /**
+ * HTML/CSS snippet that must be embedded in a web page. The snippet is designed to avoid
+ * undesired interaction with the rest of the page's CSS.
+ *
+ * To ensure proper rendering and prevent CSS conflicts, it is recommended
+ * to encapsulate this `renderedContent` within a shadow DOM when embedding it
+ * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.
+ * + * @example + * ```javascript + * const container = document.createElement('div'); + * document.body.appendChild(container); + * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent; + * ``` + */ + renderedContent?: string; +} +/** + * Represents a chunk of retrieved data that supports a claim in the model's response. This is part + * of the grounding information provided when grounding is enabled. + * + * @public + */ +export interface GroundingChunk { + /** + * Contains details if the grounding chunk is from a web source. + */ + web?: WebGroundingChunk; +} +/** + * A grounding chunk from the web. + * + * Important: If using Grounding with Google Search, you are required to comply with the + * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for "Grounding with Google Search". + * + * @public + */ +export interface WebGroundingChunk { + /** + * The URI of the retrieved web page. + */ + uri?: string; + /** + * The title of the retrieved web page. + */ + title?: string; + /** + * The domain of the original URI from which the content was retrieved. + * + * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be + * `undefined`. + */ + domain?: string; +} +/** + * Provides information about how a specific segment of the model's response is supported by the + * retrieved grounding chunks. + * + * @public + */ +export interface GroundingSupport { + /** + * Specifies the segment of the model's response content that this grounding support pertains to. + */ + segment?: Segment; + /** + * A list of indices that refer to specific {@link GroundingChunk} objects within the + * {@link GroundingMetadata.groundingChunks} array. These referenced chunks + * are the sources that support the claim made in the associated `segment` of the response. + * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`, + * and `groundingChunks[4]` are the retrieved content supporting this part of the response. + */ + groundingChunkIndices?: number[]; +} +/** + * Represents a specific segment within a {@link Content} object, often used to + * pinpoint the exact location of text or data that grounding information refers to. + * + * @public + */ +export interface Segment { + /** + * The zero-based index of the {@link Part} object within the `parts` array + * of its parent {@link Content} object. This identifies which part of the + * content the segment belongs to. + */ + partIndex: number; + /** + * The zero-based start index of the segment within the specified `Part`, + * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the + * beginning of the part's content (e.g., `Part.text`). + */ + startIndex: number; + /** + * The zero-based end index of the segment within the specified `Part`, + * measured in UTF-8 bytes. This offset is exclusive, meaning the character + * at this index is not included in the segment. + */ + endIndex: number; + /** + * The text corresponding to the segment from the response. + */ + text: string; +} +/** + * Metadata related to {@link URLContextTool}. + * + * @beta + */ +export interface URLContextMetadata { + /** + * List of URL metadata used to provide context to the Gemini model. + */ + urlMetadata: URLMetadata[]; +} +/** + * Metadata for a single URL retrieved by the {@link URLContextTool} tool. + * + * @beta + */ +export interface URLMetadata { + /** + * The retrieved URL. 
+ */ + retrievedUrl?: string; + /** + * The status of the URL retrieval. + */ + urlRetrievalStatus?: URLRetrievalStatus; +} +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +export declare const URLRetrievalStatus: { + /** + * Unspecified retrieval status. + */ + URL_RETRIEVAL_STATUS_UNSPECIFIED: string; + /** + * The URL retrieval was successful. + */ + URL_RETRIEVAL_STATUS_SUCCESS: string; + /** + * The URL retrieval failed. + */ + URL_RETRIEVAL_STATUS_ERROR: string; + /** + * The URL retrieval failed because the content is behind a paywall. + */ + URL_RETRIEVAL_STATUS_PAYWALL: string; + /** + * The URL retrieval failed because the content is unsafe. + */ + URL_RETRIEVAL_STATUS_UNSAFE: string; +}; +/** + * The status of a URL retrieval. + * + * @remarks + * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status. + * <br/> + * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful. + * <br/> + * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed. + * <br/> + * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall. + * <br/> + * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe. + * <br/> + * + * @beta + */ +export type URLRetrievalStatus = (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus]; +/** + * @public + */ +export interface WebAttribution { + uri: string; + title: string; +} +/** + * @public + */ +export interface RetrievedContextAttribution { + uri: string; + title: string; +} +/** + * Protobuf google.type.Date + * @public + */ +export interface Date { + year: number; + month: number; + day: number; +} +/** + * A safety rating associated with a {@link GenerateContentCandidate} + * @public + */ +export interface SafetyRating { + category: HarmCategory; + probability: HarmProbability; + /** + * The harm severity level. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`. + */ + severity: HarmSeverity; + /** + * The probability score of the harm category. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0. + */ + probabilityScore: number; + /** + * The severity score of the harm category. + * + * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}). + * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0. + */ + severityScore: number; + blocked: boolean; +} +/** + * Response from calling {@link GenerativeModel.countTokens}. + * @public + */ +export interface CountTokensResponse { + /** + * The total number of tokens counted across all instances from the request. 
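+ *
+ * @example
+ * A hypothetical sketch of reading this value; the `model` instance and the request contents
+ * are assumptions used only for illustration:
+ * ```javascript
+ * // Count tokens for a simple single-turn prompt.
+ * const { totalTokens } = await model.countTokens({
+ *   contents: [{ role: 'user', parts: [{ text: 'Hello!' }] }]
+ * });
+ * console.log(`The prompt uses ${totalTokens} tokens.`);
+ * ```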
+ */
+ totalTokens: number;
+ /**
+ * @deprecated Use `totalTokens` instead. This property is undefined when using models newer than `gemini-1.5-*`.
+ *
+ * The total number of billable characters counted across all instances
+ * from the request.
+ */
+ totalBillableCharacters?: number;
+ /**
+ * The breakdown, by modality, of how many tokens are consumed by the prompt.
+ */
+ promptTokensDetails?: ModalityTokenCount[];
+}
+/**
+ * An incremental content update from the model.
+ *
+ * @beta
+ */
+export interface LiveServerContent {
+ type: 'serverContent';
+ /**
+ * The content that the model has generated as part of the current conversation with the user.
+ */
+ modelTurn?: Content;
+ /**
+ * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.
+ */
+ turnComplete?: boolean;
+ /**
+ * Indicates whether the model was interrupted by the client. An interruption occurs when
+ * the client sends a message before the model finishes its turn. This is `undefined` if the
+ * model was not interrupted.
+ */
+ interrupted?: boolean;
+ /**
+ * Transcription of the audio that was input to the model.
+ */
+ inputTranscription?: Transcription;
+ /**
+ * Transcription of the audio output from the model.
+ */
+ outputTranscription?: Transcription;
+}
+/**
+ * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription
+ * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on
+ * the {@link LiveGenerationConfig}.
+ *
+ * @beta
+ */
+export interface Transcription {
+ /**
+ * The text transcription of the audio.
+ */
+ text?: string;
+}
+/**
+ * A request from the model for the client to execute one or more functions.
+ *
+ * @beta
+ */
+export interface LiveServerToolCall {
+ type: 'toolCall';
+ /**
+ * An array of function calls to run.
+ */
+ functionCalls: FunctionCall[];
+}
+/**
+ * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.
+ *
+ * @beta
+ */
+export interface LiveServerToolCallCancellation {
+ type: 'toolCallCancellation';
+ /**
+ * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.
+ */
+ functionIds: string[];
+}
+/**
+ * The types of responses that can be returned by {@link LiveSession.receive}.
+ *
+ * @beta
+ */
+export declare const LiveResponseType: {
+ SERVER_CONTENT: string;
+ TOOL_CALL: string;
+ TOOL_CALL_CANCELLATION: string;
+};
+/**
+ * The types of responses that can be returned by {@link LiveSession.receive}.
+ * This is a property on all messages that can be used for type narrowing. This property is not
+ * returned by the server; it is assigned to a server message object once the message is parsed.
+ *
+ * @beta
+ */
+export type LiveResponseType = (typeof LiveResponseType)[keyof typeof LiveResponseType];
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/types/schema.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/types/schema.d.ts
new file mode 100644
index 0000000..7abb2d1
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/types/schema.d.ts
@@ -0,0 +1,139 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +export declare const SchemaType: { + /** String type. */ + readonly STRING: "string"; + /** Number type. */ + readonly NUMBER: "number"; + /** Integer type. */ + readonly INTEGER: "integer"; + /** Boolean type. */ + readonly BOOLEAN: "boolean"; + /** Array type. */ + readonly ARRAY: "array"; + /** Object type. */ + readonly OBJECT: "object"; +}; +/** + * Contains the list of OpenAPI data types + * as defined by the + * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification} + * @public + */ +export type SchemaType = (typeof SchemaType)[keyof typeof SchemaType]; +/** + * Basic {@link Schema} properties shared across several Schema-related + * types. + * @public + */ +export interface SchemaShared<T> { + /** + * An array of {@link Schema}. The generated data must be valid against any of the schemas + * listed in this array. This allows specifying multiple possible structures or types for a + * single field. + */ + anyOf?: T[]; + /** Optional. The format of the property. + * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or + * `'date-time'`, otherwise requests will fail. + */ + format?: string; + /** Optional. The description of the property. */ + description?: string; + /** + * The title of the property. This helps document the schema's purpose but does not typically + * constrain the generated value. It can subtly guide the model by clarifying the intent of a + * field. + */ + title?: string; + /** Optional. The items of the property. */ + items?: T; + /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + minItems?: number; + /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */ + maxItems?: number; + /** Optional. Map of `Schema` objects. */ + properties?: { + [k: string]: T; + }; + /** A hint suggesting the order in which the keys should appear in the generated JSON string. */ + propertyOrdering?: string[]; + /** Optional. The enum of the property. */ + enum?: string[]; + /** Optional. The example of the property. */ + example?: unknown; + /** Optional. Whether the property is nullable. */ + nullable?: boolean; + /** The minimum value of a numeric type. */ + minimum?: number; + /** The maximum value of a numeric type. */ + maximum?: number; + [key: string]: unknown; +} +/** + * Params passed to {@link Schema} static methods to create specific + * {@link Schema} classes. + * @public + */ +export interface SchemaParams extends SchemaShared<SchemaInterface> { +} +/** + * Final format for {@link Schema} params passed to backend requests. + * @public + */ +export interface SchemaRequest extends SchemaShared<SchemaRequest> { + /** + * The type of the property. 
+ * This can only be undefined when using `anyOf` schemas,
+ * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.
+ */
+ type?: SchemaType;
+ /** Optional. Array of names of required properties. */
+ required?: string[];
+}
+/**
+ * Interface for {@link Schema} class.
+ * @public
+ */
+export interface SchemaInterface extends SchemaShared<SchemaInterface> {
+ /**
+ * The type of the property. This can only be undefined when using `anyOf` schemas,
+ * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.
+ */
+ type?: SchemaType;
+}
+/**
+ * Interface for JSON parameters in a schema of {@link (SchemaType:type)}
+ * "object" when not using the `Schema.object()` helper.
+ * @public
+ */
+export interface ObjectSchemaRequest extends SchemaRequest {
+ type: 'object';
+ /**
+ * This is not a property accepted in the final request to the backend, but is
+ * a client-side convenience property that is only usable by constructing
+ * a schema through the `Schema.object()` helper method. Populating this
+ * property will cause response errors if the object is not wrapped with
+ * `Schema.object()`.
+ */
+ optionalProperties?: never;
+}
diff --git a/frontend-old/node_modules/@firebase/ai/dist/src/websocket.d.ts b/frontend-old/node_modules/@firebase/ai/dist/src/websocket.d.ts
new file mode 100644
index 0000000..e2d511c
--- /dev/null
+++ b/frontend-old/node_modules/@firebase/ai/dist/src/websocket.d.ts
@@ -0,0 +1,67 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * A standardized interface for interacting with a WebSocket connection.
+ * This abstraction allows the SDK to use the appropriate WebSocket implementation
+ * for the current JS environment (Browser vs. Node) without
+ * changing the core logic of the `LiveSession`.
+ * @internal
+ */
+export interface WebSocketHandler {
+ /**
+ * Establishes a connection to the given URL.
+ *
+ * @param url The WebSocket URL (e.g., wss://...).
+ * @returns A promise that resolves on successful connection or rejects on failure.
+ */
+ connect(url: string): Promise<void>;
+ /**
+ * Sends data over the WebSocket.
+ *
+ * @param data The string or binary data to send.
+ */
+ send(data: string | ArrayBuffer): void;
+ /**
+ * Returns an async generator that yields parsed JSON objects from the server.
+ * The yielded type is `unknown` because the handler cannot guarantee the shape of the data.
+ * The consumer is responsible for type validation.
+ * The generator terminates when the connection is closed.
+ *
+ * @returns A generator that allows consumers to pull messages using a `for await...of` loop.
+ */
+ listen(): AsyncGenerator<unknown>;
+ /**
+ * Closes the WebSocket connection.
+ *
+ * @param code - A numeric status code explaining why the connection is closing.
+ * @param reason - A human-readable string explaining why the connection is closing.
+ */
+ close(code?: number, reason?: string): Promise<void>;
+}
+/**
+ * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.
+ *
+ * @internal
+ */
+export declare class WebSocketHandlerImpl implements WebSocketHandler {
+ private ws?;
+ constructor();
+ connect(url: string): Promise<void>;
+ send(data: string | ArrayBuffer): void;
+ listen(): AsyncGenerator<unknown>;
+ close(code?: number, reason?: string): Promise<void>;
+}
