Diffstat (limited to 'frontend-old/node_modules/firebase/firebase-ai.js.map')
| -rw-r--r-- | frontend-old/node_modules/firebase/firebase-ai.js.map | 1 |
1 file changed, 0 insertions, 1 deletion
diff --git a/frontend-old/node_modules/firebase/firebase-ai.js.map b/frontend-old/node_modules/firebase/firebase-ai.js.map deleted file mode 100644 index a1b2798..0000000 --- a/frontend-old/node_modules/firebase/firebase-ai.js.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"file":"firebase-ai.js","sources":["../util/src/deferred.ts","../util/src/errors.ts","../component/src/component.ts","../logger/src/logger.ts","../ai/src/constants.ts","../ai/src/errors.ts","../ai/src/types/enums.ts","../ai/src/types/responses.ts","../ai/src/types/error.ts","../ai/src/types/schema.ts","../ai/src/types/imagen/requests.ts","../ai/src/public-types.ts","../ai/src/backend.ts","../ai/src/logger.ts","../ai/src/types/language-model.ts","../ai/src/methods/chrome-adapter.ts","../ai/src/service.ts","../ai/src/factory-browser.ts","../ai/src/helpers.ts","../ai/src/models/ai-model.ts","../ai/src/requests/request.ts","../ai/src/requests/response-helpers.ts","../ai/src/googleai-mappers.ts","../ai/src/requests/stream-reader.ts","../ai/src/requests/hybrid-helpers.ts","../ai/src/methods/generate-content.ts","../ai/src/requests/request-helpers.ts","../ai/src/methods/chat-session-helpers.ts","../ai/src/methods/chat-session.ts","../ai/src/methods/count-tokens.ts","../ai/src/models/generative-model.ts","../ai/src/methods/live-session.ts","../ai/src/models/live-generative-model.ts","../ai/src/models/imagen-model.ts","../ai/src/websocket.ts","../ai/src/requests/schema-builder.ts","../ai/src/requests/imagen-image-format.ts","../ai/src/methods/live-session-helpers.ts","../ai/src/api.ts","../util/src/compat.ts","../ai/src/index.ts"],"sourcesContent":["/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport class Deferred<R> {\n promise: Promise<R>;\n reject: (value?: unknown) => void = () => {};\n resolve: (value?: unknown) => void = () => {};\n constructor() {\n this.promise = new Promise((resolve, reject) => {\n this.resolve = resolve as (value?: unknown) => void;\n this.reject = reject as (value?: unknown) => void;\n });\n }\n\n /**\n * Our API internals are not promisified and cannot because our callback APIs have subtle expectations around\n * invoking promises inline, which Promises are forbidden to do. This method accepts an optional node-style callback\n * and returns a node-style callback which will resolve or reject the Deferred's promise.\n */\n wrapCallback(\n callback?: (error?: unknown, value?: unknown) => void\n ): (error: unknown, value?: unknown) => void {\n return (error, value?) 
=> {\n if (error) {\n this.reject(error);\n } else {\n this.resolve(value);\n }\n if (typeof callback === 'function') {\n // Attaching noop handler just in case developer wasn't expecting\n // promises\n this.promise.catch(() => {});\n\n // Some of our callbacks don't expect a value and our own tests\n // assert that the parameter length is 1\n if (callback.length === 1) {\n callback(error);\n } else {\n callback(error, value);\n }\n }\n };\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * @fileoverview Standardized Firebase Error.\n *\n * Usage:\n *\n * // TypeScript string literals for type-safe codes\n * type Err =\n * 'unknown' |\n * 'object-not-found'\n * ;\n *\n * // Closure enum for type-safe error codes\n * // at-enum {string}\n * var Err = {\n * UNKNOWN: 'unknown',\n * OBJECT_NOT_FOUND: 'object-not-found',\n * }\n *\n * let errors: Map<Err, string> = {\n * 'generic-error': \"Unknown error\",\n * 'file-not-found': \"Could not find file: {$file}\",\n * };\n *\n * // Type-safe function - must pass a valid error code as param.\n * let error = new ErrorFactory<Err>('service', 'Service', errors);\n *\n * ...\n * throw error.create(Err.GENERIC);\n * ...\n * throw error.create(Err.FILE_NOT_FOUND, {'file': fileName});\n * ...\n * // Service: Could not file file: foo.txt (service/file-not-found).\n *\n * catch (e) {\n * assert(e.message === \"Could not find file: foo.txt.\");\n * if ((e as FirebaseError)?.code === 'service/file-not-found') {\n * console.log(\"Could not read file: \" + e['file']);\n * }\n * }\n */\n\nexport type ErrorMap<ErrorCode extends string> = {\n readonly [K in ErrorCode]: string;\n};\n\nconst ERROR_NAME = 'FirebaseError';\n\nexport interface StringLike {\n toString(): string;\n}\n\nexport interface ErrorData {\n [key: string]: unknown;\n}\n\n// Based on code from:\n// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Error#Custom_Error_Types\nexport class FirebaseError extends Error {\n /** The custom name for all FirebaseErrors. */\n readonly name: string = ERROR_NAME;\n\n constructor(\n /** The error code for this error. */\n readonly code: string,\n message: string,\n /** Custom data for this error. 
*/\n public customData?: Record<string, unknown>\n ) {\n super(message);\n\n // Fix For ES5\n // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work\n // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget\n // which we can now use since we no longer target ES5.\n Object.setPrototypeOf(this, FirebaseError.prototype);\n\n // Maintains proper stack trace for where our error was thrown.\n // Only available on V8.\n if (Error.captureStackTrace) {\n Error.captureStackTrace(this, ErrorFactory.prototype.create);\n }\n }\n}\n\nexport class ErrorFactory<\n ErrorCode extends string,\n ErrorParams extends { readonly [K in ErrorCode]?: ErrorData } = {}\n> {\n constructor(\n private readonly service: string,\n private readonly serviceName: string,\n private readonly errors: ErrorMap<ErrorCode>\n ) {}\n\n create<K extends ErrorCode>(\n code: K,\n ...data: K extends keyof ErrorParams ? [ErrorParams[K]] : []\n ): FirebaseError {\n const customData = (data[0] as ErrorData) || {};\n const fullCode = `${this.service}/${code}`;\n const template = this.errors[code];\n\n const message = template ? replaceTemplate(template, customData) : 'Error';\n // Service Name: Error message (service/code).\n const fullMessage = `${this.serviceName}: ${message} (${fullCode}).`;\n\n const error = new FirebaseError(fullCode, fullMessage, customData);\n\n return error;\n }\n}\n\nfunction replaceTemplate(template: string, data: ErrorData): string {\n return template.replace(PATTERN, (_, key) => {\n const value = data[key];\n return value != null ? String(value) : `<${key}?>`;\n });\n}\n\nconst PATTERN = /\\{\\$([^}]+)}/g;\n","/**\n * @license\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nimport {\n InstantiationMode,\n InstanceFactory,\n ComponentType,\n Dictionary,\n Name,\n onInstanceCreatedCallback\n} from './types';\n\n/**\n * Component for service name T, e.g. `auth`, `auth-internal`\n */\nexport class Component<T extends Name = Name> {\n multipleInstances = false;\n /**\n * Properties to be added to the service namespace\n */\n serviceProps: Dictionary = {};\n\n instantiationMode = InstantiationMode.LAZY;\n\n onInstanceCreated: onInstanceCreatedCallback<T> | null = null;\n\n /**\n *\n * @param name The public service name, e.g. 
app, auth, firestore, database\n * @param instanceFactory Service factory responsible for creating the public interface\n * @param type whether the service provided by the component is public or private\n */\n constructor(\n readonly name: T,\n readonly instanceFactory: InstanceFactory<T>,\n readonly type: ComponentType\n ) {}\n\n setInstantiationMode(mode: InstantiationMode): this {\n this.instantiationMode = mode;\n return this;\n }\n\n setMultipleInstances(multipleInstances: boolean): this {\n this.multipleInstances = multipleInstances;\n return this;\n }\n\n setServiceProps(props: Dictionary): this {\n this.serviceProps = props;\n return this;\n }\n\n setInstanceCreatedCallback(callback: onInstanceCreatedCallback<T>): this {\n this.onInstanceCreated = callback;\n return this;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport type LogLevelString =\n | 'debug'\n | 'verbose'\n | 'info'\n | 'warn'\n | 'error'\n | 'silent';\n\nexport interface LogOptions {\n level: LogLevelString;\n}\n\nexport type LogCallback = (callbackParams: LogCallbackParams) => void;\n\nexport interface LogCallbackParams {\n level: LogLevelString;\n message: string;\n args: unknown[];\n type: string;\n}\n\n/**\n * A container for all of the Logger instances\n */\nexport const instances: Logger[] = [];\n\n/**\n * The JS SDK supports 5 log levels and also allows a user the ability to\n * silence the logs altogether.\n *\n * The order is a follows:\n * DEBUG < VERBOSE < INFO < WARN < ERROR\n *\n * All of the log types above the current log level will be captured (i.e. if\n * you set the log level to `INFO`, errors will still be logged, but `DEBUG` and\n * `VERBOSE` logs will not)\n */\nexport enum LogLevel {\n DEBUG,\n VERBOSE,\n INFO,\n WARN,\n ERROR,\n SILENT\n}\n\nconst levelStringToEnum: { [key in LogLevelString]: LogLevel } = {\n 'debug': LogLevel.DEBUG,\n 'verbose': LogLevel.VERBOSE,\n 'info': LogLevel.INFO,\n 'warn': LogLevel.WARN,\n 'error': LogLevel.ERROR,\n 'silent': LogLevel.SILENT\n};\n\n/**\n * The default log level\n */\nconst defaultLogLevel: LogLevel = LogLevel.INFO;\n\n/**\n * We allow users the ability to pass their own log handler. We will pass the\n * type of log, the current log level, and any other arguments passed (i.e. the\n * messages that the user wants to log) to this function.\n */\nexport type LogHandler = (\n loggerInstance: Logger,\n logType: LogLevel,\n ...args: unknown[]\n) => void;\n\n/**\n * By default, `console.debug` is not displayed in the developer console (in\n * chrome). To avoid forcing users to have to opt-in to these logs twice\n * (i.e. 
once for firebase, and once in the console), we are sending `DEBUG`\n * logs to the `console.log` function.\n */\nconst ConsoleMethod = {\n [LogLevel.DEBUG]: 'log',\n [LogLevel.VERBOSE]: 'log',\n [LogLevel.INFO]: 'info',\n [LogLevel.WARN]: 'warn',\n [LogLevel.ERROR]: 'error'\n};\n\n/**\n * The default log handler will forward DEBUG, VERBOSE, INFO, WARN, and ERROR\n * messages on to their corresponding console counterparts (if the log method\n * is supported by the current log level)\n */\nconst defaultLogHandler: LogHandler = (instance, logType, ...args): void => {\n if (logType < instance.logLevel) {\n return;\n }\n const now = new Date().toISOString();\n const method = ConsoleMethod[logType as keyof typeof ConsoleMethod];\n if (method) {\n console[method as 'log' | 'info' | 'warn' | 'error'](\n `[${now}] ${instance.name}:`,\n ...args\n );\n } else {\n throw new Error(\n `Attempted to log a message with an invalid logType (value: ${logType})`\n );\n }\n};\n\nexport class Logger {\n /**\n * Gives you an instance of a Logger to capture messages according to\n * Firebase's logging scheme.\n *\n * @param name The name that the logs will be associated with\n */\n constructor(public name: string) {\n /**\n * Capture the current instance for later use\n */\n instances.push(this);\n }\n\n /**\n * The log level of the given Logger instance.\n */\n private _logLevel = defaultLogLevel;\n\n get logLevel(): LogLevel {\n return this._logLevel;\n }\n\n set logLevel(val: LogLevel) {\n if (!(val in LogLevel)) {\n throw new TypeError(`Invalid value \"${val}\" assigned to \\`logLevel\\``);\n }\n this._logLevel = val;\n }\n\n // Workaround for setter/getter having to be the same type.\n setLogLevel(val: LogLevel | LogLevelString): void {\n this._logLevel = typeof val === 'string' ? 
levelStringToEnum[val] : val;\n }\n\n /**\n * The main (internal) log handler for the Logger instance.\n * Can be set to a new function in internal package code but not by user.\n */\n private _logHandler: LogHandler = defaultLogHandler;\n get logHandler(): LogHandler {\n return this._logHandler;\n }\n set logHandler(val: LogHandler) {\n if (typeof val !== 'function') {\n throw new TypeError('Value assigned to `logHandler` must be a function');\n }\n this._logHandler = val;\n }\n\n /**\n * The optional, additional, user-defined log handler for the Logger instance.\n */\n private _userLogHandler: LogHandler | null = null;\n get userLogHandler(): LogHandler | null {\n return this._userLogHandler;\n }\n set userLogHandler(val: LogHandler | null) {\n this._userLogHandler = val;\n }\n\n /**\n * The functions below are all based on the `console` interface\n */\n\n debug(...args: unknown[]): void {\n this._userLogHandler && this._userLogHandler(this, LogLevel.DEBUG, ...args);\n this._logHandler(this, LogLevel.DEBUG, ...args);\n }\n log(...args: unknown[]): void {\n this._userLogHandler &&\n this._userLogHandler(this, LogLevel.VERBOSE, ...args);\n this._logHandler(this, LogLevel.VERBOSE, ...args);\n }\n info(...args: unknown[]): void {\n this._userLogHandler && this._userLogHandler(this, LogLevel.INFO, ...args);\n this._logHandler(this, LogLevel.INFO, ...args);\n }\n warn(...args: unknown[]): void {\n this._userLogHandler && this._userLogHandler(this, LogLevel.WARN, ...args);\n this._logHandler(this, LogLevel.WARN, ...args);\n }\n error(...args: unknown[]): void {\n this._userLogHandler && this._userLogHandler(this, LogLevel.ERROR, ...args);\n this._logHandler(this, LogLevel.ERROR, ...args);\n }\n}\n\nexport function setLogLevel(level: LogLevelString | LogLevel): void {\n instances.forEach(inst => {\n inst.setLogLevel(level);\n });\n}\n\nexport function setUserLogHandler(\n logCallback: LogCallback | null,\n options?: LogOptions\n): void {\n for (const instance of instances) {\n let customLogLevel: LogLevel | null = null;\n if (options && options.level) {\n customLogLevel = levelStringToEnum[options.level];\n }\n if (logCallback === null) {\n instance.userLogHandler = null;\n } else {\n instance.userLogHandler = (\n instance: Logger,\n level: LogLevel,\n ...args: unknown[]\n ) => {\n const message = args\n .map(arg => {\n if (arg == null) {\n return null;\n } else if (typeof arg === 'string') {\n return arg;\n } else if (typeof arg === 'number' || typeof arg === 'boolean') {\n return arg.toString();\n } else if (arg instanceof Error) {\n return arg.message;\n } else {\n try {\n return JSON.stringify(arg);\n } catch (ignored) {\n return null;\n }\n }\n })\n .filter(arg => arg)\n .join(' ');\n if (level >= (customLogLevel ?? 
instance.logLevel)) {\n logCallback({\n level: LogLevel[level].toLowerCase() as LogLevelString,\n message,\n args,\n type: instance.name\n });\n }\n };\n }\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { version } from '../package.json';\n\nexport const AI_TYPE = 'AI';\n\nexport const DEFAULT_LOCATION = 'us-central1';\n\nexport const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com';\n\nexport const DEFAULT_API_VERSION = 'v1beta';\n\nexport const PACKAGE_VERSION = version;\n\nexport const LANGUAGE_TAG = 'gl-js';\n\nexport const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;\n\n/**\n * Defines the name of the default in-cloud model to use for hybrid inference.\n */\nexport const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseError } from '@firebase/util';\nimport { AIErrorCode, CustomErrorData } from './types';\nimport { AI_TYPE } from './constants';\n\n/**\n * Error class for the Firebase AI SDK.\n *\n * @public\n */\nexport class AIError extends FirebaseError {\n /**\n * Constructs a new instance of the `AIError` class.\n *\n * @param code - The error code from {@link (AIErrorCode:type)}.\n * @param message - A human-readable message describing the error.\n * @param customErrorData - Optional error data.\n */\n constructor(\n readonly code: AIErrorCode,\n message: string,\n readonly customErrorData?: CustomErrorData\n ) {\n // Match error format used by FirebaseError from ErrorFactory\n const service = AI_TYPE;\n const fullCode = `${service}/${code}`;\n const fullMessage = `${service}: ${message} (${fullCode})`;\n super(code, fullMessage);\n\n // FirebaseError initializes a stack trace, but it assumes the error is created from the error\n // factory. 
Since we break this assumption, we set the stack trace to be originating from this\n // constructor.\n // This is only supported in V8.\n if (Error.captureStackTrace) {\n // Allows us to initialize the stack trace without including the constructor itself at the\n // top level of the stack trace.\n Error.captureStackTrace(this, AIError);\n }\n\n // Allows instanceof AIError in ES5/ES6\n // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work\n // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget\n // which we can now use since we no longer target ES5.\n Object.setPrototypeOf(this, AIError.prototype);\n\n // Since Error is an interface, we don't inherit toString and so we define it ourselves.\n this.toString = () => fullMessage;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Role is the producer of the content.\n * @public\n */\nexport type Role = (typeof POSSIBLE_ROLES)[number];\n\n/**\n * Possible roles.\n * @public\n */\nexport const POSSIBLE_ROLES = ['user', 'model', 'function', 'system'] as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport const HarmCategory = {\n HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'\n} as const;\n\n/**\n * Harm categories that would cause prompts or candidates to be blocked.\n * @public\n */\nexport type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport const HarmBlockThreshold = {\n /**\n * Content with `NEGLIGIBLE` will be allowed.\n */\n BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE` and `LOW` will be allowed.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',\n /**\n * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.\n */\n BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',\n /**\n * All content will be allowed.\n */\n BLOCK_NONE: 'BLOCK_NONE',\n /**\n * All content will be allowed. 
This is the same as `BLOCK_NONE`, but the metadata corresponding\n * to the {@link (HarmCategory:type)} will not be present in the response.\n */\n OFF: 'OFF'\n} as const;\n\n/**\n * Threshold above which a prompt or candidate will be blocked.\n * @public\n */\nexport type HarmBlockThreshold =\n (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport const HarmBlockMethod = {\n /**\n * The harm block method uses both probability and severity scores.\n */\n SEVERITY: 'SEVERITY',\n /**\n * The harm block method uses the probability score.\n */\n PROBABILITY: 'PROBABILITY'\n} as const;\n\n/**\n * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).\n *\n * @public\n */\nexport type HarmBlockMethod =\n (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport const HarmProbability = {\n /**\n * Content has a negligible chance of being unsafe.\n */\n NEGLIGIBLE: 'NEGLIGIBLE',\n /**\n * Content has a low chance of being unsafe.\n */\n LOW: 'LOW',\n /**\n * Content has a medium chance of being unsafe.\n */\n MEDIUM: 'MEDIUM',\n /**\n * Content has a high chance of being unsafe.\n */\n HIGH: 'HIGH'\n} as const;\n\n/**\n * Probability that a prompt or candidate matches a harm category.\n * @public\n */\nexport type HarmProbability =\n (typeof HarmProbability)[keyof typeof HarmProbability];\n\n/**\n * Harm severity levels.\n * @public\n */\nexport const HarmSeverity = {\n /**\n * Negligible level of harm severity.\n */\n HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE',\n /**\n * Low level of harm severity.\n */\n HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW',\n /**\n * Medium level of harm severity.\n */\n HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM',\n /**\n * High level of harm severity.\n */\n HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH',\n /**\n * Harm severity is not supported.\n *\n * @remarks\n * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.\n */\n HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED'\n} as const;\n\n/**\n * Harm severity levels.\n * @public\n */\nexport type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport const BlockReason = {\n /**\n * Content was blocked by safety settings.\n */\n SAFETY: 'SAFETY',\n /**\n * Content was blocked, but the reason is uncategorized.\n */\n OTHER: 'OTHER',\n /**\n * Content was blocked because it contained terms from the terminology blocklist.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * Content was blocked due to prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT'\n} as const;\n\n/**\n * Reason that a prompt was blocked.\n * @public\n */\nexport type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport const FinishReason = {\n /**\n * Natural stop point of the model or provided stop sequence.\n */\n STOP: 'STOP',\n /**\n * The maximum number of tokens as specified in the request was reached.\n */\n MAX_TOKENS: 'MAX_TOKENS',\n /**\n * The candidate content was flagged for safety reasons.\n */\n SAFETY: 'SAFETY',\n /**\n * The candidate content was flagged for recitation reasons.\n */\n RECITATION: 'RECITATION',\n /**\n * Unknown reason.\n */\n OTHER: 'OTHER',\n /**\n * The 
candidate content contained forbidden terms.\n */\n BLOCKLIST: 'BLOCKLIST',\n /**\n * The candidate content potentially contained prohibited content.\n */\n PROHIBITED_CONTENT: 'PROHIBITED_CONTENT',\n /**\n * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).\n */\n SPII: 'SPII',\n /**\n * The function call generated by the model was invalid.\n */\n MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL'\n} as const;\n\n/**\n * Reason that a candidate finished.\n * @public\n */\nexport type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];\n\n/**\n * @public\n */\nexport const FunctionCallingMode = {\n /**\n * Default model behavior; model decides to predict either a function call\n * or a natural language response.\n */\n AUTO: 'AUTO',\n /**\n * Model is constrained to always predicting a function call only.\n * If `allowed_function_names` is set, the predicted function call will be\n * limited to any one of `allowed_function_names`, else the predicted\n * function call will be any one of the provided `function_declarations`.\n */\n ANY: 'ANY',\n /**\n * Model will not predict any function call. Model behavior is same as when\n * not passing any function declarations.\n */\n NONE: 'NONE'\n} as const;\n\n/**\n * @public\n */\nexport type FunctionCallingMode =\n (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];\n\n/**\n * Content part modality.\n * @public\n */\nexport const Modality = {\n /**\n * Unspecified modality.\n */\n MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED',\n /**\n * Plain text.\n */\n TEXT: 'TEXT',\n /**\n * Image.\n */\n IMAGE: 'IMAGE',\n /**\n * Video.\n */\n VIDEO: 'VIDEO',\n /**\n * Audio.\n */\n AUDIO: 'AUDIO',\n /**\n * Document (for example, PDF).\n */\n DOCUMENT: 'DOCUMENT'\n} as const;\n\n/**\n * Content part modality.\n * @public\n */\nexport type Modality = (typeof Modality)[keyof typeof Modality];\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport const ResponseModality = {\n /**\n * Text.\n * @beta\n */\n TEXT: 'TEXT',\n /**\n * Image.\n * @beta\n */\n IMAGE: 'IMAGE',\n /**\n * Audio.\n * @beta\n */\n AUDIO: 'AUDIO'\n} as const;\n\n/**\n * Generation modalities to be returned in generation responses.\n *\n * @beta\n */\nexport type ResponseModality =\n (typeof ResponseModality)[keyof typeof ResponseModality];\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @remarks\n * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an\n * on-device model. If on-device inference is not available, the SDK\n * will fall back to using a cloud-hosted model.\n * <br/>\n * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an\n * on-device model. The SDK will not fall back to a cloud-hosted model.\n * If on-device inference is not available, inference methods will throw.\n * <br/>\n * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a\n * cloud-hosted model. The SDK will not fall back to an on-device model.\n * <br/>\n * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a\n * cloud-hosted model. 
If not available, the SDK will fall back to an\n * on-device model.\n *\n * @beta\n */\nexport const InferenceMode = {\n 'PREFER_ON_DEVICE': 'prefer_on_device',\n 'ONLY_ON_DEVICE': 'only_on_device',\n 'ONLY_IN_CLOUD': 'only_in_cloud',\n 'PREFER_IN_CLOUD': 'prefer_in_cloud'\n} as const;\n\n/**\n * Determines whether inference happens on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport const InferenceSource = {\n 'ON_DEVICE': 'on_device',\n 'IN_CLOUD': 'in_cloud'\n} as const;\n\n/**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\nexport type InferenceSource =\n (typeof InferenceSource)[keyof typeof InferenceSource];\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport const Outcome = {\n UNSPECIFIED: 'OUTCOME_UNSPECIFIED',\n OK: 'OUTCOME_OK',\n FAILED: 'OUTCOME_FAILED',\n DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'\n};\n\n/**\n * Represents the result of the code execution.\n *\n * @beta\n */\nexport type Outcome = (typeof Outcome)[keyof typeof Outcome];\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport const Language = {\n UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',\n PYTHON: 'PYTHON'\n};\n\n/**\n * The programming language of the code.\n *\n * @beta\n */\nexport type Language = (typeof Language)[keyof typeof Language];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, FunctionCall, InlineDataPart } from './content';\nimport {\n BlockReason,\n FinishReason,\n HarmCategory,\n HarmProbability,\n HarmSeverity,\n InferenceSource,\n Modality\n} from './enums';\n\n/**\n * Result object returned from {@link GenerativeModel.generateContent} call.\n *\n * @public\n */\nexport interface GenerateContentResult {\n response: EnhancedGenerateContentResponse;\n}\n\n/**\n * Result object returned from {@link GenerativeModel.generateContentStream} call.\n * Iterate over `stream` to get chunks as they come in and/or\n * use the `response` promise to get the aggregated response when\n * the stream is done.\n *\n * @public\n */\nexport interface GenerateContentStreamResult {\n stream: AsyncGenerator<EnhancedGenerateContentResponse>;\n response: Promise<EnhancedGenerateContentResponse>;\n}\n\n/**\n * Response object wrapped with helper methods.\n *\n * @public\n */\nexport interface EnhancedGenerateContentResponse\n extends GenerateContentResponse {\n /**\n * Returns the text string from the response, if available.\n * Throws if the prompt or candidate was blocked.\n */\n text: () => string;\n /**\n * Aggregates and returns every {@link InlineDataPart} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n inlineDataParts: () => InlineDataPart[] | undefined;\n /**\n * Aggregates and returns every 
{@link FunctionCall} from the first candidate of\n * {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n */\n functionCalls: () => FunctionCall[] | undefined;\n /**\n * Aggregates and returns every {@link TextPart} with their `thought` property set\n * to `true` from the first candidate of {@link GenerateContentResponse}.\n *\n * @throws If the prompt or candidate was blocked.\n *\n * @remarks\n * Thought summaries provide a brief overview of the model's internal thinking process,\n * offering insight into how it arrived at the final answer. This can be useful for\n * debugging, understanding the model's reasoning, and verifying its accuracy.\n *\n * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is\n * set to `true`.\n */\n thoughtSummary: () => string | undefined;\n /**\n * Indicates whether inference happened on-device or in-cloud.\n *\n * @beta\n */\n inferenceSource?: InferenceSource;\n}\n\n/**\n * Individual response from {@link GenerativeModel.generateContent} and\n * {@link GenerativeModel.generateContentStream}.\n * `generateContentStream()` will return one in each chunk until\n * the stream is done.\n * @public\n */\nexport interface GenerateContentResponse {\n candidates?: GenerateContentCandidate[];\n promptFeedback?: PromptFeedback;\n usageMetadata?: UsageMetadata;\n}\n\n/**\n * Usage metadata about a {@link GenerateContentResponse}.\n *\n * @public\n */\nexport interface UsageMetadata {\n promptTokenCount: number;\n candidatesTokenCount: number;\n /**\n * The number of tokens used by the model's internal \"thinking\" process.\n */\n thoughtsTokenCount?: number;\n totalTokenCount: number;\n /**\n * The number of tokens used by tools.\n */\n toolUsePromptTokenCount?: number;\n promptTokensDetails?: ModalityTokenCount[];\n candidatesTokensDetails?: ModalityTokenCount[];\n /**\n * A list of tokens used by tools, broken down by modality.\n */\n toolUsePromptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * Represents token counting info for a single modality.\n *\n * @public\n */\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality: Modality;\n /** The number of tokens counted. 
*/\n tokenCount: number;\n}\n\n/**\n * If the prompt was blocked, this will be populated with `blockReason` and\n * the relevant `safetyRatings`.\n * @public\n */\nexport interface PromptFeedback {\n blockReason?: BlockReason;\n safetyRatings: SafetyRating[];\n /**\n * A human-readable description of the `blockReason`.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n blockReasonMessage?: string;\n}\n\n/**\n * A candidate returned as part of a {@link GenerateContentResponse}.\n * @public\n */\nexport interface GenerateContentCandidate {\n index: number;\n content: Content;\n finishReason?: FinishReason;\n finishMessage?: string;\n safetyRatings?: SafetyRating[];\n citationMetadata?: CitationMetadata;\n groundingMetadata?: GroundingMetadata;\n urlContextMetadata?: URLContextMetadata;\n}\n\n/**\n * Citation metadata that may be found on a {@link GenerateContentCandidate}.\n * @public\n */\nexport interface CitationMetadata {\n citations: Citation[];\n}\n\n/**\n * A single citation.\n * @public\n */\nexport interface Citation {\n startIndex?: number;\n endIndex?: number;\n uri?: string;\n license?: string;\n /**\n * The title of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n title?: string;\n /**\n * The publication date of the cited source, if available.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n */\n publicationDate?: Date;\n}\n\n/**\n * Metadata returned when grounding is enabled.\n *\n * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * \"Grounding with Google Search\" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}\n * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}\n * section within the Service Specific Terms).\n *\n * @public\n */\nexport interface GroundingMetadata {\n /**\n * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be\n * embedded in an app to display a Google Search entry point for follow-up web searches related to\n * a model's \"Grounded Response\".\n */\n searchEntryPoint?: SearchEntrypoint;\n /**\n * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content\n * (for example, from a web page). that the model used to ground its response.\n */\n groundingChunks?: GroundingChunk[];\n /**\n * A list of {@link GroundingSupport} objects. Each object details how specific segments of the\n * model's response are supported by the `groundingChunks`.\n */\n groundingSupports?: GroundingSupport[];\n /**\n * A list of web search queries that the model performed to gather the grounding information.\n * These can be used to allow users to explore the search results themselves.\n */\n webSearchQueries?: string[];\n /**\n * @deprecated Use {@link GroundingSupport} instead.\n */\n retrievalQueries?: string[];\n}\n\n/**\n * Google search entry point.\n *\n * @public\n */\nexport interface SearchEntrypoint {\n /**\n * HTML/CSS snippet that must be embedded in a web page. 
The snippet is designed to avoid\n * undesired interaction with the rest of the page's CSS.\n *\n * To ensure proper rendering and prevent CSS conflicts, it is recommended\n * to encapsulate this `renderedContent` within a shadow DOM when embedding it\n * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.\n *\n * @example\n * ```javascript\n * const container = document.createElement('div');\n * document.body.appendChild(container);\n * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;\n * ```\n */\n renderedContent?: string;\n}\n\n/**\n * Represents a chunk of retrieved data that supports a claim in the model's response. This is part\n * of the grounding information provided when grounding is enabled.\n *\n * @public\n */\nexport interface GroundingChunk {\n /**\n * Contains details if the grounding chunk is from a web source.\n */\n web?: WebGroundingChunk;\n}\n\n/**\n * A grounding chunk from the web.\n *\n * Important: If using Grounding with Google Search, you are required to comply with the\n * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for \"Grounding with Google Search\".\n *\n * @public\n */\nexport interface WebGroundingChunk {\n /**\n * The URI of the retrieved web page.\n */\n uri?: string;\n /**\n * The title of the retrieved web page.\n */\n title?: string;\n /**\n * The domain of the original URI from which the content was retrieved.\n *\n * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be\n * `undefined`.\n */\n domain?: string;\n}\n\n/**\n * Provides information about how a specific segment of the model's response is supported by the\n * retrieved grounding chunks.\n *\n * @public\n */\nexport interface GroundingSupport {\n /**\n * Specifies the segment of the model's response content that this grounding support pertains to.\n */\n segment?: Segment;\n /**\n * A list of indices that refer to specific {@link GroundingChunk} objects within the\n * {@link GroundingMetadata.groundingChunks} array. These referenced chunks\n * are the sources that support the claim made in the associated `segment` of the response.\n * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,\n * and `groundingChunks[4]` are the retrieved content supporting this part of the response.\n */\n groundingChunkIndices?: number[];\n}\n\n/**\n * Represents a specific segment within a {@link Content} object, often used to\n * pinpoint the exact location of text or data that grounding information refers to.\n *\n * @public\n */\nexport interface Segment {\n /**\n * The zero-based index of the {@link Part} object within the `parts` array\n * of its parent {@link Content} object. This identifies which part of the\n * content the segment belongs to.\n */\n partIndex: number;\n /**\n * The zero-based start index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the\n * beginning of the part's content (e.g., `Part.text`).\n */\n startIndex: number;\n /**\n * The zero-based end index of the segment within the specified `Part`,\n * measured in UTF-8 bytes. 
This offset is exclusive, meaning the character\n * at this index is not included in the segment.\n */\n endIndex: number;\n /**\n * The text corresponding to the segment from the response.\n */\n text: string;\n}\n\n/**\n * Metadata related to {@link URLContextTool}.\n *\n * @beta\n */\nexport interface URLContextMetadata {\n /**\n * List of URL metadata used to provide context to the Gemini model.\n */\n urlMetadata: URLMetadata[];\n}\n\n/**\n * Metadata for a single URL retrieved by the {@link URLContextTool} tool.\n *\n * @beta\n */\nexport interface URLMetadata {\n /**\n * The retrieved URL.\n */\n retrievedUrl?: string;\n /**\n * The status of the URL retrieval.\n */\n urlRetrievalStatus?: URLRetrievalStatus;\n}\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport const URLRetrievalStatus = {\n /**\n * Unspecified retrieval status.\n */\n URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',\n /**\n * The URL retrieval was successful.\n */\n URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',\n /**\n * The URL retrieval failed.\n */\n URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',\n /**\n * The URL retrieval failed because the content is behind a paywall.\n */\n URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',\n /**\n * The URL retrieval failed because the content is unsafe.\n */\n URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'\n};\n\n/**\n * The status of a URL retrieval.\n *\n * @remarks\n * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.\n * <br/>\n * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.\n * <br/>\n *\n * @beta\n */\nexport type URLRetrievalStatus =\n (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];\n\n/**\n * @public\n */\nexport interface WebAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * @public\n */\nexport interface RetrievedContextAttribution {\n uri: string;\n title: string;\n}\n\n/**\n * Protobuf google.type.Date\n * @public\n */\nexport interface Date {\n year: number;\n month: number;\n day: number;\n}\n\n/**\n * A safety rating associated with a {@link GenerateContentCandidate}\n * @public\n */\nexport interface SafetyRating {\n category: HarmCategory;\n probability: HarmProbability;\n /**\n * The harm severity level.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.\n */\n severity: HarmSeverity;\n /**\n * The probability score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link 
VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n probabilityScore: number;\n /**\n * The severity score of the harm category.\n *\n * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.\n */\n severityScore: number;\n blocked: boolean;\n}\n\n/**\n * Response from calling {@link GenerativeModel.countTokens}.\n * @public\n */\nexport interface CountTokensResponse {\n /**\n * The total number of tokens counted across all instances from the request.\n */\n totalTokens: number;\n /**\n * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.\n *\n * The total number of billable characters counted across all instances\n * from the request.\n */\n totalBillableCharacters?: number;\n /**\n * The breakdown, by modality, of how many tokens are consumed by the prompt.\n */\n promptTokensDetails?: ModalityTokenCount[];\n}\n\n/**\n * An incremental content update from the model.\n *\n * @beta\n */\nexport interface LiveServerContent {\n type: 'serverContent';\n /**\n * The content that the model has generated as part of the current conversation with the user.\n */\n modelTurn?: Content;\n /**\n * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.\n */\n turnComplete?: boolean;\n /**\n * Indicates whether the model was interrupted by the client. An interruption occurs when\n * the client sends a message before the model finishes it's turn. This is `undefined` if the\n * model was not interrupted.\n */\n interrupted?: boolean;\n /**\n * Transcription of the audio that was input to the model.\n */\n inputTranscription?: Transcription;\n /**\n * Transcription of the audio output from the model.\n */\n outputTranscription?: Transcription;\n}\n\n/**\n * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription\n * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on\n * the {@link LiveGenerationConfig}.\n *\n * @beta\n */\n\nexport interface Transcription {\n /**\n * The text transcription of the audio.\n */\n text?: string;\n}\n\n/**\n * A request from the model for the client to execute one or more functions.\n *\n * @beta\n */\nexport interface LiveServerToolCall {\n type: 'toolCall';\n /**\n * An array of function calls to run.\n */\n functionCalls: FunctionCall[];\n}\n\n/**\n * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.\n *\n * @beta\n */\nexport interface LiveServerToolCallCancellation {\n type: 'toolCallCancellation';\n /**\n * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.\n */\n functionIds: string[];\n}\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n *\n * @beta\n */\nexport const LiveResponseType = {\n SERVER_CONTENT: 'serverContent',\n TOOL_CALL: 'toolCall',\n TOOL_CALL_CANCELLATION: 'toolCallCancellation'\n};\n\n/**\n * The types of responses that can be returned by {@link LiveSession.receive}.\n * This is a property on all messages that can be used for type narrowing. 
This property is not\n * returned by the server, it is assigned to a server message object once it's parsed.\n *\n * @beta\n */\nexport type LiveResponseType =\n (typeof LiveResponseType)[keyof typeof LiveResponseType];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GenerateContentResponse } from './responses';\n\n/**\n * Details object that may be included in an error response.\n *\n * @public\n */\nexport interface ErrorDetails {\n '@type'?: string;\n\n /** The reason for the error. */\n reason?: string;\n\n /** The domain where the error occurred. */\n domain?: string;\n\n /** Additional metadata about the error. */\n metadata?: Record<string, unknown>;\n\n /** Any other relevant information about the error. */\n [key: string]: unknown;\n}\n\n/**\n * Details object that contains data originating from a bad HTTP response.\n *\n * @public\n */\nexport interface CustomErrorData {\n /** HTTP status code of the error response. */\n status?: number;\n\n /** HTTP status text of the error response. */\n statusText?: string;\n\n /** Response from a {@link GenerateContentRequest} */\n response?: GenerateContentResponse;\n\n /** Optional additional details about the error. */\n errorDetails?: ErrorDetails[];\n}\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport const AIErrorCode = {\n /** A generic error occurred. */\n ERROR: 'error',\n\n /** An error occurred in a request. */\n REQUEST_ERROR: 'request-error',\n\n /** An error occurred in a response. */\n RESPONSE_ERROR: 'response-error',\n\n /** An error occurred while performing a fetch. */\n FETCH_ERROR: 'fetch-error',\n\n /** An error occurred because an operation was attempted on a closed session. */\n SESSION_CLOSED: 'session-closed',\n\n /** An error associated with a Content object. */\n INVALID_CONTENT: 'invalid-content',\n\n /** An error due to the Firebase API not being enabled in the Console. */\n API_NOT_ENABLED: 'api-not-enabled',\n\n /** An error due to invalid Schema input. */\n INVALID_SCHEMA: 'invalid-schema',\n\n /** An error occurred due to a missing Firebase API key. */\n NO_API_KEY: 'no-api-key',\n\n /** An error occurred due to a missing Firebase app ID. */\n NO_APP_ID: 'no-app-id',\n\n /** An error occurred due to a model name not being specified during initialization. */\n NO_MODEL: 'no-model',\n\n /** An error occurred due to a missing project ID. */\n NO_PROJECT_ID: 'no-project-id',\n\n /** An error occurred while parsing. */\n PARSE_FAILED: 'parse-failed',\n\n /** An error occurred due an attempt to use an unsupported feature. 
*/\n UNSUPPORTED: 'unsupported'\n} as const;\n\n/**\n * Standardized error codes that {@link AIError} can have.\n *\n * @public\n */\nexport type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport const SchemaType = {\n /** String type. */\n STRING: 'string',\n /** Number type. */\n NUMBER: 'number',\n /** Integer type. */\n INTEGER: 'integer',\n /** Boolean type. */\n BOOLEAN: 'boolean',\n /** Array type. */\n ARRAY: 'array',\n /** Object type. */\n OBJECT: 'object'\n} as const;\n\n/**\n * Contains the list of OpenAPI data types\n * as defined by the\n * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}\n * @public\n */\nexport type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];\n\n/**\n * Basic {@link Schema} properties shared across several Schema-related\n * types.\n * @public\n */\nexport interface SchemaShared<T> {\n /**\n * An array of {@link Schema}. The generated data must be valid against any of the schemas\n * listed in this array. This allows specifying multiple possible structures or types for a\n * single field.\n */\n anyOf?: T[];\n /** Optional. The format of the property.\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or\n * `'date-time'`, otherwise requests will fail.\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /**\n * The title of the property. This helps document the schema's purpose but does not typically\n * constrain the generated value. It can subtly guide the model by clarifying the intent of a\n * field.\n */\n title?: string;\n /** Optional. The items of the property. */\n items?: T;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Map of `Schema` objects. */\n properties?: {\n [k: string]: T;\n };\n /** A hint suggesting the order in which the keys should appear in the generated JSON string. */\n propertyOrdering?: string[];\n /** Optional. The enum of the property. */\n enum?: string[];\n /** Optional. The example of the property. */\n example?: unknown;\n /** Optional. Whether the property is nullable. */\n nullable?: boolean;\n /** The minimum value of a numeric type. */\n minimum?: number;\n /** The maximum value of a numeric type. 
*/\n maximum?: number;\n [key: string]: unknown;\n}\n\n/**\n * Params passed to {@link Schema} static methods to create specific\n * {@link Schema} classes.\n * @public\n */\nexport interface SchemaParams extends SchemaShared<SchemaInterface> {}\n\n/**\n * Final format for {@link Schema} params passed to backend requests.\n * @public\n */\nexport interface SchemaRequest extends SchemaShared<SchemaRequest> {\n /**\n * The type of the property. this can only be undefined when using `anyOf` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }.\n */\n type?: SchemaType;\n /** Optional. Array of required property. */\n required?: string[];\n}\n\n/**\n * Interface for {@link Schema} class.\n * @public\n */\nexport interface SchemaInterface extends SchemaShared<SchemaInterface> {\n /**\n * The type of the property. this can only be undefined when using `anyof` schemas,\n * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.\n */\n type?: SchemaType;\n}\n\n/**\n * Interface for JSON parameters in a schema of {@link (SchemaType:type)}\n * \"object\" when not using the `Schema.object()` helper.\n * @public\n */\nexport interface ObjectSchemaRequest extends SchemaRequest {\n type: 'object';\n /**\n * This is not a property accepted in the final request to the backend, but is\n * a client-side convenience property that is only usable by constructing\n * a schema through the `Schema.object()` helper method. Populating this\n * property will cause response errors if the object is not wrapped with\n * `Schema.object()`.\n */\n optionalProperties?: never;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ImagenImageFormat } from '../../requests/imagen-image-format';\n\n/**\n * Parameters for configuring an {@link ImagenModel}.\n *\n * @public\n */\nexport interface ImagenModelParams {\n /**\n * The Imagen model to use for generating images.\n * For example: `imagen-3.0-generate-002`.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}\n * for a full list of supported Imagen 3 models.\n */\n model: string;\n /**\n * Configuration options for generating images with Imagen.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering potentially inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n}\n\n/**\n * Configuration options for generating images with Imagen.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for\n * more details.\n *\n * @public\n */\nexport interface ImagenGenerationConfig {\n /**\n * A description of what should be omitted from the generated images.\n *\n * Support for negative prompts depends on 
the Imagen model.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.\n *\n * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions\n * greater than `imagen-3.0-generate-002`.\n */\n negativePrompt?: string;\n /**\n * The number of images to generate. The default value is 1.\n *\n * The number of sample images that may be generated in each request depends on the model\n * (typically up to 4); see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">sampleCount</a>\n * documentation for more details.\n */\n numberOfImages?: number;\n /**\n * The aspect ratio of the generated images. The default value is square 1:1.\n * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}\n * for more details.\n */\n aspectRatio?: ImagenAspectRatio;\n /**\n * The image format of the generated images. The default is PNG.\n *\n * See {@link ImagenImageFormat} for more details.\n */\n imageFormat?: ImagenImageFormat;\n /**\n * Whether to add an invisible watermark to generated images.\n *\n * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate\n * that they are AI generated. If set to `false`, watermarking will be disabled.\n *\n * For Imagen 3 models, the default value is `true`; see the <a href=\"http://firebase.google.com/docs/vertex-ai/model-parameters#imagen\">addWatermark</a>\n * documentation for more details.\n *\n * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,\n * and cannot be turned off.\n */\n addWatermark?: boolean;\n}\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport const ImagenSafetyFilterLevel = {\n /**\n * The most aggressive filtering level; most strict blocking.\n */\n BLOCK_LOW_AND_ABOVE: 'block_low_and_above',\n /**\n * Blocks some sensitive prompts and responses.\n */\n BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',\n /**\n * Blocks few sensitive prompts and responses.\n */\n BLOCK_ONLY_HIGH: 'block_only_high',\n /**\n * The least aggressive filtering level; blocks very few sensitive prompts and responses.\n *\n * Access to this feature is restricted and may require your case to be reviewed and approved by\n * Cloud support.\n */\n BLOCK_NONE: 'block_none'\n} as const;\n\n/**\n * A filter level controlling how aggressively to filter sensitive content.\n *\n * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI\n * are assessed against a list of safety filters, which include 'harmful categories' (for example,\n * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to\n * filter out potentially harmful content from responses. 
See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}\n * for more details.\n *\n * @public\n */\nexport type ImagenSafetyFilterLevel =\n (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport const ImagenPersonFilterLevel = {\n /**\n * Disallow generation of images containing people or faces; images of people are filtered out.\n */\n BLOCK_ALL: 'dont_allow',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ADULT: 'allow_adult',\n /**\n * Allow generation of images containing adults only; images of children are filtered out.\n *\n * Generation of images containing people or faces may require your use case to be\n * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}\n * for more details.\n */\n ALLOW_ALL: 'allow_all'\n} as const;\n\n/**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n *\n * See the <a href=\"http://firebase.google.com/docs/vertex-ai/generate-images\">personGeneration</a>\n * documentation for more details.\n *\n * @public\n */\nexport type ImagenPersonFilterLevel =\n (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];\n\n/**\n * Settings for controlling the aggressiveness of filtering out sensitive content.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details.\n *\n * @public\n */\nexport interface ImagenSafetySettings {\n /**\n * A filter level controlling how aggressive to filter out sensitive content from generated\n * images.\n */\n safetyFilterLevel?: ImagenSafetyFilterLevel;\n /**\n * A filter level controlling whether generation of images containing people or faces is allowed.\n */\n personFilterLevel?: ImagenPersonFilterLevel;\n}\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport const ImagenAspectRatio = {\n /**\n * Square (1:1) aspect ratio.\n */\n 'SQUARE': '1:1',\n /**\n * Landscape (3:4) aspect ratio.\n */\n 'LANDSCAPE_3x4': '3:4',\n /**\n * Portrait (4:3) aspect ratio.\n */\n 'PORTRAIT_4x3': '4:3',\n /**\n * Landscape (16:9) aspect ratio.\n */\n 'LANDSCAPE_16x9': '16:9',\n /**\n * Portrait (9:16) aspect ratio.\n */\n 'PORTRAIT_9x16': '9:16'\n} as const;\n\n/**\n * Aspect ratios for Imagen images.\n *\n * To specify an aspect ratio for 
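Pulling the Imagen types above together, here is a minimal sketch of an ImagenModelParams object. It assumes ImagenModelParams, ImagenAspectRatio, ImagenSafetyFilterLevel, and ImagenPersonFilterLevel are exported from 'firebase/ai' (all are tagged @public above); the option values are placeholders, not recommendations.

import {
  ImagenAspectRatio,
  ImagenModelParams,
  ImagenPersonFilterLevel,
  ImagenSafetyFilterLevel
} from 'firebase/ai';

const imagenParams: ImagenModelParams = {
  // Only Imagen 3 models (`imagen-3.0-*`) are supported, per the docs above.
  model: 'imagen-3.0-generate-002',
  generationConfig: {
    numberOfImages: 2,                             // model-dependent, typically up to 4
    aspectRatio: ImagenAspectRatio.LANDSCAPE_16x9, // '16:9'
    addWatermark: true                             // SynthID watermark, the Imagen 3 default
  },
  safetySettings: {
    safetyFilterLevel: ImagenSafetyFilterLevel.BLOCK_MEDIUM_AND_ABOVE,
    personFilterLevel: ImagenPersonFilterLevel.ALLOW_ADULT
  }
};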
generated images, set the `aspectRatio` property in your\n * {@link ImagenGenerationConfig}.\n *\n * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }\n * for more details and examples of the supported aspect ratios.\n *\n * @public\n */\nexport type ImagenAspectRatio =\n (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp } from '@firebase/app';\nimport { Backend } from './backend';\n\nexport * from './types';\n\n/**\n * An instance of the Firebase AI SDK.\n *\n * Do not create this instance directly. Instead, use {@link getAI | getAI()}.\n *\n * @public\n */\nexport interface AI {\n /**\n * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.\n */\n app: FirebaseApp;\n /**\n * A {@link Backend} instance that specifies the configuration for the target backend,\n * either the Gemini Developer API (using {@link GoogleAIBackend}) or the\n * Vertex AI Gemini API (using {@link VertexAIBackend}).\n */\n backend: Backend;\n /**\n * Options applied to this {@link AI} instance.\n */\n options?: AIOptions;\n /**\n * @deprecated use `AI.backend.location` instead.\n *\n * The location configured for this AI service instance, relevant for Vertex AI backends.\n */\n location: string;\n}\n\n/**\n * An enum-like object containing constants that represent the supported backends\n * for the Firebase AI SDK.\n * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)\n * the SDK will communicate with.\n *\n * These values are assigned to the `backendType` property within the specific backend\n * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify\n * which service to target.\n *\n * @public\n */\nexport const BackendType = {\n /**\n * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.\n * Use this constant when creating a {@link VertexAIBackend} configuration.\n */\n VERTEX_AI: 'VERTEX_AI',\n\n /**\n * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).\n * Use this constant when creating a {@link GoogleAIBackend} configuration.\n */\n GOOGLE_AI: 'GOOGLE_AI'\n} as const; // Using 'as const' makes the string values literal types\n\n/**\n * Type alias representing valid backend types.\n * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.\n *\n * @public\n */\nexport type BackendType = (typeof BackendType)[keyof typeof BackendType];\n\n/**\n * Options for initializing the AI service using {@link getAI | getAI()}.\n * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)\n * and configuring its specific options (like location for Vertex AI).\n *\n * @public\n */\nexport interface AIOptions {\n /**\n * The backend configuration to use for the AI service instance.\n * Defaults to the Gemini Developer API backend 
({@link GoogleAIBackend}).\n */\n backend?: Backend;\n /**\n * Whether to use App Check limited use tokens. Defaults to false.\n */\n useLimitedUseAppCheckTokens?: boolean;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { DEFAULT_LOCATION } from './constants';\nimport { BackendType } from './public-types';\n\n/**\n * Abstract base class representing the configuration for an AI service backend.\n * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for\n * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and\n * {@link VertexAIBackend} for the Vertex AI Gemini API.\n *\n * @public\n */\nexport abstract class Backend {\n /**\n * Specifies the backend type.\n */\n readonly backendType: BackendType;\n\n /**\n * Protected constructor for use by subclasses.\n * @param type - The backend type.\n */\n protected constructor(type: BackendType) {\n this.backendType = type;\n }\n}\n\n/**\n * Configuration class for the Gemini Developer API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.\n *\n * @public\n */\nexport class GoogleAIBackend extends Backend {\n /**\n * Creates a configuration object for the Gemini Developer API backend.\n */\n constructor() {\n super(BackendType.GOOGLE_AI);\n }\n}\n\n/**\n * Configuration class for the Vertex AI Gemini API.\n *\n * Use this with {@link AIOptions} when initializing the AI service via\n * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.\n *\n * @public\n */\nexport class VertexAIBackend extends Backend {\n /**\n * The region identifier.\n * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n readonly location: string;\n\n /**\n * Creates a configuration object for the Vertex AI backend.\n *\n * @param location - The region identifier, defaulting to `us-central1`;\n * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}\n * for a list of supported locations.\n */\n constructor(location: string = DEFAULT_LOCATION) {\n super(BackendType.VERTEX_AI);\n if (!location) {\n this.location = DEFAULT_LOCATION;\n } else {\n this.location = location;\n }\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * 
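The AIOptions interface and Backend subclasses above are consumed by getAI(). A minimal sketch, assuming getAI() takes a FirebaseApp plus the AIOptions shape documented above and that these symbols are exported from 'firebase/app' and 'firebase/ai'; the Firebase config is a placeholder.

import { initializeApp } from 'firebase/app';
import { getAI, GoogleAIBackend, VertexAIBackend } from 'firebase/ai';

const app = initializeApp({ /* your Firebase config */ });

// Gemini Developer API backend (the documented default when `backend` is omitted).
const googleAI = getAI(app, { backend: new GoogleAIBackend() });

// Vertex AI Gemini API backend pinned to a region; an empty location argument
// falls back to the SDK's default location, per the constructor above.
const vertexAI = getAI(app, { backend: new VertexAIBackend('us-central1') });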
limitations under the License.\n */\n\nimport { Logger } from '@firebase/logger';\n\nexport const logger = new Logger('@firebase/vertexai');\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * The subset of the Prompt API\n * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl }\n * required for hybrid functionality.\n *\n * @internal\n */\nexport interface LanguageModel extends EventTarget {\n create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;\n availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;\n prompt(\n input: LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): Promise<string>;\n promptStreaming(\n input: LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): ReadableStream;\n measureInputUsage(\n input: LanguageModelPrompt,\n options?: LanguageModelPromptOptions\n ): Promise<number>;\n destroy(): undefined;\n}\n\n/**\n * @internal\n */\nexport enum Availability {\n 'UNAVAILABLE' = 'unavailable',\n 'DOWNLOADABLE' = 'downloadable',\n 'DOWNLOADING' = 'downloading',\n 'AVAILABLE' = 'available'\n}\n\n/**\n * Configures the creation of an on-device language model session.\n * @beta\n */\nexport interface LanguageModelCreateCoreOptions {\n topK?: number;\n temperature?: number;\n expectedInputs?: LanguageModelExpected[];\n}\n\n/**\n * Configures the creation of an on-device language model session.\n * @beta\n */\nexport interface LanguageModelCreateOptions\n extends LanguageModelCreateCoreOptions {\n signal?: AbortSignal;\n initialPrompts?: LanguageModelMessage[];\n}\n\n/**\n * Options for an on-device language model prompt.\n * @beta\n */\nexport interface LanguageModelPromptOptions {\n responseConstraint?: object;\n // TODO: Restore AbortSignal once the API is defined.\n}\n\n/**\n * Options for the expected inputs for an on-device language model.\n * @beta\n */ export interface LanguageModelExpected {\n type: LanguageModelMessageType;\n languages?: string[];\n}\n\n/**\n * An on-device language model prompt.\n * @beta\n */\nexport type LanguageModelPrompt = LanguageModelMessage[];\n\n/**\n * An on-device language model message.\n * @beta\n */\nexport interface LanguageModelMessage {\n role: LanguageModelMessageRole;\n content: LanguageModelMessageContent[];\n}\n\n/**\n * An on-device language model content object.\n * @beta\n */\nexport interface LanguageModelMessageContent {\n type: LanguageModelMessageType;\n value: LanguageModelMessageContentValue;\n}\n\n/**\n * Allowable roles for on-device language model usage.\n * @beta\n */\nexport type LanguageModelMessageRole = 'system' | 'user' | 'assistant';\n\n/**\n * Allowable types for on-device language model messages.\n * @beta\n */\nexport type LanguageModelMessageType = 'text' | 'image' | 'audio';\n\n/**\n * Content formats that can be provided as on-device message content.\n * @beta\n */\nexport type LanguageModelMessageContentValue =\n | ImageBitmapSource\n | 
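The LanguageModel interface above mirrors a subset of the experimental Prompt API used for hybrid inference. The following hedged sketch shows the call shape only: it assumes a Chromium build that exposes a global `LanguageModel`, declares its own loose typing, and does not represent a stable browser API or an SDK export.

// Loose local typing for the experimental global; not part of the SDK.
declare const LanguageModel: {
  availability(options?: object): Promise<string>;
  create(options?: object): Promise<{
    prompt(input: unknown): Promise<string>;
    destroy(): void;
  }>;
};

async function tryOnDevicePrompt(text: string): Promise<string | undefined> {
  if (typeof LanguageModel === 'undefined') {
    return undefined; // API not exposed in this environment.
  }
  if ((await LanguageModel.availability()) !== 'available') {
    return undefined; // 'unavailable', 'downloadable', or 'downloading'.
  }
  const session = await LanguageModel.create();
  const reply = await session.prompt([
    { role: 'user', content: [{ type: 'text', value: text }] }
  ]);
  session.destroy();
  return reply;
}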
AudioBuffer\n | BufferSource\n | string;\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport {\n CountTokensRequest,\n GenerateContentRequest,\n InferenceMode,\n Part,\n AIErrorCode,\n OnDeviceParams,\n Content,\n Role\n} from '../types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport {\n Availability,\n LanguageModel,\n LanguageModelExpected,\n LanguageModelMessage,\n LanguageModelMessageContent,\n LanguageModelMessageRole\n} from '../types/language-model';\n\n// Defaults to support image inputs for convenience.\nconst defaultExpectedInputs: LanguageModelExpected[] = [{ type: 'image' }];\n\n/**\n * Defines an inference \"backend\" that uses Chrome's on-device model,\n * and encapsulates logic for detecting when on-device inference is\n * possible.\n */\nexport class ChromeAdapterImpl implements ChromeAdapter {\n // Visible for testing\n static SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];\n private isDownloading = false;\n private downloadPromise: Promise<LanguageModel | void> | undefined;\n private oldSession: LanguageModel | undefined;\n onDeviceParams: OnDeviceParams = {\n createOptions: {\n expectedInputs: defaultExpectedInputs\n }\n };\n constructor(\n public languageModelProvider: LanguageModel,\n public mode: InferenceMode,\n onDeviceParams?: OnDeviceParams\n ) {\n if (onDeviceParams) {\n this.onDeviceParams = onDeviceParams;\n if (!this.onDeviceParams.createOptions) {\n this.onDeviceParams.createOptions = {\n expectedInputs: defaultExpectedInputs\n };\n } else if (!this.onDeviceParams.createOptions.expectedInputs) {\n this.onDeviceParams.createOptions.expectedInputs =\n defaultExpectedInputs;\n }\n }\n }\n\n /**\n * Checks if a given request can be made on-device.\n *\n * Encapsulates a few concerns:\n * the mode\n * API existence\n * prompt formatting\n * model availability, including triggering download if necessary\n *\n *\n * Pros: callers needn't be concerned with details of on-device availability.</p>\n * Cons: this method spans a few concerns and splits request validation from usage.\n * If instance variables weren't already part of the API, we could consider a better\n * separation of concerns.\n */\n async isAvailable(request: GenerateContentRequest): Promise<boolean> {\n if (!this.mode) {\n logger.debug(\n `On-device inference unavailable because mode is undefined.`\n );\n return false;\n }\n if (this.mode === InferenceMode.ONLY_IN_CLOUD) {\n logger.debug(\n `On-device inference unavailable because mode is \"only_in_cloud\".`\n );\n return false;\n }\n\n // Triggers out-of-band download so model will eventually become available.\n const availability = await this.downloadIfAvailable();\n\n if (this.mode === InferenceMode.ONLY_ON_DEVICE) {\n // If it will never be available due to API inavailability, throw.\n if (availability === Availability.UNAVAILABLE) {\n throw new AIError(\n 
AIErrorCode.API_NOT_ENABLED,\n 'Local LanguageModel API not available in this environment.'\n );\n } else if (\n availability === Availability.DOWNLOADABLE ||\n availability === Availability.DOWNLOADING\n ) {\n // TODO(chholland): Better user experience during download - progress?\n logger.debug(`Waiting for download of LanguageModel to complete.`);\n await this.downloadPromise;\n return true;\n }\n return true;\n }\n\n // Applies prefer_on_device logic.\n if (availability !== Availability.AVAILABLE) {\n logger.debug(\n `On-device inference unavailable because availability is \"${availability}\".`\n );\n return false;\n }\n if (!ChromeAdapterImpl.isOnDeviceRequest(request)) {\n logger.debug(\n `On-device inference unavailable because request is incompatible.`\n );\n return false;\n }\n\n return true;\n }\n\n /**\n * Generates content on device.\n *\n * @remarks\n * This is comparable to {@link GenerativeModel.generateContent} for generating content in\n * Cloud.\n * @param request - a standard Firebase AI {@link GenerateContentRequest}\n * @returns {@link Response}, so we can reuse common response formatting.\n */\n async generateContent(request: GenerateContentRequest): Promise<Response> {\n const session = await this.createSession();\n const contents = await Promise.all(\n request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)\n );\n const text = await session.prompt(\n contents,\n this.onDeviceParams.promptOptions\n );\n return ChromeAdapterImpl.toResponse(text);\n }\n\n /**\n * Generates content stream on device.\n *\n * @remarks\n * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in\n * Cloud.\n * @param request - a standard Firebase AI {@link GenerateContentRequest}\n * @returns {@link Response}, so we can reuse common response formatting.\n */\n async generateContentStream(\n request: GenerateContentRequest\n ): Promise<Response> {\n const session = await this.createSession();\n const contents = await Promise.all(\n request.contents.map(ChromeAdapterImpl.toLanguageModelMessage)\n );\n const stream = session.promptStreaming(\n contents,\n this.onDeviceParams.promptOptions\n );\n return ChromeAdapterImpl.toStreamResponse(stream);\n }\n\n async countTokens(_request: CountTokensRequest): Promise<Response> {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'Count Tokens is not yet available for on-device model.'\n );\n }\n\n /**\n * Asserts inference for the given request can be performed by an on-device model.\n */\n private static isOnDeviceRequest(request: GenerateContentRequest): boolean {\n // Returns false if the prompt is empty.\n if (request.contents.length === 0) {\n logger.debug('Empty prompt rejected for on-device inference.');\n return false;\n }\n\n for (const content of request.contents) {\n if (content.role === 'function') {\n logger.debug(`\"Function\" role rejected for on-device inference.`);\n return false;\n }\n\n // Returns false if request contains an image with an unsupported mime type.\n for (const part of content.parts) {\n if (\n part.inlineData &&\n ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(\n part.inlineData.mimeType\n ) === -1\n ) {\n logger.debug(\n `Unsupported mime type \"${part.inlineData.mimeType}\" rejected for on-device inference.`\n );\n return false;\n }\n }\n }\n\n return true;\n }\n\n /**\n * Encapsulates logic to get availability and download a model if one is downloadable.\n */\n private async downloadIfAvailable(): Promise<Availability | undefined> {\n const availability = await 
this.languageModelProvider?.availability(\n this.onDeviceParams.createOptions\n );\n\n if (availability === Availability.DOWNLOADABLE) {\n this.download();\n }\n\n return availability;\n }\n\n /**\n * Triggers out-of-band download of an on-device model.\n *\n * Chrome only downloads models as needed. Chrome knows a model is needed when code calls\n * LanguageModel.create.\n *\n * Since Chrome manages the download, the SDK can only avoid redundant download requests by\n * tracking if a download has previously been requested.\n */\n private download(): void {\n if (this.isDownloading) {\n return;\n }\n this.isDownloading = true;\n this.downloadPromise = this.languageModelProvider\n ?.create(this.onDeviceParams.createOptions)\n .finally(() => {\n this.isDownloading = false;\n });\n }\n\n /**\n * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.\n */\n private static async toLanguageModelMessage(\n content: Content\n ): Promise<LanguageModelMessage> {\n const languageModelMessageContents = await Promise.all(\n content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent)\n );\n return {\n role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role),\n content: languageModelMessageContents\n };\n }\n\n /**\n * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.\n */\n private static async toLanguageModelMessageContent(\n part: Part\n ): Promise<LanguageModelMessageContent> {\n if (part.text) {\n return {\n type: 'text',\n value: part.text\n };\n } else if (part.inlineData) {\n const formattedImageContent = await fetch(\n `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`\n );\n const imageBlob = await formattedImageContent.blob();\n const imageBitmap = await createImageBitmap(imageBlob);\n return {\n type: 'image',\n value: imageBitmap\n };\n }\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n `Processing of this Part type is not currently supported.`\n );\n }\n\n /**\n * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.\n */\n private static toLanguageModelMessageRole(\n role: Role\n ): LanguageModelMessageRole {\n // Assumes 'function' rule has been filtered by isOnDeviceRequest\n return role === 'model' ? 'assistant' : 'user';\n }\n\n /**\n * Abstracts Chrome session creation.\n *\n * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all\n * inference. 
To map the Firebase AI API to Chrome's API, the SDK creates a new session for all\n * inference.\n *\n * Chrome will remove a model from memory if it's no longer in use, so this method ensures a\n * new session is created before an old session is destroyed.\n */\n private async createSession(): Promise<LanguageModel> {\n if (!this.languageModelProvider) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Chrome AI requested for unsupported browser version.'\n );\n }\n const newSession = await this.languageModelProvider.create(\n this.onDeviceParams.createOptions\n );\n if (this.oldSession) {\n this.oldSession.destroy();\n }\n // Holds session reference, so model isn't unloaded from memory.\n this.oldSession = newSession;\n return newSession;\n }\n\n /**\n * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.\n */\n private static toResponse(text: string): Response {\n return {\n json: async () => ({\n candidates: [\n {\n content: {\n parts: [{ text }]\n }\n }\n ]\n })\n } as Response;\n }\n\n /**\n * Formats string stream returned by Chrome as SSE returned by Firebase AI.\n */\n private static toStreamResponse(stream: ReadableStream<string>): Response {\n const encoder = new TextEncoder();\n return {\n body: stream.pipeThrough(\n new TransformStream({\n transform(chunk, controller) {\n const json = JSON.stringify({\n candidates: [\n {\n content: {\n role: 'model',\n parts: [{ text: chunk }]\n }\n }\n ]\n });\n controller.enqueue(encoder.encode(`data: ${json}\\n\\n`));\n }\n })\n )\n } as Response;\n }\n}\n\n/**\n * Creates a ChromeAdapterImpl on demand.\n */\nexport function chromeAdapterFactory(\n mode: InferenceMode,\n window?: Window,\n params?: OnDeviceParams\n): ChromeAdapterImpl | undefined {\n // Do not initialize a ChromeAdapter if we are not in hybrid mode.\n if (typeof window !== 'undefined' && mode) {\n return new ChromeAdapterImpl(\n (window as Window).LanguageModel as LanguageModel,\n mode,\n params\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp, _FirebaseService } from '@firebase/app';\nimport { AI, AIOptions, InferenceMode, OnDeviceParams } from './public-types';\nimport {\n AppCheckInternalComponentName,\n FirebaseAppCheckInternal\n} from '@firebase/app-check-interop-types';\nimport { Provider } from '@firebase/component';\nimport {\n FirebaseAuthInternal,\n FirebaseAuthInternalName\n} from '@firebase/auth-interop-types';\nimport { Backend, VertexAIBackend } from './backend';\nimport { ChromeAdapterImpl } from './methods/chrome-adapter';\n\nexport class AIService implements AI, _FirebaseService {\n auth: FirebaseAuthInternal | null;\n appCheck: FirebaseAppCheckInternal | null;\n _options?: Omit<AIOptions, 'backend'>;\n location: string; // This is here for backwards-compatibility\n\n constructor(\n public app: FirebaseApp,\n public backend: Backend,\n authProvider?: Provider<FirebaseAuthInternalName>,\n appCheckProvider?: 
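chromeAdapterFactory() above only constructs an adapter when a window object and an inference mode are present. The snippet below illustrates the call shape only; the factory is internal to this bundle rather than a public import, and the OnDeviceParams values are placeholders.

// Illustrative only: chromeAdapterFactory and InferenceMode.ONLY_ON_DEVICE
// are used exactly as defined in the sources above.
const adapter = chromeAdapterFactory(
  InferenceMode.ONLY_ON_DEVICE,
  typeof window === 'undefined' ? undefined : window,
  { createOptions: { expectedInputs: [{ type: 'text' }] } }
);
// `adapter` is undefined outside a browser (no window) or when mode is falsy;
// with ONLY_ON_DEVICE, requests throw API_NOT_ENABLED if the LanguageModel
// API is unavailable, per isAvailable() above.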
Provider<AppCheckInternalComponentName>,\n public chromeAdapterFactory?: (\n mode: InferenceMode,\n window?: Window,\n params?: OnDeviceParams\n ) => ChromeAdapterImpl | undefined\n ) {\n const appCheck = appCheckProvider?.getImmediate({ optional: true });\n const auth = authProvider?.getImmediate({ optional: true });\n this.auth = auth || null;\n this.appCheck = appCheck || null;\n\n if (backend instanceof VertexAIBackend) {\n this.location = backend.location;\n } else {\n this.location = '';\n }\n }\n\n _delete(): Promise<void> {\n return Promise.resolve();\n }\n\n set options(optionsToSet: AIOptions) {\n this._options = optionsToSet;\n }\n\n get options(): AIOptions | undefined {\n return this._options;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n ComponentContainer,\n InstanceFactoryOptions\n} from '@firebase/component';\nimport { AIError } from './errors';\nimport { decodeInstanceIdentifier } from './helpers';\nimport { chromeAdapterFactory } from './methods/chrome-adapter';\nimport { AIService } from './service';\nimport { AIErrorCode } from './types';\n\nexport function factory(\n container: ComponentContainer,\n { instanceIdentifier }: InstanceFactoryOptions\n): AIService {\n if (!instanceIdentifier) {\n throw new AIError(\n AIErrorCode.ERROR,\n 'AIService instance identifier is undefined.'\n );\n }\n\n const backend = decodeInstanceIdentifier(instanceIdentifier);\n\n // getImmediate for FirebaseApp will always succeed\n const app = container.getProvider('app').getImmediate();\n const auth = container.getProvider('auth-internal');\n const appCheckProvider = container.getProvider('app-check-internal');\n\n return new AIService(\n app,\n backend,\n auth,\n appCheckProvider,\n chromeAdapterFactory\n );\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AI_TYPE } from './constants';\nimport { AIError } from './errors';\nimport { AIErrorCode } from './types';\nimport { Backend, GoogleAIBackend, VertexAIBackend } from './backend';\n\n/**\n * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}\n * instances by backend type.\n *\n * @internal\n */\nexport function encodeInstanceIdentifier(backend: Backend): string {\n if (backend instanceof GoogleAIBackend) {\n return `${AI_TYPE}/googleai`;\n } else if (backend instanceof VertexAIBackend) {\n return 
`${AI_TYPE}/vertexai/${backend.location}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(backend.backendType)}`\n );\n }\n}\n\n/**\n * Decodes an instance identifier string into a {@link Backend}.\n *\n * @internal\n */\nexport function decodeInstanceIdentifier(instanceIdentifier: string): Backend {\n const identifierParts = instanceIdentifier.split('/');\n if (identifierParts[0] !== AI_TYPE) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown prefix '${identifierParts[0]}'`\n );\n }\n const backendType = identifierParts[1];\n switch (backendType) {\n case 'vertexai':\n const location: string | undefined = identifierParts[2];\n if (!location) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier, unknown location '${instanceIdentifier}'`\n );\n }\n return new VertexAIBackend(location);\n case 'googleai':\n return new GoogleAIBackend();\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid instance identifier string: '${instanceIdentifier}'`\n );\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode, AI, BackendType } from '../public-types';\nimport { AIService } from '../service';\nimport { ApiSettings } from '../types/internal';\nimport { _isFirebaseServerApp } from '@firebase/app';\n\n/**\n * Base class for Firebase AI model APIs.\n *\n * Instances of this class are associated with a specific Firebase AI {@link Backend}\n * and provide methods for interacting with the configured generative model.\n *\n * @public\n */\nexport abstract class AIModel {\n /**\n * The fully qualified model resource name to use for generating images\n * (for example, `publishers/google/models/imagen-3.0-generate-002`).\n */\n readonly model: string;\n\n /**\n * @internal\n */\n _apiSettings: ApiSettings;\n\n /**\n * Constructs a new instance of the {@link AIModel} class.\n *\n * This constructor should only be called from subclasses that provide\n * a model API.\n *\n * @param ai - an {@link AI} instance.\n * @param modelName - The name of the model being used. It can be in one of the following formats:\n * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)\n * - `models/my-model` (will resolve to `publishers/google/models/my-model`)\n * - `publishers/my-publisher/models/my-model` (fully qualified model name)\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @internal\n */\n protected constructor(ai: AI, modelName: string) {\n if (!ai.app?.options?.apiKey) {\n throw new AIError(\n AIErrorCode.NO_API_KEY,\n `The \"apiKey\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`\n );\n } else if (!ai.app?.options?.projectId) {\n throw new AIError(\n AIErrorCode.NO_PROJECT_ID,\n `The \"projectId\" field is empty in the local Firebase config. 
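encodeInstanceIdentifier() and decodeInstanceIdentifier() above form a simple round trip keyed on backend type. A sketch of that behavior follows; both helpers are tagged @internal, so this illustrates the mapping rather than a public API, and AI_TYPE refers to the constant imported above.

const vertexId = encodeInstanceIdentifier(new VertexAIBackend('europe-west1'));
// -> `${AI_TYPE}/vertexai/europe-west1`

const googleId = encodeInstanceIdentifier(new GoogleAIBackend());
// -> `${AI_TYPE}/googleai`

const backend = decodeInstanceIdentifier(vertexId);
// -> a VertexAIBackend with location 'europe-west1'; malformed identifiers
//    throw AIError(AIErrorCode.ERROR, ...), per the switch above.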
Firebase AI requires this field to contain a valid project ID.`\n );\n } else if (!ai.app?.options?.appId) {\n throw new AIError(\n AIErrorCode.NO_APP_ID,\n `The \"appId\" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`\n );\n } else {\n this._apiSettings = {\n apiKey: ai.app.options.apiKey,\n project: ai.app.options.projectId,\n appId: ai.app.options.appId,\n automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,\n location: ai.location,\n backend: ai.backend\n };\n\n if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {\n const token = ai.app.settings.appCheckToken;\n this._apiSettings.getAppCheckToken = () => {\n return Promise.resolve({ token });\n };\n } else if ((ai as AIService).appCheck) {\n if (ai.options?.useLimitedUseAppCheckTokens) {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getLimitedUseToken();\n } else {\n this._apiSettings.getAppCheckToken = () =>\n (ai as AIService).appCheck!.getToken();\n }\n }\n\n if ((ai as AIService).auth) {\n this._apiSettings.getAuthToken = () =>\n (ai as AIService).auth!.getToken();\n }\n\n this.model = AIModel.normalizeModelName(\n modelName,\n this._apiSettings.backend.backendType\n );\n }\n }\n\n /**\n * Normalizes the given model name to a fully qualified model resource name.\n *\n * @param modelName - The model name to normalize.\n * @returns The fully qualified model resource name.\n *\n * @internal\n */\n static normalizeModelName(\n modelName: string,\n backendType: BackendType\n ): string {\n if (backendType === BackendType.GOOGLE_AI) {\n return AIModel.normalizeGoogleAIModelName(modelName);\n } else {\n return AIModel.normalizeVertexAIModelName(modelName);\n }\n }\n\n /**\n * @internal\n */\n private static normalizeGoogleAIModelName(modelName: string): string {\n return `models/${modelName}`;\n }\n\n /**\n * @internal\n */\n private static normalizeVertexAIModelName(modelName: string): string {\n let model: string;\n if (modelName.includes('/')) {\n if (modelName.startsWith('models/')) {\n // Add 'publishers/google' if the user is only passing in 'models/model-name'.\n model = `publishers/google/${modelName}`;\n } else {\n // Any other custom format (e.g. 
tuned models) must be passed in correctly.\n model = modelName;\n }\n } else {\n // If path is not included, assume it's a non-tuned model.\n model = `publishers/google/models/${modelName}`;\n }\n\n return model;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ErrorDetails, RequestOptions, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ApiSettings } from '../types/internal';\nimport {\n DEFAULT_API_VERSION,\n DEFAULT_DOMAIN,\n DEFAULT_FETCH_TIMEOUT_MS,\n LANGUAGE_TAG,\n PACKAGE_VERSION\n} from '../constants';\nimport { logger } from '../logger';\nimport { GoogleAIBackend, VertexAIBackend } from '../backend';\nimport { BackendType } from '../public-types';\n\nexport enum Task {\n GENERATE_CONTENT = 'generateContent',\n STREAM_GENERATE_CONTENT = 'streamGenerateContent',\n COUNT_TOKENS = 'countTokens',\n PREDICT = 'predict'\n}\n\nexport class RequestUrl {\n constructor(\n public model: string,\n public task: Task,\n public apiSettings: ApiSettings,\n public stream: boolean,\n public requestOptions?: RequestOptions\n ) {}\n toString(): string {\n const url = new URL(this.baseUrl); // Throws if the URL is invalid\n url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`;\n url.search = this.queryParams.toString();\n return url.toString();\n }\n\n private get baseUrl(): string {\n return this.requestOptions?.baseUrl || `https://${DEFAULT_DOMAIN}`;\n }\n\n private get apiVersion(): string {\n return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available\n }\n\n private get modelPath(): string {\n if (this.apiSettings.backend instanceof GoogleAIBackend) {\n return `projects/${this.apiSettings.project}/${this.model}`;\n } else if (this.apiSettings.backend instanceof VertexAIBackend) {\n return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`;\n } else {\n throw new AIError(\n AIErrorCode.ERROR,\n `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}`\n );\n }\n }\n\n private get queryParams(): URLSearchParams {\n const params = new URLSearchParams();\n if (this.stream) {\n params.set('alt', 'sse');\n }\n\n return params;\n }\n}\n\nexport class WebSocketUrl {\n constructor(public apiSettings: ApiSettings) {}\n toString(): string {\n const url = new URL(`wss://${DEFAULT_DOMAIN}`);\n url.pathname = this.pathname;\n\n const queryParams = new URLSearchParams();\n queryParams.set('key', this.apiSettings.apiKey);\n url.search = queryParams.toString();\n\n return url.toString();\n }\n\n private get pathname(): string {\n if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent';\n } else {\n return `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`;\n }\n }\n}\n\n/**\n * Log language and \"fire/version\" to x-goog-api-client\n 
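AIModel.normalizeModelName() above resolves short model names into fully qualified resource names per backend. Illustrative calls follow; the method is tagged @internal, and `my-model` is the placeholder name used in its own docs.

AIModel.normalizeModelName('my-model', BackendType.GOOGLE_AI);
// -> 'models/my-model'

AIModel.normalizeModelName('my-model', BackendType.VERTEX_AI);
// -> 'publishers/google/models/my-model'

AIModel.normalizeModelName('models/my-model', BackendType.VERTEX_AI);
// -> 'publishers/google/models/my-model'

AIModel.normalizeModelName('publishers/my-publisher/models/my-model', BackendType.VERTEX_AI);
// -> unchanged (custom/tuned paths are passed through)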
*/\nfunction getClientHeaders(): string {\n const loggingTags = [];\n loggingTags.push(`${LANGUAGE_TAG}/${PACKAGE_VERSION}`);\n loggingTags.push(`fire/${PACKAGE_VERSION}`);\n return loggingTags.join(' ');\n}\n\nexport async function getHeaders(url: RequestUrl): Promise<Headers> {\n const headers = new Headers();\n headers.append('Content-Type', 'application/json');\n headers.append('x-goog-api-client', getClientHeaders());\n headers.append('x-goog-api-key', url.apiSettings.apiKey);\n if (url.apiSettings.automaticDataCollectionEnabled) {\n headers.append('X-Firebase-Appid', url.apiSettings.appId);\n }\n if (url.apiSettings.getAppCheckToken) {\n const appCheckToken = await url.apiSettings.getAppCheckToken();\n if (appCheckToken) {\n headers.append('X-Firebase-AppCheck', appCheckToken.token);\n if (appCheckToken.error) {\n logger.warn(\n `Unable to obtain a valid App Check token: ${appCheckToken.error.message}`\n );\n }\n }\n }\n\n if (url.apiSettings.getAuthToken) {\n const authToken = await url.apiSettings.getAuthToken();\n if (authToken) {\n headers.append('Authorization', `Firebase ${authToken.accessToken}`);\n }\n }\n\n return headers;\n}\n\nexport async function constructRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<{ url: string; fetchOptions: RequestInit }> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n return {\n url: url.toString(),\n fetchOptions: {\n method: 'POST',\n headers: await getHeaders(url),\n body\n }\n };\n}\n\nexport async function makeRequest(\n model: string,\n task: Task,\n apiSettings: ApiSettings,\n stream: boolean,\n body: string,\n requestOptions?: RequestOptions\n): Promise<Response> {\n const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);\n let response;\n let fetchTimeoutId: string | number | NodeJS.Timeout | undefined;\n try {\n const request = await constructRequest(\n model,\n task,\n apiSettings,\n stream,\n body,\n requestOptions\n );\n // Timeout is 180s by default\n const timeoutMillis =\n requestOptions?.timeout != null && requestOptions.timeout >= 0\n ? requestOptions.timeout\n : DEFAULT_FETCH_TIMEOUT_MS;\n const abortController = new AbortController();\n fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis);\n request.fetchOptions.signal = abortController.signal;\n\n response = await fetch(request.url, request.fetchOptions);\n if (!response.ok) {\n let message = '';\n let errorDetails;\n try {\n const json = await response.json();\n message = json.error.message;\n if (json.error.details) {\n message += ` ${JSON.stringify(json.error.details)}`;\n errorDetails = json.error.details;\n }\n } catch (e) {\n // ignored\n }\n if (\n response.status === 403 &&\n errorDetails &&\n errorDetails.some(\n (detail: ErrorDetails) => detail.reason === 'SERVICE_DISABLED'\n ) &&\n errorDetails.some((detail: ErrorDetails) =>\n (\n detail.links as Array<Record<string, string>>\n )?.[0]?.description.includes(\n 'Google developers console API activation'\n )\n )\n ) {\n throw new AIError(\n AIErrorCode.API_NOT_ENABLED,\n `The Firebase AI SDK requires the Firebase AI ` +\n `API ('firebasevertexai.googleapis.com') to be enabled in your ` +\n `Firebase project. Enable this API by visiting the Firebase Console ` +\n `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` +\n `and clicking \"Get started\". 
If you enabled this API recently, ` +\n `wait a few minutes for the action to propagate to our systems and ` +\n `then retry.`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n throw new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`,\n {\n status: response.status,\n statusText: response.statusText,\n errorDetails\n }\n );\n }\n } catch (e) {\n let err = e as Error;\n if (\n (e as AIError).code !== AIErrorCode.FETCH_ERROR &&\n (e as AIError).code !== AIErrorCode.API_NOT_ENABLED &&\n e instanceof Error\n ) {\n err = new AIError(\n AIErrorCode.ERROR,\n `Error fetching from ${url.toString()}: ${e.message}`\n );\n err.stack = e.stack;\n }\n\n throw err;\n } finally {\n if (fetchTimeoutId) {\n clearTimeout(fetchTimeoutId);\n }\n }\n return response;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n FinishReason,\n FunctionCall,\n GenerateContentCandidate,\n GenerateContentResponse,\n ImagenGCSImage,\n ImagenInlineImage,\n AIErrorCode,\n InlineDataPart,\n Part,\n InferenceSource\n} from '../types';\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport { ImagenResponseInternal } from '../types/internal';\n\n/**\n * Check that at least one candidate exists and does not have a bad\n * finish reason. Warns if multiple candidates exist.\n */\nfunction hasValidCandidates(response: GenerateContentResponse): boolean {\n if (response.candidates && response.candidates.length > 0) {\n if (response.candidates.length > 1) {\n logger.warn(\n `This response had ${response.candidates.length} ` +\n `candidates. Returning text from the first candidate only. ` +\n `Access response.candidates directly to use the other candidates.`\n );\n }\n if (hadBadFinishReason(response.candidates[0])) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Response error: ${formatBlockErrorMessage(\n response\n )}. 
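makeRequest() above maps backend failures to AIError codes such as FETCH_ERROR and API_NOT_ENABLED. A hedged consumer-side sketch: `callModel` is a stand-in for any SDK call that reaches the backend, and AIError/AIErrorCode are assumed to be exported from 'firebase/ai' as the public types referenced above.

import { AIError, AIErrorCode } from 'firebase/ai';

async function withDiagnostics<T>(callModel: () => Promise<T>): Promise<T | undefined> {
  try {
    return await callModel();
  } catch (e) {
    if (e instanceof AIError && e.code === AIErrorCode.API_NOT_ENABLED) {
      // The Firebase AI API is not enabled for this project yet; the error
      // message includes the console link built in makeRequest() above.
      console.error(e.message);
      return undefined;
    }
    throw e; // FETCH_ERROR and other codes are rethrown unchanged.
  }
}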
Response body stored in error.response`,\n {\n response\n }\n );\n }\n return true;\n } else {\n return false;\n }\n}\n\n/**\n * Creates an EnhancedGenerateContentResponse object that has helper functions and\n * other modifications that improve usability.\n */\nexport function createEnhancedContentResponse(\n response: GenerateContentResponse,\n inferenceSource: InferenceSource = InferenceSource.IN_CLOUD\n): EnhancedGenerateContentResponse {\n /**\n * The Vertex AI backend omits default values.\n * This causes the `index` property to be omitted from the first candidate in the\n * response, since it has index 0, and 0 is a default value.\n * See: https://github.com/firebase/firebase-js-sdk/issues/8566\n */\n if (response.candidates && !response.candidates[0].hasOwnProperty('index')) {\n response.candidates[0].index = 0;\n }\n\n const responseWithHelpers = addHelpers(response);\n responseWithHelpers.inferenceSource = inferenceSource;\n return responseWithHelpers;\n}\n\n/**\n * Adds convenience helper methods to a response object, including stream\n * chunks (as long as each chunk is a complete GenerateContentResponse JSON).\n */\nexport function addHelpers(\n response: GenerateContentResponse\n): EnhancedGenerateContentResponse {\n (response as EnhancedGenerateContentResponse).text = () => {\n if (hasValidCandidates(response)) {\n return getText(response, part => !part.thought);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Text not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return '';\n };\n (response as EnhancedGenerateContentResponse).thoughtSummary = () => {\n if (hasValidCandidates(response)) {\n const result = getText(response, part => !!part.thought);\n return result === '' ? undefined : result;\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Thought summary not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).inlineDataParts = ():\n | InlineDataPart[]\n | undefined => {\n if (hasValidCandidates(response)) {\n return getInlineDataParts(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Data not available. ${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n (response as EnhancedGenerateContentResponse).functionCalls = () => {\n if (hasValidCandidates(response)) {\n return getFunctionCalls(response);\n } else if (response.promptFeedback) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Function call not available. 
${formatBlockErrorMessage(response)}`,\n {\n response\n }\n );\n }\n return undefined;\n };\n return response as EnhancedGenerateContentResponse;\n}\n\n/**\n * Returns all text from the first candidate's parts, filtering by whether\n * `partFilter()` returns true.\n *\n * @param response - The `GenerateContentResponse` from which to extract text.\n * @param partFilter - Only return `Part`s for which this returns true\n */\nexport function getText(\n response: GenerateContentResponse,\n partFilter: (part: Part) => boolean\n): string {\n const textStrings = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.text && partFilter(part)) {\n textStrings.push(part.text);\n }\n }\n }\n if (textStrings.length > 0) {\n return textStrings.join('');\n } else {\n return '';\n }\n}\n\n/**\n * Returns every {@link FunctionCall} associated with first candidate.\n */\nexport function getFunctionCalls(\n response: GenerateContentResponse\n): FunctionCall[] | undefined {\n const functionCalls: FunctionCall[] = [];\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.functionCall) {\n functionCalls.push(part.functionCall);\n }\n }\n }\n if (functionCalls.length > 0) {\n return functionCalls;\n } else {\n return undefined;\n }\n}\n\n/**\n * Returns every {@link InlineDataPart} in the first candidate if present.\n *\n * @internal\n */\nexport function getInlineDataParts(\n response: GenerateContentResponse\n): InlineDataPart[] | undefined {\n const data: InlineDataPart[] = [];\n\n if (response.candidates?.[0].content?.parts) {\n for (const part of response.candidates?.[0].content?.parts) {\n if (part.inlineData) {\n data.push(part);\n }\n }\n }\n\n if (data.length > 0) {\n return data;\n } else {\n return undefined;\n }\n}\n\nconst badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY];\n\nfunction hadBadFinishReason(candidate: GenerateContentCandidate): boolean {\n return (\n !!candidate.finishReason &&\n badFinishReasons.some(reason => reason === candidate.finishReason)\n );\n}\n\nexport function formatBlockErrorMessage(\n response: GenerateContentResponse\n): string {\n let message = '';\n if (\n (!response.candidates || response.candidates.length === 0) &&\n response.promptFeedback\n ) {\n message += 'Response was blocked';\n if (response.promptFeedback?.blockReason) {\n message += ` due to ${response.promptFeedback.blockReason}`;\n }\n if (response.promptFeedback?.blockReasonMessage) {\n message += `: ${response.promptFeedback.blockReasonMessage}`;\n }\n } else if (response.candidates?.[0]) {\n const firstCandidate = response.candidates[0];\n if (hadBadFinishReason(firstCandidate)) {\n message += `Candidate was blocked due to ${firstCandidate.finishReason}`;\n if (firstCandidate.finishMessage) {\n message += `: ${firstCandidate.finishMessage}`;\n }\n }\n }\n return message;\n}\n\n/**\n * Convert a generic successful fetch response body to an Imagen response object\n * that can be returned to the user. 
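addHelpers() above attaches text(), thoughtSummary(), inlineDataParts(), and functionCalls() to a response. A short sketch of consuming those helpers, assuming EnhancedGenerateContentResponse is exported from 'firebase/ai' as the public type referenced above:

import { EnhancedGenerateContentResponse } from 'firebase/ai';

function logResponse(response: EnhancedGenerateContentResponse): void {
  console.log(response.text());            // non-thought text parts, concatenated
  console.log(response.thoughtSummary());  // undefined when there are no thought parts
  console.log(response.functionCalls());   // undefined when there are no function calls
  console.log(response.inlineDataParts()); // undefined when there is no inline data
}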
This converts the REST APIs response format to our\n * APIs representation of a response.\n *\n * @internal\n */\nexport async function handlePredictResponse<\n T extends ImagenInlineImage | ImagenGCSImage\n>(response: Response): Promise<{ images: T[]; filteredReason?: string }> {\n const responseJson: ImagenResponseInternal = await response.json();\n\n const images: T[] = [];\n let filteredReason: string | undefined = undefined;\n\n // The backend should always send a non-empty array of predictions if the response was successful.\n if (!responseJson.predictions || responseJson.predictions?.length === 0) {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.'\n );\n }\n\n for (const prediction of responseJson.predictions) {\n if (prediction.raiFilteredReason) {\n filteredReason = prediction.raiFilteredReason;\n } else if (prediction.mimeType && prediction.bytesBase64Encoded) {\n images.push({\n mimeType: prediction.mimeType,\n bytesBase64Encoded: prediction.bytesBase64Encoded\n } as T);\n } else if (prediction.mimeType && prediction.gcsUri) {\n images.push({\n mimeType: prediction.mimeType,\n gcsURI: prediction.gcsUri\n } as T);\n } else if (prediction.safetyAttributes) {\n // Ignore safetyAttributes \"prediction\" to avoid throwing an error below.\n } else {\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n `Unexpected element in 'predictions' array in response: '${JSON.stringify(\n prediction\n )}'`\n );\n }\n }\n\n return { images, filteredReason };\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport {\n CitationMetadata,\n CountTokensRequest,\n GenerateContentCandidate,\n GenerateContentRequest,\n GenerateContentResponse,\n HarmSeverity,\n InlineDataPart,\n PromptFeedback,\n SafetyRating,\n AIErrorCode\n} from './types';\nimport {\n GoogleAIGenerateContentResponse,\n GoogleAIGenerateContentCandidate,\n GoogleAICountTokensRequest\n} from './types/googleai';\n\n/**\n * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).\n * The public API prioritizes the format used by the Vertex AI Gemini API.\n * We avoid having two sets of types by translating requests and responses between the two API formats.\n * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API\n * with minimal code changes.\n *\n * In here are functions that map requests and responses between the two API formats.\n * Requests in the Vertex AI format are mapped to the Google AI format before being sent.\n * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.\n */\n\n/**\n * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google 
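handlePredictResponse() above normalizes a raw predict Response into an images array plus an optional filtered reason. The sketch below only illustrates reading that return shape; the helper is tagged @internal, so in practice the higher-level Imagen wrappers call it on your behalf.

async function readImages(rawResponse: Response): Promise<void> {
  const { images, filteredReason } =
    await handlePredictResponse<ImagenInlineImage>(rawResponse);
  if (filteredReason) {
    console.warn(`Some images were filtered: ${filteredReason}`);
  }
  for (const image of images) {
    // ImagenInlineImage carries mimeType and bytesBase64Encoded, per the mapping above.
    console.log(image.mimeType, image.bytesBase64Encoded.length);
  }
}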
AI.\n *\n * @param generateContentRequest The {@link GenerateContentRequest} to map.\n * @returns A {@link GenerateContentResponse} that conforms to the Google AI format.\n *\n * @throws If the request contains properties that are unsupported by Google AI.\n *\n * @internal\n */\nexport function mapGenerateContentRequest(\n generateContentRequest: GenerateContentRequest\n): GenerateContentRequest {\n generateContentRequest.safetySettings?.forEach(safetySetting => {\n if (safetySetting.method) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'SafetySetting.method is not supported in the the Gemini Developer API. Please remove this property.'\n );\n }\n });\n\n if (generateContentRequest.generationConfig?.topK) {\n const roundedTopK = Math.round(\n generateContentRequest.generationConfig.topK\n );\n\n if (roundedTopK !== generateContentRequest.generationConfig.topK) {\n logger.warn(\n 'topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.'\n );\n generateContentRequest.generationConfig.topK = roundedTopK;\n }\n }\n\n return generateContentRequest;\n}\n\n/**\n * Maps a {@link GenerateContentResponse} from Google AI to the format of the\n * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.\n *\n * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.\n * @returns A {@link GenerateContentResponse} that conforms to the public API's format.\n *\n * @internal\n */\nexport function mapGenerateContentResponse(\n googleAIResponse: GoogleAIGenerateContentResponse\n): GenerateContentResponse {\n const generateContentResponse = {\n candidates: googleAIResponse.candidates\n ? mapGenerateContentCandidates(googleAIResponse.candidates)\n : undefined,\n prompt: googleAIResponse.promptFeedback\n ? 
mapPromptFeedback(googleAIResponse.promptFeedback)\n : undefined,\n usageMetadata: googleAIResponse.usageMetadata\n };\n\n return generateContentResponse;\n}\n\n/**\n * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.\n *\n * @param countTokensRequest The {@link CountTokensRequest} to map.\n * @param model The model to count tokens with.\n * @returns A {@link CountTokensRequest} that conforms to the Google AI format.\n *\n * @internal\n */\nexport function mapCountTokensRequest(\n countTokensRequest: CountTokensRequest,\n model: string\n): GoogleAICountTokensRequest {\n const mappedCountTokensRequest: GoogleAICountTokensRequest = {\n generateContentRequest: {\n model,\n ...countTokensRequest\n }\n };\n\n return mappedCountTokensRequest;\n}\n\n/**\n * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms\n * to the Vertex AI API format.\n *\n * @param candidates The {@link GoogleAIGenerateContentCandidate} to map.\n * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format.\n *\n * @throws If any {@link Part} in the candidates has a `videoMetadata` property.\n *\n * @internal\n */\nexport function mapGenerateContentCandidates(\n candidates: GoogleAIGenerateContentCandidate[]\n): GenerateContentCandidate[] {\n const mappedCandidates: GenerateContentCandidate[] = [];\n let mappedSafetyRatings: SafetyRating[];\n if (mappedCandidates) {\n candidates.forEach(candidate => {\n // Map citationSources to citations.\n let citationMetadata: CitationMetadata | undefined;\n if (candidate.citationMetadata) {\n citationMetadata = {\n citations: candidate.citationMetadata.citationSources\n };\n }\n\n // Assign missing candidate SafetyRatings properties to their defaults if undefined.\n if (candidate.safetyRatings) {\n mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => {\n return {\n ...safetyRating,\n severity:\n safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 0\n };\n });\n }\n\n // videoMetadata is not supported.\n // Throw early since developers may send a long video as input and only expect to pay\n // for inference on a small portion of the video.\n if (\n candidate.content?.parts?.some(\n part => (part as InlineDataPart)?.videoMetadata\n )\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.'\n );\n }\n\n const mappedCandidate = {\n index: candidate.index,\n content: candidate.content,\n finishReason: candidate.finishReason,\n finishMessage: candidate.finishMessage,\n safetyRatings: mappedSafetyRatings,\n citationMetadata,\n groundingMetadata: candidate.groundingMetadata,\n urlContextMetadata: candidate.urlContextMetadata\n };\n mappedCandidates.push(mappedCandidate);\n });\n }\n\n return mappedCandidates;\n}\n\nexport function mapPromptFeedback(\n promptFeedback: PromptFeedback\n): PromptFeedback {\n // Assign missing SafetyRating properties to their defaults if undefined.\n const mappedSafetyRatings: SafetyRating[] = [];\n promptFeedback.safetyRatings.forEach(safetyRating => {\n mappedSafetyRatings.push({\n category: safetyRating.category,\n probability: safetyRating.probability,\n severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,\n probabilityScore: safetyRating.probabilityScore ?? 0,\n severityScore: safetyRating.severityScore ?? 
0,\n blocked: safetyRating.blocked\n });\n });\n\n const mappedPromptFeedback: PromptFeedback = {\n blockReason: promptFeedback.blockReason,\n safetyRatings: mappedSafetyRatings,\n blockReasonMessage: promptFeedback.blockReasonMessage\n };\n return mappedPromptFeedback;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n EnhancedGenerateContentResponse,\n GenerateContentCandidate,\n GenerateContentResponse,\n GenerateContentStreamResult,\n Part,\n AIErrorCode\n} from '../types';\nimport { AIError } from '../errors';\nimport { createEnhancedContentResponse } from './response-helpers';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { GoogleAIGenerateContentResponse } from '../types/googleai';\nimport { ApiSettings } from '../types/internal';\nimport {\n BackendType,\n InferenceSource,\n URLContextMetadata\n} from '../public-types';\n\nconst responseLineRE = /^data\\: (.*)(?:\\n\\n|\\r\\r|\\r\\n\\r\\n)/;\n\n/**\n * Process a response.body stream from the backend and return an\n * iterator that provides one complete GenerateContentResponse at a time\n * and a promise that resolves with a single aggregated\n * GenerateContentResponse.\n *\n * @param response - Response from a fetch call\n */\nexport function processStream(\n response: Response,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): GenerateContentStreamResult {\n const inputStream = response.body!.pipeThrough(\n new TextDecoderStream('utf8', { fatal: true })\n );\n const responseStream =\n getResponseStream<GenerateContentResponse>(inputStream);\n const [stream1, stream2] = responseStream.tee();\n return {\n stream: generateResponseSequence(stream1, apiSettings, inferenceSource),\n response: getResponsePromise(stream2, apiSettings, inferenceSource)\n };\n}\n\nasync function getResponsePromise(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): Promise<EnhancedGenerateContentResponse> {\n const allResponses: GenerateContentResponse[] = [];\n const reader = stream.getReader();\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n let generateContentResponse = aggregateResponses(allResponses);\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n generateContentResponse = GoogleAIMapper.mapGenerateContentResponse(\n generateContentResponse as GoogleAIGenerateContentResponse\n );\n }\n return createEnhancedContentResponse(\n generateContentResponse,\n inferenceSource\n );\n }\n\n allResponses.push(value);\n }\n}\n\nasync function* generateResponseSequence(\n stream: ReadableStream<GenerateContentResponse>,\n apiSettings: ApiSettings,\n inferenceSource?: InferenceSource\n): AsyncGenerator<EnhancedGenerateContentResponse> {\n const reader = stream.getReader();\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n break;\n }\n\n let enhancedResponse: EnhancedGenerateContentResponse;\n if 
(apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n enhancedResponse = createEnhancedContentResponse(\n GoogleAIMapper.mapGenerateContentResponse(\n value as GoogleAIGenerateContentResponse\n ),\n inferenceSource\n );\n } else {\n enhancedResponse = createEnhancedContentResponse(value, inferenceSource);\n }\n\n const firstCandidate = enhancedResponse.candidates?.[0];\n // Don't yield a response with no useful data for the developer.\n if (\n !firstCandidate?.content?.parts &&\n !firstCandidate?.finishReason &&\n !firstCandidate?.citationMetadata &&\n !firstCandidate?.urlContextMetadata\n ) {\n continue;\n }\n\n yield enhancedResponse;\n }\n}\n\n/**\n * Reads a raw stream from the fetch response and join incomplete\n * chunks, returning a new stream that provides a single complete\n * GenerateContentResponse in each iteration.\n */\nexport function getResponseStream<T>(\n inputStream: ReadableStream<string>\n): ReadableStream<T> {\n const reader = inputStream.getReader();\n const stream = new ReadableStream<T>({\n start(controller) {\n let currentText = '';\n return pump();\n function pump(): Promise<(() => Promise<void>) | undefined> {\n return reader.read().then(({ value, done }) => {\n if (done) {\n if (currentText.trim()) {\n controller.error(\n new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream')\n );\n return;\n }\n controller.close();\n return;\n }\n\n currentText += value;\n let match = currentText.match(responseLineRE);\n let parsedResponse: T;\n while (match) {\n try {\n parsedResponse = JSON.parse(match[1]);\n } catch (e) {\n controller.error(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing JSON response: \"${match[1]}`\n )\n );\n return;\n }\n controller.enqueue(parsedResponse);\n currentText = currentText.substring(match[0].length);\n match = currentText.match(responseLineRE);\n }\n return pump();\n });\n }\n }\n });\n return stream;\n}\n\n/**\n * Aggregates an array of `GenerateContentResponse`s into a single\n * GenerateContentResponse.\n */\nexport function aggregateResponses(\n responses: GenerateContentResponse[]\n): GenerateContentResponse {\n const lastResponse = responses[responses.length - 1];\n const aggregatedResponse: GenerateContentResponse = {\n promptFeedback: lastResponse?.promptFeedback\n };\n for (const response of responses) {\n if (response.candidates) {\n for (const candidate of response.candidates) {\n // Index will be undefined if it's the first index (0), so we should use 0 if it's undefined.\n // See: https://github.com/firebase/firebase-js-sdk/issues/8566\n const i = candidate.index || 0;\n if (!aggregatedResponse.candidates) {\n aggregatedResponse.candidates = [];\n }\n if (!aggregatedResponse.candidates[i]) {\n aggregatedResponse.candidates[i] = {\n index: candidate.index\n } as GenerateContentCandidate;\n }\n // Keep overwriting, the last one will be final\n aggregatedResponse.candidates[i].citationMetadata =\n candidate.citationMetadata;\n aggregatedResponse.candidates[i].finishReason = candidate.finishReason;\n aggregatedResponse.candidates[i].finishMessage =\n candidate.finishMessage;\n aggregatedResponse.candidates[i].safetyRatings =\n candidate.safetyRatings;\n aggregatedResponse.candidates[i].groundingMetadata =\n candidate.groundingMetadata;\n\n // The urlContextMetadata object is defined in the first chunk of the response stream.\n // In all subsequent chunks, the urlContextMetadata object will be undefined. 
We need to\n // make sure that we don't overwrite the first value urlContextMetadata object with undefined.\n // FIXME: What happens if we receive a second, valid urlContextMetadata object?\n const urlContextMetadata = candidate.urlContextMetadata as unknown;\n if (\n typeof urlContextMetadata === 'object' &&\n urlContextMetadata !== null &&\n Object.keys(urlContextMetadata).length > 0\n ) {\n aggregatedResponse.candidates[i].urlContextMetadata =\n urlContextMetadata as URLContextMetadata;\n }\n\n /**\n * Candidates should always have content and parts, but this handles\n * possible malformed responses.\n */\n if (candidate.content) {\n // Skip a candidate without parts.\n if (!candidate.content.parts) {\n continue;\n }\n if (!aggregatedResponse.candidates[i].content) {\n aggregatedResponse.candidates[i].content = {\n role: candidate.content.role || 'user',\n parts: []\n };\n }\n for (const part of candidate.content.parts) {\n const newPart: Part = { ...part };\n // The backend can send empty text parts. If these are sent back\n // (e.g. in chat history), the backend will respond with an error.\n // To prevent this, ignore empty text parts.\n if (part.text === '') {\n continue;\n }\n if (Object.keys(newPart).length > 0) {\n aggregatedResponse.candidates[i].content.parts.push(\n newPart as Part\n );\n }\n }\n }\n }\n }\n }\n return aggregatedResponse;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n GenerateContentRequest,\n InferenceMode,\n AIErrorCode,\n ChromeAdapter,\n InferenceSource\n} from '../types';\nimport { ChromeAdapterImpl } from '../methods/chrome-adapter';\n\nconst errorsCausingFallback: AIErrorCode[] = [\n // most network errors\n AIErrorCode.FETCH_ERROR,\n // fallback code for all other errors in makeRequest\n AIErrorCode.ERROR,\n // error due to API not being enabled in project\n AIErrorCode.API_NOT_ENABLED\n];\n\ninterface CallResult<Response> {\n response: Response;\n inferenceSource: InferenceSource;\n}\n\n/**\n * Dispatches a request to the appropriate backend (on-device or in-cloud)\n * based on the inference mode.\n *\n * @param request - The request to be sent.\n * @param chromeAdapter - The on-device model adapter.\n * @param onDeviceCall - The function to call for on-device inference.\n * @param inCloudCall - The function to call for in-cloud inference.\n * @returns The response from the backend.\n */\nexport async function callCloudOrDevice<Response>(\n request: GenerateContentRequest,\n chromeAdapter: ChromeAdapter | undefined,\n onDeviceCall: () => Promise<Response>,\n inCloudCall: () => Promise<Response>\n): Promise<CallResult<Response>> {\n if (!chromeAdapter) {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n }\n switch ((chromeAdapter as ChromeAdapterImpl).mode) {\n case InferenceMode.ONLY_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await 
onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.'\n );\n case InferenceMode.ONLY_IN_CLOUD:\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n case InferenceMode.PREFER_IN_CLOUD:\n try {\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n } catch (e) {\n if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n throw e;\n }\n case InferenceMode.PREFER_ON_DEVICE:\n if (await chromeAdapter.isAvailable(request)) {\n return {\n response: await onDeviceCall(),\n inferenceSource: InferenceSource.ON_DEVICE\n };\n }\n return {\n response: await inCloudCall(),\n inferenceSource: InferenceSource.IN_CLOUD\n };\n default:\n throw new AIError(\n AIErrorCode.ERROR,\n `Unexpected infererence mode: ${\n (chromeAdapter as ChromeAdapterImpl).mode\n }`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n GenerateContentRequest,\n GenerateContentResponse,\n GenerateContentResult,\n GenerateContentStreamResult,\n RequestOptions\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createEnhancedContentResponse } from '../requests/response-helpers';\nimport { processStream } from '../requests/stream-reader';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { callCloudOrDevice } from '../requests/hybrid-helpers';\n\nasync function generateContentStreamOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.STREAM_GENERATE_CONTENT,\n apiSettings,\n /* stream */ true,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContentStream(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentStreamResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContentStream(params),\n () =>\n generateContentStreamOnCloud(apiSettings, model, params, requestOptions)\n );\n return processStream(callResult.response, apiSettings); // TODO: Map streaming responses\n}\n\nasync function generateContentOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n requestOptions?: RequestOptions\n): Promise<Response> {\n 
if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n params = GoogleAIMapper.mapGenerateContentRequest(params);\n }\n return makeRequest(\n model,\n Task.GENERATE_CONTENT,\n apiSettings,\n /* stream */ false,\n JSON.stringify(params),\n requestOptions\n );\n}\n\nexport async function generateContent(\n apiSettings: ApiSettings,\n model: string,\n params: GenerateContentRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<GenerateContentResult> {\n const callResult = await callCloudOrDevice(\n params,\n chromeAdapter,\n () => chromeAdapter!.generateContent(params),\n () => generateContentOnCloud(apiSettings, model, params, requestOptions)\n );\n const generateContentResponse = await processGenerateContentResponse(\n callResult.response,\n apiSettings\n );\n const enhancedResponse = createEnhancedContentResponse(\n generateContentResponse,\n callResult.inferenceSource\n );\n return {\n response: enhancedResponse\n };\n}\n\nasync function processGenerateContentResponse(\n response: Response,\n apiSettings: ApiSettings\n): Promise<GenerateContentResponse> {\n const responseJson = await response.json();\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n return GoogleAIMapper.mapGenerateContentResponse(responseJson);\n } else {\n return responseJson;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, GenerateContentRequest, Part, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\nimport { ImagenGenerationParams, PredictRequestBody } from '../types/internal';\n\nexport function formatSystemInstruction(\n input?: string | Part | Content\n): Content | undefined {\n // null or undefined\n if (input == null) {\n return undefined;\n } else if (typeof input === 'string') {\n return { role: 'system', parts: [{ text: input }] } as Content;\n } else if ((input as Part).text) {\n return { role: 'system', parts: [input as Part] };\n } else if ((input as Content).parts) {\n if (!(input as Content).role) {\n return { role: 'system', parts: (input as Content).parts };\n } else {\n return input as Content;\n }\n }\n}\n\nexport function formatNewContent(\n request: string | Array<string | Part>\n): Content {\n let newParts: Part[] = [];\n if (typeof request === 'string') {\n newParts = [{ text: request }];\n } else {\n for (const partOrString of request) {\n if (typeof partOrString === 'string') {\n newParts.push({ text: partOrString });\n } else {\n newParts.push(partOrString);\n }\n }\n }\n return assignRoleToPartsAndValidateSendMessageRequest(newParts);\n}\n\n/**\n * When multiple Part types (i.e. FunctionResponsePart and TextPart) are\n * passed in a single Part array, we may need to assign different roles to each\n * part. 
Currently only FunctionResponsePart requires a role other than 'user'.\n * @private\n * @param parts Array of parts to pass to the model\n * @returns Array of content items\n */\nfunction assignRoleToPartsAndValidateSendMessageRequest(\n parts: Part[]\n): Content {\n const userContent: Content = { role: 'user', parts: [] };\n const functionContent: Content = { role: 'function', parts: [] };\n let hasUserContent = false;\n let hasFunctionContent = false;\n for (const part of parts) {\n if ('functionResponse' in part) {\n functionContent.parts.push(part);\n hasFunctionContent = true;\n } else {\n userContent.parts.push(part);\n hasUserContent = true;\n }\n }\n\n if (hasUserContent && hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.'\n );\n }\n\n if (!hasUserContent && !hasFunctionContent) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n 'No Content is provided for sending chat message.'\n );\n }\n\n if (hasUserContent) {\n return userContent;\n }\n\n return functionContent;\n}\n\nexport function formatGenerateContentInput(\n params: GenerateContentRequest | string | Array<string | Part>\n): GenerateContentRequest {\n let formattedRequest: GenerateContentRequest;\n if ((params as GenerateContentRequest).contents) {\n formattedRequest = params as GenerateContentRequest;\n } else {\n // Array or string\n const content = formatNewContent(params as string | Array<string | Part>);\n formattedRequest = { contents: [content] };\n }\n if ((params as GenerateContentRequest).systemInstruction) {\n formattedRequest.systemInstruction = formatSystemInstruction(\n (params as GenerateContentRequest).systemInstruction\n );\n }\n return formattedRequest;\n}\n\n/**\n * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format\n * that is expected from the REST API.\n *\n * @internal\n */\nexport function createPredictRequestBody(\n prompt: string,\n {\n gcsURI,\n imageFormat,\n addWatermark,\n numberOfImages = 1,\n negativePrompt,\n aspectRatio,\n safetyFilterLevel,\n personFilterLevel\n }: ImagenGenerationParams\n): PredictRequestBody {\n // Properties that are undefined will be omitted from the JSON string that is sent in the request.\n const body: PredictRequestBody = {\n instances: [\n {\n prompt\n }\n ],\n parameters: {\n storageUri: gcsURI,\n negativePrompt,\n sampleCount: numberOfImages,\n aspectRatio,\n outputOptions: imageFormat,\n addWatermark,\n safetyFilterLevel,\n personGeneration: personFilterLevel,\n includeRaiReason: true,\n includeSafetyAttributes: true\n }\n };\n return body;\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Content, POSSIBLE_ROLES, Part, Role, AIErrorCode } from '../types';\nimport { AIError } from '../errors';\n\n// https://ai.google.dev/api/rest/v1beta/Content#part\n\nconst VALID_PART_FIELDS: Array<keyof Part> = 
[\n 'text',\n 'inlineData',\n 'functionCall',\n 'functionResponse',\n 'thought',\n 'thoughtSignature'\n];\n\nconst VALID_PARTS_PER_ROLE: { [key in Role]: Array<keyof Part> } = {\n user: ['text', 'inlineData'],\n function: ['functionResponse'],\n model: ['text', 'functionCall', 'thought', 'thoughtSignature'],\n // System instructions shouldn't be in history anyway.\n system: ['text']\n};\n\nconst VALID_PREVIOUS_CONTENT_ROLES: { [key in Role]: Role[] } = {\n user: ['model'],\n function: ['model'],\n model: ['user', 'function'],\n // System instructions shouldn't be in history.\n system: []\n};\n\nexport function validateChatHistory(history: Content[]): void {\n let prevContent: Content | null = null;\n for (const currContent of history) {\n const { role, parts } = currContent;\n if (!prevContent && role !== 'user') {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `First Content should be with role 'user', got ${role}`\n );\n }\n if (!POSSIBLE_ROLES.includes(role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(\n POSSIBLE_ROLES\n )}`\n );\n }\n\n if (!Array.isArray(parts)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content should have 'parts' property with an array of Parts`\n );\n }\n\n if (parts.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Each Content should have at least one part`\n );\n }\n\n const countFields: Record<keyof Part, number> = {\n text: 0,\n inlineData: 0,\n functionCall: 0,\n functionResponse: 0,\n thought: 0,\n thoughtSignature: 0,\n executableCode: 0,\n codeExecutionResult: 0\n };\n\n for (const part of parts) {\n for (const key of VALID_PART_FIELDS) {\n if (key in part) {\n countFields[key] += 1;\n }\n }\n }\n const validParts = VALID_PARTS_PER_ROLE[role];\n for (const key of VALID_PART_FIELDS) {\n if (!validParts.includes(key) && countFields[key] > 0) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't contain '${key}' part`\n );\n }\n }\n\n if (prevContent) {\n const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role];\n if (!validPreviousContentRoles.includes(prevContent.role)) {\n throw new AIError(\n AIErrorCode.INVALID_CONTENT,\n `Content with role '${role}' can't follow '${\n prevContent.role\n }'. 
Valid previous roles: ${JSON.stringify(\n VALID_PREVIOUS_CONTENT_ROLES\n )}`\n );\n }\n }\n prevContent = currContent;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n Content,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n Part,\n RequestOptions,\n StartChatParams\n} from '../types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { formatBlockErrorMessage } from '../requests/response-helpers';\nimport { validateChatHistory } from './chat-session-helpers';\nimport { generateContent, generateContentStream } from './generate-content';\nimport { ApiSettings } from '../types/internal';\nimport { logger } from '../logger';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Do not log a message for this error.\n */\nconst SILENT_ERROR = 'SILENT_ERROR';\n\n/**\n * ChatSession class that enables sending chat messages and stores\n * history of sent and received messages so far.\n *\n * @public\n */\nexport class ChatSession {\n private _apiSettings: ApiSettings;\n private _history: Content[] = [];\n private _sendPromise: Promise<void> = Promise.resolve();\n\n constructor(\n apiSettings: ApiSettings,\n public model: string,\n private chromeAdapter?: ChromeAdapter,\n public params?: StartChatParams,\n public requestOptions?: RequestOptions\n ) {\n this._apiSettings = apiSettings;\n if (params?.history) {\n validateChatHistory(params.history);\n this._history = params.history;\n }\n }\n\n /**\n * Gets the chat history so far. 
Blocked prompts are not added to history.\n * Neither blocked candidates nor the prompts that generated them are added\n * to history.\n */\n async getHistory(): Promise<Content[]> {\n await this._sendPromise;\n return this._history;\n }\n\n /**\n * Sends a chat message and receives a non-streaming\n * {@link GenerateContentResult}\n */\n async sendMessage(\n request: string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n let finalResult = {} as GenerateContentResult;\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() =>\n generateContent(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n )\n )\n .then(result => {\n if (\n result.response.candidates &&\n result.response.candidates.length > 0\n ) {\n this._history.push(newContent);\n const responseContent: Content = {\n parts: result.response.candidates?.[0].content.parts || [],\n // Response seems to come back without a role set.\n role: result.response.candidates?.[0].content.role || 'model'\n };\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(result.response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`\n );\n }\n }\n finalResult = result;\n });\n await this._sendPromise;\n return finalResult;\n }\n\n /**\n * Sends a chat message and receives the response as a\n * {@link GenerateContentStreamResult} containing an iterable stream\n * and a response promise.\n */\n async sendMessageStream(\n request: string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n await this._sendPromise;\n const newContent = formatNewContent(request);\n const generateContentRequest: GenerateContentRequest = {\n safetySettings: this.params?.safetySettings,\n generationConfig: this.params?.generationConfig,\n tools: this.params?.tools,\n toolConfig: this.params?.toolConfig,\n systemInstruction: this.params?.systemInstruction,\n contents: [...this._history, newContent]\n };\n const streamPromise = generateContentStream(\n this._apiSettings,\n this.model,\n generateContentRequest,\n this.chromeAdapter,\n this.requestOptions\n );\n\n // Add onto the chain.\n this._sendPromise = this._sendPromise\n .then(() => streamPromise)\n // This must be handled to avoid unhandled rejection, but jump\n // to the final catch block with a label to not log this error.\n .catch(_ignored => {\n throw new Error(SILENT_ERROR);\n })\n .then(streamResult => streamResult.response)\n .then(response => {\n if (response.candidates && response.candidates.length > 0) {\n this._history.push(newContent);\n const responseContent = { ...response.candidates[0].content };\n // Response seems to come back without a role set.\n if (!responseContent.role) {\n responseContent.role = 'model';\n }\n this._history.push(responseContent);\n } else {\n const blockErrorMessage = formatBlockErrorMessage(response);\n if (blockErrorMessage) {\n logger.warn(\n `sendMessageStream() was unsuccessful. ${blockErrorMessage}. 
Inspect response object for details.`\n );\n }\n }\n })\n .catch(e => {\n // Errors in streamPromise are already catchable by the user as\n // streamPromise is returned.\n // Avoid duplicating the error message in logs.\n if (e.message !== SILENT_ERROR) {\n // Users do not have access to _sendPromise to catch errors\n // downstream from streamPromise, so they should not throw.\n logger.error(e);\n }\n });\n return streamPromise;\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport {\n CountTokensRequest,\n CountTokensResponse,\n InferenceMode,\n RequestOptions,\n AIErrorCode\n} from '../types';\nimport { Task, makeRequest } from '../requests/request';\nimport { ApiSettings } from '../types/internal';\nimport * as GoogleAIMapper from '../googleai-mappers';\nimport { BackendType } from '../public-types';\nimport { ChromeAdapter } from '../types/chrome-adapter';\nimport { ChromeAdapterImpl } from './chrome-adapter';\n\nexport async function countTokensOnCloud(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n let body: string = '';\n if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n const mappedParams = GoogleAIMapper.mapCountTokensRequest(params, model);\n body = JSON.stringify(mappedParams);\n } else {\n body = JSON.stringify(params);\n }\n const response = await makeRequest(\n model,\n Task.COUNT_TOKENS,\n apiSettings,\n false,\n body,\n requestOptions\n );\n return response.json();\n}\n\nexport async function countTokens(\n apiSettings: ApiSettings,\n model: string,\n params: CountTokensRequest,\n chromeAdapter?: ChromeAdapter,\n requestOptions?: RequestOptions\n): Promise<CountTokensResponse> {\n if (\n (chromeAdapter as ChromeAdapterImpl)?.mode === InferenceMode.ONLY_ON_DEVICE\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'countTokens() is not supported for on-device models.'\n );\n }\n return countTokensOnCloud(apiSettings, model, params, requestOptions);\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n generateContent,\n generateContentStream\n} from '../methods/generate-content';\nimport {\n Content,\n CountTokensRequest,\n CountTokensResponse,\n GenerateContentRequest,\n GenerateContentResult,\n GenerateContentStreamResult,\n GenerationConfig,\n ModelParams,\n 
Part,\n RequestOptions,\n SafetySetting,\n StartChatParams,\n Tool,\n ToolConfig\n} from '../types';\nimport { ChatSession } from '../methods/chat-session';\nimport { countTokens } from '../methods/count-tokens';\nimport {\n formatGenerateContentInput,\n formatSystemInstruction\n} from '../requests/request-helpers';\nimport { AI } from '../public-types';\nimport { AIModel } from './ai-model';\nimport { ChromeAdapter } from '../types/chrome-adapter';\n\n/**\n * Class for generative model APIs.\n * @public\n */\nexport class GenerativeModel extends AIModel {\n generationConfig: GenerationConfig;\n safetySettings: SafetySetting[];\n requestOptions?: RequestOptions;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n constructor(\n ai: AI,\n modelParams: ModelParams,\n requestOptions?: RequestOptions,\n private chromeAdapter?: ChromeAdapter\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.safetySettings = modelParams.safetySettings || [];\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n this.requestOptions = requestOptions || {};\n }\n\n /**\n * Makes a single non-streaming call to the model\n * and returns an object containing a single {@link GenerateContentResponse}.\n */\n async generateContent(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContent(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Makes a single streaming call to the model\n * and returns an object containing an iterable stream that iterates\n * over all chunks in the streaming response as well as\n * a promise that returns the final aggregated response.\n */\n async generateContentStream(\n request: GenerateContentRequest | string | Array<string | Part>\n ): Promise<GenerateContentStreamResult> {\n const formattedParams = formatGenerateContentInput(request);\n return generateContentStream(\n this._apiSettings,\n this.model,\n {\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n ...formattedParams\n },\n this.chromeAdapter,\n this.requestOptions\n );\n }\n\n /**\n * Gets a new {@link ChatSession} instance which can be used for\n * multi-turn chats.\n */\n startChat(startChatParams?: StartChatParams): ChatSession {\n return new ChatSession(\n this._apiSettings,\n this.model,\n this.chromeAdapter,\n {\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n generationConfig: this.generationConfig,\n safetySettings: this.safetySettings,\n /**\n * Overrides params inherited from GenerativeModel with those explicitly set in the\n * StartChatParams. 
For example, if startChatParams.generationConfig is set, it'll override\n * this.generationConfig.\n */\n ...startChatParams\n },\n this.requestOptions\n );\n }\n\n /**\n * Counts the tokens in the provided request.\n */\n async countTokens(\n request: CountTokensRequest | string | Array<string | Part>\n ): Promise<CountTokensResponse> {\n const formattedParams = formatGenerateContentInput(request);\n return countTokens(\n this._apiSettings,\n this.model,\n formattedParams,\n this.chromeAdapter\n );\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n AIErrorCode,\n FunctionResponse,\n GenerativeContentBlob,\n LiveResponseType,\n LiveServerContent,\n LiveServerToolCall,\n LiveServerToolCallCancellation,\n Part\n} from '../public-types';\nimport { formatNewContent } from '../requests/request-helpers';\nimport { AIError } from '../errors';\nimport { WebSocketHandler } from '../websocket';\nimport { logger } from '../logger';\nimport {\n _LiveClientContent,\n _LiveClientRealtimeInput,\n _LiveClientToolResponse\n} from '../types/live-responses';\n\n/**\n * Represents an active, real-time, bidirectional conversation with the model.\n *\n * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.\n *\n * @beta\n */\nexport class LiveSession {\n /**\n * Indicates whether this Live session is closed.\n *\n * @beta\n */\n isClosed = false;\n /**\n * Indicates whether this Live session is being controlled by an `AudioConversationController`.\n *\n * @beta\n */\n inConversation = false;\n\n /**\n * @internal\n */\n constructor(\n private webSocketHandler: WebSocketHandler,\n private serverMessages: AsyncGenerator<unknown>\n ) {}\n\n /**\n * Sends content to the server.\n *\n * @param request - The message to send to the model.\n * @param turnComplete - Indicates if the turn is complete. 
Defaults to false.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async send(\n request: string | Array<string | Part>,\n turnComplete = true\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const newContent = formatNewContent(request);\n\n const message: _LiveClientContent = {\n clientContent: {\n turns: [newContent],\n turnComplete\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends text to the server in realtime.\n *\n * @example\n * ```javascript\n * liveSession.sendTextRealtime(\"Hello, how are you?\");\n * ```\n *\n * @param text - The text data to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendTextRealtime(text: string): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n text\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends audio data to the server in realtime.\n *\n * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz\n * little-endian.\n *\n * @example\n * ```javascript\n * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.\n * const blob = { mimeType: \"audio/pcm\", data: pcmData };\n * liveSession.sendAudioRealtime(blob);\n * ```\n *\n * @param blob - The base64-encoded PCM data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendAudioRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n audio: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends video data to the server in realtime.\n *\n * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It\n * is recommended to set `mimeType` to `image/jpeg`.\n *\n * @example\n * ```javascript\n * // const videoFrame = ... 
base64-encoded JPEG data\n * const blob = { mimeType: \"image/jpeg\", data: videoFrame };\n * liveSession.sendVideoRealtime(blob);\n * ```\n * @param blob - The base64-encoded video data to send to the server in realtime.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendVideoRealtime(blob: GenerativeContentBlob): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientRealtimeInput = {\n realtimeInput: {\n video: blob\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Sends function responses to the server.\n *\n * @param functionResponses - The function responses to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendFunctionResponses(\n functionResponses: FunctionResponse[]\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const message: _LiveClientToolResponse = {\n toolResponse: {\n functionResponses\n }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n }\n\n /**\n * Yields messages received from the server.\n * This can only be used by one consumer at a time.\n *\n * @returns An `AsyncGenerator` that yields server messages as they arrive.\n * @throws If the session is already closed, or if we receive a response that we don't support.\n *\n * @beta\n */\n async *receive(): AsyncGenerator<\n LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation\n > {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot read from a Live session that is closed. Try starting a new Live session.'\n );\n }\n for await (const message of this.serverMessages) {\n if (message && typeof message === 'object') {\n if (LiveResponseType.SERVER_CONTENT in message) {\n yield {\n type: 'serverContent',\n ...(message as { serverContent: Omit<LiveServerContent, 'type'> })\n .serverContent\n } as LiveServerContent;\n } else if (LiveResponseType.TOOL_CALL in message) {\n yield {\n type: 'toolCall',\n ...(message as { toolCall: Omit<LiveServerToolCall, 'type'> })\n .toolCall\n } as LiveServerToolCall;\n } else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) {\n yield {\n type: 'toolCallCancellation',\n ...(\n message as {\n toolCallCancellation: Omit<\n LiveServerToolCallCancellation,\n 'type'\n >;\n }\n ).toolCallCancellation\n } as LiveServerToolCallCancellation;\n } else {\n logger.warn(\n `Received an unknown message type from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n } else {\n logger.warn(\n `Received an invalid message from the server: ${JSON.stringify(\n message\n )}`\n );\n }\n }\n }\n\n /**\n * Closes this session.\n * All methods on this session will throw an error once this resolves.\n *\n * @beta\n */\n async close(): Promise<void> {\n if (!this.isClosed) {\n this.isClosed = true;\n await this.webSocketHandler.close(1000, 'Client closed session.');\n }\n }\n\n /**\n * Sends realtime input to the server.\n *\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * @param mediaChunks - The media chunks to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed 
and cannot be used.'\n );\n }\n\n // The backend does not support sending more than one mediaChunk in one message.\n // Work around this limitation by sending mediaChunks in separate messages.\n mediaChunks.forEach(mediaChunk => {\n const message: _LiveClientRealtimeInput = {\n realtimeInput: { mediaChunks: [mediaChunk] }\n };\n this.webSocketHandler.send(JSON.stringify(message));\n });\n }\n\n /**\n * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.\n *\n * Sends a stream of {@link GenerativeContentBlob}.\n *\n * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.\n * @throws If this session has been closed.\n *\n * @beta\n */\n async sendMediaStream(\n mediaChunkStream: ReadableStream<GenerativeContentBlob>\n ): Promise<void> {\n if (this.isClosed) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'This LiveSession has been closed and cannot be used.'\n );\n }\n\n const reader = mediaChunkStream.getReader();\n while (true) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n break;\n } else if (!value) {\n throw new Error('Missing chunk in reader, but reader is not done.');\n }\n\n await this.sendMediaChunks([value]);\n } catch (e) {\n // Re-throw any errors that occur during stream consumption or sending.\n const message =\n e instanceof Error ? e.message : 'Error processing media stream.';\n throw new AIError(AIErrorCode.REQUEST_ERROR, message);\n }\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIModel } from './ai-model';\nimport { LiveSession } from '../methods/live-session';\nimport { AIError } from '../errors';\nimport {\n AI,\n AIErrorCode,\n BackendType,\n Content,\n LiveGenerationConfig,\n LiveModelParams,\n Tool,\n ToolConfig\n} from '../public-types';\nimport { WebSocketHandler } from '../websocket';\nimport { WebSocketUrl } from '../requests/request';\nimport { formatSystemInstruction } from '../requests/request-helpers';\nimport { _LiveClientSetup } from '../types/live-responses';\n\n/**\n * Class for Live generative model APIs. 
The Live API enables low-latency, two-way multimodal\n * interactions with Gemini.\n *\n * This class should only be instantiated with {@link getLiveGenerativeModel}.\n *\n * @beta\n */\nexport class LiveGenerativeModel extends AIModel {\n generationConfig: LiveGenerationConfig;\n tools?: Tool[];\n toolConfig?: ToolConfig;\n systemInstruction?: Content;\n\n /**\n * @internal\n */\n constructor(\n ai: AI,\n modelParams: LiveModelParams,\n /**\n * @internal\n */\n private _webSocketHandler: WebSocketHandler\n ) {\n super(ai, modelParams.model);\n this.generationConfig = modelParams.generationConfig || {};\n this.tools = modelParams.tools;\n this.toolConfig = modelParams.toolConfig;\n this.systemInstruction = formatSystemInstruction(\n modelParams.systemInstruction\n );\n }\n\n /**\n * Starts a {@link LiveSession}.\n *\n * @returns A {@link LiveSession}.\n * @throws If the connection failed to be established with the server.\n *\n * @beta\n */\n async connect(): Promise<LiveSession> {\n const url = new WebSocketUrl(this._apiSettings);\n await this._webSocketHandler.connect(url.toString());\n\n let fullModelPath: string;\n if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) {\n fullModelPath = `projects/${this._apiSettings.project}/${this.model}`;\n } else {\n fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`;\n }\n\n // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API,\n // but the backend expects them to be in the `setup` message.\n const {\n inputAudioTranscription,\n outputAudioTranscription,\n ...generationConfig\n } = this.generationConfig;\n\n const setupMessage: _LiveClientSetup = {\n setup: {\n model: fullModelPath,\n generationConfig,\n tools: this.tools,\n toolConfig: this.toolConfig,\n systemInstruction: this.systemInstruction,\n inputAudioTranscription,\n outputAudioTranscription\n }\n };\n\n try {\n // Begin listening for server messages, and begin the handshake by sending the 'setupMessage'\n const serverMessages = this._webSocketHandler.listen();\n this._webSocketHandler.send(JSON.stringify(setupMessage));\n\n // Verify we received the handshake response 'setupComplete'\n const firstMessage = (await serverMessages.next()).value;\n if (\n !firstMessage ||\n !(typeof firstMessage === 'object') ||\n !('setupComplete' in firstMessage)\n ) {\n await this._webSocketHandler.close(1011, 'Handshake failure');\n throw new AIError(\n AIErrorCode.RESPONSE_ERROR,\n 'Server connection handshake failed. 
The server did not respond with a setupComplete message.'\n );\n }\n\n return new LiveSession(this._webSocketHandler, serverMessages);\n } catch (e) {\n // Ensure connection is closed on any setup error\n await this._webSocketHandler.close();\n throw e;\n }\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AI } from '../public-types';\nimport { Task, makeRequest } from '../requests/request';\nimport { createPredictRequestBody } from '../requests/request-helpers';\nimport { handlePredictResponse } from '../requests/response-helpers';\nimport {\n ImagenGCSImage,\n ImagenGenerationConfig,\n ImagenInlineImage,\n RequestOptions,\n ImagenModelParams,\n ImagenGenerationResponse,\n ImagenSafetySettings\n} from '../types';\nimport { AIModel } from './ai-model';\n\n/**\n * Class for Imagen model APIs.\n *\n * This class provides methods for generating images using the Imagen model.\n *\n * @example\n * ```javascript\n * const imagen = new ImagenModel(\n * ai,\n * {\n * model: 'imagen-3.0-generate-002'\n * }\n * );\n *\n * const response = await imagen.generateImages('A photo of a cat');\n * if (response.images.length > 0) {\n * console.log(response.images[0].bytesBase64Encoded);\n * }\n * ```\n *\n * @public\n */\nexport class ImagenModel extends AIModel {\n /**\n * The Imagen generation configuration.\n */\n generationConfig?: ImagenGenerationConfig;\n /**\n * Safety settings for filtering inappropriate content.\n */\n safetySettings?: ImagenSafetySettings;\n\n /**\n * Constructs a new instance of the {@link ImagenModel} class.\n *\n * @param ai - an {@link AI} instance.\n * @param modelParams - Parameters to use when making requests to Imagen.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n */\n constructor(\n ai: AI,\n modelParams: ImagenModelParams,\n public requestOptions?: RequestOptions\n ) {\n const { model, generationConfig, safetySettings } = modelParams;\n super(ai, model);\n this.generationConfig = generationConfig;\n this.safetySettings = safetySettings;\n }\n\n /**\n * Generates images using the Imagen model and returns them as\n * base64-encoded strings.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the generated images.\n *\n * @throws If the request to generate images fails. 
This happens if the\n * prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n *\n * @public\n */\n async generateImages(\n prompt: string\n ): Promise<ImagenGenerationResponse<ImagenInlineImage>> {\n const body = createPredictRequestBody(prompt, {\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenInlineImage>(response);\n }\n\n /**\n * Generates images to Cloud Storage for Firebase using the Imagen model.\n *\n * @internal This method is temporarily internal.\n *\n * @param prompt - A text prompt describing the image(s) to generate.\n * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket.\n * This should be a directory. For example, `gs://my-bucket/my-directory/`.\n * @returns A promise that resolves to an {@link ImagenGenerationResponse}\n * object containing the URLs of the generated images.\n *\n * @throws If the request fails to generate images fails. This happens if\n * the prompt is blocked.\n *\n * @remarks\n * If the prompt was not blocked, but one or more of the generated images were filtered, the\n * returned object will have a `filteredReason` property.\n * If all images are filtered, the `images` array will be empty.\n */\n async generateImagesGCS(\n prompt: string,\n gcsURI: string\n ): Promise<ImagenGenerationResponse<ImagenGCSImage>> {\n const body = createPredictRequestBody(prompt, {\n gcsURI,\n ...this.generationConfig,\n ...this.safetySettings\n });\n const response = await makeRequest(\n this.model,\n Task.PREDICT,\n this._apiSettings,\n /* stream */ false,\n JSON.stringify(body),\n this.requestOptions\n );\n return handlePredictResponse<ImagenGCSImage>(response);\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from './errors';\nimport { logger } from './logger';\nimport { AIErrorCode } from './types';\n\n/**\n * A standardized interface for interacting with a WebSocket connection.\n * This abstraction allows the SDK to use the appropriate WebSocket implementation\n * for the current JS environment (Browser vs. 
Node) without\n * changing the core logic of the `LiveSession`.\n * @internal\n */\n\nexport interface WebSocketHandler {\n /**\n * Establishes a connection to the given URL.\n *\n * @param url The WebSocket URL (e.g., wss://...).\n * @returns A promise that resolves on successful connection or rejects on failure.\n */\n connect(url: string): Promise<void>;\n\n /**\n * Sends data over the WebSocket.\n *\n * @param data The string or binary data to send.\n */\n send(data: string | ArrayBuffer): void;\n\n /**\n * Returns an async generator that yields parsed JSON objects from the server.\n * The yielded type is `unknown` because the handler cannot guarantee the shape of the data.\n * The consumer is responsible for type validation.\n * The generator terminates when the connection is closed.\n *\n * @returns A generator that allows consumers to pull messages using a `for await...of` loop.\n */\n listen(): AsyncGenerator<unknown>;\n\n /**\n * Closes the WebSocket connection.\n *\n * @param code - A numeric status code explaining why the connection is closing.\n * @param reason - A human-readable string explaining why the connection is closing.\n */\n close(code?: number, reason?: string): Promise<void>;\n}\n\n/**\n * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.\n *\n * @internal\n */\nexport class WebSocketHandlerImpl implements WebSocketHandler {\n private ws?: WebSocket;\n\n constructor() {\n if (typeof WebSocket === 'undefined') {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'The WebSocket API is not available in this environment. ' +\n 'The \"Live\" feature is not supported here. It is supported in ' +\n 'modern browser windows, Web Workers with WebSocket support, and Node >= 22.'\n );\n }\n }\n\n connect(url: string): Promise<void> {\n return new Promise((resolve, reject) => {\n this.ws = new WebSocket(url);\n this.ws.binaryType = 'blob'; // Only important to set in Node\n this.ws.addEventListener('open', () => resolve(), { once: true });\n this.ws.addEventListener(\n 'error',\n () =>\n reject(\n new AIError(\n AIErrorCode.FETCH_ERROR,\n `Error event raised on WebSocket`\n )\n ),\n { once: true }\n );\n this.ws!.addEventListener('close', (closeEvent: CloseEvent) => {\n if (closeEvent.reason) {\n logger.warn(\n `WebSocket connection closed by server. Reason: '${closeEvent.reason}'`\n );\n }\n });\n });\n }\n\n send(data: string | ArrayBuffer): void {\n if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {\n throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.');\n }\n this.ws.send(data);\n }\n\n async *listen(): AsyncGenerator<unknown> {\n if (!this.ws) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'WebSocket is not connected.'\n );\n }\n\n const messageQueue: unknown[] = [];\n const errorQueue: Error[] = [];\n let resolvePromise: (() => void) | null = null;\n let isClosed = false;\n\n const messageListener = async (event: MessageEvent): Promise<void> => {\n let data: string;\n if (event.data instanceof Blob) {\n data = await event.data.text();\n } else if (typeof event.data === 'string') {\n data = event.data;\n } else {\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Failed to parse WebSocket response. 
Expected data to be a Blob or string, but was ${typeof event.data}.`\n )\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n return;\n }\n\n try {\n const obj = JSON.parse(data) as unknown;\n messageQueue.push(obj);\n } catch (e) {\n const err = e as Error;\n errorQueue.push(\n new AIError(\n AIErrorCode.PARSE_FAILED,\n `Error parsing WebSocket message to JSON: ${err.message}`\n )\n );\n }\n\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const errorListener = (): void => {\n errorQueue.push(\n new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.')\n );\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n };\n\n const closeListener = (event: CloseEvent): void => {\n if (event.reason) {\n logger.warn(\n `WebSocket connection closed by the server with reason: ${event.reason}`\n );\n }\n isClosed = true;\n if (resolvePromise) {\n resolvePromise();\n resolvePromise = null;\n }\n // Clean up listeners to prevent memory leaks\n this.ws?.removeEventListener('message', messageListener);\n this.ws?.removeEventListener('close', closeListener);\n this.ws?.removeEventListener('error', errorListener);\n };\n\n this.ws.addEventListener('message', messageListener);\n this.ws.addEventListener('close', closeListener);\n this.ws.addEventListener('error', errorListener);\n\n while (!isClosed) {\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n if (messageQueue.length > 0) {\n yield messageQueue.shift()!;\n } else {\n await new Promise<void>(resolve => {\n resolvePromise = resolve;\n });\n }\n }\n\n // If the loop terminated because isClosed is true, check for any final errors\n if (errorQueue.length > 0) {\n const error = errorQueue.shift()!;\n throw error;\n }\n }\n\n close(code?: number, reason?: string): Promise<void> {\n return new Promise(resolve => {\n if (!this.ws) {\n return resolve();\n }\n\n this.ws.addEventListener('close', () => resolve(), { once: true });\n // Calling 'close' during these states results in an error.\n if (\n this.ws.readyState === WebSocket.CLOSED ||\n this.ws.readyState === WebSocket.CONNECTING\n ) {\n return resolve();\n }\n\n if (this.ws.readyState !== WebSocket.CLOSING) {\n this.ws.close(code, reason);\n }\n });\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { AIErrorCode } from '../types';\nimport {\n SchemaInterface,\n SchemaType,\n SchemaParams,\n SchemaRequest\n} from '../types/schema';\n\n/**\n * Parent class encompassing all Schema types, with static methods that\n * allow building specific Schema types. This class can be converted with\n * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.\n * (This string conversion is automatically done when calling SDK methods.)\n * @public\n */\nexport abstract class Schema implements SchemaInterface {\n /**\n * Optional. 
The type of the property.\n * This can only be undefined when using `anyOf` schemas, which do not have an\n * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.\n */\n type?: SchemaType;\n /** Optional. The format of the property.\n * Supported formats:<br/>\n * <ul>\n * <li>for NUMBER type: \"float\", \"double\"</li>\n * <li>for INTEGER type: \"int32\", \"int64\"</li>\n * <li>for STRING type: \"email\", \"byte\", etc</li>\n * </ul>\n */\n format?: string;\n /** Optional. The description of the property. */\n description?: string;\n /** Optional. The items of the property. */\n items?: SchemaInterface;\n /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n minItems?: number;\n /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */\n maxItems?: number;\n /** Optional. Whether the property is nullable. Defaults to false. */\n nullable: boolean;\n /** Optional. The example of the property. */\n example?: unknown;\n /**\n * Allows user to add other schema properties that have not yet\n * been officially added to the SDK.\n */\n [key: string]: unknown;\n\n constructor(schemaParams: SchemaInterface) {\n // TODO(dlarocque): Enforce this with union types\n if (!schemaParams.type && !schemaParams.anyOf) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"A schema must have either a 'type' or an 'anyOf' array of sub-schemas.\"\n );\n }\n // eslint-disable-next-line guard-for-in\n for (const paramKey in schemaParams) {\n this[paramKey] = schemaParams[paramKey];\n }\n // Ensure these are explicitly set to avoid TS errors.\n this.type = schemaParams.type;\n this.format = schemaParams.hasOwnProperty('format')\n ? schemaParams.format\n : undefined;\n this.nullable = schemaParams.hasOwnProperty('nullable')\n ? 
!!schemaParams.nullable\n : false;\n }\n\n /**\n * Defines how this Schema should be serialized as JSON.\n * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj: { type?: SchemaType; [key: string]: unknown } = {\n type: this.type\n };\n for (const prop in this) {\n if (this.hasOwnProperty(prop) && this[prop] !== undefined) {\n if (prop !== 'required' || this.type === SchemaType.OBJECT) {\n obj[prop] = this[prop];\n }\n }\n }\n return obj as SchemaRequest;\n }\n\n static array(arrayParams: SchemaParams & { items: Schema }): ArraySchema {\n return new ArraySchema(arrayParams, arrayParams.items);\n }\n\n static object(\n objectParams: SchemaParams & {\n properties: {\n [k: string]: Schema;\n };\n optionalProperties?: string[];\n }\n ): ObjectSchema {\n return new ObjectSchema(\n objectParams,\n objectParams.properties,\n objectParams.optionalProperties\n );\n }\n\n // eslint-disable-next-line id-blacklist\n static string(stringParams?: SchemaParams): StringSchema {\n return new StringSchema(stringParams);\n }\n\n static enumString(\n stringParams: SchemaParams & { enum: string[] }\n ): StringSchema {\n return new StringSchema(stringParams, stringParams.enum);\n }\n\n static integer(integerParams?: SchemaParams): IntegerSchema {\n return new IntegerSchema(integerParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static number(numberParams?: SchemaParams): NumberSchema {\n return new NumberSchema(numberParams);\n }\n\n // eslint-disable-next-line id-blacklist\n static boolean(booleanParams?: SchemaParams): BooleanSchema {\n return new BooleanSchema(booleanParams);\n }\n\n static anyOf(\n anyOfParams: SchemaParams & { anyOf: TypedSchema[] }\n ): AnyOfSchema {\n return new AnyOfSchema(anyOfParams);\n }\n}\n\n/**\n * A type that includes all specific Schema types.\n * @public\n */\nexport type TypedSchema =\n | IntegerSchema\n | NumberSchema\n | StringSchema\n | BooleanSchema\n | ObjectSchema\n | ArraySchema\n | AnyOfSchema;\n\n/**\n * Schema class for \"integer\" types.\n * @public\n */\nexport class IntegerSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.INTEGER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"number\" types.\n * @public\n */\nexport class NumberSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.NUMBER,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"boolean\" types.\n * @public\n */\nexport class BooleanSchema extends Schema {\n constructor(schemaParams?: SchemaParams) {\n super({\n type: SchemaType.BOOLEAN,\n ...schemaParams\n });\n }\n}\n\n/**\n * Schema class for \"string\" types. 
Can be used with or without\n * enum values.\n * @public\n */\nexport class StringSchema extends Schema {\n enum?: string[];\n constructor(schemaParams?: SchemaParams, enumValues?: string[]) {\n super({\n type: SchemaType.STRING,\n ...schemaParams\n });\n this.enum = enumValues;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n if (this.enum) {\n obj['enum'] = this.enum;\n }\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class for \"array\" types.\n * The `items` param should refer to the type of item that can be a member\n * of the array.\n * @public\n */\nexport class ArraySchema extends Schema {\n constructor(schemaParams: SchemaParams, public items: TypedSchema) {\n super({\n type: SchemaType.ARRAY,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.items = this.items.toJSON();\n return obj;\n }\n}\n\n/**\n * Schema class for \"object\" types.\n * The `properties` param must be a map of `Schema` objects.\n * @public\n */\nexport class ObjectSchema extends Schema {\n constructor(\n schemaParams: SchemaParams,\n public properties: {\n [k: string]: TypedSchema;\n },\n public optionalProperties: string[] = []\n ) {\n super({\n type: SchemaType.OBJECT,\n ...schemaParams\n });\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n obj.properties = { ...this.properties };\n const required = [];\n if (this.optionalProperties) {\n for (const propertyKey of this.optionalProperties) {\n if (!this.properties.hasOwnProperty(propertyKey)) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n `Property \"${propertyKey}\" specified in \"optionalProperties\" does not exist.`\n );\n }\n }\n }\n for (const propertyKey in this.properties) {\n if (this.properties.hasOwnProperty(propertyKey)) {\n obj.properties[propertyKey] = this.properties[\n propertyKey\n ].toJSON() as SchemaRequest;\n if (!this.optionalProperties.includes(propertyKey)) {\n required.push(propertyKey);\n }\n }\n }\n if (required.length > 0) {\n obj.required = required;\n }\n delete obj.optionalProperties;\n return obj as SchemaRequest;\n }\n}\n\n/**\n * Schema class representing a value that can conform to any of the provided sub-schemas. 
This is\n * useful when a field can accept multiple distinct types or structures.\n * @public\n */\nexport class AnyOfSchema extends Schema {\n anyOf: TypedSchema[]; // Re-define field to narrow to required type\n constructor(schemaParams: SchemaParams & { anyOf: TypedSchema[] }) {\n if (schemaParams.anyOf.length === 0) {\n throw new AIError(\n AIErrorCode.INVALID_SCHEMA,\n \"The 'anyOf' array must not be empty.\"\n );\n }\n super({\n ...schemaParams,\n type: undefined // anyOf schemas do not have an explicit type\n });\n this.anyOf = schemaParams.anyOf;\n }\n\n /**\n * @internal\n */\n toJSON(): SchemaRequest {\n const obj = super.toJSON();\n // Ensure the 'anyOf' property contains serialized SchemaRequest objects.\n if (this.anyOf && Array.isArray(this.anyOf)) {\n obj.anyOf = (this.anyOf as TypedSchema[]).map(s => s.toJSON());\n }\n return obj;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { logger } from '../logger';\n\n/**\n * Defines the image format for images generated by Imagen.\n *\n * Use this class to specify the desired format (JPEG or PNG) and compression quality\n * for images generated by Imagen. This is typically included as part of\n * {@link ImagenModelParams}.\n *\n * @example\n * ```javascript\n * const imagenModelParams = {\n * // ... 
other ImagenModelParams\n * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.\n * }\n * ```\n *\n * @public\n */\nexport class ImagenImageFormat {\n /**\n * The MIME type.\n */\n mimeType: string;\n /**\n * The level of compression (a number between 0 and 100).\n */\n compressionQuality?: number;\n\n private constructor() {\n this.mimeType = 'image/png';\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a JPEG image.\n *\n * @param compressionQuality - The level of compression (a number between 0 and 100).\n * @returns An {@link ImagenImageFormat} object for a JPEG image.\n *\n * @public\n */\n static jpeg(compressionQuality?: number): ImagenImageFormat {\n if (\n compressionQuality &&\n (compressionQuality < 0 || compressionQuality > 100)\n ) {\n logger.warn(\n `Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`\n );\n }\n return { mimeType: 'image/jpeg', compressionQuality };\n }\n\n /**\n * Creates an {@link ImagenImageFormat} for a PNG image.\n *\n * @returns An {@link ImagenImageFormat} object for a PNG image.\n *\n * @public\n */\n static png(): ImagenImageFormat {\n return { mimeType: 'image/png' };\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { AIError } from '../errors';\nimport { logger } from '../logger';\nimport {\n AIErrorCode,\n FunctionCall,\n FunctionResponse,\n GenerativeContentBlob,\n LiveServerContent\n} from '../types';\nimport { LiveSession } from './live-session';\nimport { Deferred } from '@firebase/util';\n\nconst SERVER_INPUT_SAMPLE_RATE = 16_000;\nconst SERVER_OUTPUT_SAMPLE_RATE = 24_000;\n\nconst AUDIO_PROCESSOR_NAME = 'audio-processor';\n\n/**\n * The JS for an `AudioWorkletProcessor`.\n * This processor is responsible for taking raw audio from the microphone,\n * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread.\n *\n * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor\n *\n * It is defined as a string here so that it can be converted into a `Blob`\n * and loaded at runtime.\n */\nconst audioProcessorWorkletString = `\n class AudioProcessor extends AudioWorkletProcessor {\n constructor(options) {\n super();\n this.targetSampleRate = options.processorOptions.targetSampleRate;\n // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope,\n // representing the native sample rate of the AudioContext.\n this.inputSampleRate = sampleRate;\n }\n\n /**\n * This method is called by the browser's audio engine for each block of audio data.\n * Input is a single input, with a single channel (input[0][0]).\n */\n process(inputs) {\n const input = inputs[0];\n if (input && input.length > 0 && input[0].length > 0) {\n const pcmData = input[0]; // Float32Array of raw audio samples.\n \n // Simple linear interpolation for resampling.\n const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate 
/ this.inputSampleRate));\n const ratio = pcmData.length / resampled.length;\n for (let i = 0; i < resampled.length; i++) {\n resampled[i] = pcmData[Math.floor(i * ratio)];\n }\n\n // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767)\n const resampledInt16 = new Int16Array(resampled.length);\n for (let i = 0; i < resampled.length; i++) {\n const sample = Math.max(-1, Math.min(1, resampled[i]));\n if (sample < 0) {\n resampledInt16[i] = sample * 32768;\n } else {\n resampledInt16[i] = sample * 32767;\n }\n }\n \n this.port.postMessage(resampledInt16);\n }\n // Return true to keep the processor alive and processing the next audio block.\n return true;\n }\n }\n\n // Register the processor with a name that can be used to instantiate it from the main thread.\n registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor);\n`;\n\n/**\n * A controller for managing an active audio conversation.\n *\n * @beta\n */\nexport interface AudioConversationController {\n /**\n * Stops the audio conversation, closes the microphone connection, and\n * cleans up resources. Returns a promise that resolves when cleanup is complete.\n */\n stop: () => Promise<void>;\n}\n\n/**\n * Options for {@link startAudioConversation}.\n *\n * @beta\n */\nexport interface StartAudioConversationOptions {\n /**\n * An async handler that is called when the model requests a function to be executed.\n * The handler should perform the function call and return the result as a `Part`,\n * which will then be sent back to the model.\n */\n functionCallingHandler?: (\n functionCalls: FunctionCall[]\n ) => Promise<FunctionResponse>;\n}\n\n/**\n * Dependencies needed by the {@link AudioConversationRunner}.\n *\n * @internal\n */\ninterface RunnerDependencies {\n audioContext: AudioContext;\n mediaStream: MediaStream;\n sourceNode: MediaStreamAudioSourceNode;\n workletNode: AudioWorkletNode;\n}\n\n/**\n * Encapsulates the core logic of an audio conversation.\n *\n * @internal\n */\nexport class AudioConversationRunner {\n /** A flag to indicate if the conversation has been stopped. */\n private isStopped = false;\n /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */\n private readonly stopDeferred = new Deferred<void>();\n /** A promise that tracks the lifecycle of the main `runReceiveLoop`. */\n private readonly receiveLoopPromise: Promise<void>;\n\n /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */\n private readonly playbackQueue: ArrayBuffer[] = [];\n /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */\n private scheduledSources: AudioBufferSourceNode[] = [];\n /** A high-precision timeline pointer for scheduling gapless audio playback. */\n private nextStartTime = 0;\n /** A mutex to prevent the playback processing loop from running multiple times concurrently. 
*/\n private isPlaybackLoopRunning = false;\n\n constructor(\n private readonly liveSession: LiveSession,\n private readonly options: StartAudioConversationOptions,\n private readonly deps: RunnerDependencies\n ) {\n this.liveSession.inConversation = true;\n\n // Start listening for messages from the server.\n this.receiveLoopPromise = this.runReceiveLoop().finally(() =>\n this.cleanup()\n );\n\n // Set up the handler for receiving processed audio data from the worklet.\n // Message data has been resampled to 16kHz 16-bit PCM.\n this.deps.workletNode.port.onmessage = event => {\n if (this.isStopped) {\n return;\n }\n\n const pcm16 = event.data as Int16Array;\n const base64 = btoa(\n String.fromCharCode.apply(\n null,\n Array.from(new Uint8Array(pcm16.buffer))\n )\n );\n\n const chunk: GenerativeContentBlob = {\n mimeType: 'audio/pcm',\n data: base64\n };\n void this.liveSession.sendAudioRealtime(chunk);\n };\n }\n\n /**\n * Stops the conversation and unblocks the main receive loop.\n */\n async stop(): Promise<void> {\n if (this.isStopped) {\n return;\n }\n this.isStopped = true;\n this.stopDeferred.resolve(); // Unblock the receive loop\n await this.receiveLoopPromise; // Wait for the loop and cleanup to finish\n }\n\n /**\n * Cleans up all audio resources (nodes, stream tracks, context) and marks the\n * session as no longer in a conversation.\n */\n private cleanup(): void {\n this.interruptPlayback(); // Ensure all audio is stopped on final cleanup.\n this.deps.workletNode.port.onmessage = null;\n this.deps.workletNode.disconnect();\n this.deps.sourceNode.disconnect();\n this.deps.mediaStream.getTracks().forEach(track => track.stop());\n if (this.deps.audioContext.state !== 'closed') {\n void this.deps.audioContext.close();\n }\n this.liveSession.inConversation = false;\n }\n\n /**\n * Adds audio data to the queue and ensures the playback loop is running.\n */\n private enqueueAndPlay(audioData: ArrayBuffer): void {\n this.playbackQueue.push(audioData);\n // Will no-op if it's already running.\n void this.processPlaybackQueue();\n }\n\n /**\n * Stops all current and pending audio playback and clears the queue. This is\n * called when the server indicates the model's speech was interrupted with\n * `LiveServerContent.modelTurn.interrupted`.\n */\n private interruptPlayback(): void {\n // Stop all sources that have been scheduled. 
The onended event will fire for each,\n // which will clean up the scheduledSources array.\n [...this.scheduledSources].forEach(source => source.stop(0));\n\n // Clear the internal buffer of unprocessed audio chunks.\n this.playbackQueue.length = 0;\n\n // Reset the playback clock to start fresh.\n this.nextStartTime = this.deps.audioContext.currentTime;\n }\n\n /**\n * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.\n */\n private async processPlaybackQueue(): Promise<void> {\n if (this.isPlaybackLoopRunning) {\n return;\n }\n this.isPlaybackLoopRunning = true;\n\n while (this.playbackQueue.length > 0 && !this.isStopped) {\n const pcmRawBuffer = this.playbackQueue.shift()!;\n try {\n const pcm16 = new Int16Array(pcmRawBuffer);\n const frameCount = pcm16.length;\n\n const audioBuffer = this.deps.audioContext.createBuffer(\n 1,\n frameCount,\n SERVER_OUTPUT_SAMPLE_RATE\n );\n\n // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API.\n const channelData = audioBuffer.getChannelData(0);\n for (let i = 0; i < frameCount; i++) {\n channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0]\n }\n\n const source = this.deps.audioContext.createBufferSource();\n source.buffer = audioBuffer;\n source.connect(this.deps.audioContext.destination);\n\n // Track the source and set up a handler to remove it from tracking when it finishes.\n this.scheduledSources.push(source);\n source.onended = () => {\n this.scheduledSources = this.scheduledSources.filter(\n s => s !== source\n );\n };\n\n // To prevent gaps, schedule the next chunk to start either now (if we're catching up)\n // or exactly when the previous chunk is scheduled to end.\n this.nextStartTime = Math.max(\n this.deps.audioContext.currentTime,\n this.nextStartTime\n );\n source.start(this.nextStartTime);\n\n // Update the schedule for the *next* chunk.\n this.nextStartTime += audioBuffer.duration;\n } catch (e) {\n logger.error('Error playing audio:', e);\n }\n }\n\n this.isPlaybackLoopRunning = false;\n }\n\n /**\n * The main loop that listens for and processes messages from the server.\n */\n private async runReceiveLoop(): Promise<void> {\n const messageGenerator = this.liveSession.receive();\n while (!this.isStopped) {\n const result = await Promise.race([\n messageGenerator.next(),\n this.stopDeferred.promise\n ]);\n\n if (this.isStopped || !result || result.done) {\n break;\n }\n\n const message = result.value;\n if (message.type === 'serverContent') {\n const serverContent = message as LiveServerContent;\n if (serverContent.interrupted) {\n this.interruptPlayback();\n }\n\n const audioPart = serverContent.modelTurn?.parts.find(part =>\n part.inlineData?.mimeType.startsWith('audio/')\n );\n if (audioPart?.inlineData) {\n const audioData = Uint8Array.from(\n atob(audioPart.inlineData.data),\n c => c.charCodeAt(0)\n ).buffer;\n this.enqueueAndPlay(audioData);\n }\n } else if (message.type === 'toolCall') {\n if (!this.options.functionCallingHandler) {\n logger.warn(\n 'Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. 
Ignoring tool call.'\n );\n } else {\n try {\n const functionResponse = await this.options.functionCallingHandler(\n message.functionCalls\n );\n if (!this.isStopped) {\n void this.liveSession.sendFunctionResponses([functionResponse]);\n }\n } catch (e) {\n throw new AIError(\n AIErrorCode.ERROR,\n `Function calling handler failed: ${(e as Error).message}`\n );\n }\n }\n }\n }\n }\n}\n\n/**\n * Starts a real-time, bidirectional audio conversation with the model. This helper function manages\n * the complexities of microphone access, audio recording, playback, and interruptions.\n *\n * @remarks Important: This function must be called in response to a user gesture\n * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.\n *\n * @example\n * ```javascript\n * const liveSession = await model.connect();\n * let conversationController;\n *\n * // This function must be called from within a click handler.\n * async function startConversation() {\n * try {\n * conversationController = await startAudioConversation(liveSession);\n * } catch (e) {\n * // Handle AI-specific errors\n * if (e instanceof AIError) {\n * console.error(\"AI Error:\", e.message);\n * }\n * // Handle microphone permission and hardware errors\n * else if (e instanceof DOMException) {\n * console.error(\"Microphone Error:\", e.message);\n * }\n * // Handle other unexpected errors\n * else {\n * console.error(\"An unexpected error occurred:\", e);\n * }\n * }\n * }\n *\n * // Later, to stop the conversation:\n * // if (conversationController) {\n * // await conversationController.stop();\n * // }\n * ```\n *\n * @param liveSession - An active {@link LiveSession} instance.\n * @param options - Configuration options for the audio conversation.\n * @returns A `Promise` that resolves with an {@link AudioConversationController}.\n * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).\n * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.\n *\n * @beta\n */\nexport async function startAudioConversation(\n liveSession: LiveSession,\n options: StartAudioConversationOptions = {}\n): Promise<AudioConversationController> {\n if (liveSession.isClosed) {\n throw new AIError(\n AIErrorCode.SESSION_CLOSED,\n 'Cannot start audio conversation on a closed LiveSession.'\n );\n }\n\n if (liveSession.inConversation) {\n throw new AIError(\n AIErrorCode.REQUEST_ERROR,\n 'An audio conversation is already in progress for this session.'\n );\n }\n\n // Check for necessary Web API support.\n if (\n typeof AudioWorkletNode === 'undefined' ||\n typeof AudioContext === 'undefined' ||\n typeof navigator === 'undefined' ||\n !navigator.mediaDevices\n ) {\n throw new AIError(\n AIErrorCode.UNSUPPORTED,\n 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.'\n );\n }\n\n let audioContext: AudioContext | undefined;\n try {\n // 1. Set up the audio context. 
This must be in response to a user gesture.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy\n audioContext = new AudioContext();\n if (audioContext.state === 'suspended') {\n await audioContext.resume();\n }\n\n // 2. Prompt for microphone access and get the media stream.\n // This can throw a variety of permission or hardware-related errors.\n const mediaStream = await navigator.mediaDevices.getUserMedia({\n audio: true\n });\n\n // 3. Load the AudioWorklet processor.\n // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet\n const workletBlob = new Blob([audioProcessorWorkletString], {\n type: 'application/javascript'\n });\n const workletURL = URL.createObjectURL(workletBlob);\n await audioContext.audioWorklet.addModule(workletURL);\n\n // 4. Create the audio graph: Microphone -> Source Node -> Worklet Node\n const sourceNode = audioContext.createMediaStreamSource(mediaStream);\n const workletNode = new AudioWorkletNode(\n audioContext,\n AUDIO_PROCESSOR_NAME,\n {\n processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE }\n }\n );\n sourceNode.connect(workletNode);\n\n // 5. Instantiate and return the runner which manages the conversation.\n const runner = new AudioConversationRunner(liveSession, options, {\n audioContext,\n mediaStream,\n sourceNode,\n workletNode\n });\n\n return { stop: () => runner.stop() };\n } catch (e) {\n // Ensure the audio context is closed on any setup error.\n if (audioContext && audioContext.state !== 'closed') {\n void audioContext.close();\n }\n\n // Re-throw specific, known error types directly. The user may want to handle `DOMException`\n // errors differently (for example, if permission to access audio device was denied).\n if (e instanceof AIError || e instanceof DOMException) {\n throw e;\n }\n\n // Wrap any other unexpected errors in a standard AIError.\n throw new AIError(\n AIErrorCode.ERROR,\n `Failed to initialize audio recording: ${(e as Error).message}`\n );\n }\n}\n","/**\n * @license\n * Copyright 2024 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseApp, getApp, _getProvider } from '@firebase/app';\nimport { Provider } from '@firebase/component';\nimport { getModularInstance } from '@firebase/util';\nimport { AI_TYPE, DEFAULT_HYBRID_IN_CLOUD_MODEL } from './constants';\nimport { AIService } from './service';\nimport { AI, AIOptions } from './public-types';\nimport {\n ImagenModelParams,\n HybridParams,\n ModelParams,\n RequestOptions,\n AIErrorCode,\n LiveModelParams\n} from './types';\nimport { AIError } from './errors';\nimport {\n AIModel,\n GenerativeModel,\n LiveGenerativeModel,\n ImagenModel\n} from './models';\nimport { encodeInstanceIdentifier } from './helpers';\nimport { GoogleAIBackend } from './backend';\nimport { WebSocketHandlerImpl } from './websocket';\n\nexport { ChatSession } from './methods/chat-session';\nexport { LiveSession } from './methods/live-session';\nexport * from 
'./requests/schema-builder';\nexport { ImagenImageFormat } from './requests/imagen-image-format';\nexport { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, AIError };\nexport { Backend, VertexAIBackend, GoogleAIBackend } from './backend';\nexport {\n startAudioConversation,\n AudioConversationController,\n StartAudioConversationOptions\n} from './methods/live-session-helpers';\n\ndeclare module '@firebase/component' {\n interface NameServiceMapping {\n [AI_TYPE]: AIService;\n }\n}\n\n/**\n * Returns the default {@link AI} instance that is associated with the provided\n * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the\n * default settings.\n *\n * @example\n * ```javascript\n * const ai = getAI(app);\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Gemini Developer API (via Google AI).\n * const ai = getAI(app, { backend: new GoogleAIBackend() });\n * ```\n *\n * @example\n * ```javascript\n * // Get an AI instance configured to use the Vertex AI Gemini API.\n * const ai = getAI(app, { backend: new VertexAIBackend() });\n * ```\n *\n * @param app - The {@link @firebase/app#FirebaseApp} to use.\n * @param options - {@link AIOptions} that configure the AI instance.\n * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.\n *\n * @public\n */\nexport function getAI(app: FirebaseApp = getApp(), options?: AIOptions): AI {\n app = getModularInstance(app);\n // Dependencies\n const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE);\n\n const backend = options?.backend ?? new GoogleAIBackend();\n\n const finalOptions: Omit<AIOptions, 'backend'> = {\n useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false\n };\n\n const identifier = encodeInstanceIdentifier(backend);\n const aiInstance = AIProvider.getImmediate({\n identifier\n });\n\n aiInstance.options = finalOptions;\n\n return aiInstance;\n}\n\n/**\n * Returns a {@link GenerativeModel} class with methods for inference\n * and other functionality.\n *\n * @public\n */\nexport function getGenerativeModel(\n ai: AI,\n modelParams: ModelParams | HybridParams,\n requestOptions?: RequestOptions\n): GenerativeModel {\n // Uses the existence of HybridParams.mode to clarify the type of the modelParams input.\n const hybridParams = modelParams as HybridParams;\n let inCloudParams: ModelParams;\n if (hybridParams.mode) {\n inCloudParams = hybridParams.inCloudParams || {\n model: DEFAULT_HYBRID_IN_CLOUD_MODEL\n };\n } else {\n inCloudParams = modelParams as ModelParams;\n }\n\n if (!inCloudParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`\n );\n }\n\n /**\n * An AIService registered by index.node.ts will not have a\n * chromeAdapterFactory() method.\n */\n const chromeAdapter = (ai as AIService).chromeAdapterFactory?.(\n hybridParams.mode,\n typeof window === 'undefined' ? 
undefined : window,\n hybridParams.onDeviceParams\n );\n\n return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);\n}\n\n/**\n * Returns an {@link ImagenModel} class with methods for using Imagen.\n *\n * Only Imagen 3 models (named `imagen-3.0-*`) are supported.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when making Imagen requests.\n * @param requestOptions - Additional options to use when making requests.\n *\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @public\n */\nexport function getImagenModel(\n ai: AI,\n modelParams: ImagenModelParams,\n requestOptions?: RequestOptions\n): ImagenModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`\n );\n }\n return new ImagenModel(ai, modelParams, requestOptions);\n}\n\n/**\n * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.\n *\n * The Live API is only supported in modern browser windows and Node >= 22.\n *\n * @param ai - An {@link AI} instance.\n * @param modelParams - Parameters to use when setting up a {@link LiveSession}.\n * @throws If the `apiKey` or `projectId` fields are missing in your\n * Firebase config.\n *\n * @beta\n */\nexport function getLiveGenerativeModel(\n ai: AI,\n modelParams: LiveModelParams\n): LiveGenerativeModel {\n if (!modelParams.model) {\n throw new AIError(\n AIErrorCode.NO_MODEL,\n `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`\n );\n }\n const webSocketHandler = new WebSocketHandlerImpl();\n return new LiveGenerativeModel(ai, modelParams, webSocketHandler);\n}\n","/**\n * @license\n * Copyright 2021 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport interface Compat<T> {\n _delegate: T;\n}\n\nexport function getModularInstance<ExpService>(\n service: Compat<ExpService> | ExpService\n): ExpService {\n if (service && (service as Compat<ExpService>)._delegate) {\n return (service as Compat<ExpService>)._delegate;\n } else {\n return service as ExpService;\n }\n}\n","/**\n * The Firebase AI Web SDK.\n *\n * @packageDocumentation\n */\n\n/**\n * @license\n * Copyright 2025 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { registerVersion, _registerComponent } from '@firebase/app';\nimport { AI_TYPE } from 
'./constants';\nimport { Component, ComponentType } from '@firebase/component';\nimport { name, version } from '../package.json';\nimport { LanguageModel } from './types/language-model';\nimport { factory } from './factory-browser';\n\ndeclare global {\n interface Window {\n LanguageModel: LanguageModel;\n }\n}\n\nfunction registerAI(): void {\n _registerComponent(\n new Component(AI_TYPE, factory, ComponentType.PUBLIC).setMultipleInstances(\n true\n )\n );\n\n registerVersion(name, version);\n // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation\n registerVersion(name, version, '__BUILD_TARGET__');\n}\n\nregisterAI();\n\nexport * from './api';\nexport * from './public-types';\n"],"names":["Deferred","constructor","this","reject","resolve","promise","Promise","wrapCallback","callback","error","value","catch","length","FirebaseError","Error","code","message","customData","super","name","Object","setPrototypeOf","prototype","captureStackTrace","ErrorFactory","create","service","serviceName","errors","data","fullCode","template","replaceTemplate","replace","PATTERN","_","key","String","fullMessage","Component","instanceFactory","type","multipleInstances","serviceProps","instantiationMode","onInstanceCreated","setInstantiationMode","mode","setMultipleInstances","setServiceProps","props","setInstanceCreatedCallback","LogLevel","levelStringToEnum","debug","DEBUG","verbose","VERBOSE","info","INFO","warn","WARN","ERROR","silent","SILENT","defaultLogLevel","ConsoleMethod","defaultLogHandler","instance","logType","args","logLevel","now","Date","toISOString","method","console","AI_TYPE","DEFAULT_LOCATION","DEFAULT_DOMAIN","PACKAGE_VERSION","version","AIError","customErrorData","toString","POSSIBLE_ROLES","HarmCategory","HARM_CATEGORY_HATE_SPEECH","HARM_CATEGORY_SEXUALLY_EXPLICIT","HARM_CATEGORY_HARASSMENT","HARM_CATEGORY_DANGEROUS_CONTENT","HarmBlockThreshold","BLOCK_LOW_AND_ABOVE","BLOCK_MEDIUM_AND_ABOVE","BLOCK_ONLY_HIGH","BLOCK_NONE","OFF","HarmBlockMethod","SEVERITY","PROBABILITY","HarmProbability","NEGLIGIBLE","LOW","MEDIUM","HIGH","HarmSeverity","HARM_SEVERITY_NEGLIGIBLE","HARM_SEVERITY_LOW","HARM_SEVERITY_MEDIUM","HARM_SEVERITY_HIGH","HARM_SEVERITY_UNSUPPORTED","BlockReason","SAFETY","OTHER","BLOCKLIST","PROHIBITED_CONTENT","FinishReason","STOP","MAX_TOKENS","RECITATION","SPII","MALFORMED_FUNCTION_CALL","FunctionCallingMode","AUTO","ANY","NONE","Modality","MODALITY_UNSPECIFIED","TEXT","IMAGE","VIDEO","AUDIO","DOCUMENT","ResponseModality","InferenceMode","PREFER_ON_DEVICE","ONLY_ON_DEVICE","ONLY_IN_CLOUD","PREFER_IN_CLOUD","InferenceSource","ON_DEVICE","IN_CLOUD","Outcome","UNSPECIFIED","OK","FAILED","DEADLINE_EXCEEDED","Language","PYTHON","URLRetrievalStatus","URL_RETRIEVAL_STATUS_UNSPECIFIED","URL_RETRIEVAL_STATUS_SUCCESS","URL_RETRIEVAL_STATUS_ERROR","URL_RETRIEVAL_STATUS_PAYWALL","URL_RETRIEVAL_STATUS_UNSAFE","LiveResponseType","SERVER_CONTENT","TOOL_CALL","TOOL_CALL_CANCELLATION","AIErrorCode","REQUEST_ERROR","RESPONSE_ERROR","FETCH_ERROR","SESSION_CLOSED","INVALID_CONTENT","API_NOT_ENABLED","INVALID_SCHEMA","NO_API_KEY","NO_APP_ID","NO_MODEL","NO_PROJECT_ID","PARSE_FAILED","UNSUPPORTED","SchemaType","STRING","NUMBER","INTEGER","BOOLEAN","ARRAY","OBJECT","ImagenSafetyFilterLevel","ImagenPersonFilterLevel","BLOCK_ALL","ALLOW_ADULT","ALLOW_ALL","ImagenAspectRatio","SQUARE","LANDSCAPE_3x4","PORTRAIT_4x3","LANDSCAPE_16x9","PORTRAIT_9x16","BackendType","VERTEX_AI","GOOGLE_AI","Backend","backendType","GoogleAIBackend","VertexAIBackend","location","logger","Logger",
"_logLevel","_logHandler","_userLogHandler","val","TypeError","setLogLevel","logHandler","userLogHandler","log","Availability","defaultExpectedInputs","ChromeAdapterImpl","languageModelProvider","onDeviceParams","isDownloading","createOptions","expectedInputs","isAvailable","request","availability","downloadIfAvailable","UNAVAILABLE","DOWNLOADABLE","DOWNLOADING","downloadPromise","AVAILABLE","isOnDeviceRequest","generateContent","session","createSession","contents","all","map","toLanguageModelMessage","text","prompt","promptOptions","toResponse","generateContentStream","stream","promptStreaming","toStreamResponse","countTokens","_request","content","role","part","parts","inlineData","SUPPORTED_MIME_TYPES","indexOf","mimeType","download","finally","languageModelMessageContents","toLanguageModelMessageContent","toLanguageModelMessageRole","formattedImageContent","fetch","imageBlob","blob","createImageBitmap","newSession","oldSession","destroy","json","async","candidates","encoder","TextEncoder","body","pipeThrough","TransformStream","transform","chunk","controller","JSON","stringify","enqueue","encode","chromeAdapterFactory","window","params","LanguageModel","AIService","app","backend","authProvider","appCheckProvider","appCheck","getImmediate","optional","auth","_delete","options","optionsToSet","_options","factory","container","instanceIdentifier","decodeInstanceIdentifier","identifierParts","split","getProvider","AIModel","ai","modelName","apiKey","projectId","appId","_apiSettings","project","automaticDataCollectionEnabled","_isFirebaseServerApp","settings","appCheckToken","token","getAppCheckToken","useLimitedUseAppCheckTokens","getLimitedUseToken","getToken","getAuthToken","model","normalizeModelName","normalizeGoogleAIModelName","normalizeVertexAIModelName","includes","startsWith","Task","RequestUrl","task","apiSettings","requestOptions","url","URL","baseUrl","pathname","apiVersion","modelPath","search","queryParams","URLSearchParams","set","WebSocketUrl","getHeaders","headers","Headers","append","getClientHeaders","loggingTags","push","join","authToken","accessToken","makeRequest","response","fetchTimeoutId","constructRequest","fetchOptions","timeoutMillis","timeout","abortController","AbortController","setTimeout","abort","signal","ok","errorDetails","details","e","status","some","detail","reason","links","description","statusText","err","stack","clearTimeout","hasValidCandidates","hadBadFinishReason","formatBlockErrorMessage","createEnhancedContentResponse","inferenceSource","hasOwnProperty","index","responseWithHelpers","addHelpers","getText","thought","promptFeedback","thoughtSummary","result","undefined","inlineDataParts","getInlineDataParts","functionCalls","getFunctionCalls","functionCall","partFilter","textStrings","badFinishReasons","candidate","finishReason","firstCandidate","finishMessage","blockReason","blockReasonMessage","handlePredictResponse","responseJson","images","filteredReason","predictions","prediction","raiFilteredReason","bytesBase64Encoded","gcsUri","gcsURI","safetyAttributes","mapGenerateContentRequest","generateContentRequest","safetySettings","forEach","safetySetting","generationConfig","topK","roundedTopK","Math","round","mapGenerateContentResponse","googleAIResponse","mapGenerateContentCandidates","mapPromptFeedback","usageMetadata","mappedCandidates","mappedSafetyRatings","citationMetadata","citations","citationSources","safetyRatings","safetyRating","severity","probabilityScore","severityScore","videoMetadata","mappedCandidate","groundingMetadata","urlCo
ntextMetadata","category","probability","blocked","responseLineRE","processStream","responseStream","getResponseStream","inputStream","reader","getReader","ReadableStream","start","currentText","pump","read","then","done","trim","close","parsedResponse","match","parse","substring","TextDecoderStream","fatal","stream1","stream2","tee","generateResponseSequence","getResponsePromise","allResponses","generateContentResponse","aggregateResponses","GoogleAIMapper.mapGenerateContentResponse","enhancedResponse","responses","lastResponse","aggregatedResponse","i","keys","newPart","errorsCausingFallback","callCloudOrDevice","chromeAdapter","onDeviceCall","inCloudCall","callResult","generateContentStreamOnCloud","GoogleAIMapper.mapGenerateContentRequest","STREAM_GENERATE_CONTENT","generateContentOnCloud","GENERATE_CONTENT","processGenerateContentResponse","formatSystemInstruction","input","formatNewContent","newParts","partOrString","assignRoleToPartsAndValidateSendMessageRequest","userContent","functionContent","hasUserContent","hasFunctionContent","formatGenerateContentInput","formattedRequest","systemInstruction","createPredictRequestBody","imageFormat","addWatermark","numberOfImages","negativePrompt","aspectRatio","safetyFilterLevel","personFilterLevel","instances","parameters","storageUri","sampleCount","outputOptions","personGeneration","includeRaiReason","includeSafetyAttributes","VALID_PART_FIELDS","VALID_PARTS_PER_ROLE","user","function","system","VALID_PREVIOUS_CONTENT_ROLES","SILENT_ERROR","ChatSession","_history","_sendPromise","history","validateChatHistory","prevContent","currContent","Array","isArray","countFields","functionResponse","thoughtSignature","executableCode","codeExecutionResult","validParts","getHistory","sendMessage","newContent","tools","toolConfig","finalResult","responseContent","blockErrorMessage","sendMessageStream","streamPromise","_ignored","streamResult","countTokensOnCloud","mappedParams","mapCountTokensRequest","countTokensRequest","GoogleAIMapper.mapCountTokensRequest","COUNT_TOKENS","GenerativeModel","modelParams","formattedParams","startChat","startChatParams","LiveSession","webSocketHandler","serverMessages","isClosed","inConversation","send","turnComplete","clientContent","turns","sendTextRealtime","realtimeInput","sendAudioRealtime","audio","sendVideoRealtime","video","sendFunctionResponses","functionResponses","toolResponse","receive","serverContent","toolCall","toolCallCancellation","sendMediaChunks","mediaChunks","mediaChunk","sendMediaStream","mediaChunkStream","LiveGenerativeModel","_webSocketHandler","connect","fullModelPath","inputAudioTranscription","outputAudioTranscription","setupMessage","setup","listen","firstMessage","next","ImagenModel","generateImages","PREDICT","generateImagesGCS","WebSocketHandlerImpl","WebSocket","ws","binaryType","addEventListener","once","closeEvent","readyState","OPEN","messageQueue","errorQueue","resolvePromise","messageListener","event","Blob","obj","errorListener","closeListener","removeEventListener","shift","CLOSED","CONNECTING","CLOSING","Schema","schemaParams","anyOf","paramKey","format","nullable","toJSON","prop","array","arrayParams","ArraySchema","items","object","objectParams","ObjectSchema","properties","optionalProperties","string","stringParams","StringSchema","enumString","enum","integer","integerParams","IntegerSchema","number","numberParams","NumberSchema","boolean","booleanParams","BooleanSchema","anyOfParams","AnyOfSchema","enumValues","required","propertyKey","s","ImagenImageFormat","jpeg","compressio
nQuality","png","AUDIO_PROCESSOR_NAME","audioProcessorWorkletString","AudioConversationRunner","liveSession","deps","isStopped","stopDeferred","playbackQueue","scheduledSources","nextStartTime","isPlaybackLoopRunning","receiveLoopPromise","runReceiveLoop","cleanup","workletNode","port","onmessage","pcm16","btoa","fromCharCode","apply","from","Uint8Array","buffer","stop","interruptPlayback","disconnect","sourceNode","mediaStream","getTracks","track","audioContext","state","enqueueAndPlay","audioData","processPlaybackQueue","source","currentTime","pcmRawBuffer","Int16Array","frameCount","audioBuffer","createBuffer","channelData","getChannelData","createBufferSource","destination","onended","filter","max","duration","messageGenerator","race","interrupted","audioPart","modelTurn","find","atob","c","charCodeAt","functionCallingHandler","startAudioConversation","AudioWorkletNode","AudioContext","navigator","mediaDevices","resume","getUserMedia","workletBlob","workletURL","createObjectURL","audioWorklet","addModule","createMediaStreamSource","processorOptions","targetSampleRate","runner","DOMException","getAI","getApp","getModularInstance","_delegate","AIProvider","_getProvider","finalOptions","identifier","encodeInstanceIdentifier","aiInstance","getGenerativeModel","hybridParams","inCloudParams","getImagenModel","getLiveGenerativeModel","registerAI","_registerComponent","registerVersion"],"mappings":"2HAiBa,MAAAA,SAIX,WAAAC,GAFAC,KAAAC,OAAoC,OACpCD,KAAAE,QAAqC,OAEnCF,KAAKG,QAAU,IAAIC,SAAQ,CAACF,EAASD,KACnCD,KAAKE,QAAUA,EACfF,KAAKC,OAASA,CAAmC,GAEpD,CAOD,YAAAI,CACEC,GAEA,MAAO,CAACC,EAAOC,KACTD,EACFP,KAAKC,OAAOM,GAEZP,KAAKE,QAAQM,GAES,mBAAbF,IAGTN,KAAKG,QAAQM,OAAM,SAIK,IAApBH,EAASI,OACXJ,EAASC,GAETD,EAASC,EAAOC,GAEnB,CAEJ,ECiBG,MAAOG,sBAAsBC,MAIjC,WAAAb,CAEWc,EACTC,EAEOC,GAEPC,MAAMF,GALGd,KAAIa,KAAJA,EAGFb,KAAUe,WAAVA,EAPAf,KAAIiB,KAdI,gBA6BfC,OAAOC,eAAenB,KAAMW,cAAcS,WAItCR,MAAMS,mBACRT,MAAMS,kBAAkBrB,KAAMsB,aAAaF,UAAUG,OAExD,EAGU,MAAAD,aAIX,WAAAvB,CACmByB,EACAC,EACAC,GAFA1B,KAAOwB,QAAPA,EACAxB,KAAWyB,YAAXA,EACAzB,KAAM0B,OAANA,CACf,CAEJ,MAAAH,CACEV,KACGc,GAEH,MAAMZ,EAAcY,EAAK,IAAoB,CAAA,EACvCC,EAAW,GAAG5B,KAAKwB,WAAWX,IAC9BgB,EAAW7B,KAAK0B,OAAOb,GAEvBC,EAAUe,EAUpB,SAASC,gBAAgBD,EAAkBF,GACzC,OAAOE,EAASE,QAAQC,GAAS,CAACC,EAAGC,KACnC,MAAM1B,EAAQmB,EAAKO,GACnB,OAAgB,MAAT1B,EAAgB2B,OAAO3B,GAAS,IAAI0B,KAAO,GAEtD,CAf+BJ,CAAgBD,EAAUd,GAAc,QAE7DqB,EAAc,GAAGpC,KAAKyB,gBAAgBX,MAAYc,MAIxD,OAFc,IAAIjB,cAAciB,EAAUQ,EAAarB,EAGxD,EAUH,MAAMiB,EAAU,gBC3GH,MAAAK,UAiBX,WAAAtC,CACWkB,EACAqB,EACAC,GAFAvC,KAAIiB,KAAJA,EACAjB,KAAesC,gBAAfA,EACAtC,KAAIuC,KAAJA,EAnBXvC,KAAiBwC,mBAAG,EAIpBxC,KAAYyC,aAAe,GAE3BzC,KAAA0C,kBAA2C,OAE3C1C,KAAiB2C,kBAAwC,IAYrD,CAEJ,oBAAAC,CAAqBC,GAEnB,OADA7C,KAAK0C,kBAAoBG,EAClB7C,IACR,CAED,oBAAA8C,CAAqBN,GAEnB,OADAxC,KAAKwC,kBAAoBA,EAClBxC,IACR,CAED,eAAA+C,CAAgBC,GAEd,OADAhD,KAAKyC,aAAeO,EACbhD,IACR,CAED,0BAAAiD,CAA2B3C,GAEzB,OADAN,KAAK2C,kBAAoBrC,EAClBN,IACR,MCfSkD,GAAZ,SAAYA,GACVA,EAAAA,EAAA,MAAA,GAAA,QACAA,EAAAA,EAAA,QAAA,GAAA,UACAA,EAAAA,EAAA,KAAA,GAAA,OACAA,EAAAA,EAAA,KAAA,GAAA,OACAA,EAAAA,EAAA,MAAA,GAAA,QACAA,EAAAA,EAAA,OAAA,GAAA,QACD,CAPD,CAAYA,IAAAA,EAOX,CAAA,IAED,MAAMC,EAA2D,CAC/DC,MAASF,EAASG,MAClBC,QAAWJ,EAASK,QACpBC,KAAQN,EAASO,KACjBC,KAAQR,EAASS,KACjBpD,MAAS2C,EAASU,MAClBC,OAAUX,EAASY,QAMfC,EAA4Bb,EAASO,KAmBrCO,EAAgB,CACpB,CAACd,EAASG,OAAQ,MAClB,CAACH,EAASK,SAAU,MACpB,CAACL,EAASO,MAAO,OACjB,CAACP,EAASS,MAAO,OACjB,CAACT,EAASU,OAAQ,SAQdK,kBAAgC,CAACC,EAAUC,KAAYC,KAC3D,GAAID,EAAUD,EAASG,SACrB,OAEF,MAAMC,GAAM,IAAIC,MAAOC,cACjBC,EAAST,EAAcG,GAC7B,IAAIM,EAMF,MAAM,IAAI7D,MACR,8DAA8DuD,MANhE
O,QAAQD,GACN,IAAIH,OAASJ,EAASjD,WACnBmD,EAMN,iCCvGI,MAAMO,EAAU,KAEVC,EAAmB,cAEnBC,EAAiB,kCAIjBC,EAAkBC,ECDzB,MAAOC,gBAAgBrE,cAQ3B,WAAAZ,CACWc,EACTC,EACSmE,GAGT,MAEM7C,EAAc,GAFJuC,MAEmB7D,MADlB,GADD6D,KACe9D,OAE/BG,MAAMH,EAAMuB,GARHpC,KAAIa,KAAJA,EAEAb,KAAeiF,gBAAfA,EAYLrE,MAAMS,mBAGRT,MAAMS,kBAAkBrB,KAAMgF,SAOhC9D,OAAOC,eAAenB,KAAMgF,QAAQ5D,WAGpCpB,KAAKkF,SAAW,IAAM9C,CACvB,ECpCU,MAAA+C,EAAiB,CAAC,OAAQ,QAAS,WAAY,UAM/CC,EAAe,CAC1BC,0BAA2B,4BAC3BC,gCAAiC,kCACjCC,yBAA0B,2BAC1BC,gCAAiC,mCAatBC,EAAqB,CAIhCC,oBAAqB,sBAIrBC,uBAAwB,yBAIxBC,gBAAiB,kBAIjBC,WAAY,aAKZC,IAAK,OAeMC,EAAkB,CAI7BC,SAAU,WAIVC,YAAa,eAeFC,EAAkB,CAI7BC,WAAY,aAIZC,IAAK,MAILC,OAAQ,SAIRC,KAAM,QAcKC,EAAe,CAI1BC,yBAA0B,2BAI1BC,kBAAmB,oBAInBC,qBAAsB,uBAItBC,mBAAoB,qBAOpBC,0BAA2B,6BAahBC,EAAc,CAIzBC,OAAQ,SAIRC,MAAO,QAIPC,UAAW,YAIXC,mBAAoB,sBAaTC,EAAe,CAI1BC,KAAM,OAINC,WAAY,aAIZN,OAAQ,SAIRO,WAAY,aAIZN,MAAO,QAIPC,UAAW,YAIXC,mBAAoB,qBAIpBK,KAAM,OAINC,wBAAyB,2BAYdC,EAAsB,CAKjCC,KAAM,OAONC,IAAK,MAKLC,KAAM,QAaKC,EAAW,CAItBC,qBAAsB,uBAItBC,KAAM,OAINC,MAAO,QAIPC,MAAO,QAIPC,MAAO,QAIPC,SAAU,YAcCC,EAAmB,CAK9BL,KAAM,OAKNC,MAAO,QAKPE,MAAO,SAgCIG,EAAgB,CAC3BC,iBAAoB,mBACpBC,eAAkB,iBAClBC,cAAiB,gBACjBC,gBAAmB,mBAeRC,EAAkB,CAC7BC,UAAa,YACbC,SAAY,YAgBDC,EAAU,CACrBC,YAAa,sBACbC,GAAI,aACJC,OAAQ,iBACRC,kBAAmB,6BAeRC,EAAW,CACtBJ,YAAa,uBACbK,OAAQ,UCbGC,EAAqB,CAIhCC,iCAAkC,mCAIlCC,6BAA8B,+BAI9BC,2BAA4B,6BAI5BC,6BAA8B,+BAI9BC,4BAA6B,+BAmLlBC,EAAmB,CAC9BC,eAAgB,gBAChBC,UAAW,WACXC,uBAAwB,wBCtiBbC,EAAc,CAEzBjG,MAAO,QAGPkG,cAAe,gBAGfC,eAAgB,iBAGhBC,YAAa,cAGbC,eAAgB,iBAGhBC,gBAAiB,kBAGjBC,gBAAiB,kBAGjBC,eAAgB,iBAGhBC,WAAY,aAGZC,UAAW,YAGXC,SAAU,WAGVC,cAAe,gBAGfC,aAAc,eAGdC,YAAa,eClFFC,EAAa,CAExBC,OAAQ,SAERC,OAAQ,SAERC,QAAS,UAETC,QAAS,UAETC,MAAO,QAEPC,OAAQ,UC6EGC,EAA0B,CAIrCxF,oBAAqB,sBAIrBC,uBAAwB,yBAIxBC,gBAAiB,kBAOjBC,WAAY,cA0BDsF,EAA0B,CAIrCC,UAAW,aAQXC,YAAa,cAQbC,UAAW,aA6CAC,EAAoB,CAI/BC,OAAU,MAIVC,cAAiB,MAIjBC,aAAgB,MAIhBC,eAAkB,OAIlBC,cAAiB,QClLNC,EAAc,CAKzBC,UAAW,YAMXC,UAAW,aC/CS,MAAAC,QAUpB,WAAAjM,CAAsBwC,GACpBvC,KAAKiM,YAAc1J,CACpB,EAWG,MAAO2J,wBAAwBF,QAInC,WAAAjM,GACEiB,MAAM6K,EAAYE,UACnB,EAWG,MAAOI,wBAAwBH,QAenC,WAAAjM,CAAYqM,EAAmBxH,GAC7B5D,MAAM6K,EAAYC,WAIhB9L,KAAKoM,SAHFA,GACaxH,CAInB,ECvEI,MAAMyH,EAAS,IV0GT,MAAAC,OAOX,WAAAvM,CAAmBkB,GAAAjB,KAAIiB,KAAJA,EAUXjB,KAASuM,UAAGxI,EAsBZ/D,KAAWwM,YAAevI,kBAc1BjE,KAAeyM,gBAAsB,IAzC5C,CAOD,YAAIpI,GACF,OAAOrE,KAAKuM,SACb,CAED,YAAIlI,CAASqI,GACX,KAAMA,KAAOxJ,GACX,MAAM,IAAIyJ,UAAU,kBAAkBD,+BAExC1M,KAAKuM,UAAYG,CAClB,CAGD,WAAAE,CAAYF,GACV1M,KAAKuM,UAA2B,iBAARG,EAAmBvJ,EAAkBuJ,GAAOA,CACrE,CAOD,cAAIG,GACF,OAAO7M,KAAKwM,WACb,CACD,cAAIK,CAAWH,GACb,GAAmB,mBAARA,EACT,MAAM,IAAIC,UAAU,qDAEtB3M,KAAKwM,YAAcE,CACpB,CAMD,kBAAII,GACF,OAAO9M,KAAKyM,eACb,CACD,kBAAIK,CAAeJ,GACjB1M,KAAKyM,gBAAkBC,CACxB,CAMD,KAAAtJ,IAASgB,GACPpE,KAAKyM,iBAAmBzM,KAAKyM,gBAAgBzM,KAAMkD,EAASG,SAAUe,GACtEpE,KAAKwM,YAAYxM,KAAMkD,EAASG,SAAUe,EAC3C,CACD,GAAA2I,IAAO3I,GACLpE,KAAKyM,iBACHzM,KAAKyM,gBAAgBzM,KAAMkD,EAASK,WAAYa,GAClDpE,KAAKwM,YAAYxM,KAAMkD,EAASK,WAAYa,EAC7C,CACD,IAAAZ,IAAQY,GACNpE,KAAKyM,iBAAmBzM,KAAKyM,gBAAgBzM,KAAMkD,EAASO,QAASW,GACrEpE,KAAKwM,YAAYxM,KAAMkD,EAASO,QAASW,EAC1C,CACD,IAAAV,IAAQU,GACNpE,KAAKyM,iBAAmBzM,KAAKyM,gBAAgBzM,KAAMkD,EAASS,QAASS,GACrEpE,KAAKwM,YAAYxM,KAAMkD,EAASS,QAASS,EAC1C,CACD,KAAA7D,IAAS6D,GACPpE,KAAKyM,iBAAmBzM,KAAKyM,gBAAgBzM,KAAMkD,EAASU,SAAUQ,GACtEpE,KAAKwM,YAAYxM,KAAMkD,EAASU,SAAUQ,EAC3C,GU/L8B,sBCyBjC,IAAY4I,GAAZ,SAAYA,GACVA,EAAA,YAAA,cACAA,EAAA,aAAA,eACAA,EAAA,YAAA,cACAA,EAAA,UAAA,WACD,CALD,CAAYA,IAAAA,EAKX,CAAA,ICTD,MAAMC,EAAiD,CAAC,CAAE1K,KAAM,UAOnD,MAAA2K,kBAWX,WAAAnN,CACSoN,EACAtK,EACPuK,GAFOpN,KAAqBmN,sBAAr
BA,EACAnN,KAAI6C,KAAJA,EAVD7C,KAAaqN,eAAG,EAGxBrN,KAAAoN,eAAiC,CAC/BE,cAAe,CACbC,eAAgBN,IAQdG,IACFpN,KAAKoN,eAAiBA,EACjBpN,KAAKoN,eAAeE,cAIbtN,KAAKoN,eAAeE,cAAcC,iBAC5CvN,KAAKoN,eAAeE,cAAcC,eAChCN,GALFjN,KAAKoN,eAAeE,cAAgB,CAClCC,eAAgBN,GAOvB,CAiBD,iBAAMO,CAAYC,GAChB,IAAKzN,KAAK6C,KAIR,OAHAwJ,EAAOjJ,MACL,+DAEK,EAET,GAAIpD,KAAK6C,OAASuF,EAAcG,cAI9B,OAHA8D,EAAOjJ,MACL,qEAEK,EAIT,MAAMsK,QAAqB1N,KAAK2N,sBAEhC,GAAI3N,KAAK6C,OAASuF,EAAcE,eAAgB,CAE9C,GAAIoF,IAAiBV,EAAaY,YAChC,MAAM,IAAI5I,QACR6E,EAAYM,gBACZ,8DAEG,OACLuD,IAAiBV,EAAaa,cAC9BH,IAAiBV,EAAac,cAG9BzB,EAAOjJ,MAAM,4DACPpD,KAAK+N,iBACJ,EAGV,CAGD,OAAIL,IAAiBV,EAAagB,WAChC3B,EAAOjJ,MACL,4DAA4DsK,QAEvD,KAEJR,kBAAkBe,kBAAkBR,KACvCpB,EAAOjJ,MACL,qEAEK,EAIV,CAWD,qBAAM8K,CAAgBT,GACpB,MAAMU,QAAgBnO,KAAKoO,gBACrBC,QAAiBjO,QAAQkO,IAC7Bb,EAAQY,SAASE,IAAIrB,kBAAkBsB,yBAEnCC,QAAaN,EAAQO,OACzBL,EACArO,KAAKoN,eAAeuB,eAEtB,OAAOzB,kBAAkB0B,WAAWH,EACrC,CAWD,2BAAMI,CACJpB,GAEA,MAAMU,QAAgBnO,KAAKoO,gBACrBC,QAAiBjO,QAAQkO,IAC7Bb,EAAQY,SAASE,IAAIrB,kBAAkBsB,yBAEnCM,EAASX,EAAQY,gBACrBV,EACArO,KAAKoN,eAAeuB,eAEtB,OAAOzB,kBAAkB8B,iBAAiBF,EAC3C,CAED,iBAAMG,CAAYC,GAChB,MAAM,IAAIlK,QACR6E,EAAYC,cACZ,yDAEH,CAKO,wBAAOmE,CAAkBR,GAE/B,GAAgC,IAA5BA,EAAQY,SAAS3N,OAEnB,OADA2L,EAAOjJ,MAAM,mDACN,EAGT,IAAK,MAAM+L,KAAW1B,EAAQY,SAAU,CACtC,GAAqB,aAAjBc,EAAQC,KAEV,OADA/C,EAAOjJ,MAAM,sDACN,EAIT,IAAK,MAAMiM,KAAQF,EAAQG,MACzB,GACED,EAAKE,aAGE,IAFPrC,kBAAkBsC,qBAAqBC,QACrCJ,EAAKE,WAAWG,UAMlB,OAHArD,EAAOjJ,MACL,0BAA0BiM,EAAKE,WAAWG,gDAErC,CAGZ,CAED,OAAO,CACR,CAKO,yBAAM/B,GACZ,MAAMD,QAAqB1N,KAAKmN,uBAAuBO,aACrD1N,KAAKoN,eAAeE,gBAOtB,OAJII,IAAiBV,EAAaa,cAChC7N,KAAK2P,WAGAjC,CACR,CAWO,QAAAiC,GACF3P,KAAKqN,gBAGTrN,KAAKqN,eAAgB,EACrBrN,KAAK+N,gBAAkB/N,KAAKmN,uBACxB5L,OAAOvB,KAAKoN,eAAeE,eAC5BsC,SAAQ,KACP5P,KAAKqN,eAAgB,CAAK,IAE/B,CAKO,mCAAamB,CACnBW,GAEA,MAAMU,QAAqCzP,QAAQkO,IACjDa,EAAQG,MAAMf,IAAIrB,kBAAkB4C,gCAEtC,MAAO,CACLV,KAAMlC,kBAAkB6C,2BAA2BZ,EAAQC,MAC3DD,QAASU,EAEZ,CAKO,0CAAaC,CACnBT,GAEA,GAAIA,EAAKZ,KACP,MAAO,CACLlM,KAAM,OACN/B,MAAO6O,EAAKZ,MAET,GAAIY,EAAKE,WAAY,CAC1B,MAAMS,QAA8BC,MAClC,QAAQZ,EAAKE,WAAWG,mBAAmBL,EAAKE,WAAW5N,QAEvDuO,QAAkBF,EAAsBG,OAE9C,MAAO,CACL5N,KAAM,QACN/B,YAHwB4P,kBAAkBF,GAK7C,CACD,MAAM,IAAIlL,QACR6E,EAAYC,cACZ,2DAEH,CAKO,iCAAOiG,CACbX,GAGA,MAAgB,UAATA,EAAmB,YAAc,MACzC,CAYO,mBAAMhB,GACZ,IAAKpO,KAAKmN,sBACR,MAAM,IAAInI,QACR6E,EAAYa,YACZ,wDAGJ,MAAM2F,QAAmBrQ,KAAKmN,sBAAsB5L,OAClDvB,KAAKoN,eAAeE,eAOtB,OALItN,KAAKsQ,YACPtQ,KAAKsQ,WAAWC,UAGlBvQ,KAAKsQ,WAAaD,EACXA,CACR,CAKO,iBAAOzB,CAAWH,GACxB,MAAO,CACL+B,KAAMC,UAAa,CACjBC,WAAY,CACV,CACEvB,QAAS,CACPG,MAAO,CAAC,CAAEb,cAMrB,CAKO,uBAAOO,CAAiBF,GAC9B,MAAM6B,EAAU,IAAIC,YACpB,MAAO,CACLC,KAAM/B,EAAOgC,YACX,IAAIC,gBAAgB,CAClB,SAAAC,CAAUC,EAAOC,GACf,MAAMV,EAAOW,KAAKC,UAAU,CAC1BV,WAAY,CACV,CACEvB,QAAS,CACPC,KAAM,QACNE,MAAO,CAAC,CAAEb,KAAMwC,SAKxBC,EAAWG,QAAQV,EAAQW,OAAO,SAASd,SAC5C,KAIR,EAMa,SAAAe,qBACd1O,EACA2O,EACAC,GAGA,QAAsB,IAAXD,GAA0B3O,EACnC,OAAO,IAAIqK,kBACRsE,EAAkBE,cACnB7O,EACA4O,EAGN,CAtWSvE,kBAAAsC,qBAAuB,CAAC,aAAc,aClBlC,MAAAmC,UAMX,WAAA5R,CACS6R,EACAC,EACPC,EACAC,EACOR,GAJAvR,KAAG4R,IAAHA,EACA5R,KAAO6R,QAAPA,EAGA7R,KAAoBuR,qBAApBA,EAMP,MAAMS,EAAWD,GAAkBE,aAAa,CAAEC,UAAU,IACtDC,EAAOL,GAAcG,aAAa,CAAEC,UAAU,IACpDlS,KAAKmS,KAAOA,GAAQ,KACpBnS,KAAKgS,SAAWA,GAAY,KAG1BhS,KAAKoM,SADHyF,aAAmB1F,gBACL0F,EAAQzF,SAER,EAEnB,CAED,OAAAgG,GACE,OAAOhS,QAAQF,SAChB,CAED,WAAImS,CAAQC,GACVtS,KAAKuS,SAAWD,CACjB,CAED,WAAID,GACF,OAAOrS,KAAKuS,QACb,EC3Ca,SAAAC,QACdC,GACAC,mBAAEA,IAEF,IAAKA,EACH,MAAM,IAAI1N,QACR6E,EAAYjG,MACZ,+CAIJ,MAAMiO,ECQF,SAAUc,yBAAyBD,GACvC,MAAME,EAAkBF,EAAmBG,MAAM,KACjD,GAAID,EAAgB,KAAOjO,EACzB,MAAM,IAAIK,QACR6E,EAAY
jG,MACZ,gDAAgDgP,EAAgB,OAIpE,OADoBA,EAAgB,IAElC,IAAK,WACH,MAAMxG,EAA+BwG,EAAgB,GACrD,IAAKxG,EACH,MAAM,IAAIpH,QACR6E,EAAYjG,MACZ,kDAAkD8O,MAGtD,OAAO,IAAIvG,gBAAgBC,GAC7B,IAAK,WACH,OAAO,IAAIF,gBACb,QACE,MAAM,IAAIlH,QACR6E,EAAYjG,MACZ,wCAAwC8O,MAGhD,CDnCkBC,CAAyBD,GAGnCd,EAAMa,EAAUK,YAAY,OAAOb,eACnCE,EAAOM,EAAUK,YAAY,iBAC7Bf,EAAmBU,EAAUK,YAAY,sBAE/C,OAAO,IAAInB,UACTC,EACAC,EACAM,EACAJ,EACAR,qBAEJ,CErBsB,MAAAwB,QA6BpB,WAAAhT,CAAsBiT,EAAQC,GAC5B,IAAKD,EAAGpB,KAAKS,SAASa,OACpB,MAAM,IAAIlO,QACR6E,EAAYQ,WACZ,yHAEG,IAAK2I,EAAGpB,KAAKS,SAASc,UAC3B,MAAM,IAAInO,QACR6E,EAAYW,cACZ,+HAEG,IAAKwI,EAAGpB,KAAKS,SAASe,MAC3B,MAAM,IAAIpO,QACR6E,EAAYS,UACZ,uHAYF,GATAtK,KAAKqT,aAAe,CAClBH,OAAQF,EAAGpB,IAAIS,QAAQa,OACvBI,QAASN,EAAGpB,IAAIS,QAAQc,UACxBC,MAAOJ,EAAGpB,IAAIS,QAAQe,MACtBG,+BAAgCP,EAAGpB,IAAI2B,+BACvCnH,SAAU4G,EAAG5G,SACbyF,QAASmB,EAAGnB,SAGV2B,EAAqBR,EAAGpB,MAAQoB,EAAGpB,IAAI6B,SAASC,cAAe,CACjE,MAAMC,EAAQX,EAAGpB,IAAI6B,SAASC,cAC9B1T,KAAKqT,aAAaO,iBAAmB,IAC5BxT,QAAQF,QAAQ,CAAEyT,SAE5B,MAAWX,EAAiBhB,WACvBgB,EAAGX,SAASwB,4BACd7T,KAAKqT,aAAaO,iBAAmB,IAClCZ,EAAiBhB,SAAU8B,qBAE9B9T,KAAKqT,aAAaO,iBAAmB,IAClCZ,EAAiBhB,SAAU+B,YAI7Bf,EAAiBb,OACpBnS,KAAKqT,aAAaW,aAAe,IAC9BhB,EAAiBb,KAAM4B,YAG5B/T,KAAKiU,MAAQlB,QAAQmB,mBACnBjB,EACAjT,KAAKqT,aAAaxB,QAAQ5F,YAG/B,CAUD,yBAAOiI,CACLjB,EACAhH,GAEA,OAAIA,IAAgBJ,EAAYE,UACvBgH,QAAQoB,2BAA2BlB,GAEnCF,QAAQqB,2BAA2BnB,EAE7C,CAKO,iCAAOkB,CAA2BlB,GACxC,MAAO,UAAUA,GAClB,CAKO,iCAAOmB,CAA2BnB,GACxC,IAAIgB,EAcJ,OAVIA,EAHAhB,EAAUoB,SAAS,KACjBpB,EAAUqB,WAAW,WAEf,qBAAqBrB,IAGrBA,EAIF,4BAA4BA,IAG/BgB,CACR,EC/HH,IAAYM,GAAZ,SAAYA,GACVA,EAAA,iBAAA,kBACAA,EAAA,wBAAA,wBACAA,EAAA,aAAA,cACAA,EAAA,QAAA,SACD,CALD,CAAYA,IAAAA,EAKX,CAAA,IAEY,MAAAC,WACX,WAAAzU,CACSkU,EACAQ,EACAC,EACA5F,EACA6F,GAJA3U,KAAKiU,MAALA,EACAjU,KAAIyU,KAAJA,EACAzU,KAAW0U,YAAXA,EACA1U,KAAM8O,OAANA,EACA9O,KAAc2U,eAAdA,CACL,CACJ,QAAAzP,GACE,MAAM0P,EAAM,IAAIC,IAAI7U,KAAK8U,SAGzB,OAFAF,EAAIG,SAAW,IAAI/U,KAAKgV,cAAchV,KAAKiV,aAAajV,KAAKyU,OAC7DG,EAAIM,OAASlV,KAAKmV,YAAYjQ,WACvB0P,EAAI1P,UACZ,CAED,WAAY4P,GACV,OAAO9U,KAAK2U,gBAAgBG,SAAW,WAAWjQ,GACnD,CAED,cAAYmQ,GACV,MhBjC+B,QgBkChC,CAED,aAAYC,GACV,GAAIjV,KAAK0U,YAAY7C,mBAAmB3F,gBACtC,MAAO,YAAYlM,KAAK0U,YAAYpB,WAAWtT,KAAKiU,QAC/C,GAAIjU,KAAK0U,YAAY7C,mBAAmB1F,gBAC7C,MAAO,YAAYnM,KAAK0U,YAAYpB,qBAAqBtT,KAAK0U,YAAY7C,QAAQzF,YAAYpM,KAAKiU,QAEnG,MAAM,IAAIjP,QACR6E,EAAYjG,MACZ,oBAAoBuN,KAAKC,UAAUpR,KAAK0U,YAAY7C,WAGzD,CAED,eAAYsD,GACV,MAAM1D,EAAS,IAAI2D,gBAKnB,OAJIpV,KAAK8O,QACP2C,EAAO4D,IAAI,MAAO,OAGb5D,CACR,EAGU,MAAA6D,aACX,WAAAvV,CAAmB2U,GAAA1U,KAAW0U,YAAXA,CAA4B,CAC/C,QAAAxP,GACE,MAAM0P,EAAM,IAAIC,IAAI,SAAShQ,KAC7B+P,EAAIG,SAAW/U,KAAK+U,SAEpB,MAAMI,EAAc,IAAIC,gBAIxB,OAHAD,EAAYE,IAAI,MAAOrV,KAAK0U,YAAYxB,QACxC0B,EAAIM,OAASC,EAAYjQ,WAElB0P,EAAI1P,UACZ,CAED,YAAY6P,GACV,OAAI/U,KAAK0U,YAAY7C,QAAQ5F,cAAgBJ,EAAYE,UAChD,2EAEA,mFAAmF/L,KAAK0U,YAAYtI,UAE9G,EAaIqE,eAAe8E,WAAWX,GAC/B,MAAMY,EAAU,IAAIC,QAOpB,GANAD,EAAQE,OAAO,eAAgB,oBAC/BF,EAAQE,OAAO,oBAVjB,SAASC,mBACP,MAAMC,EAAc,GAGpB,OAFAA,EAAYC,KAAK,SAAmB/Q,KACpC8Q,EAAYC,KAAK,QAAQ/Q,KAClB8Q,EAAYE,KAAK,IAC1B,CAKsCH,IACpCH,EAAQE,OAAO,iBAAkBd,EAAIF,YAAYxB,QAC7C0B,EAAIF,YAAYnB,gCAClBiC,EAAQE,OAAO,mBAAoBd,EAAIF,YAAYtB,OAEjDwB,EAAIF,YAAYd,iBAAkB,CACpC,MAAMF,QAAsBkB,EAAIF,YAAYd,mBACxCF,IACF8B,EAAQE,OAAO,sBAAuBhC,EAAcC,OAChDD,EAAcnT,OAChB8L,EAAO3I,KACL,6CAA6CgQ,EAAcnT,MAAMO,WAIxE,CAED,GAAI8T,EAAIF,YAAYV,aAAc,CAChC,MAAM+B,QAAkBnB,EAAIF,YAAYV,eACpC+B,GACFP,EAAQE,OAAO,gBAAiB,YAAYK,EAAUC,cAEzD,CAED,OAAOR,CACT,CAqBO/E,eAAewF,YACpBhC,EACAQ,EACAC,EACA5F,EACA+B,EACA8D,GAEA,MAAMC,EAAM,IAAIJ,WAAWP,EAAOQ,EAAMC,EAAa5F,EAAQ6F,GAC7D,IAAIuB,EACAC,EACJ,IACE,MAAM1I,QA/B
HgD,eAAe2F,iBACpBnC,EACAQ,EACAC,EACA5F,EACA+B,EACA8D,GAEA,MAAMC,EAAM,IAAIJ,WAAWP,EAAOQ,EAAMC,EAAa5F,EAAQ6F,GAC7D,MAAO,CACLC,IAAKA,EAAI1P,WACTmR,aAAc,CACZ5R,OAAQ,OACR+Q,cAAeD,WAAWX,GAC1B/D,QAGN,CAc0BuF,CACpBnC,EACAQ,EACAC,EACA5F,EACA+B,EACA8D,GAGI2B,EACuB,MAA3B3B,GAAgB4B,SAAmB5B,EAAe4B,SAAW,EACzD5B,EAAe4B,QhB7Je,KgB+J9BC,EAAkB,IAAIC,gBAK5B,GAJAN,EAAiBO,YAAW,IAAMF,EAAgBG,SAASL,GAC3D7I,EAAQ4I,aAAaO,OAASJ,EAAgBI,OAE9CV,QAAiBjG,MAAMxC,EAAQmH,IAAKnH,EAAQ4I,eACvCH,EAASW,GAAI,CAChB,IACIC,EADAhW,EAAU,GAEd,IACE,MAAM0P,QAAa0F,EAAS1F,OAC5B1P,EAAU0P,EAAKjQ,MAAMO,QACjB0P,EAAKjQ,MAAMwW,UACbjW,GAAW,IAAIqQ,KAAKC,UAAUZ,EAAKjQ,MAAMwW,WACzCD,EAAetG,EAAKjQ,MAAMwW,QAE7B,CAAC,MAAOC,GAER,CACD,GACsB,MAApBd,EAASe,QACTH,GACAA,EAAaI,MACVC,GAA2C,qBAAlBA,EAAOC,UAEnCN,EAAaI,MAAMC,GAEfA,EAAOE,QACL,IAAIC,YAAYjD,SAClB,8CAIJ,MAAM,IAAIrP,QACR6E,EAAYM,gBAIV,gOAAkDyK,EAAIF,YAAYpB,6JAIpE,CACE2D,OAAQf,EAASe,OACjBM,WAAYrB,EAASqB,WACrBT,iBAIN,MAAM,IAAI9R,QACR6E,EAAYG,YACZ,uBAAuB4K,OAASsB,EAASe,UAAUf,EAASqB,eAAezW,IAC3E,CACEmW,OAAQf,EAASe,OACjBM,WAAYrB,EAASqB,WACrBT,gBAGL,CACF,CAAC,MAAOE,GACP,IAAIQ,EAAMR,EAaV,MAXGA,EAAcnW,OAASgJ,EAAYG,aACnCgN,EAAcnW,OAASgJ,EAAYM,iBACpC6M,aAAapW,QAEb4W,EAAM,IAAIxS,QACR6E,EAAYjG,MACZ,uBAAuBgR,EAAI1P,eAAe8R,EAAElW,WAE9C0W,EAAIC,MAAQT,EAAES,OAGVD,CACP,CAAS,QACJrB,GACFuB,aAAavB,EAEhB,CACD,OAAOD,CACT,CCvOA,SAASyB,mBAAmBzB,GAC1B,GAAIA,EAASxF,YAAcwF,EAASxF,WAAWhQ,OAAS,EAAG,CAQzD,GAPIwV,EAASxF,WAAWhQ,OAAS,GAC/B2L,EAAO3I,KACL,qBAAqBwS,EAASxF,WAAWhQ,qIAKzCkX,mBAAmB1B,EAASxF,WAAW,IACzC,MAAM,IAAI1L,QACR6E,EAAYE,eACZ,mBAAmB8N,wBACjB3B,6CAEF,CACEA,aAIN,OAAO,CACR,CACC,OAAO,CAEX,CAMM,SAAU4B,8BACd5B,EACA6B,EAAmCtP,EAAgBE,UAQ/CuN,EAASxF,aAAewF,EAASxF,WAAW,GAAGsH,eAAe,WAChE9B,EAASxF,WAAW,GAAGuH,MAAQ,GAGjC,MAAMC,EASF,SAAUC,WACdjC,GA6DA,OA3DCA,EAA6CzH,KAAO,KACnD,GAAIkJ,mBAAmBzB,GACrB,OAAOkC,QAAQlC,GAAU7G,IAASA,EAAKgJ,UAClC,GAAInC,EAASoC,eAClB,MAAM,IAAItT,QACR6E,EAAYE,eACZ,uBAAuB8N,wBAAwB3B,KAC/C,CACEA,aAIN,MAAO,EAAE,EAEVA,EAA6CqC,eAAiB,KAC7D,GAAIZ,mBAAmBzB,GAAW,CAChC,MAAMsC,EAASJ,QAAQlC,GAAU7G,KAAUA,EAAKgJ,UAChD,MAAkB,KAAXG,OAAgBC,EAAYD,CACpC,CAAM,GAAItC,EAASoC,eAClB,MAAM,IAAItT,QACR6E,EAAYE,eACZ,kCAAkC8N,wBAAwB3B,KAC1D,CACEA,YAIU,EAEjBA,EAA6CwC,gBAAkB,KAG9D,GAAIf,mBAAmBzB,GACrB,OAiFA,SAAUyC,mBACdzC,GAEA,MAAMvU,EAAyB,GAE/B,GAAIuU,EAASxF,aAAa,GAAGvB,SAASG,MACpC,IAAK,MAAMD,KAAQ6G,EAASxF,aAAa,GAAGvB,SAASG,MAC/CD,EAAKE,YACP5N,EAAKkU,KAAKxG,GAKhB,OAAI1N,EAAKjB,OAAS,EACTiB,OAEP,CAEJ,CAnGagX,CAAmBzC,GACrB,GAAIA,EAASoC,eAClB,MAAM,IAAItT,QACR6E,EAAYE,eACZ,uBAAuB8N,wBAAwB3B,KAC/C,CACEA,YAIU,EAEjBA,EAA6C0C,cAAgB,KAC5D,GAAIjB,mBAAmBzB,GACrB,OA4CA,SAAU2C,iBACd3C,GAEA,MAAM0C,EAAgC,GACtC,GAAI1C,EAASxF,aAAa,GAAGvB,SAASG,MACpC,IAAK,MAAMD,KAAQ6G,EAASxF,aAAa,GAAGvB,SAASG,MAC/CD,EAAKyJ,cACPF,EAAc/C,KAAKxG,EAAKyJ,cAI9B,OAAIF,EAAclY,OAAS,EAClBkY,OAEP,CAEJ,CA5DaC,CAAiB3C,GACnB,GAAIA,EAASoC,eAClB,MAAM,IAAItT,QACR6E,EAAYE,eACZ,gCAAgC8N,wBAAwB3B,KACxD,CACEA,YAIU,EAEXA,CACT,CAxE8BiC,CAAWjC,GAEvC,OADAgC,EAAoBH,gBAAkBA,EAC/BG,CACT,CA8EgB,SAAAE,QACdlC,EACA6C,GAEA,MAAMC,EAAc,GACpB,GAAI9C,EAASxF,aAAa,GAAGvB,SAASG,MACpC,IAAK,MAAMD,KAAQ6G,EAASxF,aAAa,GAAGvB,SAASG,MAC/CD,EAAKZ,MAAQsK,EAAW1J,IAC1B2J,EAAYnD,KAAKxG,EAAKZ,MAI5B,OAAIuK,EAAYtY,OAAS,EAChBsY,EAAYlD,KAAK,IAEjB,EAEX,CAgDA,MAAMmD,EAAmB,CAAC/R,EAAaG,WAAYH,EAAaJ,QAEhE,SAAS8Q,mBAAmBsB,GAC1B,QACIA,EAAUC,cACZF,EAAiB/B,MAAKE,GAAUA,IAAW8B,EAAUC,cAEzD,CAEM,SAAUtB,wBACd3B,GAEA,IAAIpV,EAAU,GACd,GACIoV,EAASxF,YAA6C,IAA/BwF,EAASxF,WAAWhQ,SAC7CwV,EAASoC,gBASJ,GAAIpC,EAASxF,aAAa,GAAI,CACnC,MAAM0I,EAAiBlD,EAASxF,WAAW,GACvCkH,mBAAmBwB,KACrBtY,GAAW,gCAAgCsY,EAAeD,eACtDC,EAAeC,gBACjBvY,GAAW,KAAKsY,EAAeC,iBAGpC,OAfCvY
,GAAW,uBACPoV,EAASoC,gBAAgBgB,cAC3BxY,GAAW,WAAWoV,EAASoC,eAAegB,eAE5CpD,EAASoC,gBAAgBiB,qBAC3BzY,GAAW,KAAKoV,EAASoC,eAAeiB,sBAW5C,OAAOzY,CACT,CASO2P,eAAe+I,sBAEpBtD,GACA,MAAMuD,QAA6CvD,EAAS1F,OAEtDkJ,EAAc,GACpB,IAAIC,EAGJ,IAAKF,EAAaG,aAAoD,IAArCH,EAAaG,aAAalZ,OACzD,MAAM,IAAIsE,QACR6E,EAAYE,eACZ,0KAIJ,IAAK,MAAM8P,KAAcJ,EAAaG,YACpC,GAAIC,EAAWC,kBACbH,EAAiBE,EAAWC,uBACvB,GAAID,EAAWnK,UAAYmK,EAAWE,mBAC3CL,EAAO7D,KAAK,CACVnG,SAAUmK,EAAWnK,SACrBqK,mBAAoBF,EAAWE,0BAE5B,GAAIF,EAAWnK,UAAYmK,EAAWG,OAC3CN,EAAO7D,KAAK,CACVnG,SAAUmK,EAAWnK,SACrBuK,OAAQJ,EAAWG,cAEhB,IAAIH,EAAWK,iBAGpB,MAAM,IAAIlV,QACR6E,EAAYE,eACZ,2DAA2DoH,KAAKC,UAC9DyI,OAMR,MAAO,CAAEH,SAAQC,iBACnB,CC9PM,SAAUQ,0BACdC,GAWA,GATAA,EAAuBC,gBAAgBC,SAAQC,IAC7C,GAAIA,EAAc9V,OAChB,MAAM,IAAIO,QACR6E,EAAYa,YACZ,sGAEH,IAGC0P,EAAuBI,kBAAkBC,KAAM,CACjD,MAAMC,EAAcC,KAAKC,MACvBR,EAAuBI,iBAAiBC,MAGtCC,IAAgBN,EAAuBI,iBAAiBC,OAC1DpO,EAAO3I,KACL,kIAEF0W,EAAuBI,iBAAiBC,KAAOC,EAElD,CAED,OAAON,CACT,CAWM,SAAUS,2BACdC,GAYA,MAVgC,CAC9BpK,WAAYoK,EAAiBpK,WACzBqK,6BAA6BD,EAAiBpK,iBAC9C+H,EACJ/J,OAAQoM,EAAiBxC,eACrB0C,kBAAkBF,EAAiBxC,qBACnCG,EACJwC,cAAeH,EAAiBG,cAIpC,CAoCM,SAAUF,6BACdrK,GAEA,MAAMwK,EAA+C,GACrD,IAAIC,EAoDJ,OAnDID,GACFxK,EAAW4J,SAAQpB,IAEjB,IAAIkC,EAuBJ,GAtBIlC,EAAUkC,mBACZA,EAAmB,CACjBC,UAAWnC,EAAUkC,iBAAiBE,kBAKtCpC,EAAUqC,gBACZJ,EAAsBjC,EAAUqC,cAAchN,KAAIiN,IACzC,IACFA,EACHC,SACED,EAAaC,UAAYlV,EAAaK,0BACxC8U,iBAAkBF,EAAaE,kBAAoB,EACnDC,cAAeH,EAAaG,eAAiB,OASjDzC,EAAU/J,SAASG,OAAO4H,MACxB7H,GAASA,GAAyBuM,gBAGpC,MAAM,IAAI5W,QACR6E,EAAYa,YACZ,iGAIJ,MAAMmR,EAAkB,CACtB5D,MAAOiB,EAAUjB,MACjB9I,QAAS+J,EAAU/J,QACnBgK,aAAcD,EAAUC,aACxBE,cAAeH,EAAUG,cACzBkC,cAAeJ,EACfC,mBACAU,kBAAmB5C,EAAU4C,kBAC7BC,mBAAoB7C,EAAU6C,oBAEhCb,EAAiBrF,KAAKgG,EAAgB,IAInCX,CACT,CAEM,SAAUF,kBACd1C,GAGA,MAAM6C,EAAsC,GAC5C7C,EAAeiD,cAAcjB,SAAQkB,IACnCL,EAAoBtF,KAAK,CACvBmG,SAAUR,EAAaQ,SACvBC,YAAaT,EAAaS,YAC1BR,SAAUD,EAAaC,UAAYlV,EAAaK,0BAChD8U,iBAAkBF,EAAaE,kBAAoB,EACnDC,cAAeH,EAAaG,eAAiB,EAC7CO,QAASV,EAAaU,SACtB,IAQJ,MAL6C,CAC3C5C,YAAahB,EAAegB,YAC5BiC,cAAeJ,EACf5B,mBAAoBjB,EAAeiB,mBAGvC,CC/LA,MAAM4C,EAAiB,qCAUP,SAAAC,cACdlG,EACAxB,EACAqD,GAEA,MAGMsE,EA+EF,SAAUC,kBACdC,GAEA,MAAMC,EAASD,EAAYE,YA0C3B,OAzCe,IAAIC,eAAkB,CACnC,KAAAC,CAAMzL,GACJ,IAAI0L,EAAc,GAClB,OAAOC,OACP,SAASA,OACP,OAAOL,EAAOM,OAAOC,MAAK,EAAGvc,QAAOwc,WAClC,GAAIA,EACF,OAAIJ,EAAYK,YACd/L,EAAW3Q,MACT,IAAIyE,QAAQ6E,EAAYY,aAAc,gCAI1CyG,EAAWgM,QAIbN,GAAepc,EACf,IACI2c,EADAC,EAAQR,EAAYQ,MAAMjB,GAE9B,KAAOiB,GAAO,CACZ,IACED,EAAiBhM,KAAKkM,MAAMD,EAAM,GACnC,CAAC,MAAOpG,GAOP,YANA9F,EAAW3Q,MACT,IAAIyE,QACF6E,EAAYY,aACZ,iCAAiC2S,EAAM,MAI5C,CACDlM,EAAWG,QAAQ8L,GACnBP,EAAcA,EAAYU,UAAUF,EAAM,GAAG1c,QAC7C0c,EAAQR,EAAYQ,MAAMjB,EAC3B,CACD,OAAOU,MAAM,GAEhB,CACF,GAGL,CA5HIP,CAJkBpG,EAASrF,KAAMC,YACjC,IAAIyM,kBAAkB,OAAQ,CAAEC,OAAO,OAIlCC,EAASC,GAAWrB,EAAesB,MAC1C,MAAO,CACL7O,OAAQ8O,yBAAyBH,EAAS/I,EAAaqD,GACvD7B,SAAU2H,mBAAmBH,EAAShJ,EAAaqD,GAEvD,CAEAtH,eAAeoN,mBACb/O,EACA4F,EACAqD,GAEA,MAAM+F,EAA0C,GAC1CtB,EAAS1N,EAAO2N,YACtB,OAAa,CACX,MAAMO,KAAEA,EAAIxc,MAAEA,SAAgBgc,EAAOM,OACrC,GAAIE,EAAM,CACR,IAAIe,EAA0BC,mBAAmBF,GAMjD,OALIpJ,EAAY7C,QAAQ5F,cAAgBJ,EAAYE,YAClDgS,EAA0BE,2BACxBF,IAGGjG,8BACLiG,EACAhG,EAEH,CAED+F,EAAajI,KAAKrV,EACnB,CACH,CAEAiQ,eAAgBmN,yBACd9O,EACA4F,EACAqD,GAEA,MAAMyE,EAAS1N,EAAO2N,YACtB,OAAa,CACX,MAAMjc,MAAEA,EAAKwc,KAAEA,SAAeR,EAAOM,OACrC,GAAIE,EACF,MAGF,IAAIkB,EAEFA,EADExJ,EAAY7C,QAAQ5F,cAAgBJ,EAAYE,UAC/B+L,8BACjBmG,2BACEzd,GAEFuX,GAGiBD,8BAA8BtX,EAAOuX,GAG1D,MAAMqB,EAAiB8E,EAAiBxN,aAAa,IAGlD0I,GAAgBjK,SAASG,OACzB8J,GAAgBD,cAChBC,GAAgBgC,kBAChBhC,GAAgB2C,4BAKbmC,EACP,CACH,CA2DM,SAAUF,mBACdG,GAEA,MAAMC,EAAeD,EAAUA,EAAUzd,OA
AS,GAC5C2d,EAA8C,CAClD/F,eAAgB8F,GAAc9F,gBAEhC,IAAK,MAAMpC,KAAYiI,EACrB,GAAIjI,EAASxF,WACX,IAAK,MAAMwI,KAAahD,EAASxF,WAAY,CAG3C,MAAM4N,EAAIpF,EAAUjB,OAAS,EACxBoG,EAAmB3N,aACtB2N,EAAmB3N,WAAa,IAE7B2N,EAAmB3N,WAAW4N,KACjCD,EAAmB3N,WAAW4N,GAAK,CACjCrG,MAAOiB,EAAUjB,QAIrBoG,EAAmB3N,WAAW4N,GAAGlD,iBAC/BlC,EAAUkC,iBACZiD,EAAmB3N,WAAW4N,GAAGnF,aAAeD,EAAUC,aAC1DkF,EAAmB3N,WAAW4N,GAAGjF,cAC/BH,EAAUG,cACZgF,EAAmB3N,WAAW4N,GAAG/C,cAC/BrC,EAAUqC,cACZ8C,EAAmB3N,WAAW4N,GAAGxC,kBAC/B5C,EAAU4C,kBAMZ,MAAMC,EAAqB7C,EAAU6C,mBAcrC,GAZgC,iBAAvBA,GACgB,OAAvBA,GACA7a,OAAOqd,KAAKxC,GAAoBrb,OAAS,IAEzC2d,EAAmB3N,WAAW4N,GAAGvC,mBAC/BA,GAOA7C,EAAU/J,QAAS,CAErB,IAAK+J,EAAU/J,QAAQG,MACrB,SAEG+O,EAAmB3N,WAAW4N,GAAGnP,UACpCkP,EAAmB3N,WAAW4N,GAAGnP,QAAU,CACzCC,KAAM8J,EAAU/J,QAAQC,MAAQ,OAChCE,MAAO,KAGX,IAAK,MAAMD,KAAQ6J,EAAU/J,QAAQG,MAAO,CAC1C,MAAMkP,EAAgB,IAAKnP,GAIT,KAAdA,EAAKZ,OAGLvN,OAAOqd,KAAKC,GAAS9d,OAAS,GAChC2d,EAAmB3N,WAAW4N,GAAGnP,QAAQG,MAAMuG,KAC7C2I,GAGL,CACF,CACF,CAGL,OAAOH,CACT,CC9OA,MAAMI,EAAuC,CAE3C5U,EAAYG,YAEZH,EAAYjG,MAEZiG,EAAYM,iBAkBPsG,eAAeiO,kBACpBjR,EACAkR,EACAC,EACAC,GAEA,IAAKF,EACH,MAAO,CACLzI,eAAgB2I,IAChB9G,gBAAiBtP,EAAgBE,UAGrC,OAASgW,EAAoC9b,MAC3C,KAAKuF,EAAcE,eACjB,SAAUqW,EAAcnR,YAAYC,GAClC,MAAO,CACLyI,eAAgB0I,IAChB7G,gBAAiBtP,EAAgBC,WAGrC,MAAM,IAAI1D,QACR6E,EAAYa,YACZ,8EAEJ,KAAKtC,EAAcG,cACjB,MAAO,CACL2N,eAAgB2I,IAChB9G,gBAAiBtP,EAAgBE,UAErC,KAAKP,EAAcI,gBACjB,IACE,MAAO,CACL0N,eAAgB2I,IAChB9G,gBAAiBtP,EAAgBE,SAEpC,CAAC,MAAOqO,GACP,GAAIA,aAAahS,SAAWyZ,EAAsBpK,SAAS2C,EAAEnW,MAC3D,MAAO,CACLqV,eAAgB0I,IAChB7G,gBAAiBtP,EAAgBC,WAGrC,MAAMsO,CACP,CACH,KAAK5O,EAAcC,iBACjB,aAAUsW,EAAcnR,YAAYC,GAC3B,CACLyI,eAAgB0I,IAChB7G,gBAAiBtP,EAAgBC,WAG9B,CACLwN,eAAgB2I,IAChB9G,gBAAiBtP,EAAgBE,UAErC,QACE,MAAM,IAAI3D,QACR6E,EAAYjG,MACZ,gCACG+a,EAAoC9b,QAI/C,CC9DO4N,eAAe5B,sBACpB6F,EACAT,EACAxC,EACAkN,EACAhK,GAEA,MAAMmK,QAAmBJ,kBACvBjN,EACAkN,GACA,IAAMA,EAAe9P,sBAAsB4C,KAC3C,IA9BJhB,eAAesO,6BACbrK,EACAT,EACAxC,EACAkD,GAKA,OAHID,EAAY7C,QAAQ5F,cAAgBJ,EAAYE,YAClD0F,EAASuN,0BAAyCvN,IAE7CwE,YACLhC,EACAM,EAAK0K,wBACLvK,GACa,EACbvD,KAAKC,UAAUK,GACfkD,EAEJ,CAcMoK,CAA6BrK,EAAaT,EAAOxC,EAAQkD,KAE7D,OAAOyH,cAAc0C,EAAW5I,SAAUxB,EAC5C,CAqBOjE,eAAevC,gBACpBwG,EACAT,EACAxC,EACAkN,EACAhK,GAEA,MAAMmK,QAAmBJ,kBACvBjN,EACAkN,GACA,IAAMA,EAAezQ,gBAAgBuD,KACrC,IA9BJhB,eAAeyO,uBACbxK,EACAT,EACAxC,EACAkD,GAKA,OAHID,EAAY7C,QAAQ5F,cAAgBJ,EAAYE,YAClD0F,EAASuN,0BAAyCvN,IAE7CwE,YACLhC,EACAM,EAAK4K,iBACLzK,GACa,EACbvD,KAAKC,UAAUK,GACfkD,EAEJ,CAaUuK,CAAuBxK,EAAaT,EAAOxC,EAAQkD,KAErDoJ,QAaRtN,eAAe2O,+BACblJ,EACAxB,GAEA,MAAM+E,QAAqBvD,EAAS1F,OACpC,OAAIkE,EAAY7C,QAAQ5F,cAAgBJ,EAAYE,UAC3CkS,2BAA0CxE,GAE1CA,CAEX,CAvBwC2F,CACpCN,EAAW5I,SACXxB,GAMF,MAAO,CACLwB,SALuB4B,8BACvBiG,EACAe,EAAW/G,iBAKf,CC3FM,SAAUsH,wBACdC,GAGA,GAAa,MAATA,EAEG,MAAqB,iBAAVA,EACT,CAAElQ,KAAM,SAAUE,MAAO,CAAC,CAAEb,KAAM6Q,KAC/BA,EAAe7Q,KAClB,CAAEW,KAAM,SAAUE,MAAO,CAACgQ,IACvBA,EAAkBhQ,MACtBgQ,EAAkBlQ,KAGfkQ,EAFA,CAAElQ,KAAM,SAAUE,MAAQgQ,EAAkBhQ,YAFhD,CAOT,CAEM,SAAUiQ,iBACd9R,GAEA,IAAI+R,EAAmB,GACvB,GAAuB,iBAAZ/R,EACT+R,EAAW,CAAC,CAAE/Q,KAAMhB,SAEpB,IAAK,MAAMgS,KAAgBhS,EACG,iBAAjBgS,EACTD,EAAS3J,KAAK,CAAEpH,KAAMgR,IAEtBD,EAAS3J,KAAK4J,GAIpB,OAWF,SAASC,+CACPpQ,GAEA,MAAMqQ,EAAuB,CAAEvQ,KAAM,OAAQE,MAAO,IAC9CsQ,EAA2B,CAAExQ,KAAM,WAAYE,MAAO,IAC5D,IAAIuQ,GAAiB,EACjBC,GAAqB,EACzB,IAAK,MAAMzQ,KAAQC,EACb,qBAAsBD,GACxBuQ,EAAgBtQ,MAAMuG,KAAKxG,GAC3ByQ,GAAqB,IAErBH,EAAYrQ,MAAMuG,KAAKxG,GACvBwQ,GAAiB,GAIrB,GAAIA,GAAkBC,EACpB,MAAM,IAAI9a,QACR6E,EAAYK,gBACZ,8HAIJ,IAAK2V,IAAmBC,EACtB,MAAM,IAAI9a,QACR6E,EAAYK,gBACZ,oDAIJ,GAAI2V,EACF,OAAOF,EAGT,OAAOC,CACT,CA/CSF,CAA+CF,EACxD,CAgDM,SAAUO,2BACdtO,GAEA,I
AAIuO,EACJ,GAAKvO,EAAkCpD,SACrC2R,EAAmBvO,MACd,CAGLuO,EAAmB,CAAE3R,SAAU,CADfkR,iBAAiB9N,IAElC,CAMD,OALKA,EAAkCwO,oBACrCD,EAAiBC,kBAAoBZ,wBAClC5N,EAAkCwO,oBAGhCD,CACT,CAQM,SAAUE,yBACdxR,GACAuL,OACEA,EAAMkG,YACNA,EAAWC,aACXA,EAAYC,eACZA,EAAiB,EAACC,eAClBA,EAAcC,YACdA,EAAWC,kBACXA,EAAiBC,kBACjBA,IAuBF,MAnBiC,CAC/BC,UAAW,CACT,CACEhS,WAGJiS,WAAY,CACVC,WAAY3G,EACZqG,iBACAO,YAAaR,EACbE,cACAO,cAAeX,EACfC,eACAI,oBACAO,iBAAkBN,EAClBO,kBAAkB,EAClBC,yBAAyB,GAI/B,CC7IA,MAAMC,EAAuC,CAC3C,OACA,aACA,eACA,mBACA,UACA,oBAGIC,EAA6D,CACjEC,KAAM,CAAC,OAAQ,cACfC,SAAU,CAAC,oBACXpN,MAAO,CAAC,OAAQ,eAAgB,UAAW,oBAE3CqN,OAAQ,CAAC,SAGLC,EAA0D,CAC9DH,KAAM,CAAC,SACPC,SAAU,CAAC,SACXpN,MAAO,CAAC,OAAQ,YAEhBqN,OAAQ,ICPV,MAAME,EAAe,eAQR,MAAAC,YAKX,WAAA1hB,CACE2U,EACOT,EACC0K,EACDlN,EACAkD,GAHA3U,KAAKiU,MAALA,EACCjU,KAAa2e,cAAbA,EACD3e,KAAMyR,OAANA,EACAzR,KAAc2U,eAAdA,EARD3U,KAAQ0hB,SAAc,GACtB1hB,KAAA2hB,aAA8BvhB,QAAQF,UAS5CF,KAAKqT,aAAeqB,EAChBjD,GAAQmQ,WDXV,SAAUC,oBAAoBD,GAClC,IAAIE,EAA8B,KAClC,IAAK,MAAMC,KAAeH,EAAS,CACjC,MAAMxS,KAAEA,EAAIE,MAAEA,GAAUyS,EACxB,IAAKD,GAAwB,SAAT1S,EAClB,MAAM,IAAIpK,QACR6E,EAAYK,gBACZ,iDAAiDkF,KAGrD,IAAKjK,EAAekP,SAASjF,GAC3B,MAAM,IAAIpK,QACR6E,EAAYK,gBACZ,4CAA4CkF,0BAA6B+B,KAAKC,UAC5EjM,MAKN,IAAK6c,MAAMC,QAAQ3S,GACjB,MAAM,IAAItK,QACR6E,EAAYK,gBACZ,+DAIJ,GAAqB,IAAjBoF,EAAM5O,OACR,MAAM,IAAIsE,QACR6E,EAAYK,gBACZ,8CAIJ,MAAMgY,EAA0C,CAC9CzT,KAAM,EACNc,WAAY,EACZuJ,aAAc,EACdqJ,iBAAkB,EAClB9J,QAAS,EACT+J,iBAAkB,EAClBC,eAAgB,EAChBC,oBAAqB,GAGvB,IAAK,MAAMjT,KAAQC,EACjB,IAAK,MAAMpN,KAAOgf,EACZhf,KAAOmN,IACT6S,EAAYhgB,IAAQ,GAI1B,MAAMqgB,EAAapB,EAAqB/R,GACxC,IAAK,MAAMlN,KAAOgf,EAChB,IAAKqB,EAAWlO,SAASnS,IAAQggB,EAAYhgB,GAAO,EAClD,MAAM,IAAI8C,QACR6E,EAAYK,gBACZ,sBAAsBkF,qBAAwBlN,WAKpD,GAAI4f,IACgCP,EAA6BnS,GAChCiF,SAASyN,EAAY1S,MAClD,MAAM,IAAIpK,QACR6E,EAAYK,gBACZ,sBAAsBkF,oBACpB0S,EAAY1S,gCACc+B,KAAKC,UAC/BmQ,MAKRO,EAAcC,CACf,CACH,CChEMF,CAAoBpQ,EAAOmQ,SAC3B5hB,KAAK0hB,SAAWjQ,EAAOmQ,QAE1B,CAOD,gBAAMY,GAEJ,aADMxiB,KAAK2hB,aACJ3hB,KAAK0hB,QACb,CAMD,iBAAMe,CACJhV,SAEMzN,KAAK2hB,aACX,MAAMe,EAAanD,iBAAiB9R,GAC9B2M,EAAiD,CACrDC,eAAgBra,KAAKyR,QAAQ4I,eAC7BG,iBAAkBxa,KAAKyR,QAAQ+I,iBAC/BmI,MAAO3iB,KAAKyR,QAAQkR,MACpBC,WAAY5iB,KAAKyR,QAAQmR,WACzB3C,kBAAmBjgB,KAAKyR,QAAQwO,kBAChC5R,SAAU,IAAIrO,KAAK0hB,SAAUgB,IAE/B,IAAIG,EAAc,CAAA,EAmClB,OAjCA7iB,KAAK2hB,aAAe3hB,KAAK2hB,aACtB5E,MAAK,IACJ7O,gBACElO,KAAKqT,aACLrT,KAAKiU,MACLmG,EACApa,KAAK2e,cACL3e,KAAK2U,kBAGRoI,MAAKvE,IACJ,GACEA,EAAOtC,SAASxF,YAChB8H,EAAOtC,SAASxF,WAAWhQ,OAAS,EACpC,CACAV,KAAK0hB,SAAS7L,KAAK6M,GACnB,MAAMI,EAA2B,CAC/BxT,MAAOkJ,EAAOtC,SAASxF,aAAa,GAAGvB,QAAQG,OAAS,GAExDF,KAAMoJ,EAAOtC,SAASxF,aAAa,GAAGvB,QAAQC,MAAQ,SAExDpP,KAAK0hB,SAAS7L,KAAKiN,EACpB,KAAM,CACL,MAAMC,EAAoBlL,wBAAwBW,EAAOtC,UACrD6M,GACF1W,EAAO3I,KACL,mCAAmCqf,0CAGxC,CACDF,EAAcrK,CAAM,UAElBxY,KAAK2hB,aACJkB,CACR,CAOD,uBAAMG,CACJvV,SAEMzN,KAAK2hB,aACX,MAAMe,EAAanD,iBAAiB9R,GAC9B2M,EAAiD,CACrDC,eAAgBra,KAAKyR,QAAQ4I,eAC7BG,iBAAkBxa,KAAKyR,QAAQ+I,iBAC/BmI,MAAO3iB,KAAKyR,QAAQkR,MACpBC,WAAY5iB,KAAKyR,QAAQmR,WACzB3C,kBAAmBjgB,KAAKyR,QAAQwO,kBAChC5R,SAAU,IAAIrO,KAAK0hB,SAAUgB,IAEzBO,EAAgBpU,sBACpB7O,KAAKqT,aACLrT,KAAKiU,MACLmG,EACApa,KAAK2e,cACL3e,KAAK2U,gBAwCP,OApCA3U,KAAK2hB,aAAe3hB,KAAK2hB,aACtB5E,MAAK,IAAMkG,IAGXxiB,OAAMyiB,IACL,MAAM,IAAItiB,MAAM4gB,EAAa,IAE9BzE,MAAKoG,GAAgBA,EAAajN,WAClC6G,MAAK7G,IACJ,GAAIA,EAASxF,YAAcwF,EAASxF,WAAWhQ,OAAS,EAAG,CACzDV,KAAK0hB,SAAS7L,KAAK6M,GACnB,MAAMI,EAAkB,IAAK5M,EAASxF,WAAW,GAAGvB,SAE/C2T,EAAgB1T,OACnB0T,EAAgB1T,KAAO,SAEzBpP,KAAK0hB,SAAS7L,KAAKiN,EACpB,KAAM,CACL,MAAMC,EAAoBlL,wBAAwB3B,GAC9C6M,GACF1W,EAAO3I,KACL,yCAAyCqf,0CAG9C,KAEFtiB,OAAMuW,IAIDA,EAAElW,UAAY0
gB,GAGhBnV,EAAO9L,MAAMyW,EACd,IAEEiM,CACR,ECzIIxS,eAAexB,YACpByF,EACAT,EACAxC,EACAkN,EACAhK,GAEA,GACGgK,GAAqC9b,OAASuF,EAAcE,eAE7D,MAAM,IAAItD,QACR6E,EAAYa,YACZ,wDAGJ,OAvCK+F,eAAe2S,mBACpB1O,EACAT,EACAxC,EACAkD,GAEA,IAAI9D,EAAe,GACnB,GAAI6D,EAAY7C,QAAQ5F,cAAgBJ,EAAYE,UAAW,CAC7D,MAAMsX,EPiFM,SAAAC,sBACdC,EACAtP,GASA,MAP6D,CAC3DmG,uBAAwB,CACtBnG,WACGsP,GAKT,CO7FyBC,CAAqC/R,EAAQwC,GAClEpD,EAAOM,KAAKC,UAAUiS,EACvB,MACCxS,EAAOM,KAAKC,UAAUK,GAUxB,aARuBwE,YACrBhC,EACAM,EAAKkP,aACL/O,GACA,EACA7D,EACA8D,IAEcnE,MAClB,CAiBS4S,CAAmB1O,EAAaT,EAAOxC,EAAQkD,EACxD,CCrBM,MAAO+O,wBAAwB3Q,QAQnC,WAAAhT,CACEiT,EACA2Q,EACAhP,EACQgK,GAER3d,MAAMgS,EAAI2Q,EAAY1P,OAFdjU,KAAa2e,cAAbA,EAGR3e,KAAKwa,iBAAmBmJ,EAAYnJ,kBAAoB,CAAA,EACxDxa,KAAKqa,eAAiBsJ,EAAYtJ,gBAAkB,GACpDra,KAAK2iB,MAAQgB,EAAYhB,MACzB3iB,KAAK4iB,WAAae,EAAYf,WAC9B5iB,KAAKigB,kBAAoBZ,wBACvBsE,EAAY1D,mBAEdjgB,KAAK2U,eAAiBA,GAAkB,EACzC,CAMD,qBAAMzG,CACJT,GAEA,MAAMmW,EAAkB7D,2BAA2BtS,GACnD,OAAOS,gBACLlO,KAAKqT,aACLrT,KAAKiU,MACL,CACEuG,iBAAkBxa,KAAKwa,iBACvBH,eAAgBra,KAAKqa,eACrBsI,MAAO3iB,KAAK2iB,MACZC,WAAY5iB,KAAK4iB,WACjB3C,kBAAmBjgB,KAAKigB,qBACrB2D,GAEL5jB,KAAK2e,cACL3e,KAAK2U,eAER,CAQD,2BAAM9F,CACJpB,GAEA,MAAMmW,EAAkB7D,2BAA2BtS,GACnD,OAAOoB,sBACL7O,KAAKqT,aACLrT,KAAKiU,MACL,CACEuG,iBAAkBxa,KAAKwa,iBACvBH,eAAgBra,KAAKqa,eACrBsI,MAAO3iB,KAAK2iB,MACZC,WAAY5iB,KAAK4iB,WACjB3C,kBAAmBjgB,KAAKigB,qBACrB2D,GAEL5jB,KAAK2e,cACL3e,KAAK2U,eAER,CAMD,SAAAkP,CAAUC,GACR,OAAO,IAAIrC,YACTzhB,KAAKqT,aACLrT,KAAKiU,MACLjU,KAAK2e,cACL,CACEgE,MAAO3iB,KAAK2iB,MACZC,WAAY5iB,KAAK4iB,WACjB3C,kBAAmBjgB,KAAKigB,kBACxBzF,iBAAkBxa,KAAKwa,iBACvBH,eAAgBra,KAAKqa,kBAMlByJ,GAEL9jB,KAAK2U,eAER,CAKD,iBAAM1F,CACJxB,GAEA,MAAMmW,EAAkB7D,2BAA2BtS,GACnD,OAAOwB,YACLjP,KAAKqT,aACLrT,KAAKiU,MACL2P,EACA5jB,KAAK2e,cAER,ECzHU,MAAAoF,YAiBX,WAAAhkB,CACUikB,EACAC,GADAjkB,KAAgBgkB,iBAAhBA,EACAhkB,KAAcikB,eAAdA,EAbVjkB,KAAQkkB,UAAG,EAMXlkB,KAAcmkB,gBAAG,CAQb,CAWJ,UAAMC,CACJ3W,EACA4W,GAAe,GAEf,GAAIrkB,KAAKkkB,SACP,MAAM,IAAIlf,QACR6E,EAAYC,cACZ,wDAIJ,MAEMhJ,EAA8B,CAClCwjB,cAAe,CACbC,MAAO,CAJQhF,iBAAiB9R,IAKhC4W,iBAGJrkB,KAAKgkB,iBAAiBI,KAAKjT,KAAKC,UAAUtQ,GAC3C,CAeD,sBAAM0jB,CAAiB/V,GACrB,GAAIzO,KAAKkkB,SACP,MAAM,IAAIlf,QACR6E,EAAYC,cACZ,wDAIJ,MAAMhJ,EAAoC,CACxC2jB,cAAe,CACbhW,SAGJzO,KAAKgkB,iBAAiBI,KAAKjT,KAAKC,UAAUtQ,GAC3C,CAoBD,uBAAM4jB,CAAkBvU,GACtB,GAAInQ,KAAKkkB,SACP,MAAM,IAAIlf,QACR6E,EAAYC,cACZ,wDAIJ,MAAMhJ,EAAoC,CACxC2jB,cAAe,CACbE,MAAOxU,IAGXnQ,KAAKgkB,iBAAiBI,KAAKjT,KAAKC,UAAUtQ,GAC3C,CAmBD,uBAAM8jB,CAAkBzU,GACtB,GAAInQ,KAAKkkB,SACP,MAAM,IAAIlf,QACR6E,EAAYC,cACZ,wDAIJ,MAAMhJ,EAAoC,CACxC2jB,cAAe,CACbI,MAAO1U,IAGXnQ,KAAKgkB,iBAAiBI,KAAKjT,KAAKC,UAAUtQ,GAC3C,CAUD,2BAAMgkB,CACJC,GAEA,GAAI/kB,KAAKkkB,SACP,MAAM,IAAIlf,QACR6E,EAAYC,cACZ,wDAIJ,MAAMhJ,EAAmC,CACvCkkB,aAAc,CACZD,sBAGJ/kB,KAAKgkB,iBAAiBI,KAAKjT,KAAKC,UAAUtQ,GAC3C,CAWD,aAAOmkB,GAGL,GAAIjlB,KAAKkkB,SACP,MAAM,IAAIlf,QACR6E,EAAYI,eACZ,oFAGJ,UAAW,MAAMnJ,KAAWd,KAAKikB,eAC3BnjB,GAA8B,iBAAZA,EAChB2I,EAAiBC,kBAAkB5I,OAC/B,CACJyB,KAAM,mBACFzB,EACDokB,eAEIzb,EAAiBE,aAAa7I,OACjC,CACJyB,KAAM,cACFzB,EACDqkB,UAEI1b,EAAiBG,0BAA0B9I,OAC9C,CACJyB,KAAM,0BAEJzB,EAMAskB,sBAGJ/Y,EAAO3I,KACL,qDAAqDyN,KAAKC,UACxDtQ,MAKNuL,EAAO3I,KACL,gDAAgDyN,KAAKC,UACnDtQ,KAKT,CAQD,WAAMoc,GACCld,KAAKkkB,WACRlkB,KAAKkkB,UAAW,QACVlkB,KAAKgkB,iBAAiB9G,MAAM,IAAM,0BAE3C,CAYD,qBAAMmI,CAAgBC,GACpB,GAAItlB,KAAKkkB,SACP,MAAM,IAAIlf,QACR6E,EAAYC,cACZ,wDAMJwb,EAAYhL,SAAQiL,IAClB,MAAMzkB,EAAoC,CACxC2jB,cAAe,CAAEa,YAAa,CAACC,KAEjCvlB,KAAKgkB,iBAAiBI,KAAKjT,KAAKC,UAAUtQ,GAAS,GAEtD,CAYD,qBAAM0kB,CACJC,GAEA,GAAIzlB,KAAKkkB,SACP,MAAM,IAAIlf,QACR6E,EAAYC,cACZ,wDAIJ,MAAM0S,EAASiJ,EAAiBhJ,YAChC,OACE,
IACE,MAAMO,KAAEA,EAAIxc,MAAEA,SAAgBgc,EAAOM,OAErC,GAAIE,EACF,MACK,IAAKxc,EACV,MAAM,IAAII,MAAM,0DAGZZ,KAAKqlB,gBAAgB,CAAC7kB,GAC7B,CAAC,MAAOwW,GAEP,MAAMlW,EACJkW,aAAapW,MAAQoW,EAAElW,QAAU,iCACnC,MAAM,IAAIkE,QAAQ6E,EAAYC,cAAehJ,EAC9C,CAEJ,EC7TG,MAAO4kB,4BAA4B3S,QASvC,WAAAhT,CACEiT,EACA2Q,EAIQgC,GAER3kB,MAAMgS,EAAI2Q,EAAY1P,OAFdjU,KAAiB2lB,kBAAjBA,EAGR3lB,KAAKwa,iBAAmBmJ,EAAYnJ,kBAAoB,CAAA,EACxDxa,KAAK2iB,MAAQgB,EAAYhB,MACzB3iB,KAAK4iB,WAAae,EAAYf,WAC9B5iB,KAAKigB,kBAAoBZ,wBACvBsE,EAAY1D,kBAEf,CAUD,aAAM2F,GACJ,MAAMhR,EAAM,IAAIU,aAAatV,KAAKqT,cAGlC,IAAIwS,QAFE7lB,KAAK2lB,kBAAkBC,QAAQhR,EAAI1P,YAIvC2gB,EADE7lB,KAAKqT,aAAaxB,QAAQ5F,cAAgBJ,EAAYE,UACxC,YAAY/L,KAAKqT,aAAaC,WAAWtT,KAAKiU,QAE9C,YAAYjU,KAAKqT,aAAaC,qBAAqBtT,KAAKqT,aAAajH,YAAYpM,KAAKiU,QAKxG,MAAM6R,wBACJA,EAAuBC,yBACvBA,KACGvL,GACDxa,KAAKwa,iBAEHwL,EAAiC,CACrCC,MAAO,CACLhS,MAAO4R,EACPrL,mBACAmI,MAAO3iB,KAAK2iB,MACZC,WAAY5iB,KAAK4iB,WACjB3C,kBAAmBjgB,KAAKigB,kBACxB6F,0BACAC,6BAIJ,IAEE,MAAM9B,EAAiBjkB,KAAK2lB,kBAAkBO,SAC9ClmB,KAAK2lB,kBAAkBvB,KAAKjT,KAAKC,UAAU4U,IAG3C,MAAMG,SAAsBlC,EAAemC,QAAQ5lB,MACnD,IACG2lB,GACyB,iBAAjBA,KACP,kBAAmBA,GAGrB,YADMnmB,KAAK2lB,kBAAkBzI,MAAM,KAAM,qBACnC,IAAIlY,QACR6E,EAAYE,eACZ,gGAIJ,OAAO,IAAIga,YAAY/jB,KAAK2lB,kBAAmB1B,EAChD,CAAC,MAAOjN,GAGP,YADMhX,KAAK2lB,kBAAkBzI,QACvBlG,CACP,CACF,EC/EG,MAAOqP,oBAAoBtT,QAoB/B,WAAAhT,CACEiT,EACA2Q,EACOhP,GAEP,MAAMV,MAAEA,EAAKuG,iBAAEA,EAAgBH,eAAEA,GAAmBsJ,EACpD3iB,MAAMgS,EAAIiB,GAHHjU,KAAc2U,eAAdA,EAIP3U,KAAKwa,iBAAmBA,EACxBxa,KAAKqa,eAAiBA,CACvB,CAoBD,oBAAMiM,CACJ5X,GAEA,MAAMmC,EAAOqP,yBAAyBxR,EAAQ,IACzC1O,KAAKwa,oBACLxa,KAAKqa,iBAUV,OAAOb,4BARgBvD,YACrBjW,KAAKiU,MACLM,EAAKgS,QACLvmB,KAAKqT,cACQ,EACblC,KAAKC,UAAUP,GACf7Q,KAAK2U,gBAGR,CAqBD,uBAAM6R,CACJ9X,EACAuL,GAEA,MAAMpJ,EAAOqP,yBAAyBxR,EAAQ,CAC5CuL,YACGja,KAAKwa,oBACLxa,KAAKqa,iBAUV,OAAOb,4BARgBvD,YACrBjW,KAAKiU,MACLM,EAAKgS,QACLvmB,KAAKqT,cACQ,EACblC,KAAKC,UAAUP,GACf7Q,KAAK2U,gBAGR,ECzFU,MAAA8R,qBAGX,WAAA1mB,GACE,GAAyB,oBAAd2mB,UACT,MAAM,IAAI1hB,QACR6E,EAAYa,YACZ,mMAKL,CAED,OAAAkb,CAAQhR,GACN,OAAO,IAAIxU,SAAQ,CAACF,EAASD,KAC3BD,KAAK2mB,GAAK,IAAID,UAAU9R,GACxB5U,KAAK2mB,GAAGC,WAAa,OACrB5mB,KAAK2mB,GAAGE,iBAAiB,QAAQ,IAAM3mB,KAAW,CAAE4mB,MAAM,IAC1D9mB,KAAK2mB,GAAGE,iBACN,SACA,IACE5mB,EACE,IAAI+E,QACF6E,EAAYG,YACZ,qCAGN,CAAE8c,MAAM,IAEV9mB,KAAK2mB,GAAIE,iBAAiB,SAAUE,IAC9BA,EAAW3P,QACb/K,EAAO3I,KACL,mDAAmDqjB,EAAW3P,UAEjE,GACD,GAEL,CAED,IAAAgN,CAAKziB,GACH,IAAK3B,KAAK2mB,IAAM3mB,KAAK2mB,GAAGK,aAAeN,UAAUO,KAC/C,MAAM,IAAIjiB,QAAQ6E,EAAYC,cAAe,0BAE/C9J,KAAK2mB,GAAGvC,KAAKziB,EACd,CAED,YAAOukB,GACL,IAAKlmB,KAAK2mB,GACR,MAAM,IAAI3hB,QACR6E,EAAYC,cACZ,+BAIJ,MAAMod,EAA0B,GAC1BC,EAAsB,GAC5B,IAAIC,EAAsC,KACtClD,GAAW,EAEf,MAAMmD,gBAAkB5W,MAAO6W,IAC7B,IAAI3lB,EACJ,GAAI2lB,EAAM3lB,gBAAgB4lB,KACxB5lB,QAAa2lB,EAAM3lB,KAAK8M,WACnB,IAA0B,iBAAf6Y,EAAM3lB,KAatB,OAVAwlB,EAAWtR,KACT,IAAI7Q,QACF6E,EAAYY,aACZ,4FAA4F6c,EAAM3lB,eAGlGylB,IACFA,IACAA,EAAiB,OAVnBzlB,EAAO2lB,EAAM3lB,IAad,CAED,IACE,MAAM6lB,EAAMrW,KAAKkM,MAAM1b,GACvBulB,EAAarR,KAAK2R,EACnB,CAAC,MAAOxQ,GACP,MAAMQ,EAAMR,EACZmQ,EAAWtR,KACT,IAAI7Q,QACF6E,EAAYY,aACZ,4CAA4C+M,EAAI1W,WAGrD,CAEGsmB,IACFA,IACAA,EAAiB,KAClB,EAGGK,cAAgB,KACpBN,EAAWtR,KACT,IAAI7Q,QAAQ6E,EAAYG,YAAa,gCAEnCod,IACFA,IACAA,EAAiB,KAClB,EAGGM,cAAiBJ,IACjBA,EAAMlQ,QACR/K,EAAO3I,KACL,0DAA0D4jB,EAAMlQ,UAGpE8M,GAAW,EACPkD,IACFA,IACAA,EAAiB,MAGnBpnB,KAAK2mB,IAAIgB,oBAAoB,UAAWN,iBACxCrnB,KAAK2mB,IAAIgB,oBAAoB,QAASD,eACtC1nB,KAAK2mB,IAAIgB,oBAAoB,QAASF,cAAc,EAOtD,IAJAznB,KAAK2mB,GAAGE,iBAAiB,UAAWQ,iBACpCrnB,KAAK2mB,GAAGE,iBAAiB,QAASa,eAClC1nB,KAAK2mB,GAAGE,iBAAiB,QAASY,gBAE1BvD,GAAU,CAChB,GAAIiD,EAAWzmB,OAAS,EAAG,CAEzB,MADcymB,
EAAWS,OAE1B,CACGV,EAAaxmB,OAAS,QAClBwmB,EAAaU,cAEb,IAAIxnB,SAAcF,IACtBknB,EAAiBlnB,CAAO,GAG7B,CAGD,GAAIinB,EAAWzmB,OAAS,EAAG,CAEzB,MADcymB,EAAWS,OAE1B,CACF,CAED,KAAA1K,CAAMrc,EAAeuW,GACnB,OAAO,IAAIhX,SAAQF,GACZF,KAAK2mB,IAIV3mB,KAAK2mB,GAAGE,iBAAiB,SAAS,IAAM3mB,KAAW,CAAE4mB,MAAM,IAGzD9mB,KAAK2mB,GAAGK,aAAeN,UAAUmB,QACjC7nB,KAAK2mB,GAAGK,aAAeN,UAAUoB,WAE1B5nB,SAGLF,KAAK2mB,GAAGK,aAAeN,UAAUqB,SACnC/nB,KAAK2mB,GAAGzJ,MAAMrc,EAAMuW,KAbblX,KAgBZ,EC9MmB,MAAA8nB,OAkCpB,WAAAjoB,CAAYkoB,GAEV,IAAKA,EAAa1lB,OAAS0lB,EAAaC,MACtC,MAAM,IAAIljB,QACR6E,EAAYO,eACZ,0EAIJ,IAAK,MAAM+d,KAAYF,EACrBjoB,KAAKmoB,GAAYF,EAAaE,GAGhCnoB,KAAKuC,KAAO0lB,EAAa1lB,KACzBvC,KAAKooB,OAASH,EAAajQ,eAAe,UACtCiQ,EAAaG,YACb3P,EACJzY,KAAKqoB,WAAWJ,EAAajQ,eAAe,eACtCiQ,EAAaI,QAEpB,CAOD,MAAAC,GACE,MAAMd,EAAqD,CACzDjlB,KAAMvC,KAAKuC,MAEb,IAAK,MAAMgmB,KAAQvoB,KACbA,KAAKgY,eAAeuQ,SAAwB9P,IAAfzY,KAAKuoB,KACvB,aAATA,GAAuBvoB,KAAKuC,OAASoI,EAAWM,SAClDuc,EAAIe,GAAQvoB,KAAKuoB,KAIvB,OAAOf,CACR,CAED,YAAOgB,CAAMC,GACX,OAAO,IAAIC,YAAYD,EAAaA,EAAYE,MACjD,CAED,aAAOC,CACLC,GAOA,OAAO,IAAIC,aACTD,EACAA,EAAaE,WACbF,EAAaG,mBAEhB,CAGD,aAAOC,CAAOC,GACZ,OAAO,IAAIC,aAAaD,EACzB,CAED,iBAAOE,CACLF,GAEA,OAAO,IAAIC,aAAaD,EAAcA,EAAaG,KACpD,CAED,cAAOC,CAAQC,GACb,OAAO,IAAIC,cAAcD,EAC1B,CAGD,aAAOE,CAAOC,GACZ,OAAO,IAAIC,aAAaD,EACzB,CAGD,cAAOE,CAAQC,GACb,OAAO,IAAIC,cAAcD,EAC1B,CAED,YAAO3B,CACL6B,GAEA,OAAO,IAAIC,YAAYD,EACxB,EAoBG,MAAOP,sBAAsBxB,OACjC,WAAAjoB,CAAYkoB,GACVjnB,MAAM,CACJuB,KAAMoI,EAAWG,WACdmd,GAEN,EAOG,MAAO0B,qBAAqB3B,OAChC,WAAAjoB,CAAYkoB,GACVjnB,MAAM,CACJuB,KAAMoI,EAAWE,UACdod,GAEN,EAOG,MAAO6B,sBAAsB9B,OACjC,WAAAjoB,CAAYkoB,GACVjnB,MAAM,CACJuB,KAAMoI,EAAWI,WACdkd,GAEN,EAQG,MAAOkB,qBAAqBnB,OAEhC,WAAAjoB,CAAYkoB,EAA6BgC,GACvCjpB,MAAM,CACJuB,KAAMoI,EAAWC,UACdqd,IAELjoB,KAAKqpB,KAAOY,CACb,CAKD,MAAA3B,GACE,MAAMd,EAAMxmB,MAAMsnB,SAIlB,OAHItoB,KAAKqpB,OACP7B,EAAU,KAAIxnB,KAAKqpB,MAEd7B,CACR,EASG,MAAOkB,oBAAoBV,OAC/B,WAAAjoB,CAAYkoB,EAAmCU,GAC7C3nB,MAAM,CACJuB,KAAMoI,EAAWK,SACdid,IAHwCjoB,KAAK2oB,MAALA,CAK9C,CAKD,MAAAL,GACE,MAAMd,EAAMxmB,MAAMsnB,SAElB,OADAd,EAAImB,MAAQ3oB,KAAK2oB,MAAML,SAChBd,CACR,EAQG,MAAOsB,qBAAqBd,OAChC,WAAAjoB,CACEkoB,EACOc,EAGAC,EAA+B,IAEtChoB,MAAM,CACJuB,KAAMoI,EAAWM,UACdgd,IAPEjoB,KAAU+oB,WAAVA,EAGA/oB,KAAkBgpB,mBAAlBA,CAMR,CAKD,MAAAV,GACE,MAAMd,EAAMxmB,MAAMsnB,SAClBd,EAAIuB,WAAa,IAAK/oB,KAAK+oB,YAC3B,MAAMmB,EAAW,GACjB,GAAIlqB,KAAKgpB,mBACP,IAAK,MAAMmB,KAAenqB,KAAKgpB,mBAC7B,IAAKhpB,KAAK+oB,WAAW/Q,eAAemS,GAClC,MAAM,IAAInlB,QACR6E,EAAYO,eACZ,aAAa+f,wDAKrB,IAAK,MAAMA,KAAenqB,KAAK+oB,WACzB/oB,KAAK+oB,WAAW/Q,eAAemS,KACjC3C,EAAIuB,WAAWoB,GAAenqB,KAAK+oB,WACjCoB,GACA7B,SACGtoB,KAAKgpB,mBAAmB3U,SAAS8V,IACpCD,EAASrU,KAAKsU,IAQpB,OAJID,EAASxpB,OAAS,IACpB8mB,EAAI0C,SAAWA,UAEV1C,EAAIwB,mBACJxB,CACR,EAQG,MAAOwC,oBAAoBhC,OAE/B,WAAAjoB,CAAYkoB,GACV,GAAkC,IAA9BA,EAAaC,MAAMxnB,OACrB,MAAM,IAAIsE,QACR6E,EAAYO,eACZ,wCAGJpJ,MAAM,IACDinB,EACH1lB,UAAMkW,IAERzY,KAAKkoB,MAAQD,EAAaC,KAC3B,CAKD,MAAAI,GACE,MAAMd,EAAMxmB,MAAMsnB,SAKlB,OAHItoB,KAAKkoB,OAASlG,MAAMC,QAAQjiB,KAAKkoB,SACnCV,EAAIU,MAASloB,KAAKkoB,MAAwB3Z,KAAI6b,GAAKA,EAAE9B,YAEhDd,CACR,ECvTU,MAAA6C,kBAUX,WAAAtqB,GACEC,KAAK0P,SAAW,WACjB,CAUD,WAAO4a,CAAKC,GASV,OAPEA,IACCA,EAAqB,GAAKA,EAAqB,MAEhDle,EAAO3I,KACL,uCAAuC6mB,iDAGpC,CAAE7a,SAAU,aAAc6a,qBAClC,CASD,UAAOC,GACL,MAAO,CAAE9a,SAAU,YACpB,EClDH,MAGM+a,EAAuB,kBAYvBC,EAA8B,m0DA6CbD,yBAiDV,MAAAE,wBAiBX,WAAA5qB,CACmB6qB,EACAvY,EACAwY,GAFA7qB,KAAW4qB,YAAXA,EACA5qB,KAAOqS,QAAPA,EACArS,KAAI6qB,KAAJA,EAlBX7qB,KAAS8qB,WAAG,EAEH9qB,KAAA+qB,aAAe,IAAIjrB,SAKnBE,KAAagrB,cAAkB,GAExChrB,KAAgBirB,iBAA4B,GAE5CjrB,KAAakrB,cAAG,EAEhBlrB,KAAqBmrB,uBAAG,EAO9BnrB,KAAK4qB,YAAYzG,
gBAAiB,EAGlCnkB,KAAKorB,mBAAqBprB,KAAKqrB,iBAAiBzb,SAAQ,IACtD5P,KAAKsrB,YAKPtrB,KAAK6qB,KAAKU,YAAYC,KAAKC,UAAYnE,IACrC,GAAItnB,KAAK8qB,UACP,OAGF,MAAMY,EAAQpE,EAAM3lB,KAQdsP,EAA+B,CACnCvB,SAAU,YACV/N,KATagqB,KACbxpB,OAAOypB,aAAaC,MAClB,KACA7J,MAAM8J,KAAK,IAAIC,WAAWL,EAAMM,YAQ/BhsB,KAAK4qB,YAAYlG,kBAAkBzT,EAAM,CAEjD,CAKD,UAAMgb,GACAjsB,KAAK8qB,YAGT9qB,KAAK8qB,WAAY,EACjB9qB,KAAK+qB,aAAa7qB,gBACZF,KAAKorB,mBACZ,CAMO,OAAAE,GACNtrB,KAAKksB,oBACLlsB,KAAK6qB,KAAKU,YAAYC,KAAKC,UAAY,KACvCzrB,KAAK6qB,KAAKU,YAAYY,aACtBnsB,KAAK6qB,KAAKuB,WAAWD,aACrBnsB,KAAK6qB,KAAKwB,YAAYC,YAAYhS,SAAQiS,GAASA,EAAMN,SACpB,WAAjCjsB,KAAK6qB,KAAK2B,aAAaC,OACpBzsB,KAAK6qB,KAAK2B,aAAatP,QAE9Bld,KAAK4qB,YAAYzG,gBAAiB,CACnC,CAKO,cAAAuI,CAAeC,GACrB3sB,KAAKgrB,cAAcnV,KAAK8W,GAEnB3sB,KAAK4sB,sBACX,CAOO,iBAAAV,GAGN,IAAIlsB,KAAKirB,kBAAkB3Q,SAAQuS,GAAUA,EAAOZ,KAAK,KAGzDjsB,KAAKgrB,cAActqB,OAAS,EAG5BV,KAAKkrB,cAAgBlrB,KAAK6qB,KAAK2B,aAAaM,WAC7C,CAKO,0BAAMF,GACZ,IAAI5sB,KAAKmrB,sBAAT,CAKA,IAFAnrB,KAAKmrB,uBAAwB,EAEtBnrB,KAAKgrB,cAActqB,OAAS,IAAMV,KAAK8qB,WAAW,CACvD,MAAMiC,EAAe/sB,KAAKgrB,cAAcpD,QACxC,IACE,MAAM8D,EAAQ,IAAIsB,WAAWD,GACvBE,EAAavB,EAAMhrB,OAEnBwsB,EAAcltB,KAAK6qB,KAAK2B,aAAaW,aACzC,EACAF,EAvOwB,MA4OpBG,EAAcF,EAAYG,eAAe,GAC/C,IAAK,IAAI/O,EAAI,EAAGA,EAAI2O,EAAY3O,IAC9B8O,EAAY9O,GAAKoN,EAAMpN,GAAK,MAG9B,MAAMuO,EAAS7sB,KAAK6qB,KAAK2B,aAAac,qBACtCT,EAAOb,OAASkB,EAChBL,EAAOjH,QAAQ5lB,KAAK6qB,KAAK2B,aAAae,aAGtCvtB,KAAKirB,iBAAiBpV,KAAKgX,GAC3BA,EAAOW,QAAU,KACfxtB,KAAKirB,iBAAmBjrB,KAAKirB,iBAAiBwC,QAC5CrD,GAAKA,IAAMyC,GACZ,EAKH7sB,KAAKkrB,cAAgBvQ,KAAK+S,IACxB1tB,KAAK6qB,KAAK2B,aAAaM,YACvB9sB,KAAKkrB,eAEP2B,EAAOlQ,MAAM3c,KAAKkrB,eAGlBlrB,KAAKkrB,eAAiBgC,EAAYS,QACnC,CAAC,MAAO3W,GACP3K,EAAO9L,MAAM,uBAAwByW,EACtC,CACF,CAEDhX,KAAKmrB,uBAAwB,CAhD5B,CAiDF,CAKO,oBAAME,GACZ,MAAMuC,EAAmB5tB,KAAK4qB,YAAY3F,UAC1C,MAAQjlB,KAAK8qB,WAAW,CACtB,MAAMtS,QAAepY,QAAQytB,KAAK,CAChCD,EAAiBxH,OACjBpmB,KAAK+qB,aAAa5qB,UAGpB,GAAIH,KAAK8qB,YAActS,GAAUA,EAAOwE,KACtC,MAGF,MAAMlc,EAAU0X,EAAOhY,MACvB,GAAqB,kBAAjBM,EAAQyB,KAA0B,CACpC,MAAM2iB,EAAgBpkB,EAClBokB,EAAc4I,aAChB9tB,KAAKksB,oBAGP,MAAM6B,EAAY7I,EAAc8I,WAAW1e,MAAM2e,MAAK5e,GACpDA,EAAKE,YAAYG,SAAS4E,WAAW,YAEvC,GAAIyZ,GAAWxe,WAAY,CACzB,MAAMod,EAAYZ,WAAWD,KAC3BoC,KAAKH,EAAUxe,WAAW5N,OAC1BwsB,GAAKA,EAAEC,WAAW,KAClBpC,OACFhsB,KAAK0sB,eAAeC,EACrB,CACF,MAAM,GAAqB,aAAjB7rB,EAAQyB,KACjB,GAAKvC,KAAKqS,QAAQgc,uBAKhB,IACE,MAAMlM,QAAyBniB,KAAKqS,QAAQgc,uBAC1CvtB,EAAQ8X,eAEL5Y,KAAK8qB,WACH9qB,KAAK4qB,YAAY9F,sBAAsB,CAAC3C,GAEhD,CAAC,MAAOnL,GACP,MAAM,IAAIhS,QACR6E,EAAYjG,MACZ,oCAAqCoT,EAAYlW,UAEpD,MAhBDuL,EAAO3I,KACL,yHAkBP,CACF,EAiDI+M,eAAe6d,uBACpB1D,EACAvY,EAAyC,IAEzC,GAAIuY,EAAY1G,SACd,MAAM,IAAIlf,QACR6E,EAAYI,eACZ,4DAIJ,GAAI2gB,EAAYzG,eACd,MAAM,IAAInf,QACR6E,EAAYC,cACZ,kEAKJ,GAC8B,oBAArBykB,kBACiB,oBAAjBC,cACc,oBAAdC,YACNA,UAAUC,aAEX,MAAM,IAAI1pB,QACR6E,EAAYa,YACZ,oHAIJ,IAAI8hB,EACJ,IAGEA,EAAe,IAAIgC,aACQ,cAAvBhC,EAAaC,aACTD,EAAamC,SAKrB,MAAMtC,QAAoBoC,UAAUC,aAAaE,aAAa,CAC5DjK,OAAO,IAKHkK,EAAc,IAAItH,KAAK,CAACmD,GAA8B,CAC1DnoB,KAAM,2BAEFusB,EAAaja,IAAIka,gBAAgBF,SACjCrC,EAAawC,aAAaC,UAAUH,GAG1C,MAAM1C,EAAaI,EAAa0C,wBAAwB7C,GAClDd,EAAc,IAAIgD,iBACtB/B,EACA/B,EACA,CACE0E,iBAAkB,CAAEC,iBAnbK,QAsb7BhD,EAAWxG,QAAQ2F,GAGnB,MAAM8D,EAAS,IAAI1E,wBAAwBC,EAAavY,EAAS,CAC/Dma,eACAH,cACAD,aACAb,gBAGF,MAAO,CAAEU,KAAM,IAAMoD,EAAOpD,OAC7B,CAAC,MAAOjV,GAQP,GANIwV,GAAuC,WAAvBA,EAAaC,OAC1BD,EAAatP,QAKhBlG,aAAahS,SAAWgS,aAAasY,aACvC,MAAMtY,EAIR,MAAM,IAAIhS,QACR6E,EAAYjG,MACZ,yCAA0CoT,EAAYlW,UAEzD,CACH,CCxZgB,SAAAyuB,MAAM3d,EAAmB4d,IAAUnd,GACjDT,ECpEI,SAAU6d,mBACdjuB,GAEA,OAAIA,GAAYA,EAA+BkuB,UACrCluB,EAA+BkuB,UAEhCluB,CAEX,CD4DQiuB,CA
AmB7d,GAEzB,MAAM+d,EAA6BC,aAAahe,EAAKjN,GAE/CkN,EAAUQ,GAASR,SAAW,IAAI3F,gBAElC2jB,EAA2C,CAC/Chc,4BAA6BxB,GAASwB,8BAA+B,GAGjEic,EpBvEF,SAAUC,yBAAyBle,GACvC,GAAIA,aAAmB3F,gBACrB,MAAO,GAAGvH,aACL,GAAIkN,aAAmB1F,gBAC5B,MAAO,GAAGxH,cAAoBkN,EAAQzF,WAEtC,MAAM,IAAIpH,QACR6E,EAAYjG,MACZ,oBAAoBuN,KAAKC,UAAUS,EAAQ5F,eAGjD,CoB4DqB8jB,CAAyBle,GACtCme,EAAaL,EAAW1d,aAAa,CACzC6d,eAKF,OAFAE,EAAW3d,QAAUwd,EAEdG,CACT,CAQgB,SAAAC,mBACdjd,EACA2Q,EACAhP,GAGA,MAAMub,EAAevM,EACrB,IAAIwM,EASJ,GAPEA,EADED,EAAartB,KACCqtB,EAAaC,eAAiB,CAC5Clc,MlCzFuC,yBkC4FzB0P,GAGbwM,EAAclc,MACjB,MAAM,IAAIjP,QACR6E,EAAYU,SACZ,sFAQJ,MAAMoU,EAAiB3L,EAAiBzB,uBACtC2e,EAAartB,KACK,oBAAX2O,YAAyBiH,EAAYjH,OAC5C0e,EAAa9iB,gBAGf,OAAO,IAAIsW,gBAAgB1Q,EAAImd,EAAexb,EAAgBgK,EAChE,CAgBgB,SAAAyR,eACdpd,EACA2Q,EACAhP,GAEA,IAAKgP,EAAY1P,MACf,MAAM,IAAIjP,QACR6E,EAAYU,SACZ,kFAGJ,OAAO,IAAI8b,YAAYrT,EAAI2Q,EAAahP,EAC1C,CAcgB,SAAA0b,uBACdrd,EACA2Q,GAEA,IAAKA,EAAY1P,MACf,MAAM,IAAIjP,QACR6E,EAAYU,SACZ,yHAGJ,MAAMyZ,EAAmB,IAAIyC,qBAC7B,OAAO,IAAIf,oBAAoB1S,EAAI2Q,EAAaK,EAClD,EEvKA,SAASsM,aACPC,EACE,IAAIluB,UAAUsC,EAAS6N,QAA8B,UAAC1P,sBACpD,IAIJ0tB,EAAgBvvB,EAAM8D,GAEtByrB,EAAgBvvB,EAAM8D,EAAS,UACjC,CAEAurB","preExistingComment":"firebase-ai.js.map"}
\ No newline at end of file
