Package Methods (1.117.0)

Summary of the method entries for aiplatform.

vertexai.init

init( *, project: typing.Optional[str] = None, location: typing.Optional[str] = None, experiment: typing.Optional[str] = None, experiment_description: typing.Optional[str] = None, experiment_tensorboard: typing.Optional[ typing.Union[ str, google.cloud.aiplatform.tensorboard.tensorboard_resource.Tensorboard, bool, ] ] = None, staging_bucket: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, encryption_spec_key_name: typing.Optional[str] = None, network: typing.Optional[str] = None, service_account: typing.Optional[str] = None, api_endpoint: typing.Optional[str] = None, api_key: typing.Optional[str] = None, api_transport: typing.Optional[str] = None, request_metadata: typing.Optional[typing.Sequence[typing.Tuple[str, str]]] = None )

Updates common initialization parameters with provided options.

See more: vertexai.init
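
For example, a minimal initialization sketch; the project ID, location, and staging bucket below are placeholder values:

    import vertexai

    vertexai.init(
        project="my-project",                     # placeholder project ID
        location="us-central1",                   # placeholder region
        staging_bucket="gs://my-staging-bucket",  # placeholder bucket
    )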

vertexai.agent_engines.create

create( agent_engine: typing.Union[ None, vertexai.agent_engines.AsyncQueryable, vertexai.agent_engines.AsyncStreamQueryable, vertexai.agent_engines._agent_engines.BidiStreamQueryable, vertexai.agent_engines.OperationRegistrable, vertexai.agent_engines.Queryable, vertexai.agent_engines.StreamQueryable, ] = None, *, requirements: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, display_name: typing.Optional[str] = None, description: typing.Optional[str] = None, gcs_dir_name: typing.Optional[str] = None, extra_packages: typing.Optional[typing.Sequence[str]] = None, env_vars: typing.Optional[ typing.Union[ typing.Sequence[str], typing.Dict[ str, typing.Union[str, google.cloud.aiplatform_v1.types.env_var.SecretRef], ], ] ] = None, build_options: typing.Optional[typing.Dict[str, typing.Sequence[str]]] = None, service_account: typing.Optional[str] = None, psc_interface_config: typing.Optional[ google.cloud.aiplatform_v1.types.service_networking.PscInterfaceConfig ] = None, min_instances: typing.Optional[int] = None, max_instances: typing.Optional[int] = None, resource_limits: typing.Optional[typing.Dict[str, str]] = None, container_concurrency: typing.Optional[int] = None, encryption_spec: typing.Optional[ google.cloud.aiplatform_v1.types.encryption_spec.EncryptionSpec ] = None ) -> vertexai.agent_engines.AgentEngine

Creates a new Agent Engine.

See more: vertexai.agent_engines.create
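
A minimal deployment sketch, assuming my_agent is a local object that implements one of the supported protocols (for example, Queryable) and that the pinned requirement reflects what the agent actually needs:

    from vertexai import agent_engines

    remote_agent = agent_engines.create(
        my_agent,                             # assumed local agent object (e.g. Queryable)
        requirements=["cloudpickle==3.0.0"],  # illustrative dependency pin
        display_name="My agent",
    )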

vertexai.agent_engines.delete

delete(resource_name: str, *, force: bool = False, **kwargs) -> None

Deletes an Agent Engine resource.

See more: vertexai.agent_engines.delete

vertexai.agent_engines.get

get(resource_name: str) -> vertexai.agent_engines.AgentEngine

Retrieves an Agent Engine resource.

See more: vertexai.agent_engines.get

vertexai.agent_engines.list

list(*, filter: str = "") -> typing.Iterable[vertexai.agent_engines.AgentEngine]

Lists all Agent Engine instances matching the filter.

See more: vertexai.agent_engines.list
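
The management helpers above can be combined; a short sketch in which the filter string and the decision to force-delete are illustrative:

    from vertexai import agent_engines

    for engine in agent_engines.list(filter='display_name="My agent"'):
        fetched = agent_engines.get(engine.resource_name)
        agent_engines.delete(fetched.resource_name, force=True)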

vertexai.agent_engines.update

update( resource_name: str, *, agent_engine: typing.Optional[ typing.Union[ vertexai.agent_engines.Queryable, vertexai.agent_engines.OperationRegistrable, ] ] = None, requirements: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, display_name: typing.Optional[str] = None, description: typing.Optional[str] = None, gcs_dir_name: typing.Optional[str] = None, extra_packages: typing.Optional[typing.Sequence[str]] = None, env_vars: typing.Optional[ typing.Union[ typing.Sequence[str], typing.Dict[ str, typing.Union[str, google.cloud.aiplatform_v1.types.env_var.SecretRef], ], ] ] = None, build_options: typing.Optional[typing.Dict[str, typing.Sequence[str]]] = None, service_account: typing.Optional[str] = None, psc_interface_config: typing.Optional[ google.cloud.aiplatform_v1.types.service_networking.PscInterfaceConfig ] = None, min_instances: typing.Optional[int] = None, max_instances: typing.Optional[int] = None, resource_limits: typing.Optional[typing.Dict[str, str]] = None, container_concurrency: typing.Optional[int] = None, encryption_spec: typing.Optional[ google.cloud.aiplatform_v1.types.encryption_spec.EncryptionSpec ] = None ) -> vertexai.agent_engines.AgentEngine

Updates an existing Agent Engine.

See more: vertexai.agent_engines.update

vertexai.preview.end_run

end_run( state: google.cloud.aiplatform_v1.types.execution.Execution.State = State.COMPLETE, )

Ends the current experiment run.

See more: vertexai.preview.end_run

vertexai.preview.get_experiment_df

get_experiment_df( experiment: typing.Optional[str] = None, *, include_time_series: bool = True ) -> pd.DataFrame

Returns a Pandas DataFrame of the parameters and metrics associated with one experiment.

See more: vertexai.preview.get_experiment_df

vertexai.preview.log_classification_metrics

log_classification_metrics( *, labels: typing.Optional[typing.List[str]] = None, matrix: typing.Optional[typing.List[typing.List[int]]] = None, fpr: typing.Optional[typing.List[float]] = None, tpr: typing.Optional[typing.List[float]] = None, threshold: typing.Optional[typing.List[float]] = None, display_name: typing.Optional[str] = None ) -> ( google.cloud.aiplatform.metadata.schema.google.artifact_schema.ClassificationMetrics )

Creates an artifact for classification metrics and logs it to the ExperimentRun.

See more: vertexai.preview.log_classification_metrics

vertexai.preview.log_metrics

log_metrics(metrics: typing.Dict[str, typing.Union[float, int, str]])

Logs one or more metrics with the specified key-value pairs.

See more: vertexai.preview.log_metrics

vertexai.preview.log_params

log_params(params: typing.Dict[str, typing.Union[float, int, str]])

Logs one or more parameters with the specified key-value pairs.

See more: vertexai.preview.log_params

vertexai.preview.log_time_series_metrics

log_time_series_metrics( metrics: typing.Dict[str, float], step: typing.Optional[int] = None, wall_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None, )

Logs time series metrics to this Experiment Run.

See more: vertexai.preview.log_time_series_metrics

vertexai.preview.start_run

start_run( run: str, *, tensorboard: typing.Optional[ typing.Union[ google.cloud.aiplatform.tensorboard.tensorboard_resource.Tensorboard, str ] ] = None, resume=False ) -> google.cloud.aiplatform.metadata.experiment_run_resource.ExperimentRun

Starts a run in the current session.

See more: vertexai.preview.start_run
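
Taken together, the experiment-tracking helpers above are typically used in a flow like the following sketch; the project, experiment, and run names are placeholders:

    import vertexai
    from vertexai import preview

    vertexai.init(
        project="my-project",
        location="us-central1",
        experiment="my-experiment",
    )

    preview.start_run("run-1")
    preview.log_params({"learning_rate": 0.01, "epochs": 10})
    preview.log_metrics({"accuracy": 0.93})
    preview.end_run()

    df = preview.get_experiment_df()  # parameters and metrics as a DataFrame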

vertexai.preview.prompts.create_version

create_version( prompt: vertexai.prompts._prompts.Prompt, prompt_id: typing.Optional[str] = None, version_name: typing.Optional[str] = None, ) -> vertexai.prompts._prompts.Prompt

Creates a Prompt or Prompt Version in the online prompt store.

See more: vertexai.preview.prompts.create_version

vertexai.preview.prompts.delete

delete(prompt_id: str) -> None

Deletes the online prompt resource associated with the prompt ID.

See more: vertexai.preview.prompts.delete

vertexai.preview.prompts.get

get( prompt_id: str, version_id: typing.Optional[str] = None ) -> vertexai.prompts._prompts.Prompt

Creates a Prompt object from an online resource.

See more: vertexai.preview.prompts.get

vertexai.preview.prompts.list

list() -> list[vertexai.prompts._prompt_management.PromptMetadata]

Lists all prompt resources in the online prompt store associated with the project.

See more: vertexai.preview.prompts.list

vertexai.preview.prompts.list_versions

list_versions( prompt_id: str, ) -> list[vertexai.prompts._prompt_management.PromptVersionMetadata]

Returns a list of PromptVersionMetadata objects for the prompt resource.

See more: vertexai.preview.prompts.list_versions

vertexai.preview.prompts.restore_version

restore_version( prompt_id: str, version_id: str ) -> vertexai.prompts._prompt_management.PromptVersionMetadata

Restores a previous version of the prompt resource and loads that version into the current Prompt object.

See more: vertexai.preview.prompts.restore_version
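
A version-management sketch for the preview prompts module; local_prompt is assumed to be a Prompt object created elsewhere, and the attributes used on the returned objects follow the metadata types listed above:

    from vertexai.preview import prompts

    saved = prompts.create_version(local_prompt, version_name="v1")

    fetched = prompts.get(saved.prompt_id)            # latest version by default
    for version in prompts.list_versions(saved.prompt_id):
        print(version.version_id)

    prompts.delete(saved.prompt_id)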

vertexai.preview.tuning.sft.preview_train

preview_train( *, source_model: typing.Union[ str, vertexai.generative_models.GenerativeModel, vertexai.preview.tuning._tuning.SourceModel, ], train_dataset: typing.Union[ str, google.cloud.aiplatform.preview.datasets.MultimodalDataset ], validation_dataset: typing.Optional[ typing.Union[str, google.cloud.aiplatform.preview.datasets.MultimodalDataset] ] = None, tuned_model_display_name: typing.Optional[str] = None, tuning_mode: typing.Optional[typing.Literal["FULL", "PEFT_ADAPTER"]] = None, epochs: typing.Optional[int] = None, learning_rate: typing.Optional[float] = None, learning_rate_multiplier: typing.Optional[float] = None, adapter_size: typing.Optional[typing.Literal[1, 4, 8, 16, 32]] = None, labels: typing.Optional[typing.Dict[str, str]] = None, output_uri: typing.Optional[str] = None ) -> vertexai.preview.tuning._supervised_tuning.SupervisedTuningJob

Tunes a model using supervised training.

See more: vertexai.preview.tuning.sft.preview_train

vertexai.preview.tuning.sft.rebase_tuned_model

rebase_tuned_model( tuned_model_ref: str, *, artifact_destination: typing.Optional[str] = None, deploy_to_same_endpoint: typing.Optional[bool] = False )

Re-runs fine-tuning on top of a new foundation model.

See more: vertexai.preview.tuning.sft.rebase_tuned_model

vertexai.preview.tuning.sft.train

train( *, source_model: typing.Union[str, vertexai.generative_models.GenerativeModel], train_dataset: typing.Union[ str, google.cloud.aiplatform.preview.datasets.MultimodalDataset ], validation_dataset: typing.Optional[ typing.Union[str, google.cloud.aiplatform.preview.datasets.MultimodalDataset] ] = None, tuned_model_display_name: typing.Optional[str] = None, epochs: typing.Optional[int] = None, learning_rate_multiplier: typing.Optional[float] = None, adapter_size: typing.Optional[typing.Literal[1, 4, 8, 16, 32]] = None, labels: typing.Optional[typing.Dict[str, str]] = None ) -> vertexai.tuning._supervised_tuning.SupervisedTuningJob

Tunes a model using supervised training.

See more: vertexai.preview.tuning.sft.train
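
A supervised-tuning sketch; the source model name and dataset URI are placeholders:

    from vertexai.preview.tuning import sft

    tuning_job = sft.train(
        source_model="gemini-2.0-flash-001",          # placeholder model name
        train_dataset="gs://my-bucket/train.jsonl",   # placeholder JSONL dataset
        tuned_model_display_name="my-tuned-model",
    )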

vertexai.prompts._prompt_management.create_version

create_version( prompt: vertexai.prompts._prompts.Prompt, prompt_id: typing.Optional[str] = None, version_name: typing.Optional[str] = None, ) -> vertexai.prompts._prompts.Prompt

Creates a Prompt or Prompt Version in the online prompt store.

See more: vertexai.prompts._prompt_management.create_version

vertexai.prompts._prompt_management.delete

delete(prompt_id: str) -> None

Deletes the online prompt resource associated with the prompt ID.

See more: vertexai.prompts._prompt_management.delete

vertexai.prompts._prompt_management.get

get( prompt_id: str, version_id: typing.Optional[str] = None ) -> vertexai.prompts._prompts.Prompt

Creates a Prompt object from an online resource.

See more: vertexai.prompts._prompt_management.get

vertexai.prompts._prompt_management.list_prompts

list_prompts() -> list[vertexai.prompts._prompt_management.PromptMetadata]

Lists all prompt resources in the online prompt store associated with the project.

See more: vertexai.prompts._prompt_management.list_prompts

vertexai.prompts._prompt_management.list_versions

list_versions( prompt_id: str, ) -> list[vertexai.prompts._prompt_management.PromptVersionMetadata]

Returns a list of PromptVersionMetadata objects for the prompt resource.

See more: vertexai.prompts._prompt_management.list_versions

vertexai.prompts._prompt_management.restore_version

restore_version( prompt_id: str, version_id: str ) -> vertexai.prompts._prompt_management.PromptVersionMetadata

Restores a previous version of the prompt resource and loads that version into the current Prompt object.

See more: vertexai.prompts._prompt_management.restore_version

vertexai.Client

Client( *, api_key: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, project: typing.Optional[str] = None, location: typing.Optional[str] = None, debug_config: typing.Optional[google.genai.client.DebugConfig] = None, http_options: typing.Optional[ typing.Union[google.genai.types.HttpOptions, google.genai.types.HttpOptionsDict] ] = None )

Initializes the client.

See more: vertexai.Client
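
A minimal client sketch; the project and location are placeholders:

    import vertexai

    client = vertexai.Client(project="my-project", location="us-central1")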

vertexai._genai.agent_engines.AgentEngines.append_session_event

append_session_event( *, name: str, author: str, invocation_id: str, timestamp: datetime.datetime, config: typing.Optional[ typing.Union[ vertexai._genai.types.AppendAgentEngineSessionEventConfig, vertexai._genai.types.AppendAgentEngineSessionEventConfigDict, ] ] = None ) -> vertexai._genai.types.AppendAgentEngineSessionEventResponse

vertexai._genai.agent_engines.AgentEngines.create

create( *, agent_engine: typing.Optional[typing.Any] = None, agent: typing.Optional[typing.Any] = None, config: typing.Optional[ typing.Union[ vertexai._genai.types.AgentEngineConfig, vertexai._genai.types.AgentEngineConfigDict, ] ] = None ) -> vertexai._genai.types.AgentEngine

Creates an agent engine.

See more: vertexai._genai.agent_engines.AgentEngines.create
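
A creation sketch, assuming these methods are reached through a vertexai.Client instance as client.agent_engines, that my_agent is a local agent object, and that the config field shown is illustrative:

    engine = client.agent_engines.create(
        agent=my_agent,                        # assumed local agent object
        config={"display_name": "My agent"},   # AgentEngineConfig-style dict (illustrative field)
    )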

vertexai._genai.agent_engines.AgentEngines.create_memory

create_memory( *, name: str, fact: str, scope: dict[str, str], config: typing.Optional[ typing.Union[ vertexai._genai.types.AgentEngineMemoryConfig, vertexai._genai.types.AgentEngineMemoryConfigDict, ] ] = None ) -> vertexai._genai.types.AgentEngineMemoryOperation

vertexai._genai.agent_engines.AgentEngines.create_session

create_session( *, name: str, user_id: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.CreateAgentEngineSessionConfig, vertexai._genai.types.CreateAgentEngineSessionConfigDict, ] ] = None ) -> vertexai._genai.types.AgentEngineSessionOperation

vertexai._genai.agent_engines.AgentEngines.delete

delete( *, name: str, force: typing.Optional[bool] = None, config: typing.Optional[ typing.Union[ vertexai._genai.types.DeleteAgentEngineConfig, vertexai._genai.types.DeleteAgentEngineConfigDict, ] ] = None ) -> vertexai._genai.types.DeleteAgentEngineOperation

Deletes an Agent Engine resource.

See more: vertexai._genai.agent_engines.AgentEngines.delete

vertexai._genai.agent_engines.AgentEngines.delete_memory

delete_memory( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.DeleteAgentEngineMemoryConfig, vertexai._genai.types.DeleteAgentEngineMemoryConfigDict, ] ] = None ) -> vertexai._genai.types.DeleteAgentEngineMemoryOperation

vertexai._genai.agent_engines.AgentEngines.delete_session

delete_session( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.DeleteAgentEngineSessionConfig, vertexai._genai.types.DeleteAgentEngineSessionConfigDict, ] ] = None ) -> vertexai._genai.types.DeleteAgentEngineSessionOperation

vertexai._genai.agent_engines.AgentEngines.generate_memories

generate_memories( *, name: str, vertex_session_source: typing.Optional[ typing.Union[ vertexai._genai.types.GenerateMemoriesRequestVertexSessionSource, vertexai._genai.types.GenerateMemoriesRequestVertexSessionSourceDict, ] ] = None, direct_contents_source: typing.Optional[ typing.Union[ vertexai._genai.types.GenerateMemoriesRequestDirectContentsSource, vertexai._genai.types.GenerateMemoriesRequestDirectContentsSourceDict, ] ] = None, direct_memories_source: typing.Optional[ typing.Union[ vertexai._genai.types.GenerateMemoriesRequestDirectMemoriesSource, vertexai._genai.types.GenerateMemoriesRequestDirectMemoriesSourceDict, ] ] = None, scope: typing.Optional[dict[str, str]] = None, config: typing.Optional[ typing.Union[ vertexai._genai.types.GenerateAgentEngineMemoriesConfig, vertexai._genai.types.GenerateAgentEngineMemoriesConfigDict, ] ] = None ) -> vertexai._genai.types.AgentEngineGenerateMemoriesOperation

vertexai._genai.agent_engines.AgentEngines.get

get( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.GetAgentEngineConfig, vertexai._genai.types.GetAgentEngineConfigDict, ] ] = None ) -> vertexai._genai.types.AgentEngine

Gets an agent engine.

See more: vertexai._genai.agent_engines.AgentEngines.get

vertexai._genai.agent_engines.AgentEngines.get_memory

get_memory( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.GetAgentEngineMemoryConfig, vertexai._genai.types.GetAgentEngineMemoryConfigDict, ] ] = None ) -> vertexai._genai.types.Memory

vertexai._genai.agent_engines.AgentEngines.get_session

get_session( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.GetAgentEngineSessionConfig, vertexai._genai.types.GetAgentEngineSessionConfigDict, ] ] = None ) -> vertexai._genai.types.Session

vertexai._genai.agent_engines.AgentEngines.list

list( *, config: typing.Optional[ typing.Union[ vertexai._genai.types.ListAgentEngineConfig, vertexai._genai.types.ListAgentEngineConfigDict, ] ] = None ) -> typing.Iterator[vertexai._genai.types.AgentEngine]

Lists all Agent Engine instances matching the filter.

See more: vertexai._genai.agent_engines.AgentEngines.list

vertexai._genai.agent_engines.AgentEngines.list_memories

list_memories( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.ListAgentEngineMemoryConfig, vertexai._genai.types.ListAgentEngineMemoryConfigDict, ] ] = None ) -> typing.Iterator[vertexai._genai.types.Memory]

vertexai._genai.agent_engines.AgentEngines.list_session_events

list_session_events( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.ListAgentEngineSessionEventsConfig, vertexai._genai.types.ListAgentEngineSessionEventsConfigDict, ] ] = None ) -> typing.Iterator[vertexai._genai.types.SessionEvent]

vertexai._genai.agent_engines.AgentEngines.list_sessions

list_sessions( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.ListAgentEngineSessionsConfig, vertexai._genai.types.ListAgentEngineSessionsConfigDict, ] ] = None ) -> typing.Iterator[vertexai._genai.types.Session]

vertexai._genai.agent_engines.AgentEngines.retrieve_memories

retrieve_memories( *, name: str, scope: dict[str, str], similarity_search_params: typing.Optional[ typing.Union[ vertexai._genai.types.RetrieveMemoriesRequestSimilaritySearchParams, vertexai._genai.types.RetrieveMemoriesRequestSimilaritySearchParamsDict, ] ] = None, simple_retrieval_params: typing.Optional[ typing.Union[ vertexai._genai.types.RetrieveMemoriesRequestSimpleRetrievalParams, vertexai._genai.types.RetrieveMemoriesRequestSimpleRetrievalParamsDict, ] ] = None, config: typing.Optional[ typing.Union[ vertexai._genai.types.RetrieveAgentEngineMemoriesConfig, vertexai._genai.types.RetrieveAgentEngineMemoriesConfigDict, ] ] = None ) -> typing.Iterator[vertexai._genai.types.RetrieveMemoriesResponseRetrievedMemory]

vertexai._genai.agent_engines.AgentEngines.update

update( *, name: str, agent: typing.Optional[typing.Any] = None, agent_engine: typing.Optional[typing.Any] = None, config: typing.Union[ vertexai._genai.types.AgentEngineConfig, vertexai._genai.types.AgentEngineConfigDict, ] ) -> vertexai._genai.types.AgentEngine

Updates an existing Agent Engine.

See more: vertexai._genai.agent_engines.AgentEngines.update

vertexai._genai.agent_engines.AsyncAgentEngines.append_session_event

append_session_event( *, name: str, author: str, invocation_id: str, timestamp: datetime.datetime, config: typing.Optional[ typing.Union[ vertexai._genai.types.AppendAgentEngineSessionEventConfig, vertexai._genai.types.AppendAgentEngineSessionEventConfigDict, ] ] = None ) -> vertexai._genai.types.AppendAgentEngineSessionEventResponse

vertexai._genai.agent_engines.AsyncAgentEngines.delete

delete( *, name: str, force: typing.Optional[bool] = None, config: typing.Optional[ typing.Union[ vertexai._genai.types.DeleteAgentEngineConfig, vertexai._genai.types.DeleteAgentEngineConfigDict, ] ] = None ) -> vertexai._genai.types.DeleteAgentEngineOperation

Deletes an Agent Engine resource.

See more: vertexai._genai.agent_engines.AsyncAgentEngines.delete

vertexai._genai.agent_engines.AsyncAgentEngines.delete_memory

delete_memory( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.DeleteAgentEngineMemoryConfig, vertexai._genai.types.DeleteAgentEngineMemoryConfigDict, ] ] = None ) -> vertexai._genai.types.DeleteAgentEngineMemoryOperation

vertexai._genai.agent_engines.AsyncAgentEngines.delete_session

delete_session( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.DeleteAgentEngineSessionConfig, vertexai._genai.types.DeleteAgentEngineSessionConfigDict, ] ] = None ) -> vertexai._genai.types.DeleteAgentEngineSessionOperation

vertexai._genai.agent_engines.AsyncAgentEngines.get_memory

get_memory( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.GetAgentEngineMemoryConfig, vertexai._genai.types.GetAgentEngineMemoryConfigDict, ] ] = None ) -> vertexai._genai.types.Memory

vertexai._genai.agent_engines.AsyncAgentEngines.get_session

get_session( *, name: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.GetAgentEngineSessionConfig, vertexai._genai.types.GetAgentEngineSessionConfigDict, ] ] = None ) -> vertexai._genai.types.Session

vertexai._genai.evals.AsyncEvals.batch_evaluate

batch_evaluate( *, dataset: typing.Union[ vertexai._genai.types.EvaluationDataset, vertexai._genai.types.EvaluationDatasetDict, ], metrics: list[ typing.Union[vertexai._genai.types.Metric, vertexai._genai.types.MetricDict] ], dest: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.EvaluateDatasetConfig, vertexai._genai.types.EvaluateDatasetConfigDict, ] ] = None ) -> vertexai._genai.types.EvaluateDatasetOperation

Evaluates a dataset based on a set of given metrics.

See more: vertexai._genai.evals.AsyncEvals.batch_evaluate

vertexai._genai.evals.AsyncEvals.evaluate_instances

evaluate_instances( *, metric_config: vertexai._genai.types._EvaluateInstancesRequestParameters ) -> vertexai._genai.types.EvaluateInstancesResponse

Evaluates an instance of a model.

See more: vertexai._genai.evals.AsyncEvals.evaluate_instances

vertexai._genai.evals.Evals.batch_evaluate

batch_evaluate( *, dataset: typing.Union[ vertexai._genai.types.EvaluationDataset, vertexai._genai.types.EvaluationDatasetDict, ], metrics: list[ typing.Union[vertexai._genai.types.Metric, vertexai._genai.types.MetricDict] ], dest: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.EvaluateDatasetConfig, vertexai._genai.types.EvaluateDatasetConfigDict, ] ] = None ) -> vertexai._genai.types.EvaluateDatasetOperation

Evaluates a dataset based on a set of given metrics.

See more: vertexai._genai.evals.Evals.batch_evaluate

vertexai._genai.evals.Evals.evaluate

evaluate( *, dataset: typing.Union[ vertexai._genai.types.EvaluationDataset, vertexai._genai.types.EvaluationDatasetDict, list[ typing.Union[ vertexai._genai.types.EvaluationDataset, vertexai._genai.types.EvaluationDatasetDict, ] ], ], metrics: typing.Optional[ list[ typing.Union[vertexai._genai.types.Metric, vertexai._genai.types.MetricDict] ] ] = None, config: typing.Optional[ typing.Union[ vertexai._genai.types.EvaluateMethodConfig, vertexai._genai.types.EvaluateMethodConfigDict, ] ] = None ) -> vertexai._genai.types.EvaluationResult

Evaluates candidate responses in the provided dataset(s) using the specified metrics.

See more: vertexai._genai.evals.Evals.evaluate

vertexai._genai.evals.Evals.evaluate_instances

evaluate_instances( *, metric_config: vertexai._genai.types._EvaluateInstancesRequestParameters ) -> vertexai._genai.types.EvaluateInstancesResponse

Evaluates an instance of a model.

See more: vertexai._genai.evals.Evals.evaluate_instances

vertexai._genai.evals.Evals.generate_rubrics

generate_rubrics( *, src: typing.Union[str, pd.DataFrame, vertexai._genai.types.EvaluationDataset], rubric_group_name: str, prompt_template: typing.Optional[str] = None, generator_model_config: typing.Optional[genai_types.AutoraterConfigOrDict] = None, rubric_content_type: typing.Optional[types.RubricContentType] = None, rubric_type_ontology: typing.Optional[list[str]] = None, predefined_spec_name: typing.Optional[ typing.Union[str, types.PrebuiltMetric] ] = None, metric_spec_parameters: typing.Optional[dict[str, typing.Any]] = None, config: typing.Optional[ typing.Union[ vertexai._genai.types.RubricGenerationConfig, vertexai._genai.types.RubricGenerationConfigDict, ] ] = None ) -> vertexai._genai.types.EvaluationDataset

Generates rubrics for each prompt in the source and adds them as a new column structured as a dictionary.

See more: vertexai._genai.evals.Evals.generate_rubrics

vertexai._genai.evals.Evals.run

run() -> vertexai._genai.types.EvaluateInstancesResponse

Evaluates an instance of a model.

See more: vertexai._genai.evals.Evals.run

vertexai._genai.evals.Evals.run_inference

run_inference( *, model: typing.Union[str, typing.Callable[[typing.Any], typing.Any]], src: typing.Union[ str, pandas.core.frame.DataFrame, vertexai._genai.types.EvaluationDataset ], config: typing.Optional[ typing.Union[ vertexai._genai.types.EvalRunInferenceConfig, vertexai._genai.types.EvalRunInferenceConfigDict, ] ] = None ) -> vertexai._genai.types.EvaluationDataset

Runs inference on a dataset for evaluation.

See more: vertexai._genai.evals.Evals.run_inference
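
A two-step evaluation sketch, assuming these methods are reached through a vertexai.Client instance as client.evals; the model name and dataset URI are placeholders:

    inference_result = client.evals.run_inference(
        model="gemini-2.0-flash-001",          # placeholder model name
        src="gs://my-bucket/prompts.jsonl",    # placeholder prompt dataset
    )
    eval_result = client.evals.evaluate(dataset=inference_result)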

vertexai._genai.prompt_management.AsyncPromptManagement.create_version

create_version( *, prompt: typing.Union[ vertexai._genai.types.Prompt, vertexai._genai.types.PromptDict ], config: typing.Optional[ typing.Union[ vertexai._genai.types.CreatePromptConfig, vertexai._genai.types.CreatePromptConfigDict, ] ] = None ) -> vertexai._genai.types.Prompt

Creates a new version of a prompt in a Vertex Dataset resource.

See more: vertexai._genai.prompt_management.AsyncPromptManagement.create_version

vertexai._genai.prompt_management.AsyncPromptManagement.delete_prompt

delete_prompt( *, prompt_id: str, config: typing.Optional[vertexai._genai.types.DeletePromptConfig] = None ) -> None

vertexai._genai.prompt_management.AsyncPromptManagement.delete_version

delete_version( *, prompt_id: str, version_id: str, config: typing.Optional[vertexai._genai.types.DeletePromptConfig] = None ) -> None

vertexai._genai.prompt_management.AsyncPromptManagement.get

get( *, prompt_id: str, config: typing.Optional[vertexai._genai.types.GetPromptConfig] = None ) -> vertexai._genai.types.Prompt

Gets a prompt resource from a Vertex Dataset.

See more: vertexai._genai.prompt_management.AsyncPromptManagement.get

vertexai._genai.prompt_management.AsyncPromptManagement.list_prompts

list_prompts( *, config: typing.Optional[ typing.Union[ vertexai._genai.types.ListPromptsConfig, vertexai._genai.types.ListPromptsConfigDict, ] ] = None ) -> typing.AsyncIterator[vertexai._genai.types.PromptRef]

Lists prompt resources in a project.

See more: vertexai._genai.prompt_management.AsyncPromptManagement.list_prompts

vertexai._genai.prompt_management.AsyncPromptManagement.list_versions

list_versions( *, prompt_id: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.ListPromptsConfig, vertexai._genai.types.ListPromptsConfigDict, ] ] = None ) -> typing.AsyncIterator[vertexai._genai.types.PromptVersionRef]

Lists prompt version resources for a provided prompt_id.

See more: vertexai._genai.prompt_management.AsyncPromptManagement.list_versions

vertexai._genai.prompt_management.AsyncPromptManagement.restore_version

restore_version( *, prompt_id: str, version_id: str, config: typing.Optional[vertexai._genai.types.RestoreVersionConfig] = None ) -> vertexai._genai.types.Prompt

Restores the provided prompt version to the latest version.

See more: vertexai._genai.prompt_management.AsyncPromptManagement.restore_version

vertexai._genai.prompt_management.PromptManagement.create_version

create_version( *, prompt: typing.Union[ vertexai._genai.types.Prompt, vertexai._genai.types.PromptDict ], config: typing.Optional[ typing.Union[ vertexai._genai.types.CreatePromptConfig, vertexai._genai.types.CreatePromptConfigDict, ] ] = None ) -> vertexai._genai.types.Prompt

Creates a new version of a prompt in a Vertex Dataset resource.

See more: vertexai._genai.prompt_management.PromptManagement.create_version

vertexai._genai.prompt_management.PromptManagement.delete_prompt

delete_prompt( *, prompt_id: str, config: typing.Optional[vertexai._genai.types.DeletePromptConfig] = None ) -> None

vertexai._genai.prompt_management.PromptManagement.delete_version

delete_version( *, prompt_id: str, version_id: str, config: typing.Optional[vertexai._genai.types.DeletePromptConfig] = None ) -> None

Deletes a prompt version resource.

See more: vertexai._genai.prompt_management.PromptManagement.delete_version

vertexai._genai.prompt_management.PromptManagement.get

get( *, prompt_id: str, config: typing.Optional[vertexai._genai.types.GetPromptConfig] = None ) -> vertexai._genai.types.Prompt

Gets a prompt resource from a Vertex Dataset.

See more: vertexai._genai.prompt_management.PromptManagement.get

vertexai._genai.prompt_management.PromptManagement.list_prompts

list_prompts( *, config: typing.Optional[ typing.Union[ vertexai._genai.types.ListPromptsConfig, vertexai._genai.types.ListPromptsConfigDict, ] ] = None ) -> typing.Iterator[vertexai._genai.types.PromptRef]

Lists prompt resources in a project.

See more: vertexai._genai.prompt_management.PromptManagement.list_prompts

vertexai._genai.prompt_management.PromptManagement.list_versions

list_versions( *, prompt_id: str, config: typing.Optional[ typing.Union[ vertexai._genai.types.ListPromptsConfig, vertexai._genai.types.ListPromptsConfigDict, ] ] = None ) -> typing.Iterator[vertexai._genai.types.PromptVersionRef]

Lists prompt version resources for a provided prompt_id.

See more: vertexai._genai.prompt_management.PromptManagement.list_versions

vertexai._genai.prompt_management.PromptManagement.restore_version

restore_version( *, prompt_id: str, version_id: str, config: typing.Optional[vertexai._genai.types.RestoreVersionConfig] = None ) -> vertexai._genai.types.Prompt

Restores the provided prompt version to the latest version.

See more: vertexai._genai.prompt_management.PromptManagement.restore_version

vertexai._genai.prompt_optimizer.AsyncPromptOptimizer.optimize

optimize( method: str, config: typing.Union[ vertexai._genai.types.PromptOptimizerVAPOConfig, vertexai._genai.types.PromptOptimizerVAPOConfigDict, ], ) -> vertexai._genai.types.CustomJob

Calls the asynchronous Vertex AI Prompt Optimizer (VAPO).

See more: vertexai._genai.prompt_optimizer.AsyncPromptOptimizer.optimize

vertexai._genai.prompt_optimizer.AsyncPromptOptimizer.optimize_prompt

optimize_prompt( *, prompt: str, config: typing.Optional[vertexai._genai.types.OptimizeConfig] = None ) -> vertexai._genai.types.OptimizeResponse

Makes an async request to _optimize_prompt and returns an optimized prompt.

See more: vertexai._genai.prompt_optimizer.AsyncPromptOptimizer.optimize_prompt

vertexai._genai.prompt_optimizer.PromptOptimizer.optimize

optimize( method: str, config: typing.Union[ vertexai._genai.types.PromptOptimizerVAPOConfig, vertexai._genai.types.PromptOptimizerVAPOConfigDict, ], ) -> vertexai._genai.types.CustomJob

vertexai._genai.prompt_optimizer.PromptOptimizer.optimize_prompt

optimize_prompt( *, prompt: str, config: typing.Optional[vertexai._genai.types.OptimizeConfig] = None ) -> vertexai._genai.types.OptimizeResponse

Makes an API request to _optimize_prompt and returns the parsed response.

See more: vertexai._genai.prompt_optimizer.PromptOptimizer.optimize_prompt
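
A single-prompt optimization sketch, assuming the method is reached through a vertexai.Client instance as client.prompt_optimizer; the prompt text is illustrative:

    response = client.prompt_optimizer.optimize_prompt(
        prompt="Summarize the following support ticket in one sentence.",
    )
    print(response)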

vertexai.agent_engines.AG2Agent

AG2Agent( model: str, runnable_name: str, *, api_type: typing.Optional[str] = None, llm_config: typing.Optional[typing.Mapping[str, typing.Any]] = None, system_instruction: typing.Optional[str] = None, runnable_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, runnable_builder: typing.Optional[typing.Callable[[...], ConversableAgent]] = None, tools: typing.Optional[typing.Sequence[typing.Callable[[...], typing.Any]]] = None, enable_tracing: bool = False, instrumentor_builder: typing.Optional[typing.Callable[[...], typing.Any]] = None )

Initializes the AG2 Agent.

See more: vertexai.agent_engines.AG2Agent

vertexai.agent_engines.AG2Agent.clone

clone() -> vertexai.agent_engines.templates.ag2.AG2Agent

Returns a clone of the AG2Agent.

See more: vertexai.agent_engines.AG2Agent.clone

vertexai.agent_engines.AG2Agent.query

query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], max_turns: typing.Optional[int] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Queries the Agent with the given input.

See more: vertexai.agent_engines.AG2Agent.query

vertexai.agent_engines.AG2Agent.set_up

set_up()

Sets up the agent for execution of queries at runtime.

See more: vertexai.agent_engines.AG2Agent.set_up
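
A local AG2Agent sketch; the model and runnable names are illustrative:

    from vertexai import agent_engines

    agent = agent_engines.AG2Agent(
        model="gemini-2.0-flash-001",   # placeholder model name
        runnable_name="Assistant",      # placeholder ConversableAgent name
    )
    agent.set_up()
    response = agent.query(input="What is the exchange rate from US dollars to euros?")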

vertexai.agent_engines.AdkApp

AdkApp( *, agent: BaseAgent, app_name: typing.Optional[str] = None, plugins: typing.Optional[typing.List[BasePlugin]] = None, enable_tracing: bool = False, session_service_builder: typing.Optional[ typing.Callable[[...], BaseSessionService] ] = None, artifact_service_builder: typing.Optional[ typing.Callable[[...], BaseArtifactService] ] = None, memory_service_builder: typing.Optional[ typing.Callable[[...], BaseMemoryService] ] = None, instrumentor_builder: typing.Optional[typing.Callable[[...], typing.Any]] = None )

An ADK Application.

See more: vertexai.agent_engines.AdkApp

vertexai.agent_engines.AdkApp.async_add_session_to_memory

async_add_session_to_memory(*, session: typing.Dict[str, typing.Any])

vertexai.agent_engines.AdkApp.async_create_session

async_create_session( *, user_id: str, session_id: typing.Optional[str] = None, state: typing.Optional[typing.Dict[str, typing.Any]] = None, **kwargs )

Creates a new session.

See more: vertexai.agent_engines.AdkApp.async_create_session

vertexai.agent_engines.AdkApp.async_delete_session

async_delete_session(*, user_id: str, session_id: str, **kwargs)

Deletes a session for the given user.

See more: vertexai.agent_engines.AdkApp.async_delete_session

vertexai.agent_engines.AdkApp.async_get_session

async_get_session(*, user_id: str, session_id: str, **kwargs)

Gets a session for the given user.

See more: vertexai.agent_engines.AdkApp.async_get_session

vertexai.agent_engines.AdkApp.async_list_sessions

async_list_sessions(*, user_id: str, **kwargs)

Lists sessions for the given user.

See more: vertexai.agent_engines.AdkApp.async_list_sessions

vertexai.agent_engines.AdkApp.async_search_memory

async_search_memory(*, user_id: str, query: str)

Searches memories for the given user.

See more: vertexai.agent_engines.AdkApp.async_search_memory

vertexai.agent_engines.AdkApp.async_stream_query

async_stream_query( *, message: typing.Union[str, typing.Dict[str, typing.Any]], user_id: str, session_id: typing.Optional[str] = None, run_config: typing.Optional[typing.Dict[str, typing.Any]] = None, **kwargs ) -> typing.AsyncIterable[typing.Dict[str, typing.Any]]

Streams responses asynchronously from the ADK application.

See more: vertexai.agent_engines.AdkApp.async_stream_query

vertexai.agent_engines.AdkApp.clone

clone()

Returns a clone of the ADK application.

See more: vertexai.agent_engines.AdkApp.clone

vertexai.agent_engines.AdkApp.register_operations

register_operations() -> typing.Dict[str, typing.List[str]]

Registers the operations of the ADK application.

See more: vertexai.agent_engines.AdkApp.register_operations

vertexai.agent_engines.AdkApp.set_up

set_up()

Sets up the ADK application.

See more: vertexai.agent_engines.AdkApp.set_up

vertexai.agent_engines.AdkApp.streaming_agent_run_with_events

streaming_agent_run_with_events(request_json: str)

Streams responses asynchronously from the ADK application.

See more: vertexai.agent_engines.AdkApp.streaming_agent_run_with_events
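
A local AdkApp sketch, assuming root_agent is a google.adk BaseAgent defined elsewhere; the user ID and message are illustrative:

    import asyncio

    from vertexai import agent_engines

    app = agent_engines.AdkApp(agent=root_agent)  # root_agent assumed defined elsewhere

    async def main():
        async for event in app.async_stream_query(user_id="user-1", message="Hello"):
            print(event)

    asyncio.run(main())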

vertexai.agent_engines.AgentEngine

AgentEngine(resource_name: str)

Retrieves an Agent Engine resource.

See more: vertexai.agent_engines.AgentEngine

vertexai.agent_engines.AgentEngine.create

create( agent_engine: typing.Union[ None, vertexai.agent_engines.AsyncQueryable, vertexai.agent_engines.AsyncStreamQueryable, vertexai.agent_engines._agent_engines.BidiStreamQueryable, vertexai.agent_engines.OperationRegistrable, vertexai.agent_engines.Queryable, vertexai.agent_engines.StreamQueryable, ] = None, *, requirements: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, display_name: typing.Optional[str] = None, description: typing.Optional[str] = None, gcs_dir_name: typing.Optional[str] = None, extra_packages: typing.Optional[typing.Sequence[str]] = None, env_vars: typing.Optional[ typing.Union[ typing.Sequence[str], typing.Dict[ str, typing.Union[str, google.cloud.aiplatform_v1.types.env_var.SecretRef], ], ] ] = None, build_options: typing.Optional[typing.Dict[str, typing.Sequence[str]]] = None, service_account: typing.Optional[str] = None, psc_interface_config: typing.Optional[ google.cloud.aiplatform_v1.types.service_networking.PscInterfaceConfig ] = None, min_instances: typing.Optional[int] = None, max_instances: typing.Optional[int] = None, resource_limits: typing.Optional[typing.Dict[str, str]] = None, container_concurrency: typing.Optional[int] = None, encryption_spec: typing.Optional[ google.cloud.aiplatform_v1.types.encryption_spec.EncryptionSpec ] = None ) -> vertexai.agent_engines.AgentEngine

Creates a new Agent Engine.

See more: vertexai.agent_engines.AgentEngine.create

vertexai.agent_engines.AgentEngine.delete

delete(*, force: bool = False, **kwargs) -> None

Deletes the ReasoningEngine.

See more: vertexai.agent_engines.AgentEngine.delete

vertexai.agent_engines.AgentEngine.list

list( filter: typing.Optional[str] = None, order_by: typing.Optional[str] = None, project: typing.Optional[str] = None, location: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, parent: typing.Optional[str] = None, ) -> typing.List[google.cloud.aiplatform.base.VertexAiResourceNoun]

Lists all instances of this Vertex AI resource.

See more: vertexai.agent_engines.AgentEngine.list

vertexai.agent_engines.AgentEngine.operation_schemas

operation_schemas() -> typing.Sequence[typing.Dict[str, typing.Any]]

Returns the (Open)API schemas for the Agent Engine.

See more: vertexai.agent_engines.AgentEngine.operation_schemas

vertexai.agent_engines.AgentEngine.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.agent_engines.AgentEngine.to_dict

vertexai.agent_engines.AgentEngine.update

update( *, agent_engine: typing.Union[ None, vertexai.agent_engines.AsyncQueryable, vertexai.agent_engines.AsyncStreamQueryable, vertexai.agent_engines._agent_engines.BidiStreamQueryable, vertexai.agent_engines.OperationRegistrable, vertexai.agent_engines.Queryable, vertexai.agent_engines.StreamQueryable, ] = None, requirements: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, display_name: typing.Optional[str] = None, description: typing.Optional[str] = None, gcs_dir_name: typing.Optional[str] = None, extra_packages: typing.Optional[typing.Sequence[str]] = None, env_vars: typing.Optional[ typing.Union[ typing.Sequence[str], typing.Dict[ str, typing.Union[str, google.cloud.aiplatform_v1.types.env_var.SecretRef], ], ] ] = None, build_options: typing.Optional[typing.Dict[str, typing.Sequence[str]]] = None, service_account: typing.Optional[str] = None, psc_interface_config: typing.Optional[ google.cloud.aiplatform_v1.types.service_networking.PscInterfaceConfig ] = None, min_instances: typing.Optional[int] = None, max_instances: typing.Optional[int] = None, resource_limits: typing.Optional[typing.Dict[str, str]] = None, container_concurrency: typing.Optional[int] = None, encryption_spec: typing.Optional[ google.cloud.aiplatform_v1.types.encryption_spec.EncryptionSpec ] = None ) -> vertexai.agent_engines.AgentEngine

Updates an existing Agent Engine.

See more: vertexai.agent_engines.AgentEngine.update

vertexai.agent_engines.AgentEngine.wait

wait()

Helper method that blocks until all futures are complete.

See more: vertexai.agent_engines.AgentEngine.wait

vertexai.agent_engines.AsyncQueryable.async_query

async_query(**kwargs) -> typing.Coroutine[typing.Any, typing.Any, typing.Any]

Runs the Agent Engine to serve the user query asynchronously.

See more: vertexai.agent_engines.AsyncQueryable.async_query

vertexai.agent_engines.AsyncStreamQueryable.async_stream_query

async_stream_query(**kwargs) -> typing.AsyncIterable[typing.Any]

Asynchronously streams responses to serve the user query.

See more: vertexai.agent_engines.AsyncStreamQueryable.async_stream_query

vertexai.agent_engines.Cloneable.clone

clone() -> typing.Any

Returns a clone of the object.

See more: vertexai.agent_engines.Cloneable.clone

vertexai.agent_engines.LangchainAgent

LangchainAgent( model: str, *, system_instruction: typing.Optional[str] = None, prompt: typing.Optional[RunnableSerializable] = None, tools: typing.Optional[typing.Sequence[_ToolLike]] = None, output_parser: typing.Optional[RunnableSerializable] = None, chat_history: typing.Optional[GetSessionHistoryCallable] = None, model_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_tool_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, agent_executor_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, runnable_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_builder: typing.Optional[typing.Callable] = None, runnable_builder: typing.Optional[typing.Callable] = None, enable_tracing: bool = False, instrumentor_builder: typing.Optional[typing.Callable[[...], typing.Any]] = None )

Initializes the LangchainAgent.

See more: vertexai.agent_engines.LangchainAgent

vertexai.agent_engines.LangchainAgent.clone

clone() -> vertexai.agent_engines.templates.langchain.LangchainAgent

Returns a clone of the LangchainAgent.

See more: vertexai.agent_engines.LangchainAgent.clone

vertexai.agent_engines.LangchainAgent.query

query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Queries the Agent with the given input and config.

See more: vertexai.agent_engines.LangchainAgent.query

vertexai.agent_engines.LangchainAgent.set_up

set_up()

Sets up the agent for execution of queries at runtime.

See more: vertexai.agent_engines.LangchainAgent.set_up

vertexai.agent_engines.LangchainAgent.stream_query

stream_query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], config: typing.Optional[RunnableConfig] = None, **kwargs ) -> typing.Iterable[typing.Any]

Streams query responses from the Agent for the given input and config.

See more: vertexai.agent_engines.LangchainAgent.stream_query
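
A local LangchainAgent sketch; the model name is a placeholder and get_exchange_rate is a hypothetical tool used only for illustration:

    from vertexai import agent_engines

    def get_exchange_rate(currency_from: str, currency_to: str) -> str:
        """Hypothetical tool used only for illustration."""
        return f"1 {currency_from} = 0.9 {currency_to}"

    agent = agent_engines.LangchainAgent(
        model="gemini-2.0-flash-001",   # placeholder model name
        tools=[get_exchange_rate],
    )
    response = agent.query(input="What is the exchange rate from USD to EUR?")
    for chunk in agent.stream_query(input="And from USD to JPY?"):
        print(chunk)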

vertexai.agent_engines.LanggraphAgent

LanggraphAgent( model: str, *, tools: typing.Optional[typing.Sequence[_ToolLike]] = None, model_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_tool_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_builder: typing.Optional[typing.Callable[[...], BaseLanguageModel]] = None, runnable_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, runnable_builder: typing.Optional[ typing.Callable[[...], RunnableSerializable] ] = None, checkpointer_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, checkpointer_builder: typing.Optional[ typing.Callable[[...], BaseCheckpointSaver] ] = None, enable_tracing: bool = False, instrumentor_builder: typing.Optional[typing.Callable[[...], typing.Any]] = None )

Initializes the LangGraph Agent.

See more: vertexai.agent_engines.LanggraphAgent

vertexai.agent_engines.LanggraphAgent.clone

clone() -> vertexai.agent_engines.templates.langgraph.LanggraphAgent

Returns a clone of the LanggraphAgent.

See more: vertexai.agent_engines.LanggraphAgent.clone

vertexai.agent_engines.LanggraphAgent.get_state

get_state( config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Gets the current state of the Agent.

See more: vertexai.agent_engines.LanggraphAgent.get_state

vertexai.agent_engines.LanggraphAgent.get_state_history

get_state_history( config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Iterable[typing.Any]

Gets the state history of the Agent.

See more: vertexai.agent_engines.LanggraphAgent.get_state_history

vertexai.agent_engines.LanggraphAgent.query

query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Queries the Agent with the given input and config.

See more: vertexai.agent_engines.LanggraphAgent.query

vertexai.agent_engines.LanggraphAgent.register_operations

register_operations() -> typing.Mapping[str, typing.Sequence[str]]

Registers the operations of the Agent.

See more: vertexai.agent_engines.LanggraphAgent.register_operations

vertexai.agent_engines.LanggraphAgent.set_up

set_up()

Sets up the agent for execution of queries at runtime.

See more: vertexai.agent_engines.LanggraphAgent.set_up

vertexai.agent_engines.LanggraphAgent.stream_query

stream_query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], config: typing.Optional[RunnableConfig] = None, **kwargs ) -> typing.Iterable[typing.Any]

Streams query responses from the Agent for the given input and config.

See more: vertexai.agent_engines.LanggraphAgent.stream_query

vertexai.agent_engines.LanggraphAgent.update_state

update_state( config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Updates the state of the Agent.

See more: vertexai.agent_engines.LanggraphAgent.update_state

vertexai.agent_engines.ModuleAgent

ModuleAgent( *, module_name: str, agent_name: str, register_operations: typing.Dict[str, typing.Sequence[str]], sys_paths: typing.Optional[typing.Sequence[str]] = None, agent_framework: typing.Optional[str] = None )

Initializes a module-based agent.

See more: vertexai.agent_engines.ModuleAgent

vertexai.agent_engines.ModuleAgent.clone

clone()

Returns a clone of the agent.

See more: vertexai.agent_engines.ModuleAgent.clone

vertexai.agent_engines.ModuleAgent.register_operations

register_operations(**kwargs) -> typing.Dict[str, typing.Sequence[str]]

Registers the user-provided operations (modes and methods).

See more: vertexai.agent_engines.ModuleAgent.register_operations

vertexai.agent_engines.ModuleAgent.set_up

set_up() -> None

Sets up the agent for execution of queries at runtime.

See more: vertexai.agent_engines.ModuleAgent.set_up

vertexai.agent_engines.OperationRegistrable.register_operations

register_operations(**kwargs) -> typing.Dict[str, typing.Sequence[str]]

Registers the user-provided operations (modes and methods).

See more: vertexai.agent_engines.OperationRegistrable.register_operations

vertexai.agent_engines.Queryable.query

query(**kwargs) -> typing.Any

Runs the Agent Engine to serve the user query.

See more: vertexai.agent_engines.Queryable.query

vertexai.agent_engines.StreamQueryable.stream_query

stream_query(**kwargs) -> typing.Iterable[typing.Any]

Streams responses to serve the user query.

See more: vertexai.agent_engines.StreamQueryable.stream_query

vertexai.evaluation.CustomMetric

CustomMetric( name: str, metric_function: typing.Callable[ [typing.Dict[str, typing.Any]], typing.Dict[str, typing.Any] ], )

Initializes the evaluation metric.

See more: vertexai.evaluation.CustomMetric
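
A custom metric sketch: the function receives one dataset row as a dictionary and returns a dictionary keyed by the metric name (the column name used here is illustrative):

    from vertexai.evaluation import CustomMetric

    def response_length(instance):
        return {"response_length": len(instance.get("response", ""))}

    length_metric = CustomMetric(name="response_length", metric_function=response_length)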

vertexai.evaluation.EvalTask

EvalTask( *, dataset: typing.Union[pd.DataFrame, str, typing.Dict[str, typing.Any]], metrics: typing.List[ typing.Union[ typing.Literal[ "exact_match", "bleu", "rouge_1", "rouge_2", "rouge_l", "rouge_l_sum", "tool_call_valid", "tool_name_match", "tool_parameter_key_match", "tool_parameter_kv_match", ], vertexai.evaluation.CustomMetric, vertexai.evaluation.metrics._base._AutomaticMetric, vertexai.evaluation.metrics._base._TranslationMetric, vertexai.evaluation.metrics.pointwise_metric.PointwiseMetric, vertexai.evaluation.metrics.pairwise_metric.PairwiseMetric, ] ], experiment: typing.Optional[str] = None, metric_column_mapping: typing.Optional[typing.Dict[str, str]] = None, output_uri_prefix: typing.Optional[str] = "" )

Initializes an EvalTask.

See more: vertexai.evaluation.EvalTask

vertexai.evaluation.EvalTask.display_runs

display_runs()

Displays experiment runs associated with this EvalTask.

See more: vertexai.evaluation.EvalTask.display_runs

vertexai.evaluation.EvalTask.evaluate

evaluate( *, model: typing.Optional[ typing.Union[ vertexai.generative_models.GenerativeModel, typing.Callable[[str], str] ] ] = None, prompt_template: typing.Optional[str] = None, experiment_run_name: typing.Optional[str] = None, response_column_name: typing.Optional[str] = None, baseline_model_response_column_name: typing.Optional[str] = None, evaluation_service_qps: typing.Optional[float] = None, retry_timeout: float = 120.0, output_file_name: typing.Optional[str] = None ) -> vertexai.evaluation.EvalResult

Runs an evaluation for the EvalTask.

See more: vertexai.evaluation.EvalTask.evaluate
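
An end-to-end sketch that pairs EvalTask with computation-based metrics; the dataset contents and model name are illustrative:

    import pandas as pd

    from vertexai.evaluation import EvalTask
    from vertexai.generative_models import GenerativeModel

    eval_dataset = pd.DataFrame(
        {
            "prompt": ["Say hello.", "Say goodbye."],
            "reference": ["Hello!", "Goodbye!"],
        }
    )
    task = EvalTask(dataset=eval_dataset, metrics=["exact_match", "bleu"])
    result = task.evaluate(model=GenerativeModel("gemini-2.0-flash-001"))  # placeholder model
    print(result.summary_metrics)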

vertexai.evaluation.MetricPromptTemplateExamples.get_prompt_template

get_prompt_template(metric_name: str) -> str

Returns the prompt template for the given metric name.

See more: vertexai.evaluation.MetricPromptTemplateExamples.get_prompt_template

vertexai.evaluation.MetricPromptTemplateExamples.list_example_metric_names

list_example_metric_names() -> typing.List[str]

Returns a list of all example metric prompt template names.

See more: vertexai.evaluation.MetricPromptTemplateExamples.list_example_metric_names

vertexai.evaluation.PairwiseMetric

PairwiseMetric( *, metric: str, metric_prompt_template: typing.Union[ vertexai.evaluation.metrics.metric_prompt_template.PairwiseMetricPromptTemplate, str, ], baseline_model: typing.Optional[ typing.Union[ vertexai.generative_models.GenerativeModel, typing.Callable[[str], str] ] ] = None )

Initializes a pairwise evaluation metric.

See more: vertexai.evaluation.PairwiseMetric

vertexai.evaluation.PairwiseMetricPromptTemplate

PairwiseMetricPromptTemplate( *, criteria: typing.Dict[str, str], rating_rubric: typing.Dict[str, str], input_variables: typing.Optional[typing.List[str]] = None, instruction: typing.Optional[str] = None, metric_definition: typing.Optional[str] = None, evaluation_steps: typing.Optional[typing.Dict[str, str]] = None, few_shot_examples: typing.Optional[typing.List[str]] = None )

Initializes a pairwise metric prompt template.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate

vertexai.evaluation.PairwiseMetricPromptTemplate.__str__

__str__()

Serializes the pairwise metric prompt template to a string.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate.__str__

vertexai.evaluation.PairwiseMetricPromptTemplate.assemble

assemble(**kwargs) -> vertexai.evaluation.prompt_template.PromptTemplate

Replaces only the provided variables in the template with specific values.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate.assemble

vertexai.evaluation.PairwiseMetricPromptTemplate.get_default_pairwise_evaluation_steps

get_default_pairwise_evaluation_steps() -> typing.Dict[str, str]

Returns the default evaluation steps for the metric prompt template.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate.get_default_pairwise_evaluation_steps

vertexai.evaluation.PairwiseMetricPromptTemplate.get_default_pairwise_instruction

get_default_pairwise_instruction() -> str

Returns the default instruction for the metric prompt template.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate.get_default_pairwise_instruction

vertexai.evaluation.PointwiseMetric

PointwiseMetric( *, metric: str, metric_prompt_template: typing.Union[ vertexai.evaluation.metrics.metric_prompt_template.PointwiseMetricPromptTemplate, str, ] )

Initializes a pointwise evaluation metric.

See more: vertexai.evaluation.PointwiseMetric
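
A pointwise metric sketch that passes a free-form prompt template string; the wording and rating scale are illustrative:

    from vertexai.evaluation import PointwiseMetric

    fluency = PointwiseMetric(
        metric="fluency",
        metric_prompt_template=(
            "Rate the fluency of the response on a scale of 1 to 5.\n"
            "Response: {response}"
        ),
    )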

vertexai.evaluation.PointwiseMetricPromptTemplate

PointwiseMetricPromptTemplate( *, criteria: typing.Dict[str, str], rating_rubric: typing.Dict[str, str], input_variables: typing.Optional[typing.List[str]] = None, instruction: typing.Optional[str] = None, metric_definition: typing.Optional[str] = None, evaluation_steps: typing.Optional[typing.Dict[str, str]] = None, few_shot_examples: typing.Optional[typing.List[str]] = None )

Initializes a pointwise metric prompt template.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate

vertexai.evaluation.PointwiseMetricPromptTemplate.__str__

__str__()

Serializes the pointwise metric prompt template to a string.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate.__str__

vertexai.evaluation.PointwiseMetricPromptTemplate.assemble

assemble(**kwargs) -> vertexai.evaluation.prompt_template.PromptTemplate

Replaces only the provided variables in the template with specific values.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate.assemble

vertexai.evaluation.PointwiseMetricPromptTemplate.get_default_pointwise_evaluation_steps

get_default_pointwise_evaluation_steps() -> typing.Dict[str, str]

Returns the default evaluation steps for the metric prompt template.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate.get_default_pointwise_evaluation_steps

vertexai.evaluation.PointwiseMetricPromptTemplate.get_default_pointwise_instruction

get_default_pointwise_instruction() -> str

Returns the default instruction for the metric prompt template.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate.get_default_pointwise_instruction

vertexai.evaluation.PromptTemplate

PromptTemplate(template: str)

Initializes the PromptTemplate with a given template.

See more: vertexai.evaluation.PromptTemplate

vertexai.evaluation.PromptTemplate.__repr__

__repr__() -> str

Returns a string representation of the PromptTemplate.

See more: vertexai.evaluation.PromptTemplate.__repr__

vertexai.evaluation.PromptTemplate.__str__

__str__() -> str

Returns the template string.

See more: vertexai.evaluation.PromptTemplate.__str__

vertexai.evaluation.PromptTemplate.assemble

assemble(**kwargs) -> vertexai.evaluation.prompt_template.PromptTemplate

Replaces only the provided variables in the template with specific values.

See more: vertexai.evaluation.PromptTemplate.assemble
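
A small templating sketch; the variable names are illustrative:

    from vertexai.evaluation import PromptTemplate

    template = PromptTemplate("Translate {text} into {language}.")
    partial = template.assemble(language="French")      # only {language} is replaced
    print(str(partial.assemble(text="good morning")))   # remaining variable filled in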

vertexai.evaluation.Rouge

Rouge( *, rouge_type: typing.Literal[ "rouge1", "rouge2", "rouge3", "rouge4", "rouge5", "rouge6", "rouge7", "rouge8", "rouge9", "rougeL", "rougeLsum", ], use_stemmer: bool = False, split_summaries: bool = False )

Initializes the ROUGE metric.

See more: vertexai.evaluation.Rouge

vertexai.generative_models.ChatSession.send_message

vertexai.generative_models.ChatSession.send_message_async

Generates content asynchronously.

See more: vertexai.generative_models.ChatSession.send_message_async

vertexai.generative_models.FunctionDeclaration

FunctionDeclaration( *, name: str, parameters: typing.Dict[str, typing.Any], description: typing.Optional[str] = None, response: typing.Optional[typing.Dict[str, typing.Any]] = None )

Constructs a FunctionDeclaration.

See more: vertexai.generative_models.FunctionDeclaration
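
A function-declaration sketch; the function name and OpenAPI-style parameter schema are illustrative:

    from vertexai.generative_models import FunctionDeclaration

    get_weather = FunctionDeclaration(
        name="get_current_weather",
        description="Gets the current weather in a given location.",
        parameters={
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    )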

vertexai.generative_models.GenerationConfig

GenerationConfig( *, temperature: typing.Optional[float] = None, top_p: typing.Optional[float] = None, top_k: typing.Optional[int] = None, candidate_count: typing.Optional[int] = None, max_output_tokens: typing.Optional[int] = None, stop_sequences: typing.Optional[typing.List[str]] = None, presence_penalty: typing.Optional[float] = None, frequency_penalty: typing.Optional[float] = None, response_mime_type: typing.Optional[str] = None, response_schema: typing.Optional[typing.Dict[str, typing.Any]] = None, seed: typing.Optional[int] = None, audio_timestamp: typing.Optional[bool] = None, routing_config: typing.Optional[RoutingConfig] = None, logprobs: typing.Optional[int] = None, response_logprobs: typing.Optional[bool] = None, response_modalities: typing.Optional[typing.List[GenerationConfig.Modality]] = None, model_config: typing.Optional[GenerationConfig.ModelConfig] = None )

Constructs a GenerationConfig object.

See more: vertexai.generative_models.GenerationConfig

vertexai.generative_models.GenerationConfig.ModelConfig.__delattr__

__delattr__(key)

Delete the value on the given field.

See more: vertexai.generative_models.GenerationConfig.ModelConfig.__delattr__

vertexai.generative_models.GenerationConfig.ModelConfig.__eq__

__eq__(other)

Return True if the messages are equal, False otherwise.

See more: vertexai.generative_models.GenerationConfig.ModelConfig.__eq__

vertexai.generative_models.GenerationConfig.ModelConfig.__ne__

__ne__(other)

Return True if the messages are unequal, False otherwise.

See more: vertexai.generative_models.GenerationConfig.ModelConfig.__ne__

vertexai.generative_models.GenerationConfig.ModelConfig.__setattr__

__setattr__(key, value)

Set the value on the given field.

See more: vertexai.generative_models.GenerationConfig.ModelConfig.__setattr__

vertexai.generative_models.GenerationConfig.RoutingConfig.AutoRoutingMode

AutoRoutingMode( *, model_routing_preference: google.cloud.aiplatform_v1beta1.types.content.GenerationConfig.RoutingConfig.AutoRoutingMode.ModelRoutingPreference )

vertexai.generative_models.GenerationConfig.RoutingConfig.ManualRoutingMode

ManualRoutingMode(*, model_name: str)

vertexai.generative_models.GenerativeModel.compute_tokens

compute_tokens( contents: ContentsType, ) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

vertexai.generative_models.GenerativeModel.compute_tokens_async

compute_tokens_async( contents: ContentsType, ) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

Computes tokens asynchronously.

See more: vertexai.generative_models.GenerativeModel.compute_tokens_async

vertexai.generative_models.GenerativeModel.count_tokens

count_tokens( contents: ContentsType, *, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None ) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

vertexai.generative_models.GenerativeModel.count_tokens_async

count_tokens_async( contents: ContentsType, *, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None ) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

Counts tokens asynchronously.

See more: vertexai.generative_models.GenerativeModel.count_tokens_async
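
A small sketch of the synchronous count_tokens variant, assuming an already-initialized SDK and an example model name; the response exposes token and billable-character counts:

from vertexai.generative_models import GenerativeModel

model = GenerativeModel("gemini-1.5-flash")  # example model name
token_info = model.count_tokens("How many tokens does this sentence use?")
print(token_info.total_tokens, token_info.total_billable_characters)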

vertexai.generative_models.GenerativeModel.from_cached_content

from_cached_content( cached_content: typing.Union[str, CachedContent], *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None ) -> _GenerativeModel

Creates a model from cached content.

See more: vertexai.generative_models.GenerativeModel.from_cached_content

vertexai.generative_models.GenerativeModel.generate_content

generate_content( contents: ContentsType, *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, tool_config: typing.Optional[ vertexai.generative_models._generative_models.ToolConfig ] = None, labels: typing.Optional[typing.Dict[str, str]] = None, stream: bool = False ) -> typing.Union[ vertexai.generative_models._generative_models.GenerationResponse, typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse], ]

vertexai.generative_models.GenerativeModel.generate_content_async

generate_content_async( contents: ContentsType, *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, tool_config: typing.Optional[ vertexai.generative_models._generative_models.ToolConfig ] = None, labels: typing.Optional[typing.Dict[str, str]] = None, stream: bool = False ) -> typing.Union[ vertexai.generative_models._generative_models.GenerationResponse, typing.AsyncIterable[ vertexai.generative_models._generative_models.GenerationResponse ], ]

Generates content asynchronously.

See more: vertexai.generative_models.GenerativeModel.generate_content_async
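
A hedged async sketch; the model name is an example, and the call assumes an already-initialized SDK:

import asyncio

from vertexai.generative_models import GenerativeModel

async def main() -> None:
    model = GenerativeModel("gemini-1.5-flash")  # example model name
    response = await model.generate_content_async(
        "Summarize the benefits of caching in one sentence."
    )
    print(response.text)

asyncio.run(main())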

vertexai.generative_models.GenerativeModel.start_chat

start_chat( *, history: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Content] ] = None, response_validation: bool = True ) -> vertexai.generative_models._generative_models.ChatSession

Creates a stateful chat session.

See more: vertexai.generative_models.GenerativeModel.start_chat
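
A minimal multi-turn sketch; the ChatSession keeps history between send_message calls, and the model name is only an example:

from vertexai.generative_models import GenerativeModel

model = GenerativeModel("gemini-1.5-flash")  # example model name
chat = model.start_chat()

print(chat.send_message("What is the Vertex AI SDK?").text)
# The session history is carried over, so follow-up questions can be contextual.
print(chat.send_message("Summarize that in one sentence.").text)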

vertexai.generative_models.Image.from_bytes

from_bytes(data: bytes) -> vertexai.generative_models._generative_models.Image

Loads an image from raw image bytes.

See more: vertexai.generative_models.Image.from_bytes

vertexai.generative_models.Image.load_from_file

load_from_file( location: str, ) -> vertexai.generative_models._generative_models.Image

Loads an image from a file.

See more: vertexai.generative_models.Image.load_from_file
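
A hedged multimodal sketch: the image path and model name are placeholders, and the Image object is passed directly in the contents list:

from vertexai.generative_models import GenerativeModel, Image

image = Image.load_from_file("path/to/photo.png")  # placeholder path
model = GenerativeModel("gemini-1.5-flash")  # example model name
response = model.generate_content([image, "Describe this image in one sentence."])
print(response.text)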

vertexai.generative_models.ResponseValidationError.with_traceback

Exception.with_traceback(tb) -- set self.__traceback__ to tb and return self.

See more: vertexai.generative_models.ResponseValidationError.with_traceback

vertexai.generative_models.SafetySetting

SafetySetting( *, category: google.cloud.aiplatform_v1beta1.types.content.HarmCategory, threshold: google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold, method: typing.Optional[ google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockMethod ] = None )

Safety settings.

See more: vertexai.generative_models.SafetySetting
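
A sketch of attaching safety settings to a request; it assumes HarmCategory and HarmBlockThreshold are importable from vertexai.generative_models, and the chosen category, threshold, and model name are only examples:

from vertexai.generative_models import (
    GenerativeModel,
    HarmBlockThreshold,
    HarmCategory,
    SafetySetting,
)

safety_settings = [
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold=HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    ),
]
model = GenerativeModel("gemini-1.5-flash")  # example model name
response = model.generate_content(
    "Tell me about online etiquette.", safety_settings=safety_settings
)
print(response.text)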

vertexai.generative_models.grounding.DynamicRetrievalConfig

DynamicRetrievalConfig( mode: google.cloud.aiplatform_v1beta1.types.tool.DynamicRetrievalConfig.Mode = Mode.MODE_UNSPECIFIED, dynamic_threshold: typing.Optional[float] = None, )

Initializes a DynamicRetrievalConfig.

See more: vertexai.generative_models.grounding.DynamicRetrievalConfig

vertexai.generative_models.grounding.GoogleSearchRetrieval

GoogleSearchRetrieval( dynamic_retrieval_config: typing.Optional[ vertexai.generative_models._generative_models.grounding.DynamicRetrievalConfig ] = None, )

Initializes a Google Search Retrieval tool.

See more: vertexai.generative_models.grounding.GoogleSearchRetrieval

vertexai.generative_models.grounding.Retrieval

Retrieval( source: vertexai.generative_models._generative_models.grounding.VertexAISearch, disable_attribution: typing.Optional[bool] = None, )

Initializes a Retrieval tool.

See more: vertexai.generative_models.grounding.Retrieval

vertexai.generative_models.grounding.VertexAISearch

VertexAISearch( datastore: str, *, project: typing.Optional[str] = None, location: typing.Optional[str] = None )

Initializes a Vertex AI Search tool.

See more: vertexai.generative_models.grounding.VertexAISearch
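
A hedged sketch of grounding a request, assuming the Tool.from_google_search_retrieval and Tool.from_retrieval helpers; the datastore resource name and model name are placeholders:

from vertexai.generative_models import GenerativeModel, Tool, grounding

# Ground on Google Search results.
search_tool = Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval())

# Or ground on a Vertex AI Search datastore (placeholder resource name).
datastore_tool = Tool.from_retrieval(
    grounding.Retrieval(
        source=grounding.VertexAISearch(
            datastore="projects/PROJECT/locations/global/collections/default_collection/dataStores/DATASTORE"
        )
    )
)

model = GenerativeModel("gemini-1.5-flash")  # example model name
response = model.generate_content(
    "Who won the most recent FIFA World Cup?", tools=[search_tool]
)
print(response.text)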

vertexai.language_models.ChatModel

ChatModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a LanguageModel.

See more: vertexai.language_models.ChatModel

vertexai.language_models.ChatModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.language_models.ChatModel.from_pretrained

vertexai.language_models.ChatModel.get_tuned_model

get_tuned_model( tuned_model_name: str, ) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.ChatModel.get_tuned_model

vertexai.language_models.ChatModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

Lists the names of tuned models.

See more: vertexai.language_models.ChatModel.list_tuned_model_names

vertexai.language_models.ChatModel.start_chat

start_chat( *, context: typing.Optional[str] = None, examples: typing.Optional[ typing.List[vertexai.language_models.InputOutputTextPair] ] = None, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, message_history: typing.Optional[ typing.List[vertexai.language_models.ChatMessage] ] = None, stop_sequences: typing.Optional[typing.List[str]] = None ) -> vertexai.language_models.ChatSession

Starts a chat session with the model.

See more: vertexai.language_models.ChatModel.start_chat
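
A minimal sketch for the PaLM-era chat models; the model name, context, and example pair are placeholders:

from vertexai.language_models import ChatModel, InputOutputTextPair

chat_model = ChatModel.from_pretrained("chat-bison@002")  # example model name
chat = chat_model.start_chat(
    context="You are a helpful assistant for a hardware store.",
    examples=[
        InputOutputTextPair(
            input_text="Do you sell nails?",
            output_text="Yes, you can find nails in aisle 7.",
        )
    ],
    temperature=0.3,
)
print(chat.send_message("What time do you close today?").text)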

vertexai.language_models.ChatModel.tune_model

tune_model( training_data: typing.Union[str, pandas.core.frame.DataFrame], *, train_steps: typing.Optional[int] = None, learning_rate_multiplier: typing.Optional[float] = None, tuning_job_location: typing.Optional[str] = None, tuned_model_location: typing.Optional[str] = None, model_display_name: typing.Optional[str] = None, default_context: typing.Optional[str] = None, accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None, tuning_evaluation_spec: typing.Optional[ vertexai.language_models.TuningEvaluationSpec ] = None ) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.ChatModel.tune_model

vertexai.language_models.ChatModel.tune_model_rlhf

tune_model_rlhf( *, prompt_data: typing.Union[str, pandas.core.frame.DataFrame], preference_data: typing.Union[str, pandas.core.frame.DataFrame], model_display_name: typing.Optional[str] = None, prompt_sequence_length: typing.Optional[int] = None, target_sequence_length: typing.Optional[int] = None, reward_model_learning_rate_multiplier: typing.Optional[float] = None, reinforcement_learning_rate_multiplier: typing.Optional[float] = None, reward_model_train_steps: typing.Optional[int] = None, reinforcement_learning_train_steps: typing.Optional[int] = None, kl_coeff: typing.Optional[float] = None, default_context: typing.Optional[str] = None, tuning_job_location: typing.Optional[str] = None, accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None, tuning_evaluation_spec: typing.Optional[ vertexai.language_models.TuningEvaluationSpec ] = None ) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model using reinforcement learning from human feedback.

See more: vertexai.language_models.ChatModel.tune_model_rlhf

vertexai.language_models.ChatSession.send_message

send_message( message: str, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, candidate_count: typing.Optional[int] = None, grounding_source: typing.Optional[ typing.Union[ vertexai.language_models._language_models.WebSearch, vertexai.language_models._language_models.VertexAISearch, vertexai.language_models._language_models.InlineContext, ] ] = None ) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Sends a message to the language model and gets a response.

See more: vertexai.language_models.ChatSession.send_message

vertexai.language_models.ChatSession.send_message_async

send_message_async( message: str, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, candidate_count: typing.Optional[int] = None, grounding_source: typing.Optional[ typing.Union[ vertexai.language_models._language_models.WebSearch, vertexai.language_models._language_models.VertexAISearch, vertexai.language_models._language_models.InlineContext, ] ] = None ) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Asynchronously sends a message to the language model and gets a response.

See more: vertexai.language_models.ChatSession.send_message_async

vertexai.language_models.ChatSession.send_message_streaming

send_message_streaming( message: str, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None ) -> typing.Iterator[vertexai.language_models.TextGenerationResponse]

Sends a message to the language model and gets a streamed response.

See more: vertexai.language_models.ChatSession.send_message_streaming

vertexai.language_models.ChatSession.send_message_streaming_async

send_message_streaming_async( message: str, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None ) -> typing.AsyncIterator[vertexai.language_models.TextGenerationResponse]

Asynchronously sends a message to the language model and gets a streamed response.

See more: vertexai.language_models.ChatSession.send_message_streaming_async

vertexai.language_models.CodeChatModel

CodeChatModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a LanguageModel.

See more: vertexai.language_models.CodeChatModel

vertexai.language_models.CodeChatModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.language_models.CodeChatModel.from_pretrained

vertexai.language_models.CodeChatModel.get_tuned_model

get_tuned_model( tuned_model_name: str, ) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.CodeChatModel.get_tuned_model

vertexai.language_models.CodeChatModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

Lists the names of tuned models.

See more: vertexai.language_models.CodeChatModel.list_tuned_model_names

vertexai.language_models.CodeChatModel.start_chat

start_chat( *, context: typing.Optional[str] = None, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, message_history: typing.Optional[ typing.List[vertexai.language_models.ChatMessage] ] = None, stop_sequences: typing.Optional[typing.List[str]] = None ) -> vertexai.language_models.CodeChatSession

Starts a chat session with the code chat model.

See more: vertexai.language_models.CodeChatModel.start_chat

vertexai.language_models.CodeChatModel.tune_model

tune_model( training_data: typing.Union[str, pandas.core.frame.DataFrame], *, train_steps: typing.Optional[int] = None, learning_rate_multiplier: typing.Optional[float] = None, tuning_job_location: typing.Optional[str] = None, tuned_model_location: typing.Optional[str] = None, model_display_name: typing.Optional[str] = None, default_context: typing.Optional[str] = None, accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None, tuning_evaluation_spec: typing.Optional[ vertexai.language_models.TuningEvaluationSpec ] = None ) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.CodeChatModel.tune_model

vertexai.language_models.CodeChatSession.send_message

send_message( message: str, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, candidate_count: typing.Optional[int] = None ) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Sends a message to the code chat model and gets a response.

See more: vertexai.language_models.CodeChatSession.send_message

vertexai.language_models.CodeChatSession.send_message_async

send_message_async( message: str, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, candidate_count: typing.Optional[int] = None ) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Asynchronously sends a message to the code chat model and gets a response.

See more: vertexai.language_models.CodeChatSession.send_message_async

vertexai.language_models.CodeChatSession.send_message_streaming

send_message_streaming( message: str, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None ) -> typing.Iterator[vertexai.language_models.TextGenerationResponse]

Sends a message to the code chat model and gets a streamed response.

See more: vertexai.language_models.CodeChatSession.send_message_streaming

vertexai.language_models.CodeChatSession.send_message_streaming_async

send_message_streaming_async( message: str, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None ) -> typing.AsyncIterator[vertexai.language_models.TextGenerationResponse]

Asynchronously sends a message to the code chat model and gets a streamed response.

See more: vertexai.language_models.CodeChatSession.send_message_streaming_async

vertexai.language_models.CodeGenerationModel.batch_predict

batch_predict( *, dataset: typing.Union[str, typing.List[str]], destination_uri_prefix: str, model_parameters: typing.Optional[typing.Dict] = None ) -> google.cloud.aiplatform.jobs.BatchPredictionJob

Starts a batch prediction job with the model.

See more: vertexai.language_models.CodeGenerationModel.batch_predict

vertexai.language_models.CodeGenerationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.language_models.CodeGenerationModel.get_tuned_model

get_tuned_model( tuned_model_name: str, ) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.CodeGenerationModel.get_tuned_model

vertexai.language_models.CodeGenerationModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

vertexai.language_models.CodeGenerationModel.predict

predict( prefix: str, suffix: typing.Optional[str] = None, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, candidate_count: typing.Optional[int] = None ) -> vertexai.language_models.TextGenerationResponse

Gets model response for a single prompt.

See more: vertexai.language_models.CodeGenerationModel.predict
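
A minimal code-generation sketch; the model name and the prompt text are placeholders:

from vertexai.language_models import CodeGenerationModel

model = CodeGenerationModel.from_pretrained("code-bison@002")  # example model name
response = model.predict(
    prefix="Write a Python function that reverses a string.",
    max_output_tokens=128,
    temperature=0.2,
)
print(response.text)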

vertexai.language_models.CodeGenerationModel.predict_async

predict_async( prefix: str, suffix: typing.Optional[str] = None, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, candidate_count: typing.Optional[int] = None ) -> vertexai.language_models.TextGenerationResponse

Asynchronously gets model response for a single prompt.

See more: vertexai.language_models.CodeGenerationModel.predict_async

vertexai.language_models.CodeGenerationModel.predict_streaming

predict_streaming( prefix: str, suffix: typing.Optional[str] = None, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None ) -> typing.Iterator[vertexai.language_models.TextGenerationResponse]

Streams predicted code based on the given code prefix.

See more: vertexai.language_models.CodeGenerationModel.predict_streaming

vertexai.language_models.CodeGenerationModel.predict_streaming_async

predict_streaming_async( prefix: str, suffix: typing.Optional[str] = None, *, max_output_tokens: typing.Optional[int] = None, temperature: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None ) -> typing.AsyncIterator[vertexai.language_models.TextGenerationResponse]

Asynchronously streams predicted code based on the given code prefix.

See more: vertexai.language_models.CodeGenerationModel.predict_streaming_async

vertexai.language_models.CodeGenerationModel.tune_model

tune_model( training_data: typing.Union[str, pandas.core.frame.DataFrame], *, train_steps: typing.Optional[int] = None, learning_rate_multiplier: typing.Optional[float] = None, tuning_job_location: typing.Optional[str] = None, tuned_model_location: typing.Optional[str] = None, model_display_name: typing.Optional[str] = None, tuning_evaluation_spec: typing.Optional[ vertexai.language_models.TuningEvaluationSpec ] = None, accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None, max_context_length: typing.Optional[str] = None ) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.CodeGenerationModel.tune_model

vertexai.language_models.TextEmbeddingModel.batch_predict

batch_predict( *, dataset: typing.Union[str, typing.List[str]], destination_uri_prefix: str, model_parameters: typing.Optional[typing.Dict] = None ) -> google.cloud.aiplatform.jobs.BatchPredictionJob

Starts a batch prediction job with the model.

See more: vertexai.language_models.TextEmbeddingModel.batch_predict

vertexai.language_models.TextEmbeddingModel.count_tokens

count_tokens( prompts: typing.List[str], ) -> vertexai.preview.language_models.CountTokensResponse

Counts the tokens and billable characters for the given prompts.

See more: vertexai.language_models.TextEmbeddingModel.count_tokens

vertexai.language_models.TextEmbeddingModel.deploy_tuned_model

deploy_tuned_model( tuned_model_name: str, machine_type: typing.Optional[str] = None, accelerator: typing.Optional[str] = None, accelerator_count: typing.Optional[int] = None, ) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.TextEmbeddingModel.deploy_tuned_model

vertexai.language_models.TextEmbeddingModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.language_models.TextEmbeddingModel.get_embeddings

get_embeddings( texts: typing.List[typing.Union[str, vertexai.language_models.TextEmbeddingInput]], *, auto_truncate: bool = True, output_dimensionality: typing.Optional[int] = None ) -> typing.List[vertexai.language_models.TextEmbedding]

Calculates embeddings for the given texts.

See more: vertexai.language_models.TextEmbeddingModel.get_embeddings
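
A hedged embeddings sketch; the model name, task type, and output dimensionality are examples:

from vertexai.language_models import TextEmbeddingInput, TextEmbeddingModel

model = TextEmbeddingModel.from_pretrained("text-embedding-004")  # example model name
inputs = [
    TextEmbeddingInput(text="What is a transformer model?", task_type="RETRIEVAL_QUERY"),
]
embeddings = model.get_embeddings(inputs, output_dimensionality=256)
print(len(embeddings[0].values))  # length of the embedding vector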

vertexai.language_models.TextEmbeddingModel.get_embeddings_async

get_embeddings_async( texts: typing.List[typing.Union[str, vertexai.language_models.TextEmbeddingInput]], *, auto_truncate: bool = True, output_dimensionality: typing.Optional[int] = None ) -> typing.List[vertexai.language_models.TextEmbedding]

Asynchronously calculates embeddings for the given texts.

See more: vertexai.language_models.TextEmbeddingModel.get_embeddings_async

vertexai.language_models.TextEmbeddingModel.get_tuned_model

get_tuned_model(*args, **kwargs)

Loads the specified tuned language model.

See more: vertexai.language_models.TextEmbeddingModel.get_tuned_model

vertexai.language_models.TextEmbeddingModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

Lists the names of tuned models.

See more: vertexai.language_models.TextEmbeddingModel.list_tuned_model_names

vertexai.language_models.TextEmbeddingModel.tune_model

tune_model( *, training_data: typing.Optional[str] = None, corpus_data: typing.Optional[str] = None, queries_data: typing.Optional[str] = None, test_data: typing.Optional[str] = None, validation_data: typing.Optional[str] = None, batch_size: typing.Optional[int] = None, train_steps: typing.Optional[int] = None, tuned_model_location: typing.Optional[str] = None, model_display_name: typing.Optional[str] = None, task_type: typing.Optional[str] = None, machine_type: typing.Optional[str] = None, accelerator: typing.Optional[str] = None, accelerator_count: typing.Optional[int] = None, output_dimensionality: typing.Optional[int] = None, learning_rate_multiplier: typing.Optional[float] = None ) -> vertexai.language_models._language_models._TextEmbeddingModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.TextEmbeddingModel.tune_model

vertexai.language_models.TextGenerationModel.batch_predict

batch_predict( *, dataset: typing.Union[str, typing.List[str]], destination_uri_prefix: str, model_parameters: typing.Optional[typing.Dict] = None ) -> google.cloud.aiplatform.jobs.BatchPredictionJob

Starts a batch prediction job with the model.

See more: vertexai.language_models.TextGenerationModel.batch_predict

vertexai.language_models.TextGenerationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.language_models.TextGenerationModel.get_tuned_model

get_tuned_model( tuned_model_name: str, ) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.TextGenerationModel.get_tuned_model

vertexai.language_models.TextGenerationModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

vertexai.language_models.TextGenerationModel.predict

predict( prompt: str, *, max_output_tokens: typing.Optional[int] = 128, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, candidate_count: typing.Optional[int] = None, grounding_source: typing.Optional[ typing.Union[ vertexai.language_models._language_models.WebSearch, vertexai.language_models._language_models.VertexAISearch, vertexai.language_models._language_models.InlineContext, ] ] = None, logprobs: typing.Optional[int] = None, presence_penalty: typing.Optional[float] = None, frequency_penalty: typing.Optional[float] = None, logit_bias: typing.Optional[typing.Dict[str, float]] = None, seed: typing.Optional[int] = None ) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Gets model response for a single prompt.

See more: vertexai.language_models.TextGenerationModel.predict
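
A minimal sketch; the model name and parameter values are examples:

from vertexai.language_models import TextGenerationModel

model = TextGenerationModel.from_pretrained("text-bison@002")  # example model name
response = model.predict(
    "Suggest three taglines for a neighborhood coffee shop.",
    max_output_tokens=128,
    temperature=0.7,
)
print(response.text)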

vertexai.language_models.TextGenerationModel.predict_async

predict_async( prompt: str, *, max_output_tokens: typing.Optional[int] = 128, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, candidate_count: typing.Optional[int] = None, grounding_source: typing.Optional[ typing.Union[ vertexai.language_models._language_models.WebSearch, vertexai.language_models._language_models.VertexAISearch, vertexai.language_models._language_models.InlineContext, ] ] = None, logprobs: typing.Optional[int] = None, presence_penalty: typing.Optional[float] = None, frequency_penalty: typing.Optional[float] = None, logit_bias: typing.Optional[typing.Dict[str, float]] = None, seed: typing.Optional[int] = None ) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Asynchronously gets model response for a single prompt.

See more: vertexai.language_models.TextGenerationModel.predict_async

vertexai.language_models.TextGenerationModel.predict_streaming

predict_streaming( prompt: str, *, max_output_tokens: int = 128, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, logprobs: typing.Optional[int] = None, presence_penalty: typing.Optional[float] = None, frequency_penalty: typing.Optional[float] = None, logit_bias: typing.Optional[typing.Dict[str, float]] = None, seed: typing.Optional[int] = None ) -> typing.Iterator[vertexai.language_models.TextGenerationResponse]

Gets a streaming model response for a single prompt.

See more: vertexai.language_models.TextGenerationModel.predict_streaming

vertexai.language_models.TextGenerationModel.predict_streaming_async

predict_streaming_async( prompt: str, *, max_output_tokens: int = 128, temperature: typing.Optional[float] = None, top_k: typing.Optional[int] = None, top_p: typing.Optional[float] = None, stop_sequences: typing.Optional[typing.List[str]] = None, logprobs: typing.Optional[int] = None, presence_penalty: typing.Optional[float] = None, frequency_penalty: typing.Optional[float] = None, logit_bias: typing.Optional[typing.Dict[str, float]] = None, seed: typing.Optional[int] = None ) -> typing.AsyncIterator[vertexai.language_models.TextGenerationResponse]

Asynchronously gets a streaming model response for a single prompt.

See more: vertexai.language_models.TextGenerationModel.predict_streaming_async

vertexai.language_models.TextGenerationModel.tune_model

tune_model( training_data: typing.Union[str, pandas.core.frame.DataFrame], *, train_steps: typing.Optional[int] = None, learning_rate_multiplier: typing.Optional[float] = None, tuning_job_location: typing.Optional[str] = None, tuned_model_location: typing.Optional[str] = None, model_display_name: typing.Optional[str] = None, tuning_evaluation_spec: typing.Optional[ vertexai.language_models.TuningEvaluationSpec ] = None, accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None, max_context_length: typing.Optional[str] = None ) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.TextGenerationModel.tune_model

vertexai.language_models.TextGenerationModel.tune_model_rlhf

tune_model_rlhf( *, prompt_data: typing.Union[str, pandas.core.frame.DataFrame], preference_data: typing.Union[str, pandas.core.frame.DataFrame], model_display_name: typing.Optional[str] = None, prompt_sequence_length: typing.Optional[int] = None, target_sequence_length: typing.Optional[int] = None, reward_model_learning_rate_multiplier: typing.Optional[float] = None, reinforcement_learning_rate_multiplier: typing.Optional[float] = None, reward_model_train_steps: typing.Optional[int] = None, reinforcement_learning_train_steps: typing.Optional[int] = None, kl_coeff: typing.Optional[float] = None, default_context: typing.Optional[str] = None, tuning_job_location: typing.Optional[str] = None, accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None, tuning_evaluation_spec: typing.Optional[ vertexai.language_models.TuningEvaluationSpec ] = None ) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model using reinforcement learning from human feedback.

See more: vertexai.language_models.TextGenerationModel.tune_model_rlhf

vertexai.language_models._language_models._TunableModelMixin

_TunableModelMixin(model_id: str, endpoint_name: typing.Optional[str] = None)

vertexai.language_models._language_models._TunableModelMixin.tune_model

tune_model( training_data: typing.Union[str, pandas.core.frame.DataFrame], *, corpus_data: typing.Optional[str] = None, queries_data: typing.Optional[str] = None, test_data: typing.Optional[str] = None, validation_data: typing.Optional[str] = None, batch_size: typing.Optional[int] = None, train_steps: typing.Optional[int] = None, learning_rate: typing.Optional[float] = None, learning_rate_multiplier: typing.Optional[float] = None, tuning_job_location: typing.Optional[str] = None, tuned_model_location: typing.Optional[str] = None, model_display_name: typing.Optional[str] = None, tuning_evaluation_spec: typing.Optional[ vertexai.language_models.TuningEvaluationSpec ] = None, default_context: typing.Optional[str] = None, task_type: typing.Optional[str] = None, machine_type: typing.Optional[str] = None, accelerator: typing.Optional[str] = None, accelerator_count: typing.Optional[int] = None, accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None, max_context_length: typing.Optional[str] = None, output_dimensionality: typing.Optional[int] = None ) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models._language_models._TunableModelMixin.tune_model

vertexai.preview.generative_models.AutomaticFunctionCallingResponder

AutomaticFunctionCallingResponder(max_automatic_function_calls: int = 1)

vertexai.preview.generative_models.CallableFunctionDeclaration

CallableFunctionDeclaration( name: str, function: typing.Callable[[...], typing.Any], parameters: typing.Dict[str, typing.Any], description: typing.Optional[str] = None, )

Constructs a FunctionDeclaration.

See more: vertexai.preview.generative_models.CallableFunctionDeclaration

vertexai.preview.generative_models.CallableFunctionDeclaration.from_func

from_func( func: typing.Callable[[...], typing.Any], ) -> vertexai.generative_models._generative_models.CallableFunctionDeclaration

Automatically creates a CallableFunctionDeclaration from a Python function.

See more: vertexai.preview.generative_models.CallableFunctionDeclaration.from_func
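
A hedged sketch of automatic function calling with a callable declaration; the weather function is a stub written for this example, the model name is only an example, and the responder argument corresponds to the preview start_chat parameter listed further below:

from vertexai.preview.generative_models import (
    AutomaticFunctionCallingResponder,
    CallableFunctionDeclaration,
    GenerativeModel,
    Tool,
)

def get_current_weather(location: str) -> str:
    """Returns the current weather in the given location."""
    return f"It is sunny in {location}."  # stub implementation for illustration

weather_declaration = CallableFunctionDeclaration.from_func(get_current_weather)
weather_tool = Tool(function_declarations=[weather_declaration])

model = GenerativeModel("gemini-1.5-flash", tools=[weather_tool])  # example model name
chat = model.start_chat(
    responder=AutomaticFunctionCallingResponder(max_automatic_function_calls=1)
)
print(chat.send_message("What is the weather like in Paris right now?").text)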

vertexai.preview.generative_models.ChatSession.send_message

send_message( content: PartsType, *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, labels: typing.Optional[typing.Dict[str, str]] = None, stream: bool = False ) -> typing.Union[ vertexai.generative_models._generative_models.GenerationResponse, typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse], ]

vertexai.preview.generative_models.ChatSession.send_message_async

send_message_async( content: PartsType, *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, labels: typing.Optional[typing.Dict[str, str]] = None, stream: bool = False ) -> typing.Union[ typing.Awaitable[vertexai.generative_models._generative_models.GenerationResponse], typing.Awaitable[ typing.AsyncIterable[ vertexai.generative_models._generative_models.GenerationResponse ] ], ]

Generates content asynchronously.

See more: vertexai.preview.generative_models.ChatSession.send_message_async

vertexai.preview.generative_models.FunctionDeclaration

FunctionDeclaration( *, name: str, parameters: typing.Dict[str, typing.Any], description: typing.Optional[str] = None, response: typing.Optional[typing.Dict[str, typing.Any]] = None )

Constructs a FunctionDeclaration.

See more: vertexai.preview.generative_models.FunctionDeclaration

vertexai.preview.generative_models.GenerationConfig

GenerationConfig( *, temperature: typing.Optional[float] = None, top_p: typing.Optional[float] = None, top_k: typing.Optional[int] = None, candidate_count: typing.Optional[int] = None, max_output_tokens: typing.Optional[int] = None, stop_sequences: typing.Optional[typing.List[str]] = None, presence_penalty: typing.Optional[float] = None, frequency_penalty: typing.Optional[float] = None, response_mime_type: typing.Optional[str] = None, response_schema: typing.Optional[typing.Dict[str, typing.Any]] = None, seed: typing.Optional[int] = None, audio_timestamp: typing.Optional[bool] = None, routing_config: typing.Optional[RoutingConfig] = None, logprobs: typing.Optional[int] = None, response_logprobs: typing.Optional[bool] = None, response_modalities: typing.Optional[typing.List[GenerationConfig.Modality]] = None, model_config: typing.Optional[GenerationConfig.ModelConfig] = None )

Constructs a GenerationConfig object.

See more: vertexai.preview.generative_models.GenerationConfig

vertexai.preview.generative_models.GenerationConfig.ModelConfig.__delattr__

__delattr__(key)

Delete the value on the given field.

See more: vertexai.preview.generative_models.GenerationConfig.ModelConfig.__delattr__

vertexai.preview.generative_models.GenerationConfig.ModelConfig.__eq__

__eq__(other)

Return True if the messages are equal, False otherwise.

See more: vertexai.preview.generative_models.GenerationConfig.ModelConfig.__eq__

vertexai.preview.generative_models.GenerationConfig.ModelConfig.__ne__

__ne__(other)

Return True if the messages are unequal, False otherwise.

See more: vertexai.preview.generative_models.GenerationConfig.ModelConfig.__ne__

vertexai.preview.generative_models.GenerationConfig.ModelConfig.__setattr__

__setattr__(key, value)

vertexai.preview.generative_models.GenerationConfig.RoutingConfig.AutoRoutingMode

AutoRoutingMode( *, model_routing_preference: google.cloud.aiplatform_v1beta1.types.content.GenerationConfig.RoutingConfig.AutoRoutingMode.ModelRoutingPreference )

vertexai.preview.generative_models.GenerationConfig.RoutingConfig.ManualRoutingMode

ManualRoutingMode(*, model_name: str)

vertexai.preview.generative_models.GenerativeModel.compute_tokens

compute_tokens( contents: ContentsType, ) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

vertexai.preview.generative_models.GenerativeModel.compute_tokens_async

compute_tokens_async( contents: ContentsType, ) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

vertexai.preview.generative_models.GenerativeModel.count_tokens

count_tokens( contents: ContentsType, *, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None ) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

vertexai.preview.generative_models.GenerativeModel.count_tokens_async

count_tokens_async( contents: ContentsType, *, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None ) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

vertexai.preview.generative_models.GenerativeModel.from_cached_content

from_cached_content( cached_content: typing.Union[str, CachedContent], *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None ) -> _GenerativeModel

Creates a model from cached content.

See more: vertexai.preview.generative_models.GenerativeModel.from_cached_content

vertexai.preview.generative_models.GenerativeModel.generate_content

generate_content( contents: ContentsType, *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, tool_config: typing.Optional[ vertexai.generative_models._generative_models.ToolConfig ] = None, labels: typing.Optional[typing.Dict[str, str]] = None, stream: bool = False ) -> typing.Union[ vertexai.generative_models._generative_models.GenerationResponse, typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse], ]

vertexai.preview.generative_models.GenerativeModel.generate_content_async

generate_content_async( contents: ContentsType, *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, tool_config: typing.Optional[ vertexai.generative_models._generative_models.ToolConfig ] = None, labels: typing.Optional[typing.Dict[str, str]] = None, stream: bool = False ) -> typing.Union[ vertexai.generative_models._generative_models.GenerationResponse, typing.AsyncIterable[ vertexai.generative_models._generative_models.GenerationResponse ], ]

vertexai.preview.generative_models.GenerativeModel.set_request_response_logging_config

set_request_response_logging_config( *, enabled: bool, sampling_rate: float, bigquery_destination: str, enable_otel_logging: typing.Optional[bool] = None ) -> typing.Union[ google.cloud.aiplatform_v1beta1.types.endpoint.PublisherModelConfig, google.cloud.aiplatform_v1beta1.types.endpoint.Endpoint, ]

vertexai.preview.generative_models.GenerativeModel.start_chat

start_chat( *, history: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Content] ] = None, response_validation: bool = True, responder: typing.Optional[ vertexai.generative_models._generative_models.AutomaticFunctionCallingResponder ] = None ) -> vertexai.generative_models._generative_models.ChatSession

Creates a stateful chat session.

See more: vertexai.preview.generative_models.GenerativeModel.start_chat

vertexai.preview.generative_models.Image.from_bytes

from_bytes(data: bytes) -> vertexai.generative_models._generative_models.Image

Loads an image from raw image bytes.

See more: vertexai.preview.generative_models.Image.from_bytes

vertexai.preview.generative_models.Image.load_from_file

load_from_file( location: str, ) -> vertexai.generative_models._generative_models.Image

vertexai.preview.generative_models.ResponseBlockedError.with_traceback

Exception.with_traceback(tb) -- set self.__traceback__ to tb and return self.

See more: vertexai.preview.generative_models.ResponseBlockedError.with_traceback

vertexai.preview.generative_models.ResponseValidationError.with_traceback

Exception.with_traceback(tb) -- set self.__traceback__ to tb and return self.

See more: vertexai.preview.generative_models.ResponseValidationError.with_traceback

vertexai.preview.generative_models.SafetySetting

SafetySetting( *, category: google.cloud.aiplatform_v1beta1.types.content.HarmCategory, threshold: google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold, method: typing.Optional[ google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockMethod ] = None )

vertexai.preview.prompts.Prompt

Prompt( prompt_data: typing.Optional[PartsType] = None, *, variables: typing.Optional[typing.List[typing.Dict[str, PartsType]]] = None, prompt_name: typing.Optional[str] = None, generation_config: typing.Optional[ vertexai.generative_models._generative_models.GenerationConfig ] = None, model_name: typing.Optional[str] = None, safety_settings: typing.Optional[ vertexai.generative_models._generative_models.SafetySetting ] = None, system_instruction: typing.Optional[PartsType] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, tool_config: typing.Optional[ vertexai.generative_models._generative_models.ToolConfig ] = None )

Initializes the Prompt with the given prompt data and variables.

See more: vertexai.preview.prompts.Prompt

vertexai.preview.prompts.Prompt.__repr__

__repr__() -> str

Returns a string representation of the unassembled prompt.

See more: vertexai.preview.prompts.Prompt.__repr__

vertexai.preview.prompts.Prompt.__str__

__str__() -> str

Returns the prompt data as a string, without any variables replaced.

See more: vertexai.preview.prompts.Prompt.__str__

vertexai.preview.prompts.Prompt.assemble_contents

assemble_contents( **variables_dict: PartsType, ) -> typing.List[vertexai.generative_models._generative_models.Content]

Returns the prompt data, as a List[Content], assembled with variables if applicable.

See more: vertexai.preview.prompts.Prompt.assemble_contents

vertexai.preview.prompts.Prompt.generate_content

generate_content( contents: ContentsType, *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None, model_name: typing.Optional[str] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, tool_config: typing.Optional[ vertexai.generative_models._generative_models.ToolConfig ] = None, stream: bool = False, system_instruction: typing.Optional[PartsType] = None ) -> typing.Union[ vertexai.generative_models._generative_models.GenerationResponse, typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse], ]

Generates content using the saved Prompt configs.

See more: vertexai.preview.prompts.Prompt.generate_content
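
A hedged sketch tying Prompt variables to generate_content; the template text and model name are placeholders, and prompt.variables is assumed to expose the variable sets passed at construction:

from vertexai.preview.prompts import Prompt

prompt = Prompt(
    prompt_data="Translate the following sentence to {language}: {sentence}",
    variables=[{"language": "French", "sentence": "I like programming."}],
    model_name="gemini-1.5-flash",  # example model name
)
response = prompt.generate_content(
    contents=prompt.assemble_contents(**prompt.variables[0])
)
print(response.text)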

vertexai.preview.prompts.Prompt.get_unassembled_prompt_data

get_unassembled_prompt_data() -> PartsType

Returns the prompt data, without any variables replaced.

See more: vertexai.preview.prompts.Prompt.get_unassembled_prompt_data

vertexai.preview.reasoning_engines.A2aAgent

A2aAgent( *, agent_card: AgentCard, task_store_builder: typing.Callable[[...], TaskStore] = None, task_store_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, agent_executor_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, agent_executor_builder: typing.Optional[ typing.Callable[[...], AgentExecutor] ] = None, request_handler_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, request_handler_builder: typing.Optional[ typing.Callable[[...], RequestHandler] ] = None, extended_agent_card: AgentCard = None )

Initializes the A2A agent.

See more: vertexai.preview.reasoning_engines.A2aAgent

vertexai.preview.reasoning_engines.A2aAgent.clone

clone() -> vertexai.preview.reasoning_engines.templates.a2a.A2aAgent

Clones the A2A agent.

See more: vertexai.preview.reasoning_engines.A2aAgent.clone

vertexai.preview.reasoning_engines.A2aAgent.register_operations

register_operations() -> typing.Dict[str, typing.List[str]]

Registers the operations of the A2A Agent.

See more: vertexai.preview.reasoning_engines.A2aAgent.register_operations

vertexai.preview.reasoning_engines.A2aAgent.set_up

set_up()

Sets up the A2A application.

See more: vertexai.preview.reasoning_engines.A2aAgent.set_up

vertexai.preview.reasoning_engines.AG2Agent

AG2Agent( model: str, runnable_name: str, *, api_type: typing.Optional[str] = None, llm_config: typing.Optional[typing.Mapping[str, typing.Any]] = None, system_instruction: typing.Optional[str] = None, runnable_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, runnable_builder: typing.Optional[typing.Callable[[...], ConversableAgent]] = None, tools: typing.Optional[typing.Sequence[typing.Callable[[...], typing.Any]]] = None, enable_tracing: bool = False )

Initializes the AG2 Agent.

See more: vertexai.preview.reasoning_engines.AG2Agent

vertexai.preview.reasoning_engines.AG2Agent.clone

clone() -> vertexai.preview.reasoning_engines.templates.ag2.AG2Agent

Returns a clone of the AG2Agent.

See more: vertexai.preview.reasoning_engines.AG2Agent.clone

vertexai.preview.reasoning_engines.AG2Agent.query

query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], max_turns: typing.Optional[int] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Queries the Agent with the given input.

See more: vertexai.preview.reasoning_engines.AG2Agent.query

vertexai.preview.reasoning_engines.AG2Agent.set_up

set_up()

Sets up the agent for execution of queries at runtime.

See more: vertexai.preview.reasoning_engines.AG2Agent.set_up

vertexai.preview.reasoning_engines.AdkApp

AdkApp( *, agent: BaseAgent, plugins: typing.Optional[typing.List[BasePlugin]] = None, enable_tracing: bool = False, session_service_builder: typing.Optional[ typing.Callable[[...], BaseSessionService] ] = None, artifact_service_builder: typing.Optional[ typing.Callable[[...], BaseArtifactService] ] = None, memory_service_builder: typing.Optional[ typing.Callable[[...], BaseMemoryService] ] = None, credential_service_builder: typing.Optional[ typing.Callable[[...], BaseCredentialService] ] = None, env_vars: typing.Optional[typing.Dict[str, str]] = None )

An ADK Application.

See more: vertexai.preview.reasoning_engines.AdkApp

vertexai.preview.reasoning_engines.AdkApp.async_add_session_to_memory

async_add_session_to_memory(*, session: typing.Dict[str, typing.Any])

vertexai.preview.reasoning_engines.AdkApp.async_create_session

async_create_session( *, user_id: str, session_id: typing.Optional[str] = None, state: typing.Optional[typing.Dict[str, typing.Any]] = None, **kwargs )

vertexai.preview.reasoning_engines.AdkApp.async_delete_session

async_delete_session(*, user_id: str, session_id: str, **kwargs)

Deletes a session for the given user.

See more: vertexai.preview.reasoning_engines.AdkApp.async_delete_session

vertexai.preview.reasoning_engines.AdkApp.async_get_session

async_get_session(*, user_id: str, session_id: str, **kwargs)

Get a session for the given user.

See more: vertexai.preview.reasoning_engines.AdkApp.async_get_session

vertexai.preview.reasoning_engines.AdkApp.async_list_sessions

async_list_sessions(*, user_id: str, **kwargs)

List sessions for the given user.

See more: vertexai.preview.reasoning_engines.AdkApp.async_list_sessions

vertexai.preview.reasoning_engines.AdkApp.async_search_memory

async_search_memory(*, user_id: str, query: str)

Searches memories for the given user.

See more: vertexai.preview.reasoning_engines.AdkApp.async_search_memory

vertexai.preview.reasoning_engines.AdkApp.async_stream_query

async_stream_query( *, message: typing.Union[str, typing.Dict[str, typing.Any]], user_id: str, session_id: typing.Optional[str] = None, run_config: typing.Optional[typing.Dict[str, typing.Any]] = None, **kwargs ) -> typing.AsyncIterable[typing.Dict[str, typing.Any]]

Streams responses asynchronously from the ADK application.

See more: vertexai.preview.reasoning_engines.AdkApp.async_stream_query

vertexai.preview.reasoning_engines.AdkApp.bidi_stream_query

bidi_stream_query(request_queue: typing.Any) -> typing.AsyncIterable[typing.Any]

Runs a bidirectional streaming query against the ADK application.

See more: vertexai.preview.reasoning_engines.AdkApp.bidi_stream_query

vertexai.preview.reasoning_engines.AdkApp.clone

clone()

Returns a clone of the ADK application.

See more: vertexai.preview.reasoning_engines.AdkApp.clone

vertexai.preview.reasoning_engines.AdkApp.create_session

create_session( *, user_id: str, session_id: typing.Optional[str] = None, state: typing.Optional[typing.Dict[str, typing.Any]] = None, **kwargs )

vertexai.preview.reasoning_engines.AdkApp.delete_session

delete_session(*, user_id: str, session_id: str, **kwargs)

Deletes a session for the given user.

See more: vertexai.preview.reasoning_engines.AdkApp.delete_session

vertexai.preview.reasoning_engines.AdkApp.get_session

get_session(*, user_id: str, session_id: str, **kwargs)

Get a session for the given user.

See more: vertexai.preview.reasoning_engines.AdkApp.get_session

vertexai.preview.reasoning_engines.AdkApp.list_sessions

list_sessions(*, user_id: str, **kwargs)

List sessions for the given user.

See more: vertexai.preview.reasoning_engines.AdkApp.list_sessions

vertexai.preview.reasoning_engines.AdkApp.register_operations

register_operations() -> typing.Dict[str, typing.List[str]]

Registers the operations of the ADK application.

See more: vertexai.preview.reasoning_engines.AdkApp.register_operations

vertexai.preview.reasoning_engines.AdkApp.set_up

set_up()

Sets up the ADK application.

See more: vertexai.preview.reasoning_engines.AdkApp.set_up

vertexai.preview.reasoning_engines.AdkApp.stream_query

stream_query( *, message: typing.Union[str, typing.Dict[str, typing.Any]], user_id: str, session_id: typing.Optional[str] = None, run_config: typing.Optional[typing.Dict[str, typing.Any]] = None, **kwargs )

Streams responses from the ADK application for the given message.

See more: vertexai.preview.reasoning_engines.AdkApp.stream_query
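
A heavily hedged sketch; it assumes the separate Agent Development Kit package (google.adk) is installed and that its Agent class accepts the fields shown, and the model name and user id are placeholders:

from google.adk.agents import Agent  # separate ADK package; import path is an assumption
from vertexai.preview.reasoning_engines import AdkApp

agent = Agent(
    name="helper_agent",
    model="gemini-1.5-flash",  # example model name
    instruction="Answer user questions briefly.",
)
app = AdkApp(agent=agent, enable_tracing=False)

for event in app.stream_query(user_id="user-123", message="What is Vertex AI?"):
    print(event)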

vertexai.preview.reasoning_engines.LangchainAgent

LangchainAgent( model: str, *, system_instruction: typing.Optional[str] = None, prompt: typing.Optional[RunnableSerializable] = None, tools: typing.Optional[typing.Sequence[_ToolLike]] = None, output_parser: typing.Optional[RunnableSerializable] = None, chat_history: typing.Optional[GetSessionHistoryCallable] = None, model_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_tool_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, agent_executor_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, runnable_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_builder: typing.Optional[typing.Callable] = None, runnable_builder: typing.Optional[typing.Callable] = None, enable_tracing: bool = False )

Initializes the LangchainAgent.

See more: vertexai.preview.reasoning_engines.LangchainAgent

vertexai.preview.reasoning_engines.LangchainAgent.clone

clone() -> vertexai.preview.reasoning_engines.templates.langchain.LangchainAgent

Returns a clone of the LangchainAgent.

See more: vertexai.preview.reasoning_engines.LangchainAgent.clone

vertexai.preview.reasoning_engines.LangchainAgent.query

query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Queries the Agent with the given input and config.

See more: vertexai.preview.reasoning_engines.LangchainAgent.query

vertexai.preview.reasoning_engines.LangchainAgent.set_up

set_up()

Sets up the agent for execution of queries at runtime.

See more: vertexai.preview.reasoning_engines.LangchainAgent.set_up

vertexai.preview.reasoning_engines.LangchainAgent.stream_query

stream_query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], config: typing.Optional[RunnableConfig] = None, **kwargs ) -> typing.Iterable[typing.Any]

Streams query responses from the Agent for the given input and config.

See more: vertexai.preview.reasoning_engines.LangchainAgent.stream_query
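
A hedged sketch; it assumes the LangChain extras are installed, the exchange-rate tool is a stub written for this example, and the model name is only an example:

from vertexai.preview.reasoning_engines import LangchainAgent

def get_exchange_rate(currency_from: str, currency_to: str) -> str:
    """Looks up an exchange rate (stub for illustration)."""
    return f"1 {currency_from} is roughly 0.9 {currency_to}."

agent = LangchainAgent(
    model="gemini-1.5-flash",  # example model name
    tools=[get_exchange_rate],
    model_kwargs={"temperature": 0.1},
)
print(agent.query(input="What is the USD to EUR exchange rate?"))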

vertexai.preview.reasoning_engines.LanggraphAgent

LanggraphAgent( model: str, *, tools: typing.Optional[typing.Sequence[_ToolLike]] = None, model_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_tool_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_builder: typing.Optional[typing.Callable[[...], BaseLanguageModel]] = None, runnable_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, runnable_builder: typing.Optional[ typing.Callable[[...], RunnableSerializable] ] = None, checkpointer_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, checkpointer_builder: typing.Optional[ typing.Callable[[...], BaseCheckpointSaver] ] = None, enable_tracing: bool = False )

Initializes the LangGraph Agent.

See more: vertexai.preview.reasoning_engines.LanggraphAgent

vertexai.preview.reasoning_engines.LanggraphAgent.clone

clone() -> vertexai.preview.reasoning_engines.templates.langgraph.LanggraphAgent

Returns a clone of the LanggraphAgent.

See more: vertexai.preview.reasoning_engines.LanggraphAgent.clone

vertexai.preview.reasoning_engines.LanggraphAgent.get_state

get_state( config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Gets the current state of the Agent.

See more: vertexai.preview.reasoning_engines.LanggraphAgent.get_state

vertexai.preview.reasoning_engines.LanggraphAgent.get_state_history

get_state_history( config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Iterable[typing.Any]

Gets the state history of the Agent.

See more: vertexai.preview.reasoning_engines.LanggraphAgent.get_state_history

vertexai.preview.reasoning_engines.LanggraphAgent.query

query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Queries the Agent with the given input and config.

See more: vertexai.preview.reasoning_engines.LanggraphAgent.query

vertexai.preview.reasoning_engines.LanggraphAgent.register_operations

register_operations() -> typing.Mapping[str, typing.Sequence[str]]

Registers the operations of the Agent.

See more: vertexai.preview.reasoning_engines.LanggraphAgent.register_operations

vertexai.preview.reasoning_engines.LanggraphAgent.set_up

set_up()

Sets up the agent for execution of queries at runtime.

See more: vertexai.preview.reasoning_engines.LanggraphAgent.set_up

vertexai.preview.reasoning_engines.LanggraphAgent.stream_query

stream_query( *, input: typing.Union[str, typing.Mapping[str, typing.Any]], config: typing.Optional[RunnableConfig] = None, **kwargs ) -> typing.Iterable[typing.Any]

Streams query responses from the Agent for the given input and config.

See more: vertexai.preview.reasoning_engines.LanggraphAgent.stream_query

vertexai.preview.reasoning_engines.LanggraphAgent.update_state

update_state( config: typing.Optional[RunnableConfig] = None, **kwargs: typing.Any ) -> typing.Dict[str, typing.Any]

Updates the state of the Agent.

See more: vertexai.preview.reasoning_engines.LanggraphAgent.update_state

vertexai.preview.reasoning_engines.LlamaIndexQueryPipelineAgent

LlamaIndexQueryPipelineAgent( model: str, *, system_instruction: typing.Optional[str] = None, prompt: typing.Optional[QueryComponent] = None, model_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, model_builder: typing.Optional[typing.Callable[[...], FunctionCallingLLM]] = None, retriever_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, retriever_builder: typing.Optional[typing.Callable[[...], QueryComponent]] = None, response_synthesizer_kwargs: typing.Optional[ typing.Mapping[str, typing.Any] ] = None, response_synthesizer_builder: typing.Optional[ typing.Callable[[...], QueryComponent] ] = None, runnable_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None, runnable_builder: typing.Optional[typing.Callable[[...], QueryPipeline]] = None, enable_tracing: bool = False )

Initializes the LlamaIndexQueryPipelineAgent.

See more: vertexai.preview.reasoning_engines.LlamaIndexQueryPipelineAgent

vertexai.preview.reasoning_engines.LlamaIndexQueryPipelineAgent.clone

clone() -> ( vertexai.preview.reasoning_engines.templates.llama_index.LlamaIndexQueryPipelineAgent )

Returns a clone of the LlamaIndexQueryPipelineAgent.

See more: vertexai.preview.reasoning_engines.LlamaIndexQueryPipelineAgent.clone

vertexai.preview.reasoning_engines.LlamaIndexQueryPipelineAgent.query

query( input: typing.Union[str, typing.Mapping[str, typing.Any]], **kwargs: typing.Any ) -> typing.Union[ str, typing.Dict[str, typing.Any], typing.Sequence[typing.Union[str, typing.Dict[str, typing.Any]]], ]

Queries the Agent with the given input and config.

See more: vertexai.preview.reasoning_engines.LlamaIndexQueryPipelineAgent.query

vertexai.preview.reasoning_engines.LlamaIndexQueryPipelineAgent.set_up

set_up()

Sets up the agent for execution of queries at runtime.

See more: vertexai.preview.reasoning_engines.LlamaIndexQueryPipelineAgent.set_up
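
A minimal usage sketch for the LlamaIndexQueryPipelineAgent methods above; the model ID and system instruction are illustrative assumptions:

from vertexai.preview import reasoning_engines

agent = reasoning_engines.LlamaIndexQueryPipelineAgent(
    model="gemini-2.0-flash",                          # assumed example model ID
    system_instruction="Answer in a single sentence.",
)
agent.set_up()  # builds the underlying QueryPipeline
answer = agent.query(input="What is Vertex AI Agent Engine?")
print(answer)   # a str, a dict, or a sequence of either, per the signature above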

vertexai.preview.reasoning_engines.Queryable.query

query(**kwargs)

Runs the Reasoning Engine to serve the user query.

See more: vertexai.preview.reasoning_engines.Queryable.query

vertexai.preview.reasoning_engines.ReasoningEngine

ReasoningEngine(reasoning_engine_name: str)

Retrieves a Reasoning Engine resource.

See more: vertexai.preview.reasoning_engines.ReasoningEngine

vertexai.preview.reasoning_engines.ReasoningEngine.create

create( reasoning_engine: typing.Union[ vertexai.reasoning_engines._reasoning_engines.Queryable, vertexai.reasoning_engines._reasoning_engines.OperationRegistrable, ], *, requirements: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, reasoning_engine_name: typing.Optional[str] = None, display_name: typing.Optional[str] = None, description: typing.Optional[str] = None, gcs_dir_name: str = "reasoning_engine", sys_version: typing.Optional[str] = None, extra_packages: typing.Optional[typing.Sequence[str]] = None ) -> vertexai.reasoning_engines._reasoning_engines.ReasoningEngine

Creates a new ReasoningEngine.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.create
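
A minimal deployment sketch for ReasoningEngine.create: any object that implements query() (Queryable) can be deployed. The class, requirements list, and display name below are illustrative assumptions:

from vertexai.preview import reasoning_engines

class EchoAgent:
    """Hypothetical Queryable: exposes a query() method to be served remotely."""

    def query(self, message: str) -> str:
        return f"echo: {message}"

engine = reasoning_engines.ReasoningEngine.create(
    EchoAgent(),
    requirements=["cloudpickle"],        # assumption: pin whatever the agent needs at runtime
    display_name="echo-reasoning-engine",
    description="Echoes the incoming message.",
)
print(engine.operation_schemas())        # (Open)API schemas for the deployed operations
print(engine.query(message="hello"))     # remote call against the deployed resource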

vertexai.preview.reasoning_engines.ReasoningEngine.delete

delete(sync: bool = True) -> None

Deletes this Vertex AI resource.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.delete

vertexai.preview.reasoning_engines.ReasoningEngine.list

list( filter: typing.Optional[str] = None, order_by: typing.Optional[str] = None, project: typing.Optional[str] = None, location: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, parent: typing.Optional[str] = None, ) -> typing.List[google.cloud.aiplatform.base.VertexAiResourceNoun]

List all instances of this Vertex AI Resource.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.list

vertexai.preview.reasoning_engines.ReasoningEngine.operation_schemas

operation_schemas() -> typing.Sequence[typing.Dict[str, typing.Any]]

Returns the (Open)API schemas for the Reasoning Engine.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.operation_schemas

vertexai.preview.reasoning_engines.ReasoningEngine.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.to_dict

vertexai.preview.reasoning_engines.ReasoningEngine.update

update( *, reasoning_engine: typing.Optional[ typing.Union[ vertexai.reasoning_engines._reasoning_engines.Queryable, vertexai.reasoning_engines._reasoning_engines.OperationRegistrable, ] ] = None, requirements: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, display_name: typing.Optional[str] = None, description: typing.Optional[str] = None, gcs_dir_name: str = "reasoning_engine", sys_version: typing.Optional[str] = None, extra_packages: typing.Optional[typing.Sequence[str]] = None ) -> vertexai.reasoning_engines._reasoning_engines.ReasoningEngine

Updates an existing ReasoningEngine.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.update

vertexai.preview.reasoning_engines.ReasoningEngine.wait

wait()

Helper method that blocks until all futures are complete.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.wait

vertexai.preview.tuning.SourceModel

SourceModel(base_model: str, custom_base_model: str = "")

Initializes SourceModel.

See more: vertexai.preview.tuning.SourceModel

vertexai.preview.tuning.TuningJob

TuningJob(tuning_job_name: str)

Initializes class with project, location, and api_client.

See more: vertexai.preview.tuning.TuningJob

vertexai.preview.tuning.TuningJob.list

list( filter: typing.Optional[str] = None, ) -> typing.List[vertexai.tuning._tuning.TuningJob]

Lists TuningJobs.

See more: vertexai.preview.tuning.TuningJob.list

vertexai.preview.tuning.TuningJob.refresh

refresh() -> vertexai.tuning._tuning.TuningJob

Refreshes the tuning job from the service.

See more: vertexai.preview.tuning.TuningJob.refresh

vertexai.preview.tuning.TuningJob.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.preview.tuning.TuningJob.to_dict
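
A short sketch of the TuningJob helpers above; the resource name is an illustrative placeholder:

from vertexai.preview import tuning

# List recent tuning jobs, optionally constrained by a filter string.
for job in tuning.TuningJob.list():
    print(job.to_dict().get("name"))

# Or attach to a known job by resource name and re-read its state from the service.
job = tuning.TuningJob("projects/PROJECT/locations/us-central1/tuningJobs/123")
job.refresh()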

vertexai.preview.tuning.sft.SupervisedTuningJob.list

list( filter: typing.Optional[str] = None, ) -> typing.List[vertexai.tuning._tuning.TuningJob]

Lists TuningJobs.

See more: vertexai.preview.tuning.sft.SupervisedTuningJob.list

vertexai.preview.tuning.sft.SupervisedTuningJob.refresh

refresh() -> vertexai.tuning._tuning.TuningJob

Refreshes the tuning job from the service.

See more: vertexai.preview.tuning.sft.SupervisedTuningJob.refresh

vertexai.preview.tuning.sft.SupervisedTuningJob.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.preview.tuning.sft.SupervisedTuningJob.to_dict

vertexai.preview.vision_models.ControlReferenceImage

ControlReferenceImage( reference_id, image: typing.Optional[ typing.Union[bytes, vertexai.vision_models.Image, str] ] = None, control_type: typing.Optional[ typing.Literal["default", "scribble", "face_mesh", "canny"] ] = None, enable_control_image_computation: typing.Optional[bool] = False, )

Creates a ControlReferenceImage object.

See more: vertexai.preview.vision_models.ControlReferenceImage

vertexai.preview.vision_models.GeneratedImage

GeneratedImage( image_bytes: typing.Optional[bytes], generation_parameters: typing.Dict[str, typing.Any], gcs_uri: typing.Optional[str] = None, )

Creates a GeneratedImage object.

See more: vertexai.preview.vision_models.GeneratedImage

vertexai.preview.vision_models.GeneratedImage.load_from_file

load_from_file(location: str) -> vertexai.preview.vision_models.GeneratedImage

vertexai.preview.vision_models.GeneratedImage.save

save(location: str, include_generation_parameters: bool = True)

Saves image to a file.

See more: vertexai.preview.vision_models.GeneratedImage.save

vertexai.preview.vision_models.GeneratedImage.show

show()

vertexai.preview.vision_models.GeneratedMask

GeneratedMask( image_bytes: typing.Optional[bytes], gcs_uri: typing.Optional[str] = None, labels: typing.Optional[ typing.List[vertexai.preview.vision_models.EntityLabel] ] = None, )

Creates a GeneratedMask object.

See more: vertexai.preview.vision_models.GeneratedMask

vertexai.preview.vision_models.GeneratedMask.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Image

Loads image from local file or Google Cloud Storage.

See more: vertexai.preview.vision_models.GeneratedMask.load_from_file

vertexai.preview.vision_models.GeneratedMask.save

save(location: str)

Saves image to a file.

See more: vertexai.preview.vision_models.GeneratedMask.save

vertexai.preview.vision_models.GeneratedMask.show

show()

vertexai.preview.vision_models.Image

Image( image_bytes: typing.Optional[bytes] = None, gcs_uri: typing.Optional[str] = None )

Creates an Image object.

See more: vertexai.preview.vision_models.Image

vertexai.preview.vision_models.Image.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Image

Loads image from local file or Google Cloud Storage.

See more: vertexai.preview.vision_models.Image.load_from_file

vertexai.preview.vision_models.Image.save

save(location: str)

Saves image to a file.

See more: vertexai.preview.vision_models.Image.save

vertexai.preview.vision_models.Image.show

show()

Shows the image.

See more: vertexai.preview.vision_models.Image.show
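
A minimal sketch of the Image helpers above; the URI and file path are illustrative placeholders:

from vertexai.preview import vision_models

image = vision_models.Image.load_from_file("gs://my-bucket/photo.png")  # local paths also work
image.save("/tmp/photo-copy.png")
image.show()  # renders inline in notebook environments such as Colab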

vertexai.preview.vision_models.ImageCaptioningModel

ImageCaptioningModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageCaptioningModel

vertexai.preview.vision_models.ImageCaptioningModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageCaptioningModel.get_captions

get_captions( image: vertexai.vision_models.Image, *, number_of_results: int = 1, language: str = "en", output_gcs_uri: typing.Optional[str] = None ) -> typing.List[str]

Generates captions for a given image.

See more: vertexai.preview.vision_models.ImageCaptioningModel.get_captions
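
A minimal captioning sketch; the model ID and image URI are illustrative assumptions:

from vertexai.preview import vision_models

model = vision_models.ImageCaptioningModel.from_pretrained("imagetext@001")  # assumed model ID
image = vision_models.Image.load_from_file("gs://my-bucket/photo.png")       # placeholder URI
captions = model.get_captions(image, number_of_results=2, language="en")
print(captions)  # a list of caption strings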

vertexai.preview.vision_models.ImageGenerationModel

ImageGenerationModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageGenerationModel

vertexai.preview.vision_models.ImageGenerationModel.edit_image

edit_image( *, prompt: str, base_image: typing.Optional[vertexai.vision_models.Image] = None, mask: typing.Optional[vertexai.vision_models.Image] = None, reference_images: typing.Optional[ typing.List[vertexai.vision_models.ReferenceImage] ] = None, negative_prompt: typing.Optional[str] = None, number_of_images: int = 1, guidance_scale: typing.Optional[float] = None, edit_mode: typing.Optional[ typing.Literal[ "inpainting-insert", "inpainting-remove", "outpainting", "product-image" ] ] = None, mask_mode: typing.Optional[ typing.Literal["background", "foreground", "semantic"] ] = None, segmentation_classes: typing.Optional[typing.List[str]] = None, mask_dilation: typing.Optional[float] = None, product_position: typing.Optional[typing.Literal["fixed", "reposition"]] = None, output_mime_type: typing.Optional[typing.Literal["image/png", "image/jpeg"]] = None, compression_quality: typing.Optional[float] = None, language: typing.Optional[str] = None, seed: typing.Optional[int] = None, output_gcs_uri: typing.Optional[str] = None, safety_filter_level: typing.Optional[ typing.Literal["block_most", "block_some", "block_few", "block_fewest"] ] = None, person_generation: typing.Optional[ typing.Literal["dont_allow", "allow_adult", "allow_all"] ] = None ) -> vertexai.preview.vision_models.ImageGenerationResponse

Edits an existing image based on a text prompt.

See more: vertexai.preview.vision_models.ImageGenerationModel.edit_image

vertexai.preview.vision_models.ImageGenerationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageGenerationModel.generate_images

generate_images( prompt: str, *, negative_prompt: typing.Optional[str] = None, number_of_images: int = 1, aspect_ratio: typing.Optional[ typing.Literal["1:1", "9:16", "16:9", "4:3", "3:4"] ] = None, guidance_scale: typing.Optional[float] = None, language: typing.Optional[str] = None, seed: typing.Optional[int] = None, output_gcs_uri: typing.Optional[str] = None, add_watermark: typing.Optional[bool] = True, safety_filter_level: typing.Optional[ typing.Literal["block_most", "block_some", "block_few", "block_fewest"] ] = None, person_generation: typing.Optional[ typing.Literal["dont_allow", "allow_adult", "allow_all"] ] = None ) -> vertexai.preview.vision_models.ImageGenerationResponse

Generates images from a text prompt.

See more: vertexai.preview.vision_models.ImageGenerationModel.generate_images
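
A minimal generation sketch that also exercises the ImageGenerationResponse indexing and iteration documented below; the model ID, prompt, and output paths are illustrative assumptions:

from vertexai.preview import vision_models

model = vision_models.ImageGenerationModel.from_pretrained("imagegeneration@006")  # assumed model ID
response = model.generate_images(
    prompt="A watercolor painting of a lighthouse at dawn",
    number_of_images=2,
    aspect_ratio="16:9",
)
for i, generated in enumerate(response):        # __iter__ yields GeneratedImage objects
    generated.save(f"/tmp/lighthouse-{i}.png")  # generation parameters are embedded by default
first = response[0]                             # __getitem__ returns a single GeneratedImage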

vertexai.preview.vision_models.ImageGenerationModel.upscale_image

upscale_image( image: typing.Union[ vertexai.vision_models.Image, vertexai.preview.vision_models.GeneratedImage ], new_size: typing.Optional[int] = 2048, upscale_factor: typing.Optional[typing.Literal["x2", "x4"]] = None, output_mime_type: typing.Optional[ typing.Literal["image/png", "image/jpeg"] ] = "image/png", output_compression_quality: typing.Optional[int] = None, output_gcs_uri: typing.Optional[str] = None, ) -> vertexai.vision_models.Image

vertexai.preview.vision_models.ImageGenerationResponse.__getitem__

__getitem__(idx: int) -> vertexai.preview.vision_models.GeneratedImage

Gets the generated image by index.

See more: vertexai.preview.vision_models.ImageGenerationResponse.__getitem__

vertexai.preview.vision_models.ImageGenerationResponse.__iter__

__iter__() -> typing.Iterator[vertexai.preview.vision_models.GeneratedImage]

Iterates through the generated images.

See more: vertexai.preview.vision_models.ImageGenerationResponse.__iter__

vertexai.preview.vision_models.ImageQnAModel

ImageQnAModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageQnAModel

vertexai.preview.vision_models.ImageQnAModel.ask_question

ask_question( image: vertexai.vision_models.Image, question: str, *, number_of_results: int = 1 ) -> typing.List[str]

Answers questions about an image.

See more: vertexai.preview.vision_models.ImageQnAModel.ask_question
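
A minimal visual question answering sketch; the model ID, image URI, and question are illustrative assumptions:

from vertexai.preview import vision_models

model = vision_models.ImageQnAModel.from_pretrained("imagetext@001")      # assumed model ID
image = vision_models.Image.load_from_file("gs://my-bucket/receipt.png")  # placeholder URI
answers = model.ask_question(image, question="What is the total amount?", number_of_results=1)
print(answers[0])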

vertexai.preview.vision_models.ImageQnAModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageSegmentationModel

ImageSegmentationModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageSegmentationModel

vertexai.preview.vision_models.ImageSegmentationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageSegmentationModel.segment_image

segment_image( base_image: vertexai.vision_models.Image, prompt: typing.Optional[str] = None, scribble: typing.Optional[vertexai.preview.vision_models.Scribble] = None, mode: typing.Literal[ "foreground", "background", "semantic", "prompt", "interactive" ] = "foreground", max_predictions: typing.Optional[int] = None, confidence_threshold: typing.Optional[float] = 0.1, mask_dilation: typing.Optional[float] = None, binary_color_threshold: typing.Optional[float] = None, ) -> vertexai.preview.vision_models.ImageSegmentationResponse

vertexai.preview.vision_models.ImageSegmentationResponse.__getitem__

__getitem__(idx: int) -> vertexai.preview.vision_models.GeneratedMask

Gets the generated mask by index.

See more: vertexai.preview.vision_models.ImageSegmentationResponse.__getitem__

vertexai.preview.vision_models.ImageSegmentationResponse.__iter__

__iter__() -> typing.Iterator[vertexai.preview.vision_models.GeneratedMask]

Iterates through the generated masks.

See more: vertexai.preview.vision_models.ImageSegmentationResponse.__iter__
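
A minimal segmentation sketch that exercises segment_image and the response iteration above; the model ID, image URI, and prompt are illustrative assumptions:

from vertexai.preview import vision_models

model = vision_models.ImageSegmentationModel.from_pretrained("image-segmentation-001")  # assumed model ID
base = vision_models.Image.load_from_file("gs://my-bucket/street.png")                  # placeholder URI
response = model.segment_image(base, mode="semantic", prompt="car")  # prompt drives semantic/prompt modes
for i, mask in enumerate(response):   # __iter__ yields GeneratedMask objects
    print(mask.labels)
    mask.save(f"/tmp/mask-{i}.png")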

vertexai.preview.vision_models.ImageTextModel

ImageTextModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageTextModel

vertexai.preview.vision_models.ImageTextModel.ask_question

ask_question( image: vertexai.vision_models.Image, question: str, *, number_of_results: int = 1 ) -> typing.List[str]

Answers questions about an image.

See more: vertexai.preview.vision_models.ImageTextModel.ask_question

vertexai.preview.vision_models.ImageTextModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageTextModel.get_captions

get_captions( image: vertexai.vision_models.Image, *, number_of_results: int = 1, language: str = "en", output_gcs_uri: typing.Optional[str] = None ) -> typing.List[str]

Generates captions for a given image.

See more: vertexai.preview.vision_models.ImageTextModel.get_captions

vertexai.preview.vision_models.MaskReferenceImage

MaskReferenceImage( reference_id, image: typing.Optional[ typing.Union[bytes, vertexai.vision_models.Image, str] ] = None, mask_mode: typing.Optional[ typing.Literal[ "default", "user_provided", "background", "foreground", "semantic" ] ] = None, dilation: typing.Optional[float] = None, segmentation_classes: typing.Optional[typing.List[int]] = None, )

Creates a MaskReferenceImage object.

See more: vertexai.preview.vision_models.MaskReferenceImage

vertexai.preview.vision_models.MultiModalEmbeddingModel

MultiModalEmbeddingModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.MultiModalEmbeddingModel

vertexai.preview.vision_models.MultiModalEmbeddingModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.MultiModalEmbeddingModel.get_embeddings

get_embeddings( image: typing.Optional[vertexai.vision_models.Image] = None, video: typing.Optional[vertexai.vision_models.Video] = None, contextual_text: typing.Optional[str] = None, dimension: typing.Optional[int] = None, video_segment_config: typing.Optional[ vertexai.vision_models.VideoSegmentConfig ] = None, ) -> vertexai.vision_models.MultiModalEmbeddingResponse

Gets embedding vectors from the provided image, video, and/or contextual text.

See more: vertexai.preview.vision_models.MultiModalEmbeddingModel.get_embeddings
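
A minimal embedding sketch combining an image with contextual text; the model ID and URI are illustrative assumptions, and the fields read at the end are the image and text embedding attributes of MultiModalEmbeddingResponse:

from vertexai.preview import vision_models

model = vision_models.MultiModalEmbeddingModel.from_pretrained("multimodalembedding@001")  # assumed model ID
image = vision_models.Image.load_from_file("gs://my-bucket/photo.png")                     # placeholder URI
embeddings = model.get_embeddings(
    image=image,
    contextual_text="a lighthouse at dawn",
    dimension=512,  # request lower-dimensional embeddings where supported
)
print(len(embeddings.image_embedding), len(embeddings.text_embedding))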

vertexai.preview.vision_models.RawReferenceImage

RawReferenceImage( reference_id, image: typing.Optional[ typing.Union[bytes, vertexai.vision_models.Image, str] ] = None, )

Creates a ReferenceImage object.

See more: vertexai.preview.vision_models.RawReferenceImage

vertexai.preview.vision_models.ReferenceImage

ReferenceImage( reference_id, image: typing.Optional[ typing.Union[bytes, vertexai.vision_models.Image, str] ] = None, )

Creates a ReferenceImage object.

See more: vertexai.preview.vision_models.ReferenceImage

vertexai.preview.vision_models.Scribble

Scribble(image_bytes: typing.Optional[bytes], gcs_uri: typing.Optional[str] = None)

Creates a Scribble object.

See more: vertexai.preview.vision_models.Scribble

vertexai.preview.vision_models.StyleReferenceImage

StyleReferenceImage( reference_id, image: typing.Optional[ typing.Union[bytes, vertexai.vision_models.Image, str] ] = None, style_description: typing.Optional[str] = None, )

Creates a StyleReferenceImage object.

See more: vertexai.preview.vision_models.StyleReferenceImage

vertexai.preview.vision_models.SubjectReferenceImage

SubjectReferenceImage( reference_id, image: typing.Optional[ typing.Union[bytes, vertexai.vision_models.Image, str] ] = None, subject_description: typing.Optional[str] = None, subject_type: typing.Optional[ typing.Literal["default", "person", "animal", "product"] ] = None, )

Creates a SubjectReferenceImage object.

See more: vertexai.preview.vision_models.SubjectReferenceImage
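
A short sketch of constructing the reference image types above; the reference IDs, URIs, and descriptions are illustrative assumptions. These objects are typically passed via the reference_images parameter of edit_image, documented earlier:

from vertexai.preview import vision_models

subject = vision_models.SubjectReferenceImage(
    reference_id=1,
    image="gs://my-bucket/dog.png",          # bytes, an Image, or a path/URI string
    subject_description="my golden retriever",
    subject_type="animal",
)
style = vision_models.StyleReferenceImage(
    reference_id=2,
    image="gs://my-bucket/ukiyo-e.png",
    style_description="ukiyo-e woodblock print",
)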

vertexai.preview.vision_models.Video

Video( video_bytes: typing.Optional[bytes] = None, gcs_uri: typing.Optional[str] = None )

Creates a Video object.

See more: vertexai.preview.vision_models.Video

vertexai.preview.vision_models.Video.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Video

Loads video from local file or Google Cloud Storage.

See more: vertexai.preview.vision_models.Video.load_from_file

vertexai.preview.vision_models.Video.save

save(location: str)

Saves video to a file.

See more: vertexai.preview.vision_models.Video.save

vertexai.preview.vision_models.VideoEmbedding

VideoEmbedding( start_offset_sec: int, end_offset_sec: int, embedding: typing.List[float] )

Creates a VideoEmbedding object.

See more: vertexai.preview.vision_models.VideoEmbedding

vertexai.preview.vision_models.VideoSegmentConfig

VideoSegmentConfig( start_offset_sec: int = 0, end_offset_sec: int = 120, interval_sec: int = 16 )

Creates a VideoSegmentConfig object.

See more: vertexai.preview.vision_models.VideoSegmentConfig
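
A minimal video embedding sketch showing Video, VideoSegmentConfig, and get_embeddings together; the model ID and URI are illustrative assumptions, and the video_embeddings list iterated below is the per-interval field on MultiModalEmbeddingResponse:

from vertexai.preview import vision_models

model = vision_models.MultiModalEmbeddingModel.from_pretrained("multimodalembedding@001")  # assumed model ID
video = vision_models.Video.load_from_file("gs://my-bucket/clip.mp4")                      # placeholder URI
segments = vision_models.VideoSegmentConfig(start_offset_sec=0, end_offset_sec=60, interval_sec=15)
response = model.get_embeddings(video=video, video_segment_config=segments)
for segment in response.video_embeddings:  # one VideoEmbedding per embedded interval
    print(segment.start_offset_sec, segment.end_offset_sec, len(segment.embedding))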

vertexai.preview.vision_models.WatermarkVerificationModel

WatermarkVerificationModel( model_id: str, endpoint_name: typing.Optional[str] = None )

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.WatermarkVerificationModel

vertexai.preview.vision_models.WatermarkVerificationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.WatermarkVerificationModel.verify_image

verify_image( image: vertexai.vision_models.Image, ) -> vertexai.preview.vision_models.WatermarkVerificationResponse

Verifies the watermark of an image.

See more: vertexai.preview.vision_models.WatermarkVerificationModel.verify_image
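
A minimal watermark verification sketch; the model ID and file path are illustrative assumptions:

from vertexai.preview import vision_models

model = vision_models.WatermarkVerificationModel.from_pretrained("imageverification@001")  # assumed model ID
image = vision_models.Image.load_from_file("/tmp/lighthouse-0.png")  # e.g. an image generated earlier
result = model.verify_image(image)
print(result)  # WatermarkVerificationResponse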

vertexai.prompts._prompts.Prompt

Prompt( prompt_data: typing.Optional[PartsType] = None, *, variables: typing.Optional[typing.List[typing.Dict[str, PartsType]]] = None, prompt_name: typing.Optional[str] = None, generation_config: typing.Optional[ vertexai.generative_models._generative_models.GenerationConfig ] = None, model_name: typing.Optional[str] = None, safety_settings: typing.Optional[ vertexai.generative_models._generative_models.SafetySetting ] = None, system_instruction: typing.Optional[PartsType] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, tool_config: typing.Optional[ vertexai.generative_models._generative_models.ToolConfig ] = None )

Initializes the Prompt with a given prompt and variables.

See more: vertexai.prompts._prompts.Prompt

vertexai.prompts._prompts.Prompt.__repr__

__repr__() -> str

Returns a string representation of the unassembled prompt.

See more: vertexai.prompts._prompts.Prompt.__repr__

vertexai.prompts._prompts.Prompt.__str__

__str__() -> str

Returns the prompt data as a string, without any variables replaced.

See more: vertexai.prompts._prompts.Prompt.__str__

vertexai.prompts._prompts.Prompt.assemble_contents

assemble_contents( **variables_dict: PartsType, ) -> typing.List[vertexai.generative_models._generative_models.Content]

Returns the prompt data, as a List[Content], assembled with variables if applicable.

See more: vertexai.prompts._prompts.Prompt.assemble_contents

vertexai.prompts._prompts.Prompt.generate_content

generate_content( contents: ContentsType, *, generation_config: typing.Optional[GenerationConfigType] = None, safety_settings: typing.Optional[SafetySettingsType] = None, model_name: typing.Optional[str] = None, tools: typing.Optional[ typing.List[vertexai.generative_models._generative_models.Tool] ] = None, tool_config: typing.Optional[ vertexai.generative_models._generative_models.ToolConfig ] = None, stream: bool = False, system_instruction: typing.Optional[PartsType] = None ) -> typing.Union[ vertexai.generative_models._generative_models.GenerationResponse, typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse], ]

Generates content using the saved Prompt configs.

See more: vertexai.prompts._prompts.Prompt.generate_content

vertexai.prompts._prompts.Prompt.get_unassembled_prompt_data

get_unassembled_prompt_data() -> PartsType

Returns the prompt data, without any variables replaced.

See more: vertexai.prompts._prompts.Prompt.get_unassembled_prompt_data
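
A minimal sketch tying the Prompt methods above together; the template, variables, and model ID are illustrative assumptions, and the import uses the private module path listed in this reference:

from vertexai.generative_models import GenerationConfig
from vertexai.prompts._prompts import Prompt

prompt = Prompt(
    prompt_data="Translate {text} into {language}.",
    variables=[{"text": "good morning", "language": "French"}],
    model_name="gemini-2.0-flash",                    # assumed example model ID
    generation_config=GenerationConfig(temperature=0.2),
)
print(prompt)  # unassembled prompt data, via __str__
contents = prompt.assemble_contents(text="good morning", language="French")
response = prompt.generate_content(contents)
print(response.text)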

vertexai.resources.preview.ml_monitoring.ModelMonitor

ModelMonitor( model_monitor_name: str, project: typing.Optional[str] = None, location: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, )

Initializes class with project, location, and api_client.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor

vertexai.resources.preview.ml_monitoring.ModelMonitor.create

create( model_name: str, model_version_id: str, training_dataset: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput ] = None, display_name: typing.Optional[str] = None, model_monitoring_schema: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.schema.ModelMonitoringSchema ] = None, tabular_objective_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective ] = None, output_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec ] = None, notification_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec ] = None, explanation_spec: typing.Optional[ google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec ] = None, project: typing.Optional[str] = None, location: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, model_monitor_id: typing.Optional[str] = None, ) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitor

vertexai.resources.preview.ml_monitoring.ModelMonitor.create_schedule

create_schedule( cron: str, target_dataset: vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput, display_name: typing.Optional[str] = None, model_monitoring_job_display_name: typing.Optional[str] = None, start_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None, end_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None, tabular_objective_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective ] = None, baseline_dataset: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput ] = None, output_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec ] = None, notification_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec ] = None, explanation_spec: typing.Optional[ google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec ] = None, ) -> google.cloud.aiplatform_v1beta1.types.schedule.Schedule

Creates a new scheduled run for the model monitoring job.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.create_schedule

vertexai.resources.preview.ml_monitoring.ModelMonitor.delete

delete(force: bool = False, sync: bool = True) -> None

Force-deletes the model monitor.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.delete

vertexai.resources.preview.ml_monitoring.ModelMonitor.delete_model_monitoring_job

delete_model_monitoring_job(model_monitoring_job_name: str) -> None

vertexai.resources.preview.ml_monitoring.ModelMonitor.delete_schedule

delete_schedule(schedule_name: str) -> None

vertexai.resources.preview.ml_monitoring.ModelMonitor.get_model_monitoring_job

get_model_monitoring_job( model_monitoring_job_name: str, ) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitoringJob

vertexai.resources.preview.ml_monitoring.ModelMonitor.get_schedule

get_schedule( schedule_name: str, ) -> google.cloud.aiplatform_v1beta1.types.schedule.Schedule

vertexai.resources.preview.ml_monitoring.ModelMonitor.get_schema

get_schema() -> ( google.cloud.aiplatform_v1beta1.types.model_monitor.ModelMonitoringSchema )

Get the schema of the model monitor.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.get_schema

vertexai.resources.preview.ml_monitoring.ModelMonitor.list

list( filter: typing.Optional[str] = None, order_by: typing.Optional[str] = None, project: typing.Optional[str] = None, location: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, parent: typing.Optional[str] = None, ) -> typing.List[google.cloud.aiplatform.base.VertexAiResourceNoun]

List all instances of this Vertex AI Resource.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.list

vertexai.resources.preview.ml_monitoring.ModelMonitor.list_jobs

list_jobs( page_size: typing.Optional[int] = None, page_token: typing.Optional[str] = None ) -> ListJobsResponse.list_jobs

vertexai.resources.preview.ml_monitoring.ModelMonitor.list_schedules

list_schedules( filter: typing.Optional[str] = None, page_size: typing.Optional[int] = None, page_token: typing.Optional[str] = None, ) -> ListSchedulesResponse.list_schedules

vertexai.resources.preview.ml_monitoring.ModelMonitor.pause_schedule

pause_schedule(schedule_name: str) -> None

vertexai.resources.preview.ml_monitoring.ModelMonitor.resume_schedule

resume_schedule(schedule_name: str) -> None

vertexai.resources.preview.ml_monitoring.ModelMonitor.run

run( target_dataset: vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput, display_name: typing.Optional[str] = None, model_monitoring_job_id: typing.Optional[str] = None, sync: typing.Optional[bool] = False, tabular_objective_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective ] = None, baseline_dataset: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput ] = None, output_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec ] = None, notification_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec ] = None, explanation_spec: typing.Optional[ google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec ] = None, ) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitoringJob

Creates a new ModelMonitoringJob.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.run

vertexai.resources.preview.ml_monitoring.ModelMonitor.search_alerts

search_alerts( stats_name: typing.Optional[str] = None, objective_type: typing.Optional[str] = None, model_monitoring_job_name: typing.Optional[str] = None, start_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None, end_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None, page_size: typing.Optional[int] = None, page_token: typing.Optional[str] = None, ) -> typing.Dict[str, typing.Any]

vertexai.resources.preview.ml_monitoring.ModelMonitor.search_metrics

search_metrics( stats_name: typing.Optional[str] = None, objective_type: typing.Optional[str] = None, model_monitoring_job_name: typing.Optional[str] = None, schedule_name: typing.Optional[str] = None, algorithm: typing.Optional[str] = None, start_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None, end_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None, page_size: typing.Optional[int] = None, page_token: typing.Optional[str] = None, ) -> MetricsSearchResponse.monitoring_stats

vertexai.resources.preview.ml_monitoring.ModelMonitor.show_feature_attribution_drift_stats

show_feature_attribution_drift_stats(model_monitoring_job_name: str) -> None

Visualizes the feature attribution drift results from a model monitoring job as a histogram chart and a table.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.show_feature_attribution_drift_stats

vertexai.resources.preview.ml_monitoring.ModelMonitor.show_feature_drift_stats

show_feature_drift_stats(model_monitoring_job_name: str) -> None

Visualizes the feature drift results from a model monitoring job as a histogram chart and a table.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.show_feature_drift_stats

vertexai.resources.preview.ml_monitoring.ModelMonitor.show_output_drift_stats

show_output_drift_stats(model_monitoring_job_name: str) -> None

Visualizes the prediction output drift results from a model monitoring job as a histogram chart and a table.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.show_output_drift_stats

vertexai.resources.preview.ml_monitoring.ModelMonitor.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.to_dict

vertexai.resources.preview.ml_monitoring.ModelMonitor.update

update( display_name: typing.Optional[str] = None, training_dataset: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput ] = None, model_monitoring_schema: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.schema.ModelMonitoringSchema ] = None, tabular_objective_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective ] = None, output_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec ] = None, notification_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec ] = None, explanation_spec: typing.Optional[ google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec ] = None, ) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitor

Updates an existing ModelMonitor.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.update

vertexai.resources.preview.ml_monitoring.ModelMonitor.update_schedule

update_schedule( schedule_name: str, display_name: typing.Optional[str] = None, model_monitoring_job_display_name: typing.Optional[str] = None, cron: typing.Optional[str] = None, baseline_dataset: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput ] = None, target_dataset: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput ] = None, tabular_objective_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective ] = None, output_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec ] = None, notification_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec ] = None, explanation_spec: typing.Optional[ google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec ] = None, end_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None, ) -> google.cloud.aiplatform_v1beta1.types.schedule.Schedule

vertexai.resources.preview.ml_monitoring.ModelMonitor.wait

wait()

Helper method that blocks until all futures are complete.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.wait
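
A short sketch of working with an existing ModelMonitor using the methods above; all resource names are illustrative placeholders:

from vertexai.resources.preview import ml_monitoring

monitor = ml_monitoring.ModelMonitor(
    model_monitor_name="projects/PROJECT/locations/us-central1/modelMonitors/123"
)
print(monitor.get_schema())  # the ModelMonitoringSchema registered for this monitor
monitor.show_feature_drift_stats(
    model_monitoring_job_name=(
        "projects/PROJECT/locations/us-central1/modelMonitors/123/modelMonitoringJobs/456"
    )
)
monitor.pause_schedule(schedule_name="projects/PROJECT/locations/us-central1/schedules/789")
monitor.resume_schedule(schedule_name="projects/PROJECT/locations/us-central1/schedules/789")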

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob

ModelMonitoringJob( model_monitoring_job_name: str, model_monitor_id: typing.Optional[str] = None, project: typing.Optional[str] = None, location: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, )

Initializes class with project, location, and api_client.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.create

create( model_monitor_name: typing.Optional[str] = None, target_dataset: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput ] = None, display_name: typing.Optional[str] = None, model_monitoring_job_id: typing.Optional[str] = None, project: typing.Optional[str] = None, location: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, baseline_dataset: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput ] = None, tabular_objective_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective ] = None, output_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec ] = None, notification_spec: typing.Optional[ vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec ] = None, explanation_spec: typing.Optional[ google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec ] = None, sync: bool = False, ) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitoringJob

Creates a new ModelMonitoringJob.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.create

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.delete

delete() -> None

Deletes a Model Monitoring Job.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.delete

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.done

done() -> bool

Indicates whether the job has completed.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.done

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.list

list( filter: typing.Optional[str] = None, order_by: typing.Optional[str] = None, project: typing.Optional[str] = None, location: typing.Optional[str] = None, credentials: typing.Optional[google.auth.credentials.Credentials] = None, parent: typing.Optional[str] = None, ) -> typing.List[google.cloud.aiplatform.base.VertexAiResourceNoun]

List all instances of this Vertex AI Resource.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.list

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.to_dict

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.wait

wait()

Helper method that blocks until all futures are complete.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.wait
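
A short sketch of attaching to and inspecting an existing ModelMonitoringJob; the resource name is an illustrative placeholder:

from vertexai.resources.preview import ml_monitoring

job = ml_monitoring.ModelMonitoringJob(
    model_monitoring_job_name=(
        "projects/PROJECT/locations/us-central1/modelMonitors/123/modelMonitoringJobs/456"
    )
)
job.wait()  # block on any pending futures for this resource
if job.done():
    print(job.to_dict().get("state"))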

vertexai.resources.preview.ml_monitoring.spec.ModelMonitoringSchema.to_json

to_json(output_dir: typing.Optional[str] = None) -> str

Transforms the ModelMonitoringSchema to JSON format.

See more: vertexai.resources.preview.ml_monitoring.spec.ModelMonitoringSchema.to_json

vertexai.vision_models.GeneratedImage

GeneratedImage( image_bytes: typing.Optional[bytes], generation_parameters: typing.Dict[str, typing.Any], gcs_uri: typing.Optional[str] = None, )

Creates a GeneratedImage object.

See more: vertexai.vision_models.GeneratedImage

vertexai.vision_models.GeneratedImage.load_from_file

load_from_file(location: str) -> vertexai.preview.vision_models.GeneratedImage

vertexai.vision_models.GeneratedImage.save

save(location: str, include_generation_parameters: bool = True)

Saves image to a file.

See more: vertexai.vision_models.GeneratedImage.save

vertexai.vision_models.GeneratedImage.show

show()

Shows the image.

See more: vertexai.vision_models.GeneratedImage.show

vertexai.vision_models.Image

Image( image_bytes: typing.Optional[bytes] = None, gcs_uri: typing.Optional[str] = None )

Creates an Image object.

See more: vertexai.vision_models.Image

vertexai.vision_models.Image.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Image

Loads image from local file or Google Cloud Storage.

See more: vertexai.vision_models.Image.load_from_file

vertexai.vision_models.Image.save

save(location: str)

Saves image to a file.

See more: vertexai.vision_models.Image.save

vertexai.vision_models.Image.show

show()

Shows the image.

See more: vertexai.vision_models.Image.show

vertexai.vision_models.ImageCaptioningModel

ImageCaptioningModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.ImageCaptioningModel

vertexai.vision_models.ImageCaptioningModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.vision_models.ImageCaptioningModel.get_captions

get_captions( image: vertexai.vision_models.Image, *, number_of_results: int = 1, language: str = "en", output_gcs_uri: typing.Optional[str] = None ) -> typing.List[str]

Generates captions for a given image.

See more: vertexai.vision_models.ImageCaptioningModel.get_captions

vertexai.vision_models.ImageGenerationModel

ImageGenerationModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.ImageGenerationModel

vertexai.vision_models.ImageGenerationModel.edit_image

edit_image( *, prompt: str, base_image: typing.Optional[vertexai.vision_models.Image] = None, mask: typing.Optional[vertexai.vision_models.Image] = None, reference_images: typing.Optional[ typing.List[vertexai.vision_models.ReferenceImage] ] = None, negative_prompt: typing.Optional[str] = None, number_of_images: int = 1, guidance_scale: typing.Optional[float] = None, edit_mode: typing.Optional[ typing.Literal[ "inpainting-insert", "inpainting-remove", "outpainting", "product-image" ] ] = None, mask_mode: typing.Optional[ typing.Literal["background", "foreground", "semantic"] ] = None, segmentation_classes: typing.Optional[typing.List[str]] = None, mask_dilation: typing.Optional[float] = None, product_position: typing.Optional[typing.Literal["fixed", "reposition"]] = None, output_mime_type: typing.Optional[typing.Literal["image/png", "image/jpeg"]] = None, compression_quality: typing.Optional[float] = None, language: typing.Optional[str] = None, seed: typing.Optional[int] = None, output_gcs_uri: typing.Optional[str] = None, safety_filter_level: typing.Optional[ typing.Literal["block_most", "block_some", "block_few", "block_fewest"] ] = None, person_generation: typing.Optional[ typing.Literal["dont_allow", "allow_adult", "allow_all"] ] = None ) -> vertexai.preview.vision_models.ImageGenerationResponse

Edits an existing image based on a text prompt.

See more: vertexai.vision_models.ImageGenerationModel.edit_image

vertexai.vision_models.ImageGenerationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.vision_models.ImageGenerationModel.generate_images

generate_images( prompt: str, *, negative_prompt: typing.Optional[str] = None, number_of_images: int = 1, aspect_ratio: typing.Optional[ typing.Literal["1:1", "9:16", "16:9", "4:3", "3:4"] ] = None, guidance_scale: typing.Optional[float] = None, language: typing.Optional[str] = None, seed: typing.Optional[int] = None, output_gcs_uri: typing.Optional[str] = None, add_watermark: typing.Optional[bool] = True, safety_filter_level: typing.Optional[ typing.Literal["block_most", "block_some", "block_few", "block_fewest"] ] = None, person_generation: typing.Optional[ typing.Literal["dont_allow", "allow_adult", "allow_all"] ] = None ) -> vertexai.preview.vision_models.ImageGenerationResponse

Generates images from a text prompt.

See more: vertexai.vision_models.ImageGenerationModel.generate_images

vertexai.vision_models.ImageGenerationModel.upscale_image

upscale_image( image: typing.Union[ vertexai.vision_models.Image, vertexai.preview.vision_models.GeneratedImage ], new_size: typing.Optional[int] = 2048, upscale_factor: typing.Optional[typing.Literal["x2", "x4"]] = None, output_mime_type: typing.Optional[ typing.Literal["image/png", "image/jpeg"] ] = "image/png", output_compression_quality: typing.Optional[int] = None, output_gcs_uri: typing.Optional[str] = None, ) -> vertexai.vision_models.Image

vertexai.vision_models.ImageGenerationResponse.__getitem__

__getitem__(idx: int) -> vertexai.preview.vision_models.GeneratedImage

Gets the generated image by index.

See more: vertexai.vision_models.ImageGenerationResponse.__getitem__

vertexai.vision_models.ImageGenerationResponse.__iter__

__iter__() -> typing.Iterator[vertexai.preview.vision_models.GeneratedImage]

Iterates through the generated images.

See more: vertexai.vision_models.ImageGenerationResponse.__iter__

vertexai.vision_models.ImageQnAModel

ImageQnAModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.ImageQnAModel

vertexai.vision_models.ImageQnAModel.ask_question

ask_question( image: vertexai.vision_models.Image, question: str, *, number_of_results: int = 1 ) -> typing.List[str]

Answers questions about an image.

See more: vertexai.vision_models.ImageQnAModel.ask_question

vertexai.vision_models.ImageQnAModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.vision_models.ImageQnAModel.from_pretrained

vertexai.vision_models.ImageTextModel

ImageTextModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.ImageTextModel

vertexai.vision_models.ImageTextModel.ask_question

ask_question( image: vertexai.vision_models.Image, question: str, *, number_of_results: int = 1 ) -> typing.List[str]

Answers questions about an image.

See more: vertexai.vision_models.ImageTextModel.ask_question

vertexai.vision_models.ImageTextModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.vision_models.ImageTextModel.from_pretrained

vertexai.vision_models.ImageTextModel.get_captions

get_captions( image: vertexai.vision_models.Image, *, number_of_results: int = 1, language: str = "en", output_gcs_uri: typing.Optional[str] = None ) -> typing.List[str]

Generates captions for a given image.

See more: vertexai.vision_models.ImageTextModel.get_captions

vertexai.vision_models.MultiModalEmbeddingModel

MultiModalEmbeddingModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.MultiModalEmbeddingModel

vertexai.vision_models.MultiModalEmbeddingModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.vision_models.MultiModalEmbeddingModel.get_embeddings

get_embeddings( image: typing.Optional[vertexai.vision_models.Image] = None, video: typing.Optional[vertexai.vision_models.Video] = None, contextual_text: typing.Optional[str] = None, dimension: typing.Optional[int] = None, video_segment_config: typing.Optional[ vertexai.vision_models.VideoSegmentConfig ] = None, ) -> vertexai.vision_models.MultiModalEmbeddingResponse

Gets embedding vectors from the provided image, video, and/or contextual text.

See more: vertexai.vision_models.MultiModalEmbeddingModel.get_embeddings

vertexai.vision_models.Video

Video( video_bytes: typing.Optional[bytes] = None, gcs_uri: typing.Optional[str] = None )

Creates a Video object.

See more: vertexai.vision_models.Video

vertexai.vision_models.Video.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Video

Loads video from local file or Google Cloud Storage.

See more: vertexai.vision_models.Video.load_from_file

vertexai.vision_models.Video.save

save(location: str)

Saves video to a file.

See more: vertexai.vision_models.Video.save

vertexai.vision_models.VideoEmbedding

VideoEmbedding( start_offset_sec: int, end_offset_sec: int, embedding: typing.List[float] )

Creates a VideoEmbedding object.

See more: vertexai.vision_models.VideoEmbedding

vertexai.vision_models.VideoSegmentConfig

VideoSegmentConfig( start_offset_sec: int = 0, end_offset_sec: int = 120, interval_sec: int = 16 )

Creates a VideoSegmentConfig object.

See more: vertexai.vision_models.VideoSegmentConfig