5 changes: 4 additions & 1 deletion protos/google/cloud/dialogflow/v2/agent.proto
@@ -267,7 +267,10 @@ message Agent {
bool enable_logging = 8 [(google.api.field_behavior) = OPTIONAL];

// Optional. Determines how intents are detected from user queries.
MatchMode match_mode = 9 [(google.api.field_behavior) = OPTIONAL];
MatchMode match_mode = 9 [
deprecated = true,
(google.api.field_behavior) = OPTIONAL
];

// Optional. To filter out false positive results and still get variety in
// matched natural language inputs for your agent, you can tune the machine
123 changes: 82 additions & 41 deletions protos/google/cloud/dialogflow/v2/session.proto
@@ -57,6 +57,10 @@ service Sessions {
// as a result. This method is not idempotent, because it may cause contexts
// and session entity types to be updated, which in turn might affect
// results of future queries.
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
option (google.api.http) = {
post: "/v2/{session=projects/*/agent/sessions/*}:detectIntent"
@@ -72,8 +76,12 @@
// Processes a natural language query in audio format in a streaming fashion
// and returns structured, actionable data as a result. This method is only
// available via the gRPC API (not REST).
rpc StreamingDetectIntent(stream StreamingDetectIntentRequest) returns (stream StreamingDetectIntentResponse) {
}
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
rpc StreamingDetectIntent(stream StreamingDetectIntentRequest)
returns (stream StreamingDetectIntentResponse) {}
}
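
The note added above recommends pinning production traffic to an agent version via an environment. As a rough sketch of what that looks like from a client, assuming the Node.js `@google-cloud/dialogflow` v2 `SessionsClient` (the project, environment, and session IDs below are placeholders):

import {SessionsClient} from '@google-cloud/dialogflow';

const client = new SessionsClient();

// Placeholder IDs. Routing through an environment (e.g. "production")
// serves whatever agent version that environment is pinned to, per the
// note above; omitting the environment segment hits the draft agent.
const session =
  'projects/my-project/agent/environments/production/users/-/sessions/session-1';

async function detect(text: string): Promise<void> {
  const [response] = await client.detectIntent({
    session,
    queryInput: {text: {text, languageCode: 'en-US'}},
  });
  console.log(response.queryResult?.fulfillmentText);
}

detect('hello').catch(console.error);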

// The request to detect user's intent.
@@ -90,6 +98,10 @@ message DetectIntentRequest {
//
// For more information, see the [API interactions
// guide](https://cloud.google.com/dialogflow/docs/api-overview).
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
string session = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@@ -115,12 +127,14 @@
// configured, no output audio is generated.
OutputAudioConfig output_audio_config = 4;

// Mask for [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config] indicating which settings in this
// request-level config should override speech synthesizer settings defined at
// agent-level.
// Mask for
// [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config]
// indicating which settings in this request-level config should override
// speech synthesizer settings defined at agent-level.
//
// If unspecified or empty, [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config] replaces the agent-level
// config in its entirety.
// If unspecified or empty,
// [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config]
// replaces the agent-level config in its entirety.
google.protobuf.FieldMask output_audio_config_mask = 7;
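
A hedged illustration of the mask semantics above, reusing the `client` and `session` from the earlier sketch; the config values are illustrative, and note that mask paths use the proto's snake_case field names:

async function detectWithAudioOverride(text: string): Promise<void> {
  const [response] = await client.detectIntent({
    session,
    queryInput: {text: {text, languageCode: 'en-US'}},
    // Request-level synthesizer settings...
    outputAudioConfig: {
      audioEncoding: 'OUTPUT_AUDIO_ENCODING_LINEAR_16',
      synthesizeSpeechConfig: {speakingRate: 1.25},
    },
    // ...of which only this masked path overrides the agent-level config.
    // With no mask, the whole agent-level config would be replaced instead.
    outputAudioConfigMask: {paths: ['synthesize_speech_config.speaking_rate']},
  });
  console.log(response.outputAudio?.length, 'bytes of audio');
}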

// The natural language speech audio to be processed. This field
@@ -192,6 +206,17 @@ message QueryParameters {
// Configures the type of sentiment analysis to perform. If not
// provided, sentiment analysis is not performed.
SentimentAnalysisRequestConfig sentiment_analysis_request_config = 10;

// This field can be used to pass HTTP headers for a webhook
// call. These headers will be sent to the webhook along with the headers
// that have been configured through the Dialogflow web console. The headers
// defined within this field will overwrite the headers configured through
// the Dialogflow console if there is a conflict. Header names are
// case-insensitive. Google-specified headers are not allowed, including
// "Host", "Content-Length", "Connection", "From", "User-Agent",
// "Accept-Encoding", "If-Modified-Since", "If-None-Match", "X-Forwarded-For",
// etc.
map<string, string> webhook_headers = 14;
}
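
The new `webhook_headers` field surfaces in the Node.js client as `queryParams.webhookHeaders`. A minimal sketch, again reusing `client` and `session` from the first example; the header value is a placeholder credential:

async function detectWithWebhookHeaders(): Promise<void> {
  const [response] = await client.detectIntent({
    session,
    queryInput: {text: {text: 'track my order', languageCode: 'en-US'}},
    queryParams: {
      // Sent to the webhook alongside console-configured headers, winning on
      // conflict; names are case-insensitive. The token is a placeholder and
      // not one of the disallowed Google-specified headers listed above.
      webhookHeaders: {authorization: 'Bearer placeholder-token'},
    },
  });
  console.log(response.queryResult?.fulfillmentText);
}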

// Represents the query input. It can contain either:
@@ -325,25 +350,29 @@
}

// The top-level message sent by the client to the
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent] method.
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent]
// method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
// [session][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.session],
// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input] plus optionally
// [query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params]. If the client
// wants to receive an audio response, it should also contain
// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input]
// plus optionally
// [query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params].
// If the client wants to receive an audio response, it should also contain
// [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config].
// The message must not contain
// [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio].
// 2. If [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input] was set to
// [query_input.audio_config][google.cloud.dialogflow.v2.InputAudioConfig], all subsequent
// messages must contain
// [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio] to continue with
// Speech recognition.
// If you decide to rather detect an intent from text input after you
// already started Speech recognition, please send a message with
// 2. If
// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input]
// was set to
// [query_input.audio_config][google.cloud.dialogflow.v2.InputAudioConfig],
// all subsequent messages must contain
// [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio]
// to continue with Speech recognition. If you decide to detect an intent
// from text input instead after you have already started Speech recognition,
// send a message with
// [query_input.text][google.cloud.dialogflow.v2.QueryInput.text].
//
// However, note that:
@@ -368,6 +397,10 @@ message StreamingDetectIntentRequest {
//
// For more information, see the [API interactions
// guide](https://cloud.google.com/dialogflow/docs/api-overview).
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
string session = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@@ -388,27 +421,30 @@
// 3. an event that specifies which intent to trigger.
QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

// Please use [InputAudioConfig.single_utterance][google.cloud.dialogflow.v2.InputAudioConfig.single_utterance] instead.
// If `false` (default), recognition does not cease until
// the client closes the stream. If `true`, the recognizer will detect a
// single spoken utterance in input audio. Recognition ceases when it detects
// the audio's voice has stopped or paused. In this case, once a detected
// intent is received, the client should close the stream and start a new
// request with a new stream as needed.
// This setting is ignored when `query_input` is a piece of text or an event.
// Please use
// [InputAudioConfig.single_utterance][google.cloud.dialogflow.v2.InputAudioConfig.single_utterance]
// instead. If `false` (default), recognition does not cease until the client
// closes the stream. If `true`, the recognizer will detect a single spoken
// utterance in input audio. Recognition ceases when it detects the audio's
// voice has stopped or paused. In this case, once a detected intent is
// received, the client should close the stream and start a new request with a
// new stream as needed. This setting is ignored when `query_input` is a piece
// of text or an event.
bool single_utterance = 4 [deprecated = true];

// Instructs the speech synthesizer how to generate the output
// audio. If this field is not set and agent-level speech synthesizer is not
// configured, no output audio is generated.
OutputAudioConfig output_audio_config = 5;

// Mask for [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config] indicating which settings in this
// request-level config should override speech synthesizer settings defined at
// agent-level.
// Mask for
// [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config]
// indicating which settings in this request-level config should override
// speech synthesizer settings defined at agent-level.
//
// If unspecified or empty, [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config] replaces the agent-level
// config in its entirety.
// If unspecified or empty,
// [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config]
// replaces the agent-level config in its entirety.
google.protobuf.FieldMask output_audio_config_mask = 7;

// The input audio content to be recognized. Must be sent if
@@ -503,11 +539,12 @@ message StreamingRecognitionResult {

// Event indicates that the server has detected the end of the user's speech
// utterance and expects no additional inputs.
// Therefore, the server will not process additional audio (although it may subsequently return additional results). The
// client should stop sending additional audio data, half-close the gRPC
// connection, and wait for any additional results until the server closes
// the gRPC connection. This message is only sent if `single_utterance` was
// set to `true`, and is not used otherwise.
// Therefore, the server will not process additional audio (although it may
// subsequently return additional results). The client should stop sending
// additional audio data, half-close the gRPC connection, and wait for any
// additional results until the server closes the gRPC connection. This
// message is only sent if `single_utterance` was set to `true`, and is not
// used otherwise.
END_OF_SINGLE_UTTERANCE = 2;
}
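
Tying the pieces together: the request-ordering rules documented on StreamingDetectIntentRequest above (config first, audio chunks after) and the END_OF_SINGLE_UTTERANCE event. A self-contained sketch assuming the Node.js client and a raw LINEAR16 audio file at a placeholder path:

import {SessionsClient} from '@google-cloud/dialogflow';
import * as fs from 'fs';

const client = new SessionsClient();

function streamAudio(session: string, audioPath: string): void {
  let halfClosed = false;
  const stream = client
    .streamingDetectIntent()
    .on('error', console.error)
    .on('data', (data: any) => {
      if (data.recognitionResult?.messageType === 'END_OF_SINGLE_UTTERANCE') {
        // Server has detected the end of the utterance: stop sending audio,
        // half-close, and wait for the remaining results.
        halfClosed = true;
        stream.end();
      } else if (data.queryResult) {
        console.log(data.queryResult.fulfillmentText);
      }
    });

  // 1. First message: session and query_input only -- no input_audio.
  stream.write({
    session,
    queryInput: {
      audioConfig: {
        audioEncoding: 'AUDIO_ENCODING_LINEAR_16',
        sampleRateHertz: 16000,
        languageCode: 'en-US',
        singleUtterance: true, // preferred over the deprecated request field
      },
    },
  });

  // 2. All subsequent messages: input_audio chunks only.
  const reader = fs.createReadStream(audioPath);
  reader.on('data', (chunk: Buffer) => {
    if (!halfClosed) stream.write({inputAudio: chunk});
  });
  reader.on('end', () => {
    if (!halfClosed) stream.end();
  });
}

streamAudio(
  'projects/my-project/agent/sessions/session-1', // placeholder IDs
  './utterance.raw' // placeholder path
);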

@@ -534,7 +571,8 @@
float confidence = 4;

// Word-specific information for the words recognized by Speech in
// [transcript][google.cloud.dialogflow.v2.StreamingRecognitionResult.transcript]. Populated if and only if `message_type` = `TRANSCRIPT` and
// [transcript][google.cloud.dialogflow.v2.StreamingRecognitionResult.transcript].
// Populated if and only if `message_type` = `TRANSCRIPT` and
// [InputAudioConfig.enable_word_info] is set.
repeated SpeechWordInfo speech_word_info = 7;

@@ -601,11 +639,14 @@ message SentimentAnalysisRequestConfig {
// and identifies the prevailing subjective opinion, especially to determine a
// user's attitude as positive, negative, or neutral.
// For [Participants.DetectIntent][], it needs to be configured in
// [DetectIntentRequest.query_params][google.cloud.dialogflow.v2.DetectIntentRequest.query_params]. For
// [Participants.StreamingDetectIntent][], it needs to be configured in
// [DetectIntentRequest.query_params][google.cloud.dialogflow.v2.DetectIntentRequest.query_params].
// For [Participants.StreamingDetectIntent][], it needs to be configured in
// [StreamingDetectIntentRequest.query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params].
// And for [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent] and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent], it needs to be configured in
// And for
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
// and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent],
// it needs to be configured in
// [ConversationProfile.human_agent_assistant_config][google.cloud.dialogflow.v2.ConversationProfile.human_agent_assistant_config]
message SentimentAnalysisResult {
// The sentiment analysis result for `query_text`.
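
Per the comment above, for DetectIntent the sentiment feature is switched on through `query_params`. A short sketch reusing the earlier `client` and `session`:

async function detectWithSentiment(text: string): Promise<void> {
  const [response] = await client.detectIntent({
    session,
    queryInput: {text: {text, languageCode: 'en-US'}},
    queryParams: {
      sentimentAnalysisRequestConfig: {analyzeQueryTextSentiment: true},
    },
  });
  const sentiment =
    response.queryResult?.sentimentAnalysisResult?.queryTextSentiment;
  // score is in [-1.0, 1.0] (negative to positive attitude); magnitude is
  // the overall strength of emotion regardless of sign.
  console.log(sentiment?.score, sentiment?.magnitude);
}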
6 changes: 6 additions & 0 deletions protos/protos.d.ts

Some generated files are not rendered by default.