
Package embedded

import "google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1"

Overview

Package embedded is a generated protocol buffer package.

It is generated from these files:

google/assistant/embedded/v1alpha1/embedded_assistant.proto

It has these top-level messages:

ConverseConfig
AudioInConfig
AudioOutConfig
ConverseState
AudioOut
ConverseResult
ConverseRequest
ConverseResponse

Index

Variables
func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer)
type AudioInConfig
    func (*AudioInConfig) Descriptor() ([]byte, []int)
    func (m *AudioInConfig) GetEncoding() AudioInConfig_Encoding
    func (m *AudioInConfig) GetSampleRateHertz() int32
    func (*AudioInConfig) ProtoMessage()
    func (m *AudioInConfig) Reset()
    func (m *AudioInConfig) String() string
type AudioInConfig_Encoding
    func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int)
    func (x AudioInConfig_Encoding) String() string
type AudioOut
    func (*AudioOut) Descriptor() ([]byte, []int)
    func (m *AudioOut) GetAudioData() []byte
    func (*AudioOut) ProtoMessage()
    func (m *AudioOut) Reset()
    func (m *AudioOut) String() string
type AudioOutConfig
    func (*AudioOutConfig) Descriptor() ([]byte, []int)
    func (m *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding
    func (m *AudioOutConfig) GetSampleRateHertz() int32
    func (m *AudioOutConfig) GetVolumePercentage() int32
    func (*AudioOutConfig) ProtoMessage()
    func (m *AudioOutConfig) Reset()
    func (m *AudioOutConfig) String() string
type AudioOutConfig_Encoding
    func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int)
    func (x AudioOutConfig_Encoding) String() string
type ConverseConfig
    func (*ConverseConfig) Descriptor() ([]byte, []int)
    func (m *ConverseConfig) GetAudioInConfig() *AudioInConfig
    func (m *ConverseConfig) GetAudioOutConfig() *AudioOutConfig
    func (m *ConverseConfig) GetConverseState() *ConverseState
    func (*ConverseConfig) ProtoMessage()
    func (m *ConverseConfig) Reset()
    func (m *ConverseConfig) String() string
type ConverseRequest
    func (*ConverseRequest) Descriptor() ([]byte, []int)
    func (m *ConverseRequest) GetAudioIn() []byte
    func (m *ConverseRequest) GetConfig() *ConverseConfig
    func (m *ConverseRequest) GetConverseRequest() isConverseRequest_ConverseRequest
    func (*ConverseRequest) ProtoMessage()
    func (m *ConverseRequest) Reset()
    func (m *ConverseRequest) String() string
    func (*ConverseRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
type ConverseRequest_AudioIn
type ConverseRequest_Config
type ConverseResponse
    func (*ConverseResponse) Descriptor() ([]byte, []int)
    func (m *ConverseResponse) GetAudioOut() *AudioOut
    func (m *ConverseResponse) GetConverseResponse() isConverseResponse_ConverseResponse
    func (m *ConverseResponse) GetError() *google_rpc.Status
    func (m *ConverseResponse) GetEventType() ConverseResponse_EventType
    func (m *ConverseResponse) GetResult() *ConverseResult
    func (*ConverseResponse) ProtoMessage()
    func (m *ConverseResponse) Reset()
    func (m *ConverseResponse) String() string
    func (*ConverseResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
type ConverseResponse_AudioOut
type ConverseResponse_Error
type ConverseResponse_EventType
    func (ConverseResponse_EventType) EnumDescriptor() ([]byte, []int)
    func (x ConverseResponse_EventType) String() string
type ConverseResponse_EventType_
type ConverseResponse_Result
type ConverseResult
    func (*ConverseResult) Descriptor() ([]byte, []int)
    func (m *ConverseResult) GetConversationState() []byte
    func (m *ConverseResult) GetMicrophoneMode() ConverseResult_MicrophoneMode
    func (m *ConverseResult) GetSpokenRequestText() string
    func (m *ConverseResult) GetSpokenResponseText() string
    func (m *ConverseResult) GetVolumePercentage() int32
    func (*ConverseResult) ProtoMessage()
    func (m *ConverseResult) Reset()
    func (m *ConverseResult) String() string
type ConverseResult_MicrophoneMode
    func (ConverseResult_MicrophoneMode) EnumDescriptor() ([]byte, []int)
    func (x ConverseResult_MicrophoneMode) String() string
type ConverseState
    func (*ConverseState) Descriptor() ([]byte, []int)
    func (m *ConverseState) GetConversationState() []byte
    func (*ConverseState) ProtoMessage()
    func (m *ConverseState) Reset()
    func (m *ConverseState) String() string
type EmbeddedAssistantClient
    func NewEmbeddedAssistantClient(cc *grpc.ClientConn) EmbeddedAssistantClient
type EmbeddedAssistantServer
type EmbeddedAssistant_ConverseClient
type EmbeddedAssistant_ConverseServer

Package files

embedded_assistant.pb.go

Variables

var AudioInConfig_Encoding_name = map[int32]string{
    0: "ENCODING_UNSPECIFIED",
    1: "LINEAR16",
    2: "FLAC",
}
var AudioInConfig_Encoding_value = map[string]int32{
    "ENCODING_UNSPECIFIED": 0,
    "LINEAR16":             1,
    "FLAC":                 2,
}
var AudioOutConfig_Encoding_name = map[int32]string{
    0: "ENCODING_UNSPECIFIED",
    1: "LINEAR16",
    2: "MP3",
    3: "OPUS_IN_OGG",
}
var AudioOutConfig_Encoding_value = map[string]int32{
    "ENCODING_UNSPECIFIED": 0,
    "LINEAR16":             1,
    "MP3":                  2,
    "OPUS_IN_OGG":          3,
}
var ConverseResponse_EventType_name = map[int32]string{
    0: "EVENT_TYPE_UNSPECIFIED",
    1: "END_OF_UTTERANCE",
}
var ConverseResponse_EventType_value = map[string]int32{
    "EVENT_TYPE_UNSPECIFIED": 0,
    "END_OF_UTTERANCE":       1,
}
var ConverseResult_MicrophoneMode_name = map[int32]string{
    0: "MICROPHONE_MODE_UNSPECIFIED",
    1: "CLOSE_MICROPHONE",
    2: "DIALOG_FOLLOW_ON",
}
var ConverseResult_MicrophoneMode_value = map[string]int32{
    "MICROPHONE_MODE_UNSPECIFIED": 0,
    "CLOSE_MICROPHONE":            1,
    "DIALOG_FOLLOW_ON":            2,
}

func RegisterEmbeddedAssistantServer

func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer)

type AudioInConfig

Specifies how to process the `audio_in` data that will be provided in subsequent requests. For recommended settings, see the Google Assistant SDK [best practices](https://developers.google.com/assistant/best-practices).

type AudioInConfig struct {
    // *Required* Encoding of audio data sent in all `audio_in` messages.
    Encoding AudioInConfig_Encoding `protobuf:"varint,1,opt,name=encoding,enum=google.assistant.embedded.v1alpha1.AudioInConfig_Encoding" json:"encoding,omitempty"`
    // *Required* Sample rate (in Hertz) of the audio data sent in all `audio_in`
    // messages. Valid values are from 16000-24000, but 16000 is optimal.
    // For best results, set the sampling rate of the audio source to 16000 Hz.
    // If that's not possible, use the native sample rate of the audio source
    // (instead of re-sampling).
    SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"`
}
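
For illustration (not part of the generated code), a caller might fill in this message with the settings the field comments recommend, assuming the package is imported as `embedded`:

audioInConfig := &embedded.AudioInConfig{
    Encoding:        embedded.AudioInConfig_LINEAR16, // raw 16-bit little-endian PCM
    SampleRateHertz: 16000,                           // the optimal rate per the field comment
}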

func (*AudioInConfig) Descriptor

func (*AudioInConfig) Descriptor() ([]byte, []int)

func (*AudioInConfig) GetEncoding

func (m *AudioInConfig) GetEncoding() AudioInConfig_Encoding

func (*AudioInConfig) GetSampleRateHertz

func (m *AudioInConfig) GetSampleRateHertz() int32

func (*AudioInConfig) ProtoMessage

func (*AudioInConfig) ProtoMessage()

func (*AudioInConfig) Reset

func (m *AudioInConfig) Reset()

func (*AudioInConfig) String

func (m *AudioInConfig) String() string

type AudioInConfig_Encoding

Audio encoding of the data sent in the audio message. Audio must be one-channel (mono). The only language supported is "en-US".

type AudioInConfig_Encoding int32
const (
    // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
    AudioInConfig_ENCODING_UNSPECIFIED AudioInConfig_Encoding = 0
    // Uncompressed 16-bit signed little-endian samples (Linear PCM).
    // This encoding includes no header, only the raw audio bytes.
    AudioInConfig_LINEAR16 AudioInConfig_Encoding = 1
    // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    // Codec) is the recommended encoding because it is
    // lossless--therefore recognition is not compromised--and
    // requires only about half the bandwidth of `LINEAR16`. This encoding
    // includes the `FLAC` stream header followed by audio data. It supports
    // 16-bit and 24-bit samples, however, not all fields in `STREAMINFO` are
    // supported.
    AudioInConfig_FLAC AudioInConfig_Encoding = 2
)

func (AudioInConfig_Encoding) EnumDescriptor

func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int)

func (AudioInConfig_Encoding) String

func (x AudioInConfig_Encoding) String() string

type AudioOut

The audio containing the assistant's response to the query. Sequential chunks of audio data are received in sequential `ConverseResponse` messages.

type AudioOut struct {
    // *Output-only* The audio data containing the assistant's response to the
    // query. Sequential chunks of audio data are received in sequential
    // `ConverseResponse` messages.
    AudioData []byte `protobuf:"bytes,1,opt,name=audio_data,json=audioData,proto3" json:"audio_data,omitempty"`
}

func (*AudioOut) Descriptor

func (*AudioOut) Descriptor() ([]byte, []int)

func (*AudioOut) GetAudioData

func (m *AudioOut) GetAudioData() []byte

func (*AudioOut) ProtoMessage

func (*AudioOut) ProtoMessage()

func (*AudioOut) Reset

func (m *AudioOut) Reset()

func (*AudioOut) String

func (m *AudioOut) String() string

type AudioOutConfig

Specifies the desired format for the server to use when it returns `audio_out` messages.

type AudioOutConfig struct {
    // *Required* The encoding of audio data to be returned in all `audio_out`
    // messages.
    Encoding AudioOutConfig_Encoding `protobuf:"varint,1,opt,name=encoding,enum=google.assistant.embedded.v1alpha1.AudioOutConfig_Encoding" json:"encoding,omitempty"`
    // *Required* The sample rate in Hertz of the audio data returned in
    // `audio_out` messages. Valid values are: 16000-24000.
    SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"`
    // *Required* Current volume setting of the device's audio output.
    // Valid values are 1 to 100 (corresponding to 1% to 100%).
    VolumePercentage int32 `protobuf:"varint,3,opt,name=volume_percentage,json=volumePercentage" json:"volume_percentage,omitempty"`
}

func (*AudioOutConfig) Descriptor

func (*AudioOutConfig) Descriptor() ([]byte, []int)

func (*AudioOutConfig) GetEncoding

func (m *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding

func (*AudioOutConfig) GetSampleRateHertz

func (m *AudioOutConfig) GetSampleRateHertz() int32

func (*AudioOutConfig) GetVolumePercentage

func (m *AudioOutConfig) GetVolumePercentage() int32

func (*AudioOutConfig) ProtoMessage

func (*AudioOutConfig) ProtoMessage()

func (*AudioOutConfig) Reset

func (m *AudioOutConfig) Reset()

func (*AudioOutConfig) String

func (m *AudioOutConfig) String() string

type AudioOutConfig_Encoding

Audio encoding of the data returned in the audio message. All encodings are raw audio bytes with no header, except as indicated below.

type AudioOutConfig_Encoding int32
const (
    // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
    AudioOutConfig_ENCODING_UNSPECIFIED AudioOutConfig_Encoding = 0
    // Uncompressed 16-bit signed little-endian samples (Linear PCM).
    AudioOutConfig_LINEAR16 AudioOutConfig_Encoding = 1
    // MP3 audio encoding. The sample rate is encoded in the payload.
    AudioOutConfig_MP3 AudioOutConfig_Encoding = 2
    // Opus-encoded audio wrapped in an ogg container. The result will be a
    // file which can be played natively on Android and in some browsers (such
    // as Chrome). The quality of the encoding is considerably higher than MP3
    // while using the same bitrate. The sample rate is encoded in the payload.
    AudioOutConfig_OPUS_IN_OGG AudioOutConfig_Encoding = 3
)

func (AudioOutConfig_Encoding) EnumDescriptor

func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int)

func (AudioOutConfig_Encoding) String

func (x AudioOutConfig_Encoding) String() string

type ConverseConfig

Specifies how to process the `ConverseRequest` messages.

type ConverseConfig struct {
    // *Required* Specifies how to process the subsequent incoming audio.
    AudioInConfig *AudioInConfig `protobuf:"bytes,1,opt,name=audio_in_config,json=audioInConfig" json:"audio_in_config,omitempty"`
    // *Required* Specifies how to format the audio that will be returned.
    AudioOutConfig *AudioOutConfig `protobuf:"bytes,2,opt,name=audio_out_config,json=audioOutConfig" json:"audio_out_config,omitempty"`
    // *Required* Represents the current dialog state.
    ConverseState *ConverseState `protobuf:"bytes,3,opt,name=converse_state,json=converseState" json:"converse_state,omitempty"`
}
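
A minimal sketch of a complete configuration built from the messages on this page, assuming the package is imported as `embedded`; the concrete encoding, sample rate, and volume values are illustrative, and `priorState` stands for a `conversation_state` saved from an earlier response (nil on the first turn):

config := &embedded.ConverseConfig{
    AudioInConfig: &embedded.AudioInConfig{
        Encoding:        embedded.AudioInConfig_LINEAR16,
        SampleRateHertz: 16000,
    },
    AudioOutConfig: &embedded.AudioOutConfig{
        Encoding:         embedded.AudioOutConfig_MP3,
        SampleRateHertz:  16000,
        VolumePercentage: 50,
    },
}
// Per the ConverseState documentation below, set the dialog state only when a
// prior ConverseResponse supplied one.
if priorState != nil {
    config.ConverseState = &embedded.ConverseState{ConversationState: priorState}
}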

func (*ConverseConfig) Descriptor

func (*ConverseConfig) Descriptor() ([]byte, []int)

func (*ConverseConfig) GetAudioInConfig

func (m *ConverseConfig) GetAudioInConfig() *AudioInConfig

func (*ConverseConfig) GetAudioOutConfig

func (m *ConverseConfig) GetAudioOutConfig() *AudioOutConfig

func (*ConverseConfig) GetConverseState

func (m *ConverseConfig) GetConverseState() *ConverseState

func (*ConverseConfig) ProtoMessage

func (*ConverseConfig) ProtoMessage()

func (*ConverseConfig) Reset

func (m *ConverseConfig) Reset()

func (*ConverseConfig) String

func (m *ConverseConfig) String() string

type ConverseRequest

The top-level message sent by the client. Clients must send at least two, and typically numerous `ConverseRequest` messages. The first message must contain a `config` message and must not contain `audio_in` data. All subsequent messages must contain `audio_in` data and must not contain a `config` message.

type ConverseRequest struct {
    // Exactly one of these fields must be specified in each `ConverseRequest`.
    //
    // Types that are valid to be assigned to ConverseRequest:
    //	*ConverseRequest_Config
    //	*ConverseRequest_AudioIn
    ConverseRequest isConverseRequest_ConverseRequest `protobuf_oneof:"converse_request"`
}
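
A sketch of the two request shapes the description above calls for, assuming the package is imported as `embedded`, with a hypothetical `config` (a *ConverseConfig) and `chunk` (a []byte of captured audio); both values would then be passed to the stream's Send method, as in the client example further below:

// First request of the stream: configuration only, no audio_in.
first := &embedded.ConverseRequest{
    ConverseRequest: &embedded.ConverseRequest_Config{Config: config},
}
// Every later request: a chunk of captured audio, no config.
next := &embedded.ConverseRequest{
    ConverseRequest: &embedded.ConverseRequest_AudioIn{AudioIn: chunk},
}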

func (*ConverseRequest) Descriptor

func (*ConverseRequest) Descriptor() ([]byte, []int)

func (*ConverseRequest) GetAudioIn

func (m *ConverseRequest) GetAudioIn() []byte

func (*ConverseRequest) GetConfig

func (m *ConverseRequest) GetConfig() *ConverseConfig

func (*ConverseRequest) GetConverseRequest

func (m *ConverseRequest) GetConverseRequest() isConverseRequest_ConverseRequest

func (*ConverseRequest) ProtoMessage

func (*ConverseRequest) ProtoMessage()

func (*ConverseRequest) Reset

func (m *ConverseRequest) Reset()

func (*ConverseRequest) String

func (m *ConverseRequest) String() string

func (*ConverseRequest) XXX_OneofFuncs

func (*ConverseRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type ConverseRequest_AudioIn

type ConverseRequest_AudioIn struct {
    AudioIn []byte `protobuf:"bytes,2,opt,name=audio_in,json=audioIn,proto3,oneof"`
}

type ConverseRequest_Config

type ConverseRequest_Config struct {
    Config *ConverseConfig `protobuf:"bytes,1,opt,name=config,oneof"`
}

type ConverseResponse

The top-level message received by the client. A series of one or more `ConverseResponse` messages are streamed back to the client.

type ConverseResponse struct {
    // Exactly one of these fields will be populated in each `ConverseResponse`.
    //
    // Types that are valid to be assigned to ConverseResponse:
    //	*ConverseResponse_Error
    //	*ConverseResponse_EventType_
    //	*ConverseResponse_AudioOut
    //	*ConverseResponse_Result
    ConverseResponse isConverseResponse_ConverseResponse `protobuf_oneof:"converse_response"`
}
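
Because exactly one oneof field is populated, received responses are typically dispatched with a type switch. A sketch, assuming the package is imported as `embedded`, `resp` is a *ConverseResponse read from the stream, `audio` is a []byte accumulator, and the standard `log` package is imported:

switch payload := resp.ConverseResponse.(type) {
case *embedded.ConverseResponse_Error:
    log.Printf("assistant error: %v", payload.Error)
case *embedded.ConverseResponse_EventType_:
    log.Printf("event: %v", payload.EventType)
case *embedded.ConverseResponse_AudioOut:
    audio = append(audio, payload.AudioOut.AudioData...) // accumulate response audio
case *embedded.ConverseResponse_Result:
    log.Printf("transcript: %q", payload.Result.SpokenRequestText)
}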

func (*ConverseResponse) Descriptor

func (*ConverseResponse) Descriptor() ([]byte, []int)

func (*ConverseResponse) GetAudioOut

func (m *ConverseResponse) GetAudioOut() *AudioOut

func (*ConverseResponse) GetConverseResponse

func (m *ConverseResponse) GetConverseResponse() isConverseResponse_ConverseResponse

func (*ConverseResponse) GetError

func (m *ConverseResponse) GetError() *google_rpc.Status

func (*ConverseResponse) GetEventType

func (m *ConverseResponse) GetEventType() ConverseResponse_EventType

func (*ConverseResponse) GetResult

func (m *ConverseResponse) GetResult() *ConverseResult

func (*ConverseResponse) ProtoMessage

func (*ConverseResponse) ProtoMessage()

func (*ConverseResponse) Reset

func (m *ConverseResponse) Reset()

func (*ConverseResponse) String

func (m *ConverseResponse) String() string

func (*ConverseResponse) XXX_OneofFuncs

func (*ConverseResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type ConverseResponse_AudioOut

type ConverseResponse_AudioOut struct {
    AudioOut *AudioOut `protobuf:"bytes,3,opt,name=audio_out,json=audioOut,oneof"`
}

type ConverseResponse_Error

type ConverseResponse_Error struct {
    Error *google_rpc.Status `protobuf:"bytes,1,opt,name=error,oneof"`
}

type ConverseResponse_EventType

Indicates the type of event.

type ConverseResponse_EventType int32
const (
    // No event specified.
    ConverseResponse_EVENT_TYPE_UNSPECIFIED ConverseResponse_EventType = 0
    // This event indicates that the server has detected the end of the user's
    // speech utterance and expects no additional speech. Therefore, the server
    // will not process additional audio (although it may subsequently return
    // additional results). The client should stop sending additional audio
    // data, half-close the gRPC connection, and wait for any additional results
    // until the server closes the gRPC connection.
    ConverseResponse_END_OF_UTTERANCE ConverseResponse_EventType = 1
)
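
A sketch of the client-side handling the END_OF_UTTERANCE comment describes: stop capturing audio and half-close the request side of the stream. It assumes the package is imported as `embedded`, `resp` is the current *ConverseResponse, `stream` is an EmbeddedAssistant_ConverseClient (CloseSend comes from the embedded grpc.ClientStream), and `stopRecording` is a hypothetical helper:

if payload, ok := resp.ConverseResponse.(*embedded.ConverseResponse_EventType_); ok &&
    payload.EventType == embedded.ConverseResponse_END_OF_UTTERANCE {
    stopRecording() // hypothetical: stop capturing microphone audio
    if err := stream.CloseSend(); err != nil { // half-close; keep receiving results
        log.Printf("CloseSend: %v", err)
    }
}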

func (ConverseResponse_EventType) EnumDescriptor

func (ConverseResponse_EventType) EnumDescriptor() ([]byte, []int)

func (ConverseResponse_EventType) String

func (x ConverseResponse_EventType) String() string

type ConverseResponse_EventType_

type ConverseResponse_EventType_ struct {
    EventType ConverseResponse_EventType `protobuf:"varint,2,opt,name=event_type,json=eventType,enum=google.assistant.embedded.v1alpha1.ConverseResponse_EventType,oneof"`
}

type ConverseResponse_Result

type ConverseResponse_Result struct {
    Result *ConverseResult `protobuf:"bytes,5,opt,name=result,oneof"`
}

type ConverseResult

The semantic result for the user's spoken query.

type ConverseResult struct {
    // *Output-only* The recognized transcript of what the user said.
    SpokenRequestText string `protobuf:"bytes,1,opt,name=spoken_request_text,json=spokenRequestText" json:"spoken_request_text,omitempty"`
    // *Output-only* The text of the assistant's spoken response. This is only
    // returned for an IFTTT action.
    SpokenResponseText string `protobuf:"bytes,2,opt,name=spoken_response_text,json=spokenResponseText" json:"spoken_response_text,omitempty"`
    // *Output-only* State information for subsequent `ConverseRequest`. This
    // value should be saved in the client and returned in the
    // `conversation_state` with the next `ConverseRequest`. (The client does not
    // need to interpret or otherwise use this value.) There is no need to save
    // this information across device restarts.
    ConversationState []byte `protobuf:"bytes,3,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
    // *Output-only* Specifies the mode of the microphone after this `Converse`
    // RPC is processed.
    MicrophoneMode ConverseResult_MicrophoneMode `protobuf:"varint,4,opt,name=microphone_mode,json=microphoneMode,enum=google.assistant.embedded.v1alpha1.ConverseResult_MicrophoneMode" json:"microphone_mode,omitempty"`
    // *Output-only* Updated volume level. The value will be 0 or omitted
    // (indicating no change) unless a voice command such as "Increase the volume"
    // or "Set volume level 4" was recognized, in which case the value will be
    // between 1 and 100 (corresponding to the new volume level of 1% to 100%).
    // Typically, a client should use this volume level when playing the
    // `audio_out` data, and retain this value as the current volume level and
    // supply it in the `AudioOutConfig` of the next `ConverseRequest`. (Some
    // clients may also implement other ways to allow the current volume level to
    // be changed, for example, by providing a knob that the user can turn.)
    VolumePercentage int32 `protobuf:"varint,5,opt,name=volume_percentage,json=volumePercentage" json:"volume_percentage,omitempty"`
}
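
A sketch of acting on a received result per the field comments above, assuming `result` is a *ConverseResult and `priorState` and `currentVolume` are hypothetical variables the client keeps between turns:

// Keep the opaque dialog state and return it in the next ConverseRequest.
priorState = result.ConversationState

// Apply a volume change only when the assistant reports one (0 means no change),
// and carry the value into the next request's AudioOutConfig.
if v := result.GetVolumePercentage(); v > 0 {
    currentVolume = v
}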

func (*ConverseResult) Descriptor

func (*ConverseResult) Descriptor() ([]byte, []int)

func (*ConverseResult) GetConversationState

func (m *ConverseResult) GetConversationState() []byte

func (*ConverseResult) GetMicrophoneMode

func (m *ConverseResult) GetMicrophoneMode() ConverseResult_MicrophoneMode

func (*ConverseResult) GetSpokenRequestText

func (m *ConverseResult) GetSpokenRequestText() string

func (*ConverseResult) GetSpokenResponseText

func (m *ConverseResult) GetSpokenResponseText() string

func (*ConverseResult) GetVolumePercentage

func (m *ConverseResult) GetVolumePercentage() int32

func (*ConverseResult) ProtoMessage

func (*ConverseResult) ProtoMessage()

func (*ConverseResult) Reset

func (m *ConverseResult) Reset()

func (*ConverseResult) String

func (m *ConverseResult) String() string

type ConverseResult_MicrophoneMode

Possible states of the microphone after a `Converse` RPC completes.

type ConverseResult_MicrophoneMode int32
const (
    // No mode specified.
    ConverseResult_MICROPHONE_MODE_UNSPECIFIED ConverseResult_MicrophoneMode = 0
    // The service is not expecting a follow-on question from the user.
    // The microphone should remain off until the user re-activates it.
    ConverseResult_CLOSE_MICROPHONE ConverseResult_MicrophoneMode = 1
    // The service is expecting a follow-on question from the user. The
    // microphone should be re-opened when the `AudioOut` playback completes
    // (by starting a new `Converse` RPC call to send the new audio).
    ConverseResult_DIALOG_FOLLOW_ON ConverseResult_MicrophoneMode = 2
)
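
A sketch of acting on the microphone mode once `audio_out` playback finishes, per the constant comments above; the package is assumed to be imported as `embedded`, and `reopenMicrophone` is a hypothetical helper that starts a new `Converse` RPC with fresh audio:

switch result.GetMicrophoneMode() {
case embedded.ConverseResult_DIALOG_FOLLOW_ON:
    // The assistant expects a follow-on question: re-open the microphone after
    // playback completes and start a new Converse RPC.
    reopenMicrophone()
case embedded.ConverseResult_CLOSE_MICROPHONE:
    // Leave the microphone off until the user re-activates it.
}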

func (ConverseResult_MicrophoneMode) EnumDescriptor

func (ConverseResult_MicrophoneMode) EnumDescriptor() ([]byte, []int)

func (ConverseResult_MicrophoneMode) String

func (x ConverseResult_MicrophoneMode) String() string

type ConverseState

Provides information about the current dialog state.

type ConverseState struct {
    // *Required* The `conversation_state` value returned in the prior
    // `ConverseResponse`. Omit (do not set the field) if there was no prior
    // `ConverseResponse`. If there was a prior `ConverseResponse`, do not omit
    // this field; doing so will end that conversation (and this new request will
    // start a new conversation).
    ConversationState []byte `protobuf:"bytes,1,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
}

func (*ConverseState) Descriptor

func (*ConverseState) Descriptor() ([]byte, []int)

func (*ConverseState) GetConversationState

func (m *ConverseState) GetConversationState() []byte

func (*ConverseState) ProtoMessage

func (*ConverseState) ProtoMessage()

func (*ConverseState) Reset

func (m *ConverseState) Reset()

func (*ConverseState) String

func (m *ConverseState) String() string

type EmbeddedAssistantClient

type EmbeddedAssistantClient interface {
    // Initiates or continues a conversation with the embedded assistant service.
    // Each call performs one round-trip, sending an audio request to the service
    // and receiving the audio response. Uses bidirectional streaming to receive
    // results, such as the `END_OF_UTTERANCE` event, while sending audio.
    //
    // A conversation is one or more gRPC connections, each consisting of several
    // streamed requests and responses.
    // For example, the user says *Add to my shopping list* and the assistant
    // responds *What do you want to add?*. The sequence of streamed requests and
    // responses in the first gRPC message could be:
    //
    // *   ConverseRequest.config
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseResponse.event_type.END_OF_UTTERANCE
    // *   ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    //
    // The user then says *bagels* and the assistant responds
    // *OK, I've added bagels to your shopping list*. This is sent as another gRPC
    // connection call to the `Converse` method, again with streamed requests and
    // responses, such as:
    //
    // *   ConverseRequest.config
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseResponse.event_type.END_OF_UTTERANCE
    // *   ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    //
    // Although the precise order of responses is not guaranteed, sequential
    // ConverseResponse.audio_out messages will always contain sequential portions
    // of audio.
    Converse(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_ConverseClient, error)
}
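
A condensed sketch of one conversation round-trip following the flow in the comment above. It assumes an already-dialed and authenticated *grpc.ClientConn (the Assistant API requires OAuth2 credentials, which are not shown) and a hypothetical `audioChunks` channel that supplies captured LINEAR16 audio and is closed when recording stops:

import (
    "context"
    "io"

    embedded "google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1"
    "google.golang.org/grpc"
)

// converseOnce performs a single conversation turn: it sends the config,
// streams the caller's audio, and collects the assistant's audio reply.
func converseOnce(ctx context.Context, conn *grpc.ClientConn, config *embedded.ConverseConfig, audioChunks <-chan []byte) ([]byte, error) {
    stream, err := embedded.NewEmbeddedAssistantClient(conn).Converse(ctx)
    if err != nil {
        return nil, err
    }
    // The first request must contain only the config (see ConverseRequest above).
    if err := stream.Send(&embedded.ConverseRequest{
        ConverseRequest: &embedded.ConverseRequest_Config{Config: config},
    }); err != nil {
        return nil, err
    }
    // All later requests carry only audio_in data.
    go func() {
        for chunk := range audioChunks {
            if err := stream.Send(&embedded.ConverseRequest{
                ConverseRequest: &embedded.ConverseRequest_AudioIn{AudioIn: chunk},
            }); err != nil {
                return
            }
        }
        stream.CloseSend() // half-close; keep receiving the remaining responses
    }()
    var audio []byte
    for {
        resp, err := stream.Recv()
        if err == io.EOF {
            return audio, nil // server closed the stream: the turn is complete
        }
        if err != nil {
            return nil, err
        }
        if out := resp.GetAudioOut(); out != nil {
            audio = append(audio, out.AudioData...)
        }
    }
}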

func NewEmbeddedAssistantClient

func NewEmbeddedAssistantClient(cc *grpc.ClientConn) EmbeddedAssistantClient

type EmbeddedAssistantServer

type EmbeddedAssistantServer interface {
    // Initiates or continues a conversation with the embedded assistant service.
    // Each call performs one round-trip, sending an audio request to the service
    // and receiving the audio response. Uses bidirectional streaming to receive
    // results, such as the `END_OF_UTTERANCE` event, while sending audio.
    //
    // A conversation is one or more gRPC connections, each consisting of several
    // streamed requests and responses.
    // For example, the user says *Add to my shopping list* and the assistant
    // responds *What do you want to add?*. The sequence of streamed requests and
    // responses in the first gRPC message could be:
    //
    // *   ConverseRequest.config
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseResponse.event_type.END_OF_UTTERANCE
    // *   ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    //
    // The user then says *bagels* and the assistant responds
    // *OK, I've added bagels to your shopping list*. This is sent as another gRPC
    // connection call to the `Converse` method, again with streamed requests and
    // responses, such as:
    //
    // *   ConverseRequest.config
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseResponse.event_type.END_OF_UTTERANCE
    // *   ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    //
    // Although the precise order of responses is not guaranteed, sequential
    // ConverseResponse.audio_out messages will always contain sequential portions
    // of audio.
    Converse(EmbeddedAssistant_ConverseServer) error
}
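
For completeness, a skeletal server satisfying this interface, reusing the imports from the client sketch above; the response it sends is purely illustrative, since a real implementation would emit END_OF_UTTERANCE, a result, and `audio_out` messages only at the appropriate points in the dialog:

type assistantServer struct{}

func (s *assistantServer) Converse(stream embedded.EmbeddedAssistant_ConverseServer) error {
    for {
        req, err := stream.Recv()
        if err == io.EOF {
            return nil // client half-closed; no more requests for this turn
        }
        if err != nil {
            return err
        }
        switch req.ConverseRequest.(type) {
        case *embedded.ConverseRequest_Config:
            // Validate the configuration for the turn (not shown).
        case *embedded.ConverseRequest_AudioIn:
            // Process the audio chunk; this sketch simply reports END_OF_UTTERANCE.
            err := stream.Send(&embedded.ConverseResponse{
                ConverseResponse: &embedded.ConverseResponse_EventType_{
                    EventType: embedded.ConverseResponse_END_OF_UTTERANCE,
                },
            })
            if err != nil {
                return err
            }
        }
    }
}

The server would then be registered on a grpc.Server with RegisterEmbeddedAssistantServer(s, &assistantServer{}).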

type EmbeddedAssistant_ConverseClient

type EmbeddedAssistant_ConverseClient interface {
    Send(*ConverseRequest) error
    Recv() (*ConverseResponse, error)
    grpc.ClientStream
}

type EmbeddedAssistant_ConverseServer

type EmbeddedAssistant_ConverseServer interface {
    Send(*ConverseResponse) error
    Recv() (*ConverseRequest, error)
    grpc.ServerStream
}