Package videointelligence — generated API documentation (google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1), as rendered by ActiveState ActiveGo 1.8.

Package videointelligence

import "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1"
Overview
Index

Overview

Package videointelligence is a generated protocol buffer package.

It is generated from these files:

google/cloud/videointelligence/v1beta1/video_intelligence.proto

It has these top-level messages:

AnnotateVideoRequest
VideoContext
VideoSegment
LabelLocation
LabelAnnotation
SafeSearchAnnotation
BoundingBox
FaceLocation
FaceAnnotation
VideoAnnotationResults
AnnotateVideoResponse
VideoAnnotationProgress
AnnotateVideoProgress

Index

Variables
func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)
type AnnotateVideoProgress
    func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)
    func (m *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress
    func (*AnnotateVideoProgress) ProtoMessage()
    func (m *AnnotateVideoProgress) Reset()
    func (m *AnnotateVideoProgress) String() string
type AnnotateVideoRequest
    func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)
    func (m *AnnotateVideoRequest) GetFeatures() []Feature
    func (m *AnnotateVideoRequest) GetInputContent() string
    func (m *AnnotateVideoRequest) GetInputUri() string
    func (m *AnnotateVideoRequest) GetLocationId() string
    func (m *AnnotateVideoRequest) GetOutputUri() string
    func (m *AnnotateVideoRequest) GetVideoContext() *VideoContext
    func (*AnnotateVideoRequest) ProtoMessage()
    func (m *AnnotateVideoRequest) Reset()
    func (m *AnnotateVideoRequest) String() string
type AnnotateVideoResponse
    func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)
    func (m *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults
    func (*AnnotateVideoResponse) ProtoMessage()
    func (m *AnnotateVideoResponse) Reset()
    func (m *AnnotateVideoResponse) String() string
type BoundingBox
    func (*BoundingBox) Descriptor() ([]byte, []int)
    func (m *BoundingBox) GetBottom() int32
    func (m *BoundingBox) GetLeft() int32
    func (m *BoundingBox) GetRight() int32
    func (m *BoundingBox) GetTop() int32
    func (*BoundingBox) ProtoMessage()
    func (m *BoundingBox) Reset()
    func (m *BoundingBox) String() string
type FaceAnnotation
    func (*FaceAnnotation) Descriptor() ([]byte, []int)
    func (m *FaceAnnotation) GetLocations() []*FaceLocation
    func (m *FaceAnnotation) GetSegments() []*VideoSegment
    func (m *FaceAnnotation) GetThumbnail() string
    func (*FaceAnnotation) ProtoMessage()
    func (m *FaceAnnotation) Reset()
    func (m *FaceAnnotation) String() string
type FaceLocation
    func (*FaceLocation) Descriptor() ([]byte, []int)
    func (m *FaceLocation) GetBoundingBox() *BoundingBox
    func (m *FaceLocation) GetTimeOffset() int64
    func (*FaceLocation) ProtoMessage()
    func (m *FaceLocation) Reset()
    func (m *FaceLocation) String() string
type Feature
    func (Feature) EnumDescriptor() ([]byte, []int)
    func (x Feature) String() string
type LabelAnnotation
    func (*LabelAnnotation) Descriptor() ([]byte, []int)
    func (m *LabelAnnotation) GetDescription() string
    func (m *LabelAnnotation) GetLanguageCode() string
    func (m *LabelAnnotation) GetLocations() []*LabelLocation
    func (*LabelAnnotation) ProtoMessage()
    func (m *LabelAnnotation) Reset()
    func (m *LabelAnnotation) String() string
type LabelDetectionMode
    func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)
    func (x LabelDetectionMode) String() string
type LabelLevel
    func (LabelLevel) EnumDescriptor() ([]byte, []int)
    func (x LabelLevel) String() string
type LabelLocation
    func (*LabelLocation) Descriptor() ([]byte, []int)
    func (m *LabelLocation) GetConfidence() float32
    func (m *LabelLocation) GetLevel() LabelLevel
    func (m *LabelLocation) GetSegment() *VideoSegment
    func (*LabelLocation) ProtoMessage()
    func (m *LabelLocation) Reset()
    func (m *LabelLocation) String() string
type Likelihood
    func (Likelihood) EnumDescriptor() ([]byte, []int)
    func (x Likelihood) String() string
type SafeSearchAnnotation
    func (*SafeSearchAnnotation) Descriptor() ([]byte, []int)
    func (m *SafeSearchAnnotation) GetAdult() Likelihood
    func (m *SafeSearchAnnotation) GetMedical() Likelihood
    func (m *SafeSearchAnnotation) GetRacy() Likelihood
    func (m *SafeSearchAnnotation) GetSpoof() Likelihood
    func (m *SafeSearchAnnotation) GetTimeOffset() int64
    func (m *SafeSearchAnnotation) GetViolent() Likelihood
    func (*SafeSearchAnnotation) ProtoMessage()
    func (m *SafeSearchAnnotation) Reset()
    func (m *SafeSearchAnnotation) String() string
type VideoAnnotationProgress
    func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)
    func (m *VideoAnnotationProgress) GetInputUri() string
    func (m *VideoAnnotationProgress) GetProgressPercent() int32
    func (m *VideoAnnotationProgress) GetStartTime() *google_protobuf3.Timestamp
    func (m *VideoAnnotationProgress) GetUpdateTime() *google_protobuf3.Timestamp
    func (*VideoAnnotationProgress) ProtoMessage()
    func (m *VideoAnnotationProgress) Reset()
    func (m *VideoAnnotationProgress) String() string
type VideoAnnotationResults
    func (*VideoAnnotationResults) Descriptor() ([]byte, []int)
    func (m *VideoAnnotationResults) GetError() *google_rpc.Status
    func (m *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation
    func (m *VideoAnnotationResults) GetInputUri() string
    func (m *VideoAnnotationResults) GetLabelAnnotations() []*LabelAnnotation
    func (m *VideoAnnotationResults) GetSafeSearchAnnotations() []*SafeSearchAnnotation
    func (m *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment
    func (*VideoAnnotationResults) ProtoMessage()
    func (m *VideoAnnotationResults) Reset()
    func (m *VideoAnnotationResults) String() string
type VideoContext
    func (*VideoContext) Descriptor() ([]byte, []int)
    func (m *VideoContext) GetFaceDetectionModel() string
    func (m *VideoContext) GetLabelDetectionMode() LabelDetectionMode
    func (m *VideoContext) GetLabelDetectionModel() string
    func (m *VideoContext) GetSafeSearchDetectionModel() string
    func (m *VideoContext) GetSegments() []*VideoSegment
    func (m *VideoContext) GetShotChangeDetectionModel() string
    func (m *VideoContext) GetStationaryCamera() bool
    func (*VideoContext) ProtoMessage()
    func (m *VideoContext) Reset()
    func (m *VideoContext) String() string
type VideoIntelligenceServiceClient
    func NewVideoIntelligenceServiceClient(cc *grpc.ClientConn) VideoIntelligenceServiceClient
type VideoIntelligenceServiceServer
type VideoSegment
    func (*VideoSegment) Descriptor() ([]byte, []int)
    func (m *VideoSegment) GetEndTimeOffset() int64
    func (m *VideoSegment) GetStartTimeOffset() int64
    func (*VideoSegment) ProtoMessage()
    func (m *VideoSegment) Reset()
    func (m *VideoSegment) String() string

Package files

video_intelligence.pb.go

Variables

var Feature_name = map[int32]string{
    0: "FEATURE_UNSPECIFIED",
    1: "LABEL_DETECTION",
    2: "FACE_DETECTION",
    3: "SHOT_CHANGE_DETECTION",
    4: "SAFE_SEARCH_DETECTION",
}
var Feature_value = map[string]int32{
    "FEATURE_UNSPECIFIED":   0,
    "LABEL_DETECTION":       1,
    "FACE_DETECTION":        2,
    "SHOT_CHANGE_DETECTION": 3,
    "SAFE_SEARCH_DETECTION": 4,
}
var LabelDetectionMode_name = map[int32]string{
    0: "LABEL_DETECTION_MODE_UNSPECIFIED",
    1: "SHOT_MODE",
    2: "FRAME_MODE",
    3: "SHOT_AND_FRAME_MODE",
}
var LabelDetectionMode_value = map[string]int32{
    "LABEL_DETECTION_MODE_UNSPECIFIED": 0,
    "SHOT_MODE":                        1,
    "FRAME_MODE":                       2,
    "SHOT_AND_FRAME_MODE":              3,
}
var LabelLevel_name = map[int32]string{
    0: "LABEL_LEVEL_UNSPECIFIED",
    1: "VIDEO_LEVEL",
    2: "SEGMENT_LEVEL",
    3: "SHOT_LEVEL",
    4: "FRAME_LEVEL",
}
var LabelLevel_value = map[string]int32{
    "LABEL_LEVEL_UNSPECIFIED": 0,
    "VIDEO_LEVEL":             1,
    "SEGMENT_LEVEL":           2,
    "SHOT_LEVEL":              3,
    "FRAME_LEVEL":             4,
}
var Likelihood_name = map[int32]string{
    0: "UNKNOWN",
    1: "VERY_UNLIKELY",
    2: "UNLIKELY",
    3: "POSSIBLE",
    4: "LIKELY",
    5: "VERY_LIKELY",
}
var Likelihood_value = map[string]int32{
    "UNKNOWN":       0,
    "VERY_UNLIKELY": 1,
    "UNLIKELY":      2,
    "POSSIBLE":      3,
    "LIKELY":        4,
    "VERY_LIKELY":   5,
}

func RegisterVideoIntelligenceServiceServer

func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)

type AnnotateVideoProgress

Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

type AnnotateVideoProgress struct {
    // Progress metadata for all videos specified in `AnnotateVideoRequest`.
    AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress" json:"annotation_progress,omitempty"`
}

func (*AnnotateVideoProgress) Descriptor

func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)

func (*AnnotateVideoProgress) GetAnnotationProgress

func (m *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress

func (*AnnotateVideoProgress) ProtoMessage

func (*AnnotateVideoProgress) ProtoMessage()

func (*AnnotateVideoProgress) Reset

func (m *AnnotateVideoProgress) Reset()

func (*AnnotateVideoProgress) String

func (m *AnnotateVideoProgress) String() string

type AnnotateVideoRequest

Video annotation request.

type AnnotateVideoRequest struct {
    // Input video location. Currently, only
    // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
    // supported, which must be specified in the following format:
    // `gs://bucket-id/object-id` (other URI formats return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
    // [Request URIs](/storage/docs/reference-uris).
    // A video URI may include wildcards in `object-id`, and thus identify
    // multiple videos. Supported wildcards: '*' to match 0 or more characters;
    // '?' to match 1 character. If unset, the input video should be embedded
    // in the request as `input_content`. If set, `input_content` should be unset.
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"`
    // The video data bytes. Encoding: base64. If unset, the input video(s)
    // should be specified via `input_uri`. If set, `input_uri` should be unset.
    InputContent string `protobuf:"bytes,6,opt,name=input_content,json=inputContent" json:"input_content,omitempty"`
    // Requested video annotation features.
    Features []Feature `protobuf:"varint,2,rep,packed,name=features,enum=google.cloud.videointelligence.v1beta1.Feature" json:"features,omitempty"`
    // Additional video context and/or feature-specific parameters.
    VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext" json:"video_context,omitempty"`
    // Optional location where the output (in JSON format) should be stored.
    // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
    // URIs are supported, which must be specified in the following format:
    // `gs://bucket-id/object-id` (other URI formats return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
    // [Request URIs](/storage/docs/reference-uris).
    OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"`
    // Optional cloud region where annotation should take place. Supported cloud
    // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
    // is specified, a region will be determined based on video file location.
    LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId" json:"location_id,omitempty"`
}

func (*AnnotateVideoRequest) Descriptor

func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)

func (*AnnotateVideoRequest) GetFeatures

func (m *AnnotateVideoRequest) GetFeatures() []Feature

func (*AnnotateVideoRequest) GetInputContent

func (m *AnnotateVideoRequest) GetInputContent() string

func (*AnnotateVideoRequest) GetInputUri

func (m *AnnotateVideoRequest) GetInputUri() string

func (*AnnotateVideoRequest) GetLocationId

func (m *AnnotateVideoRequest) GetLocationId() string

func (*AnnotateVideoRequest) GetOutputUri

func (m *AnnotateVideoRequest) GetOutputUri() string

func (*AnnotateVideoRequest) GetVideoContext

func (m *AnnotateVideoRequest) GetVideoContext() *VideoContext

func (*AnnotateVideoRequest) ProtoMessage

func (*AnnotateVideoRequest) ProtoMessage()

func (*AnnotateVideoRequest) Reset

func (m *AnnotateVideoRequest) Reset()

func (*AnnotateVideoRequest) String

func (m *AnnotateVideoRequest) String() string

type AnnotateVideoResponse

Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

type AnnotateVideoResponse struct {
    // Annotation results for all videos specified in `AnnotateVideoRequest`.
    AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults" json:"annotation_results,omitempty"`
}

func (*AnnotateVideoResponse) Descriptor

func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)

func (*AnnotateVideoResponse) GetAnnotationResults

func (m *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults

func (*AnnotateVideoResponse) ProtoMessage

func (*AnnotateVideoResponse) ProtoMessage()

func (*AnnotateVideoResponse) Reset

func (m *AnnotateVideoResponse) Reset()

func (*AnnotateVideoResponse) String

func (m *AnnotateVideoResponse) String() string

type BoundingBox

Bounding box.

type BoundingBox struct {
    // Left X coordinate.
    Left int32 `protobuf:"varint,1,opt,name=left" json:"left,omitempty"`
    // Right X coordinate.
    Right int32 `protobuf:"varint,2,opt,name=right" json:"right,omitempty"`
    // Bottom Y coordinate.
    Bottom int32 `protobuf:"varint,3,opt,name=bottom" json:"bottom,omitempty"`
    // Top Y coordinate.
    Top int32 `protobuf:"varint,4,opt,name=top" json:"top,omitempty"`
}

func (*BoundingBox) Descriptor

func (*BoundingBox) Descriptor() ([]byte, []int)

func (*BoundingBox) GetBottom

func (m *BoundingBox) GetBottom() int32

func (*BoundingBox) GetLeft

func (m *BoundingBox) GetLeft() int32

func (*BoundingBox) GetRight

func (m *BoundingBox) GetRight() int32

func (*BoundingBox) GetTop

func (m *BoundingBox) GetTop() int32

func (*BoundingBox) ProtoMessage

func (*BoundingBox) ProtoMessage()

func (*BoundingBox) Reset

func (m *BoundingBox) Reset()

func (*BoundingBox) String

func (m *BoundingBox) String() string

type FaceAnnotation

Face annotation.

type FaceAnnotation struct {
    // Thumbnail of a representative face view (in JPEG format). Encoding: base64.
    Thumbnail string `protobuf:"bytes,1,opt,name=thumbnail" json:"thumbnail,omitempty"`
    // All locations where a face was detected.
    // Faces are detected and tracked on a per-video basis
    // (as opposed to across multiple videos).
    Segments []*VideoSegment `protobuf:"bytes,2,rep,name=segments" json:"segments,omitempty"`
    // Face locations at one frame per second.
    Locations []*FaceLocation `protobuf:"bytes,3,rep,name=locations" json:"locations,omitempty"`
}

func (*FaceAnnotation) Descriptor

func (*FaceAnnotation) Descriptor() ([]byte, []int)

func (*FaceAnnotation) GetLocations

func (m *FaceAnnotation) GetLocations() []*FaceLocation

func (*FaceAnnotation) GetSegments

func (m *FaceAnnotation) GetSegments() []*VideoSegment

func (*FaceAnnotation) GetThumbnail

func (m *FaceAnnotation) GetThumbnail() string

func (*FaceAnnotation) ProtoMessage

func (*FaceAnnotation) ProtoMessage()

func (*FaceAnnotation) Reset

func (m *FaceAnnotation) Reset()

func (*FaceAnnotation) String

func (m *FaceAnnotation) String() string

type FaceLocation

Face location.

type FaceLocation struct {
    // Bounding box in a frame.
    BoundingBox *BoundingBox `protobuf:"bytes,1,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"`
    // Video time offset in microseconds.
    TimeOffset int64 `protobuf:"varint,2,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"`
}

func (*FaceLocation) Descriptor

func (*FaceLocation) Descriptor() ([]byte, []int)

func (*FaceLocation) GetBoundingBox

func (m *FaceLocation) GetBoundingBox() *BoundingBox

func (*FaceLocation) GetTimeOffset

func (m *FaceLocation) GetTimeOffset() int64

func (*FaceLocation) ProtoMessage

func (*FaceLocation) ProtoMessage()

func (*FaceLocation) Reset

func (m *FaceLocation) Reset()

func (*FaceLocation) String

func (m *FaceLocation) String() string

type Feature

Video annotation feature.

type Feature int32
const (
    // Unspecified.
    Feature_FEATURE_UNSPECIFIED Feature = 0
    // Label detection. Detect objects, such as dog or flower.
    Feature_LABEL_DETECTION Feature = 1
    // Human face detection and tracking.
    Feature_FACE_DETECTION Feature = 2
    // Shot change detection.
    Feature_SHOT_CHANGE_DETECTION Feature = 3
    // Safe search detection.
    Feature_SAFE_SEARCH_DETECTION Feature = 4
)

func (Feature) EnumDescriptor

func (Feature) EnumDescriptor() ([]byte, []int)

func (Feature) String

func (x Feature) String() string

type LabelAnnotation

Label annotation.

type LabelAnnotation struct {
    // Textual description, e.g. `Fixed-gear bicycle`.
    Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
    // Language code for `description` in BCP-47 format.
    LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode" json:"language_code,omitempty"`
    // Where the label was detected and with what confidence.
    Locations []*LabelLocation `protobuf:"bytes,3,rep,name=locations" json:"locations,omitempty"`
}

func (*LabelAnnotation) Descriptor

func (*LabelAnnotation) Descriptor() ([]byte, []int)

func (*LabelAnnotation) GetDescription

func (m *LabelAnnotation) GetDescription() string

func (*LabelAnnotation) GetLanguageCode

func (m *LabelAnnotation) GetLanguageCode() string

func (*LabelAnnotation) GetLocations

func (m *LabelAnnotation) GetLocations() []*LabelLocation

func (*LabelAnnotation) ProtoMessage

func (*LabelAnnotation) ProtoMessage()

func (*LabelAnnotation) Reset

func (m *LabelAnnotation) Reset()

func (*LabelAnnotation) String

func (m *LabelAnnotation) String() string

type LabelDetectionMode

Label detection mode.

type LabelDetectionMode int32
const (
    // Unspecified.
    LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0
    // Detect shot-level labels.
    LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1
    // Detect frame-level labels.
    LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2
    // Detect both shot-level and frame-level labels.
    LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3
)

func (LabelDetectionMode) EnumDescriptor

func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)

func (LabelDetectionMode) String

func (x LabelDetectionMode) String() string

type LabelLevel

Label level (scope).

type LabelLevel int32
const (
    // Unspecified.
    LabelLevel_LABEL_LEVEL_UNSPECIFIED LabelLevel = 0
    // Video-level. Corresponds to the whole video.
    LabelLevel_VIDEO_LEVEL LabelLevel = 1
    // Segment-level. Corresponds to one of `AnnotateSpec.segments`.
    LabelLevel_SEGMENT_LEVEL LabelLevel = 2
    // Shot-level. Corresponds to a single shot (i.e. a series of frames
    // without a major camera position or background change).
    LabelLevel_SHOT_LEVEL LabelLevel = 3
    // Frame-level. Corresponds to a single video frame.
    LabelLevel_FRAME_LEVEL LabelLevel = 4
)

func (LabelLevel) EnumDescriptor

func (LabelLevel) EnumDescriptor() ([]byte, []int)

func (LabelLevel) String

func (x LabelLevel) String() string

type LabelLocation

Label location.

type LabelLocation struct {
    // Video segment. Set to [-1, -1] for video-level labels.
    // Set to [timestamp, timestamp] for frame-level labels.
    // Otherwise, corresponds to one of `AnnotateSpec.segments`
    // (if specified) or to shot boundaries (if requested).
    Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment" json:"segment,omitempty"`
    // Confidence that the label is accurate. Range: [0, 1].
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"`
    // Label level.
    Level LabelLevel `protobuf:"varint,3,opt,name=level,enum=google.cloud.videointelligence.v1beta1.LabelLevel" json:"level,omitempty"`
}

func (*LabelLocation) Descriptor

func (*LabelLocation) Descriptor() ([]byte, []int)

func (*LabelLocation) GetConfidence

func (m *LabelLocation) GetConfidence() float32

func (*LabelLocation) GetLevel

func (m *LabelLocation) GetLevel() LabelLevel

func (*LabelLocation) GetSegment

func (m *LabelLocation) GetSegment() *VideoSegment

func (*LabelLocation) ProtoMessage

func (*LabelLocation) ProtoMessage()

func (*LabelLocation) Reset

func (m *LabelLocation) Reset()

func (*LabelLocation) String

func (m *LabelLocation) String() string

type Likelihood

Bucketized representation of likelihood.

type Likelihood int32
const (
    // Unknown likelihood.
    Likelihood_UNKNOWN Likelihood = 0
    // Very unlikely.
    Likelihood_VERY_UNLIKELY Likelihood = 1
    // Unlikely.
    Likelihood_UNLIKELY Likelihood = 2
    // Possible.
    Likelihood_POSSIBLE Likelihood = 3
    // Likely.
    Likelihood_LIKELY Likelihood = 4
    // Very likely.
    Likelihood_VERY_LIKELY Likelihood = 5
)

func (Likelihood) EnumDescriptor

func (Likelihood) EnumDescriptor() ([]byte, []int)

func (Likelihood) String

func (x Likelihood) String() string

type SafeSearchAnnotation

Safe search annotation (based on per-frame visual signals only). If no unsafe content has been detected in a frame, no annotations are present for that frame. If only some types of unsafe content have been detected in a frame, the likelihood is set to `UNKNOWN` for all other types of unsafe content.

type SafeSearchAnnotation struct {
    // Likelihood of adult content.
    Adult Likelihood `protobuf:"varint,1,opt,name=adult,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"adult,omitempty"`
    // Likelihood that an obvious modification was made to the original
    // version to make it appear funny or offensive.
    Spoof Likelihood `protobuf:"varint,2,opt,name=spoof,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"spoof,omitempty"`
    // Likelihood of medical content.
    Medical Likelihood `protobuf:"varint,3,opt,name=medical,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"medical,omitempty"`
    // Likelihood of violent content.
    Violent Likelihood `protobuf:"varint,4,opt,name=violent,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"violent,omitempty"`
    // Likelihood of racy content.
    Racy Likelihood `protobuf:"varint,5,opt,name=racy,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"racy,omitempty"`
    // Video time offset in microseconds.
    TimeOffset int64 `protobuf:"varint,6,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"`
}

func (*SafeSearchAnnotation) Descriptor

func (*SafeSearchAnnotation) Descriptor() ([]byte, []int)

func (*SafeSearchAnnotation) GetAdult

func (m *SafeSearchAnnotation) GetAdult() Likelihood

func (*SafeSearchAnnotation) GetMedical

func (m *SafeSearchAnnotation) GetMedical() Likelihood

func (*SafeSearchAnnotation) GetRacy

func (m *SafeSearchAnnotation) GetRacy() Likelihood

func (*SafeSearchAnnotation) GetSpoof

func (m *SafeSearchAnnotation) GetSpoof() Likelihood

func (*SafeSearchAnnotation) GetTimeOffset

func (m *SafeSearchAnnotation) GetTimeOffset() int64

func (*SafeSearchAnnotation) GetViolent

func (m *SafeSearchAnnotation) GetViolent() Likelihood

func (*SafeSearchAnnotation) ProtoMessage

func (*SafeSearchAnnotation) ProtoMessage()

func (*SafeSearchAnnotation) Reset

func (m *SafeSearchAnnotation) Reset()

func (*SafeSearchAnnotation) String

func (m *SafeSearchAnnotation) String() string

type VideoAnnotationProgress

Annotation progress for a single video.

type VideoAnnotationProgress struct {
    // Video file location in
    // [Google Cloud Storage](https://cloud.google.com/storage/).
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"`
    // Approximate percentage processed thus far.
    // Guaranteed to be 100 when fully processed.
    ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"`
    // Time when the request was received.
    StartTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
    // Time of the most recent update.
    UpdateTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime" json:"update_time,omitempty"`
}

func (*VideoAnnotationProgress) Descriptor

func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)

func (*VideoAnnotationProgress) GetInputUri

func (m *VideoAnnotationProgress) GetInputUri() string

func (*VideoAnnotationProgress) GetProgressPercent

func (m *VideoAnnotationProgress) GetProgressPercent() int32

func (*VideoAnnotationProgress) GetStartTime

func (m *VideoAnnotationProgress) GetStartTime() *google_protobuf3.Timestamp

func (*VideoAnnotationProgress) GetUpdateTime

func (m *VideoAnnotationProgress) GetUpdateTime() *google_protobuf3.Timestamp

func (*VideoAnnotationProgress) ProtoMessage

func (*VideoAnnotationProgress) ProtoMessage()

func (*VideoAnnotationProgress) Reset

func (m *VideoAnnotationProgress) Reset()

func (*VideoAnnotationProgress) String

func (m *VideoAnnotationProgress) String() string

type VideoAnnotationResults

Annotation results for a single video.

type VideoAnnotationResults struct {
    // Video file location in
    // [Google Cloud Storage](https://cloud.google.com/storage/).
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"`
    // Label annotations. There is exactly one element for each unique label.
    LabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=label_annotations,json=labelAnnotations" json:"label_annotations,omitempty"`
    // Face annotations. There is exactly one element for each unique face.
    FaceAnnotations []*FaceAnnotation `protobuf:"bytes,3,rep,name=face_annotations,json=faceAnnotations" json:"face_annotations,omitempty"`
    // Shot annotations. Each shot is represented as a video segment.
    ShotAnnotations []*VideoSegment `protobuf:"bytes,4,rep,name=shot_annotations,json=shotAnnotations" json:"shot_annotations,omitempty"`
    // Safe search annotations.
    SafeSearchAnnotations []*SafeSearchAnnotation `protobuf:"bytes,6,rep,name=safe_search_annotations,json=safeSearchAnnotations" json:"safe_search_annotations,omitempty"`
    // If set, indicates an error. Note that for a single `AnnotateVideoRequest`
    // some videos may succeed and some may fail.
    Error *google_rpc.Status `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"`
}

func (*VideoAnnotationResults) Descriptor

func (*VideoAnnotationResults) Descriptor() ([]byte, []int)

func (*VideoAnnotationResults) GetError

func (m *VideoAnnotationResults) GetError() *google_rpc.Status

func (*VideoAnnotationResults) GetFaceAnnotations

func (m *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation

func (*VideoAnnotationResults) GetInputUri

func (m *VideoAnnotationResults) GetInputUri() string

func (*VideoAnnotationResults) GetLabelAnnotations

func (m *VideoAnnotationResults) GetLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetSafeSearchAnnotations

func (m *VideoAnnotationResults) GetSafeSearchAnnotations() []*SafeSearchAnnotation

func (*VideoAnnotationResults) GetShotAnnotations

func (m *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment

func (*VideoAnnotationResults) ProtoMessage

func (*VideoAnnotationResults) ProtoMessage()

func (*VideoAnnotationResults) Reset

func (m *VideoAnnotationResults) Reset()

func (*VideoAnnotationResults) String

func (m *VideoAnnotationResults) String() string

type VideoContext

Video context and/or feature-specific parameters.

type VideoContext struct {
    // Video segments to annotate. The segments may overlap and are not required
    // to be contiguous or span the whole video. If unspecified, each video
    // is treated as a single segment.
    Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty"`
    // If label detection has been requested, what labels should be detected
    // in addition to video-level labels or segment-level labels. If unspecified,
    // defaults to `SHOT_MODE`.
    LabelDetectionMode LabelDetectionMode `protobuf:"varint,2,opt,name=label_detection_mode,json=labelDetectionMode,enum=google.cloud.videointelligence.v1beta1.LabelDetectionMode" json:"label_detection_mode,omitempty"`
    // Whether the video has been shot from a stationary (i.e. non-moving) camera.
    // When set to true, might improve detection accuracy for moving objects.
    StationaryCamera bool `protobuf:"varint,3,opt,name=stationary_camera,json=stationaryCamera" json:"stationary_camera,omitempty"`
    // Model to use for label detection.
    // Supported values: "latest" and "stable" (the default).
    LabelDetectionModel string `protobuf:"bytes,4,opt,name=label_detection_model,json=labelDetectionModel" json:"label_detection_model,omitempty"`
    // Model to use for face detection.
    // Supported values: "latest" and "stable" (the default).
    FaceDetectionModel string `protobuf:"bytes,5,opt,name=face_detection_model,json=faceDetectionModel" json:"face_detection_model,omitempty"`
    // Model to use for shot change detection.
    // Supported values: "latest" and "stable" (the default).
    ShotChangeDetectionModel string `protobuf:"bytes,6,opt,name=shot_change_detection_model,json=shotChangeDetectionModel" json:"shot_change_detection_model,omitempty"`
    // Model to use for safe search detection.
    // Supported values: "latest" and "stable" (the default).
    SafeSearchDetectionModel string `protobuf:"bytes,7,opt,name=safe_search_detection_model,json=safeSearchDetectionModel" json:"safe_search_detection_model,omitempty"`
}

func (*VideoContext) Descriptor

func (*VideoContext) Descriptor() ([]byte, []int)

func (*VideoContext) GetFaceDetectionModel

func (m *VideoContext) GetFaceDetectionModel() string

func (*VideoContext) GetLabelDetectionMode

func (m *VideoContext) GetLabelDetectionMode() LabelDetectionMode

func (*VideoContext) GetLabelDetectionModel

func (m *VideoContext) GetLabelDetectionModel() string

func (*VideoContext) GetSafeSearchDetectionModel

func (m *VideoContext) GetSafeSearchDetectionModel() string

func (*VideoContext) GetSegments

func (m *VideoContext) GetSegments() []*VideoSegment

func (*VideoContext) GetShotChangeDetectionModel

func (m *VideoContext) GetShotChangeDetectionModel() string

func (*VideoContext) GetStationaryCamera

func (m *VideoContext) GetStationaryCamera() bool

func (*VideoContext) ProtoMessage

func (*VideoContext) ProtoMessage()

func (*VideoContext) Reset

func (m *VideoContext) Reset()

func (*VideoContext) String

func (m *VideoContext) String() string

type VideoIntelligenceServiceClient

type VideoIntelligenceServiceClient interface {
    // Performs asynchronous video annotation. Progress and results can be
    // retrieved through the `google.longrunning.Operations` interface.
    // `Operation.metadata` contains `AnnotateVideoProgress` (progress).
    // `Operation.response` contains `AnnotateVideoResponse` (results).
    AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
}

func NewVideoIntelligenceServiceClient

func NewVideoIntelligenceServiceClient(cc *grpc.ClientConn) VideoIntelligenceServiceClient

type VideoIntelligenceServiceServer

type VideoIntelligenceServiceServer interface {
    // Performs asynchronous video annotation. Progress and results can be
    // retrieved through the `google.longrunning.Operations` interface.
    // `Operation.metadata` contains `AnnotateVideoProgress` (progress).
    // `Operation.response` contains `AnnotateVideoResponse` (results).
    AnnotateVideo(context.Context, *AnnotateVideoRequest) (*google_longrunning.Operation, error)
}

type VideoSegment

Video segment.

type VideoSegment struct {
    // Start offset in microseconds (inclusive). Unset means 0.
    StartTimeOffset int64 `protobuf:"varint,1,opt,name=start_time_offset,json=startTimeOffset" json:"start_time_offset,omitempty"`
    // End offset in microseconds (inclusive). Unset means 0.
    EndTimeOffset int64 `protobuf:"varint,2,opt,name=end_time_offset,json=endTimeOffset" json:"end_time_offset,omitempty"`
}

func (*VideoSegment) Descriptor

func (*VideoSegment) Descriptor() ([]byte, []int)

func (*VideoSegment) GetEndTimeOffset

func (m *VideoSegment) GetEndTimeOffset() int64

func (*VideoSegment) GetStartTimeOffset

func (m *VideoSegment) GetStartTimeOffset() int64

func (*VideoSegment) ProtoMessage

func (*VideoSegment) ProtoMessage()

func (*VideoSegment) Reset

func (m *VideoSegment) Reset()

func (*VideoSegment) String

func (m *VideoSegment) String() string