Package vision
Variables
var Block_BlockType_name = map[int32]string{
0: "UNKNOWN",
1: "TEXT",
2: "TABLE",
3: "PICTURE",
4: "RULER",
5: "BARCODE",
}
var Block_BlockType_value = map[string]int32{
"UNKNOWN": 0,
"TEXT": 1,
"TABLE": 2,
"PICTURE": 3,
"RULER": 4,
"BARCODE": 5,
}
var FaceAnnotation_Landmark_Type_name = map[int32]string{
0: "UNKNOWN_LANDMARK",
1: "LEFT_EYE",
2: "RIGHT_EYE",
3: "LEFT_OF_LEFT_EYEBROW",
4: "RIGHT_OF_LEFT_EYEBROW",
5: "LEFT_OF_RIGHT_EYEBROW",
6: "RIGHT_OF_RIGHT_EYEBROW",
7: "MIDPOINT_BETWEEN_EYES",
8: "NOSE_TIP",
9: "UPPER_LIP",
10: "LOWER_LIP",
11: "MOUTH_LEFT",
12: "MOUTH_RIGHT",
13: "MOUTH_CENTER",
14: "NOSE_BOTTOM_RIGHT",
15: "NOSE_BOTTOM_LEFT",
16: "NOSE_BOTTOM_CENTER",
17: "LEFT_EYE_TOP_BOUNDARY",
18: "LEFT_EYE_RIGHT_CORNER",
19: "LEFT_EYE_BOTTOM_BOUNDARY",
20: "LEFT_EYE_LEFT_CORNER",
21: "RIGHT_EYE_TOP_BOUNDARY",
22: "RIGHT_EYE_RIGHT_CORNER",
23: "RIGHT_EYE_BOTTOM_BOUNDARY",
24: "RIGHT_EYE_LEFT_CORNER",
25: "LEFT_EYEBROW_UPPER_MIDPOINT",
26: "RIGHT_EYEBROW_UPPER_MIDPOINT",
27: "LEFT_EAR_TRAGION",
28: "RIGHT_EAR_TRAGION",
29: "LEFT_EYE_PUPIL",
30: "RIGHT_EYE_PUPIL",
31: "FOREHEAD_GLABELLA",
32: "CHIN_GNATHION",
33: "CHIN_LEFT_GONION",
34: "CHIN_RIGHT_GONION",
}
var FaceAnnotation_Landmark_Type_value = map[string]int32{
"UNKNOWN_LANDMARK": 0,
"LEFT_EYE": 1,
"RIGHT_EYE": 2,
"LEFT_OF_LEFT_EYEBROW": 3,
"RIGHT_OF_LEFT_EYEBROW": 4,
"LEFT_OF_RIGHT_EYEBROW": 5,
"RIGHT_OF_RIGHT_EYEBROW": 6,
"MIDPOINT_BETWEEN_EYES": 7,
"NOSE_TIP": 8,
"UPPER_LIP": 9,
"LOWER_LIP": 10,
"MOUTH_LEFT": 11,
"MOUTH_RIGHT": 12,
"MOUTH_CENTER": 13,
"NOSE_BOTTOM_RIGHT": 14,
"NOSE_BOTTOM_LEFT": 15,
"NOSE_BOTTOM_CENTER": 16,
"LEFT_EYE_TOP_BOUNDARY": 17,
"LEFT_EYE_RIGHT_CORNER": 18,
"LEFT_EYE_BOTTOM_BOUNDARY": 19,
"LEFT_EYE_LEFT_CORNER": 20,
"RIGHT_EYE_TOP_BOUNDARY": 21,
"RIGHT_EYE_RIGHT_CORNER": 22,
"RIGHT_EYE_BOTTOM_BOUNDARY": 23,
"RIGHT_EYE_LEFT_CORNER": 24,
"LEFT_EYEBROW_UPPER_MIDPOINT": 25,
"RIGHT_EYEBROW_UPPER_MIDPOINT": 26,
"LEFT_EAR_TRAGION": 27,
"RIGHT_EAR_TRAGION": 28,
"LEFT_EYE_PUPIL": 29,
"RIGHT_EYE_PUPIL": 30,
"FOREHEAD_GLABELLA": 31,
"CHIN_GNATHION": 32,
"CHIN_LEFT_GONION": 33,
"CHIN_RIGHT_GONION": 34,
}
var Feature_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "FACE_DETECTION",
2: "LANDMARK_DETECTION",
3: "LOGO_DETECTION",
4: "LABEL_DETECTION",
5: "TEXT_DETECTION",
11: "DOCUMENT_TEXT_DETECTION",
6: "SAFE_SEARCH_DETECTION",
7: "IMAGE_PROPERTIES",
9: "CROP_HINTS",
10: "WEB_DETECTION",
}
var Feature_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"FACE_DETECTION": 1,
"LANDMARK_DETECTION": 2,
"LOGO_DETECTION": 3,
"LABEL_DETECTION": 4,
"TEXT_DETECTION": 5,
"DOCUMENT_TEXT_DETECTION": 11,
"SAFE_SEARCH_DETECTION": 6,
"IMAGE_PROPERTIES": 7,
"CROP_HINTS": 9,
"WEB_DETECTION": 10,
}
var Likelihood_name = map[int32]string{
0: "UNKNOWN",
1: "VERY_UNLIKELY",
2: "UNLIKELY",
3: "POSSIBLE",
4: "LIKELY",
5: "VERY_LIKELY",
}
var Likelihood_value = map[string]int32{
"UNKNOWN": 0,
"VERY_UNLIKELY": 1,
"UNLIKELY": 2,
"POSSIBLE": 3,
"LIKELY": 4,
"VERY_LIKELY": 5,
}
var TextAnnotation_DetectedBreak_BreakType_name = map[int32]string{
0: "UNKNOWN",
1: "SPACE",
2: "SURE_SPACE",
3: "EOL_SURE_SPACE",
4: "HYPHEN",
5: "LINE_BREAK",
}
var TextAnnotation_DetectedBreak_BreakType_value = map[string]int32{
"UNKNOWN": 0,
"SPACE": 1,
"SURE_SPACE": 2,
"EOL_SURE_SPACE": 3,
"HYPHEN": 4,
"LINE_BREAK": 5,
}
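These generated maps are plain lookup tables between enum names and their numeric values; a minimal illustrative sketch, assuming this package is imported under the alias vision and fmt is imported:
func lookupFeatureType() {
	name := vision.Feature_Type_name[int32(vision.Feature_LABEL_DETECTION)] // "LABEL_DETECTION"
	value := vision.Feature_Type_value["DOCUMENT_TEXT_DETECTION"]           // 11
	fmt.Println(name, value)
}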
func RegisterImageAnnotatorServer ¶
func RegisterImageAnnotatorServer(s *grpc.Server, srv ImageAnnotatorServer)
type AnnotateImageRequest ¶
Request for performing Google Cloud Vision API tasks over a user-provided image, with user-requested features.
type AnnotateImageRequest struct {
// The image to be processed.
Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"`
// Requested features.
Features []*Feature `protobuf:"bytes,2,rep,name=features" json:"features,omitempty"`
// Additional context that may accompany the image.
ImageContext *ImageContext `protobuf:"bytes,3,opt,name=image_context,json=imageContext" json:"image_context,omitempty"`
}
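A request pairs one Image with the Features to run over it; a minimal sketch of building a label-detection request (the URL and result limit are illustrative, and vision is an assumed import alias for this package):
func newLabelRequest() *vision.AnnotateImageRequest {
	return &vision.AnnotateImageRequest{
		Image: &vision.Image{
			Source: &vision.ImageSource{ImageUri: "https://example.com/photo.jpg"}, // placeholder URL
		},
		Features: []*vision.Feature{
			{Type: vision.Feature_LABEL_DETECTION, MaxResults: 10},
		},
	}
}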
func (*AnnotateImageRequest) Descriptor ¶
func (*AnnotateImageRequest) Descriptor() ([]byte, []int)
func (*AnnotateImageRequest) GetFeatures ¶
func (m *AnnotateImageRequest) GetFeatures() []*Feature
func (*AnnotateImageRequest) GetImage ¶
func (m *AnnotateImageRequest) GetImage() *Image
func (*AnnotateImageRequest) GetImageContext ¶
func (m *AnnotateImageRequest) GetImageContext() *ImageContext
func (*AnnotateImageRequest) ProtoMessage ¶
func (*AnnotateImageRequest) ProtoMessage()
func (*AnnotateImageRequest) Reset ¶
func (m *AnnotateImageRequest) Reset()
func (*AnnotateImageRequest) String ¶
func (m *AnnotateImageRequest) String() string
type AnnotateImageResponse ¶
Response to an image annotation request.
type AnnotateImageResponse struct {
// If present, face detection has completed successfully.
FaceAnnotations []*FaceAnnotation `protobuf:"bytes,1,rep,name=face_annotations,json=faceAnnotations" json:"face_annotations,omitempty"`
// If present, landmark detection has completed successfully.
LandmarkAnnotations []*EntityAnnotation `protobuf:"bytes,2,rep,name=landmark_annotations,json=landmarkAnnotations" json:"landmark_annotations,omitempty"`
// If present, logo detection has completed successfully.
LogoAnnotations []*EntityAnnotation `protobuf:"bytes,3,rep,name=logo_annotations,json=logoAnnotations" json:"logo_annotations,omitempty"`
// If present, label detection has completed successfully.
LabelAnnotations []*EntityAnnotation `protobuf:"bytes,4,rep,name=label_annotations,json=labelAnnotations" json:"label_annotations,omitempty"`
// If present, text (OCR) detection or document (OCR) text detection has
// completed successfully.
TextAnnotations []*EntityAnnotation `protobuf:"bytes,5,rep,name=text_annotations,json=textAnnotations" json:"text_annotations,omitempty"`
// If present, text (OCR) detection or document (OCR) text detection has
// completed successfully.
// This annotation provides the structural hierarchy for the OCR detected
// text.
FullTextAnnotation *TextAnnotation `protobuf:"bytes,12,opt,name=full_text_annotation,json=fullTextAnnotation" json:"full_text_annotation,omitempty"`
// If present, safe-search annotation has completed successfully.
SafeSearchAnnotation *SafeSearchAnnotation `protobuf:"bytes,6,opt,name=safe_search_annotation,json=safeSearchAnnotation" json:"safe_search_annotation,omitempty"`
// If present, image properties were extracted successfully.
ImagePropertiesAnnotation *ImageProperties `protobuf:"bytes,8,opt,name=image_properties_annotation,json=imagePropertiesAnnotation" json:"image_properties_annotation,omitempty"`
// If present, crop hints have completed successfully.
CropHintsAnnotation *CropHintsAnnotation `protobuf:"bytes,11,opt,name=crop_hints_annotation,json=cropHintsAnnotation" json:"crop_hints_annotation,omitempty"`
// If present, web detection has completed successfully.
WebDetection *WebDetection `protobuf:"bytes,13,opt,name=web_detection,json=webDetection" json:"web_detection,omitempty"`
// If set, represents the error message for the operation.
// Note that filled-in image annotations are guaranteed to be
// correct, even when `error` is set.
Error *google_rpc.Status `protobuf:"bytes,9,opt,name=error" json:"error,omitempty"`
}
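Only the annotation groups that were requested and completed are populated, and per-image failures are reported in Error rather than as an RPC error; a sketch of inspecting a single response (assumes the alias vision plus fmt and log):
func printResponse(resp *vision.AnnotateImageResponse) {
	if resp.GetError() != nil {
		log.Printf("annotation failed: %v", resp.GetError())
		return
	}
	for _, label := range resp.GetLabelAnnotations() {
		fmt.Printf("label %q score=%.2f\n", label.GetDescription(), label.GetScore())
	}
	if full := resp.GetFullTextAnnotation(); full != nil {
		fmt.Println("OCR text:", full.GetText())
	}
}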
func (*AnnotateImageResponse) Descriptor ¶
func (*AnnotateImageResponse) Descriptor() ([]byte, []int)
func (*AnnotateImageResponse) GetCropHintsAnnotation ¶
func (m *AnnotateImageResponse) GetCropHintsAnnotation() *CropHintsAnnotation
func (*AnnotateImageResponse) GetError ¶
func (m *AnnotateImageResponse) GetError() *google_rpc.Status
func (*AnnotateImageResponse) GetFaceAnnotations ¶
func (m *AnnotateImageResponse) GetFaceAnnotations() []*FaceAnnotation
func (*AnnotateImageResponse) GetFullTextAnnotation ¶
func (m *AnnotateImageResponse) GetFullTextAnnotation() *TextAnnotation
func (*AnnotateImageResponse) GetImagePropertiesAnnotation ¶
func (m *AnnotateImageResponse) GetImagePropertiesAnnotation() *ImageProperties
func (*AnnotateImageResponse) GetLabelAnnotations ¶
func (m *AnnotateImageResponse) GetLabelAnnotations() []*EntityAnnotation
func (*AnnotateImageResponse) GetLandmarkAnnotations ¶
func (m *AnnotateImageResponse) GetLandmarkAnnotations() []*EntityAnnotation
func (*AnnotateImageResponse) GetLogoAnnotations ¶
func (m *AnnotateImageResponse) GetLogoAnnotations() []*EntityAnnotation
func (*AnnotateImageResponse) GetSafeSearchAnnotation ¶
func (m *AnnotateImageResponse) GetSafeSearchAnnotation() *SafeSearchAnnotation
func (*AnnotateImageResponse) GetTextAnnotations ¶
func (m *AnnotateImageResponse) GetTextAnnotations() []*EntityAnnotation
func (*AnnotateImageResponse) GetWebDetection ¶
func (m *AnnotateImageResponse) GetWebDetection() *WebDetection
func (*AnnotateImageResponse) ProtoMessage ¶
func (*AnnotateImageResponse) ProtoMessage()
func (*AnnotateImageResponse) Reset ¶
func (m *AnnotateImageResponse) Reset()
func (*AnnotateImageResponse) String ¶
func (m *AnnotateImageResponse) String() string
type BatchAnnotateImagesRequest ¶
Multiple image annotation requests are batched into a single service call.
type BatchAnnotateImagesRequest struct {
// Individual image annotation requests for this batch.
Requests []*AnnotateImageRequest `protobuf:"bytes,1,rep,name=requests" json:"requests,omitempty"`
}
func (*BatchAnnotateImagesRequest) Descriptor ¶
func (*BatchAnnotateImagesRequest) Descriptor() ([]byte, []int)
func (*BatchAnnotateImagesRequest) GetRequests ¶
func (m *BatchAnnotateImagesRequest) GetRequests() []*AnnotateImageRequest
func (*BatchAnnotateImagesRequest) ProtoMessage ¶
func (*BatchAnnotateImagesRequest) ProtoMessage()
func (*BatchAnnotateImagesRequest) Reset ¶
func (m *BatchAnnotateImagesRequest) Reset()
func (*BatchAnnotateImagesRequest) String ¶
func (m *BatchAnnotateImagesRequest) String() string
type BatchAnnotateImagesResponse ¶
Response to a batch image annotation request.
type BatchAnnotateImagesResponse struct {
// Individual responses to image annotation requests within the batch.
Responses []*AnnotateImageResponse `protobuf:"bytes,1,rep,name=responses" json:"responses,omitempty"`
}
func (*BatchAnnotateImagesResponse) Descriptor ¶
func (*BatchAnnotateImagesResponse) Descriptor() ([]byte, []int)
func (*BatchAnnotateImagesResponse) GetResponses ¶
func (m *BatchAnnotateImagesResponse) GetResponses() []*AnnotateImageResponse
func (*BatchAnnotateImagesResponse) ProtoMessage ¶
func (*BatchAnnotateImagesResponse) ProtoMessage()
func (*BatchAnnotateImagesResponse) Reset ¶
func (m *BatchAnnotateImagesResponse) Reset()
func (*BatchAnnotateImagesResponse) String ¶
func (m *BatchAnnotateImagesResponse) String() string
type Block ¶
Logical element on the page.
type Block struct {
// Additional information detected for the block.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"`
// The bounding box for the block.
// The vertices are in the order of top-left, top-right, bottom-right,
// bottom-left. When a rotation of the bounding box is detected the rotation
// is represented as around the top-left corner as defined when the text is
// read in the 'natural' orientation.
// For example:
// * when the text is horizontal it might look like:
// 0----1
// | |
// 3----2
// * when it's rotated 180 degrees around the top-left corner it becomes:
// 2----3
// | |
// 1----0
// and the vertex order will still be (0, 1, 2, 3).
BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"`
// List of paragraphs in this block (if this block is of type text).
Paragraphs []*Paragraph `protobuf:"bytes,3,rep,name=paragraphs" json:"paragraphs,omitempty"`
// Detected block type (text, image etc) for this block.
BlockType Block_BlockType `protobuf:"varint,4,opt,name=block_type,json=blockType,enum=google.cloud.vision.v1.Block_BlockType" json:"block_type,omitempty"`
}
func (*Block) Descriptor ¶
func (*Block) Descriptor() ([]byte, []int)
func (*Block) GetBlockType ¶
func (m *Block) GetBlockType() Block_BlockType
func (*Block) GetBoundingBox ¶
func (m *Block) GetBoundingBox() *BoundingPoly
func (*Block) GetParagraphs ¶
func (m *Block) GetParagraphs() []*Paragraph
func (*Block) GetProperty ¶
func (m *Block) GetProperty() *TextAnnotation_TextProperty
func (*Block) ProtoMessage ¶
func (*Block) ProtoMessage()
func (*Block) Reset ¶
func (m *Block) Reset()
func (*Block) String ¶
func (m *Block) String() string
type Block_BlockType ¶
Type of a block (text, image etc) as identified by OCR.
type Block_BlockType int32
const (
// Unknown block type.
Block_UNKNOWN Block_BlockType = 0
// Regular text block.
Block_TEXT Block_BlockType = 1
// Table block.
Block_TABLE Block_BlockType = 2
// Image block.
Block_PICTURE Block_BlockType = 3
// Horizontal/vertical line box.
Block_RULER Block_BlockType = 4
// Barcode block.
Block_BARCODE Block_BlockType = 5
)
func (Block_BlockType) EnumDescriptor ¶
func (Block_BlockType) EnumDescriptor() ([]byte, []int)
func (Block_BlockType) String ¶
func (x Block_BlockType) String() string
type BoundingPoly ¶
A bounding polygon for the detected image annotation.
type BoundingPoly struct {
// The bounding polygon vertices.
Vertices []*Vertex `protobuf:"bytes,1,rep,name=vertices" json:"vertices,omitempty"`
}
func (*BoundingPoly) Descriptor ¶
func (*BoundingPoly) Descriptor() ([]byte, []int)
func (*BoundingPoly) GetVertices ¶
func (m *BoundingPoly) GetVertices() []*Vertex
func (*BoundingPoly) ProtoMessage ¶
func (*BoundingPoly) ProtoMessage()
func (*BoundingPoly) Reset ¶
func (m *BoundingPoly) Reset()
func (*BoundingPoly) String ¶
func (m *BoundingPoly) String() string
type ColorInfo ¶
Color information consists of RGB channels, score, and the fraction of the image that the color occupies.
type ColorInfo struct {
// RGB components of the color.
Color *google_type.Color `protobuf:"bytes,1,opt,name=color" json:"color,omitempty"`
// Image-specific score for this color. Value in range [0, 1].
Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"`
// The fraction of pixels the color occupies in the image.
// Value in range [0, 1].
PixelFraction float32 `protobuf:"fixed32,3,opt,name=pixel_fraction,json=pixelFraction" json:"pixel_fraction,omitempty"`
}
func (*ColorInfo) Descriptor ¶
func (*ColorInfo) Descriptor() ([]byte, []int)
func (*ColorInfo) GetColor ¶
func (m *ColorInfo) GetColor() *google_type.Color
func (*ColorInfo) GetPixelFraction ¶
func (m *ColorInfo) GetPixelFraction() float32
func (*ColorInfo) GetScore ¶
func (m *ColorInfo) GetScore() float32
func (*ColorInfo) ProtoMessage ¶
func (*ColorInfo) ProtoMessage()
func (*ColorInfo) Reset ¶
func (m *ColorInfo) Reset()
func (*ColorInfo) String ¶
func (m *ColorInfo) String() string
type CropHint ¶
Single crop hint that is used to generate a new crop when serving an image.
type CropHint struct {
// The bounding polygon for the crop region. The coordinates of the bounding
// box are in the original image's scale, as returned in `ImageParams`.
BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"`
// Confidence of this being a salient region. Range [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"`
// Fraction of importance of this salient region with respect to the original
// image.
ImportanceFraction float32 `protobuf:"fixed32,3,opt,name=importance_fraction,json=importanceFraction" json:"importance_fraction,omitempty"`
}
func (*CropHint) Descriptor ¶
func (*CropHint) Descriptor() ([]byte, []int)
func (*CropHint) GetBoundingPoly ¶
func (m *CropHint) GetBoundingPoly() *BoundingPoly
func (*CropHint) GetConfidence ¶
func (m *CropHint) GetConfidence() float32
func (*CropHint) GetImportanceFraction ¶
func (m *CropHint) GetImportanceFraction() float32
func (*CropHint) ProtoMessage ¶
func (*CropHint) ProtoMessage()
func (*CropHint) Reset ¶
func (m *CropHint) Reset()
func (*CropHint) String ¶
func (m *CropHint) String() string
type CropHintsAnnotation ¶
Set of crop hints that are used to generate new crops when serving images.
type CropHintsAnnotation struct {
CropHints []*CropHint `protobuf:"bytes,1,rep,name=crop_hints,json=cropHints" json:"crop_hints,omitempty"`
}
func (*CropHintsAnnotation) Descriptor ¶
func (*CropHintsAnnotation) Descriptor() ([]byte, []int)
func (*CropHintsAnnotation) GetCropHints ¶
func (m *CropHintsAnnotation) GetCropHints() []*CropHint
func (*CropHintsAnnotation) ProtoMessage ¶
func (*CropHintsAnnotation) ProtoMessage()
func (*CropHintsAnnotation) Reset ¶
func (m *CropHintsAnnotation) Reset()
func (*CropHintsAnnotation) String ¶
func (m *CropHintsAnnotation) String() string
type CropHintsParams ¶
Parameters for crop hints annotation request.
type CropHintsParams struct {
// Aspect ratios in floats, representing the ratio of the width to the height
// of the image. For example, if the desired aspect ratio is 4/3, the
// corresponding float value should be 1.33333. If not specified, the
// best possible crop is returned. The number of provided aspect ratios is
// limited to a maximum of 16; any aspect ratios provided after the 16th are
// ignored.
AspectRatios []float32 `protobuf:"fixed32,1,rep,packed,name=aspect_ratios,json=aspectRatios" json:"aspect_ratios,omitempty"`
}
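Crop hint parameters ride along in the ImageContext of a request; a sketch of asking for 16:9 and 4:3 crops (ratios are width divided by height; vision is an assumed import alias):
func cropHintsContext() *vision.ImageContext {
	return &vision.ImageContext{
		CropHintsParams: &vision.CropHintsParams{
			// Width/height ratios; only the first 16 are considered.
			AspectRatios: []float32{1.7778, 1.3333},
		},
	}
}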
func (*CropHintsParams) Descriptor ¶
func (*CropHintsParams) Descriptor() ([]byte, []int)
func (*CropHintsParams) GetAspectRatios ¶
func (m *CropHintsParams) GetAspectRatios() []float32
func (*CropHintsParams) ProtoMessage ¶
func (*CropHintsParams) ProtoMessage()
func (*CropHintsParams) Reset ¶
func (m *CropHintsParams) Reset()
func (*CropHintsParams) String ¶
func (m *CropHintsParams) String() string
type DominantColorsAnnotation ¶
Set of dominant colors and their corresponding scores.
type DominantColorsAnnotation struct {
// RGB color values with their score and pixel fraction.
Colors []*ColorInfo `protobuf:"bytes,1,rep,name=colors" json:"colors,omitempty"`
}
func (*DominantColorsAnnotation) Descriptor ¶
func (*DominantColorsAnnotation) Descriptor() ([]byte, []int)
func (*DominantColorsAnnotation) GetColors ¶
func (m *DominantColorsAnnotation) GetColors() []*ColorInfo
func (*DominantColorsAnnotation) ProtoMessage ¶
func (*DominantColorsAnnotation) ProtoMessage()
func (*DominantColorsAnnotation) Reset ¶
func (m *DominantColorsAnnotation) Reset()
func (*DominantColorsAnnotation) String ¶
func (m *DominantColorsAnnotation) String() string
type EntityAnnotation ¶
Set of detected entity features.
type EntityAnnotation struct {
// Opaque entity ID. Some IDs may be available in
// [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
Mid string `protobuf:"bytes,1,opt,name=mid" json:"mid,omitempty"`
// The language code for the locale in which the entity textual
// `description` is expressed.
Locale string `protobuf:"bytes,2,opt,name=locale" json:"locale,omitempty"`
// Entity textual description, expressed in its `locale` language.
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
// Overall score of the result. Range [0, 1].
Score float32 `protobuf:"fixed32,4,opt,name=score" json:"score,omitempty"`
// The accuracy of the entity detection in an image.
// For example, for an image in which the "Eiffel Tower" entity is detected,
// this field represents the confidence that there is a tower in the query
// image. Range [0, 1].
Confidence float32 `protobuf:"fixed32,5,opt,name=confidence" json:"confidence,omitempty"`
// The relevancy of the ICA (Image Content Annotation) label to the
// image. For example, the relevancy of "tower" is likely higher to an image
// containing the detected "Eiffel Tower" than to an image containing a
// detected distant towering building, even though the confidence that
// there is a tower in each image may be the same. Range [0, 1].
Topicality float32 `protobuf:"fixed32,6,opt,name=topicality" json:"topicality,omitempty"`
// Image region to which this entity belongs. Currently not produced
// for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
// are produced for the entire text detected in an image region, followed by
// `boundingPoly`s for each word within the detected text.
BoundingPoly *BoundingPoly `protobuf:"bytes,7,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"`
// The location information for the detected entity. Multiple
// `LocationInfo` elements can be present because one location may
// indicate the location of the scene in the image, and another location
// may indicate the location of the place where the image was taken.
// Location information is usually present for landmarks.
Locations []*LocationInfo `protobuf:"bytes,8,rep,name=locations" json:"locations,omitempty"`
// Some entities may have optional user-supplied `Property` (name/value)
// fields, such as a score or string that qualifies the entity.
Properties []*Property `protobuf:"bytes,9,rep,name=properties" json:"properties,omitempty"`
}
func (*EntityAnnotation) Descriptor ¶
func (*EntityAnnotation) Descriptor() ([]byte, []int)
func (*EntityAnnotation) GetBoundingPoly ¶
func (m *EntityAnnotation) GetBoundingPoly() *BoundingPoly
func (*EntityAnnotation) GetConfidence ¶
func (m *EntityAnnotation) GetConfidence() float32
func (*EntityAnnotation) GetDescription ¶
func (m *EntityAnnotation) GetDescription() string
func (*EntityAnnotation) GetLocale ¶
func (m *EntityAnnotation) GetLocale() string
func (*EntityAnnotation) GetLocations ¶
func (m *EntityAnnotation) GetLocations() []*LocationInfo
func (*EntityAnnotation) GetMid ¶
func (m *EntityAnnotation) GetMid() string
func (*EntityAnnotation) GetProperties ¶
func (m *EntityAnnotation) GetProperties() []*Property
func (*EntityAnnotation) GetScore ¶
func (m *EntityAnnotation) GetScore() float32
func (*EntityAnnotation) GetTopicality ¶
func (m *EntityAnnotation) GetTopicality() float32
func (*EntityAnnotation) ProtoMessage ¶
func (*EntityAnnotation) ProtoMessage()
func (*EntityAnnotation) Reset ¶
func (m *EntityAnnotation) Reset()
func (*EntityAnnotation) String ¶
func (m *EntityAnnotation) String() string
type FaceAnnotation ¶
A face annotation object contains the results of face detection.
type FaceAnnotation struct {
// The bounding polygon around the face. The coordinates of the bounding box
// are in the original image's scale, as returned in `ImageParams`.
// The bounding box is computed to "frame" the face in accordance with human
// expectations. It is based on the landmarker results.
// Note that one or more x and/or y coordinates may not be generated in the
// `BoundingPoly` (the polygon will be unbounded) if only a partial face
// appears in the image to be annotated.
BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"`
// The `fd_bounding_poly` bounding polygon is tighter than the
// `boundingPoly`, and encloses only the skin part of the face. Typically, it
// is used to eliminate the face from any image analysis that detects the
// "amount of skin" visible in an image. It is not based on the
// landmarker results, only on the initial face detection, hence
// the <code>fd</code> (face detection) prefix.
FdBoundingPoly *BoundingPoly `protobuf:"bytes,2,opt,name=fd_bounding_poly,json=fdBoundingPoly" json:"fd_bounding_poly,omitempty"`
// Detected face landmarks.
Landmarks []*FaceAnnotation_Landmark `protobuf:"bytes,3,rep,name=landmarks" json:"landmarks,omitempty"`
// Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
// of the face relative to the image vertical about the axis perpendicular to
// the face. Range [-180,180].
RollAngle float32 `protobuf:"fixed32,4,opt,name=roll_angle,json=rollAngle" json:"roll_angle,omitempty"`
// Yaw angle, which indicates the leftward/rightward angle that the face is
// pointing relative to the vertical plane perpendicular to the image. Range
// [-180,180].
PanAngle float32 `protobuf:"fixed32,5,opt,name=pan_angle,json=panAngle" json:"pan_angle,omitempty"`
// Pitch angle, which indicates the upwards/downwards angle that the face is
// pointing relative to the image's horizontal plane. Range [-180,180].
TiltAngle float32 `protobuf:"fixed32,6,opt,name=tilt_angle,json=tiltAngle" json:"tilt_angle,omitempty"`
// Detection confidence. Range [0, 1].
DetectionConfidence float32 `protobuf:"fixed32,7,opt,name=detection_confidence,json=detectionConfidence" json:"detection_confidence,omitempty"`
// Face landmarking confidence. Range [0, 1].
LandmarkingConfidence float32 `protobuf:"fixed32,8,opt,name=landmarking_confidence,json=landmarkingConfidence" json:"landmarking_confidence,omitempty"`
// Joy likelihood.
JoyLikelihood Likelihood `protobuf:"varint,9,opt,name=joy_likelihood,json=joyLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"joy_likelihood,omitempty"`
// Sorrow likelihood.
SorrowLikelihood Likelihood `protobuf:"varint,10,opt,name=sorrow_likelihood,json=sorrowLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"sorrow_likelihood,omitempty"`
// Anger likelihood.
AngerLikelihood Likelihood `protobuf:"varint,11,opt,name=anger_likelihood,json=angerLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"anger_likelihood,omitempty"`
// Surprise likelihood.
SurpriseLikelihood Likelihood `protobuf:"varint,12,opt,name=surprise_likelihood,json=surpriseLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"surprise_likelihood,omitempty"`
// Under-exposed likelihood.
UnderExposedLikelihood Likelihood `protobuf:"varint,13,opt,name=under_exposed_likelihood,json=underExposedLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"under_exposed_likelihood,omitempty"`
// Blurred likelihood.
BlurredLikelihood Likelihood `protobuf:"varint,14,opt,name=blurred_likelihood,json=blurredLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"blurred_likelihood,omitempty"`
// Headwear likelihood.
HeadwearLikelihood Likelihood `protobuf:"varint,15,opt,name=headwear_likelihood,json=headwearLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"headwear_likelihood,omitempty"`
}
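A sketch of reading pose angles, an expression likelihood, and landmark positions from one detected face (assumes the alias vision and fmt):
func describeFace(face *vision.FaceAnnotation) {
	fmt.Printf("roll=%.1f pan=%.1f tilt=%.1f confidence=%.2f\n",
		face.GetRollAngle(), face.GetPanAngle(), face.GetTiltAngle(), face.GetDetectionConfidence())
	if face.GetJoyLikelihood() >= vision.Likelihood_LIKELY {
		fmt.Println("face is probably joyful")
	}
	for _, lm := range face.GetLandmarks() {
		p := lm.GetPosition()
		fmt.Printf("%v at (%.1f, %.1f, %.1f)\n", lm.GetType(), p.GetX(), p.GetY(), p.GetZ())
	}
}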
func (*FaceAnnotation) Descriptor ¶
func (*FaceAnnotation) Descriptor() ([]byte, []int)
func (*FaceAnnotation) GetAngerLikelihood ¶
func (m *FaceAnnotation) GetAngerLikelihood() Likelihood
func (*FaceAnnotation) GetBlurredLikelihood ¶
func (m *FaceAnnotation) GetBlurredLikelihood() Likelihood
func (*FaceAnnotation) GetBoundingPoly ¶
func (m *FaceAnnotation) GetBoundingPoly() *BoundingPoly
func (*FaceAnnotation) GetDetectionConfidence ¶
func (m *FaceAnnotation) GetDetectionConfidence() float32
func (*FaceAnnotation) GetFdBoundingPoly ¶
func (m *FaceAnnotation) GetFdBoundingPoly() *BoundingPoly
func (*FaceAnnotation) GetHeadwearLikelihood ¶
func (m *FaceAnnotation) GetHeadwearLikelihood() Likelihood
func (*FaceAnnotation) GetJoyLikelihood ¶
func (m *FaceAnnotation) GetJoyLikelihood() Likelihood
func (*FaceAnnotation) GetLandmarkingConfidence ¶
func (m *FaceAnnotation) GetLandmarkingConfidence() float32
func (*FaceAnnotation) GetLandmarks ¶
func (m *FaceAnnotation) GetLandmarks() []*FaceAnnotation_Landmark
func (*FaceAnnotation) GetPanAngle ¶
func (m *FaceAnnotation) GetPanAngle() float32
func (*FaceAnnotation) GetRollAngle ¶
func (m *FaceAnnotation) GetRollAngle() float32
func (*FaceAnnotation) GetSorrowLikelihood ¶
func (m *FaceAnnotation) GetSorrowLikelihood() Likelihood
func (*FaceAnnotation) GetSurpriseLikelihood ¶
func (m *FaceAnnotation) GetSurpriseLikelihood() Likelihood
func (*FaceAnnotation) GetTiltAngle ¶
func (m *FaceAnnotation) GetTiltAngle() float32
func (*FaceAnnotation) GetUnderExposedLikelihood ¶
func (m *FaceAnnotation) GetUnderExposedLikelihood() Likelihood
func (*FaceAnnotation) ProtoMessage ¶
func (*FaceAnnotation) ProtoMessage()
func (*FaceAnnotation) Reset ¶
func (m *FaceAnnotation) Reset()
func (*FaceAnnotation) String ¶
func (m *FaceAnnotation) String() string
type FaceAnnotation_Landmark ¶
A face-specific landmark (for example, a face feature). Landmark positions may fall outside the bounds of the image if the face is near one or more edges of the image. Therefore it is NOT guaranteed that `0 <= x < width` or `0 <= y < height`.
type FaceAnnotation_Landmark struct {
// Face landmark type.
Type FaceAnnotation_Landmark_Type `protobuf:"varint,3,opt,name=type,enum=google.cloud.vision.v1.FaceAnnotation_Landmark_Type" json:"type,omitempty"`
// Face landmark position.
Position *Position `protobuf:"bytes,4,opt,name=position" json:"position,omitempty"`
}
func (*FaceAnnotation_Landmark) Descriptor ¶
func (*FaceAnnotation_Landmark) Descriptor() ([]byte, []int)
func (*FaceAnnotation_Landmark) GetPosition ¶
func (m *FaceAnnotation_Landmark) GetPosition() *Position
func (*FaceAnnotation_Landmark) GetType ¶
func (m *FaceAnnotation_Landmark) GetType() FaceAnnotation_Landmark_Type
func (*FaceAnnotation_Landmark) ProtoMessage ¶
func (*FaceAnnotation_Landmark) ProtoMessage()
func (*FaceAnnotation_Landmark) Reset ¶
func (m *FaceAnnotation_Landmark) Reset()
func (*FaceAnnotation_Landmark) String ¶
func (m *FaceAnnotation_Landmark) String() string
type FaceAnnotation_Landmark_Type ¶
Face landmark (feature) type. Left and right are defined from the vantage of the viewer of the image without considering mirror projections typical of photos. So, `LEFT_EYE`, typically, is the person's right eye.
type FaceAnnotation_Landmark_Type int32
const (
// Unknown face landmark detected. Should not be filled.
FaceAnnotation_Landmark_UNKNOWN_LANDMARK FaceAnnotation_Landmark_Type = 0
// Left eye.
FaceAnnotation_Landmark_LEFT_EYE FaceAnnotation_Landmark_Type = 1
// Right eye.
FaceAnnotation_Landmark_RIGHT_EYE FaceAnnotation_Landmark_Type = 2
// Left of left eyebrow.
FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW FaceAnnotation_Landmark_Type = 3
// Right of left eyebrow.
FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW FaceAnnotation_Landmark_Type = 4
// Left of right eyebrow.
FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW FaceAnnotation_Landmark_Type = 5
// Right of right eyebrow.
FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW FaceAnnotation_Landmark_Type = 6
// Midpoint between eyes.
FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES FaceAnnotation_Landmark_Type = 7
// Nose tip.
FaceAnnotation_Landmark_NOSE_TIP FaceAnnotation_Landmark_Type = 8
// Upper lip.
FaceAnnotation_Landmark_UPPER_LIP FaceAnnotation_Landmark_Type = 9
// Lower lip.
FaceAnnotation_Landmark_LOWER_LIP FaceAnnotation_Landmark_Type = 10
// Mouth left.
FaceAnnotation_Landmark_MOUTH_LEFT FaceAnnotation_Landmark_Type = 11
// Mouth right.
FaceAnnotation_Landmark_MOUTH_RIGHT FaceAnnotation_Landmark_Type = 12
// Mouth center.
FaceAnnotation_Landmark_MOUTH_CENTER FaceAnnotation_Landmark_Type = 13
// Nose, bottom right.
FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT FaceAnnotation_Landmark_Type = 14
// Nose, bottom left.
FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT FaceAnnotation_Landmark_Type = 15
// Nose, bottom center.
FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER FaceAnnotation_Landmark_Type = 16
// Left eye, top boundary.
FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY FaceAnnotation_Landmark_Type = 17
// Left eye, right corner.
FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER FaceAnnotation_Landmark_Type = 18
// Left eye, bottom boundary.
FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY FaceAnnotation_Landmark_Type = 19
// Left eye, left corner.
FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER FaceAnnotation_Landmark_Type = 20
// Right eye, top boundary.
FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY FaceAnnotation_Landmark_Type = 21
// Right eye, right corner.
FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER FaceAnnotation_Landmark_Type = 22
// Right eye, bottom boundary.
FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY FaceAnnotation_Landmark_Type = 23
// Right eye, left corner.
FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER FaceAnnotation_Landmark_Type = 24
// Left eyebrow, upper midpoint.
FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT FaceAnnotation_Landmark_Type = 25
// Right eyebrow, upper midpoint.
FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT FaceAnnotation_Landmark_Type = 26
// Left ear tragion.
FaceAnnotation_Landmark_LEFT_EAR_TRAGION FaceAnnotation_Landmark_Type = 27
// Right ear tragion.
FaceAnnotation_Landmark_RIGHT_EAR_TRAGION FaceAnnotation_Landmark_Type = 28
// Left eye pupil.
FaceAnnotation_Landmark_LEFT_EYE_PUPIL FaceAnnotation_Landmark_Type = 29
// Right eye pupil.
FaceAnnotation_Landmark_RIGHT_EYE_PUPIL FaceAnnotation_Landmark_Type = 30
// Forehead glabella.
FaceAnnotation_Landmark_FOREHEAD_GLABELLA FaceAnnotation_Landmark_Type = 31
// Chin gnathion.
FaceAnnotation_Landmark_CHIN_GNATHION FaceAnnotation_Landmark_Type = 32
// Chin left gonion.
FaceAnnotation_Landmark_CHIN_LEFT_GONION FaceAnnotation_Landmark_Type = 33
// Chin right gonion.
FaceAnnotation_Landmark_CHIN_RIGHT_GONION FaceAnnotation_Landmark_Type = 34
)
func (FaceAnnotation_Landmark_Type) EnumDescriptor ¶
func (FaceAnnotation_Landmark_Type) EnumDescriptor() ([]byte, []int)
func (FaceAnnotation_Landmark_Type) String ¶
func (x FaceAnnotation_Landmark_Type) String() string
type Feature ¶
Users describe the type of Google Cloud Vision API tasks to perform over images by using Features. Each Feature indicates a type of image detection task to perform. Features encode the Cloud Vision API vertical to operate on and the number of top-scoring results to return.
type Feature struct {
// The feature type.
Type Feature_Type `protobuf:"varint,1,opt,name=type,enum=google.cloud.vision.v1.Feature_Type" json:"type,omitempty"`
// Maximum number of results of this type.
MaxResults int32 `protobuf:"varint,2,opt,name=max_results,json=maxResults" json:"max_results,omitempty"`
}
func (*Feature) Descriptor ¶
func (*Feature) Descriptor() ([]byte, []int)
func (*Feature) GetMaxResults ¶
func (m *Feature) GetMaxResults() int32
func (*Feature) GetType ¶
func (m *Feature) GetType() Feature_Type
func (*Feature) ProtoMessage ¶
func (*Feature) ProtoMessage()
func (*Feature) Reset ¶
func (m *Feature) Reset()
func (*Feature) String ¶
func (m *Feature) String() string
type Feature_Type ¶
Type of image feature.
type Feature_Type int32
const (
// Unspecified feature type.
Feature_TYPE_UNSPECIFIED Feature_Type = 0
// Run face detection.
Feature_FACE_DETECTION Feature_Type = 1
// Run landmark detection.
Feature_LANDMARK_DETECTION Feature_Type = 2
// Run logo detection.
Feature_LOGO_DETECTION Feature_Type = 3
// Run label detection.
Feature_LABEL_DETECTION Feature_Type = 4
// Run OCR.
Feature_TEXT_DETECTION Feature_Type = 5
// Run dense text document OCR. Takes precedence when both
// DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present.
Feature_DOCUMENT_TEXT_DETECTION Feature_Type = 11
// Run computer vision models to compute image safe-search properties.
Feature_SAFE_SEARCH_DETECTION Feature_Type = 6
// Compute a set of image properties, such as the image's dominant colors.
Feature_IMAGE_PROPERTIES Feature_Type = 7
// Run crop hints.
Feature_CROP_HINTS Feature_Type = 9
// Run web detection.
Feature_WEB_DETECTION Feature_Type = 10
)
func (Feature_Type) EnumDescriptor ¶
func (Feature_Type) EnumDescriptor() ([]byte, []int)
func (Feature_Type) String ¶
func (x Feature_Type) String() string
type Image ¶
Client image to perform Google Cloud Vision API tasks over.
type Image struct {
// Image content, represented as a stream of bytes.
// Note: as with all `bytes` fields, protobuffers use a pure binary
// representation, whereas JSON representations use base64.
Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
// Google Cloud Storage image location. If both `content` and `source`
// are provided for an image, `content` takes precedence and is
// used to perform the image annotation request.
Source *ImageSource `protobuf:"bytes,2,opt,name=source" json:"source,omitempty"`
}
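An Image carries either inline bytes or a pointer to an external source, and Content wins when both are set; a sketch of the two forms (the file path and bucket are placeholders; assumes io/ioutil and the alias vision):
func imageFromFile(path string) (*vision.Image, error) {
	data, err := ioutil.ReadFile(path) // raw bytes; base64 applies only to the JSON encoding
	if err != nil {
		return nil, err
	}
	return &vision.Image{Content: data}, nil
}

func imageFromGCS() *vision.Image {
	return &vision.Image{
		Source: &vision.ImageSource{GcsImageUri: "gs://my-bucket/photo.jpg"}, // placeholder object
	}
}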
func (*Image) Descriptor ¶
func (*Image) Descriptor() ([]byte, []int)
func (*Image) GetContent ¶
func (m *Image) GetContent() []byte
func (*Image) GetSource ¶
func (m *Image) GetSource() *ImageSource
func (*Image) ProtoMessage ¶
func (*Image) ProtoMessage()
func (*Image) Reset ¶
func (m *Image) Reset()
func (*Image) String ¶
func (m *Image) String() string
type ImageAnnotatorClient ¶
type ImageAnnotatorClient interface {
// Run image detection and annotation for a batch of images.
BatchAnnotateImages(ctx context.Context, in *BatchAnnotateImagesRequest, opts ...grpc.CallOption) (*BatchAnnotateImagesResponse, error)
}
func NewImageAnnotatorClient ¶
func NewImageAnnotatorClient(cc *grpc.ClientConn) ImageAnnotatorClient
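A hedged end-to-end sketch of calling the service through this generated client. The dial options are placeholders only: a real call to the hosted API needs TLS and OAuth2 credentials, which are omitted here. Assumes the alias vision plus context, fmt, log, and google.golang.org/grpc:
func annotateURL(url string) error {
	conn, err := grpc.Dial("vision.googleapis.com:443", grpc.WithInsecure()) // placeholder; real use needs transport security and credentials
	if err != nil {
		return err
	}
	defer conn.Close()

	client := vision.NewImageAnnotatorClient(conn)
	resp, err := client.BatchAnnotateImages(context.Background(), &vision.BatchAnnotateImagesRequest{
		Requests: []*vision.AnnotateImageRequest{{
			Image:    &vision.Image{Source: &vision.ImageSource{ImageUri: url}},
			Features: []*vision.Feature{{Type: vision.Feature_LABEL_DETECTION, MaxResults: 3}},
		}},
	})
	if err != nil {
		return err
	}
	for _, r := range resp.GetResponses() {
		if r.GetError() != nil {
			log.Printf("per-image error: %v", r.GetError())
			continue
		}
		for _, label := range r.GetLabelAnnotations() {
			fmt.Println(label.GetDescription(), label.GetScore())
		}
	}
	return nil
}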
type ImageAnnotatorServer ¶
type ImageAnnotatorServer interface {
// Run image detection and annotation for a batch of images.
BatchAnnotateImages(context.Context, *BatchAnnotateImagesRequest) (*BatchAnnotateImagesResponse, error)
}
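For tests or proxies the interface can be implemented and registered on a grpc.Server; a minimal sketch (the type name and listener address are illustrative, and the context import must match the package used by the generated code, golang.org/x/net/context on older toolchains):
type fakeAnnotator struct{}

func (fakeAnnotator) BatchAnnotateImages(ctx context.Context, req *vision.BatchAnnotateImagesRequest) (*vision.BatchAnnotateImagesResponse, error) {
	// Return one empty response per request.
	out := &vision.BatchAnnotateImagesResponse{}
	for range req.GetRequests() {
		out.Responses = append(out.Responses, &vision.AnnotateImageResponse{})
	}
	return out, nil
}

func serveFake() error {
	lis, err := net.Listen("tcp", "localhost:0") // illustrative address
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	vision.RegisterImageAnnotatorServer(s, fakeAnnotator{})
	return s.Serve(lis)
}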
type ImageContext ¶
Image context and/or feature-specific parameters.
type ImageContext struct {
// lat/long rectangle that specifies the location of the image.
LatLongRect *LatLongRect `protobuf:"bytes,1,opt,name=lat_long_rect,json=latLongRect" json:"lat_long_rect,omitempty"`
// List of languages to use for TEXT_DETECTION. In most cases, an empty value
// yields the best results since it enables automatic language detection. For
// languages based on the Latin alphabet, setting `language_hints` is not
// needed. In rare cases, when the language of the text in the image is known,
// setting a hint will help get better results (although it will be a
// significant hindrance if the hint is wrong). Text detection returns an
// error if one or more of the specified languages is not one of the
// [supported languages](/vision/docs/languages).
LanguageHints []string `protobuf:"bytes,2,rep,name=language_hints,json=languageHints" json:"language_hints,omitempty"`
// Parameters for crop hints annotation request.
CropHintsParams *CropHintsParams `protobuf:"bytes,4,opt,name=crop_hints_params,json=cropHintsParams" json:"crop_hints_params,omitempty"`
}
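A sketch of supplying an OCR language hint through the image context; hints are usually unnecessary and the value here is illustrative (assumes the alias vision):
func japaneseHintContext() *vision.ImageContext {
	return &vision.ImageContext{
		LanguageHints: []string{"ja"}, // set only when the text language is known
	}
}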
func (*ImageContext) Descriptor ¶
func (*ImageContext) Descriptor() ([]byte, []int)
func (*ImageContext) GetCropHintsParams ¶
func (m *ImageContext) GetCropHintsParams() *CropHintsParams
func (*ImageContext) GetLanguageHints ¶
func (m *ImageContext) GetLanguageHints() []string
func (*ImageContext) GetLatLongRect ¶
func (m *ImageContext) GetLatLongRect() *LatLongRect
func (*ImageContext) ProtoMessage ¶
func (*ImageContext) ProtoMessage()
func (*ImageContext) Reset ¶
func (m *ImageContext) Reset()
func (*ImageContext) String ¶
func (m *ImageContext) String() string
type ImageProperties ¶
Stores image properties, such as dominant colors.
type ImageProperties struct {
// If present, dominant colors completed successfully.
DominantColors *DominantColorsAnnotation `protobuf:"bytes,1,opt,name=dominant_colors,json=dominantColors" json:"dominant_colors,omitempty"`
}
func (*ImageProperties) Descriptor ¶
func (*ImageProperties) Descriptor() ([]byte, []int)
func (*ImageProperties) GetDominantColors ¶
func (m *ImageProperties) GetDominantColors() *DominantColorsAnnotation
func (*ImageProperties) ProtoMessage ¶
func (*ImageProperties) ProtoMessage()
func (*ImageProperties) Reset ¶
func (m *ImageProperties) Reset()
func (*ImageProperties) String ¶
func (m *ImageProperties) String() string
type ImageSource ¶
External image source (Google Cloud Storage image location).
type ImageSource struct {
// NOTE: For new code `image_uri` below is preferred.
// Google Cloud Storage image URI, which must be in the following form:
// `gs://bucket_name/object_name` (for details, see
// [Google Cloud Storage Request
// URIs](https://cloud.google.com/storage/docs/reference-uris)).
// NOTE: Cloud Storage object versioning is not supported.
GcsImageUri string `protobuf:"bytes,1,opt,name=gcs_image_uri,json=gcsImageUri" json:"gcs_image_uri,omitempty"`
// Image URI which supports:
// 1) Google Cloud Storage image URI, which must be in the following form:
// `gs://bucket_name/object_name` (for details, see
// [Google Cloud Storage Request
// URIs](https://cloud.google.com/storage/docs/reference-uris)).
// NOTE: Cloud Storage object versioning is not supported.
// 2) Publicly accessible image HTTP/HTTPS URL.
// This is preferred over the legacy `gcs_image_uri` above. When both
// `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
// precedence.
ImageUri string `protobuf:"bytes,2,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"`
}
func (*ImageSource) Descriptor ¶
func (*ImageSource) Descriptor() ([]byte, []int)
func (*ImageSource) GetGcsImageUri ¶
func (m *ImageSource) GetGcsImageUri() string
func (*ImageSource) GetImageUri ¶
func (m *ImageSource) GetImageUri() string
func (*ImageSource) ProtoMessage ¶
func (*ImageSource) ProtoMessage()
func (*ImageSource) Reset ¶
func (m *ImageSource) Reset()
func (*ImageSource) String ¶
func (m *ImageSource) String() string
type LatLongRect ¶
Rectangle determined by min and max `LatLng` pairs.
type LatLongRect struct {
// Min lat/long pair.
MinLatLng *google_type1.LatLng `protobuf:"bytes,1,opt,name=min_lat_lng,json=minLatLng" json:"min_lat_lng,omitempty"`
// Max lat/long pair.
MaxLatLng *google_type1.LatLng `protobuf:"bytes,2,opt,name=max_lat_lng,json=maxLatLng" json:"max_lat_lng,omitempty"`
}
func (*LatLongRect) Descriptor ¶
func (*LatLongRect) Descriptor() ([]byte, []int)
func (*LatLongRect) GetMaxLatLng ¶
func (m *LatLongRect) GetMaxLatLng() *google_type1.LatLng
func (*LatLongRect) GetMinLatLng ¶
func (m *LatLongRect) GetMinLatLng() *google_type1.LatLng
func (*LatLongRect) ProtoMessage ¶
func (*LatLongRect) ProtoMessage()
func (*LatLongRect) Reset ¶
func (m *LatLongRect) Reset()
func (*LatLongRect) String ¶
func (m *LatLongRect) String() string
type Likelihood ¶
A bucketized representation of likelihood, which is intended to give clients highly stable results across model upgrades.
type Likelihood int32
const (
// Unknown likelihood.
Likelihood_UNKNOWN Likelihood = 0
// It is very unlikely that the image belongs to the specified vertical.
Likelihood_VERY_UNLIKELY Likelihood = 1
// It is unlikely that the image belongs to the specified vertical.
Likelihood_UNLIKELY Likelihood = 2
// It is possible that the image belongs to the specified vertical.
Likelihood_POSSIBLE Likelihood = 3
// It is likely that the image belongs to the specified vertical.
Likelihood_LIKELY Likelihood = 4
// It is very likely that the image belongs to the specified vertical.
Likelihood_VERY_LIKELY Likelihood = 5
)
func (Likelihood) EnumDescriptor ¶
func (Likelihood) EnumDescriptor() ([]byte, []int)
func (Likelihood) String ¶
func (x Likelihood) String() string
type LocationInfo ¶
Detected entity location information.
type LocationInfo struct {
// lat/long location coordinates.
LatLng *google_type1.LatLng `protobuf:"bytes,1,opt,name=lat_lng,json=latLng" json:"lat_lng,omitempty"`
}
func (*LocationInfo) Descriptor ¶
func (*LocationInfo) Descriptor() ([]byte, []int)
func (*LocationInfo) GetLatLng ¶
func (m *LocationInfo) GetLatLng() *google_type1.LatLng
func (*LocationInfo) ProtoMessage ¶
func (*LocationInfo) ProtoMessage()
func (*LocationInfo) Reset ¶
func (m *LocationInfo) Reset()
func (*LocationInfo) String ¶
func (m *LocationInfo) String() string
type Page ¶
Detected page from OCR.
type Page struct {
// Additional information detected on the page.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"`
// Page width in pixels.
Width int32 `protobuf:"varint,2,opt,name=width" json:"width,omitempty"`
// Page height in pixels.
Height int32 `protobuf:"varint,3,opt,name=height" json:"height,omitempty"`
// List of blocks of text, images etc on this page.
Blocks []*Block `protobuf:"bytes,4,rep,name=blocks" json:"blocks,omitempty"`
}
func (*Page) Descriptor ¶
func (*Page) Descriptor() ([]byte, []int)
func (*Page) GetBlocks ¶
func (m *Page) GetBlocks() []*Block
func (*Page) GetHeight ¶
func (m *Page) GetHeight() int32
func (*Page) GetProperty ¶
func (m *Page) GetProperty() *TextAnnotation_TextProperty
func (*Page) GetWidth ¶
func (m *Page) GetWidth() int32
func (*Page) ProtoMessage ¶
func (*Page) ProtoMessage()
func (*Page) Reset ¶
func (m *Page) Reset()
func (*Page) String ¶
func (m *Page) String() string
type Paragraph ¶
Structural unit of text representing a number of words in certain order.
type Paragraph struct {
// Additional information detected for the paragraph.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"`
// The bounding box for the paragraph.
// The vertices are in the order of top-left, top-right, bottom-right,
// bottom-left. When a rotation of the bounding box is detected the rotation
// is represented as around the top-left corner as defined when the text is
// read in the 'natural' orientation.
// For example:
// * when the text is horizontal it might look like:
// 0----1
// | |
// 3----2
// * when it's rotated 180 degrees around the top-left corner it becomes:
// 2----3
// | |
// 1----0
// and the vertex order will still be (0, 1, 2, 3).
BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"`
// List of words in this paragraph.
Words []*Word `protobuf:"bytes,3,rep,name=words" json:"words,omitempty"`
}
func (*Paragraph) Descriptor ¶
func (*Paragraph) Descriptor() ([]byte, []int)
func (*Paragraph) GetBoundingBox ¶
func (m *Paragraph) GetBoundingBox() *BoundingPoly
func (*Paragraph) GetProperty ¶
func (m *Paragraph) GetProperty() *TextAnnotation_TextProperty
func (*Paragraph) GetWords ¶
func (m *Paragraph) GetWords() []*Word
func (*Paragraph) ProtoMessage ¶
func (*Paragraph) ProtoMessage()
func (*Paragraph) Reset ¶
func (m *Paragraph) Reset()
func (*Paragraph) String ¶
func (m *Paragraph) String() string
type Position ¶
A 3D position in the image, used primarily for Face detection landmarks. A valid Position must have both x and y coordinates. The position coordinates are in the same scale as the original image.
type Position struct {
// X coordinate.
X float32 `protobuf:"fixed32,1,opt,name=x" json:"x,omitempty"`
// Y coordinate.
Y float32 `protobuf:"fixed32,2,opt,name=y" json:"y,omitempty"`
// Z coordinate (or depth).
Z float32 `protobuf:"fixed32,3,opt,name=z" json:"z,omitempty"`
}
func (*Position) Descriptor ¶
func (*Position) Descriptor() ([]byte, []int)
func (*Position) GetX ¶
func (m *Position) GetX() float32
func (*Position) GetY ¶
func (m *Position) GetY() float32
func (*Position) GetZ ¶
func (m *Position) GetZ() float32
func (*Position) ProtoMessage ¶
func (*Position) ProtoMessage()
func (*Position) Reset ¶
func (m *Position) Reset()
func (*Position) String ¶
func (m *Position) String() string
type Property ¶
A `Property` consists of a user-supplied name/value pair.
type Property struct {
// Name of the property.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Value of the property.
Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
func (*Property) Descriptor ¶
func (*Property) Descriptor() ([]byte, []int)
func (*Property) GetName ¶
func (m *Property) GetName() string
func (*Property) GetValue ¶
func (m *Property) GetValue() string
func (*Property) ProtoMessage ¶
func (*Property) ProtoMessage()
func (*Property) Reset ¶
func (m *Property) Reset()
func (*Property) String ¶
func (m *Property) String() string
type SafeSearchAnnotation ¶
Set of features pertaining to the image, computed by computer vision methods over safe-search verticals (for example, adult, spoof, medical, violence).
type SafeSearchAnnotation struct {
// Represents the adult content likelihood for the image.
Adult Likelihood `protobuf:"varint,1,opt,name=adult,enum=google.cloud.vision.v1.Likelihood" json:"adult,omitempty"`
// Spoof likelihood. The likelihood that a modification
// was made to the image's canonical version to make it appear
// funny or offensive.
Spoof Likelihood `protobuf:"varint,2,opt,name=spoof,enum=google.cloud.vision.v1.Likelihood" json:"spoof,omitempty"`
// Likelihood that this is a medical image.
Medical Likelihood `protobuf:"varint,3,opt,name=medical,enum=google.cloud.vision.v1.Likelihood" json:"medical,omitempty"`
// Violence likelihood.
Violence Likelihood `protobuf:"varint,4,opt,name=violence,enum=google.cloud.vision.v1.Likelihood" json:"violence,omitempty"`
}
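Likelihood values are ordered buckets (UNKNOWN is 0 and VERY_LIKELY is 5), so a threshold comparison is a simple way to consume them; a sketch (assumes the alias vision):
func flagged(ss *vision.SafeSearchAnnotation) bool {
	return ss.GetAdult() >= vision.Likelihood_LIKELY || ss.GetViolence() >= vision.Likelihood_LIKELY
}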
func (*SafeSearchAnnotation) Descriptor ¶
func (*SafeSearchAnnotation) Descriptor() ([]byte, []int)
func (*SafeSearchAnnotation) GetAdult ¶
func (m *SafeSearchAnnotation) GetAdult() Likelihood
func (*SafeSearchAnnotation) GetMedical ¶
func (m *SafeSearchAnnotation) GetMedical() Likelihood
func (*SafeSearchAnnotation) GetSpoof ¶
func (m *SafeSearchAnnotation) GetSpoof() Likelihood
func (*SafeSearchAnnotation) GetViolence ¶
func (m *SafeSearchAnnotation) GetViolence() Likelihood
func (*SafeSearchAnnotation) ProtoMessage ¶
func (*SafeSearchAnnotation) ProtoMessage()
func (*SafeSearchAnnotation) Reset ¶
func (m *SafeSearchAnnotation) Reset()
func (*SafeSearchAnnotation) String ¶
func (m *SafeSearchAnnotation) String() string
type Symbol ¶
A single symbol representation.
type Symbol struct {
// Additional information detected for the symbol.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"`
// The bounding box for the symbol.
// The vertices are in the order of top-left, top-right, bottom-right,
// bottom-left. When a rotation of the bounding box is detected the rotation
// is represented as around the top-left corner as defined when the text is
// read in the 'natural' orientation.
// For example:
// * when the text is horizontal it might look like:
// 0----1
// | |
// 3----2
// * when it's rotated 180 degrees around the top-left corner it becomes:
// 2----3
// | |
// 1----0
// and the vertex order will still be (0, 1, 2, 3).
BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"`
// The actual UTF-8 representation of the symbol.
Text string `protobuf:"bytes,3,opt,name=text" json:"text,omitempty"`
}
func (*Symbol) Descriptor ¶
func (*Symbol) Descriptor() ([]byte, []int)
func (*Symbol) GetBoundingBox ¶
func (m *Symbol) GetBoundingBox() *BoundingPoly
func (*Symbol) GetProperty ¶
func (m *Symbol) GetProperty() *TextAnnotation_TextProperty
func (*Symbol) GetText ¶
func (m *Symbol) GetText() string
func (*Symbol) ProtoMessage ¶
func (*Symbol) ProtoMessage()
func (*Symbol) Reset ¶
func (m *Symbol) Reset()
func (*Symbol) String ¶
func (m *Symbol) String() string
type TextAnnotation ¶
TextAnnotation contains a structured representation of OCR extracted text. The hierarchy of an OCR extracted text structure is like this:
TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
Each structural component, starting from Page, may further have its own properties. Properties describe detected languages, breaks, etc. Please refer to the TextAnnotation.TextProperty message definition below for more detail.
type TextAnnotation struct {
// List of pages detected by OCR.
Pages []*Page `protobuf:"bytes,1,rep,name=pages" json:"pages,omitempty"`
// UTF-8 text detected on the pages.
Text string `protobuf:"bytes,2,opt,name=text" json:"text,omitempty"`
}
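The structured result nests Page, Block, Paragraph, Word, and Symbol; a sketch of walking the hierarchy to reassemble the text of each TEXT block, ignoring DetectedBreak information for brevity (assumes the alias vision plus bytes and fmt):
func dumpBlocks(ta *vision.TextAnnotation) {
	for _, page := range ta.GetPages() {
		for _, block := range page.GetBlocks() {
			if block.GetBlockType() != vision.Block_TEXT {
				continue
			}
			var buf bytes.Buffer
			for _, para := range block.GetParagraphs() {
				for _, word := range para.GetWords() {
					for _, sym := range word.GetSymbols() {
						buf.WriteString(sym.GetText())
					}
					buf.WriteByte(' ') // real code would honor the word's DetectedBreak instead
				}
			}
			fmt.Println(buf.String())
		}
	}
}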
func (*TextAnnotation) Descriptor ¶
func (*TextAnnotation) Descriptor() ([]byte, []int)
func (*TextAnnotation) GetPages ¶
func (m *TextAnnotation) GetPages() []*Page
func (*TextAnnotation) GetText ¶
func (m *TextAnnotation) GetText() string
func (*TextAnnotation) ProtoMessage ¶
func (*TextAnnotation) ProtoMessage()
func (*TextAnnotation) Reset ¶
func (m *TextAnnotation) Reset()
func (*TextAnnotation) String ¶
func (m *TextAnnotation) String() string
type TextAnnotation_DetectedBreak ¶
Detected start or end of a structural component.
type TextAnnotation_DetectedBreak struct {
Type TextAnnotation_DetectedBreak_BreakType `protobuf:"varint,1,opt,name=type,enum=google.cloud.vision.v1.TextAnnotation_DetectedBreak_BreakType" json:"type,omitempty"`
// True if break prepends the element.
IsPrefix bool `protobuf:"varint,2,opt,name=is_prefix,json=isPrefix" json:"is_prefix,omitempty"`
}
func (*TextAnnotation_DetectedBreak) Descriptor ¶
func (*TextAnnotation_DetectedBreak) Descriptor() ([]byte, []int)
func (*TextAnnotation_DetectedBreak) GetIsPrefix ¶
func (m *TextAnnotation_DetectedBreak) GetIsPrefix() bool
func (*TextAnnotation_DetectedBreak) GetType ¶
func (m *TextAnnotation_DetectedBreak) GetType() TextAnnotation_DetectedBreak_BreakType
func (*TextAnnotation_DetectedBreak) ProtoMessage ¶
func (*TextAnnotation_DetectedBreak) ProtoMessage()
func (*TextAnnotation_DetectedBreak) Reset ¶
func (m *TextAnnotation_DetectedBreak) Reset()
func (*TextAnnotation_DetectedBreak) String ¶
func (m *TextAnnotation_DetectedBreak) String() string
type TextAnnotation_DetectedBreak_BreakType ¶
Enum to denote the type of break found. New line, space etc.
type TextAnnotation_DetectedBreak_BreakType int32
const (
// Unknown break label type.
TextAnnotation_DetectedBreak_UNKNOWN TextAnnotation_DetectedBreak_BreakType = 0
// Regular space.
TextAnnotation_DetectedBreak_SPACE TextAnnotation_DetectedBreak_BreakType = 1
// Sure space (very wide).
TextAnnotation_DetectedBreak_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 2
// Line-wrapping break.
TextAnnotation_DetectedBreak_EOL_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 3
// End-line hyphen that is not present in text; does not co-occur with
// SPACE, LEADER_SPACE, or LINE_BREAK.
TextAnnotation_DetectedBreak_HYPHEN TextAnnotation_DetectedBreak_BreakType = 4
// Line break that ends a paragraph.
TextAnnotation_DetectedBreak_LINE_BREAK TextAnnotation_DetectedBreak_BreakType = 5
)
func (TextAnnotation_DetectedBreak_BreakType) EnumDescriptor ¶
func (TextAnnotation_DetectedBreak_BreakType) EnumDescriptor() ([]byte, []int)
func (TextAnnotation_DetectedBreak_BreakType) String ¶
func (x TextAnnotation_DetectedBreak_BreakType) String() string
type TextAnnotation_DetectedLanguage ¶
Detected language for a structural component.
type TextAnnotation_DetectedLanguage struct {
// The BCP-47 language code, such as "en-US" or "sr-Latn". For more
// information, see
// http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"`
// Confidence of detected language. Range [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"`
}
func (*TextAnnotation_DetectedLanguage) Descriptor ¶
func (*TextAnnotation_DetectedLanguage) Descriptor() ([]byte, []int)
func (*TextAnnotation_DetectedLanguage) GetConfidence ¶
func (m *TextAnnotation_DetectedLanguage) GetConfidence() float32
func (*TextAnnotation_DetectedLanguage) GetLanguageCode ¶
func (m *TextAnnotation_DetectedLanguage) GetLanguageCode() string
func (*TextAnnotation_DetectedLanguage) ProtoMessage ¶
func (*TextAnnotation_DetectedLanguage) ProtoMessage()
func (*TextAnnotation_DetectedLanguage) Reset ¶
func (m *TextAnnotation_DetectedLanguage) Reset()
func (*TextAnnotation_DetectedLanguage) String ¶
func (m *TextAnnotation_DetectedLanguage) String() string
type TextAnnotation_TextProperty ¶
Additional information detected on the structural component.
type TextAnnotation_TextProperty struct {
// A list of detected languages together with confidence.
DetectedLanguages []*TextAnnotation_DetectedLanguage `protobuf:"bytes,1,rep,name=detected_languages,json=detectedLanguages" json:"detected_languages,omitempty"`
// Detected start or end of a text segment.
DetectedBreak *TextAnnotation_DetectedBreak `protobuf:"bytes,2,opt,name=detected_break,json=detectedBreak" json:"detected_break,omitempty"`
}
func (*TextAnnotation_TextProperty) Descriptor ¶
func (*TextAnnotation_TextProperty) Descriptor() ([]byte, []int)
func (*TextAnnotation_TextProperty) GetDetectedBreak ¶
func (m *TextAnnotation_TextProperty) GetDetectedBreak() *TextAnnotation_DetectedBreak
func (*TextAnnotation_TextProperty) GetDetectedLanguages ¶
func (m *TextAnnotation_TextProperty) GetDetectedLanguages() []*TextAnnotation_DetectedLanguage
func (*TextAnnotation_TextProperty) ProtoMessage ¶
func (*TextAnnotation_TextProperty) ProtoMessage()
func (*TextAnnotation_TextProperty) Reset ¶
func (m *TextAnnotation_TextProperty) Reset()
func (*TextAnnotation_TextProperty) String ¶
func (m *TextAnnotation_TextProperty) String() string
type Vertex ¶
A vertex represents a 2D point in the image. NOTE: the vertex coordinates are in the same scale as the original image.
type Vertex struct {
// X coordinate.
X int32 `protobuf:"varint,1,opt,name=x" json:"x,omitempty"`
// Y coordinate.
Y int32 `protobuf:"varint,2,opt,name=y" json:"y,omitempty"`
}
func (*Vertex) Descriptor ¶
func (*Vertex) Descriptor() ([]byte, []int)
func (*Vertex) GetX ¶
func (m *Vertex) GetX() int32
func (*Vertex) GetY ¶
func (m *Vertex) GetY() int32
func (*Vertex) ProtoMessage ¶
func (*Vertex) ProtoMessage()
func (*Vertex) Reset ¶
func (m *Vertex) Reset()
func (*Vertex) String ¶
func (m *Vertex) String() string
type WebDetection ¶
Relevant information for the image from the Internet.
type WebDetection struct {
// Deduced entities from similar images on the Internet.
WebEntities []*WebDetection_WebEntity `protobuf:"bytes,1,rep,name=web_entities,json=webEntities" json:"web_entities,omitempty"`
// Fully matching images from the Internet.
// They're definite near-duplicates and most often a copy of the query image with
// merely a size change.
FullMatchingImages []*WebDetection_WebImage `protobuf:"bytes,2,rep,name=full_matching_images,json=fullMatchingImages" json:"full_matching_images,omitempty"`
// Partial matching images from the Internet.
// Those images are similar enough to share some key-point features. For
// example, an original image will likely have partial matching for its crops.
PartialMatchingImages []*WebDetection_WebImage `protobuf:"bytes,3,rep,name=partial_matching_images,json=partialMatchingImages" json:"partial_matching_images,omitempty"`
// Web pages containing the matching images from the Internet.
PagesWithMatchingImages []*WebDetection_WebPage `protobuf:"bytes,4,rep,name=pages_with_matching_images,json=pagesWithMatchingImages" json:"pages_with_matching_images,omitempty"`
}
func (*WebDetection) Descriptor ¶
func (*WebDetection) Descriptor() ([]byte, []int)
func (*WebDetection) GetFullMatchingImages ¶
func (m *WebDetection) GetFullMatchingImages() []*WebDetection_WebImage
func (*WebDetection) GetPagesWithMatchingImages ¶
func (m *WebDetection) GetPagesWithMatchingImages() []*WebDetection_WebPage
func (*WebDetection) GetPartialMatchingImages ¶
func (m *WebDetection) GetPartialMatchingImages() []*WebDetection_WebImage
func (*WebDetection) GetWebEntities ¶
func (m *WebDetection) GetWebEntities() []*WebDetection_WebEntity
func (*WebDetection) ProtoMessage ¶
func (*WebDetection) ProtoMessage()
func (*WebDetection) Reset ¶
func (m *WebDetection) Reset()
func (*WebDetection) String ¶
func (m *WebDetection) String() string
type WebDetection_WebEntity ¶
Entity deduced from similar images on the Internet.
type WebDetection_WebEntity struct {
// Opaque entity ID.
EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId" json:"entity_id,omitempty"`
// Overall relevancy score for the entity.
// Not normalized and not comparable across different image queries.
Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"`
// Canonical description of the entity, in English.
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
}
func (*WebDetection_WebEntity) Descriptor ¶
func (*WebDetection_WebEntity) Descriptor() ([]byte, []int)
func (*WebDetection_WebEntity) GetDescription ¶
func (m *WebDetection_WebEntity) GetDescription() string
func (*WebDetection_WebEntity) GetEntityId ¶
func (m *WebDetection_WebEntity) GetEntityId() string
func (*WebDetection_WebEntity) GetScore ¶
func (m *WebDetection_WebEntity) GetScore() float32
func (*WebDetection_WebEntity) ProtoMessage ¶
func (*WebDetection_WebEntity) ProtoMessage()
func (*WebDetection_WebEntity) Reset ¶
func (m *WebDetection_WebEntity) Reset()
func (*WebDetection_WebEntity) String ¶
func (m *WebDetection_WebEntity) String() string
type WebDetection_WebImage ¶
Metadata for online images.
type WebDetection_WebImage struct {
// The result image URL.
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
// Overall relevancy score for the image.
// Not normalized and not comparable across different image queries.
Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"`
}
func (*WebDetection_WebImage) Descriptor ¶
func (*WebDetection_WebImage) Descriptor() ([]byte, []int)
func (*WebDetection_WebImage) GetScore ¶
func (m *WebDetection_WebImage) GetScore() float32
func (*WebDetection_WebImage) GetUrl ¶
func (m *WebDetection_WebImage) GetUrl() string
func (*WebDetection_WebImage) ProtoMessage ¶
func (*WebDetection_WebImage) ProtoMessage()
func (*WebDetection_WebImage) Reset ¶
func (m *WebDetection_WebImage) Reset()
func (*WebDetection_WebImage) String ¶
func (m *WebDetection_WebImage) String() string
type WebDetection_WebPage ¶
Metadata for web pages.
type WebDetection_WebPage struct {
// The result web page URL.
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
// Overall relevancy score for the web page.
// Not normalized and not comparable across different image queries.
Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"`
}
func (*WebDetection_WebPage) Descriptor ¶
func (*WebDetection_WebPage) Descriptor() ([]byte, []int)
func (*WebDetection_WebPage) GetScore ¶
func (m *WebDetection_WebPage) GetScore() float32
func (*WebDetection_WebPage) GetUrl ¶
func (m *WebDetection_WebPage) GetUrl() string
func (*WebDetection_WebPage) ProtoMessage ¶
func (*WebDetection_WebPage) ProtoMessage()
func (*WebDetection_WebPage) Reset ¶
func (m *WebDetection_WebPage) Reset()
func (*WebDetection_WebPage) String ¶
func (m *WebDetection_WebPage) String() string
type Word ¶
A word representation.
type Word struct {
// Additional information detected for the word.
Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"`
// The bounding box for the word.
// The vertices are in the order of top-left, top-right, bottom-right,
// bottom-left. When a rotation of the bounding box is detected the rotation
// is represented as around the top-left corner as defined when the text is
// read in the 'natural' orientation.
// For example:
// * when the text is horizontal it might look like:
// 0----1
// | |
// 3----2
// * when it's rotated 180 degrees around the top-left corner it becomes:
// 2----3
// | |
// 1----0
// and the vertex order will still be (0, 1, 2, 3).
BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"`
// List of symbols in the word.
// The order of the symbols follows the natural reading order.
Symbols []*Symbol `protobuf:"bytes,3,rep,name=symbols" json:"symbols,omitempty"`
}
func (*Word) Descriptor ¶
func (*Word) Descriptor() ([]byte, []int)
func (*Word) GetBoundingBox ¶
func (m *Word) GetBoundingBox() *BoundingPoly
func (*Word) GetProperty ¶
func (m *Word) GetProperty() *TextAnnotation_TextProperty
func (*Word) GetSymbols ¶
func (m *Word) GetSymbols() []*Symbol
func (*Word) ProtoMessage ¶
func (*Word) ProtoMessage()
func (*Word) Reset ¶
func (m *Word) Reset()
func (*Word) String ¶
func (m *Word) String() string