// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package rekognition

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)

var _ aws.Config
var _ = awsutil.Prettify

// Structure containing the estimated age range, in years, for a face.
//
// Amazon Rekognition estimates an age range for faces detected in the input
// image. Estimated age ranges can overlap. A face of a 5-year-old might have
// an estimated range of 4-6, while the face of a 6-year-old might have an
// estimated range of 4-8.
type AgeRange struct {
	_ struct{} `type:"structure"`

	// The highest estimated age.
	High *int64 `type:"integer"`

	// The lowest estimated age.
	Low *int64 `type:"integer"`
}

// String returns the string representation
func (s AgeRange) String() string {
	return awsutil.Prettify(s)
}

// Assets are the images that you use to train and evaluate a model version.
// Assets are referenced by SageMaker Ground Truth manifest files.
type Asset struct {
	_ struct{} `type:"structure"`

	// The S3 bucket that contains the Ground Truth manifest file.
	GroundTruthManifest *GroundTruthManifest `type:"structure"`
}

// String returns the string representation
func (s Asset) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Asset) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Asset"}
	if s.GroundTruthManifest != nil {
		if err := s.GroundTruthManifest.Validate(); err != nil {
			invalidParams.AddNested("GroundTruthManifest", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Metadata information about an audio stream. An array of AudioMetadata objects
// for the audio streams found in a stored video is returned by GetSegmentDetection.
type AudioMetadata struct {
	_ struct{} `type:"structure"`

	// The audio codec used to encode or decode the audio stream.
	Codec *string `type:"string"`

	// The duration of the audio stream in milliseconds.
	DurationMillis *int64 `type:"long"`

	// The number of audio channels in the segment.
	NumberOfChannels *int64 `type:"long"`

	// The sample rate for the audio stream.
	SampleRate *int64 `type:"long"`
}

// String returns the string representation
func (s AudioMetadata) String() string {
	return awsutil.Prettify(s)
}

// Indicates whether or not the face has a beard, and the confidence level in
// the determination.
type Beard struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the face has a beard or not.
	Value *bool `type:"boolean"`
}

// String returns the string representation
func (s Beard) String() string {
	return awsutil.Prettify(s)
}

// Identifies the bounding box around the label, face, or text. The left (x-coordinate)
// and top (y-coordinate) are coordinates representing the top and left sides
// of the bounding box. Note that the upper-left corner of the image is the
// origin (0,0).
//
// The top and left values returned are ratios of the overall image size. For
// example, if the input image is 700x200 pixels, and the top-left coordinate
// of the bounding box is 350x50 pixels, the API returns a left value of 0.5
// (350/700) and a top value of 0.25 (50/200).
//
// The width and height values represent the dimensions of the bounding box
// as a ratio of the overall image dimension.
// For example, if the input image
// is 700x200 pixels, and the bounding box width is 70 pixels, the width returned
// is 0.1.
//
// The bounding box coordinates can have negative values. For example, if Amazon
// Rekognition is able to detect a face that is at the image edge and is only
// partially visible, the service can return coordinates that are outside the
// image bounds and, depending on the image edge, you might get negative values
// or values greater than 1 for the left or top values.
type BoundingBox struct {
	_ struct{} `type:"structure"`

	// Height of the bounding box as a ratio of the overall image height.
	Height *float64 `type:"float"`

	// Left coordinate of the bounding box as a ratio of overall image width.
	Left *float64 `type:"float"`

	// Top coordinate of the bounding box as a ratio of overall image height.
	Top *float64 `type:"float"`

	// Width of the bounding box as a ratio of the overall image width.
	Width *float64 `type:"float"`
}

// String returns the string representation
func (s BoundingBox) String() string {
	return awsutil.Prettify(s)
}

// Provides information about a celebrity recognized by the RecognizeCelebrities
// operation.
type Celebrity struct {
	_ struct{} `type:"structure"`

	// Provides information about the celebrity's face, such as its location on
	// the image.
	Face *ComparedFace `type:"structure"`

	// A unique identifier for the celebrity.
	Id *string `type:"string"`

	// The confidence, in percentage, that Amazon Rekognition has that the recognized
	// face is the celebrity.
	MatchConfidence *float64 `type:"float"`

	// The name of the celebrity.
	Name *string `type:"string"`

	// An array of URLs pointing to additional information about the celebrity.
	// If there is no additional information about the celebrity, this list is empty.
	Urls []string `type:"list"`
}

// String returns the string representation
func (s Celebrity) String() string {
	return awsutil.Prettify(s)
}

// Information about a recognized celebrity.
type CelebrityDetail struct {
	_ struct{} `type:"structure"`

	// Bounding box around the body of a celebrity.
	BoundingBox *BoundingBox `type:"structure"`

	// The confidence, in percentage, that Amazon Rekognition has that the recognized
	// face is the celebrity.
	Confidence *float64 `type:"float"`

	// Face details for the recognized celebrity.
	Face *FaceDetail `type:"structure"`

	// The unique identifier for the celebrity.
	Id *string `type:"string"`

	// The name of the celebrity.
	Name *string `type:"string"`

	// An array of URLs pointing to additional celebrity information.
	Urls []string `type:"list"`
}

// String returns the string representation
func (s CelebrityDetail) String() string {
	return awsutil.Prettify(s)
}

// Information about a detected celebrity and the time the celebrity was detected
// in a stored video. For more information, see GetCelebrityRecognition in the
// Amazon Rekognition Developer Guide.
type CelebrityRecognition struct {
	_ struct{} `type:"structure"`

	// Information about a recognized celebrity.
	Celebrity *CelebrityDetail `type:"structure"`

	// The time, in milliseconds from the start of the video, that the celebrity
	// was recognized.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation
func (s CelebrityRecognition) String() string {
	return awsutil.Prettify(s)
}
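
// The following helper is an illustrative sketch, not part of the generated
// API: it converts the ratio-based BoundingBox fields documented above into
// pixel coordinates for a known image size. The function name and parameters
// are our own.
func boundingBoxToPixels(box *BoundingBox, imageWidth, imageHeight float64) (left, top, width, height float64) {
	if box == nil {
		return 0, 0, 0, 0
	}
	// Each field is a ratio of the corresponding image dimension; values can
	// fall outside [0,1] for faces that are only partially in frame.
	if box.Left != nil {
		left = *box.Left * imageWidth
	}
	if box.Top != nil {
		top = *box.Top * imageHeight
	}
	if box.Width != nil {
		width = *box.Width * imageWidth
	}
	if box.Height != nil {
		height = *box.Height * imageHeight
	}
	return left, top, width, height
}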

// Provides information about a face in a target image that matches the source
// image face analyzed by CompareFaces. The Face property contains the bounding
// box of the face in the target image. The Similarity property is the confidence
// that the source image face matches the face in the bounding box.
type CompareFacesMatch struct {
	_ struct{} `type:"structure"`

	// Provides face metadata (bounding box and confidence that the bounding box
	// actually contains a face).
	Face *ComparedFace `type:"structure"`

	// Level of confidence that the faces match.
	Similarity *float64 `type:"float"`
}

// String returns the string representation
func (s CompareFacesMatch) String() string {
	return awsutil.Prettify(s)
}

// Provides face metadata for target image faces that are analyzed by CompareFaces
// and RecognizeCelebrities.
type ComparedFace struct {
	_ struct{} `type:"structure"`

	// Bounding box of the face.
	BoundingBox *BoundingBox `type:"structure"`

	// Level of confidence that what the bounding box contains is a face.
	Confidence *float64 `type:"float"`

	// An array of facial landmarks.
	Landmarks []Landmark `type:"list"`

	// Indicates the pose of the face as determined by its pitch, roll, and yaw.
	Pose *Pose `type:"structure"`

	// Identifies face image brightness and sharpness.
	Quality *ImageQuality `type:"structure"`
}

// String returns the string representation
func (s ComparedFace) String() string {
	return awsutil.Prettify(s)
}

// Type that describes the face Amazon Rekognition chose to compare with the
// faces in the target. This contains a bounding box for the selected face and
// confidence level that the bounding box contains a face. Note that Amazon
// Rekognition selects the largest face in the source image for this comparison.
type ComparedSourceImageFace struct {
	_ struct{} `type:"structure"`

	// Bounding box of the face.
	BoundingBox *BoundingBox `type:"structure"`

	// Confidence level that the selected bounding box contains a face.
	Confidence *float64 `type:"float"`
}

// String returns the string representation
func (s ComparedSourceImageFace) String() string {
	return awsutil.Prettify(s)
}

// Information about an unsafe content label detection in a stored video.
type ContentModerationDetection struct {
	_ struct{} `type:"structure"`

	// The unsafe content label detected in the stored video.
	ModerationLabel *ModerationLabel `type:"structure"`

	// Time, in milliseconds from the beginning of the video, that the unsafe content
	// label was detected.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation
func (s ContentModerationDetection) String() string {
	return awsutil.Prettify(s)
}

// A custom label detected in an image by a call to DetectCustomLabels.
type CustomLabel struct {
	_ struct{} `type:"structure"`

	// The confidence that the model has in the detection of the custom label. The
	// range is 0-100. A higher value indicates a higher confidence.
	Confidence *float64 `type:"float"`

	// The location of the detected object on the image that corresponds to the
	// custom label. Includes an axis-aligned coarse bounding box surrounding the
	// object and a finer-grain polygon for more accurate spatial information.
	Geometry *Geometry `type:"structure"`

	// The name of the custom label.
	Name *string `type:"string"`
}

// String returns the string representation
func (s CustomLabel) String() string {
	return awsutil.Prettify(s)
}

// A set of optional parameters that you can use to set the criteria that the
// text must meet to be included in your response. WordFilter looks at a word’s
// height, width, and minimum confidence. RegionOfInterest lets you set a specific
// region of the image to look for text in.
type DetectTextFilters struct {
	_ struct{} `type:"structure"`

	// A filter focusing on a certain area of the image. Uses a BoundingBox object
	// to set the region of the image.
	RegionsOfInterest []RegionOfInterest `type:"list"`

	// A set of parameters that allow you to filter out certain results from your
	// returned results.
	WordFilter *DetectionFilter `type:"structure"`
}

// String returns the string representation
func (s DetectTextFilters) String() string {
	return awsutil.Prettify(s)
}

// A set of parameters that allow you to filter out certain results from your
// returned results.
type DetectionFilter struct {
	_ struct{} `type:"structure"`

	// Sets the minimum height of the word bounding box. Words with bounding box
	// heights less than this value will be excluded from the result. Value is
	// relative to the video frame height.
	MinBoundingBoxHeight *float64 `type:"float"`

	// Sets the minimum width of the word bounding box. Words with bounding box
	// widths less than this value will be excluded from the result. Value is
	// relative to the video frame width.
	MinBoundingBoxWidth *float64 `type:"float"`

	// Sets the confidence of word detection. Words with detection confidence below
	// this value will be excluded from the result. Values should be between 0.5
	// and 1 as Text in Video will not return any result below 0.5.
	MinConfidence *float64 `type:"float"`
}

// String returns the string representation
func (s DetectionFilter) String() string {
	return awsutil.Prettify(s)
}

// The emotions that appear to be expressed on the face, and the confidence
// level in the determination. The API is only making a determination of the
// physical appearance of a person's face. It is not a determination of the
// person’s internal emotional state and should not be used in such a way.
// For example, a person pretending to have a sad face might not be sad emotionally.
type Emotion struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Type of emotion detected.
	Type EmotionName `type:"string" enum:"true"`
}

// String returns the string representation
func (s Emotion) String() string {
	return awsutil.Prettify(s)
}

// The evaluation results for the training of a model.
type EvaluationResult struct {
	_ struct{} `type:"structure"`

	// The F1 score for the evaluation of all labels. The F1 score metric evaluates
	// the overall precision and recall performance of the model as a single value.
	// A higher value indicates better precision and recall performance. A lower
	// score indicates that precision, recall, or both are performing poorly.
	F1Score *float64 `type:"float"`

	// The S3 bucket that contains the training summary.
	Summary *Summary `type:"structure"`
}

// String returns the string representation
func (s EvaluationResult) String() string {
	return awsutil.Prettify(s)
}

// Indicates whether or not the eyes on the face are open, and the confidence
// level in the determination.
type EyeOpen struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the eyes on the face are open.
	Value *bool `type:"boolean"`
}

// String returns the string representation
func (s EyeOpen) String() string {
	return awsutil.Prettify(s)
}
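
// A minimal usage sketch, not part of the generated API: assembling
// DetectTextFilters so that DetectText only returns text from the top half
// of the image with detection confidence of at least 0.9 (per the 0.5-1 range
// documented for DetectionFilter above). The values are placeholders.
func exampleDetectTextFilters() *DetectTextFilters {
	return &DetectTextFilters{
		RegionsOfInterest: []RegionOfInterest{
			{
				// Region covering the full width and the top half of the image.
				BoundingBox: &BoundingBox{
					Left:   aws.Float64(0.0),
					Top:    aws.Float64(0.0),
					Width:  aws.Float64(1.0),
					Height: aws.Float64(0.5),
				},
			},
		},
		WordFilter: &DetectionFilter{
			MinConfidence: aws.Float64(0.9),
		},
	}
}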

// Indicates whether or not the face is wearing eye glasses, and the confidence
// level in the determination.
type Eyeglasses struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the face is wearing eye glasses or not.
	Value *bool `type:"boolean"`
}

// String returns the string representation
func (s Eyeglasses) String() string {
	return awsutil.Prettify(s)
}

// Describes the face properties such as the bounding box, face ID, image ID
// of the input image, and external image ID that you assigned.
type Face struct {
	_ struct{} `type:"structure"`

	// Bounding box of the face.
	BoundingBox *BoundingBox `type:"structure"`

	// Confidence level that the bounding box contains a face (and not a different
	// object such as a tree).
	Confidence *float64 `type:"float"`

	// Identifier that you assign to all the faces in the input image.
	ExternalImageId *string `min:"1" type:"string"`

	// Unique identifier that Amazon Rekognition assigns to the face.
	FaceId *string `type:"string"`

	// Unique identifier that Amazon Rekognition assigns to the input image.
	ImageId *string `type:"string"`
}

// String returns the string representation
func (s Face) String() string {
	return awsutil.Prettify(s)
}

// Structure containing attributes of the face that the algorithm detected.
//
// A FaceDetail object contains either the default facial attributes or all
// facial attributes. The default attributes are BoundingBox, Confidence, Landmarks,
// Pose, and Quality.
//
// GetFaceDetection is the only Amazon Rekognition Video stored video operation
// that can return a FaceDetail object with all attributes. To specify which
// attributes to return, use the FaceAttributes input parameter for StartFaceDetection.
// The following Amazon Rekognition Video operations return only the default
// attributes. The corresponding Start operations don't have a FaceAttributes
// input parameter.
//
//    * GetCelebrityRecognition
//
//    * GetPersonTracking
//
//    * GetFaceSearch
//
// The Amazon Rekognition Image DetectFaces and IndexFaces operations can return
// all facial attributes. To specify which attributes to return, use the Attributes
// input parameter for DetectFaces. For IndexFaces, use the DetectAttributes
// input parameter.
type FaceDetail struct {
	_ struct{} `type:"structure"`

	// The estimated age range, in years, for the face. Low represents the lowest
	// estimated age and High represents the highest estimated age.
	AgeRange *AgeRange `type:"structure"`

	// Indicates whether or not the face has a beard, and the confidence level in
	// the determination.
	Beard *Beard `type:"structure"`

	// Bounding box of the face. Default attribute.
	BoundingBox *BoundingBox `type:"structure"`

	// Confidence level that the bounding box contains a face (and not a different
	// object such as a tree). Default attribute.
	Confidence *float64 `type:"float"`

	// The emotions that appear to be expressed on the face, and the confidence
	// level in the determination. The API is only making a determination of the
	// physical appearance of a person's face. It is not a determination of the
	// person’s internal emotional state and should not be used in such a way.
	// For example, a person pretending to have a sad face might not be sad emotionally.
	Emotions []Emotion `type:"list"`

	// Indicates whether or not the face is wearing eye glasses, and the confidence
	// level in the determination.
	Eyeglasses *Eyeglasses `type:"structure"`

	// Indicates whether or not the eyes on the face are open, and the confidence
	// level in the determination.
	EyesOpen *EyeOpen `type:"structure"`

	// The predicted gender of a detected face.
	Gender *Gender `type:"structure"`

	// Indicates the location of landmarks on the face. Default attribute.
	Landmarks []Landmark `type:"list"`

	// Indicates whether or not the mouth on the face is open, and the confidence
	// level in the determination.
	MouthOpen *MouthOpen `type:"structure"`

	// Indicates whether or not the face has a mustache, and the confidence level
	// in the determination.
	Mustache *Mustache `type:"structure"`

	// Indicates the pose of the face as determined by its pitch, roll, and yaw.
	// Default attribute.
	Pose *Pose `type:"structure"`

	// Identifies image brightness and sharpness. Default attribute.
	Quality *ImageQuality `type:"structure"`

	// Indicates whether or not the face is smiling, and the confidence level in
	// the determination.
	Smile *Smile `type:"structure"`

	// Indicates whether or not the face is wearing sunglasses, and the confidence
	// level in the determination.
	Sunglasses *Sunglasses `type:"structure"`
}

// String returns the string representation
func (s FaceDetail) String() string {
	return awsutil.Prettify(s)
}

// Information about a face detected in a video analysis request and the time
// the face was detected in the video.
type FaceDetection struct {
	_ struct{} `type:"structure"`

	// The face properties for the detected face.
	Face *FaceDetail `type:"structure"`

	// Time, in milliseconds from the start of the video, that the face was detected.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation
func (s FaceDetection) String() string {
	return awsutil.Prettify(s)
}

// Provides face metadata. In addition, it also provides the confidence in the
// match of this face with the input face.
type FaceMatch struct {
	_ struct{} `type:"structure"`

	// Describes the face properties such as the bounding box, face ID, image ID
	// of the source image, and external image ID that you assigned.
	Face *Face `type:"structure"`

	// Confidence in the match of this face with the input face.
	Similarity *float64 `type:"float"`
}

// String returns the string representation
func (s FaceMatch) String() string {
	return awsutil.Prettify(s)
}

// Object containing both the face metadata (stored in the backend database),
// and facial attributes that are detected but aren't stored in the database.
type FaceRecord struct {
	_ struct{} `type:"structure"`

	// Describes the face properties such as the bounding box, face ID, image ID
	// of the input image, and external image ID that you assigned.
	Face *Face `type:"structure"`

	// Structure containing attributes of the face that the algorithm detected.
	FaceDetail *FaceDetail `type:"structure"`
}

// String returns the string representation
func (s FaceRecord) String() string {
	return awsutil.Prettify(s)
}

// Input face recognition parameters for an Amazon Rekognition stream processor.
// FaceRecognitionSettings is a request parameter for CreateStreamProcessor.
type FaceSearchSettings struct {
	_ struct{} `type:"structure"`

	// The ID of a collection that contains faces that you want to search for.
	CollectionId *string `min:"1" type:"string"`

	// Minimum face match confidence score that must be met to return a result for
	// a recognized face. Default is 80. 0 is the lowest confidence. 100 is the
	// highest confidence.
	FaceMatchThreshold *float64 `type:"float"`
}

// String returns the string representation
func (s FaceSearchSettings) String() string {
	return awsutil.Prettify(s)
}
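
// Illustrative usage sketch, not part of the generated API: building
// FaceSearchSettings for CreateStreamProcessor and validating it client-side
// before sending a request. The collection ID is a hypothetical placeholder.
func exampleFaceSearchSettings() (*FaceSearchSettings, error) {
	settings := &FaceSearchSettings{
		CollectionId:       aws.String("my-face-collection"), // hypothetical collection ID
		FaceMatchThreshold: aws.Float64(85),                  // raise the default of 80
	}
	// Validate enforces the min:"1" constraint on CollectionId.
	if err := settings.Validate(); err != nil {
		return nil, err
	}
	return settings, nil
}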

// Validate inspects the fields of the type to determine if they are valid.
func (s *FaceSearchSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "FaceSearchSettings"}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("CollectionId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// The predicted gender of a detected face.
//
// Amazon Rekognition makes gender binary (male/female) predictions based on
// the physical appearance of a face in a particular image. This kind of prediction
// is not designed to categorize a person’s gender identity, and you shouldn't
// use Amazon Rekognition to make such a determination. For example, a male
// actor wearing a long-haired wig and earrings for a role might be predicted
// as female.
//
// Using Amazon Rekognition to make gender binary predictions is best suited
// for use cases where aggregate gender distribution statistics need to be analyzed
// without identifying specific users. For example, the percentage of female
// users compared to male users on a social media platform.
//
// We don't recommend using gender binary predictions to make decisions that
// impact an individual's rights, privacy, or access to services.
type Gender struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the prediction.
	Confidence *float64 `type:"float"`

	// The predicted gender of the face.
	Value GenderType `type:"string" enum:"true"`
}

// String returns the string representation
func (s Gender) String() string {
	return awsutil.Prettify(s)
}

// Information about where an object (DetectCustomLabels) or text (DetectText)
// is located on an image.
type Geometry struct {
	_ struct{} `type:"structure"`

	// An axis-aligned coarse representation of the detected item's location on
	// the image.
	BoundingBox *BoundingBox `type:"structure"`

	// Within the bounding box, a fine-grained polygon around the detected item.
	Polygon []Point `type:"list"`
}

// String returns the string representation
func (s Geometry) String() string {
	return awsutil.Prettify(s)
}

// The S3 bucket that contains the Ground Truth manifest file.
type GroundTruthManifest struct {
	_ struct{} `type:"structure"`

	// Provides the S3 bucket name and object name.
	//
	// The region for the S3 bucket containing the S3 object must match the region
	// you use for Amazon Rekognition operations.
	//
	// For Amazon Rekognition to process an S3 object, the user must have permission
	// to access the S3 object. For more information, see Resource-Based Policies
	// in the Amazon Rekognition Developer Guide.
	S3Object *S3Object `type:"structure"`
}

// String returns the string representation
func (s GroundTruthManifest) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GroundTruthManifest) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "GroundTruthManifest"}
	if s.S3Object != nil {
		if err := s.S3Object.Validate(); err != nil {
			invalidParams.AddNested("S3Object", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
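
// Because every FaceDetail attribute documented above is a pointer that may
// be nil when only default attributes were requested, reads should be
// nil-guarded. An illustrative sketch, not part of the generated API; the
// helper name is our own.
func describeFaceDetail(face *FaceDetail) string {
	if face == nil {
		return "no face"
	}
	desc := "face"
	if face.AgeRange != nil && face.AgeRange.Low != nil && face.AgeRange.High != nil {
		desc += fmt.Sprintf(", age %d-%d", *face.AgeRange.Low, *face.AgeRange.High)
	}
	if face.Smile != nil && face.Smile.Value != nil && *face.Smile.Value {
		desc += ", smiling"
	}
	if face.Gender != nil {
		// Value is a string-based enum (GenderType).
		desc += fmt.Sprintf(", predicted gender %s", face.Gender.Value)
	}
	return desc
}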

// Shows the results of the human in the loop evaluation. If there is no HumanLoopArn,
// the input did not trigger human review.
type HumanLoopActivationOutput struct {
	_ struct{} `type:"structure"`

	// Shows the result of condition evaluations, including those conditions which
	// activated a human review.
	HumanLoopActivationConditionsEvaluationResults aws.JSONValue `type:"jsonvalue"`

	// Shows if and why human review was needed.
	HumanLoopActivationReasons []string `min:"1" type:"list"`

	// The Amazon Resource Name (ARN) of the HumanLoop created.
	HumanLoopArn *string `type:"string"`
}

// String returns the string representation
func (s HumanLoopActivationOutput) String() string {
	return awsutil.Prettify(s)
}

// Sets up the flow definition the image will be sent to if one of the conditions
// is met. You can also set certain attributes of the image before review.
type HumanLoopConfig struct {
	_ struct{} `type:"structure"`

	// Sets attributes of the input data.
	DataAttributes *HumanLoopDataAttributes `type:"structure"`

	// The Amazon Resource Name (ARN) of the flow definition. You can create a flow
	// definition by using the Amazon SageMaker CreateFlowDefinition (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html)
	// operation.
	//
	// FlowDefinitionArn is a required field
	FlowDefinitionArn *string `type:"string" required:"true"`

	// The name of the human review used for this image. This should be kept unique
	// within a region.
	//
	// HumanLoopName is a required field
	HumanLoopName *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s HumanLoopConfig) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HumanLoopConfig) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "HumanLoopConfig"}

	if s.FlowDefinitionArn == nil {
		invalidParams.Add(aws.NewErrParamRequired("FlowDefinitionArn"))
	}

	if s.HumanLoopName == nil {
		invalidParams.Add(aws.NewErrParamRequired("HumanLoopName"))
	}
	if s.HumanLoopName != nil && len(*s.HumanLoopName) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("HumanLoopName", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Allows you to set attributes of the image. Currently, you can declare an
// image as free of personally identifiable information.
type HumanLoopDataAttributes struct {
	_ struct{} `type:"structure"`

	// Sets whether the input image is free of personally identifiable information.
	ContentClassifiers []ContentClassifier `type:"list"`
}

// String returns the string representation
func (s HumanLoopDataAttributes) String() string {
	return awsutil.Prettify(s)
}
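
// A minimal sketch of a HumanLoopConfig, assuming an existing Amazon A2I flow
// definition; the ARN and loop name below are hypothetical placeholders. Not
// part of the generated API. Validate catches missing required fields before
// a request is sent.
func exampleHumanLoopConfig() (*HumanLoopConfig, error) {
	cfg := &HumanLoopConfig{
		FlowDefinitionArn: aws.String("arn:aws:sagemaker:us-east-1:111122223333:flow-definition/example"), // hypothetical ARN
		HumanLoopName:     aws.String("example-review-loop"),                                              // hypothetical name, unique within a region
	}
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}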

// Provides the input image either as bytes or an S3 object.
//
// You pass image bytes to an Amazon Rekognition API operation by using the
// Bytes property. For example, you would use the Bytes property to pass an
// image loaded from a local file system. Image bytes passed by using the Bytes
// property must be base64-encoded. Your code may not need to encode image bytes
// if you are using an AWS SDK to call Amazon Rekognition API operations.
//
// For more information, see Analyzing an Image Loaded from a Local File System
// in the Amazon Rekognition Developer Guide.
//
// You pass images stored in an S3 bucket to an Amazon Rekognition API operation
// by using the S3Object property. Images stored in an S3 bucket do not need
// to be base64-encoded.
//
// The region for the S3 bucket containing the S3 object must match the region
// you use for Amazon Rekognition operations.
//
// If you use the AWS CLI to call Amazon Rekognition operations, passing image
// bytes using the Bytes property is not supported. You must first upload the
// image to an Amazon S3 bucket and then call the operation using the S3Object
// property.
//
// For Amazon Rekognition to process an S3 object, the user must have permission
// to access the S3 object. For more information, see Resource-Based Policies
// in the Amazon Rekognition Developer Guide.
type Image struct {
	_ struct{} `type:"structure"`

	// Blob of image bytes up to 5 MBs.
	//
	// Bytes is automatically base64 encoded/decoded by the SDK.
	Bytes []byte `min:"1" type:"blob"`

	// Identifies an S3 object as the image source.
	S3Object *S3Object `type:"structure"`
}

// String returns the string representation
func (s Image) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Image) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Image"}
	if s.Bytes != nil && len(s.Bytes) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("Bytes", 1))
	}
	if s.S3Object != nil {
		if err := s.S3Object.Validate(); err != nil {
			invalidParams.AddNested("S3Object", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Identifies face image brightness and sharpness.
type ImageQuality struct {
	_ struct{} `type:"structure"`

	// Value representing brightness of the face. The service returns a value between
	// 0 and 100 (inclusive). A higher value indicates a brighter face image.
	Brightness *float64 `type:"float"`

	// Value representing sharpness of the face. The service returns a value between
	// 0 and 100 (inclusive). A higher value indicates a sharper face image.
	Sharpness *float64 `type:"float"`
}

// String returns the string representation
func (s ImageQuality) String() string {
	return awsutil.Prettify(s)
}

// An instance of a label returned by Amazon Rekognition Image (DetectLabels)
// or by Amazon Rekognition Video (GetLabelDetection).
type Instance struct {
	_ struct{} `type:"structure"`

	// The position of the label instance on the image.
	BoundingBox *BoundingBox `type:"structure"`

	// The confidence that Amazon Rekognition has in the accuracy of the bounding
	// box.
	Confidence *float64 `type:"float"`
}

// String returns the string representation
func (s Instance) String() string {
	return awsutil.Prettify(s)
}

// The Kinesis data stream to which the analysis results of an Amazon Rekognition
// stream processor are streamed. For more information, see CreateStreamProcessor
// in the Amazon Rekognition Developer Guide.
type KinesisDataStream struct {
	_ struct{} `type:"structure"`

	// ARN of the output Amazon Kinesis Data Streams stream.
	Arn *string `type:"string"`
}

// String returns the string representation
func (s KinesisDataStream) String() string {
	return awsutil.Prettify(s)
}

// Kinesis video stream that provides the source streaming video for an Amazon
// Rekognition Video stream processor. For more information, see CreateStreamProcessor
// in the Amazon Rekognition Developer Guide.
type KinesisVideoStream struct {
	_ struct{} `type:"structure"`

	// ARN of the Kinesis video stream that streams the source video.
	Arn *string `type:"string"`
}

// String returns the string representation
func (s KinesisVideoStream) String() string {
	return awsutil.Prettify(s)
}
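
// The two ways of providing an input image described in the Image
// documentation above: raw bytes for a locally loaded file, or an S3Object
// reference for an image already in S3. An illustrative sketch with
// hypothetical bucket and key names, not part of the generated API.
func exampleImages(localBytes []byte) (fromBytes, fromS3 *Image) {
	// The SDK base64-encodes Bytes automatically; pass the raw image bytes.
	fromBytes = &Image{Bytes: localBytes}
	// The bucket's region must match the region used for Rekognition calls.
	fromS3 = &Image{
		S3Object: &S3Object{
			Bucket: aws.String("my-input-bucket"), // hypothetical bucket
			Name:   aws.String("photos/face.jpg"), // hypothetical object key
		},
	}
	return fromBytes, fromS3
}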

// Structure containing details about the detected label, including the name,
// detected instances, parent labels, and level of confidence.
type Label struct {
	_ struct{} `type:"structure"`

	// Level of confidence.
	Confidence *float64 `type:"float"`

	// If Label represents an object, Instances contains the bounding boxes for
	// each instance of the detected object. Bounding boxes are returned for common
	// object labels such as people, cars, furniture, apparel or pets.
	Instances []Instance `type:"list"`

	// The name (label) of the object or scene.
	Name *string `type:"string"`

	// The parent labels for a label. The response includes all ancestor labels.
	Parents []Parent `type:"list"`
}

// String returns the string representation
func (s Label) String() string {
	return awsutil.Prettify(s)
}

// Information about a label detected in a video analysis request and the time
// the label was detected in the video.
type LabelDetection struct {
	_ struct{} `type:"structure"`

	// Details about the detected label.
	Label *Label `type:"structure"`

	// Time, in milliseconds from the start of the video, that the label was detected.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation
func (s LabelDetection) String() string {
	return awsutil.Prettify(s)
}

// Indicates the location of the landmark on the face.
type Landmark struct {
	_ struct{} `type:"structure"`

	// Type of landmark.
	Type LandmarkType `type:"string" enum:"true"`

	// The x-coordinate from the top left of the landmark expressed as the ratio
	// of the width of the image. For example, if the image is 700 x 200 and the
	// x-coordinate of the landmark is at 350 pixels, this value is 0.5.
	X *float64 `type:"float"`

	// The y-coordinate from the top left of the landmark expressed as the ratio
	// of the height of the image. For example, if the image is 700 x 200 and the
	// y-coordinate of the landmark is at 100 pixels, this value is 0.5.
	Y *float64 `type:"float"`
}

// String returns the string representation
func (s Landmark) String() string {
	return awsutil.Prettify(s)
}

// Provides information about a single type of unsafe content found in an image
// or video. Each type of moderated content has a label within a hierarchical
// taxonomy. For more information, see Detecting Unsafe Content in the Amazon
// Rekognition Developer Guide.
type ModerationLabel struct {
	_ struct{} `type:"structure"`

	// Specifies the confidence that Amazon Rekognition has that the label has been
	// correctly identified.
	//
	// If you don't specify the MinConfidence parameter in the call to DetectModerationLabels,
	// the operation returns labels with a confidence value greater than or equal
	// to 50 percent.
	Confidence *float64 `type:"float"`

	// The label name for the type of unsafe content detected in the image.
	Name *string `type:"string"`

	// The name for the parent label. Labels at the top level of the hierarchy have
	// the parent label "".
	ParentName *string `type:"string"`
}

// String returns the string representation
func (s ModerationLabel) String() string {
	return awsutil.Prettify(s)
}

// Indicates whether or not the mouth on the face is open, and the confidence
// level in the determination.
type MouthOpen struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the mouth on the face is open or not.
	Value *bool `type:"boolean"`
}

// String returns the string representation
func (s MouthOpen) String() string {
	return awsutil.Prettify(s)
}

// Indicates whether or not the face has a mustache, and the confidence level
// in the determination.
type Mustache struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the face has a mustache or not.
	Value *bool `type:"boolean"`
}

// String returns the string representation
func (s Mustache) String() string {
	return awsutil.Prettify(s)
}

// The Amazon Simple Notification Service topic to which Amazon Rekognition
// publishes the completion status of a video analysis operation. For more information,
// see api-video.
type NotificationChannel struct {
	_ struct{} `type:"structure"`

	// The ARN of an IAM role that gives Amazon Rekognition publishing permissions
	// to the Amazon SNS topic.
	//
	// RoleArn is a required field
	RoleArn *string `type:"string" required:"true"`

	// The Amazon SNS topic to which Amazon Rekognition posts the completion
	// status.
	//
	// SNSTopicArn is a required field
	SNSTopicArn *string `type:"string" required:"true"`
}

// String returns the string representation
func (s NotificationChannel) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *NotificationChannel) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "NotificationChannel"}

	if s.RoleArn == nil {
		invalidParams.Add(aws.NewErrParamRequired("RoleArn"))
	}

	if s.SNSTopicArn == nil {
		invalidParams.Add(aws.NewErrParamRequired("SNSTopicArn"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// The S3 bucket and folder location where training output is placed.
type OutputConfig struct {
	_ struct{} `type:"structure"`

	// The S3 bucket where training output is placed.
	S3Bucket *string `min:"3" type:"string"`

	// The prefix applied to the training output files.
	S3KeyPrefix *string `type:"string"`
}

// String returns the string representation
func (s OutputConfig) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *OutputConfig) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "OutputConfig"}
	if s.S3Bucket != nil && len(*s.S3Bucket) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("S3Bucket", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// A parent label for a label. A label can have 0, 1, or more parents.
type Parent struct {
	_ struct{} `type:"structure"`

	// The name of the parent label.
	Name *string `type:"string"`
}

// String returns the string representation
func (s Parent) String() string {
	return awsutil.Prettify(s)
}

// Details about a person detected in a video analysis request.
type PersonDetail struct {
	_ struct{} `type:"structure"`

	// Bounding box around the detected person.
	BoundingBox *BoundingBox `type:"structure"`

	// Face details for the detected person.
	Face *FaceDetail `type:"structure"`

	// Identifier for the person detected within a video. Use to keep track
	// of the person throughout the video. The identifier is not stored by Amazon
	// Rekognition.
	Index *int64 `type:"long"`
}

// String returns the string representation
func (s PersonDetail) String() string {
	return awsutil.Prettify(s)
}
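
// Illustrative sketch, not part of the generated API: building a
// NotificationChannel for a video start operation. Both fields are required,
// as Validate enforces; both ARNs below are hypothetical placeholders.
func exampleNotificationChannel() (*NotificationChannel, error) {
	ch := &NotificationChannel{
		RoleArn:     aws.String("arn:aws:iam::111122223333:role/RekognitionSNSRole"),    // hypothetical role ARN
		SNSTopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:RekognitionStatus"), // hypothetical topic ARN
	}
	if err := ch.Validate(); err != nil {
		return nil, err
	}
	return ch, nil
}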

// Details and path tracking information for a single time a person's path is
// tracked in a video. Amazon Rekognition operations that track people's paths
// return an array of PersonDetection objects with elements for each time a
// person's path is tracked in a video.
//
// For more information, see GetPersonTracking in the Amazon Rekognition Developer
// Guide.
type PersonDetection struct {
	_ struct{} `type:"structure"`

	// Details about a person whose path was tracked in a video.
	Person *PersonDetail `type:"structure"`

	// The time, in milliseconds from the start of the video, that the person's
	// path was tracked.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation
func (s PersonDetection) String() string {
	return awsutil.Prettify(s)
}

// Information about a person whose face matches a face(s) in an Amazon Rekognition
// collection. Includes information about the faces in the Amazon Rekognition
// collection (FaceMatch), information about the person (PersonDetail), and
// the time stamp for when the person was detected in a video. An array of PersonMatch
// objects is returned by GetFaceSearch.
type PersonMatch struct {
	_ struct{} `type:"structure"`

	// Information about the faces in the input collection that match the face of
	// a person in the video.
	FaceMatches []FaceMatch `type:"list"`

	// Information about the matched person.
	Person *PersonDetail `type:"structure"`

	// The time, in milliseconds from the beginning of the video, that the person
	// was matched in the video.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation
func (s PersonMatch) String() string {
	return awsutil.Prettify(s)
}

// The X and Y coordinates of a point on an image. The X and Y values returned
// are ratios of the overall image size. For example, if the input image is
// 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at
// the (350,50) pixel coordinate on the image.
//
// An array of Point objects, Polygon, is returned by DetectText and by DetectCustomLabels.
// Polygon represents a fine-grained polygon around a detected item. For more
// information, see Geometry in the Amazon Rekognition Developer Guide.
type Point struct {
	_ struct{} `type:"structure"`

	// The value of the X coordinate for a point on a Polygon.
	X *float64 `type:"float"`

	// The value of the Y coordinate for a point on a Polygon.
	Y *float64 `type:"float"`
}

// String returns the string representation
func (s Point) String() string {
	return awsutil.Prettify(s)
}

// Indicates the pose of the face as determined by its pitch, roll, and yaw.
type Pose struct {
	_ struct{} `type:"structure"`

	// Value representing the face rotation on the pitch axis.
	Pitch *float64 `type:"float"`

	// Value representing the face rotation on the roll axis.
	Roll *float64 `type:"float"`

	// Value representing the face rotation on the yaw axis.
	Yaw *float64 `type:"float"`
}

// String returns the string representation
func (s Pose) String() string {
	return awsutil.Prettify(s)
}

// A description of an Amazon Rekognition Custom Labels project.
type ProjectDescription struct {
	_ struct{} `type:"structure"`

	// The Unix timestamp for the date and time that the project was created.
	CreationTimestamp *time.Time `type:"timestamp"`

	// The Amazon Resource Name (ARN) of the project.
	ProjectArn *string `min:"20" type:"string"`

	// The current status of the project.
	Status ProjectStatus `type:"string" enum:"true"`
}

// String returns the string representation
func (s ProjectDescription) String() string {
	return awsutil.Prettify(s)
}
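
// Like BoundingBox, Point coordinates are ratios of the image size (X=0.5,
// Y=0.25 on a 700x200 image is pixel (350,50), per the Point documentation
// above). This sketch, not part of the generated API, converts a Geometry's
// polygon to pixel coordinates for a known image size.
func polygonToPixels(g *Geometry, imageWidth, imageHeight float64) [][2]float64 {
	if g == nil {
		return nil
	}
	points := make([][2]float64, 0, len(g.Polygon))
	for _, p := range g.Polygon {
		var x, y float64
		if p.X != nil {
			x = *p.X * imageWidth
		}
		if p.Y != nil {
			y = *p.Y * imageHeight
		}
		points = append(points, [2]float64{x, y})
	}
	return points
}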

// The description of a version of a model.
type ProjectVersionDescription struct {
	_ struct{} `type:"structure"`

	// The duration, in seconds, that the model version has been billed for training.
	// This value is only returned if the model version has been successfully trained.
	BillableTrainingTimeInSeconds *int64 `type:"long"`

	// The Unix datetime for the date and time that training started.
	CreationTimestamp *time.Time `type:"timestamp"`

	// The training results. EvaluationResult is only returned if training is successful.
	EvaluationResult *EvaluationResult `type:"structure"`

	// The minimum number of inference units used by the model. For more information,
	// see StartProjectVersion.
	MinInferenceUnits *int64 `min:"1" type:"integer"`

	// The location where training results are saved.
	OutputConfig *OutputConfig `type:"structure"`

	// The Amazon Resource Name (ARN) of the model version.
	ProjectVersionArn *string `min:"20" type:"string"`

	// The current status of the model version.
	Status ProjectVersionStatus `type:"string" enum:"true"`

	// A descriptive message for an error or warning that occurred.
	StatusMessage *string `type:"string"`

	// The manifest file that represents the testing results.
	TestingDataResult *TestingDataResult `type:"structure"`

	// The manifest file that represents the training results.
	TrainingDataResult *TrainingDataResult `type:"structure"`

	// The Unix date and time that training of the model ended.
	TrainingEndTimestamp *time.Time `type:"timestamp"`
}

// String returns the string representation
func (s ProjectVersionDescription) String() string {
	return awsutil.Prettify(s)
}

// Specifies a location within the frame that Rekognition checks for text. Uses
// a BoundingBox object to set a region of the screen.
//
// A word is included in the region if the word is more than half in that region.
// If there is more than one region, the word will be compared with all regions
// of the screen. Any word more than half in a region is kept in the results.
type RegionOfInterest struct {
	_ struct{} `type:"structure"`

	// The box representing a region of interest on screen.
	BoundingBox *BoundingBox `type:"structure"`
}

// String returns the string representation
func (s RegionOfInterest) String() string {
	return awsutil.Prettify(s)
}

// Provides the S3 bucket name and object name.
//
// The region for the S3 bucket containing the S3 object must match the region
// you use for Amazon Rekognition operations.
//
// For Amazon Rekognition to process an S3 object, the user must have permission
// to access the S3 object. For more information, see Resource-Based Policies
// in the Amazon Rekognition Developer Guide.
type S3Object struct {
	_ struct{} `type:"structure"`

	// Name of the S3 bucket.
	Bucket *string `min:"3" type:"string"`

	// S3 object key name.
	Name *string `min:"1" type:"string"`

	// If the bucket has versioning enabled, you can specify the object version.
	Version *string `min:"1" type:"string"`
}

// String returns the string representation
func (s S3Object) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *S3Object) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "S3Object"}
	if s.Bucket != nil && len(*s.Bucket) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("Bucket", 3))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("Name", 1))
	}
	if s.Version != nil && len(*s.Version) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("Version", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// A technical cue or shot detection segment detected in a video. An array of
// SegmentDetection objects containing all segments detected in a stored video
// is returned by GetSegmentDetection.
type SegmentDetection struct {
	_ struct{} `type:"structure"`

	// The duration of the detected segment in milliseconds.
	DurationMillis *int64 `type:"long"`

	// The duration of the timecode for the detected segment in SMPTE format.
	DurationSMPTE *string `type:"string"`

	// The frame-accurate SMPTE timecode, from the start of a video, for the end
	// of a detected segment. EndTimecode is in HH:MM:SS:fr format (and ;fr for
	// drop frame-rates).
	EndTimecodeSMPTE *string `type:"string"`

	// The end time of the detected segment, in milliseconds, from the start of
	// the video.
	EndTimestampMillis *int64 `type:"long"`

	// If the segment is a shot detection, contains information about the shot detection.
	ShotSegment *ShotSegment `type:"structure"`

	// The frame-accurate SMPTE timecode, from the start of a video, for the start
	// of a detected segment. StartTimecode is in HH:MM:SS:fr format (and ;fr for
	// drop frame-rates).
	StartTimecodeSMPTE *string `type:"string"`

	// The start time of the detected segment in milliseconds from the start of
	// the video.
	StartTimestampMillis *int64 `type:"long"`

	// If the segment is a technical cue, contains information about the technical
	// cue.
	TechnicalCueSegment *TechnicalCueSegment `type:"structure"`

	// The type of the segment. Valid values are TECHNICAL_CUE and SHOT.
	Type SegmentType `type:"string" enum:"true"`
}

// String returns the string representation
func (s SegmentDetection) String() string {
	return awsutil.Prettify(s)
}

// Information about the type of a segment requested in a call to StartSegmentDetection.
// An array of SegmentTypeInfo objects is returned by the response from GetSegmentDetection.
type SegmentTypeInfo struct {
	_ struct{} `type:"structure"`

	// The version of the model used to detect segments.
	ModelVersion *string `type:"string"`

	// The type of a segment (technical cue or shot detection).
	Type SegmentType `type:"string" enum:"true"`
}

// String returns the string representation
func (s SegmentTypeInfo) String() string {
	return awsutil.Prettify(s)
}

// Information about a shot detection segment detected in a video. For more
// information, see SegmentDetection.
type ShotSegment struct {
	_ struct{} `type:"structure"`

	// The confidence that Amazon Rekognition Video has in the accuracy of the detected
	// segment.
	Confidence *float64 `min:"50" type:"float"`

	// An identifier for a shot detection segment detected in a video.
	Index *int64 `type:"long"`
}

// String returns the string representation
func (s ShotSegment) String() string {
	return awsutil.Prettify(s)
}

// Indicates whether or not the face is smiling, and the confidence level in
// the determination.
type Smile struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the face is smiling or not.
	Value *bool `type:"boolean"`
}

// String returns the string representation
func (s Smile) String() string {
	return awsutil.Prettify(s)
}

// Filters applied to the technical cue or shot detection segments. For more
// information, see StartSegmentDetection.
type StartSegmentDetectionFilters struct {
	_ struct{} `type:"structure"`

	// Filters that are specific to shot detections.
	ShotFilter *StartShotDetectionFilter `type:"structure"`

	// Filters that are specific to technical cues.
	TechnicalCueFilter *StartTechnicalCueDetectionFilter `type:"structure"`
}

// String returns the string representation
func (s StartSegmentDetectionFilters) String() string {
	return awsutil.Prettify(s)
}
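
// Illustrative sketch, not part of the generated API: reading the fields of
// the SegmentDetection results defined above, using the SMPTE timecodes that
// GetSegmentDetection returns for each segment.
func printSegments(segments []SegmentDetection) {
	for _, seg := range segments {
		start, end := "?", "?"
		if seg.StartTimecodeSMPTE != nil {
			start = *seg.StartTimecodeSMPTE
		}
		if seg.EndTimecodeSMPTE != nil {
			end = *seg.EndTimecodeSMPTE
		}
		// Type is a string-based enum: TECHNICAL_CUE or SHOT.
		fmt.Printf("%s segment from %s to %s\n", seg.Type, start, end)
	}
}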

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartSegmentDetectionFilters) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "StartSegmentDetectionFilters"}
	if s.ShotFilter != nil {
		if err := s.ShotFilter.Validate(); err != nil {
			invalidParams.AddNested("ShotFilter", err.(aws.ErrInvalidParams))
		}
	}
	if s.TechnicalCueFilter != nil {
		if err := s.TechnicalCueFilter.Validate(); err != nil {
			invalidParams.AddNested("TechnicalCueFilter", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Filters for the shot detection segments returned by GetSegmentDetection.
// For more information, see StartSegmentDetectionFilters.
type StartShotDetectionFilter struct {
	_ struct{} `type:"structure"`

	// Specifies the minimum confidence that Amazon Rekognition Video must have
	// in order to return a detected segment. Confidence represents how certain
	// Amazon Rekognition is that a segment is correctly identified. 0 is the lowest
	// confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't
	// return any segments with a confidence level lower than this specified value.
	//
	// If you don't specify MinSegmentConfidence, GetSegmentDetection returns
	// segments with confidence values greater than or equal to 50 percent.
	MinSegmentConfidence *float64 `min:"50" type:"float"`
}

// String returns the string representation
func (s StartShotDetectionFilter) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartShotDetectionFilter) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "StartShotDetectionFilter"}
	if s.MinSegmentConfidence != nil && *s.MinSegmentConfidence < 50 {
		invalidParams.Add(aws.NewErrParamMinValue("MinSegmentConfidence", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Filters for the technical cue segments returned by GetSegmentDetection. For
// more information, see StartSegmentDetectionFilters.
type StartTechnicalCueDetectionFilter struct {
	_ struct{} `type:"structure"`

	// Specifies the minimum confidence that Amazon Rekognition Video must have
	// in order to return a detected segment. Confidence represents how certain
	// Amazon Rekognition is that a segment is correctly identified. 0 is the lowest
	// confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't
	// return any segments with a confidence level lower than this specified value.
	//
	// If you don't specify MinSegmentConfidence, GetSegmentDetection returns segments
	// with confidence values greater than or equal to 50 percent.
	MinSegmentConfidence *float64 `min:"50" type:"float"`
}

// String returns the string representation
func (s StartTechnicalCueDetectionFilter) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartTechnicalCueDetectionFilter) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "StartTechnicalCueDetectionFilter"}
	if s.MinSegmentConfidence != nil && *s.MinSegmentConfidence < 50 {
		invalidParams.Add(aws.NewErrParamMinValue("MinSegmentConfidence", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
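
// A minimal sketch, not part of the generated API: raising the minimum
// segment confidence for both shot and technical cue detection to 80 before
// calling StartSegmentDetection. Validate enforces the documented minimum of
// 50 for both filter types.
func exampleSegmentFilters() (*StartSegmentDetectionFilters, error) {
	filters := &StartSegmentDetectionFilters{
		ShotFilter:         &StartShotDetectionFilter{MinSegmentConfidence: aws.Float64(80)},
		TechnicalCueFilter: &StartTechnicalCueDetectionFilter{MinSegmentConfidence: aws.Float64(80)},
	}
	if err := filters.Validate(); err != nil {
		return nil, err
	}
	return filters, nil
}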

// A set of optional parameters that lets you set the criteria that text must
// meet to be included in your response. WordFilter looks at a word's height,
// width, and minimum confidence. RegionOfInterest lets you set a specific
// region of the screen to look for text in.
type StartTextDetectionFilters struct {
	_ struct{} `type:"structure"`

	// Filter focusing on a certain area of the frame. Uses a BoundingBox object
	// to set the region of the screen.
	RegionsOfInterest []RegionOfInterest `type:"list"`

	// Filters focusing on qualities of the text, such as confidence or size.
	WordFilter *DetectionFilter `type:"structure"`
}

// String returns the string representation
func (s StartTextDetectionFilters) String() string {
	return awsutil.Prettify(s)
}

// An object that recognizes faces in a streaming video. An Amazon Rekognition
// stream processor is created by a call to CreateStreamProcessor. The request
// parameters for CreateStreamProcessor describe the Kinesis video stream source
// for the streaming video, face recognition parameters, and where to stream
// the analysis results.
type StreamProcessor struct {
	_ struct{} `type:"structure"`

	// Name of the Amazon Rekognition stream processor.
	Name *string `min:"1" type:"string"`

	// Current status of the Amazon Rekognition stream processor.
	Status StreamProcessorStatus `type:"string" enum:"true"`
}

// String returns the string representation
func (s StreamProcessor) String() string {
	return awsutil.Prettify(s)
}

// Information about the source streaming video.
type StreamProcessorInput struct {
	_ struct{} `type:"structure"`

	// The Kinesis video stream input stream for the source streaming video.
	KinesisVideoStream *KinesisVideoStream `type:"structure"`
}

// String returns the string representation
func (s StreamProcessorInput) String() string {
	return awsutil.Prettify(s)
}

// Information about the Amazon Kinesis Data Streams stream to which an Amazon
// Rekognition Video stream processor streams the results of a video analysis.
// For more information, see CreateStreamProcessor in the Amazon Rekognition
// Developer Guide.
type StreamProcessorOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream
	// processor streams the analysis results.
	KinesisDataStream *KinesisDataStream `type:"structure"`
}

// String returns the string representation
func (s StreamProcessorOutput) String() string {
	return awsutil.Prettify(s)
}

// Input parameters used to recognize faces in a streaming video analyzed by
// an Amazon Rekognition stream processor.
type StreamProcessorSettings struct {
	_ struct{} `type:"structure"`

	// Face search settings to use on a streaming video.
	FaceSearch *FaceSearchSettings `type:"structure"`
}

// String returns the string representation
func (s StreamProcessorSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StreamProcessorSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "StreamProcessorSettings"}
	if s.FaceSearch != nil {
		if err := s.FaceSearch.Validate(); err != nil {
			invalidParams.AddNested("FaceSearch", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
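
// How the stream processor pieces above fit together for a
// CreateStreamProcessor request: a Kinesis video stream in, a Kinesis data
// stream out, and face search settings. An illustrative sketch, not part of
// the generated API; the ARNs and collection ID are hypothetical placeholders.
func exampleStreamProcessorParts() (*StreamProcessorInput, *StreamProcessorOutput, *StreamProcessorSettings) {
	input := &StreamProcessorInput{
		KinesisVideoStream: &KinesisVideoStream{
			Arn: aws.String("arn:aws:kinesisvideo:us-east-1:111122223333:stream/example-source"), // hypothetical
		},
	}
	output := &StreamProcessorOutput{
		KinesisDataStream: &KinesisDataStream{
			Arn: aws.String("arn:aws:kinesis:us-east-1:111122223333:stream/example-results"), // hypothetical
		},
	}
	settings := &StreamProcessorSettings{
		FaceSearch: &FaceSearchSettings{
			CollectionId:       aws.String("my-face-collection"), // hypothetical
			FaceMatchThreshold: aws.Float64(85),
		},
	}
	return input, output, settings
}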

// The S3 bucket that contains the training summary. The training summary includes
// aggregated evaluation metrics for the entire testing dataset and metrics
// for each individual label.
//
// You get the training summary S3 bucket location by calling DescribeProjectVersions.
type Summary struct {
	_ struct{} `type:"structure"`

	// Provides the S3 bucket name and object name.
	//
	// The region for the S3 bucket containing the S3 object must match the region
	// you use for Amazon Rekognition operations.
	//
	// For Amazon Rekognition to process an S3 object, the user must have permission
	// to access the S3 object. For more information, see Resource-Based Policies
	// in the Amazon Rekognition Developer Guide.
	S3Object *S3Object `type:"structure"`
}

// String returns the string representation
func (s Summary) String() string {
	return awsutil.Prettify(s)
}

// Indicates whether or not the face is wearing sunglasses, and the confidence
// level in the determination.
type Sunglasses struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the face is wearing sunglasses or not.
	Value *bool `type:"boolean"`
}

// String returns the string representation
func (s Sunglasses) String() string {
	return awsutil.Prettify(s)
}

// Information about a technical cue segment. For more information, see SegmentDetection.
type TechnicalCueSegment struct {
	_ struct{} `type:"structure"`

	// The confidence that Amazon Rekognition Video has in the accuracy of the detected
	// segment.
	Confidence *float64 `min:"50" type:"float"`

	// The type of the technical cue.
	Type TechnicalCueType `type:"string" enum:"true"`
}

// String returns the string representation
func (s TechnicalCueSegment) String() string {
	return awsutil.Prettify(s)
}

// The dataset used for testing. Optionally, if AutoCreate is set, Amazon Rekognition
// Custom Labels creates a testing dataset using an 80/20 split of the training
// dataset.
type TestingData struct {
	_ struct{} `type:"structure"`

	// The assets used for testing.
	Assets []Asset `type:"list"`

	// If specified, Amazon Rekognition Custom Labels creates a testing dataset
	// with an 80/20 split of the training dataset.
	AutoCreate *bool `type:"boolean"`
}

// String returns the string representation
func (s TestingData) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TestingData) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "TestingData"}
	if s.Assets != nil {
		for i, v := range s.Assets {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Assets", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// A SageMaker Ground Truth format manifest file representing the dataset used
// for testing.
type TestingDataResult struct {
	_ struct{} `type:"structure"`

	// The testing dataset that was supplied for training.
	Input *TestingData `type:"structure"`

	// The subset of the dataset that was actually tested. Some images (assets)
	// might not be tested due to file formatting and other issues.
	Output *TestingData `type:"structure"`
}

// String returns the string representation
func (s TestingDataResult) String() string {
	return awsutil.Prettify(s)
}

// Information about a word or line of text detected by DetectText.
//
// The DetectedText field contains the text that Amazon Rekognition detected
// in the image.
//
// Every word and line has an identifier (Id). Each word belongs to a line and
// has a parent identifier (ParentId) that identifies the line of text in which
// the word appears. The word Id is also an index for the word within a line
// of words.
//
// For more information, see Detecting Text in the Amazon Rekognition Developer
// Guide.
// Information about a word or line of text detected by DetectText.
//
// The DetectedText field contains the text that Amazon Rekognition detected
// in the image.
//
// Every word and line has an identifier (Id). Each word belongs to a line and
// has a parent identifier (ParentId) that identifies the line of text in which
// the word appears. The word Id is also an index for the word within a line
// of words.
//
// For more information, see Detecting Text in the Amazon Rekognition Developer
// Guide.
type TextDetection struct {
	_ struct{} `type:"structure"`

	// The confidence that Amazon Rekognition has in the accuracy of the detected
	// text and the accuracy of the geometry points around the detected text.
	Confidence *float64 `type:"float"`

	// The word or line of text recognized by Amazon Rekognition.
	DetectedText *string `type:"string"`

	// The location of the detected text on the image. Includes an axis-aligned,
	// coarse bounding box surrounding the text and a finer-grain polygon for more
	// accurate spatial information.
	Geometry *Geometry `type:"structure"`

	// The identifier for the detected text. The identifier is only unique for a
	// single call to DetectText.
	Id *int64 `type:"integer"`

	// The parent identifier for the detected text identified by the value of Id.
	// If the type of detected text is LINE, the value of ParentId is null.
	ParentId *int64 `type:"integer"`

	// The type of text that was detected.
	Type TextTypes `type:"string" enum:"true"`
}

// String returns the string representation
func (s TextDetection) String() string {
	return awsutil.Prettify(s)
}

// Information about text detected in a video. Includes the detected text, the
// time in milliseconds from the start of the video that the text was detected,
// and where it was detected on the screen.
type TextDetectionResult struct {
	_ struct{} `type:"structure"`

	// Details about text detected in a video.
	TextDetection *TextDetection `type:"structure"`

	// The time, in milliseconds from the start of the video, that the text was
	// detected.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation
func (s TextDetectionResult) String() string {
	return awsutil.Prettify(s)
}

// The dataset used for training.
type TrainingData struct {
	_ struct{} `type:"structure"`

	// A Sagemaker GroundTruth manifest file that contains the training images (assets).
	Assets []Asset `type:"list"`
}

// String returns the string representation
func (s TrainingData) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TrainingData) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "TrainingData"}
	if s.Assets != nil {
		for i, v := range s.Assets {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Assets", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// A Sagemaker GroundTruth format manifest file that represents the dataset
// used for training.
type TrainingDataResult struct {
	_ struct{} `type:"structure"`

	// The training assets that you supplied for training.
	Input *TrainingData `type:"structure"`

	// The images (assets) that were actually trained by Amazon Rekognition Custom
	// Labels.
	Output *TrainingData `type:"structure"`
}

// String returns the string representation
func (s TrainingDataResult) String() string {
	return awsutil.Prettify(s)
}
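// The Id/ParentId relationship documented on TextDetection lets a caller
// regroup WORD results under the LINE they belong to. The function below is
// an editor-added sketch (not generated code); the helper name is illustrative,
// and it relies only on the fields defined above.
func exampleGroupWordsByLine(detections []TextDetection) map[int64][]TextDetection {
	wordsByLine := make(map[int64][]TextDetection)
	for _, d := range detections {
		// LINE entries carry a nil ParentId; WORD entries point at their line's Id.
		if d.ParentId != nil {
			wordsByLine[*d.ParentId] = append(wordsByLine[*d.ParentId], d)
		}
	}
	return wordsByLine
}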
// A face that IndexFaces detected, but didn't index. Use the Reasons response
// attribute to determine why a face wasn't indexed.
type UnindexedFace struct {
	_ struct{} `type:"structure"`

	// The structure that contains attributes of a face that IndexFaces detected,
	// but didn't index.
	FaceDetail *FaceDetail `type:"structure"`

	// An array of reasons that specify why a face wasn't indexed.
	//
	//    * EXTREME_POSE - The face is at a pose that can't be detected. For example,
	//    the head is turned too far away from the camera.
	//
	//    * EXCEEDS_MAX_FACES - The number of faces detected is already higher than
	//    that specified by the MaxFaces input parameter for IndexFaces.
	//
	//    * LOW_BRIGHTNESS - The image is too dark.
	//
	//    * LOW_SHARPNESS - The image is too blurry.
	//
	//    * LOW_CONFIDENCE - The face was detected with a low confidence.
	//
	//    * SMALL_BOUNDING_BOX - The bounding box around the face is too small.
	Reasons []Reason `type:"list"`
}

// String returns the string representation
func (s UnindexedFace) String() string {
	return awsutil.Prettify(s)
}

// Video file stored in an Amazon S3 bucket. Amazon Rekognition video start
// operations such as StartLabelDetection use Video to specify a video for analysis.
// The supported file formats are .mp4, .mov, and .avi.
type Video struct {
	_ struct{} `type:"structure"`

	// The Amazon S3 bucket name and file name for the video.
	S3Object *S3Object `type:"structure"`
}

// String returns the string representation
func (s Video) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Video) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Video"}
	if s.S3Object != nil {
		if err := s.S3Object.Validate(); err != nil {
			invalidParams.AddNested("S3Object", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Information about a video that Amazon Rekognition analyzed. VideoMetadata
// is returned in every page of paginated responses from an Amazon Rekognition
// video operation.
type VideoMetadata struct {
	_ struct{} `type:"structure"`

	// Type of compression used in the analyzed video.
	Codec *string `type:"string"`

	// Length of the video in milliseconds.
	DurationMillis *int64 `type:"long"`

	// Format of the analyzed video. Possible values are MP4, MOV, and AVI.
	Format *string `type:"string"`

	// Vertical pixel dimension of the video.
	FrameHeight *int64 `type:"long"`

	// Number of frames per second in the video.
	FrameRate *float64 `type:"float"`

	// Horizontal pixel dimension of the video.
	FrameWidth *int64 `type:"long"`
}

// String returns the string representation
func (s VideoMetadata) String() string {
	return awsutil.Prettify(s)
}
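// DurationMillis and FrameRate together imply the approximate number of frames
// in the analyzed video. The function below is an editor-added sketch (not
// generated code); the helper name is illustrative, and both fields may be nil
// on a partially populated response, hence the ok result.
func exampleApproxFrameCount(m VideoMetadata) (int64, bool) {
	if m.DurationMillis == nil || m.FrameRate == nil {
		return 0, false
	}
	seconds := float64(*m.DurationMillis) / 1000.0
	return int64(seconds * *m.FrameRate), true
}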