// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package rekognition

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)

type IndexFacesInput struct {
	_ struct{} `type:"structure"`

	// The ID of an existing collection to which you want to add the faces that
	// are detected in the input images.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`

	// An array of facial attributes that you want to be returned. This can be
	// the default list of attributes or all attributes. If you don't specify a
	// value for Attributes or if you specify ["DEFAULT"], the API returns the
	// following subset of facial attributes: BoundingBox, Confidence, Pose,
	// Quality, and Landmarks. If you provide ["ALL"], all facial attributes are
	// returned, but the operation takes longer to complete.
	//
	// If you provide both ["ALL", "DEFAULT"], the service uses a logical AND
	// operator to determine which attributes to return (in this case, all
	// attributes).
	DetectionAttributes []Attribute `type:"list"`

	// The ID you want to assign to all the faces detected in the image.
	ExternalImageId *string `min:"1" type:"string"`

	// The input image as base64-encoded bytes or an S3 object. If you use the
	// AWS CLI to call Amazon Rekognition operations, passing base64-encoded
	// image bytes isn't supported.
	//
	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
	// to base64-encode image bytes passed using the Bytes field. For more
	// information, see Images in the Amazon Rekognition Developer Guide.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`

	// The maximum number of faces to index. The value of MaxFaces must be
	// greater than or equal to 1. IndexFaces returns no more than 100 detected
	// faces in an image, even if you specify a larger value for MaxFaces.
	//
	// If IndexFaces detects more faces than the value of MaxFaces, the faces
	// with the lowest quality are filtered out first. If there are still more
	// faces than the value of MaxFaces, the faces with the smallest bounding
	// boxes are filtered out (up to the number that's needed to satisfy the
	// value of MaxFaces). Information about the unindexed faces is available
	// in the UnindexedFaces array.
	//
	// The faces returned by IndexFaces are sorted by bounding box size, from
	// largest to smallest.
	//
	// MaxFaces can be used with a collection associated with any version of
	// the face model.
	MaxFaces *int64 `min:"1" type:"integer"`

	// A filter that specifies a quality bar for how much filtering is done to
	// identify faces. Filtered faces aren't indexed. If you specify AUTO,
	// Amazon Rekognition chooses the quality bar. If you specify LOW, MEDIUM,
	// or HIGH, filtering removes all faces that don't meet the chosen quality
	// bar. The default value is AUTO. The quality bar is based on a variety of
	// common use cases. Low-quality detections can occur for a number of
	// reasons. Some examples are an object that's misidentified as a face, a
	// face that's too blurry, or a face with a pose that's too extreme to use.
	// If you specify NONE, no filtering is performed.
	//
	// To use quality filtering, the collection you are using must be associated
	// with version 3 of the face model or higher.
	QualityFilter QualityFilter `type:"string" enum:"true"`
}

// String returns the string representation
func (s IndexFacesInput) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *IndexFacesInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "IndexFacesInput"}

	if s.CollectionId == nil {
		invalidParams.Add(aws.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("CollectionId", 1))
	}
	if s.ExternalImageId != nil && len(*s.ExternalImageId) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("ExternalImageId", 1))
	}
	if s.Image == nil {
		invalidParams.Add(aws.NewErrParamRequired("Image"))
	}
	if s.MaxFaces != nil && *s.MaxFaces < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxFaces", 1))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

type IndexFacesOutput struct {
	_ struct{} `type:"structure"`

	// The version number of the face detection model that's associated with
	// the input collection (CollectionId).
	FaceModelVersion *string `type:"string"`

	// An array of faces detected and added to the collection. For more
	// information, see Searching Faces in a Collection in the Amazon
	// Rekognition Developer Guide.
	FaceRecords []FaceRecord `type:"list"`

	// If your collection is associated with a face detection model that's later
	// than version 3.0, the value of OrientationCorrection is always null and
	// no orientation information is returned.
	//
	// If your collection is associated with a face detection model that's
	// version 3.0 or earlier, the following applies:
	//
	//    * If the input image is in .jpeg format, it might contain exchangeable
	//    image file format (Exif) metadata that includes the image's
	//    orientation. Amazon Rekognition uses this orientation information to
	//    perform image correction - the bounding box coordinates are translated
	//    to represent object locations after the orientation information in the
	//    Exif metadata is used to correct the image orientation. Images in .png
	//    format don't contain Exif metadata. The value of OrientationCorrection
	//    is null.
	//
	//    * If the image doesn't contain orientation information in its Exif
	//    metadata, Amazon Rekognition returns an estimated orientation
	//    (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition
	//    doesn't perform image correction for images. The bounding box
	//    coordinates aren't translated and represent the object locations
	//    before the image is rotated.
	//
	// Bounding box information is returned in the FaceRecords array. You can
	// get the version of the face detection model by calling
	// DescribeCollection.
	OrientationCorrection OrientationCorrection `type:"string" enum:"true"`

	// An array of faces that were detected in the image but weren't indexed.
	// They weren't indexed because the quality filter identified them as low
	// quality, or the MaxFaces request parameter filtered them out. To use the
	// quality filter, you specify the QualityFilter request parameter.
	UnindexedFaces []UnindexedFace `type:"list"`
}

// String returns the string representation
func (s IndexFacesOutput) String() string {
	return awsutil.Prettify(s)
}

const opIndexFaces = "IndexFaces"
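// Example constructing and validating an IndexFacesInput before sending it.
// This is a minimal sketch, not generated code: the collection ID, external
// image ID, bucket, and object key are hypothetical placeholders, and the
// enum values (AttributeDefault, QualityFilterAuto) are assumed to match this
// package's generated constants.
//
//    // Hypothetical placeholder values throughout.
//    input := &IndexFacesInput{
//        CollectionId:        aws.String("my-collection"),
//        ExternalImageId:     aws.String("photo-0001"),
//        MaxFaces:            aws.Int64(5),
//        QualityFilter:       QualityFilterAuto,
//        DetectionAttributes: []Attribute{AttributeDefault},
//        Image: &Image{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("images/photo-0001.jpg"),
//            },
//        },
//    }
//    if err := input.Validate(); err != nil {
//        // A required field is missing or a minimum constraint was violated.
//    }
//
// Calling Validate before building the request surfaces parameter errors
// client-side, without a round trip to the service.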
// IndexFacesRequest returns a request value for making the IndexFaces API
// operation call to Amazon Rekognition.
//
// Detects faces in the input image and adds them to the specified collection.
//
// Amazon Rekognition doesn't save the actual faces that are detected.
// Instead, the underlying detection algorithm first detects the faces in the
// input image. For each face, the algorithm extracts facial features into a
// feature vector and stores it in the backend database. Amazon Rekognition
// uses feature vectors when it performs face match and search operations
// using the SearchFaces and SearchFacesByImage operations.
//
// For more information, see Adding Faces to a Collection in the Amazon
// Rekognition Developer Guide.
//
// To get the number of faces in a collection, call DescribeCollection.
//
// If you're using version 1.0 of the face detection model, IndexFaces indexes
// the 15 largest faces in the input image. Later versions of the face
// detection model index the 100 largest faces in the input image.
//
// If you're using version 4 or later of the face model, image orientation
// information is not returned in the OrientationCorrection field.
//
// To determine which version of the model you're using, call
// DescribeCollection and supply the collection ID. You can also get the model
// version from the value of FaceModelVersion in the response from IndexFaces.
//
// For more information, see Model Versioning in the Amazon Rekognition
// Developer Guide.
//
// If you provide the optional ExternalImageId for the input image, Amazon
// Rekognition associates this ID with all faces that it detects. When you
// call the ListFaces operation, the response returns the external ID. You
// can use this external image ID to create a client-side index to associate
// the faces with each image. You can then use the index to find all faces in
// an image.
//
// You can specify the maximum number of faces to index with the MaxFaces
// input parameter. This is useful when you want to index the largest faces
// in an image and don't want to index smaller faces, such as those belonging
// to people standing in the background.
//
// The QualityFilter input parameter allows you to filter out detected faces
// that don't meet a required quality bar. The quality bar is based on a
// variety of common use cases. By default, IndexFaces chooses the quality
// bar that's used to filter faces. You can also explicitly choose the quality
// bar by setting QualityFilter to LOW, MEDIUM, or HIGH. If you do not want
// to filter detected faces, specify NONE.
//
// To use quality filtering, you need a collection associated with version 3
// of the face model or higher. To get the version of the face model
// associated with a collection, call DescribeCollection.
//
// Information about faces detected in an image, but not indexed, is returned
// in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed
// for reasons such as:
//
//    * The number of faces detected exceeds the value of the MaxFaces request
//    parameter.
//
//    * The face is too small compared to the image dimensions.
//
//    * The face is too blurry.
//
//    * The image is too dark.
//
//    * The face has an extreme pose.
//
//    * The face doesn't have enough detail to be suitable for face search.
//
// In response, the IndexFaces operation returns an array of metadata for all
// detected faces, FaceRecords. This includes:
//
//    * The bounding box, BoundingBox, of the detected face.
//
//    * A confidence value, Confidence, which indicates the confidence that
//    the bounding box contains a face.
//
//    * A face ID, FaceId, assigned by the service for each face that's
//    detected and stored.
//
//    * An image ID, ImageId, assigned by the service for the input image.
//
// If you request all facial attributes (by using the detectionAttributes
// parameter), Amazon Rekognition returns detailed facial attributes, such as
// facial landmarks (for example, location of eye and mouth) and other facial
// attributes. If you provide the same image, specify the same collection,
// and use the same external ID in the IndexFaces operation, Amazon
// Rekognition doesn't save duplicate face metadata.
//
// The input image is passed either as base64-encoded image bytes, or as a
// reference to an image in an Amazon S3 bucket. If you use the AWS CLI to
// call Amazon Rekognition operations, passing image bytes isn't supported.
// The image must be formatted as a PNG or JPEG file.
//
// This operation requires permissions to perform the rekognition:IndexFaces
// action.
//
//    // Example sending a request using IndexFacesRequest.
//    req := client.IndexFacesRequest(params)
//    resp, err := req.Send(context.TODO())
//    if err == nil {
//        fmt.Println(resp)
//    }
func (c *Client) IndexFacesRequest(input *IndexFacesInput) IndexFacesRequest {
	op := &aws.Operation{
		Name:       opIndexFaces,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &IndexFacesInput{}
	}

	req := c.newRequest(op, input, &IndexFacesOutput{})
	return IndexFacesRequest{Request: req, Input: input, Copy: c.IndexFacesRequest}
}

// IndexFacesRequest is the request type for the
// IndexFaces API operation.
type IndexFacesRequest struct {
	*aws.Request
	Input *IndexFacesInput
	Copy  func(*IndexFacesInput) IndexFacesRequest
}

// Send marshals and sends the IndexFaces API request.
func (r IndexFacesRequest) Send(ctx context.Context) (*IndexFacesResponse, error) {
	r.Request.SetContext(ctx)

	err := r.Request.Send()
	if err != nil {
		return nil, err
	}

	resp := &IndexFacesResponse{
		IndexFacesOutput: r.Request.Data.(*IndexFacesOutput),
		response:         &aws.Response{Request: r.Request},
	}

	return resp, nil
}

// IndexFacesResponse is the response type for the
// IndexFaces API operation.
type IndexFacesResponse struct {
	*IndexFacesOutput

	response *aws.Response
}

// SDKResponseMetdata returns the response metadata for the
// IndexFaces request.
func (r *IndexFacesResponse) SDKResponseMetdata() *aws.Response {
	return r.response
}
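// Example of a full IndexFaces round trip using IndexFacesRequest and Send.
// This is a hedged sketch, not generated code: construction of client and
// input is elided (see the IndexFacesInput example above), and the response
// fields iterated below (FaceRecords, UnindexedFaces, Face.FaceId, Reasons)
// are assumed to match this package's generated types.
//
//    req := client.IndexFacesRequest(input)
//    resp, err := req.Send(context.TODO())
//    if err != nil {
//        return err // transport or service error
//    }
//    for _, record := range resp.FaceRecords {
//        // Each indexed face is assigned a FaceId by the service.
//        fmt.Println(*record.Face.FaceId)
//    }
//    for _, unindexed := range resp.UnindexedFaces {
//        // Reasons explains why a detected face was filtered out,
//        // for example by MaxFaces or the quality filter.
//        fmt.Println(unindexed.Reasons)
//    }
//
// Because Send takes a context.Context, callers can apply deadlines or
// cancellation to the request in the usual Go way.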