// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package machinelearning

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)

var _ aws.Config
var _ = awsutil.Prettify

// Represents the output of a GetBatchPrediction operation.
//
// The content consists of the detailed metadata, the status, and the data file
// information of a Batch Prediction.
type BatchPrediction struct {
	_ struct{} `type:"structure"`

	// The ID of the DataSource that points to the group of observations to predict.
	BatchPredictionDataSourceId *string `min:"1" type:"string"`

	// The ID assigned to the BatchPrediction at creation. This value should be
	// identical to the value of the BatchPredictionID in the request.
	BatchPredictionId *string `min:"1" type:"string"`

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64 `type:"long"`

	// The time that the BatchPrediction was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time `type:"timestamp"`

	// The AWS user account that invoked the BatchPrediction. The account type can
	// be either an AWS root account or an AWS Identity and Access Management (IAM)
	// user account.
	CreatedByIamUser *string `type:"string"`

	// A timestamp represented in epoch time.
	FinishedAt *time.Time `type:"timestamp"`

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string `type:"string"`

	// Long integer type that is a 64-bit signed number.
	InvalidRecordCount *int64 `type:"long"`

	// The time of the most recent edit to the BatchPrediction. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp"`

	// The ID of the MLModel that generated predictions for the BatchPrediction
	// request.
	MLModelId *string `min:"1" type:"string"`

	// A description of the most recent details about processing the batch prediction
	// request.
	Message *string `type:"string"`

	// A user-supplied name or description of the BatchPrediction.
	Name *string `type:"string"`

	// The location of an Amazon S3 bucket or directory to receive the operation
	// results. The following substrings are not allowed in the s3 key portion of
	// the outputURI field: ':', '//', '/./', '/../'.
	OutputUri *string `type:"string"`

	// A timestamp represented in epoch time.
	StartedAt *time.Time `type:"timestamp"`

	// The status of the BatchPrediction. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    generate predictions for a batch of observations.
	//
	//    * INPROGRESS - The process is underway.
	//
	//    * FAILED - The request to perform a batch prediction did not run to completion.
	//    It is not usable.
	//
	//    * COMPLETED - The batch prediction process completed successfully.
	//
	//    * DELETED - The BatchPrediction is marked as deleted. It is not usable.
	Status EntityStatus `type:"string" enum:"true"`

	// Long integer type that is a 64-bit signed number.
	TotalRecordCount *int64 `type:"long"`
}

// String returns the string representation
func (s BatchPrediction) String() string {
	return awsutil.Prettify(s)
}

// Represents the output of the GetDataSource operation.
//
// The content consists of the detailed metadata and data file information and
// the current status of the DataSource.
type DataSource struct {
	_ struct{} `type:"structure"`

	// The parameter is true if statistics need to be generated from the observation
	// data.
	ComputeStatistics *bool `type:"boolean"`

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64 `type:"long"`

	// The time that the DataSource was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time `type:"timestamp"`

	// The AWS user account from which the DataSource was created. The account type
	// can be either an AWS root account or an AWS Identity and Access Management
	// (IAM) user account.
	CreatedByIamUser *string `type:"string"`

	// The location and name of the data in Amazon Simple Storage Service (Amazon
	// S3) that is used by a DataSource.
	DataLocationS3 *string `type:"string"`

	// A JSON string that represents the splitting and rearrangement requirement
	// used when this DataSource was created.
	DataRearrangement *string `type:"string"`

	// The total number of observations contained in the data files that the DataSource
	// references.
	DataSizeInBytes *int64 `type:"long"`

	// The ID that is assigned to the DataSource during creation.
	DataSourceId *string `min:"1" type:"string"`

	// A timestamp represented in epoch time.
	FinishedAt *time.Time `type:"timestamp"`

	// The time of the most recent edit to the DataSource. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp"`

	// A description of the most recent details about creating the DataSource.
	Message *string `type:"string"`

	// A user-supplied name or description of the DataSource.
	Name *string `type:"string"`

	// The number of data files referenced by the DataSource.
	NumberOfFiles *int64 `type:"long"`

	// The datasource details that are specific to Amazon RDS.
	RDSMetadata *RDSMetadata `type:"structure"`

	// Describes the DataSource details specific to Amazon Redshift.
	RedshiftMetadata *RedshiftMetadata `type:"structure"`

	// The Amazon Resource Name (ARN) of an AWS IAM Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts),
	// such as the following: arn:aws:iam::account:role/rolename.
	RoleARN *string `min:"1" type:"string"`

	// A timestamp represented in epoch time.
	StartedAt *time.Time `type:"timestamp"`

	// The current status of the DataSource. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    create a DataSource.
	//
	//    * INPROGRESS - The creation process is underway.
	//
	//    * FAILED - The request to create a DataSource did not run to completion.
	//    It is not usable.
	//
	//    * COMPLETED - The creation process completed successfully.
	//
	//    * DELETED - The DataSource is marked as deleted. It is not usable.
	Status EntityStatus `type:"string" enum:"true"`
}

// String returns the string representation
func (s DataSource) String() string {
	return awsutil.Prettify(s)
}
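// Example (not generated): a minimal sketch of interpreting the EntityStatus
// lifecycle documented above when polling a DataSource. It assumes the
// EntityStatus constants (EntityStatusPending, EntityStatusInprogress,
// EntityStatusCompleted, EntityStatusFailed, EntityStatusDeleted) are defined
// in this package's generated enum file; callers would typically re-fetch the
// DataSource between checks.
//
//	func dataSourceReady(ds *DataSource) (done bool, usable bool) {
//		switch ds.Status {
//		case EntityStatusCompleted:
//			return true, true // creation finished; safe to use
//		case EntityStatusFailed, EntityStatusDeleted:
//			return true, false // terminal and unusable; see ds.Message for details
//		default:
//			return false, false // PENDING or INPROGRESS: keep polling
//		}
//	}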
// Represents the output of the GetEvaluation operation.
//
// The content consists of the detailed metadata and data file information and
// the current status of the Evaluation.
type Evaluation struct {
	_ struct{} `type:"structure"`

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64 `type:"long"`

	// The time that the Evaluation was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time `type:"timestamp"`

	// The AWS user account that invoked the evaluation. The account type can be
	// either an AWS root account or an AWS Identity and Access Management (IAM)
	// user account.
	CreatedByIamUser *string `type:"string"`

	// The ID of the DataSource that is used to evaluate the MLModel.
	EvaluationDataSourceId *string `min:"1" type:"string"`

	// The ID that is assigned to the Evaluation at creation.
	EvaluationId *string `min:"1" type:"string"`

	// A timestamp represented in epoch time.
	FinishedAt *time.Time `type:"timestamp"`

	// The location and name of the data in Amazon Simple Storage Service (Amazon
	// S3) that is used in the evaluation.
	InputDataLocationS3 *string `type:"string"`

	// The time of the most recent edit to the Evaluation. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp"`

	// The ID of the MLModel that is the focus of the evaluation.
	MLModelId *string `min:"1" type:"string"`

	// A description of the most recent details about evaluating the MLModel.
	Message *string `type:"string"`

	// A user-supplied name or description of the Evaluation.
	Name *string `type:"string"`

	// Measurements of how well the MLModel performed, using observations referenced
	// by the DataSource. One of the following metrics is returned, based on the
	// type of the MLModel:
	//
	//    * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique
	//    to measure performance.
	//
	//    * RegressionRMSE: A regression MLModel uses the Root Mean Square Error
	//    (RMSE) technique to measure performance. RMSE measures the difference
	//    between predicted and actual values for a single variable.
	//
	//    * MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique
	//    to measure performance.
	//
	// For more information about performance metrics, please see the Amazon Machine
	// Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg).
	PerformanceMetrics *PerformanceMetrics `type:"structure"`

	// A timestamp represented in epoch time.
	StartedAt *time.Time `type:"timestamp"`

	// The status of the evaluation. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    evaluate an MLModel.
	//
	//    * INPROGRESS - The evaluation is underway.
	//
	//    * FAILED - The request to evaluate an MLModel did not run to completion.
	//    It is not usable.
	//
	//    * COMPLETED - The evaluation process completed successfully.
	//
	//    * DELETED - The Evaluation is marked as deleted. It is not usable.
	Status EntityStatus `type:"string" enum:"true"`
}

// String returns the string representation
func (s Evaluation) String() string {
	return awsutil.Prettify(s)
}
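// Example (not generated): a minimal sketch of reading a metric out of a
// completed Evaluation. The Properties map on PerformanceMetrics holds
// string-encoded numbers; keying it by the metric name "BinaryAUC" mirrors
// the metric names documented above but is an assumption about the service's
// response shape. strconv is assumed to be imported by the caller.
//
//	func binaryAUC(ev *Evaluation) (float64, bool) {
//		if ev == nil || ev.PerformanceMetrics == nil {
//			return 0, false
//		}
//		raw, ok := ev.PerformanceMetrics.Properties["BinaryAUC"]
//		if !ok {
//			return 0, false // not a binary model, or metrics not yet available
//		}
//		auc, err := strconv.ParseFloat(raw, 64)
//		return auc, err == nil
//	}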
// Represents the output of a GetMLModel operation.
//
// The content consists of the detailed metadata and the current status of the
// MLModel.
type MLModel struct {
	_ struct{} `type:"structure"`

	// The algorithm used to train the MLModel. The following algorithm is supported:
	//
	//    * SGD -- Stochastic gradient descent. The goal of SGD is to minimize the
	//    gradient of the loss function.
	Algorithm Algorithm `type:"string" enum:"true"`

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64 `type:"long"`

	// The time that the MLModel was created. The time is expressed in epoch time.
	CreatedAt *time.Time `type:"timestamp"`

	// The AWS user account from which the MLModel was created. The account type
	// can be either an AWS root account or an AWS Identity and Access Management
	// (IAM) user account.
	CreatedByIamUser *string `type:"string"`

	// The current endpoint of the MLModel.
	EndpointInfo *RealtimeEndpointInfo `type:"structure"`

	// A timestamp represented in epoch time.
	FinishedAt *time.Time `type:"timestamp"`

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string `type:"string"`

	// The time of the most recent edit to the MLModel. The time is expressed in
	// epoch time.
	LastUpdatedAt *time.Time `type:"timestamp"`

	// The ID assigned to the MLModel at creation.
	MLModelId *string `min:"1" type:"string"`

	// Identifies the MLModel category. The following are the available types:
	//
	//    * REGRESSION - Produces a numeric result. For example, "What price should
	//    a house be listed at?"
	//
	//    * BINARY - Produces one of two possible results. For example, "Is this
	//    a child-friendly web site?".
	//
	//    * MULTICLASS - Produces one of several possible results. For example,
	//    "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
	MLModelType MLModelType `type:"string" enum:"true"`

	// A description of the most recent details about accessing the MLModel.
	Message *string `type:"string"`

	// A user-supplied name or description of the MLModel.
	Name *string `type:"string"`

	ScoreThreshold *float64 `type:"float"`

	// The time of the most recent edit to the ScoreThreshold. The time is expressed
	// in epoch time.
	ScoreThresholdLastUpdatedAt *time.Time `type:"timestamp"`

	// Long integer type that is a 64-bit signed number.
	SizeInBytes *int64 `type:"long"`

	// A timestamp represented in epoch time.
	StartedAt *time.Time `type:"timestamp"`

	// The current status of an MLModel. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    create an MLModel.
	//
	//    * INPROGRESS - The creation process is underway.
	//
	//    * FAILED - The request to create an MLModel didn't run to completion.
	//    The model isn't usable.
	//
	//    * COMPLETED - The creation process completed successfully.
	//
	//    * DELETED - The MLModel is marked as deleted. It isn't usable.
	Status EntityStatus `type:"string" enum:"true"`

	// The ID of the training DataSource. The CreateMLModel operation uses the
	// TrainingDataSourceId.
	TrainingDataSourceId *string `min:"1" type:"string"`

	// A list of the training parameters in the MLModel. The list is implemented
	// as a map of key-value pairs.
	//
	// The following is the current set of training parameters:
	//
	//    * sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending
	//    on the input data, the size of the model might affect its performance.
	//    The value is an integer that ranges from 100000 to 2147483648. The default
	//    value is 33554432.
	//
	//    * sgd.maxPasses - The number of times that the training process traverses
	//    the observations to build the MLModel. The value is an integer that ranges
	//    from 1 to 10000. The default value is 10.
	//
	//    * sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling
	//    the data improves a model's ability to find the optimal solution for a
	//    variety of data types. The valid values are auto and none. The default
	//    value is none.
	//
	//    * sgd.l1RegularizationAmount - The coefficient regularization L1 norm,
	//    which controls overfitting the data by penalizing large coefficients.
	//    This parameter tends to drive coefficients to zero, resulting in a sparse
	//    feature set. If you use this parameter, start by specifying a small value,
	//    such as 1.0E-08. The value is a double that ranges from 0 to MAX_DOUBLE.
	//    The default is to not use L1 normalization. This parameter can't be used
	//    when L2 is specified. Use this parameter sparingly.
	//
	//    * sgd.l2RegularizationAmount - The coefficient regularization L2 norm,
	//    which controls overfitting the data by penalizing large coefficients.
	//    This tends to drive coefficients to small, nonzero values. If you use
	//    this parameter, start by specifying a small value, such as 1.0E-08. The
	//    value is a double that ranges from 0 to MAX_DOUBLE. The default is to
	//    not use L2 normalization. This parameter can't be used when L1 is specified.
	//    Use this parameter sparingly.
	TrainingParameters map[string]string `type:"map"`
}

// String returns the string representation
func (s MLModel) String() string {
	return awsutil.Prettify(s)
}

// Measurements of how well the MLModel performed on known observations. One
// of the following metrics is returned, based on the type of the MLModel:
//
//    * BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique
//    to measure performance.
//
//    * RegressionRMSE: The regression MLModel uses the Root Mean Square Error
//    (RMSE) technique to measure performance. RMSE measures the difference
//    between predicted and actual values for a single variable.
//
//    * MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique
//    to measure performance.
//
// For more information about performance metrics, please see the Amazon Machine
// Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg).
type PerformanceMetrics struct {
	_ struct{} `type:"structure"`

	Properties map[string]string `type:"map"`
}

// String returns the string representation
func (s PerformanceMetrics) String() string {
	return awsutil.Prettify(s)
}

// The output from a Predict operation:
//
//    * Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE
//    - REGRESSION | BINARY | MULTICLASS DetailsAttributes.ALGORITHM - SGD
//
//    * PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request.
//
//    * PredictedScores - Contains the raw classification score corresponding
//    to each label.
//
//    * PredictedValue - Present for a REGRESSION MLModel request.
type Prediction struct {
	_ struct{} `type:"structure"`

	// Provides any additional details regarding the prediction.
	Details map[string]string `locationName:"details" type:"map"`

	// The prediction label for either a BINARY or MULTICLASS MLModel.
	PredictedLabel *string `locationName:"predictedLabel" min:"1" type:"string"`

	// Provides the raw classification score corresponding to each label.
	PredictedScores map[string]float64 `locationName:"predictedScores" type:"map"`

	// The prediction value for a REGRESSION MLModel.
	PredictedValue *float64 `locationName:"predictedValue" type:"float"`
}

// String returns the string representation
func (s Prediction) String() string {
	return awsutil.Prettify(s)
}
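// Example (not generated): a minimal sketch of assembling the TrainingParameters
// map documented above for use with the CreateMLModel operation (whose request
// type lives elsewhere in this package). All values are strings, even when they
// represent numbers, and the specific settings here are illustrative only.
//
//	trainingParams := map[string]string{
//		"sgd.maxPasses":              "30",      // traverse the observations 30 times
//		"sgd.shuffleType":            "auto",    // let Amazon ML shuffle the training data
//		"sgd.l2RegularizationAmount": "1.0E-08", // mild L2; mutually exclusive with L1
//	}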
// The data specification of an Amazon Relational Database Service (Amazon RDS)
// DataSource.
type RDSDataSpec struct {
	_ struct{} `type:"structure"`

	// A JSON string that represents the splitting and rearrangement processing
	// to be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource.
	//
	// There are multiple parameters that control what data is used to create a
	// datasource:
	//
	//    * percentBegin Use percentBegin to indicate the beginning of the range
	//    of the data used to create the Datasource. If you do not include percentBegin
	//    and percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	//    * percentEnd Use percentEnd to indicate the end of the range of the data
	//    used to create the Datasource. If you do not include percentBegin and
	//    percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	//    * complement The complement parameter instructs Amazon ML to use the data
	//    that is not included in the range of percentBegin to percentEnd to create
	//    a datasource. The complement parameter is useful if you need to create
	//    complementary datasources for training and evaluation. To create a complementary
	//    datasource, use the same values for percentBegin and percentEnd, along
	//    with the complement parameter. For example, the following two datasources
	//    do not share any data, and can be used to train and evaluate a model.
	//    The first datasource has 25 percent of the data, and the second one has
	//    75 percent of the data. Datasource for evaluation: {"splitting":{"percentBegin":0,
	//    "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0,
	//    "percentEnd":25, "complement":"true"}}
	//
	//    * strategy To change how Amazon ML splits the data for a datasource, use
	//    the strategy parameter. The default value for the strategy parameter is
	//    sequential, meaning that Amazon ML takes all of the data records between
	//    the percentBegin and percentEnd parameters for the datasource, in the
	//    order that the records appear in the input data. The following two DataRearrangement
	//    lines are examples of sequentially ordered training and evaluation datasources:
	//    Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70,
	//    "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly
	//    split the input data into the proportions indicated by the percentBegin
	//    and percentEnd parameters, set the strategy parameter to random and provide
	//    a string that is used as the seed value for the random data splitting
	//    (for example, you can use the S3 path to your data as the random seed
	//    string). If you choose the random split strategy, Amazon ML assigns each
	//    row of data a pseudo-random number between 0 and 100, and then selects
	//    the rows that have an assigned number between percentBegin and percentEnd.
	//    Pseudo-random numbers are assigned using both the input seed string value
	//    and the byte offset as a seed, so changing the data results in a different
	//    split. Any existing ordering is preserved. The random splitting strategy
	//    ensures that variables in the training and evaluation data are distributed
	//    similarly. It is useful in the cases where the input data may have an
	//    implicit sort order, which would otherwise result in training and evaluation
	//    datasources containing non-similar data records. The following two DataRearrangement
	//    lines are examples of non-sequentially ordered training and evaluation
	//    datasources: Datasource for evaluation: {"splitting":{"percentBegin":70,
	//    "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
	//    Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
	DataRearrangement *string `type:"string"`

	// A JSON string that represents the schema for an Amazon RDS DataSource. The
	// DataSchema defines the structure of the observation data in the data file(s)
	// referenced in the DataSource.
	//
	// A DataSchema is not required if you specify a DataSchemaUri.
	//
	// Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames
	// have an array of key-value pairs for their value. Use the following format
	// to define your DataSchema.
	//
	// { "version": "1.0",
	//
	// "recordAnnotationFieldName": "F1",
	//
	// "recordWeightFieldName": "F2",
	//
	// "targetFieldName": "F3",
	//
	// "dataFormat": "CSV",
	//
	// "dataFileContainsHeader": true,
	//
	// "attributes": [
	//
	// { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType":
	// "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName":
	// "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL"
	// }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType":
	// "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE"
	// } ],
	//
	// "excludedVariableNames": [ "F6" ] }
	DataSchema *string `type:"string"`

	// The Amazon S3 location of the DataSchema.
	DataSchemaUri *string `type:"string"`

	// The AWS Identity and Access Management (IAM) credentials that are used to
	// connect to the Amazon RDS database.
	//
	// DatabaseCredentials is a required field
	DatabaseCredentials *RDSDatabaseCredentials `type:"structure" required:"true"`

	// Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.
	//
	// DatabaseInformation is a required field
	DatabaseInformation *RDSDatabase `type:"structure" required:"true"`

	// The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute
	// Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS
	// to Amazon S3. For more information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
	// for data pipelines.
	//
	// ResourceRole is a required field
	ResourceRole *string `min:"1" type:"string" required:"true"`

	// The Amazon S3 location for staging Amazon RDS data. The data retrieved from
	// Amazon RDS using SelectSqlQuery is stored in this location.
	//
	// S3StagingLocation is a required field
	S3StagingLocation *string `type:"string" required:"true"`

	// The security group IDs to be used to access a VPC-based RDS DB instance.
	// Ensure that there are appropriate ingress rules set up to allow access to
	// the RDS DB instance. This attribute is used by Data Pipeline to carry out
	// the copy operation from Amazon RDS to Amazon S3.
	//
	// SecurityGroupIds is a required field
	SecurityGroupIds []string `type:"list" required:"true"`

	// The query that is used to retrieve the observation data for the DataSource.
	//
	// SelectSqlQuery is a required field
	SelectSqlQuery *string `min:"1" type:"string" required:"true"`

	// The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service
	// to monitor the progress of the copy task from Amazon RDS to Amazon S3. For
	// more information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
	// for data pipelines.
	//
	// ServiceRole is a required field
	ServiceRole *string `min:"1" type:"string" required:"true"`

	// The subnet ID to be used to access a VPC-based RDS DB instance. This attribute
	// is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon
	// S3.
	//
	// SubnetId is a required field
	SubnetId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RDSDataSpec) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RDSDataSpec) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "RDSDataSpec"}
	if s.DatabaseCredentials == nil {
		invalidParams.Add(aws.NewErrParamRequired("DatabaseCredentials"))
	}
	if s.DatabaseInformation == nil {
		invalidParams.Add(aws.NewErrParamRequired("DatabaseInformation"))
	}
	if s.ResourceRole == nil {
		invalidParams.Add(aws.NewErrParamRequired("ResourceRole"))
	}
	if s.ResourceRole != nil && len(*s.ResourceRole) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("ResourceRole", 1))
	}
	if s.S3StagingLocation == nil {
		invalidParams.Add(aws.NewErrParamRequired("S3StagingLocation"))
	}
	if s.SecurityGroupIds == nil {
		invalidParams.Add(aws.NewErrParamRequired("SecurityGroupIds"))
	}
	if s.SelectSqlQuery == nil {
		invalidParams.Add(aws.NewErrParamRequired("SelectSqlQuery"))
	}
	if s.SelectSqlQuery != nil && len(*s.SelectSqlQuery) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("SelectSqlQuery", 1))
	}
	if s.ServiceRole == nil {
		invalidParams.Add(aws.NewErrParamRequired("ServiceRole"))
	}
	if s.ServiceRole != nil && len(*s.ServiceRole) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("ServiceRole", 1))
	}
	if s.SubnetId == nil {
		invalidParams.Add(aws.NewErrParamRequired("SubnetId"))
	}
	if s.SubnetId != nil && len(*s.SubnetId) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("SubnetId", 1))
	}
	if s.DatabaseCredentials != nil {
		if err := s.DatabaseCredentials.Validate(); err != nil {
			invalidParams.AddNested("DatabaseCredentials", err.(aws.ErrInvalidParams))
		}
	}
	if s.DatabaseInformation != nil {
		if err := s.DatabaseInformation.Validate(); err != nil {
			invalidParams.AddNested("DatabaseInformation", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// The database details of an Amazon RDS database.
type RDSDatabase struct {
	_ struct{} `type:"structure"`

	// The name of a database hosted on an RDS DB instance.
	//
	// DatabaseName is a required field
	DatabaseName *string `min:"1" type:"string" required:"true"`

	// The ID of an RDS DB instance.
	//
	// InstanceIdentifier is a required field
	InstanceIdentifier *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RDSDatabase) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RDSDatabase) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "RDSDatabase"}
	if s.DatabaseName == nil {
		invalidParams.Add(aws.NewErrParamRequired("DatabaseName"))
	}
	if s.DatabaseName != nil && len(*s.DatabaseName) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("DatabaseName", 1))
	}
	if s.InstanceIdentifier == nil {
		invalidParams.Add(aws.NewErrParamRequired("InstanceIdentifier"))
	}
	if s.InstanceIdentifier != nil && len(*s.InstanceIdentifier) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("InstanceIdentifier", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
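// Example (not generated): a minimal sketch of the complementary
// DataRearrangement strings described above, giving a random 70/30
// training/evaluation split over the same input data. The S3 seed path is
// illustrative, and the other required RDSDataSpec fields are deliberately
// elided; aws.String is the pointer helper already imported in this file.
//
//	seed := "s3://examplebucket/input/data.csv" // hypothetical seed string
//	trainSpec := &RDSDataSpec{
//		// ... required fields elided ...
//		DataRearrangement: aws.String(
//			`{"splitting":{"percentBegin":0, "percentEnd":70, "strategy":"random", "randomSeed":"` + seed + `"}}`),
//	}
//	evalSpec := &RDSDataSpec{
//		// ... required fields elided ...
//		DataRearrangement: aws.String(
//			`{"splitting":{"percentBegin":0, "percentEnd":70, "strategy":"random", "randomSeed":"` + seed + `", "complement":"true"}}`),
//	}
//
// Because evalSpec asks for the complement of the same range with the same
// seed, the two datasources share no rows: training sees 70 percent of the
// data and evaluation sees the remaining 30 percent.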
// The database credentials to connect to a database on an RDS DB instance.
type RDSDatabaseCredentials struct {
	_ struct{} `type:"structure"`

	// The password to be used by Amazon ML to connect to a database on an RDS DB
	// instance. The password should have sufficient permissions to execute the
	// RDSSelectQuery query.
	//
	// Password is a required field
	Password *string `min:"8" type:"string" required:"true"`

	// The username to be used by Amazon ML to connect to a database on an Amazon
	// RDS instance. The username should have sufficient permissions to execute
	// an RDSSelectSqlQuery query.
	//
	// Username is a required field
	Username *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RDSDatabaseCredentials) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RDSDatabaseCredentials) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "RDSDatabaseCredentials"}
	if s.Password == nil {
		invalidParams.Add(aws.NewErrParamRequired("Password"))
	}
	if s.Password != nil && len(*s.Password) < 8 {
		invalidParams.Add(aws.NewErrParamMinLen("Password", 8))
	}
	if s.Username == nil {
		invalidParams.Add(aws.NewErrParamRequired("Username"))
	}
	if s.Username != nil && len(*s.Username) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("Username", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// The datasource details that are specific to Amazon RDS.
type RDSMetadata struct {
	_ struct{} `type:"structure"`

	// The ID of the Data Pipeline instance that is used to copy data from Amazon
	// RDS to Amazon S3. You can use the ID to find details about the instance in
	// the Data Pipeline console.
	DataPipelineId *string `min:"1" type:"string"`

	// The database details required to connect to an Amazon RDS database.
	Database *RDSDatabase `type:"structure"`

	// The username to be used by Amazon ML to connect to a database on an Amazon
	// RDS instance. The username should have sufficient permissions to execute
	// an RDSSelectSqlQuery query.
	DatabaseUserName *string `min:"1" type:"string"`

	// The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance
	// to carry out the copy task from Amazon RDS to Amazon S3. For more information,
	// see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
	// for data pipelines.
	ResourceRole *string `min:"1" type:"string"`

	// The SQL query that is supplied during CreateDataSourceFromRDS. Returns only
	// if Verbose is true in GetDataSourceInput.
	SelectSqlQuery *string `min:"1" type:"string"`

	// The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to
	// monitor the progress of the copy task from Amazon RDS to Amazon S3. For more
	// information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
	// for data pipelines.
	ServiceRole *string `min:"1" type:"string"`
}

// String returns the string representation
func (s RDSMetadata) String() string {
	return awsutil.Prettify(s)
}

// Describes the real-time endpoint information for an MLModel.
type RealtimeEndpointInfo struct {
	_ struct{} `type:"structure"`

	// The time that the request to create the real-time endpoint for the MLModel
	// was received. The time is expressed in epoch time.
	CreatedAt *time.Time `type:"timestamp"`

	// The current status of the real-time endpoint for the MLModel. This element
	// can have one of the following values:
	//
	//    * NONE - Endpoint does not exist or was previously deleted.
	//
	//    * READY - Endpoint is ready to be used for real-time predictions.
	//
	//    * UPDATING - Updating/creating the endpoint.
	EndpointStatus RealtimeEndpointStatus `type:"string" enum:"true"`

	// The URI that specifies where to send real-time prediction requests for the
	// MLModel.
	//
	// Note: The application must wait until the real-time endpoint is ready before
	// using this URI.
	EndpointUrl *string `type:"string"`

	// The maximum processing rate for the real-time endpoint for the MLModel,
	// measured in incoming requests per second.
	PeakRequestsPerSecond *int64 `type:"integer"`
}

// String returns the string representation
func (s RealtimeEndpointInfo) String() string {
	return awsutil.Prettify(s)
}
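// Example (not generated): a minimal sketch of guarding prediction traffic on
// endpoint readiness, per the note on EndpointUrl above. It assumes the
// RealtimeEndpointStatusReady constant is defined in this package's generated
// enum file.
//
//	func endpointURL(info *RealtimeEndpointInfo) (string, bool) {
//		if info == nil || info.EndpointStatus != RealtimeEndpointStatusReady {
//			return "", false // NONE or UPDATING: not safe to send requests yet
//		}
//		if info.EndpointUrl == nil {
//			return "", false
//		}
//		return *info.EndpointUrl, true
//	}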
// Describes the data specification of an Amazon Redshift DataSource.
type RedshiftDataSpec struct {
	_ struct{} `type:"structure"`

	// A JSON string that represents the splitting and rearrangement processing
	// to be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource.
	//
	// There are multiple parameters that control what data is used to create a
	// datasource:
	//
	//    * percentBegin Use percentBegin to indicate the beginning of the range
	//    of the data used to create the Datasource. If you do not include percentBegin
	//    and percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	//    * percentEnd Use percentEnd to indicate the end of the range of the data
	//    used to create the Datasource. If you do not include percentBegin and
	//    percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	//    * complement The complement parameter instructs Amazon ML to use the data
	//    that is not included in the range of percentBegin to percentEnd to create
	//    a datasource. The complement parameter is useful if you need to create
	//    complementary datasources for training and evaluation. To create a complementary
	//    datasource, use the same values for percentBegin and percentEnd, along
	//    with the complement parameter. For example, the following two datasources
	//    do not share any data, and can be used to train and evaluate a model.
	//    The first datasource has 25 percent of the data, and the second one has
	//    75 percent of the data. Datasource for evaluation: {"splitting":{"percentBegin":0,
	//    "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0,
	//    "percentEnd":25, "complement":"true"}}
	//
	//    * strategy To change how Amazon ML splits the data for a datasource, use
	//    the strategy parameter. The default value for the strategy parameter is
	//    sequential, meaning that Amazon ML takes all of the data records between
	//    the percentBegin and percentEnd parameters for the datasource, in the
	//    order that the records appear in the input data. The following two DataRearrangement
	//    lines are examples of sequentially ordered training and evaluation datasources:
	//    Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70,
	//    "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly
	//    split the input data into the proportions indicated by the percentBegin
	//    and percentEnd parameters, set the strategy parameter to random and provide
	//    a string that is used as the seed value for the random data splitting
	//    (for example, you can use the S3 path to your data as the random seed
	//    string). If you choose the random split strategy, Amazon ML assigns each
	//    row of data a pseudo-random number between 0 and 100, and then selects
	//    the rows that have an assigned number between percentBegin and percentEnd.
	//    Pseudo-random numbers are assigned using both the input seed string value
	//    and the byte offset as a seed, so changing the data results in a different
	//    split. Any existing ordering is preserved. The random splitting strategy
	//    ensures that variables in the training and evaluation data are distributed
	//    similarly. It is useful in the cases where the input data may have an
	//    implicit sort order, which would otherwise result in training and evaluation
	//    datasources containing non-similar data records. The following two DataRearrangement
	//    lines are examples of non-sequentially ordered training and evaluation
	//    datasources: Datasource for evaluation: {"splitting":{"percentBegin":70,
	//    "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
	//    Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
	DataRearrangement *string `type:"string"`

	// A JSON string that represents the schema for an Amazon Redshift DataSource.
	// The DataSchema defines the structure of the observation data in the data
	// file(s) referenced in the DataSource.
	//
	// A DataSchema is not required if you specify a DataSchemaUri.
	//
	// Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames
	// have an array of key-value pairs for their value. Use the following format
	// to define your DataSchema.
	//
	// { "version": "1.0",
	//
	// "recordAnnotationFieldName": "F1",
	//
	// "recordWeightFieldName": "F2",
	//
	// "targetFieldName": "F3",
	//
	// "dataFormat": "CSV",
	//
	// "dataFileContainsHeader": true,
	//
	// "attributes": [
	//
	// { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType":
	// "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName":
	// "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL"
	// }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType":
	// "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE"
	// } ],
	//
	// "excludedVariableNames": [ "F6" ] }
	DataSchema *string `type:"string"`

	// Describes the schema location for an Amazon Redshift DataSource.
	DataSchemaUri *string `type:"string"`

	// Describes the AWS Identity and Access Management (IAM) credentials that are
	// used to connect to the Amazon Redshift database.
	//
	// DatabaseCredentials is a required field
	DatabaseCredentials *RedshiftDatabaseCredentials `type:"structure" required:"true"`

	// Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift DataSource.
	//
	// DatabaseInformation is a required field
	DatabaseInformation *RedshiftDatabase `type:"structure" required:"true"`

	// Describes an Amazon S3 location to store the result set of the SelectSqlQuery
	// query.
	//
	// S3StagingLocation is a required field
	S3StagingLocation *string `type:"string" required:"true"`

	// Describes the SQL Query to execute on an Amazon Redshift database for an
	// Amazon Redshift DataSource.
	//
	// SelectSqlQuery is a required field
	SelectSqlQuery *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RedshiftDataSpec) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RedshiftDataSpec) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "RedshiftDataSpec"}
	if s.DatabaseCredentials == nil {
		invalidParams.Add(aws.NewErrParamRequired("DatabaseCredentials"))
	}
	if s.DatabaseInformation == nil {
		invalidParams.Add(aws.NewErrParamRequired("DatabaseInformation"))
	}
	if s.S3StagingLocation == nil {
		invalidParams.Add(aws.NewErrParamRequired("S3StagingLocation"))
	}
	if s.SelectSqlQuery == nil {
		invalidParams.Add(aws.NewErrParamRequired("SelectSqlQuery"))
	}
	if s.SelectSqlQuery != nil && len(*s.SelectSqlQuery) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("SelectSqlQuery", 1))
	}
	if s.DatabaseCredentials != nil {
		if err := s.DatabaseCredentials.Validate(); err != nil {
			invalidParams.AddNested("DatabaseCredentials", err.(aws.ErrInvalidParams))
		}
	}
	if s.DatabaseInformation != nil {
		if err := s.DatabaseInformation.Validate(); err != nil {
			invalidParams.AddNested("DatabaseInformation", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Describes the database details required to connect to an Amazon Redshift
// database.
type RedshiftDatabase struct {
	_ struct{} `type:"structure"`

	// The ID of an Amazon Redshift cluster.
	//
	// ClusterIdentifier is a required field
	ClusterIdentifier *string `min:"1" type:"string" required:"true"`

	// The name of a database hosted on an Amazon Redshift cluster.
	//
	// DatabaseName is a required field
	DatabaseName *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RedshiftDatabase) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RedshiftDatabase) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "RedshiftDatabase"}
	if s.ClusterIdentifier == nil {
		invalidParams.Add(aws.NewErrParamRequired("ClusterIdentifier"))
	}
	if s.ClusterIdentifier != nil && len(*s.ClusterIdentifier) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("ClusterIdentifier", 1))
	}
	if s.DatabaseName == nil {
		invalidParams.Add(aws.NewErrParamRequired("DatabaseName"))
	}
	if s.DatabaseName != nil && len(*s.DatabaseName) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("DatabaseName", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
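// Example (not generated): a minimal sketch of building a RedshiftDataSpec and
// checking it client-side with Validate before calling the service. All
// identifiers, credentials, and locations are illustrative.
//
//	spec := &RedshiftDataSpec{
//		DatabaseInformation: &RedshiftDatabase{
//			ClusterIdentifier: aws.String("example-cluster"),
//			DatabaseName:      aws.String("exampledb"),
//		},
//		DatabaseCredentials: &RedshiftDatabaseCredentials{
//			Username: aws.String("ml_reader"),
//			Password: aws.String("at-least-8-chars"),
//		},
//		SelectSqlQuery:    aws.String("SELECT * FROM observations"),
//		S3StagingLocation: aws.String("s3://examplebucket/staging/"),
//	}
//	if err := spec.Validate(); err != nil {
//		// err is an aws.ErrInvalidParams listing every missing or too-short field
//	}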
// Describes the database credentials for connecting to a database on an Amazon
// Redshift cluster.
type RedshiftDatabaseCredentials struct {
	_ struct{} `type:"structure"`

	// A password to be used by Amazon ML to connect to a database on an Amazon
	// Redshift cluster. The password should have sufficient permissions to execute
	// a RedshiftSelectSqlQuery query. The password should be valid for an Amazon
	// Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
	//
	// Password is a required field
	Password *string `min:"8" type:"string" required:"true"`

	// A username to be used by Amazon Machine Learning (Amazon ML) to connect to
	// a database on an Amazon Redshift cluster. The username should have sufficient
	// permissions to execute the RedshiftSelectSqlQuery query. The username should
	// be valid for an Amazon Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
	//
	// Username is a required field
	Username *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RedshiftDatabaseCredentials) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RedshiftDatabaseCredentials) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "RedshiftDatabaseCredentials"}
	if s.Password == nil {
		invalidParams.Add(aws.NewErrParamRequired("Password"))
	}
	if s.Password != nil && len(*s.Password) < 8 {
		invalidParams.Add(aws.NewErrParamMinLen("Password", 8))
	}
	if s.Username == nil {
		invalidParams.Add(aws.NewErrParamRequired("Username"))
	}
	if s.Username != nil && len(*s.Username) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("Username", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Describes the DataSource details specific to Amazon Redshift.
type RedshiftMetadata struct {
	_ struct{} `type:"structure"`

	// A username to be used by Amazon Machine Learning (Amazon ML) to connect to
	// a database on an Amazon Redshift cluster. The username should have sufficient
	// permissions to execute the RedshiftSelectSqlQuery query. The username should
	// be valid for an Amazon Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
	DatabaseUserName *string `min:"1" type:"string"`

	// Describes the database details required to connect to an Amazon Redshift
	// database.
	RedshiftDatabase *RedshiftDatabase `type:"structure"`

	// The SQL query that is specified during CreateDataSourceFromRedshift. Returns
	// only if Verbose is true in GetDataSourceInput.
	SelectSqlQuery *string `min:"1" type:"string"`
}

// String returns the string representation
func (s RedshiftMetadata) String() string {
	return awsutil.Prettify(s)
}

// Describes the data specification of a DataSource.
type S3DataSpec struct {
	_ struct{} `type:"structure"`

	// The location of the data file(s) used by a DataSource. The URI specifies
	// a data file or an Amazon Simple Storage Service (Amazon S3) directory or
	// bucket containing data files.
	//
	// DataLocationS3 is a required field
	DataLocationS3 *string `type:"string" required:"true"`

	// A JSON string that represents the splitting and rearrangement processing
	// to be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource.
	//
	// There are multiple parameters that control what data is used to create a
	// datasource:
	//
	//    * percentBegin Use percentBegin to indicate the beginning of the range
	//    of the data used to create the Datasource. If you do not include percentBegin
	//    and percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	//    * percentEnd Use percentEnd to indicate the end of the range of the data
	//    used to create the Datasource. If you do not include percentBegin and
	//    percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	//    * complement The complement parameter instructs Amazon ML to use the data
	//    that is not included in the range of percentBegin to percentEnd to create
	//    a datasource. The complement parameter is useful if you need to create
	//    complementary datasources for training and evaluation. To create a complementary
	//    datasource, use the same values for percentBegin and percentEnd, along
	//    with the complement parameter. For example, the following two datasources
	//    do not share any data, and can be used to train and evaluate a model.
	//    The first datasource has 25 percent of the data, and the second one has
	//    75 percent of the data. Datasource for evaluation: {"splitting":{"percentBegin":0,
	//    "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0,
	//    "percentEnd":25, "complement":"true"}}
	//
	//    * strategy To change how Amazon ML splits the data for a datasource, use
	//    the strategy parameter. The default value for the strategy parameter is
	//    sequential, meaning that Amazon ML takes all of the data records between
	//    the percentBegin and percentEnd parameters for the datasource, in the
	//    order that the records appear in the input data. The following two DataRearrangement
	//    lines are examples of sequentially ordered training and evaluation datasources:
	//    Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70,
	//    "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly
	//    split the input data into the proportions indicated by the percentBegin
	//    and percentEnd parameters, set the strategy parameter to random and provide
	//    a string that is used as the seed value for the random data splitting
	//    (for example, you can use the S3 path to your data as the random seed
	//    string). If you choose the random split strategy, Amazon ML assigns each
	//    row of data a pseudo-random number between 0 and 100, and then selects
	//    the rows that have an assigned number between percentBegin and percentEnd.
	//    Pseudo-random numbers are assigned using both the input seed string value
	//    and the byte offset as a seed, so changing the data results in a different
	//    split. Any existing ordering is preserved. The random splitting strategy
	//    ensures that variables in the training and evaluation data are distributed
	//    similarly. It is useful in the cases where the input data may have an
	//    implicit sort order, which would otherwise result in training and evaluation
	//    datasources containing non-similar data records. The following two DataRearrangement
	//    lines are examples of non-sequentially ordered training and evaluation
	//    datasources: Datasource for evaluation: {"splitting":{"percentBegin":70,
	//    "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
	//    Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
	DataRearrangement *string `type:"string"`

	// A JSON string that represents the schema for an Amazon S3 DataSource. The
	// DataSchema defines the structure of the observation data in the data file(s)
	// referenced in the DataSource.
	//
	// You must provide either the DataSchema or the DataSchemaLocationS3.
	//
	// Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames
	// have an array of key-value pairs for their value. Use the following format
	// to define your DataSchema.
	//
	// { "version": "1.0",
	//
	// "recordAnnotationFieldName": "F1",
	//
	// "recordWeightFieldName": "F2",
	//
	// "targetFieldName": "F3",
	//
	// "dataFormat": "CSV",
	//
	// "dataFileContainsHeader": true,
	//
	// "attributes": [
	//
	// { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType":
	// "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName":
	// "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL"
	// }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType":
	// "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE"
	// } ],
	//
	// "excludedVariableNames": [ "F6" ] }
	DataSchema *string `type:"string"`

	// Describes the schema location in Amazon S3. You must provide either the DataSchema
	// or the DataSchemaLocationS3.
	DataSchemaLocationS3 *string `type:"string"`
}

// String returns the string representation
func (s S3DataSpec) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *S3DataSpec) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "S3DataSpec"}
	if s.DataLocationS3 == nil {
		invalidParams.Add(aws.NewErrParamRequired("DataLocationS3"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// A custom key-value pair associated with an ML object, such as an ML model.
type Tag struct {
	_ struct{} `type:"structure"`

	// A unique identifier for the tag. Valid characters include Unicode letters,
	// digits, white space, _, ., /, =, +, -, %, and @.
	Key *string `min:"1" type:"string"`

	// An optional string, typically used to describe or define the tag. Valid characters
	// include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.
	Value *string `type:"string"`
}

// String returns the string representation
func (s Tag) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Tag) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Tag"}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
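// Example (not generated): a minimal sketch of an S3DataSpec that supplies an
// inline DataSchema in the format documented above, for a three-column CSV
// with two features and a numeric target. The bucket, file, and field names
// are illustrative.
//
//	schema := `{"version":"1.0",
//	 "targetFieldName":"price",
//	 "dataFormat":"CSV",
//	 "dataFileContainsHeader":true,
//	 "attributes":[
//	  {"fieldName":"sqft","fieldType":"NUMERIC"},
//	  {"fieldName":"zip","fieldType":"CATEGORICAL"},
//	  {"fieldName":"price","fieldType":"NUMERIC"}]}`
//	spec := &S3DataSpec{
//		DataLocationS3: aws.String("s3://examplebucket/houses.csv"),
//		DataSchema:     aws.String(schema),
//	}
//	if err := spec.Validate(); err != nil {
//		// only DataLocationS3 is checked client-side; schema errors surface server-side
//	}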