// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package mediaconvert

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
	"github.com/aws/aws-sdk-go-v2/private/protocol"
)

// Blank references keep the imports live for shapes elsewhere in this
// generated file that may not use them directly.
var _ aws.Config
var _ = awsutil.Prettify

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AAC. The service accepts one of two mutually exclusive groups of
// AAC settings--VBR and CBR. To select one of these modes, set the value of
// Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you
// control the audio quality with the setting VBR quality (vbrQuality). In CBR
// mode, you use the setting Bitrate (bitrate). Defaults and valid values depend
// on the rate control mode.
type AacSettings struct {
	_ struct{} `type:"structure"`

	// Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio
	// + audio description (AD) as a stereo pair. The value for AudioType will be
	// set to 3, which signals to downstream systems that this stream contains "broadcaster
	// mixed AD". Note that the input received by the encoder must contain pre-mixed
	// audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD,
	// the encoder ignores any values you provide in AudioType and FollowInputAudioType.
	// Choose NORMAL when the input does not contain pre-mixed audio + audio description
	// (AD). In this case, the encoder will use any values you provide for AudioType
	// and FollowInputAudioType.
	AudioDescriptionBroadcasterMix AacAudioDescriptionBroadcasterMix `locationName:"audioDescriptionBroadcasterMix" type:"string" enum:"true"`

	// Specify the average bitrate in bits per second. The set of valid values for
	// this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000,
	// 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000,
	// 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000,
	// 768000, 896000, 1024000. The value you set is also constrained by the values
	// that you choose for Profile (codecProfile), Bitrate control mode (codingMode),
	// and Sample rate (sampleRate). Default values depend on Bitrate control mode
	// and Profile.
	Bitrate *int64 `locationName:"bitrate" min:"6000" type:"integer"`

	// AAC Profile.
	CodecProfile AacCodecProfile `locationName:"codecProfile" type:"string" enum:"true"`

	// Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. Valid values
	// depend on rate control mode and profile. "1.0 - Audio Description (Receiver
	// Mix)" setting receives a stereo description plus control track and emits
	// a mono AAC encode of the description track, with control data emitted in
	// the PES header as per ETSI TS 101 154 Annex E.
	CodingMode AacCodingMode `locationName:"codingMode" type:"string" enum:"true"`

	// Rate Control Mode.
	RateControlMode AacRateControlMode `locationName:"rateControlMode" type:"string" enum:"true"`

	// Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output,
	// you must choose "No container" for the output container.
	RawFormat AacRawFormat `locationName:"rawFormat" type:"string" enum:"true"`

	// Sample rate in Hz. Valid values depend on rate control mode and profile.
	SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`

	// Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream
	// containers.
	Specification AacSpecification `locationName:"specification" type:"string" enum:"true"`

	// VBR Quality Level - Only used if rate_control_mode is VBR.
	VbrQuality AacVbrQuality `locationName:"vbrQuality" type:"string" enum:"true"`
}

// String returns the string representation
func (s AacSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum bounds declared in the API model are enforced client-side;
// enum values and cross-field constraints are checked by the service.
func (s *AacSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AacSettings"}
	if s.Bitrate != nil && *s.Bitrate < 6000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 6000))
	}
	if s.SampleRate != nil && *s.SampleRate < 8000 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 8000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Fields are emitted in alphabetical order of their JSON location names; unset
// optional fields (nil pointers, empty enum strings) are omitted from the body.
func (s AacSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.AudioDescriptionBroadcasterMix) > 0 {
		v := s.AudioDescriptionBroadcasterMix
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioDescriptionBroadcasterMix", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Bitrate != nil {
		v := *s.Bitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.CodecProfile) > 0 {
		v := s.CodecProfile
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecProfile", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.CodingMode) > 0 {
		v := s.CodingMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codingMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.RateControlMode) > 0 {
		v := s.RateControlMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateControlMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.RawFormat) > 0 {
		v := s.RawFormat
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rawFormat", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}
	if len(s.Specification) > 0 {
		v := s.Specification
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "specification", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.VbrQuality) > 0 {
		v := s.VbrQuality
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "vbrQuality", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AC3.
type Ac3Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second. Valid bitrates depend on
	// the coding mode.
	Bitrate *int64 `locationName:"bitrate" min:"64000" type:"integer"`

	// Specify the bitstream mode for the AC-3 stream that the encoder emits. For
	// more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex
	// E).
	BitstreamMode Ac3BitstreamMode `locationName:"bitstreamMode" type:"string" enum:"true"`

	// Dolby Digital coding mode. Determines number of channels.
	CodingMode Ac3CodingMode `locationName:"codingMode" type:"string" enum:"true"`

	// Sets the dialnorm for the output. If blank and input audio is Dolby Digital,
	// dialnorm will be passed through.
	Dialnorm *int64 `locationName:"dialnorm" min:"1" type:"integer"`

	// If set to FILM_STANDARD, adds dynamic range compression signaling to the
	// output bitstream as defined in the Dolby Digital specification.
	DynamicRangeCompressionProfile Ac3DynamicRangeCompressionProfile `locationName:"dynamicRangeCompressionProfile" type:"string" enum:"true"`

	// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
	// valid with 3_2_LFE coding mode.
	LfeFilter Ac3LfeFilter `locationName:"lfeFilter" type:"string" enum:"true"`

	// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+,
	// or DolbyE decoder that supplied this audio data. If audio was not supplied
	// from one of these streams, then the static metadata settings will be used.
	MetadataControl Ac3MetadataControl `locationName:"metadataControl" type:"string" enum:"true"`

	// This value is always 48000. It represents the sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"`
}

// String returns the string representation
func (s Ac3Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum bounds declared in the API model are enforced client-side.
func (s *Ac3Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Ac3Settings"}
	if s.Bitrate != nil && *s.Bitrate < 64000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 64000))
	}
	if s.Dialnorm != nil && *s.Dialnorm < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Dialnorm", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 48000 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 48000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Fields are emitted in alphabetical order of their JSON location names; unset
// optional fields (nil pointers, empty enum strings) are omitted from the body.
func (s Ac3Settings) MarshalFields(e protocol.FieldEncoder) error {
	if s.Bitrate != nil {
		v := *s.Bitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.BitstreamMode) > 0 {
		v := s.BitstreamMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitstreamMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.CodingMode) > 0 {
		v := s.CodingMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codingMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Dialnorm != nil {
		v := *s.Dialnorm
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dialnorm", protocol.Int64Value(v), metadata)
	}
	if len(s.DynamicRangeCompressionProfile) > 0 {
		v := s.DynamicRangeCompressionProfile
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dynamicRangeCompressionProfile", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.LfeFilter) > 0 {
		v := s.LfeFilter
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "lfeFilter", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.MetadataControl) > 0 {
		v := s.MetadataControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "metadataControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Accelerated transcoding can significantly speed up jobs with long, visually
// complex content.
type AccelerationSettings struct {
	_ struct{} `type:"structure"`

	// Specify the conditions when the service will run your job with accelerated
	// transcoding.
	//
	// Mode is a required field
	Mode AccelerationMode `locationName:"mode" type:"string" required:"true" enum:"true"`
}

// String returns the string representation
func (s AccelerationSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Mode is the only required field in this shape; an empty enum value means unset.
func (s *AccelerationSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AccelerationSettings"}
	if len(s.Mode) == 0 {
		invalidParams.Add(aws.NewErrParamRequired("Mode"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s AccelerationSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Mode) > 0 {
		v := s.Mode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "mode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AIFF.
type AiffSettings struct {
	_ struct{} `type:"structure"`

	// Specify Bit depth (BitDepth), in bits per sample, to choose the encoding
	// quality for this audio track.
	BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"`

	// Specify the number of channels in this output audio track. Valid values are
	// 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Sample rate in hz.
	SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`
}

// String returns the string representation
func (s AiffSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum bounds declared in the API model are enforced client-side.
func (s *AiffSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AiffSettings"}
	if s.BitDepth != nil && *s.BitDepth < 16 {
		invalidParams.Add(aws.NewErrParamMinValue("BitDepth", 16))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 8000 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 8000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Unset (nil) optional fields are omitted from the body.
func (s AiffSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.BitDepth != nil {
		v := *s.BitDepth
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitDepth", protocol.Int64Value(v), metadata)
	}
	if s.Channels != nil {
		v := *s.Channels
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "channels", protocol.Int64Value(v), metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Settings for ancillary captions source.
type AncillarySourceSettings struct {
	_ struct{} `type:"structure"`

	// Specify whether this set of input captions appears in your outputs in both
	// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
	// the captions data in two ways: it passes the 608 data through using the 608
	// compatibility bytes fields of the 708 wrapper, and it also translates the
	// 608 data into 708.
	Convert608To708 AncillaryConvert608To708 `locationName:"convert608To708" type:"string" enum:"true"`

	// Specifies the 608 channel number in the ancillary data track from which to
	// extract captions. Unused for passthrough.
	SourceAncillaryChannelNumber *int64 `locationName:"sourceAncillaryChannelNumber" min:"1" type:"integer"`

	// By default, the service terminates any unterminated captions at the end of
	// each input. If you want the caption to continue onto your next input, disable
	// this setting.
	TerminateCaptions AncillaryTerminateCaptions `locationName:"terminateCaptions" type:"string" enum:"true"`
}

// String returns the string representation
func (s AncillarySourceSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum bound on the channel number is enforced client-side.
func (s *AncillarySourceSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AncillarySourceSettings"}
	if s.SourceAncillaryChannelNumber != nil && *s.SourceAncillaryChannelNumber < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("SourceAncillaryChannelNumber", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s AncillarySourceSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Convert608To708) > 0 {
		v := s.Convert608To708
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "convert608To708", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SourceAncillaryChannelNumber != nil {
		v := *s.SourceAncillaryChannelNumber
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sourceAncillaryChannelNumber", protocol.Int64Value(v), metadata)
	}
	if len(s.TerminateCaptions) > 0 {
		v := s.TerminateCaptions
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "terminateCaptions", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Audio codec settings (CodecSettings) under (AudioDescriptions) contains the
// group of settings related to audio encoding. The settings in this group vary
// depending on the value that you choose for Audio codec (Codec). For each
// codec enum that you choose, define the corresponding settings object. The
// following lists the codec enum, settings object pairs.
// * AAC, AacSettings
// * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings
// * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings *
// VORBIS, VorbisSettings * OPUS, OpusSettings
type AudioCodecSettings struct {
	_ struct{} `type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value AAC. The service accepts one of two mutually exclusive groups of
	// AAC settings--VBR and CBR. To select one of these modes, set the value of
	// Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you
	// control the audio quality with the setting VBR quality (vbrQuality). In CBR
	// mode, you use the setting Bitrate (bitrate). Defaults and valid values depend
	// on the rate control mode.
	AacSettings *AacSettings `locationName:"aacSettings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value AC3.
	Ac3Settings *Ac3Settings `locationName:"ac3Settings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value AIFF.
	AiffSettings *AiffSettings `locationName:"aiffSettings" type:"structure"`

	// Type of Audio codec.
	Codec AudioCodec `locationName:"codec" type:"string" enum:"true"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value EAC3_ATMOS.
	Eac3AtmosSettings *Eac3AtmosSettings `locationName:"eac3AtmosSettings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value EAC3.
	Eac3Settings *Eac3Settings `locationName:"eac3Settings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value MP2.
	Mp2Settings *Mp2Settings `locationName:"mp2Settings" type:"structure"`

	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
	// value MP3.
	Mp3Settings *Mp3Settings `locationName:"mp3Settings" type:"structure"`

	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
	// value OPUS.
	OpusSettings *OpusSettings `locationName:"opusSettings" type:"structure"`

	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
	// value Vorbis.
	VorbisSettings *VorbisSettings `locationName:"vorbisSettings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value WAV.
	WavSettings *WavSettings `locationName:"wavSettings" type:"structure"`
}

// String returns the string representation
func (s AudioCodecSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Validation is delegated to each non-nil per-codec settings struct; errors
// are nested under the corresponding field name.
func (s *AudioCodecSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AudioCodecSettings"}
	if s.AacSettings != nil {
		if err := s.AacSettings.Validate(); err != nil {
			invalidParams.AddNested("AacSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Ac3Settings != nil {
		if err := s.Ac3Settings.Validate(); err != nil {
			invalidParams.AddNested("Ac3Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.AiffSettings != nil {
		if err := s.AiffSettings.Validate(); err != nil {
			invalidParams.AddNested("AiffSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Eac3AtmosSettings != nil {
		if err := s.Eac3AtmosSettings.Validate(); err != nil {
			invalidParams.AddNested("Eac3AtmosSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Eac3Settings != nil {
		if err := s.Eac3Settings.Validate(); err != nil {
			invalidParams.AddNested("Eac3Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Mp2Settings != nil {
		if err := s.Mp2Settings.Validate(); err != nil {
			invalidParams.AddNested("Mp2Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Mp3Settings != nil {
		if err := s.Mp3Settings.Validate(); err != nil {
			invalidParams.AddNested("Mp3Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.OpusSettings != nil {
		if err := s.OpusSettings.Validate(); err != nil {
			invalidParams.AddNested("OpusSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.VorbisSettings != nil {
		if err := s.VorbisSettings.Validate(); err != nil {
			invalidParams.AddNested("VorbisSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.WavSettings != nil {
		if err := s.WavSettings.Validate(); err != nil {
			invalidParams.AddNested("WavSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Nested structures use SetFields; the codec enum is emitted as a quoted string.
func (s AudioCodecSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.AacSettings != nil {
		v := s.AacSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "aacSettings", v, metadata)
	}
	if s.Ac3Settings != nil {
		v := s.Ac3Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "ac3Settings", v, metadata)
	}
	if s.AiffSettings != nil {
		v := s.AiffSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "aiffSettings", v, metadata)
	}
	if len(s.Codec) > 0 {
		v := s.Codec
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codec", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Eac3AtmosSettings != nil {
		v := s.Eac3AtmosSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "eac3AtmosSettings", v, metadata)
	}
	if s.Eac3Settings != nil {
		v := s.Eac3Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "eac3Settings", v, metadata)
	}
	if s.Mp2Settings != nil {
		v := s.Mp2Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "mp2Settings", v, metadata)
	}
	if s.Mp3Settings != nil {
		v := s.Mp3Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "mp3Settings", v, metadata)
	}
	if s.OpusSettings != nil {
		v := s.OpusSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "opusSettings", v, metadata)
	}
	if s.VorbisSettings != nil {
		v := s.VorbisSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "vorbisSettings", v, metadata)
	}
	if s.WavSettings != nil {
		v := s.WavSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "wavSettings", v, metadata)
	}
	return nil
}

// Description of audio output
type AudioDescription struct {
	_ struct{} `type:"structure"`

	// Advanced audio normalization settings. Ignore these settings unless you need
	// to comply with a loudness standard.
	AudioNormalizationSettings *AudioNormalizationSettings `locationName:"audioNormalizationSettings" type:"structure"`

	// Specifies which audio data to use from each input. In the simplest case,
	// specify an "Audio Selector":#inputs-audio_selector by name based on its order
	// within each input. For example if you specify "Audio Selector 3", then the
	// third audio selector will be used from each input. If an input does not have
	// an "Audio Selector 3", then the audio selector marked as "default" in that
	// input will be used. If there is no audio selector marked as "default", silence
	// will be inserted for the duration of that input. Alternatively, an "Audio
	// Selector Group":#inputs-audio_selector_group name may be specified, with
	// similar default/silence behavior. If no audio_source_name is specified, then
	// "Audio Selector 1" will be chosen automatically.
	AudioSourceName *string `locationName:"audioSourceName" type:"string"`

	// Applies only if Follow Input Audio Type is unchecked (false). A number between
	// 0 and 255. The following are defined in ISO-IEC 13818-1: 0 = Undefined, 1
	// = Clean Effects, 2 = Hearing Impaired, 3 = Visually Impaired Commentary,
	// 4-255 = Reserved.
	AudioType *int64 `locationName:"audioType" type:"integer"`

	// When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then
	// that value is passed through to the output. If the input contains no ISO
	// 639 audio_type, the value in Audio Type is included in the output. Otherwise
	// the value in Audio Type is included in the output. Note that this field and
	// audioType are both ignored if audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD.
	AudioTypeControl AudioTypeControl `locationName:"audioTypeControl" type:"string" enum:"true"`

	// Audio codec settings (CodecSettings) under (AudioDescriptions) contains the
	// group of settings related to audio encoding. The settings in this group vary
	// depending on the value that you choose for Audio codec (Codec). For each
	// codec enum that you choose, define the corresponding settings object. The
	// following lists the codec enum, settings object pairs. * AAC, AacSettings
	// * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings
	// * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings *
	// VORBIS, VorbisSettings * OPUS, OpusSettings
	CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"`

	// Specify the language for this audio output track. The service puts this language
	// code into your output audio track when you set Language code control (AudioLanguageCodeControl)
	// to Use configured (USE_CONFIGURED). The service also uses your specified
	// custom language code when you set Language code control (AudioLanguageCodeControl)
	// to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language
	// code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming
	// outputs, you can also use any other code in the full RFC-5646 specification.
	// Streaming outputs are those that are in one of the following output groups:
	// CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.
	CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"`

	// Indicates the language of the audio output track. The ISO 639 language specified
	// in the 'Language Code' drop down will be used when 'Follow Input Language
	// Code' is not selected or when 'Follow Input Language Code' is selected but
	// there is no ISO 639 language code specified by the input.
	LanguageCode LanguageCode `locationName:"languageCode" type:"string" enum:"true"`

	// Specify which source for language code takes precedence for this audio track.
	// When you choose Follow input (FOLLOW_INPUT), the service uses the language
	// code from the input track if it's present. If there's no languge code on
	// the input track, the service uses the code that you specify in the setting
	// Language code (languageCode or customLanguageCode). When you choose Use configured
	// (USE_CONFIGURED), the service uses the language code that you specify.
	LanguageCodeControl AudioLanguageCodeControl `locationName:"languageCodeControl" type:"string" enum:"true"`

	// Advanced audio remixing settings.
	RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"`

	// Specify a label for this output audio stream. For example, "English", "Director
	// commentary", or "track_2". For streaming outputs, MediaConvert passes this
	// information into destination manifests for display on the end-viewer's player
	// device. For outputs in other output groups, the service ignores this setting.
	StreamName *string `locationName:"streamName" type:"string"`
}

// String returns the string representation
func (s AudioDescription) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Validation is delegated to the non-nil nested settings structs; errors are
// nested under the corresponding field name.
func (s *AudioDescription) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AudioDescription"}
	if s.AudioNormalizationSettings != nil {
		if err := s.AudioNormalizationSettings.Validate(); err != nil {
			invalidParams.AddNested("AudioNormalizationSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.CodecSettings != nil {
		if err := s.CodecSettings.Validate(); err != nil {
			invalidParams.AddNested("CodecSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.RemixSettings != nil {
		if err := s.RemixSettings.Validate(); err != nil {
			invalidParams.AddNested("RemixSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Fields are emitted in alphabetical order of their JSON location names; unset
// optional fields (nil pointers, empty enum strings) are omitted from the body.
func (s AudioDescription) MarshalFields(e protocol.FieldEncoder) error {
	if s.AudioNormalizationSettings != nil {
		v := s.AudioNormalizationSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "audioNormalizationSettings", v, metadata)
	}
	if s.AudioSourceName != nil {
		v := *s.AudioSourceName
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioSourceName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.AudioType != nil {
		v := *s.AudioType
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioType", protocol.Int64Value(v), metadata)
	}
	if len(s.AudioTypeControl) > 0 {
		v := s.AudioTypeControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioTypeControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.CodecSettings != nil {
		v := s.CodecSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "codecSettings", v, metadata)
	}
	if s.CustomLanguageCode != nil {
		v := *s.CustomLanguageCode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "customLanguageCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.LanguageCode) > 0 {
		v := s.LanguageCode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "languageCode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.LanguageCodeControl) > 0 {
		v := s.LanguageCodeControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "languageCodeControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.RemixSettings != nil {
		v := s.RemixSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "remixSettings", v, metadata)
	}
	if s.StreamName != nil {
		v := *s.StreamName
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "streamName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Advanced audio normalization settings. Ignore these settings unless you need
// to comply with a loudness standard.
type AudioNormalizationSettings struct {
	_ struct{} `type:"structure"`

	// Choose one of the following audio normalization algorithms: ITU-R BS.1770-1:
	// Ungated loudness. A measurement of ungated average loudness for an entire
	// piece of content, suitable for measurement of short-form content under ATSC
	// recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2:
	// Gated loudness. A measurement of gated average loudness compliant with the
	// requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3:
	// Modified peak. The same loudness measurement algorithm as 1770-2, with an
	// updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows
	// for more audio channels than the other algorithms, including configurations
	// such as 7.1.
	Algorithm AudioNormalizationAlgorithm `locationName:"algorithm" type:"string" enum:"true"`

	// When enabled the output audio is corrected using the chosen algorithm. If
	// disabled, the audio will be measured but not adjusted.
	AlgorithmControl AudioNormalizationAlgorithmControl `locationName:"algorithmControl" type:"string" enum:"true"`

	// Content measuring above this level will be corrected to the target level.
	// Content measuring below this level will not be corrected.
	CorrectionGateLevel *int64 `locationName:"correctionGateLevel" type:"integer"`

	// If set to LOG, log each output's audio track loudness to a CSV file.
	LoudnessLogging AudioNormalizationLoudnessLogging `locationName:"loudnessLogging" type:"string" enum:"true"`

	// If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio
	// track loudness.
	PeakCalculation AudioNormalizationPeakCalculation `locationName:"peakCalculation" type:"string" enum:"true"`

	// When you use Audio normalization (AudioNormalizationSettings), optionally
	// use this setting to specify a target loudness. If you don't specify a value
	// here, the encoder chooses a value for you, based on the algorithm that you
	// choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder
	// will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS.
	TargetLkfs *float64 `locationName:"targetLkfs" type:"double"`
}

// String returns the string representation
func (s AudioNormalizationSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Note the minimum bound here is negative (-70, a gate level in LKFS); it is
// enforced the same way as the positive minimums on other shapes.
func (s *AudioNormalizationSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AudioNormalizationSettings"}
	if s.CorrectionGateLevel != nil && *s.CorrectionGateLevel < -70 {
		invalidParams.Add(aws.NewErrParamMinValue("CorrectionGateLevel", -70))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only fields that are set (non-nil pointers, non-empty enums/slices) are
// written to the request body.
func (s AudioNormalizationSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Algorithm) > 0 {
		v := s.Algorithm
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "algorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.AlgorithmControl) > 0 {
		v := s.AlgorithmControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "algorithmControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.CorrectionGateLevel != nil {
		v := *s.CorrectionGateLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "correctionGateLevel", protocol.Int64Value(v), metadata)
	}
	if len(s.LoudnessLogging) > 0 {
		v := s.LoudnessLogging
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "loudnessLogging", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.PeakCalculation) > 0 {
		v := s.PeakCalculation
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "peakCalculation", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.TargetLkfs != nil {
		v := *s.TargetLkfs
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "targetLkfs", protocol.Float64Value(v), metadata)
	}
	return nil
}

// Selector for Audio
type AudioSelector struct {
	_ struct{} `type:"structure"`

	// Selects a specific language code from within an audio source, using the ISO
	// 639-2 or ISO 639-3 three-letter language code
	CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"`

	// Enable this setting on one audio selector to set it as the default for the
	// job. The service uses this default for outputs where it can't find the specified
	// input audio. If you don't set a default, those outputs have no audio.
	DefaultSelection AudioDefaultSelection `locationName:"defaultSelection" type:"string" enum:"true"`

	// Specifies audio data from an external file source.
	ExternalAudioFileInput *string `locationName:"externalAudioFileInput" type:"string"`

	// Selects a specific language code from within an audio source.
	LanguageCode LanguageCode `locationName:"languageCode" type:"string" enum:"true"`

	// Specifies a time delta in milliseconds to offset the audio from the input
	// video.
	Offset *int64 `locationName:"offset" type:"integer"`

	// Selects a specific PID from within an audio source (e.g. 257 selects PID
	// 0x101).
	Pids []int64 `locationName:"pids" type:"list"`

	// Use this setting for input streams that contain Dolby E, to have the service
	// extract specific program data from the track. To select multiple programs,
	// create multiple selectors with the same Track and different Program numbers.
	// In the console, this setting is visible when you set Selector type to Track.
	// Choose the program number from the dropdown list. If you are sending a JSON
	// file, provide the program ID, which is part of the audio metadata. If your
	// input file has incorrect metadata, you can choose All channels instead of
	// a program number to have the service ignore the program IDs and include all
	// the programs in the track.
	ProgramSelection *int64 `locationName:"programSelection" type:"integer"`

	// Use these settings to reorder the audio channels of one input to match those
	// of another input. This allows you to combine the two files into a single
	// output, one after the other.
	RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"`

	// Specifies the type of the audio selector.
	SelectorType AudioSelectorType `locationName:"selectorType" type:"string" enum:"true"`

	// Identify a track from the input audio to include in this selector by entering
	// the track index number. To include several tracks in a single audio selector,
	// specify multiple tracks as follows. Using the console, enter a comma-separated
	// list. For example, type "1,2,3" to include tracks 1 through 3. Specifying
	// directly in your JSON job file, provide the track numbers in an array. For
	// example, "tracks": [1,2,3].
	Tracks []int64 `locationName:"tracks" type:"list"`
}

// String returns the string representation
func (s AudioSelector) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min constraints from the struct tags and recursively
// validates the nested RemixSettings.
func (s *AudioSelector) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AudioSelector"}
	if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("CustomLanguageCode", 3))
	}
	if s.Offset != nil && *s.Offset < -2.147483648e+09 {
		invalidParams.Add(aws.NewErrParamMinValue("Offset", -2.147483648e+09))
	}
	if s.RemixSettings != nil {
		if err := s.RemixSettings.Validate(); err != nil {
			invalidParams.AddNested("RemixSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only fields that are set are written; list fields are emitted element by
// element via the encoder's List API.
func (s AudioSelector) MarshalFields(e protocol.FieldEncoder) error {
	if s.CustomLanguageCode != nil {
		v := *s.CustomLanguageCode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "customLanguageCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.DefaultSelection) > 0 {
		v := s.DefaultSelection
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "defaultSelection", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ExternalAudioFileInput != nil {
		v := *s.ExternalAudioFileInput
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "externalAudioFileInput", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.LanguageCode) > 0 {
		v := s.LanguageCode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "languageCode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Offset != nil {
		v := *s.Offset
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "offset", protocol.Int64Value(v), metadata)
	}
	if s.Pids != nil {
		v := s.Pids
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "pids", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.Int64Value(v1))
		}
		ls0.End()
	}
	if s.ProgramSelection != nil {
		v := *s.ProgramSelection
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "programSelection", protocol.Int64Value(v), metadata)
	}
	if s.RemixSettings != nil {
		v := s.RemixSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "remixSettings", v, metadata)
	}
	if len(s.SelectorType) > 0 {
		v := s.SelectorType
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "selectorType", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Tracks != nil {
		v := s.Tracks
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "tracks", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.Int64Value(v1))
		}
		ls0.End()
	}
	return nil
}

// Group of Audio Selectors
type AudioSelectorGroup struct {
	_ struct{} `type:"structure"`

	// Name of an Audio Selector within the same input to include in the group.
	// Audio selector names are standardized, based on their order within the input
	// (e.g., "Audio Selector 1"). The audio selector name parameter can be repeated
	// to add any number of audio selectors to the group.
	AudioSelectorNames []string `locationName:"audioSelectorNames" type:"list"`
}

// String returns the string representation
func (s AudioSelectorGroup) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s AudioSelectorGroup) MarshalFields(e protocol.FieldEncoder) error {
	if s.AudioSelectorNames != nil {
		v := s.AudioSelectorNames
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "audioSelectorNames", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	return nil
}

// Settings for quality-defined variable bitrate encoding with the AV1 codec.
// Required when you set Rate control mode to QVBR. Not valid when you set Rate
// control mode to a value other than QVBR, or when you don't define Rate control
// mode.
type Av1QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Required when you use QVBR rate control mode. That is, when you specify qvbrSettings
	// within av1Settings. Specify the general target quality level for this output,
	// from 1 to 10. Use higher numbers for greater quality. Level 10 results in
	// nearly lossless compression. The quality level for most broadcast-quality
	// transcodes is between 6 and 9. Optionally, to specify a value between whole
	// numbers, also provide a value for the setting qvbrQualityLevelFineTune. For
	// example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel
	// to 7 and set qvbrQualityLevelFineTune to .33.
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}

// String returns the string representation
func (s Av1QvbrSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min constraint declared in the struct tags (qvbrQualityLevel >= 1).
func (s *Av1QvbrSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Av1QvbrSettings"}
	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("QvbrQualityLevel", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Av1QvbrSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.QvbrQualityLevel != nil {
		v := *s.QvbrQualityLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qvbrQualityLevel", protocol.Int64Value(v), metadata)
	}
	if s.QvbrQualityLevelFineTune != nil {
		v := *s.QvbrQualityLevelFineTune
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qvbrQualityLevelFineTune", protocol.Float64Value(v), metadata)
	}
	return nil
}

// Required when you set Codec, under VideoDescription>CodecSettings to the
// value AV1.
type Av1Settings struct {
	_ struct{} `type:"structure"`

	// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual
	// quality.
	AdaptiveQuantization Av1AdaptiveQuantization `locationName:"adaptiveQuantization" type:"string" enum:"true"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl Av1FramerateControl `locationName:"framerateControl" type:"string" enum:"true"`

	// Optional. Specify how the transcoder performs framerate conversion. The default
	// behavior is to use duplicate drop conversion.
	FramerateConversionAlgorithm Av1FramerateConversionAlgorithm `locationName:"framerateConversionAlgorithm" type:"string" enum:"true"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Specify the GOP length (keyframe interval) in frames. With AV1, MediaConvert
	// doesn't support GOP length in seconds. This value must be greater than zero
	// and preferably equal to 1 + ((numberBFrames + 1) * x), where x is an integer
	// value.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000. Required when Rate control mode is QVBR.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Specify the number of B-frames. With AV1, MediaConvert supports only 7 or
	// 15.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" min:"7" type:"integer"`

	// Settings for quality-defined variable bitrate encoding with the AV1 codec.
	// Required when you set Rate control mode to QVBR. Not valid when you set Rate
	// control mode to a value other than QVBR, or when you don't define Rate control
	// mode.
	QvbrSettings *Av1QvbrSettings `locationName:"qvbrSettings" type:"structure"`

	// 'With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined
	// variable bitrate (QVBR). You can''t use CBR or VBR.'
	RateControlMode Av1RateControlMode `locationName:"rateControlMode" type:"string" enum:"true"`

	// Specify the number of slices per picture. This value must be 1, 2, 4, 8,
	// 16, or 32. For progressive pictures, this value must be less than or equal
	// to the number of macroblock rows. For interlaced pictures, this value must
	// be less than or equal to half the number of macroblock rows.
	Slices *int64 `locationName:"slices" min:"1" type:"integer"`

	// Adjust quantization within each frame based on spatial variation of content
	// complexity.
	SpatialAdaptiveQuantization Av1SpatialAdaptiveQuantization `locationName:"spatialAdaptiveQuantization" type:"string" enum:"true"`
}

// String returns the string representation
func (s Av1Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min constraints from the struct tags and recursively
// validates the nested QvbrSettings.
func (s *Av1Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Av1Settings"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.NumberBFramesBetweenReferenceFrames != nil && *s.NumberBFramesBetweenReferenceFrames < 7 {
		invalidParams.Add(aws.NewErrParamMinValue("NumberBFramesBetweenReferenceFrames", 7))
	}
	if s.Slices != nil && *s.Slices < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Slices", 1))
	}
	if s.QvbrSettings != nil {
		if err := s.QvbrSettings.Validate(); err != nil {
			invalidParams.AddNested("QvbrSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only fields that are set are written to the request body.
func (s Av1Settings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.AdaptiveQuantization) > 0 {
		v := s.AdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "adaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateControl) > 0 {
		v := s.FramerateControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateConversionAlgorithm) > 0 {
		v := s.FramerateConversionAlgorithm
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateConversionAlgorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}
	if s.GopSize != nil {
		v := *s.GopSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSize", protocol.Float64Value(v), metadata)
	}
	if s.MaxBitrate != nil {
		v := *s.MaxBitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxBitrate", protocol.Int64Value(v), metadata)
	}
	if s.NumberBFramesBetweenReferenceFrames != nil {
		v := *s.NumberBFramesBetweenReferenceFrames
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "numberBFramesBetweenReferenceFrames", protocol.Int64Value(v), metadata)
	}
	if s.QvbrSettings != nil {
		v := s.QvbrSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "qvbrSettings", v, metadata)
	}
	if len(s.RateControlMode) > 0 {
		v := s.RateControlMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateControlMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Slices != nil {
		v := *s.Slices
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "slices", protocol.Int64Value(v), metadata)
	}
	if len(s.SpatialAdaptiveQuantization) > 0 {
		v := s.SpatialAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "spatialAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Settings for Avail Blanking
type AvailBlanking struct {
	_ struct{} `type:"structure"`

	// Blanking image to be used. Leave empty for solid black. Only bmp and png
	// images are supported.
	AvailBlankingImage *string `locationName:"availBlankingImage" min:"14" type:"string"`
}

// String returns the string representation
func (s AvailBlanking) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum-length constraint declared in the struct tags.
func (s *AvailBlanking) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "AvailBlanking"}
	if s.AvailBlankingImage != nil && len(*s.AvailBlankingImage) < 14 {
		invalidParams.Add(aws.NewErrParamMinLen("AvailBlankingImage", 14))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s AvailBlanking) MarshalFields(e protocol.FieldEncoder) error {
	if s.AvailBlankingImage != nil {
		v := *s.AvailBlankingImage
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "availBlankingImage", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Burn-In Destination Settings.
type BurninDestinationSettings struct {
	_ struct{} `type:"structure"`

	// If no explicit x_position or y_position is provided, setting alignment to
	// centered will place the captions at the bottom center of the output. Similarly,
	// setting a left alignment will align captions to the bottom left of the output.
	// If x and y positions are given in conjunction with the alignment parameter,
	// the font will be justified (either left or centered) relative to those coordinates.
	// This option is not valid for source captions that are STL, 608/embedded or
	// teletext. These source settings are already pre-defined by the caption stream.
	// All burn-in and DVB-Sub font settings must match.
	Alignment BurninSubtitleAlignment `locationName:"alignment" type:"string" enum:"true"`

	// Specifies the color of the rectangle behind the captions. All burn-in and
	// DVB-Sub font settings must match.
	BackgroundColor BurninSubtitleBackgroundColor `locationName:"backgroundColor" type:"string" enum:"true"`

	// Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent.
	// Leaving this parameter blank is equivalent to setting it to 0 (transparent).
	// All burn-in and DVB-Sub font settings must match.
	BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"`

	// Specifies the color of the burned-in captions. This option is not valid for
	// source captions that are STL, 608/embedded or teletext. These source settings
	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
	// settings must match.
	FontColor BurninSubtitleFontColor `locationName:"fontColor" type:"string" enum:"true"`

	// Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.
	// All burn-in and DVB-Sub font settings must match.
	FontOpacity *int64 `locationName:"fontOpacity" type:"integer"`

	// Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and
	// DVB-Sub font settings must match.
	FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"`

	// Provide the font script, using an ISO 15924 script code, if the LanguageCode
	// is not sufficient for determining the script type. Where LanguageCode or
	// CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is
	// used to help determine the appropriate font for rendering burn-in captions.
	FontScript FontScript `locationName:"fontScript" type:"string" enum:"true"`

	// A positive integer indicates the exact font size in points. Set to 0 for
	// automatic font size selection. All burn-in and DVB-Sub font settings must
	// match.
	FontSize *int64 `locationName:"fontSize" type:"integer"`

	// Specifies font outline color. This option is not valid for source captions
	// that are either 608/embedded or teletext. These source settings are already
	// pre-defined by the caption stream. All burn-in and DVB-Sub font settings
	// must match.
	OutlineColor BurninSubtitleOutlineColor `locationName:"outlineColor" type:"string" enum:"true"`

	// Specifies font outline size in pixels. This option is not valid for source
	// captions that are either 608/embedded or teletext. These source settings
	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
	// settings must match.
	OutlineSize *int64 `locationName:"outlineSize" type:"integer"`

	// Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub
	// font settings must match.
	ShadowColor BurninSubtitleShadowColor `locationName:"shadowColor" type:"string" enum:"true"`

	// Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving
	// this parameter blank is equivalent to setting it to 0 (transparent). All
	// burn-in and DVB-Sub font settings must match.
	ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"`

	// Specifies the horizontal offset of the shadow relative to the captions in
	// pixels. A value of -2 would result in a shadow offset 2 pixels to the left.
	// All burn-in and DVB-Sub font settings must match.
	ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"`

	// Specifies the vertical offset of the shadow relative to the captions in pixels.
	// A value of -2 would result in a shadow offset 2 pixels above the text. All
	// burn-in and DVB-Sub font settings must match.
	ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"`

	// Only applies to jobs with input captions in Teletext or STL formats. Specify
	// whether the spacing between letters in your captions is set by the captions
	// grid or varies depending on letter width. Choose fixed grid to conform to
	// the spacing specified in the captions file more accurately. Choose proportional
	// to make the text easier to read if the captions are closed caption.
	TeletextSpacing BurninSubtitleTeletextSpacing `locationName:"teletextSpacing" type:"string" enum:"true"`

	// Specifies the horizontal position of the caption relative to the left side
	// of the output in pixels. A value of 10 would result in the captions starting
	// 10 pixels from the left of the output. If no explicit x_position is provided,
	// the horizontal caption position will be determined by the alignment parameter.
	// This option is not valid for source captions that are STL, 608/embedded or
	// teletext. These source settings are already pre-defined by the caption stream.
	// All burn-in and DVB-Sub font settings must match.
	XPosition *int64 `locationName:"xPosition" type:"integer"`

	// Specifies the vertical position of the caption relative to the top of the
	// output in pixels. A value of 10 would result in the captions starting 10
	// pixels from the top of the output. If no explicit y_position is provided,
	// the caption will be positioned towards the bottom of the output. This option
	// is not valid for source captions that are STL, 608/embedded or teletext.
	// These source settings are already pre-defined by the caption stream. All
	// burn-in and DVB-Sub font settings must match.
	YPosition *int64 `locationName:"yPosition" type:"integer"`
}

// String returns the string representation
func (s BurninDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min constraints declared in the struct tags.
func (s *BurninDestinationSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "BurninDestinationSettings"}
	if s.FontResolution != nil && *s.FontResolution < 96 {
		invalidParams.Add(aws.NewErrParamMinValue("FontResolution", 96))
	}
	if s.ShadowXOffset != nil && *s.ShadowXOffset < -2.147483648e+09 {
		invalidParams.Add(aws.NewErrParamMinValue("ShadowXOffset", -2.147483648e+09))
	}
	if s.ShadowYOffset != nil && *s.ShadowYOffset < -2.147483648e+09 {
		invalidParams.Add(aws.NewErrParamMinValue("ShadowYOffset", -2.147483648e+09))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only fields that are set are written to the request body.
func (s BurninDestinationSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Alignment) > 0 {
		v := s.Alignment
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "alignment", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.BackgroundColor) > 0 {
		v := s.BackgroundColor
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "backgroundColor", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.BackgroundOpacity != nil {
		v := *s.BackgroundOpacity
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "backgroundOpacity", protocol.Int64Value(v), metadata)
	}
	if len(s.FontColor) > 0 {
		v := s.FontColor
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fontColor", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FontOpacity != nil {
		v := *s.FontOpacity
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fontOpacity", protocol.Int64Value(v), metadata)
	}
	if s.FontResolution != nil {
		v := *s.FontResolution
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fontResolution", protocol.Int64Value(v), metadata)
	}
	if len(s.FontScript) > 0 {
		v := s.FontScript
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fontScript", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FontSize != nil {
		v := *s.FontSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fontSize", protocol.Int64Value(v), metadata)
	}
	if len(s.OutlineColor) > 0 {
		v := s.OutlineColor
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "outlineColor", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.OutlineSize != nil {
		v := *s.OutlineSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "outlineSize", protocol.Int64Value(v), metadata)
	}
	if len(s.ShadowColor) > 0 {
		v := s.ShadowColor
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "shadowColor", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ShadowOpacity != nil {
		v := *s.ShadowOpacity
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "shadowOpacity", protocol.Int64Value(v), metadata)
	}
	if s.ShadowXOffset != nil {
		v := *s.ShadowXOffset
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "shadowXOffset", protocol.Int64Value(v), metadata)
	}
	if s.ShadowYOffset != nil {
		v := *s.ShadowYOffset
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "shadowYOffset", protocol.Int64Value(v), metadata)
	}
	if len(s.TeletextSpacing) > 0 {
		v := s.TeletextSpacing
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "teletextSpacing", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.XPosition != nil {
		v := *s.XPosition
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "xPosition", protocol.Int64Value(v), metadata)
	}
	if s.YPosition != nil {
		v := *s.YPosition
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "yPosition", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Description of Caption output
type CaptionDescription struct {
	_ struct{} `type:"structure"`

	// Specifies which "Caption Selector":#inputs-caption_selector to use from each
	// input when generating captions. The name should be of the format "Caption
	// Selector ", which denotes that the Nth Caption Selector will be used from
	// each input.
	CaptionSelectorName *string `locationName:"captionSelectorName" min:"1" type:"string"`

	// Specify the language for this captions output track. For most captions output
	// formats, the encoder puts this language information in the output captions
	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder
	// uses this language information when automatically selecting the font script
	// for rendering the captions text. For all outputs, you can use an ISO 639-2
	// or ISO 639-3 code. For streaming outputs, you can also use any other code
	// in the full RFC-5646 specification. Streaming outputs are those that are
	// in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft
	// Smooth Streaming.
	CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"`

	// Specific settings required by destination type. Note that burnin_destination_settings
	// are not available if the source of the caption data is Embedded or Teletext.
	DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"`

	// Specify the language of this captions output track. For most captions output
	// formats, the encoder puts this language information in the output captions
	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder
	// uses this language information to choose the font language for rendering
	// the captions text.
	LanguageCode LanguageCode `locationName:"languageCode" type:"string" enum:"true"`

	// Specify a label for this set of output captions. For example, "English",
	// "Director commentary", or "track_2". For streaming outputs, MediaConvert
	// passes this information into destination manifests for display on the end-viewer's
	// player device. For outputs in other output groups, the service ignores this
	// setting.
	LanguageDescription *string `locationName:"languageDescription" type:"string"`
}

// String returns the string representation
func (s CaptionDescription) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min-length constraint on CaptionSelectorName and recursively
// validates the nested DestinationSettings.
func (s *CaptionDescription) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "CaptionDescription"}
	if s.CaptionSelectorName != nil && len(*s.CaptionSelectorName) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("CaptionSelectorName", 1))
	}
	if s.DestinationSettings != nil {
		if err := s.DestinationSettings.Validate(); err != nil {
			invalidParams.AddNested("DestinationSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s CaptionDescription) MarshalFields(e protocol.FieldEncoder) error {
	if s.CaptionSelectorName != nil {
		v := *s.CaptionSelectorName
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "captionSelectorName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.CustomLanguageCode != nil {
		v := *s.CustomLanguageCode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "customLanguageCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.DestinationSettings != nil {
		v := s.DestinationSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "destinationSettings", v, metadata)
	}
	if len(s.LanguageCode) > 0 {
		v := s.LanguageCode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "languageCode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.LanguageDescription != nil {
		v := *s.LanguageDescription
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "languageDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Caption Description for preset
type CaptionDescriptionPreset struct {
	_ struct{} `type:"structure"`

	// Specify the language for this captions output track. For most captions output
	// formats, the encoder puts this language information in the output captions
	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder
	// uses this language information when automatically selecting the font script
	// for rendering the captions text. For all outputs, you can use an ISO 639-2
	// or ISO 639-3 code. For streaming outputs, you can also use any other code
	// in the full RFC-5646 specification. Streaming outputs are those that are
	// in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft
	// Smooth Streaming.
	CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"`

	// Specific settings required by destination type. Note that burnin_destination_settings
	// are not available if the source of the caption data is Embedded or Teletext.
	DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"`

	// Specify the language of this captions output track. For most captions output
	// formats, the encoder puts this language information in the output captions
	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder
	// uses this language information to choose the font language for rendering
	// the captions text.
	LanguageCode LanguageCode `locationName:"languageCode" type:"string" enum:"true"`

	// Specify a label for this set of output captions. For example, "English",
	// "Director commentary", or "track_2". For streaming outputs, MediaConvert
	// passes this information into destination manifests for display on the end-viewer's
	// player device. For outputs in other output groups, the service ignores this
	// setting.
LanguageDescription *string `locationName:"languageDescription" type:"string"` } // String returns the string representation func (s CaptionDescriptionPreset) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *CaptionDescriptionPreset) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CaptionDescriptionPreset"} if s.DestinationSettings != nil { if err := s.DestinationSettings.Validate(); err != nil { invalidParams.AddNested("DestinationSettings", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s CaptionDescriptionPreset) MarshalFields(e protocol.FieldEncoder) error { if s.CustomLanguageCode != nil { v := *s.CustomLanguageCode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "customLanguageCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.DestinationSettings != nil { v := s.DestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "destinationSettings", v, metadata) } if len(s.LanguageCode) > 0 { v := s.LanguageCode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "languageCode", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.LanguageDescription != nil { v := *s.LanguageDescription metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "languageDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } return nil } // Specific settings required by destination type. Note that burnin_destination_settings // are not available if the source of the caption data is Embedded or Teletext. type CaptionDestinationSettings struct { _ struct{} `type:"structure"` // Burn-In Destination Settings. 
BurninDestinationSettings *BurninDestinationSettings `locationName:"burninDestinationSettings" type:"structure"` // Specify the format for this set of captions on this output. The default format // is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, // DVB-sub, IMSC, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, // choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED) to create an output that // complies with the SCTE-43 spec. To create a non-compliant output where the // embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20). DestinationType CaptionDestinationType `locationName:"destinationType" type:"string" enum:"true"` // DVB-Sub Destination Settings DvbSubDestinationSettings *DvbSubDestinationSettings `locationName:"dvbSubDestinationSettings" type:"structure"` // Settings specific to embedded/ancillary caption outputs, including 608/708 // Channel destination number. EmbeddedDestinationSettings *EmbeddedDestinationSettings `locationName:"embeddedDestinationSettings" type:"structure"` // Settings specific to IMSC caption outputs. ImscDestinationSettings *ImscDestinationSettings `locationName:"imscDestinationSettings" type:"structure"` // Settings for SCC caption output. SccDestinationSettings *SccDestinationSettings `locationName:"sccDestinationSettings" type:"structure"` // Settings for Teletext caption output TeletextDestinationSettings *TeletextDestinationSettings `locationName:"teletextDestinationSettings" type:"structure"` // Settings specific to TTML caption outputs, including Pass style information // (TtmlStylePassthrough). TtmlDestinationSettings *TtmlDestinationSettings `locationName:"ttmlDestinationSettings" type:"structure"` } // String returns the string representation func (s CaptionDestinationSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. 
func (s *CaptionDestinationSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CaptionDestinationSettings"} if s.BurninDestinationSettings != nil { if err := s.BurninDestinationSettings.Validate(); err != nil { invalidParams.AddNested("BurninDestinationSettings", err.(aws.ErrInvalidParams)) } } if s.DvbSubDestinationSettings != nil { if err := s.DvbSubDestinationSettings.Validate(); err != nil { invalidParams.AddNested("DvbSubDestinationSettings", err.(aws.ErrInvalidParams)) } } if s.EmbeddedDestinationSettings != nil { if err := s.EmbeddedDestinationSettings.Validate(); err != nil { invalidParams.AddNested("EmbeddedDestinationSettings", err.(aws.ErrInvalidParams)) } } if s.TeletextDestinationSettings != nil { if err := s.TeletextDestinationSettings.Validate(); err != nil { invalidParams.AddNested("TeletextDestinationSettings", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s CaptionDestinationSettings) MarshalFields(e protocol.FieldEncoder) error { if s.BurninDestinationSettings != nil { v := s.BurninDestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "burninDestinationSettings", v, metadata) } if len(s.DestinationType) > 0 { v := s.DestinationType metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "destinationType", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.DvbSubDestinationSettings != nil { v := s.DvbSubDestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "dvbSubDestinationSettings", v, metadata) } if s.EmbeddedDestinationSettings != nil { v := s.EmbeddedDestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "embeddedDestinationSettings", v, metadata) } if s.ImscDestinationSettings != nil { v := s.ImscDestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "imscDestinationSettings", v, metadata) } if s.SccDestinationSettings != nil { v := s.SccDestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "sccDestinationSettings", v, metadata) } if s.TeletextDestinationSettings != nil { v := s.TeletextDestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "teletextDestinationSettings", v, metadata) } if s.TtmlDestinationSettings != nil { v := s.TtmlDestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "ttmlDestinationSettings", v, metadata) } return nil } // Set up captions in your outputs by first selecting them from your input here. type CaptionSelector struct { _ struct{} `type:"structure"` // The specific language to extract from source, using the ISO 639-2 or ISO // 639-3 three-letter language code. If input is SCTE-27, complete this field // and/or PID to select the caption language to extract. 
If input is DVB-Sub // and output is Burn-in or SMPTE-TT, complete this field and/or PID to select // the caption language to extract. If input is DVB-Sub that is being passed // through, omit this field (and PID field); there is no way to extract a specific // language with pass-through captions. CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"` // The specific language to extract from source. If input is SCTE-27, complete // this field and/or PID to select the caption language to extract. If input // is DVB-Sub and output is Burn-in or SMPTE-TT, complete this field and/or // PID to select the caption language to extract. If input is DVB-Sub that is // being passed through, omit this field (and PID field); there is no way to // extract a specific language with pass-through captions. LanguageCode LanguageCode `locationName:"languageCode" type:"string" enum:"true"` // If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, // specify the URI of the input captions source file. If your input captions // are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings. SourceSettings *CaptionSourceSettings `locationName:"sourceSettings" type:"structure"` } // String returns the string representation func (s CaptionSelector) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *CaptionSelector) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CaptionSelector"} if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 { invalidParams.Add(aws.NewErrParamMinLen("CustomLanguageCode", 3)) } if s.SourceSettings != nil { if err := s.SourceSettings.Validate(); err != nil { invalidParams.AddNested("SourceSettings", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s CaptionSelector) MarshalFields(e protocol.FieldEncoder) error { if s.CustomLanguageCode != nil { v := *s.CustomLanguageCode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "customLanguageCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if len(s.LanguageCode) > 0 { v := s.LanguageCode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "languageCode", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.SourceSettings != nil { v := s.SourceSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "sourceSettings", v, metadata) } return nil } // Ignore this setting unless your input captions format is SCC. To have the // service compensate for differing frame rates between your input captions // and input video, specify the frame rate of the captions file. Specify this // value as a fraction, using the settings Framerate numerator (framerateNumerator) // and Framerate denominator (framerateDenominator). For example, you might // specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, // or 30000 / 1001 for 29.97 fps. type CaptionSourceFramerate struct { _ struct{} `type:"structure"` // Specify the denominator of the fraction that represents the frame rate for // the setting Caption source frame rate (CaptionSourceFramerate). Use this // setting along with the setting Framerate numerator (framerateNumerator). FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` // Specify the numerator of the fraction that represents the frame rate for // the setting Caption source frame rate (CaptionSourceFramerate). Use this // setting along with the setting Framerate denominator (framerateDenominator). 
FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` } // String returns the string representation func (s CaptionSourceFramerate) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *CaptionSourceFramerate) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CaptionSourceFramerate"} if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1)) } if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s CaptionSourceFramerate) MarshalFields(e protocol.FieldEncoder) error { if s.FramerateDenominator != nil { v := *s.FramerateDenominator metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata) } if s.FramerateNumerator != nil { v := *s.FramerateNumerator metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata) } return nil } // If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, // specify the URI of the input captions source file. If your input captions // are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings. type CaptionSourceSettings struct { _ struct{} `type:"structure"` // Settings for ancillary captions source. 
AncillarySourceSettings *AncillarySourceSettings `locationName:"ancillarySourceSettings" type:"structure"` // DVB Sub Source Settings DvbSubSourceSettings *DvbSubSourceSettings `locationName:"dvbSubSourceSettings" type:"structure"` // Settings for embedded captions Source EmbeddedSourceSettings *EmbeddedSourceSettings `locationName:"embeddedSourceSettings" type:"structure"` // If your input captions are SCC, SMI, SRT, STL, TTML, or IMSC 1.1 in an xml // file, specify the URI of the input caption source file. If your caption source // is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings. FileSourceSettings *FileSourceSettings `locationName:"fileSourceSettings" type:"structure"` // Use Source (SourceType) to identify the format of your input captions. The // service cannot auto-detect caption format. SourceType CaptionSourceType `locationName:"sourceType" type:"string" enum:"true"` // Settings specific to Teletext caption sources, including Page number. TeletextSourceSettings *TeletextSourceSettings `locationName:"teletextSourceSettings" type:"structure"` // Settings specific to caption sources that are specified by track number. // Currently, this is only IMSC captions in an IMF package. If your caption // source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead // of TrackSourceSettings. TrackSourceSettings *TrackSourceSettings `locationName:"trackSourceSettings" type:"structure"` } // String returns the string representation func (s CaptionSourceSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. 
func (s *CaptionSourceSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CaptionSourceSettings"} if s.AncillarySourceSettings != nil { if err := s.AncillarySourceSettings.Validate(); err != nil { invalidParams.AddNested("AncillarySourceSettings", err.(aws.ErrInvalidParams)) } } if s.DvbSubSourceSettings != nil { if err := s.DvbSubSourceSettings.Validate(); err != nil { invalidParams.AddNested("DvbSubSourceSettings", err.(aws.ErrInvalidParams)) } } if s.EmbeddedSourceSettings != nil { if err := s.EmbeddedSourceSettings.Validate(); err != nil { invalidParams.AddNested("EmbeddedSourceSettings", err.(aws.ErrInvalidParams)) } } if s.FileSourceSettings != nil { if err := s.FileSourceSettings.Validate(); err != nil { invalidParams.AddNested("FileSourceSettings", err.(aws.ErrInvalidParams)) } } if s.TeletextSourceSettings != nil { if err := s.TeletextSourceSettings.Validate(); err != nil { invalidParams.AddNested("TeletextSourceSettings", err.(aws.ErrInvalidParams)) } } if s.TrackSourceSettings != nil { if err := s.TrackSourceSettings.Validate(); err != nil { invalidParams.AddNested("TrackSourceSettings", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s CaptionSourceSettings) MarshalFields(e protocol.FieldEncoder) error { if s.AncillarySourceSettings != nil { v := s.AncillarySourceSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "ancillarySourceSettings", v, metadata) } if s.DvbSubSourceSettings != nil { v := s.DvbSubSourceSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "dvbSubSourceSettings", v, metadata) } if s.EmbeddedSourceSettings != nil { v := s.EmbeddedSourceSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "embeddedSourceSettings", v, metadata) } if s.FileSourceSettings != nil { v := s.FileSourceSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "fileSourceSettings", v, metadata) } if len(s.SourceType) > 0 { v := s.SourceType metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "sourceType", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.TeletextSourceSettings != nil { v := s.TeletextSourceSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "teletextSourceSettings", v, metadata) } if s.TrackSourceSettings != nil { v := s.TrackSourceSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "trackSourceSettings", v, metadata) } return nil } // Channel mapping (ChannelMapping) contains the group of fields that hold the // remixing value for each channel. Units are in dB. Acceptable values are within // the range from -60 (mute) through 6. A setting of 0 passes the input channel // unchanged to the output channel (no attenuation or amplification). type ChannelMapping struct { _ struct{} `type:"structure"` // List of output channels OutputChannels []OutputChannelMapping `locationName:"outputChannels" type:"list"` } // String returns the string representation func (s ChannelMapping) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s ChannelMapping) MarshalFields(e protocol.FieldEncoder) error { if s.OutputChannels != nil { v := s.OutputChannels metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "outputChannels", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } return nil } // Specify the details for each pair of HLS and DASH additional manifests that // you want the service to generate for this CMAF output group. Each pair of // manifests can reference a different subset of outputs in the group. type CmafAdditionalManifest struct { _ struct{} `type:"structure"` // Specify a name modifier that the service adds to the name of this manifest // to make it different from the file names of the other main manifests in the // output group. For example, say that the default main manifest for your HLS // group is film-name.m3u8. If you enter "-no-premium" for this setting, then // the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. // For HLS output groups, specify a manifestNameModifier that is different from // the nameModifier of the output. The service uses the output name modifier // to create unique names for the individual variant manifests. ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"` // Specify the outputs that you want this additional top-level manifest to reference. SelectedOutputs []string `locationName:"selectedOutputs" type:"list"` } // String returns the string representation func (s CmafAdditionalManifest) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. 
func (s *CmafAdditionalManifest) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CmafAdditionalManifest"} if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 { invalidParams.Add(aws.NewErrParamMinLen("ManifestNameModifier", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s CmafAdditionalManifest) MarshalFields(e protocol.FieldEncoder) error { if s.ManifestNameModifier != nil { v := *s.ManifestNameModifier metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "manifestNameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.SelectedOutputs != nil { v := s.SelectedOutputs metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "selectedOutputs", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) } ls0.End() } return nil } // Settings for CMAF encryption type CmafEncryptionSettings struct { _ struct{} `type:"structure"` // This is a 128-bit, 16-byte hex value represented by a 32-character text string. // If this parameter is not set then the Initialization Vector will follow the // segment number by default. ConstantInitializationVector *string `locationName:"constantInitializationVector" min:"32" type:"string"` // Specify the encryption scheme that you want the service to use when encrypting // your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR). EncryptionMethod CmafEncryptionType `locationName:"encryptionMethod" type:"string" enum:"true"` // When you use DRM with CMAF outputs, choose whether the service writes the // 128-bit encryption initialization vector in the HLS and DASH manifests. 
InitializationVectorInManifest CmafInitializationVectorInManifest `locationName:"initializationVectorInManifest" type:"string" enum:"true"` // If your output group type is CMAF, use these settings when doing DRM encryption // with a SPEKE-compliant key provider. If your output group type is HLS, DASH, // or Microsoft Smooth, use the SpekeKeyProvider settings instead. SpekeKeyProvider *SpekeKeyProviderCmaf `locationName:"spekeKeyProvider" type:"structure"` // Use these settings to set up encryption with a static key provider. StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"` // Specify whether your DRM encryption key is static or from a key provider // that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html. Type CmafKeyProviderType `locationName:"type" type:"string" enum:"true"` } // String returns the string representation func (s CmafEncryptionSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *CmafEncryptionSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CmafEncryptionSettings"} if s.ConstantInitializationVector != nil && len(*s.ConstantInitializationVector) < 32 { invalidParams.Add(aws.NewErrParamMinLen("ConstantInitializationVector", 32)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s CmafEncryptionSettings) MarshalFields(e protocol.FieldEncoder) error { if s.ConstantInitializationVector != nil { v := *s.ConstantInitializationVector metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "constantInitializationVector", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if len(s.EncryptionMethod) > 0 { v := s.EncryptionMethod metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "encryptionMethod", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.InitializationVectorInManifest) > 0 { v := s.InitializationVectorInManifest metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "initializationVectorInManifest", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.SpekeKeyProvider != nil { v := s.SpekeKeyProvider metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "spekeKeyProvider", v, metadata) } if s.StaticKeyProvider != nil { v := s.StaticKeyProvider metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "staticKeyProvider", v, metadata) } if len(s.Type) > 0 { v := s.Type metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "type", protocol.QuotedValue{ValueMarshaler: v}, metadata) } return nil } // Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to // CMAF_GROUP_SETTINGS. Each output in a CMAF Output Group may only contain // a single video, audio, or caption output. type CmafGroupSettings struct { _ struct{} `type:"structure"` // By default, the service creates one top-level .m3u8 HLS manifest and one // top -level .mpd DASH manifest for each CMAF output group in your job. These // default manifests reference every output in the output group. To create additional // top-level manifests that reference a subset of the outputs in the output // group, specify a list of them here. For each additional manifest that you // specify, the service creates one HLS manifest and one DASH manifest. 
AdditionalManifests []CmafAdditionalManifest `locationName:"additionalManifests" type:"list"` // A partial URI prefix that will be put in the manifest file at the top level // BaseURL element. Can be used if streams are delivered from a different URL // than the manifest file. BaseUrl *string `locationName:"baseUrl" type:"string"` // When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client // from saving media segments for later replay. ClientCache CmafClientCache `locationName:"clientCache" type:"string" enum:"true"` // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist // generation. CodecSpecification CmafCodecSpecification `locationName:"codecSpecification" type:"string" enum:"true"` // Use Destination (Destination) to specify the S3 output location and the output // filename base. Destination accepts format identifiers. If you do not specify // the base filename in the URI, the service will use the filename of the input // file. If your job has multiple inputs, the service uses the filename of the // first input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. Will vary based on the type of // destination DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"` // DRM settings. Encryption *CmafEncryptionSettings `locationName:"encryption" type:"structure"` // Length of fragments to generate (in seconds). Fragment length must be compatible // with GOP size and Framerate. Note that fragments will end on the next keyframe // after this number of seconds, so actual fragment length may be longer. When // Emit Single File is checked, the fragmentation is internal to a single output // file and it does not cause the creation of many output files as in other // output types. FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"` // When set to GZIP, compresses HLS playlist. 
ManifestCompression CmafManifestCompression `locationName:"manifestCompression" type:"string" enum:"true"` // Indicates whether the output manifest should use floating point values for // segment duration. ManifestDurationFormat CmafManifestDurationFormat `locationName:"manifestDurationFormat" type:"string" enum:"true"` // Minimum time of initially buffered media that is needed to ensure smooth // playout. MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"` // Keep this setting at the default value of 0, unless you are troubleshooting // a problem with how devices play back the end of your video asset. If you // know that player devices are hanging on the final segment of your video because // the length of your final segment is too short, use this setting to specify // a minimum final segment length, in seconds. Choose a value that is greater // than or equal to 1 and less than your segment length. When you specify a // value for this setting, the encoder will combine any final segment that is // shorter than the length that you specify with the previous segment. For example, // your segment length is 3 seconds and your final segment is .5 seconds without // a minimum final segment length; when you set the minimum final segment length // to 1, your final segment is 3.5 seconds. MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"` // Specify whether your DASH profile is on-demand or main. When you choose Main // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), // the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. // When you choose On-demand, you must also set the output group setting Segment // control (SegmentControl) to Single file (SINGLE_FILE). 
MpdProfile CmafMpdProfile `locationName:"mpdProfile" type:"string" enum:"true"` // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. SegmentControl CmafSegmentControl `locationName:"segmentControl" type:"string" enum:"true"` // Use this setting to specify the length, in seconds, of each individual CMAF // segment. This value applies to the whole package; that is, to every output // in the output group. Note that segments end on the first keyframe after this // number of seconds, so the actual segment length might be slightly longer. // If you set Segment control (CmafSegmentControl) to single file, the service // puts the content of each output in a single file that has metadata that marks // these segments. If you set it to segmented files, the service creates multiple // files for each output, each with the content of one segment. SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"` // Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag // of variant manifest. StreamInfResolution CmafStreamInfResolution `locationName:"streamInfResolution" type:"string" enum:"true"` // When set to ENABLED, a DASH MPD manifest will be generated for this output. WriteDashManifest CmafWriteDASHManifest `locationName:"writeDashManifest" type:"string" enum:"true"` // When set to ENABLED, an Apple HLS manifest will be generated for this output. WriteHlsManifest CmafWriteHLSManifest `locationName:"writeHlsManifest" type:"string" enum:"true"` // When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), // your DASH manifest shows precise segment durations. The segment duration // information appears inside the SegmentTimeline element, inside SegmentTemplate // at the Representation level. 
When this feature isn't enabled, the segment // durations in your DASH manifest are approximate. The segment duration information // appears in the duration attribute of the SegmentTemplate element. WriteSegmentTimelineInRepresentation CmafWriteSegmentTimelineInRepresentation `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"true"` } // String returns the string representation func (s CmafGroupSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *CmafGroupSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CmafGroupSettings"} if s.FragmentLength != nil && *s.FragmentLength < 1 { invalidParams.Add(aws.NewErrParamMinValue("FragmentLength", 1)) } if s.SegmentLength != nil && *s.SegmentLength < 1 { invalidParams.Add(aws.NewErrParamMinValue("SegmentLength", 1)) } if s.AdditionalManifests != nil { for i, v := range s.AdditionalManifests { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(aws.ErrInvalidParams)) } } } if s.Encryption != nil { if err := s.Encryption.Validate(); err != nil { invalidParams.AddNested("Encryption", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s CmafGroupSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.AdditionalManifests != nil {
		v := s.AdditionalManifests

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "additionalManifests", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()

	}
	if s.BaseUrl != nil {
		v := *s.BaseUrl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "baseUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.ClientCache) > 0 {
		v := s.ClientCache

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "clientCache", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.CodecSpecification) > 0 {
		v := s.CodecSpecification

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecSpecification", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Destination != nil {
		v := *s.Destination

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "destination", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.DestinationSettings != nil {
		v := s.DestinationSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "destinationSettings", v, metadata)
	}
	if s.Encryption != nil {
		v := s.Encryption

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "encryption", v, metadata)
	}
	if s.FragmentLength != nil {
		v := *s.FragmentLength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fragmentLength", protocol.Int64Value(v), metadata)
	}
	if len(s.ManifestCompression) > 0 {
		v := s.ManifestCompression

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "manifestCompression", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.ManifestDurationFormat) > 0 {
		v := s.ManifestDurationFormat

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "manifestDurationFormat", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.MinBufferTime != nil {
		v := *s.MinBufferTime

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "minBufferTime", protocol.Int64Value(v), metadata)
	}
	if s.MinFinalSegmentLength != nil {
		v := *s.MinFinalSegmentLength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "minFinalSegmentLength", protocol.Float64Value(v), metadata)
	}
	if len(s.MpdProfile) > 0 {
		v := s.MpdProfile

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "mpdProfile", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SegmentControl) > 0 {
		v := s.SegmentControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "segmentControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SegmentLength != nil {
		v := *s.SegmentLength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "segmentLength", protocol.Int64Value(v), metadata)
	}
	if len(s.StreamInfResolution) > 0 {
		v := s.StreamInfResolution

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "streamInfResolution", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.WriteDashManifest) > 0 {
		v := s.WriteDashManifest

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "writeDashManifest", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.WriteHlsManifest) > 0 {
		v := s.WriteHlsManifest

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "writeHlsManifest", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.WriteSegmentTimelineInRepresentation) > 0 {
		v := s.WriteSegmentTimelineInRepresentation

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "writeSegmentTimelineInRepresentation", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Settings for MP4 segments in CMAF
type CmfcSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only when you specify SCTE-35 markers from ESAM.
Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. Provide the document in the setting // SCC XML (sccXml). Scte35Esam CmfcScte35Esam `locationName:"scte35Esam" type:"string" enum:"true"` // Ignore this setting unless you have SCTE-35 markers in your input video file. // Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear // in your input to also appear in this output. Choose None (NONE) if you don't // want those SCTE-35 markers in this output. Scte35Source CmfcScte35Source `locationName:"scte35Source" type:"string" enum:"true"` } // String returns the string representation func (s CmfcSettings) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s CmfcSettings) MarshalFields(e protocol.FieldEncoder) error { if len(s.Scte35Esam) > 0 { v := s.Scte35Esam metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "scte35Esam", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.Scte35Source) > 0 { v := s.Scte35Source metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "scte35Source", protocol.QuotedValue{ValueMarshaler: v}, metadata) } return nil } // Settings for color correction. type ColorCorrector struct { _ struct{} `type:"structure"` // Brightness level. Brightness *int64 `locationName:"brightness" min:"1" type:"integer"` // Specify the color space you want for this output. The service supports conversion // between HDR formats, between SDR formats, from SDR to HDR, and from HDR to // SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted // video has an HDR format, but visually appears the same as an unconverted // output. HDR to SDR conversion uses Elemental tone mapping technology to approximate // the outcome of manually regrading from HDR to SDR. 
ColorSpaceConversion ColorSpaceConversion `locationName:"colorSpaceConversion" type:"string" enum:"true"` // Contrast level. Contrast *int64 `locationName:"contrast" min:"1" type:"integer"` // Use these settings when you convert to the HDR 10 color space. Specify the // SMPTE ST 2086 Mastering Display Color Volume static metadata that you want // signaled in the output. These values don't affect the pixel values that are // encoded in the video stream. They are intended to help the downstream video // player display content in a way that reflects the intentions of the the content // creator. When you set Color space conversion (ColorSpaceConversion) to HDR // 10 (FORCE_HDR10), these settings are required. You must set values for Max // frame average light level (maxFrameAverageLightLevel) and Max content light // level (maxContentLightLevel); these settings don't have a default value. // The default values for the other HDR 10 metadata settings are defined by // the P3D65 color space. For more information about MediaConvert HDR jobs, // see https://docs.aws.amazon.com/console/mediaconvert/hdr. Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"` // Hue in degrees. Hue *int64 `locationName:"hue" type:"integer"` // Saturation level. Saturation *int64 `locationName:"saturation" min:"1" type:"integer"` } // String returns the string representation func (s ColorCorrector) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. 
func (s *ColorCorrector) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "ColorCorrector"}
	if s.Brightness != nil && *s.Brightness < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Brightness", 1))
	}
	if s.Contrast != nil && *s.Contrast < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Contrast", 1))
	}
	// Hue is the only field here with a negative lower bound (-180 degrees).
	if s.Hue != nil && *s.Hue < -180 {
		invalidParams.Add(aws.NewErrParamMinValue("Hue", -180))
	}
	if s.Saturation != nil && *s.Saturation < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Saturation", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s ColorCorrector) MarshalFields(e protocol.FieldEncoder) error {
	if s.Brightness != nil {
		v := *s.Brightness

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "brightness", protocol.Int64Value(v), metadata)
	}
	if len(s.ColorSpaceConversion) > 0 {
		v := s.ColorSpaceConversion

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "colorSpaceConversion", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Contrast != nil {
		v := *s.Contrast

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "contrast", protocol.Int64Value(v), metadata)
	}
	if s.Hdr10Metadata != nil {
		v := s.Hdr10Metadata

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "hdr10Metadata", v, metadata)
	}
	if s.Hue != nil {
		v := *s.Hue

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hue", protocol.Int64Value(v), metadata)
	}
	if s.Saturation != nil {
		v := *s.Saturation

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "saturation", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Container specific settings.
type ContainerSettings struct {
	_ struct{} `type:"structure"`

	// Settings for MP4 segments in CMAF
	CmfcSettings *CmfcSettings `locationName:"cmfcSettings" type:"structure"`

	// Container for this output. Some containers require a container settings object.
	// If not specified, the default object will be created.
	Container ContainerType `locationName:"container" type:"string" enum:"true"`

	// Settings for F4v container
	F4vSettings *F4vSettings `locationName:"f4vSettings" type:"structure"`

	// MPEG-2 TS container settings. These apply to outputs in a File output group
	// when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS).
	// In these assets, data is organized by the program map table (PMT). Each transport
	// stream program contains subsets of data, including audio, video, and metadata.
	// Each of these subsets of data has a numerical label called a packet identifier
	// (PID). Each transport stream program corresponds to one MediaConvert output.
	// The PMT lists the types of data in a program along with their PID. Downstream
	// systems and players use the program map table to look up the PID for each
	// type of data it accesses and then uses the PIDs to locate specific data within
	// the asset.
	M2tsSettings *M2tsSettings `locationName:"m2tsSettings" type:"structure"`

	// Settings for TS segments in HLS
	M3u8Settings *M3u8Settings `locationName:"m3u8Settings" type:"structure"`

	// Settings for MOV Container.
	MovSettings *MovSettings `locationName:"movSettings" type:"structure"`

	// Settings for MP4 container. You can create audio-only AAC outputs with this
	// container.
	Mp4Settings *Mp4Settings `locationName:"mp4Settings" type:"structure"`

	// Settings for MP4 segments in DASH
	MpdSettings *MpdSettings `locationName:"mpdSettings" type:"structure"`

	// MXF settings
	MxfSettings *MxfSettings `locationName:"mxfSettings" type:"structure"`
}

// String returns the string representation
func (s ContainerSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ContainerSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "ContainerSettings"} if s.M2tsSettings != nil { if err := s.M2tsSettings.Validate(); err != nil { invalidParams.AddNested("M2tsSettings", err.(aws.ErrInvalidParams)) } } if s.M3u8Settings != nil { if err := s.M3u8Settings.Validate(); err != nil { invalidParams.AddNested("M3u8Settings", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s ContainerSettings) MarshalFields(e protocol.FieldEncoder) error { if s.CmfcSettings != nil { v := s.CmfcSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "cmfcSettings", v, metadata) } if len(s.Container) > 0 { v := s.Container metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "container", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.F4vSettings != nil { v := s.F4vSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "f4vSettings", v, metadata) } if s.M2tsSettings != nil { v := s.M2tsSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "m2tsSettings", v, metadata) } if s.M3u8Settings != nil { v := s.M3u8Settings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "m3u8Settings", v, metadata) } if s.MovSettings != nil { v := s.MovSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "movSettings", v, metadata) } if s.Mp4Settings != nil { v := s.Mp4Settings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "mp4Settings", v, metadata) } if s.MpdSettings != nil { v := s.MpdSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "mpdSettings", v, metadata) } if s.MxfSettings != nil { v := s.MxfSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "mxfSettings", v, metadata) } return nil } // Specify the details for each additional DASH 
manifest that you want the service // to generate for this output group. Each manifest can reference a different // subset of outputs in the group. type DashAdditionalManifest struct { _ struct{} `type:"structure"` // Specify a name modifier that the service adds to the name of this manifest // to make it different from the file names of the other main manifests in the // output group. For example, say that the default main manifest for your DASH // group is film-name.mpd. If you enter "-no-premium" for this setting, then // the file name the service generates for this top-level manifest is film-name-no-premium.mpd. ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"` // Specify the outputs that you want this additional top-level manifest to reference. SelectedOutputs []string `locationName:"selectedOutputs" type:"list"` } // String returns the string representation func (s DashAdditionalManifest) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *DashAdditionalManifest) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DashAdditionalManifest"} if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 { invalidParams.Add(aws.NewErrParamMinLen("ManifestNameModifier", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s DashAdditionalManifest) MarshalFields(e protocol.FieldEncoder) error { if s.ManifestNameModifier != nil { v := *s.ManifestNameModifier metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "manifestNameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.SelectedOutputs != nil { v := s.SelectedOutputs metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "selectedOutputs", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) } ls0.End() } return nil } // Specifies DRM settings for DASH outputs. type DashIsoEncryptionSettings struct { _ struct{} `type:"structure"` // This setting can improve the compatibility of your output with video players // on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. // Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback // on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). // If you choose Unencrypted SEI, for that output, the service will exclude // the access unit delimiter and will leave the SEI NAL units unencrypted. PlaybackDeviceCompatibility DashIsoPlaybackDeviceCompatibility `locationName:"playbackDeviceCompatibility" type:"string" enum:"true"` // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings // when doing DRM encryption with a SPEKE-compliant key provider. If your output // group type is CMAF, use the SpekeKeyProviderCmaf settings instead. SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"` } // String returns the string representation func (s DashIsoEncryptionSettings) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s DashIsoEncryptionSettings) MarshalFields(e protocol.FieldEncoder) error { if len(s.PlaybackDeviceCompatibility) > 0 { v := s.PlaybackDeviceCompatibility metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "playbackDeviceCompatibility", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.SpekeKeyProvider != nil { v := s.SpekeKeyProvider metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "spekeKeyProvider", v, metadata) } return nil } // Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to // DASH_ISO_GROUP_SETTINGS. type DashIsoGroupSettings struct { _ struct{} `type:"structure"` // By default, the service creates one .mpd DASH manifest for each DASH ISO // output group in your job. This default manifest references every output in // the output group. To create additional DASH manifests that reference a subset // of the outputs in the output group, specify a list of them here. AdditionalManifests []DashAdditionalManifest `locationName:"additionalManifests" type:"list"` // A partial URI prefix that will be put in the manifest (.mpd) file at the // top level BaseURL element. Can be used if streams are delivered from a different // URL than the manifest file. BaseUrl *string `locationName:"baseUrl" type:"string"` // Use Destination (Destination) to specify the S3 output location and the output // filename base. Destination accepts format identifiers. If you do not specify // the base filename in the URI, the service will use the filename of the input // file. If your job has multiple inputs, the service uses the filename of the // first input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. Will vary based on the type of // destination DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"` // DRM settings. 
	Encryption *DashIsoEncryptionSettings `locationName:"encryption" type:"structure"`

	// Length of fragments to generate (in seconds). Fragment length must be compatible
	// with GOP size and Framerate. Note that fragments will end on the next keyframe
	// after this number of seconds, so actual fragment length may be longer. When
	// Emit Single File is checked, the fragmentation is internal to a single output
	// file and it does not cause the creation of many output files as in other
	// output types.
	FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"`

	// Supports HbbTV specification as indicated
	HbbtvCompliance DashIsoHbbtvCompliance `locationName:"hbbtvCompliance" type:"string" enum:"true"`

	// Minimum time of initially buffered media that is needed to ensure smooth
	// playout.
	MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"`

	// Specify whether your DASH profile is on-demand or main. When you choose Main
	// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
	// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
	// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
	// When you choose On-demand, you must also set the output group setting Segment
	// control (SegmentControl) to Single file (SINGLE_FILE).
	MpdProfile DashIsoMpdProfile `locationName:"mpdProfile" type:"string" enum:"true"`

	// When set to SINGLE_FILE, a single output file is generated, which is internally
	// segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES,
	// separate segment files will be created.
	SegmentControl DashIsoSegmentControl `locationName:"segmentControl" type:"string" enum:"true"`

	// Length of mpd segments to create (in seconds). Note that segments will end
	// on the next keyframe after this number of seconds, so actual segment length
	// may be longer. When Emit Single File is checked, the segmentation is internal
	// to a single output file and it does not cause the creation of many output
	// files as in other output types.
	SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"`

	// If you get an HTTP error in the 400 range when you play back your DASH output,
	// enable this setting and run your transcoding job again. When you enable this
	// setting, the service writes precise segment durations in the DASH manifest.
	// The segment duration information appears inside the SegmentTimeline element,
	// inside SegmentTemplate at the Representation level. When you don't enable
	// this setting, the service writes approximate segment durations in your DASH
	// manifest.
	WriteSegmentTimelineInRepresentation DashIsoWriteSegmentTimelineInRepresentation `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"true"`
}

// String returns the string representation
func (s DashIsoGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DashIsoGroupSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "DashIsoGroupSettings"}
	if s.FragmentLength != nil && *s.FragmentLength < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FragmentLength", 1))
	}
	if s.SegmentLength != nil && *s.SegmentLength < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("SegmentLength", 1))
	}
	if s.AdditionalManifests != nil {
		for i, v := range s.AdditionalManifests {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DashIsoGroupSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.AdditionalManifests != nil {
		v := s.AdditionalManifests

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "additionalManifests", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()

	}
	if s.BaseUrl != nil {
		v := *s.BaseUrl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "baseUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Destination != nil {
		v := *s.Destination

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "destination", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.DestinationSettings != nil {
		v := s.DestinationSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "destinationSettings", v, metadata)
	}
	if s.Encryption != nil {
		v := s.Encryption

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "encryption", v, metadata)
	}
	if s.FragmentLength != nil {
		v := *s.FragmentLength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fragmentLength", protocol.Int64Value(v), metadata)
	}
	if len(s.HbbtvCompliance) > 0 {
		v := s.HbbtvCompliance

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hbbtvCompliance", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.MinBufferTime != nil {
		v := *s.MinBufferTime

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "minBufferTime", protocol.Int64Value(v), metadata)
	}
	if len(s.MpdProfile) > 0 {
		v := s.MpdProfile

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "mpdProfile", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SegmentControl) > 0 {
		v := s.SegmentControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "segmentControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SegmentLength != nil {
		v := *s.SegmentLength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "segmentLength", protocol.Int64Value(v), metadata)
	}
	if len(s.WriteSegmentTimelineInRepresentation) > 0 {
		v := s.WriteSegmentTimelineInRepresentation

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "writeSegmentTimelineInRepresentation", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Settings for deinterlacer
type Deinterlacer struct {
	_ struct{} `type:"structure"`

	// Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE)
	// or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces
	// sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER)
	// OR (BLEND_TICKER) if your source file includes a ticker, such as a scrolling
	// headline at the bottom of the frame.
	Algorithm DeinterlaceAlgorithm `locationName:"algorithm" type:"string" enum:"true"`

	// - When set to NORMAL (default), the deinterlacer does not convert frames
	// that are tagged in metadata as progressive. It will only convert those that
	// are tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer
	// converts every frame to progressive - even those that are already tagged
	// as progressive. Turn Force mode on only if there is a good chance that the
	// metadata has tagged frames as progressive when they are not progressive.
	// Do not turn on otherwise; processing frames that are already progressive
	// into progressive will probably result in lower quality video.
	Control DeinterlacerControl `locationName:"control" type:"string" enum:"true"`

	// Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing.
	// Default is Deinterlace. - Deinterlace converts interlaced to progressive.
	// - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.
	// - Adaptive auto-detects and converts to progressive.
Mode DeinterlacerMode `locationName:"mode" type:"string" enum:"true"` } // String returns the string representation func (s Deinterlacer) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s Deinterlacer) MarshalFields(e protocol.FieldEncoder) error { if len(s.Algorithm) > 0 { v := s.Algorithm metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "algorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.Control) > 0 { v := s.Control metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "control", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.Mode) > 0 { v := s.Mode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "mode", protocol.QuotedValue{ValueMarshaler: v}, metadata) } return nil } // Settings associated with the destination. Will vary based on the type of // destination type DestinationSettings struct { _ struct{} `type:"structure"` // Settings associated with S3 destination S3Settings *S3DestinationSettings `locationName:"s3Settings" type:"structure"` } // String returns the string representation func (s DestinationSettings) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s DestinationSettings) MarshalFields(e protocol.FieldEncoder) error { if s.S3Settings != nil { v := s.S3Settings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "s3Settings", v, metadata) } return nil } // Settings for Dolby Vision type DolbyVision struct { _ struct{} `type:"structure"` // Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override // the MaxCLL and MaxFALL values in your input with new values. L6Metadata *DolbyVisionLevel6Metadata `locationName:"l6Metadata" type:"structure"` // Use Dolby Vision Mode to choose how the service will handle Dolby Vision // MaxCLL and MaxFALL properies. 
L6Mode DolbyVisionLevel6Mode `locationName:"l6Mode" type:"string" enum:"true"` // In the current MediaConvert implementation, the Dolby Vision profile is always // 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame // interleaved data. Profile DolbyVisionProfile `locationName:"profile" type:"string" enum:"true"` } // String returns the string representation func (s DolbyVision) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s DolbyVision) MarshalFields(e protocol.FieldEncoder) error { if s.L6Metadata != nil { v := s.L6Metadata metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "l6Metadata", v, metadata) } if len(s.L6Mode) > 0 { v := s.L6Mode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "l6Mode", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.Profile) > 0 { v := s.Profile metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "profile", protocol.QuotedValue{ValueMarshaler: v}, metadata) } return nil } // Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override // the MaxCLL and MaxFALL values in your input with new values. type DolbyVisionLevel6Metadata struct { _ struct{} `type:"structure"` // Maximum Content Light Level. Static HDR metadata that corresponds to the // brightest pixel in the entire stream. Measured in nits. MaxCll *int64 `locationName:"maxCll" type:"integer"` // Maximum Frame-Average Light Level. Static HDR metadata that corresponds to // the highest frame-average brightness in the entire stream. Measured in nits. MaxFall *int64 `locationName:"maxFall" type:"integer"` } // String returns the string representation func (s DolbyVisionLevel6Metadata) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s DolbyVisionLevel6Metadata) MarshalFields(e protocol.FieldEncoder) error { if s.MaxCll != nil { v := *s.MaxCll metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "maxCll", protocol.Int64Value(v), metadata) } if s.MaxFall != nil { v := *s.MaxFall metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "maxFall", protocol.Int64Value(v), metadata) } return nil } // Inserts DVB Network Information Table (NIT) at the specified table repetition // interval. type DvbNitSettings struct { _ struct{} `type:"structure"` // The numeric value placed in the Network Information Table (NIT). NetworkId *int64 `locationName:"networkId" type:"integer"` // The network name text placed in the network_name_descriptor inside the Network // Information Table. Maximum length is 256 characters. NetworkName *string `locationName:"networkName" min:"1" type:"string"` // The number of milliseconds between instances of this table in the output // transport stream. NitInterval *int64 `locationName:"nitInterval" min:"25" type:"integer"` } // String returns the string representation func (s DvbNitSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *DvbNitSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DvbNitSettings"} if s.NetworkName != nil && len(*s.NetworkName) < 1 { invalidParams.Add(aws.NewErrParamMinLen("NetworkName", 1)) } if s.NitInterval != nil && *s.NitInterval < 25 { invalidParams.Add(aws.NewErrParamMinValue("NitInterval", 25)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s DvbNitSettings) MarshalFields(e protocol.FieldEncoder) error { if s.NetworkId != nil { v := *s.NetworkId metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "networkId", protocol.Int64Value(v), metadata) } if s.NetworkName != nil { v := *s.NetworkName metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "networkName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.NitInterval != nil { v := *s.NitInterval metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "nitInterval", protocol.Int64Value(v), metadata) } return nil } // Inserts DVB Service Description Table (NIT) at the specified table repetition // interval. type DvbSdtSettings struct { _ struct{} `type:"structure"` // Selects method of inserting SDT information into output stream. "Follow input // SDT" copies SDT information from input stream to output stream. "Follow input // SDT if present" copies SDT information from input stream to output stream // if SDT information is present in the input, otherwise it will fall back on // the user-defined values. Enter "SDT Manually" means user will enter the SDT // information. "No SDT" means output stream will not contain SDT information. OutputSdt OutputSdt `locationName:"outputSdt" type:"string" enum:"true"` // The number of milliseconds between instances of this table in the output // transport stream. SdtInterval *int64 `locationName:"sdtInterval" min:"25" type:"integer"` // The service name placed in the service_descriptor in the Service Description // Table. Maximum length is 256 characters. ServiceName *string `locationName:"serviceName" min:"1" type:"string"` // The service provider name placed in the service_descriptor in the Service // Description Table. Maximum length is 256 characters. 
ServiceProviderName *string `locationName:"serviceProviderName" min:"1" type:"string"` } // String returns the string representation func (s DvbSdtSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *DvbSdtSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DvbSdtSettings"} if s.SdtInterval != nil && *s.SdtInterval < 25 { invalidParams.Add(aws.NewErrParamMinValue("SdtInterval", 25)) } if s.ServiceName != nil && len(*s.ServiceName) < 1 { invalidParams.Add(aws.NewErrParamMinLen("ServiceName", 1)) } if s.ServiceProviderName != nil && len(*s.ServiceProviderName) < 1 { invalidParams.Add(aws.NewErrParamMinLen("ServiceProviderName", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s DvbSdtSettings) MarshalFields(e protocol.FieldEncoder) error { if len(s.OutputSdt) > 0 { v := s.OutputSdt metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "outputSdt", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.SdtInterval != nil { v := *s.SdtInterval metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "sdtInterval", protocol.Int64Value(v), metadata) } if s.ServiceName != nil { v := *s.ServiceName metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "serviceName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.ServiceProviderName != nil { v := *s.ServiceProviderName metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "serviceProviderName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } return nil } // DVB-Sub Destination Settings type DvbSubDestinationSettings struct { _ struct{} `type:"structure"` // If no explicit x_position or y_position is provided, setting alignment to // centered will place the captions at the bottom center of the output. 
Similarly, // setting a left alignment will align captions to the bottom left of the output. // If x and y positions are given in conjunction with the alignment parameter, // the font will be justified (either left or centered) relative to those coordinates. // This option is not valid for source captions that are STL, 608/embedded or // teletext. These source settings are already pre-defined by the caption stream. // All burn-in and DVB-Sub font settings must match. Alignment DvbSubtitleAlignment `locationName:"alignment" type:"string" enum:"true"` // Specifies the color of the rectangle behind the captions.All burn-in and // DVB-Sub font settings must match. BackgroundColor DvbSubtitleBackgroundColor `locationName:"backgroundColor" type:"string" enum:"true"` // Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. // Leaving this parameter blank is equivalent to setting it to 0 (transparent). // All burn-in and DVB-Sub font settings must match. BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"` // Specifies the color of the burned-in captions. This option is not valid for // source captions that are STL, 608/embedded or teletext. These source settings // are already pre-defined by the caption stream. All burn-in and DVB-Sub font // settings must match. FontColor DvbSubtitleFontColor `locationName:"fontColor" type:"string" enum:"true"` // Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.All // burn-in and DVB-Sub font settings must match. FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` // Font resolution in DPI (dots per inch); default is 96 dpi.All burn-in and // DVB-Sub font settings must match. FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"` // Provide the font script, using an ISO 15924 script code, if the LanguageCode // is not sufficient for determining the script type. 
Where LanguageCode or // CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is // used to help determine the appropriate font for rendering DVB-Sub captions. FontScript FontScript `locationName:"fontScript" type:"string" enum:"true"` // A positive integer indicates the exact font size in points. Set to 0 for // automatic font size selection. All burn-in and DVB-Sub font settings must // match. FontSize *int64 `locationName:"fontSize" type:"integer"` // Specifies font outline color. This option is not valid for source captions // that are either 608/embedded or teletext. These source settings are already // pre-defined by the caption stream. All burn-in and DVB-Sub font settings // must match. OutlineColor DvbSubtitleOutlineColor `locationName:"outlineColor" type:"string" enum:"true"` // Specifies font outline size in pixels. This option is not valid for source // captions that are either 608/embedded or teletext. These source settings // are already pre-defined by the caption stream. All burn-in and DVB-Sub font // settings must match. OutlineSize *int64 `locationName:"outlineSize" type:"integer"` // Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub // font settings must match. ShadowColor DvbSubtitleShadowColor `locationName:"shadowColor" type:"string" enum:"true"` // Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving // this parameter blank is equivalent to setting it to 0 (transparent). All // burn-in and DVB-Sub font settings must match. ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"` // Specifies the horizontal offset of the shadow relative to the captions in // pixels. A value of -2 would result in a shadow offset 2 pixels to the left. // All burn-in and DVB-Sub font settings must match. ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"` // Specifies the vertical offset of the shadow relative to the captions in pixels. 
// A value of -2 would result in a shadow offset 2 pixels above the text. All // burn-in and DVB-Sub font settings must match. ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"` // Specify whether your DVB subtitles are standard or for hearing impaired. // Choose hearing impaired if your subtitles include audio descriptions and // dialogue. Choose standard if your subtitles include only dialogue. SubtitlingType DvbSubtitlingType `locationName:"subtitlingType" type:"string" enum:"true"` // Only applies to jobs with input captions in Teletext or STL formats. Specify // whether the spacing between letters in your captions is set by the captions // grid or varies depending on letter width. Choose fixed grid to conform to // the spacing specified in the captions file more accurately. Choose proportional // to make the text easier to read if the captions are closed caption. TeletextSpacing DvbSubtitleTeletextSpacing `locationName:"teletextSpacing" type:"string" enum:"true"` // Specifies the horizontal position of the caption relative to the left side // of the output in pixels. A value of 10 would result in the captions starting // 10 pixels from the left of the output. If no explicit x_position is provided, // the horizontal caption position will be determined by the alignment parameter. // This option is not valid for source captions that are STL, 608/embedded or // teletext. These source settings are already pre-defined by the caption stream. // All burn-in and DVB-Sub font settings must match. XPosition *int64 `locationName:"xPosition" type:"integer"` // Specifies the vertical position of the caption relative to the top of the // output in pixels. A value of 10 would result in the captions starting 10 // pixels from the top of the output. If no explicit y_position is provided, // the caption will be positioned towards the bottom of the output. This option // is not valid for source captions that are STL, 608/embedded or teletext. 
// These source settings are already pre-defined by the caption stream. All // burn-in and DVB-Sub font settings must match. YPosition *int64 `locationName:"yPosition" type:"integer"` } // String returns the string representation func (s DvbSubDestinationSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *DvbSubDestinationSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DvbSubDestinationSettings"} if s.FontResolution != nil && *s.FontResolution < 96 { invalidParams.Add(aws.NewErrParamMinValue("FontResolution", 96)) } if s.ShadowXOffset != nil && *s.ShadowXOffset < -2.147483648e+09 { invalidParams.Add(aws.NewErrParamMinValue("ShadowXOffset", -2.147483648e+09)) } if s.ShadowYOffset != nil && *s.ShadowYOffset < -2.147483648e+09 { invalidParams.Add(aws.NewErrParamMinValue("ShadowYOffset", -2.147483648e+09)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s DvbSubDestinationSettings) MarshalFields(e protocol.FieldEncoder) error { if len(s.Alignment) > 0 { v := s.Alignment metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "alignment", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.BackgroundColor) > 0 { v := s.BackgroundColor metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "backgroundColor", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.BackgroundOpacity != nil { v := *s.BackgroundOpacity metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "backgroundOpacity", protocol.Int64Value(v), metadata) } if len(s.FontColor) > 0 { v := s.FontColor metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "fontColor", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.FontOpacity != nil { v := *s.FontOpacity metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "fontOpacity", protocol.Int64Value(v), metadata) } if s.FontResolution != nil { v := *s.FontResolution metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "fontResolution", protocol.Int64Value(v), metadata) } if len(s.FontScript) > 0 { v := s.FontScript metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "fontScript", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.FontSize != nil { v := *s.FontSize metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "fontSize", protocol.Int64Value(v), metadata) } if len(s.OutlineColor) > 0 { v := s.OutlineColor metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "outlineColor", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.OutlineSize != nil { v := *s.OutlineSize metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "outlineSize", protocol.Int64Value(v), metadata) } if len(s.ShadowColor) > 0 { v := s.ShadowColor metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "shadowColor", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.ShadowOpacity 
!= nil { v := *s.ShadowOpacity metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "shadowOpacity", protocol.Int64Value(v), metadata) } if s.ShadowXOffset != nil { v := *s.ShadowXOffset metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "shadowXOffset", protocol.Int64Value(v), metadata) } if s.ShadowYOffset != nil { v := *s.ShadowYOffset metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "shadowYOffset", protocol.Int64Value(v), metadata) } if len(s.SubtitlingType) > 0 { v := s.SubtitlingType metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "subtitlingType", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.TeletextSpacing) > 0 { v := s.TeletextSpacing metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "teletextSpacing", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.XPosition != nil { v := *s.XPosition metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "xPosition", protocol.Int64Value(v), metadata) } if s.YPosition != nil { v := *s.YPosition metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "yPosition", protocol.Int64Value(v), metadata) } return nil } // DVB Sub Source Settings type DvbSubSourceSettings struct { _ struct{} `type:"structure"` // When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source // content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, // regardless of selectors. Pid *int64 `locationName:"pid" min:"1" type:"integer"` } // String returns the string representation func (s DvbSubSourceSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. 
func (s *DvbSubSourceSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DvbSubSourceSettings"} if s.Pid != nil && *s.Pid < 1 { invalidParams.Add(aws.NewErrParamMinValue("Pid", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s DvbSubSourceSettings) MarshalFields(e protocol.FieldEncoder) error { if s.Pid != nil { v := *s.Pid metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "pid", protocol.Int64Value(v), metadata) } return nil } // Inserts DVB Time and Date Table (TDT) at the specified table repetition interval. type DvbTdtSettings struct { _ struct{} `type:"structure"` // The number of milliseconds between instances of this table in the output // transport stream. TdtInterval *int64 `locationName:"tdtInterval" min:"1000" type:"integer"` } // String returns the string representation func (s DvbTdtSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *DvbTdtSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DvbTdtSettings"} if s.TdtInterval != nil && *s.TdtInterval < 1000 { invalidParams.Add(aws.NewErrParamMinValue("TdtInterval", 1000)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s DvbTdtSettings) MarshalFields(e protocol.FieldEncoder) error { if s.TdtInterval != nil { v := *s.TdtInterval metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "tdtInterval", protocol.Int64Value(v), metadata) } return nil } // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to // the value EAC3_ATMOS. 
type Eac3AtmosSettings struct { _ struct{} `type:"structure"` // Specify the average bitrate in bits per second.Valid values: 384k, 448k, // 640k, 768k Bitrate *int64 `locationName:"bitrate" min:"384000" type:"integer"` // Specify the bitstream mode for the E-AC-3 stream that the encoder emits. // For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex // E). BitstreamMode Eac3AtmosBitstreamMode `locationName:"bitstreamMode" type:"string" enum:"true"` // The coding mode for Dolby Digital Plus JOC (Atmos) is always 9.1.6 (CODING_MODE_9_1_6). CodingMode Eac3AtmosCodingMode `locationName:"codingMode" type:"string" enum:"true"` // Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis. DialogueIntelligence Eac3AtmosDialogueIntelligence `locationName:"dialogueIntelligence" type:"string" enum:"true"` // Specify the absolute peak level for a signal with dynamic range compression. DynamicRangeCompressionLine Eac3AtmosDynamicRangeCompressionLine `locationName:"dynamicRangeCompressionLine" type:"string" enum:"true"` // Specify how the service limits the audio dynamic range when compressing the // audio. DynamicRangeCompressionRf Eac3AtmosDynamicRangeCompressionRf `locationName:"dynamicRangeCompressionRf" type:"string" enum:"true"` // Specify a value for the following Dolby Atmos setting: Left only/Right only // center mix(Lo/Ro center). MediaConvert uses this value for downmixing. How // the service uses thisvalue depends on the value that you choose for Stereo // downmix (Eac3AtmosStereoDownmix).Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, // -4.5, and -6.0. LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"` // Specify a value for the following Dolby Atmos setting: Left only/Right only // (Lo/Ro surround). MediaConvert uses this value for downmixing. How the service // uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). 
// Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"` // Specify a value for the following Dolby Atmos setting: Left total/Right total // center mix (Lt/Rt center). MediaConvert uses this value for downmixing. How // the service uses this value depends on the value that you choose for Stereo // downmix (Eac3AtmosStereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, // -4.5, and -6.0. LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"` // Specify a value for the following Dolby Atmos setting: Left total/Right total // surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. // How the service uses this value depends on the value that you choose for // Stereo downmix (Eac3AtmosStereoDownmix). Valid values: -1.5, -3.0, -4.5, // -6.0, and -60. The value -60 mutes the channel. LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"` // Choose how the service meters the loudness of your audio. MeteringMode Eac3AtmosMeteringMode `locationName:"meteringMode" type:"string" enum:"true"` // This value is always 48000. It represents the sample rate in Hz. SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"` // Specify the percentage of audio content that must be speech before the encoder // uses the measured speech loudness as the overall program loudness. SpeechThreshold *int64 `locationName:"speechThreshold" min:"1" type:"integer"` // Choose how the service does stereo downmixing. StereoDownmix Eac3AtmosStereoDownmix `locationName:"stereoDownmix" type:"string" enum:"true"` // Specify whether your input audio has an additional center rear surround channel // matrix encoded into your left and right surround channels. 
SurroundExMode Eac3AtmosSurroundExMode `locationName:"surroundExMode" type:"string" enum:"true"` } // String returns the string representation func (s Eac3AtmosSettings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. func (s *Eac3AtmosSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "Eac3AtmosSettings"} if s.Bitrate != nil && *s.Bitrate < 384000 { invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 384000)) } if s.SampleRate != nil && *s.SampleRate < 48000 { invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 48000)) } if s.SpeechThreshold != nil && *s.SpeechThreshold < 1 { invalidParams.Add(aws.NewErrParamMinValue("SpeechThreshold", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s Eac3AtmosSettings) MarshalFields(e protocol.FieldEncoder) error { if s.Bitrate != nil { v := *s.Bitrate metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata) } if len(s.BitstreamMode) > 0 { v := s.BitstreamMode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "bitstreamMode", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.CodingMode) > 0 { v := s.CodingMode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "codingMode", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.DialogueIntelligence) > 0 { v := s.DialogueIntelligence metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "dialogueIntelligence", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.DynamicRangeCompressionLine) > 0 { v := s.DynamicRangeCompressionLine metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "dynamicRangeCompressionLine", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.DynamicRangeCompressionRf) > 0 { v := s.DynamicRangeCompressionRf 
metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "dynamicRangeCompressionRf", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.LoRoCenterMixLevel != nil { v := *s.LoRoCenterMixLevel metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "loRoCenterMixLevel", protocol.Float64Value(v), metadata) } if s.LoRoSurroundMixLevel != nil { v := *s.LoRoSurroundMixLevel metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "loRoSurroundMixLevel", protocol.Float64Value(v), metadata) } if s.LtRtCenterMixLevel != nil { v := *s.LtRtCenterMixLevel metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "ltRtCenterMixLevel", protocol.Float64Value(v), metadata) } if s.LtRtSurroundMixLevel != nil { v := *s.LtRtSurroundMixLevel metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "ltRtSurroundMixLevel", protocol.Float64Value(v), metadata) } if len(s.MeteringMode) > 0 { v := s.MeteringMode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "meteringMode", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.SampleRate != nil { v := *s.SampleRate metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata) } if s.SpeechThreshold != nil { v := *s.SpeechThreshold metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "speechThreshold", protocol.Int64Value(v), metadata) } if len(s.StereoDownmix) > 0 { v := s.StereoDownmix metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "stereoDownmix", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.SurroundExMode) > 0 { v := s.SurroundExMode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "surroundExMode", protocol.QuotedValue{ValueMarshaler: v}, metadata) } return nil } // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to // the value EAC3. 
type Eac3Settings struct { _ struct{} `type:"structure"` // If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels. // Only used for 3/2 coding mode. AttenuationControl Eac3AttenuationControl `locationName:"attenuationControl" type:"string" enum:"true"` // Specify the average bitrate in bits per second. Valid bitrates depend on // the coding mode. Bitrate *int64 `locationName:"bitrate" min:"64000" type:"integer"` // Specify the bitstream mode for the E-AC-3 stream that the encoder emits. // For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex // E). BitstreamMode Eac3BitstreamMode `locationName:"bitstreamMode" type:"string" enum:"true"` // Dolby Digital Plus coding mode. Determines number of channels. CodingMode Eac3CodingMode `locationName:"codingMode" type:"string" enum:"true"` // Activates a DC highpass filter for all input channels. DcFilter Eac3DcFilter `locationName:"dcFilter" type:"string" enum:"true"` // Sets the dialnorm for the output. If blank and input audio is Dolby Digital // Plus, dialnorm will be passed through. Dialnorm *int64 `locationName:"dialnorm" min:"1" type:"integer"` // Specify the absolute peak level for a signal with dynamic range compression. DynamicRangeCompressionLine Eac3DynamicRangeCompressionLine `locationName:"dynamicRangeCompressionLine" type:"string" enum:"true"` // Specify how the service limits the audio dynamic range when compressing the // audio. DynamicRangeCompressionRf Eac3DynamicRangeCompressionRf `locationName:"dynamicRangeCompressionRf" type:"string" enum:"true"` // When encoding 3/2 audio, controls whether the LFE channel is enabled LfeControl Eac3LfeControl `locationName:"lfeControl" type:"string" enum:"true"` // Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only // valid with 3_2_LFE coding mode. 
LfeFilter Eac3LfeFilter `locationName:"lfeFilter" type:"string" enum:"true"` // Specify a value for the following Dolby Digital Plus setting: Left only/Right // only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. // How the service uses this value depends on the value that you choose for // Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, // -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies // only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) // for the setting Coding mode (Eac3CodingMode). If you choose a different value // for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel). LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"` // Specify a value for the following Dolby Digital Plus setting: Left only/Right // only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the // service uses this value depends on the value that you choose for Stereo downmix // (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value // -60 mutes the channel. This setting applies only if you keep the default // value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode // (Eac3CodingMode). If you choose a different value for Coding mode, the service // ignores Left only/Right only surround (loRoSurroundMixLevel). LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"` // Specify a value for the following Dolby Digital Plus setting: Left total/Right // total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. // How the service uses this value depends on the value that you choose for // Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, // -4.5, -6.0, and -60. The value -60 mutes the channel. 
This setting applies // only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) // for the setting Coding mode (Eac3CodingMode). If you choose a different value // for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel). LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"` // Specify a value for the following Dolby Digital Plus setting: Left total/Right // total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. // How the service uses this value depends on the value that you choose for // Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, // and -60. The value -60 mutes the channel. This setting applies only if you // keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the // setting Coding mode (Eac3CodingMode). If you choose a different value for // Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel). LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"` // When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, // or DolbyE decoder that supplied this audio data. If audio was not supplied // from one of these streams, then the static metadata settings will be used. MetadataControl Eac3MetadataControl `locationName:"metadataControl" type:"string" enum:"true"` // When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is // present on the input. this detection is dynamic over the life of the transcode. // Inputs that alternate between DD+ and non-DD+ content will have a consistent // DD+ output as the system alternates between passthrough and encoding. PassthroughControl Eac3PassthroughControl `locationName:"passthroughControl" type:"string" enum:"true"` // Controls the amount of phase-shift applied to the surround channels. Only // used for 3/2 coding mode. 
	PhaseControl Eac3PhaseControl `locationName:"phaseControl" type:"string" enum:"true"`

	// This value is always 48000. It represents the sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"`

	// Choose how the service does stereo downmixing. This setting only applies
	// if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
	// for the setting Coding mode (Eac3CodingMode). If you choose a different value
	// for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).
	StereoDownmix Eac3StereoDownmix `locationName:"stereoDownmix" type:"string" enum:"true"`

	// When encoding 3/2 audio, sets whether an extra center back surround channel
	// is matrix encoded into the left and right surround channels.
	SurroundExMode Eac3SurroundExMode `locationName:"surroundExMode" type:"string" enum:"true"`

	// When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into
	// the two channels.
	SurroundMode Eac3SurroundMode `locationName:"surroundMode" type:"string" enum:"true"`
}

// String returns the string representation
func (s Eac3Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimum values checked here mirror the `min` constraints declared in
// the struct tags for Bitrate, Dialnorm, and SampleRate.
func (s *Eac3Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Eac3Settings"}
	if s.Bitrate != nil && *s.Bitrate < 64000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 64000))
	}
	if s.Dialnorm != nil && *s.Dialnorm < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Dialnorm", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 48000 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 48000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Each field is emitted only when set: enum fields when non-empty, pointer
// fields when non-nil. Enum values are written as quoted JSON strings; numeric
// pointers are dereferenced and written as JSON numbers.
func (s Eac3Settings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.AttenuationControl) > 0 {
		v := s.AttenuationControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "attenuationControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Bitrate != nil {
		v := *s.Bitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.BitstreamMode) > 0 {
		v := s.BitstreamMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitstreamMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.CodingMode) > 0 {
		v := s.CodingMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codingMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.DcFilter) > 0 {
		v := s.DcFilter

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dcFilter", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Dialnorm != nil {
		v := *s.Dialnorm

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dialnorm", protocol.Int64Value(v), metadata)
	}
	if len(s.DynamicRangeCompressionLine) > 0 {
		v := s.DynamicRangeCompressionLine

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dynamicRangeCompressionLine", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.DynamicRangeCompressionRf) > 0 {
		v := s.DynamicRangeCompressionRf

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dynamicRangeCompressionRf", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.LfeControl) > 0 {
		v := s.LfeControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "lfeControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.LfeFilter) > 0 {
		v := s.LfeFilter

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "lfeFilter", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.LoRoCenterMixLevel != nil {
		v := *s.LoRoCenterMixLevel

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "loRoCenterMixLevel", protocol.Float64Value(v), metadata)
	}
	if s.LoRoSurroundMixLevel != nil {
		v := *s.LoRoSurroundMixLevel

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "loRoSurroundMixLevel", protocol.Float64Value(v), metadata)
	}
	if s.LtRtCenterMixLevel != nil {
		v := *s.LtRtCenterMixLevel

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "ltRtCenterMixLevel", protocol.Float64Value(v), metadata)
	}
	if s.LtRtSurroundMixLevel != nil {
		v := *s.LtRtSurroundMixLevel

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "ltRtSurroundMixLevel", protocol.Float64Value(v), metadata)
	}
	if len(s.MetadataControl) > 0 {
		v := s.MetadataControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "metadataControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.PassthroughControl) > 0 {
		v := s.PassthroughControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "passthroughControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.PhaseControl) > 0 {
		v := s.PhaseControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "phaseControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}
	if len(s.StereoDownmix) > 0 {
		v := s.StereoDownmix

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "stereoDownmix", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SurroundExMode) > 0 {
		v := s.SurroundExMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "surroundExMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SurroundMode) > 0 {
		v := s.SurroundMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "surroundMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Settings specific to embedded/ancillary caption outputs, including 608/708
// Channel destination number.
type EmbeddedDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Ignore this setting unless your input captions are SCC format and your output
	// captions are embedded in the video stream. Specify a CC number for each captions
	// channel in this output. If you have two channels, choose CC numbers that
	// aren't in the same field. For example, choose 1 and 3. For more information,
	// see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
	Destination608ChannelNumber *int64 `locationName:"destination608ChannelNumber" min:"1" type:"integer"`

	// Ignore this setting unless your input captions are SCC format and you want
	// both 608 and 708 captions embedded in your output stream. Optionally, specify
	// the 708 service number for each output captions channel. Choose a different
	// number for each channel. To use this setting, also set Force 608 to 708 upconvert
	// (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector
	// settings. If you choose to upconvert but don't specify a 708 service number,
	// MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber)
	// for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
	Destination708ServiceNumber *int64 `locationName:"destination708ServiceNumber" min:"1" type:"integer"`
}

// String returns the string representation
func (s EmbeddedDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *EmbeddedDestinationSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "EmbeddedDestinationSettings"}
	// Both channel/service numbers carry a `min:"1"` constraint in the struct tags.
	if s.Destination608ChannelNumber != nil && *s.Destination608ChannelNumber < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Destination608ChannelNumber", 1))
	}
	if s.Destination708ServiceNumber != nil && *s.Destination708ServiceNumber < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Destination708ServiceNumber", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Fields are written only when their pointers are non-nil.
func (s EmbeddedDestinationSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.Destination608ChannelNumber != nil {
		v := *s.Destination608ChannelNumber

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "destination608ChannelNumber", protocol.Int64Value(v), metadata)
	}
	if s.Destination708ServiceNumber != nil {
		v := *s.Destination708ServiceNumber

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "destination708ServiceNumber", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Settings for embedded captions Source
type EmbeddedSourceSettings struct {
	_ struct{} `type:"structure"`

	// Specify whether this set of input captions appears in your outputs in both
	// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
	// the captions data in two ways: it passes the 608 data through using the 608
	// compatibility bytes fields of the 708 wrapper, and it also translates the
	// 608 data into 708.
	Convert608To708 EmbeddedConvert608To708 `locationName:"convert608To708" type:"string" enum:"true"`

	// Specifies the 608/708 channel number within the video track from which to
	// extract captions. Unused for passthrough.
	Source608ChannelNumber *int64 `locationName:"source608ChannelNumber" min:"1" type:"integer"`

	// Specifies the video track index used for extracting captions. The system
	// only supports one input video track, so this should always be set to '1'.
	Source608TrackNumber *int64 `locationName:"source608TrackNumber" min:"1" type:"integer"`

	// By default, the service terminates any unterminated captions at the end of
	// each input. If you want the caption to continue onto your next input, disable
	// this setting.
	TerminateCaptions EmbeddedTerminateCaptions `locationName:"terminateCaptions" type:"string" enum:"true"`
}

// String returns the string representation
func (s EmbeddedSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimums mirror the `min:"1"` struct-tag constraints.
func (s *EmbeddedSourceSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "EmbeddedSourceSettings"}
	if s.Source608ChannelNumber != nil && *s.Source608ChannelNumber < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Source608ChannelNumber", 1))
	}
	if s.Source608TrackNumber != nil && *s.Source608TrackNumber < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Source608TrackNumber", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s EmbeddedSourceSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Convert608To708) > 0 {
		v := s.Convert608To708

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "convert608To708", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Source608ChannelNumber != nil {
		v := *s.Source608ChannelNumber

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "source608ChannelNumber", protocol.Int64Value(v), metadata)
	}
	if s.Source608TrackNumber != nil {
		v := *s.Source608TrackNumber

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "source608TrackNumber", protocol.Int64Value(v), metadata)
	}
	if len(s.TerminateCaptions) > 0 {
		v := s.TerminateCaptions

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "terminateCaptions", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Describes an account-specific API endpoint.
type Endpoint struct {
	_ struct{} `type:"structure"`

	// URL of endpoint
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation
func (s Endpoint) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Endpoint) MarshalFields(e protocol.FieldEncoder) error {
	if s.Url != nil {
		v := *s.Url

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "url", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// ESAM ManifestConfirmConditionNotification defined by OC-SP-ESAM-API-I03-131025.
type EsamManifestConfirmConditionNotification struct {
	_ struct{} `type:"structure"`

	// Provide your ESAM ManifestConfirmConditionNotification XML document inside
	// your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025.
	// The transcoder will use the Manifest Conditioning instructions in the message
	// that you supply.
	MccXml *string `locationName:"mccXml" type:"string"`
}

// String returns the string representation
func (s EsamManifestConfirmConditionNotification) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s EsamManifestConfirmConditionNotification) MarshalFields(e protocol.FieldEncoder) error {
	if s.MccXml != nil {
		v := *s.MccXml

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "mccXml", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
// you can ignore these settings.
type EsamSettings struct {
	_ struct{} `type:"structure"`

	// Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025.
	// The transcoder uses the manifest conditioning instructions that you provide
	// in the setting MCC XML (mccXml).
	ManifestConfirmConditionNotification *EsamManifestConfirmConditionNotification `locationName:"manifestConfirmConditionNotification" type:"structure"`

	// Specifies the stream distance, in milliseconds, between the SCTE 35 messages
	// that the transcoder places and the splice points that they refer to. If the
	// time between the start of the asset and the SCTE-35 message is less than
	// this value, then the transcoder places the SCTE-35 marker at the beginning
	// of the stream.
	ResponseSignalPreroll *int64 `locationName:"responseSignalPreroll" type:"integer"`

	// Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025.
	// The transcoder uses the signal processing instructions that you provide in
	// the setting SCC XML (sccXml).
	SignalProcessingNotification *EsamSignalProcessingNotification `locationName:"signalProcessingNotification" type:"structure"`
}

// String returns the string representation
func (s EsamSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Nested structures are delegated via SetFields; scalars are written directly.
func (s EsamSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.ManifestConfirmConditionNotification != nil {
		v := s.ManifestConfirmConditionNotification

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "manifestConfirmConditionNotification", v, metadata)
	}
	if s.ResponseSignalPreroll != nil {
		v := *s.ResponseSignalPreroll

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "responseSignalPreroll", protocol.Int64Value(v), metadata)
	}
	if s.SignalProcessingNotification != nil {
		v := s.SignalProcessingNotification

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "signalProcessingNotification", v, metadata)
	}
	return nil
}

// ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025.
type EsamSignalProcessingNotification struct {
	_ struct{} `type:"structure"`

	// Provide your ESAM SignalProcessingNotification XML document inside your JSON
	// job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The
	// transcoder will use the signal processing instructions in the message that
	// you supply. Provide your ESAM SignalProcessingNotification XML document inside
	// your JSON job settings. For your MPEG2-TS file outputs, if you want the service
	// to place SCTE-35 markers at the insertion points you specify in the XML document,
	// you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either
	// specify an ESAM XML document or enable SCTE-35 passthrough. You can't do
	// both.
	SccXml *string `locationName:"sccXml" type:"string"`
}

// String returns the string representation
func (s EsamSignalProcessingNotification) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s EsamSignalProcessingNotification) MarshalFields(e protocol.FieldEncoder) error {
	if s.SccXml != nil {
		v := *s.SccXml

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sccXml", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Settings for F4v container
type F4vSettings struct {
	_ struct{} `type:"structure"`

	// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
	// of the archive as required for progressive downloading. Otherwise it is placed
	// normally at the end.
	MoovPlacement F4vMoovPlacement `locationName:"moovPlacement" type:"string" enum:"true"`
}

// String returns the string representation
func (s F4vSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s F4vSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.MoovPlacement) > 0 {
		v := s.MoovPlacement

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "moovPlacement", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to
// FILE_GROUP_SETTINGS.
type FileGroupSettings struct {
	_ struct{} `type:"structure"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`
}

// String returns the string representation
func (s FileGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s FileGroupSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.Destination != nil {
		v := *s.Destination

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "destination", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.DestinationSettings != nil {
		v := s.DestinationSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "destinationSettings", v, metadata)
	}
	return nil
}

// If your input captions are SCC, SMI, SRT, STL, TTML, or IMSC 1.1 in an xml
// file, specify the URI of the input caption source file. If your caption source
// is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.
type FileSourceSettings struct {
	_ struct{} `type:"structure"`

	// Specify whether this set of input captions appears in your outputs in both
	// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
	// the captions data in two ways: it passes the 608 data through using the 608
	// compatibility bytes fields of the 708 wrapper, and it also translates the
	// 608 data into 708.
	Convert608To708 FileSourceConvert608To708 `locationName:"convert608To708" type:"string" enum:"true"`

	// Ignore this setting unless your input captions format is SCC. To have the
	// service compensate for differing frame rates between your input captions
	// and input video, specify the frame rate of the captions file. Specify this
	// value as a fraction, using the settings Framerate numerator (framerateNumerator)
	// and Framerate denominator (framerateDenominator). For example, you might
	// specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps,
	// or 30000 / 1001 for 29.97 fps.
	Framerate *CaptionSourceFramerate `locationName:"framerate" type:"structure"`

	// External caption file used for loading captions. Accepted file extensions
	// are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', and 'smi'.
	SourceFile *string `locationName:"sourceFile" min:"14" type:"string"`

	// Specifies a time delta in seconds to offset the captions from the source
	// file.
	TimeDelta *int64 `locationName:"timeDelta" type:"integer"`
}

// String returns the string representation
func (s FileSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// -2.147483648e+09 is the int32 minimum expressed as an exact untyped constant;
// it bounds TimeDelta, and nested Framerate settings are validated recursively.
func (s *FileSourceSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "FileSourceSettings"}
	if s.SourceFile != nil && len(*s.SourceFile) < 14 {
		invalidParams.Add(aws.NewErrParamMinLen("SourceFile", 14))
	}
	if s.TimeDelta != nil && *s.TimeDelta < -2.147483648e+09 {
		invalidParams.Add(aws.NewErrParamMinValue("TimeDelta", -2.147483648e+09))
	}
	if s.Framerate != nil {
		if err := s.Framerate.Validate(); err != nil {
			invalidParams.AddNested("Framerate", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s FileSourceSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Convert608To708) > 0 {
		v := s.Convert608To708

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "convert608To708", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Framerate != nil {
		v := s.Framerate

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "framerate", v, metadata)
	}
	if s.SourceFile != nil {
		v := *s.SourceFile

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sourceFile", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.TimeDelta != nil {
		v := *s.TimeDelta

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timeDelta", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value FRAME_CAPTURE.
type FrameCaptureSettings struct {
	_ struct{} `type:"structure"`

	// Frame capture will encode the first frame of the output stream, then one
	// frame every framerateDenominator/framerateNumerator seconds. For example,
	// settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of
	// 1/3 frame per second) will capture the first frame, then 1 frame every 3s.
	// Files will be named as filename.n.jpg where n is the 0-based sequence number
	// of each Capture.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// Frame capture will encode the first frame of the output stream, then one
	// frame every framerateDenominator/framerateNumerator seconds. For example,
	// settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of
	// 1/3 frame per second) will capture the first frame, then 1 frame every 3s.
	// Files will be named as filename.NNNNNNN.jpg where N is the 0-based frame
	// sequence number zero padded to 7 decimal places.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Maximum number of captures (encoded jpg output files).
	MaxCaptures *int64 `locationName:"maxCaptures" min:"1" type:"integer"`

	// JPEG Quality - a higher value equals higher quality.
	Quality *int64 `locationName:"quality" min:"1" type:"integer"`
}

// String returns the string representation
func (s FrameCaptureSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// All four fields carry a `min:"1"` struct-tag constraint.
func (s *FrameCaptureSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "FrameCaptureSettings"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxCaptures != nil && *s.MaxCaptures < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxCaptures", 1))
	}
	if s.Quality != nil && *s.Quality < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Quality", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s FrameCaptureSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}
	if s.MaxCaptures != nil {
		v := *s.MaxCaptures

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxCaptures", protocol.Int64Value(v), metadata)
	}
	if s.Quality != nil {
		v := *s.Quality

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "quality", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Settings for quality-defined variable bitrate encoding with the H.264 codec.
// Required when you set Rate control mode to QVBR. Not valid when you set Rate
// control mode to a value other than QVBR, or when you don't define Rate control
// mode.
type H264QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only when Rate control mode is QVBR and Quality tuning level
	// is Multi-pass HQ. For Max average bitrate values suited to the complexity
	// of your input video, the service limits the average bitrate of the video
	// part of this output to the value that you choose. That is, the total size
	// of the video element is less than or equal to the value you set multiplied
	// by the number of seconds of encoded output.
	MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"`

	// Required when you use QVBR rate control mode. That is, when you specify qvbrSettings
	// within h264Settings. Specify the general target quality level for this output,
	// from 1 to 10. Use higher numbers for greater quality. Level 10 results in
	// nearly lossless compression. The quality level for most broadcast-quality
	// transcodes is between 6 and 9. Optionally, to specify a value between whole
	// numbers, also provide a value for the setting qvbrQualityLevelFineTune. For
	// example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel
	// to 7 and set qvbrQualityLevelFineTune to .33.
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}

// String returns the string representation
func (s H264QvbrSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimums mirror the `min` struct-tag constraints; QvbrQualityLevelFineTune
// has no minimum and is not checked.
func (s *H264QvbrSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "H264QvbrSettings"}
	if s.MaxAverageBitrate != nil && *s.MaxAverageBitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxAverageBitrate", 1000))
	}
	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("QvbrQualityLevel", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s H264QvbrSettings) MarshalFields(e protocol.FieldEncoder) error {
	// Only non-nil fields are encoded; omitted fields are absent from the body.
	if s.MaxAverageBitrate != nil {
		v := *s.MaxAverageBitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxAverageBitrate", protocol.Int64Value(v), metadata)
	}
	if s.QvbrQualityLevel != nil {
		v := *s.QvbrQualityLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qvbrQualityLevel", protocol.Int64Value(v), metadata)
	}
	if s.QvbrQualityLevelFineTune != nil {
		v := *s.QvbrQualityLevelFineTune
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qvbrQualityLevelFineTune", protocol.Float64Value(v), metadata)
	}
	return nil
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value H_264.
type H264Settings struct {
	_ struct{} `type:"structure"`

	// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual
	// quality.
	AdaptiveQuantization H264AdaptiveQuantization `locationName:"adaptiveQuantization" type:"string" enum:"true"`

	// Specify the average bitrate in bits per second. Required for VBR and CBR.
	// For MS Smooth outputs, bitrates must be unique when rounded down to the nearest
	// multiple of 1000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// Specify an H.264 level that is consistent with your output video settings.
	// If you aren't sure what level to specify, choose Auto (AUTO).
	CodecLevel H264CodecLevel `locationName:"codecLevel" type:"string" enum:"true"`

	// H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the
	// AVC-I License.
	CodecProfile H264CodecProfile `locationName:"codecProfile" type:"string" enum:"true"`

	// Choose Adaptive to improve subjective video quality for high-motion content.
	// This will cause the service to use fewer B-frames (which infer information
	// based on other frames) for high-motion portions of the video and more B-frames
	// for low-motion portions. The maximum number of B-frames is limited by the
	// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
	DynamicSubGop H264DynamicSubGop `locationName:"dynamicSubGop" type:"string" enum:"true"`

	// Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC.
	EntropyEncoding H264EntropyEncoding `locationName:"entropyEncoding" type:"string" enum:"true"`

	// Choosing FORCE_FIELD disables PAFF encoding for interlaced outputs.
	FieldEncoding H264FieldEncoding `locationName:"fieldEncoding" type:"string" enum:"true"`

	// Adjust quantization within each frame to reduce flicker or 'pop' on I-frames.
	FlickerAdaptiveQuantization H264FlickerAdaptiveQuantization `locationName:"flickerAdaptiveQuantization" type:"string" enum:"true"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl H264FramerateControl `locationName:"framerateControl" type:"string" enum:"true"`

	// Optional. Specify how the transcoder performs framerate conversion. The default
	// behavior is to use duplicate drop conversion.
	FramerateConversionAlgorithm H264FramerateConversionAlgorithm `locationName:"framerateConversionAlgorithm" type:"string" enum:"true"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976
	// fps.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// If enable, use reference B frames for GOP structures that have B frames >
	// 1.
	GopBReference H264GopBReference `locationName:"gopBReference" type:"string" enum:"true"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// GOP Length (keyframe interval) in frames or seconds. Must be greater than
	// zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds
	// the system will convert the GOP Size into a frame count at run time.
	GopSizeUnits H264GopSizeUnits `locationName:"gopSizeUnits" type:"string" enum:"true"`

	// Percentage of the buffer that should initially be filled (HRD buffer model).
	HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Use Interlace mode (InterlaceMode) to choose the scan line type for the output.
	// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce
	// interlaced output with the entire output having the same field polarity (top
	// or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default
	// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore,
	// behavior depends on the input scan type, as follows. - If the source is interlaced,
	// the output will be interlaced with the same polarity as the source (it will
	// follow the source). The output could therefore be a mix of "top field first"
	// and "bottom field first". - If the source is progressive, the output will
	// be interlaced with "top field first" or "bottom field first" polarity, depending
	// on which of the Follow options you chose.
	InterlaceMode H264InterlaceMode `locationName:"interlaceMode" type:"string" enum:"true"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000. Required when Rate control mode is QVBR.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Enforces separation between repeated (cadence) I-frames and I-frames inserted
	// by Scene Change Detection. If a scene change I-frame is within I-interval
	// frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene
	// change I-frame. GOP stretch requires enabling lookahead as well as setting
	// I-interval. The normal cadence resumes for the next GOP. This setting is
	// only used when Scene Change Detect is enabled. Note: Maximum GOP stretch
	// = GOP size + Min-I-interval - 1
	MinIInterval *int64 `locationName:"minIInterval" type:"integer"`

	// Number of B-frames between reference frames.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`

	// Number of reference frames to use. The encoder may use more than requested
	// if using B-frames and/or interlaced encoding.
	NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl H264ParControl `locationName:"parControl" type:"string" enum:"true"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel H264QualityTuningLevel `locationName:"qualityTuningLevel" type:"string" enum:"true"`

	// Settings for quality-defined variable bitrate encoding with the H.264 codec.
	// Required when you set Rate control mode to QVBR. Not valid when you set Rate
	// control mode to a value other than QVBR, or when you don't define Rate control
	// mode.
	QvbrSettings *H264QvbrSettings `locationName:"qvbrSettings" type:"structure"`

	// Use this setting to specify whether this output has a variable bitrate (VBR),
	// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
	RateControlMode H264RateControlMode `locationName:"rateControlMode" type:"string" enum:"true"`

	// Places a PPS header on each encoded picture, even if repeated.
	RepeatPps H264RepeatPps `locationName:"repeatPps" type:"string" enum:"true"`

	// Enable this setting to insert I-frames at scene changes that the service
	// automatically detects. This improves video quality and is enabled by default.
	// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION)
	// for further video quality improvement. For more information about QVBR, see
	// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
	SceneChangeDetect H264SceneChangeDetect `locationName:"sceneChangeDetect" type:"string" enum:"true"`

	// Number of slices per picture. Must be less than or equal to the number of
	// macroblock rows for progressive pictures, and less than or equal to half
	// the number of macroblock rows for interlaced pictures.
	Slices *int64 `locationName:"slices" min:"1" type:"integer"`

	// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled
	// as 25fps, and audio is sped up correspondingly.
	SlowPal H264SlowPal `locationName:"slowPal" type:"string" enum:"true"`

	// Softness. Selects quantizer matrix, larger values reduce high-frequency content
	// in the encoded image.
	Softness *int64 `locationName:"softness" type:"integer"`

	// Adjust quantization within each frame based on spatial variation of content
	// complexity.
	SpatialAdaptiveQuantization H264SpatialAdaptiveQuantization `locationName:"spatialAdaptiveQuantization" type:"string" enum:"true"`

	// Produces a bitstream compliant with SMPTE RP-2027.
	Syntax H264Syntax `locationName:"syntax" type:"string" enum:"true"`

	// This field applies only if the Streams > Advanced > Framerate (framerate)
	// field is set to 29.970. This field works with the Streams > Advanced > Preprocessors
	// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced
	// Mode field (interlace_mode) to identify the scan type for the output: Progressive,
	// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output
	// from 23.976 input. - Soft: produces 23.976; the player converts this output
	// to 29.97i.
	Telecine H264Telecine `locationName:"telecine" type:"string" enum:"true"`

	// Adjust quantization within each frame based on temporal variation of content
	// complexity.
	TemporalAdaptiveQuantization H264TemporalAdaptiveQuantization `locationName:"temporalAdaptiveQuantization" type:"string" enum:"true"`

	// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
	UnregisteredSeiTimecode H264UnregisteredSeiTimecode `locationName:"unregisteredSeiTimecode" type:"string" enum:"true"`
}

// String returns the string representation
func (s H264Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *H264Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "H264Settings"}
	// Each check below mirrors a min tag declared on the corresponding
	// H264Settings field; nil (unset) fields are always accepted.
	if s.Bitrate != nil && *s.Bitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 1000))
	}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.NumberReferenceFrames != nil && *s.NumberReferenceFrames < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("NumberReferenceFrames", 1))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParNumerator", 1))
	}
	if s.Slices != nil && *s.Slices < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Slices", 1))
	}
	// Nested QVBR settings are validated recursively; their failures are
	// reported under the "QvbrSettings" prefix.
	if s.QvbrSettings != nil {
		if err := s.QvbrSettings.Validate(); err != nil {
			invalidParams.AddNested("QvbrSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s H264Settings) MarshalFields(e protocol.FieldEncoder) error {
	// Enum-typed fields are emitted as quoted strings when non-empty; pointer
	// fields are emitted only when non-nil. Unset fields are omitted from the body.
	if len(s.AdaptiveQuantization) > 0 {
		v := s.AdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "adaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Bitrate != nil {
		v := *s.Bitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.CodecLevel) > 0 {
		v := s.CodecLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecLevel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.CodecProfile) > 0 {
		v := s.CodecProfile
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecProfile", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.DynamicSubGop) > 0 {
		v := s.DynamicSubGop
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dynamicSubGop", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.EntropyEncoding) > 0 {
		v := s.EntropyEncoding
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "entropyEncoding", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FieldEncoding) > 0 {
		v := s.FieldEncoding
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fieldEncoding", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FlickerAdaptiveQuantization) > 0 {
		v := s.FlickerAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "flickerAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateControl) > 0 {
		v := s.FramerateControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateConversionAlgorithm) > 0 {
		v := s.FramerateConversionAlgorithm
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateConversionAlgorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.GopBReference) > 0 {
		v := s.GopBReference
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopBReference", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.GopClosedCadence != nil {
		v := *s.GopClosedCadence
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopClosedCadence", protocol.Int64Value(v), metadata)
	}
	if s.GopSize != nil {
		v := *s.GopSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSize", protocol.Float64Value(v), metadata)
	}
	if len(s.GopSizeUnits) > 0 {
		v := s.GopSizeUnits
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSizeUnits", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.HrdBufferInitialFillPercentage != nil {
		v := *s.HrdBufferInitialFillPercentage
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hrdBufferInitialFillPercentage", protocol.Int64Value(v), metadata)
	}
	if s.HrdBufferSize != nil {
		v := *s.HrdBufferSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hrdBufferSize", protocol.Int64Value(v), metadata)
	}
	if len(s.InterlaceMode) > 0 {
		v := s.InterlaceMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "interlaceMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.MaxBitrate != nil {
		v := *s.MaxBitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxBitrate", protocol.Int64Value(v), metadata)
	}
	if s.MinIInterval != nil {
		v := *s.MinIInterval
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "minIInterval", protocol.Int64Value(v), metadata)
	}
	if s.NumberBFramesBetweenReferenceFrames != nil {
		v := *s.NumberBFramesBetweenReferenceFrames
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "numberBFramesBetweenReferenceFrames", protocol.Int64Value(v), metadata)
	}
	if s.NumberReferenceFrames != nil {
		v := *s.NumberReferenceFrames
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "numberReferenceFrames", protocol.Int64Value(v), metadata)
	}
	if len(s.ParControl) > 0 {
		v := s.ParControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ParDenominator != nil {
		v := *s.ParDenominator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parDenominator", protocol.Int64Value(v), metadata)
	}
	if s.ParNumerator != nil {
		v := *s.ParNumerator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.QualityTuningLevel) > 0 {
		v := s.QualityTuningLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qualityTuningLevel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	// QvbrSettings is a nested structure; it is delegated to its own
	// MarshalFields via SetFields rather than encoded as a scalar.
	if s.QvbrSettings != nil {
		v := s.QvbrSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "qvbrSettings", v, metadata)
	}
	if len(s.RateControlMode) > 0 {
		v := s.RateControlMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateControlMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.RepeatPps) > 0 {
		v := s.RepeatPps
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "repeatPps", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SceneChangeDetect) > 0 {
		v := s.SceneChangeDetect
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sceneChangeDetect", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Slices != nil {
		v := *s.Slices
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "slices", protocol.Int64Value(v), metadata)
	}
	if len(s.SlowPal) > 0 {
		v := s.SlowPal
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "slowPal", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Softness != nil {
		v := *s.Softness
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "softness", protocol.Int64Value(v), metadata)
	}
	if len(s.SpatialAdaptiveQuantization) > 0 {
		v := s.SpatialAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "spatialAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Syntax) > 0 {
		v := s.Syntax
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "syntax", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Telecine) > 0 {
		v := s.Telecine
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "telecine", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.TemporalAdaptiveQuantization) > 0 {
		v := s.TemporalAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "temporalAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.UnregisteredSeiTimecode) > 0 {
		v := s.UnregisteredSeiTimecode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "unregisteredSeiTimecode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Settings for quality-defined variable bitrate encoding with the H.265 codec.
// Required when you set Rate control mode to QVBR. Not valid when you set Rate
// control mode to a value other than QVBR, or when you don't define Rate control
// mode.
type H265QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only when Rate control mode is QVBR and Quality tuning level
	// is Multi-pass HQ. For Max average bitrate values suited to the complexity
	// of your input video, the service limits the average bitrate of the video
	// part of this output to the value that you choose. That is, the total size
	// of the video element is less than or equal to the value you set multiplied
	// by the number of seconds of encoded output.
	MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"`

	// Required when you use QVBR rate control mode. That is, when you specify qvbrSettings
	// within h265Settings. Specify the general target quality level for this output,
	// from 1 to 10. Use higher numbers for greater quality. Level 10 results in
	// nearly lossless compression. The quality level for most broadcast-quality
	// transcodes is between 6 and 9. Optionally, to specify a value between whole
	// numbers, also provide a value for the setting qvbrQualityLevelFineTune. For
	// example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel
	// to 7 and set qvbrQualityLevelFineTune to .33.
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}

// String returns the string representation
func (s H265QvbrSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *H265QvbrSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "H265QvbrSettings"}
	// The checks below mirror the min tags declared on the struct fields.
	if s.MaxAverageBitrate != nil && *s.MaxAverageBitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxAverageBitrate", 1000))
	}
	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("QvbrQualityLevel", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s H265QvbrSettings) MarshalFields(e protocol.FieldEncoder) error {
	// Only non-nil fields are encoded; omitted fields are absent from the body.
	if s.MaxAverageBitrate != nil {
		v := *s.MaxAverageBitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxAverageBitrate", protocol.Int64Value(v), metadata)
	}
	if s.QvbrQualityLevel != nil {
		v := *s.QvbrQualityLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qvbrQualityLevel", protocol.Int64Value(v), metadata)
	}
	if s.QvbrQualityLevelFineTune != nil {
		v := *s.QvbrQualityLevelFineTune
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qvbrQualityLevelFineTune", protocol.Float64Value(v), metadata)
	}
	return nil
}

// Settings for H265 codec
type H265Settings struct {
	_ struct{} `type:"structure"`

	// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual
	// quality.
	AdaptiveQuantization H265AdaptiveQuantization `locationName:"adaptiveQuantization" type:"string" enum:"true"`

	// Enables Alternate Transfer Function SEI message for outputs using Hybrid
	// Log Gamma (HLG) Electro-Optical Transfer Function (EOTF).
	AlternateTransferFunctionSei H265AlternateTransferFunctionSei `locationName:"alternateTransferFunctionSei" type:"string" enum:"true"`

	// Specify the average bitrate in bits per second. Required for VBR and CBR.
	// For MS Smooth outputs, bitrates must be unique when rounded down to the nearest
	// multiple of 1000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// H.265 Level.
	CodecLevel H265CodecLevel `locationName:"codecLevel" type:"string" enum:"true"`

	// Represents the Profile and Tier, per the HEVC (H.265) specification. Selections
	// are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile
	// with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License.
	CodecProfile H265CodecProfile `locationName:"codecProfile" type:"string" enum:"true"`

	// Choose Adaptive to improve subjective video quality for high-motion content.
	// This will cause the service to use fewer B-frames (which infer information
	// based on other frames) for high-motion portions of the video and more B-frames
	// for low-motion portions. The maximum number of B-frames is limited by the
	// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
	DynamicSubGop H265DynamicSubGop `locationName:"dynamicSubGop" type:"string" enum:"true"`

	// Adjust quantization within each frame to reduce flicker or 'pop' on I-frames.
	FlickerAdaptiveQuantization H265FlickerAdaptiveQuantization `locationName:"flickerAdaptiveQuantization" type:"string" enum:"true"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl H265FramerateControl `locationName:"framerateControl" type:"string" enum:"true"`

	// Optional. Specify how the transcoder performs framerate conversion. The default
	// behavior is to use duplicate drop conversion.
	FramerateConversionAlgorithm H265FramerateConversionAlgorithm `locationName:"framerateConversionAlgorithm" type:"string" enum:"true"`

	// Frame rate denominator.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976
	// fps.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// If enable, use reference B frames for GOP structures that have B frames >
	// 1.
	GopBReference H265GopBReference `locationName:"gopBReference" type:"string" enum:"true"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// GOP Length (keyframe interval) in frames or seconds. Must be greater than
	// zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds
	// the system will convert the GOP Size into a frame count at run time.
	GopSizeUnits H265GopSizeUnits `locationName:"gopSizeUnits" type:"string" enum:"true"`

	// Percentage of the buffer that should initially be filled (HRD buffer model).
	HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Choose the scan line type for the output. Choose Progressive (PROGRESSIVE)
	// to create a progressive output, regardless of the scan type of your input.
	// Choose Top Field First (TOP_FIELD) or Bottom Field First (BOTTOM_FIELD) to
	// create an output that's interlaced with the same field polarity throughout.
	// Choose Follow, Default Top (FOLLOW_TOP_FIELD) or Follow, Default Bottom (FOLLOW_BOTTOM_FIELD)
	// to create an interlaced output with the same field polarity as the source.
	// If the source is interlaced, the output will be interlaced with the same
	// polarity as the source (it will follow the source). The output could therefore
	// be a mix of "top field first" and "bottom field first". If the source is
	// progressive, your output will be interlaced with "top field first" or "bottom
	// field first" polarity, depending on which of the Follow options you chose.
	// If you don't choose a value, the service will default to Progressive (PROGRESSIVE).
	InterlaceMode H265InterlaceMode `locationName:"interlaceMode" type:"string" enum:"true"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000. Required when Rate control mode is QVBR.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Enforces separation between repeated (cadence) I-frames and I-frames inserted
	// by Scene Change Detection. If a scene change I-frame is within I-interval
	// frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene
	// change I-frame. GOP stretch requires enabling lookahead as well as setting
	// I-interval. The normal cadence resumes for the next GOP. This setting is
	// only used when Scene Change Detect is enabled. Note: Maximum GOP stretch
	// = GOP size + Min-I-interval - 1
	MinIInterval *int64 `locationName:"minIInterval" type:"integer"`

	// Number of B-frames between reference frames.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`

	// Number of reference frames to use. The encoder may use more than requested
	// if using B-frames and/or interlaced encoding.
	NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl H265ParControl `locationName:"parControl" type:"string" enum:"true"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel H265QualityTuningLevel `locationName:"qualityTuningLevel" type:"string" enum:"true"`

	// Settings for quality-defined variable bitrate encoding with the H.265 codec.
	// Required when you set Rate control mode to QVBR. Not valid when you set Rate
	// control mode to a value other than QVBR, or when you don't define Rate control
	// mode.
	QvbrSettings *H265QvbrSettings `locationName:"qvbrSettings" type:"structure"`

	// Use this setting to specify whether this output has a variable bitrate (VBR),
	// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
	RateControlMode H265RateControlMode `locationName:"rateControlMode" type:"string" enum:"true"`

	// Specify Sample Adaptive Offset (SAO) filter strength. Adaptive mode dynamically
	// selects best strength based on content
	SampleAdaptiveOffsetFilterMode H265SampleAdaptiveOffsetFilterMode `locationName:"sampleAdaptiveOffsetFilterMode" type:"string" enum:"true"`

	// Enable this setting to insert I-frames at scene changes that the service
	// automatically detects. This improves video quality and is enabled by default.
	// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION)
	// for further video quality improvement. For more information about QVBR, see
	// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
	SceneChangeDetect H265SceneChangeDetect `locationName:"sceneChangeDetect" type:"string" enum:"true"`

	// Number of slices per picture. Must be less than or equal to the number of
	// macroblock rows for progressive pictures, and less than or equal to half
	// the number of macroblock rows for interlaced pictures.
	Slices *int64 `locationName:"slices" min:"1" type:"integer"`

	// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled
	// as 25fps, and audio is sped up correspondingly.
	SlowPal H265SlowPal `locationName:"slowPal" type:"string" enum:"true"`

	// Adjust quantization within each frame based on spatial variation of content
	// complexity.
	SpatialAdaptiveQuantization H265SpatialAdaptiveQuantization `locationName:"spatialAdaptiveQuantization" type:"string" enum:"true"`

	// This field applies only if the Streams > Advanced > Framerate (framerate)
	// field is set to 29.970. This field works with the Streams > Advanced > Preprocessors
	// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced
	// Mode field (interlace_mode) to identify the scan type for the output: Progressive,
	// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output
	// from 23.976 input. - Soft: produces 23.976; the player converts this output
	// to 29.97i.
	Telecine H265Telecine `locationName:"telecine" type:"string" enum:"true"`

	// Adjust quantization within each frame based on temporal variation of content
	// complexity.
	TemporalAdaptiveQuantization H265TemporalAdaptiveQuantization `locationName:"temporalAdaptiveQuantization" type:"string" enum:"true"`

	// Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers
	// are supported depending on GOP structure: I- and P-frames form one layer,
	// reference B-frames can form a second layer and non-reference b-frames can
	// form a third layer. Decoders can optionally decode only the lower temporal
	// layers to generate a lower frame rate output. For example, given a bitstream
	// with temporal IDs and with b-frames = 1 (i.e. IbPbPb display order), a decoder
	// could decode all the frames for full frame rate output or only the I and
	// P frames (lowest temporal layer) for a half frame rate output.
	TemporalIds H265TemporalIds `locationName:"temporalIds" type:"string" enum:"true"`

	// Enable use of tiles, allowing horizontal as well as vertical subdivision
	// of the encoded pictures.
Tiles H265Tiles `locationName:"tiles" type:"string" enum:"true"` // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. UnregisteredSeiTimecode H265UnregisteredSeiTimecode `locationName:"unregisteredSeiTimecode" type:"string" enum:"true"` // If the location of parameter set NAL units doesn't matter in your workflow, // ignore this setting. Use this setting only with CMAF or DASH outputs, or // with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose // HVC1 to mark your output as HVC1. This makes your output compliant with the // following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 // 3rd Edition. For these outputs, the service stores parameter set NAL units // in the sample headers but not in the samples directly. For MP4 outputs, when // you choose HVC1, your output video might not work properly with some downstream // systems and video players. The service defaults to marking your output as // HEV1. For these outputs, the service writes parameter set NAL units directly // into the samples. WriteMp4PackagingType H265WriteMp4PackagingType `locationName:"writeMp4PackagingType" type:"string" enum:"true"` } // String returns the string representation func (s H265Settings) String() string { return awsutil.Prettify(s) } // Validate inspects the fields of the type to determine if they are valid. 
func (s *H265Settings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "H265Settings"} if s.Bitrate != nil && *s.Bitrate < 1000 { invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 1000)) } if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1)) } if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1)) } if s.MaxBitrate != nil && *s.MaxBitrate < 1000 { invalidParams.Add(aws.NewErrParamMinValue("MaxBitrate", 1000)) } if s.NumberReferenceFrames != nil && *s.NumberReferenceFrames < 1 { invalidParams.Add(aws.NewErrParamMinValue("NumberReferenceFrames", 1)) } if s.ParDenominator != nil && *s.ParDenominator < 1 { invalidParams.Add(aws.NewErrParamMinValue("ParDenominator", 1)) } if s.ParNumerator != nil && *s.ParNumerator < 1 { invalidParams.Add(aws.NewErrParamMinValue("ParNumerator", 1)) } if s.Slices != nil && *s.Slices < 1 { invalidParams.Add(aws.NewErrParamMinValue("Slices", 1)) } if s.QvbrSettings != nil { if err := s.QvbrSettings.Validate(); err != nil { invalidParams.AddNested("QvbrSettings", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s H265Settings) MarshalFields(e protocol.FieldEncoder) error {
	// Enum-valued fields are emitted only when non-empty; pointer fields only
	// when non-nil, so unset settings are omitted from the request body.
	if len(s.AdaptiveQuantization) > 0 {
		v := s.AdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "adaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.AlternateTransferFunctionSei) > 0 {
		v := s.AlternateTransferFunctionSei
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "alternateTransferFunctionSei", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Bitrate != nil {
		v := *s.Bitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.CodecLevel) > 0 {
		v := s.CodecLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecLevel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.CodecProfile) > 0 {
		v := s.CodecProfile
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecProfile", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.DynamicSubGop) > 0 {
		v := s.DynamicSubGop
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dynamicSubGop", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FlickerAdaptiveQuantization) > 0 {
		v := s.FlickerAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "flickerAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateControl) > 0 {
		v := s.FramerateControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateConversionAlgorithm) > 0 {
		v := s.FramerateConversionAlgorithm
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateConversionAlgorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.GopBReference) > 0 {
		v := s.GopBReference
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopBReference", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.GopClosedCadence != nil {
		v := *s.GopClosedCadence
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopClosedCadence", protocol.Int64Value(v), metadata)
	}
	// GopSize is the one float-valued field in this shape.
	if s.GopSize != nil {
		v := *s.GopSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSize", protocol.Float64Value(v), metadata)
	}
	if len(s.GopSizeUnits) > 0 {
		v := s.GopSizeUnits
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSizeUnits", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.HrdBufferInitialFillPercentage != nil {
		v := *s.HrdBufferInitialFillPercentage
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hrdBufferInitialFillPercentage", protocol.Int64Value(v), metadata)
	}
	if s.HrdBufferSize != nil {
		v := *s.HrdBufferSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hrdBufferSize", protocol.Int64Value(v), metadata)
	}
	if len(s.InterlaceMode) > 0 {
		v := s.InterlaceMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "interlaceMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.MaxBitrate != nil {
		v := *s.MaxBitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxBitrate", protocol.Int64Value(v), metadata)
	}
	if s.MinIInterval != nil {
		v := *s.MinIInterval
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "minIInterval", protocol.Int64Value(v), metadata)
	}
	if s.NumberBFramesBetweenReferenceFrames != nil {
		v := *s.NumberBFramesBetweenReferenceFrames
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "numberBFramesBetweenReferenceFrames", protocol.Int64Value(v), metadata)
	}
	if s.NumberReferenceFrames != nil {
		v := *s.NumberReferenceFrames
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "numberReferenceFrames", protocol.Int64Value(v), metadata)
	}
	if len(s.ParControl) > 0 {
		v := s.ParControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ParDenominator != nil {
		v := *s.ParDenominator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parDenominator", protocol.Int64Value(v), metadata)
	}
	if s.ParNumerator != nil {
		v := *s.ParNumerator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.QualityTuningLevel) > 0 {
		v := s.QualityTuningLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qualityTuningLevel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	// Nested structure: delegated to its own MarshalFields via SetFields.
	if s.QvbrSettings != nil {
		v := s.QvbrSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "qvbrSettings", v, metadata)
	}
	if len(s.RateControlMode) > 0 {
		v := s.RateControlMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateControlMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SampleAdaptiveOffsetFilterMode) > 0 {
		v := s.SampleAdaptiveOffsetFilterMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleAdaptiveOffsetFilterMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SceneChangeDetect) > 0 {
		v := s.SceneChangeDetect
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sceneChangeDetect", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Slices != nil {
		v := *s.Slices
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "slices", protocol.Int64Value(v), metadata)
	}
	if len(s.SlowPal) > 0 {
		v := s.SlowPal
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "slowPal", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SpatialAdaptiveQuantization) > 0 {
		v := s.SpatialAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "spatialAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Telecine) > 0 {
		v := s.Telecine
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "telecine", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.TemporalAdaptiveQuantization) > 0 {
		v := s.TemporalAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "temporalAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.TemporalIds) > 0 {
		v := s.TemporalIds
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "temporalIds", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Tiles) > 0 {
		v := s.Tiles
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "tiles", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.UnregisteredSeiTimecode) > 0 {
		v := s.UnregisteredSeiTimecode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "unregisteredSeiTimecode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.WriteMp4PackagingType) > 0 {
		v := s.WriteMp4PackagingType
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "writeMp4PackagingType", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Use these settings to specify static color calibration metadata, as defined
// by SMPTE ST 2086. These values don't affect the pixel values that are encoded
// in the video stream. They are intended to help the downstream video player
// display content in a way that reflects the intentions of the content creator.
type Hdr10Metadata struct {
	_ struct{} `type:"structure"`

	// Blue primary X coordinate. HDR Master Display Information must be provided
	// by a color grader, using color grading tools. Range is 0 to 50,000; each
	// increment represents 0.00002 in CIE1931 color coordinate. Not for color
	// correction.
	BluePrimaryX *int64 `locationName:"bluePrimaryX" type:"integer"`

	// Blue primary Y coordinate; same range and semantics as bluePrimaryX.
	BluePrimaryY *int64 `locationName:"bluePrimaryY" type:"integer"`

	// Green primary X coordinate; same range and semantics as bluePrimaryX.
	GreenPrimaryX *int64 `locationName:"greenPrimaryX" type:"integer"`

	// Green primary Y coordinate; same range and semantics as bluePrimaryX.
	GreenPrimaryY *int64 `locationName:"greenPrimaryY" type:"integer"`

	// Maximum light level among all samples in the coded video sequence, in
	// candelas per square meter. No default; you must specify a value suitable
	// for the content.
	MaxContentLightLevel *int64 `locationName:"maxContentLightLevel" type:"integer"`

	// Maximum average light level of any frame in the coded video sequence, in
	// candelas per square meter. No default; you must specify a value suitable
	// for the content.
	MaxFrameAverageLightLevel *int64 `locationName:"maxFrameAverageLightLevel" type:"integer"`

	// Nominal maximum mastering display luminance, in units of 0.0001 candelas
	// per square meter.
	MaxLuminance *int64 `locationName:"maxLuminance" type:"integer"`

	// Nominal minimum mastering display luminance, in units of 0.0001 candelas
	// per square meter.
	MinLuminance *int64 `locationName:"minLuminance" type:"integer"`

	// Red primary X coordinate; same range and semantics as bluePrimaryX.
	RedPrimaryX *int64 `locationName:"redPrimaryX" type:"integer"`

	// Red primary Y coordinate; same range and semantics as bluePrimaryX.
	RedPrimaryY *int64 `locationName:"redPrimaryY" type:"integer"`

	// White point X coordinate; same range and semantics as bluePrimaryX.
	WhitePointX *int64 `locationName:"whitePointX" type:"integer"`

	// White point Y coordinate; same range and semantics as bluePrimaryX.
	WhitePointY *int64 `locationName:"whitePointY" type:"integer"`
}

// String returns the string representation
func (s Hdr10Metadata) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Hdr10Metadata) MarshalFields(e protocol.FieldEncoder) error {
	// Every field is an optional integer: emitted only when the pointer is
	// non-nil, so unset metadata values are omitted from the request body.
	if s.BluePrimaryX != nil {
		v := *s.BluePrimaryX
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bluePrimaryX", protocol.Int64Value(v), metadata)
	}
	if s.BluePrimaryY != nil {
		v := *s.BluePrimaryY
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bluePrimaryY", protocol.Int64Value(v), metadata)
	}
	if s.GreenPrimaryX != nil {
		v := *s.GreenPrimaryX
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "greenPrimaryX", protocol.Int64Value(v), metadata)
	}
	if s.GreenPrimaryY != nil {
		v := *s.GreenPrimaryY
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "greenPrimaryY", protocol.Int64Value(v), metadata)
	}
	if s.MaxContentLightLevel != nil {
		v := *s.MaxContentLightLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxContentLightLevel", protocol.Int64Value(v), metadata)
	}
	if s.MaxFrameAverageLightLevel != nil {
		v := *s.MaxFrameAverageLightLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxFrameAverageLightLevel", protocol.Int64Value(v), metadata)
	}
	if s.MaxLuminance != nil {
		v := *s.MaxLuminance
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxLuminance", protocol.Int64Value(v), metadata)
	}
	if s.MinLuminance != nil {
		v := *s.MinLuminance
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "minLuminance", protocol.Int64Value(v), metadata)
	}
	if s.RedPrimaryX != nil {
		v := *s.RedPrimaryX
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "redPrimaryX", protocol.Int64Value(v), metadata)
	}
	if s.RedPrimaryY != nil {
		v := *s.RedPrimaryY
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "redPrimaryY", protocol.Int64Value(v), metadata)
	}
	if s.WhitePointX != nil {
		v := *s.WhitePointX
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "whitePointX", protocol.Int64Value(v), metadata)
	}
	if s.WhitePointY != nil {
		v := *s.WhitePointY
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "whitePointY", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Specify the details for each additional HLS manifest that you want the service
// to generate for this output group. Each manifest can reference a different
// subset of outputs in the group.
type HlsAdditionalManifest struct {
	_ struct{} `type:"structure"`

	// Name modifier appended to this manifest's name to distinguish it from the
	// other main manifests in the output group. For example, if the default main
	// manifest is film-name.m3u8 and you enter "-no-premium", the service
	// generates film-name-no-premium.m3u8 for this top-level manifest. For HLS
	// output groups, specify a manifestNameModifier different from the output's
	// nameModifier (the latter is used to name the individual variant manifests).
	ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"`

	// The outputs that this additional top-level manifest references.
	SelectedOutputs []string `locationName:"selectedOutputs" type:"list"`
}

// String returns the string representation
func (s HlsAdditionalManifest) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HlsAdditionalManifest) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "HlsAdditionalManifest"}
	// The API model requires a non-empty name modifier when one is supplied.
	if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("ManifestNameModifier", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s HlsAdditionalManifest) MarshalFields(e protocol.FieldEncoder) error {
	if s.ManifestNameModifier != nil {
		v := *s.ManifestNameModifier
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "manifestNameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	// Lists are encoded element by element through a list encoder; each string
	// element is quoted.
	if s.SelectedOutputs != nil {
		v := s.SelectedOutputs
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "selectedOutputs", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	return nil
}

// Caption Language Mapping
type HlsCaptionLanguageMapping struct {
	_ struct{} `type:"structure"`

	// Caption channel.
	CaptionChannel *int64 `locationName:"captionChannel" type:"integer"`

	// The language for this captions channel, as an ISO 639-2 or ISO 639-3
	// three-letter language code.
	CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"`

	// The language, using the ISO 639-2 three-letter code listed at
	// https://www.loc.gov/standards/iso639-2/php/code_list.php.
	LanguageCode LanguageCode `locationName:"languageCode" type:"string" enum:"true"`

	// Caption language description.
	LanguageDescription *string `locationName:"languageDescription" type:"string"`
}

// String returns the string representation
func (s HlsCaptionLanguageMapping) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HlsCaptionLanguageMapping) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "HlsCaptionLanguageMapping"}
	// The bound -2.147483648e+09 is the generated int32 minimum from the API
	// model, so in practice any int32-range channel value passes this check.
	if s.CaptionChannel != nil && *s.CaptionChannel < -2.147483648e+09 {
		invalidParams.Add(aws.NewErrParamMinValue("CaptionChannel", -2.147483648e+09))
	}
	// ISO 639-2/639-3 codes are at least three letters.
	if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("CustomLanguageCode", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s HlsCaptionLanguageMapping) MarshalFields(e protocol.FieldEncoder) error {
	if s.CaptionChannel != nil {
		v := *s.CaptionChannel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "captionChannel", protocol.Int64Value(v), metadata)
	}
	if s.CustomLanguageCode != nil {
		v := *s.CustomLanguageCode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "customLanguageCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.LanguageCode) > 0 {
		v := s.LanguageCode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "languageCode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.LanguageDescription != nil {
		v := *s.LanguageDescription
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "languageDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Settings for HLS encryption
type HlsEncryptionSettings struct {
	_ struct{} `type:"structure"`

	// A 128-bit, 16-byte hex value represented by a 32-character text string. If
	// this parameter is not set, the Initialization Vector follows the segment
	// number by default.
	ConstantInitializationVector *string `locationName:"constantInitializationVector" min:"32" type:"string"`

	// Encrypts the segments with the given encryption scheme. Leave blank to
	// disable. Selecting 'Disabled' in the web interface also disables encryption.
	EncryptionMethod HlsEncryptionType `locationName:"encryptionMethod" type:"string" enum:"true"`

	// The Initialization Vector is a 128-bit number used in conjunction with the
	// key for encrypting blocks. If set to INCLUDE, the Initialization Vector is
	// listed in the manifest; otherwise it is not.
	InitializationVectorInManifest HlsInitializationVectorInManifest `locationName:"initializationVectorInManifest" type:"string" enum:"true"`

	// Enable to insert the EXT-X-SESSION-KEY element into the master playlist,
	// allowing offline Apple HLS FairPlay content protection.
	OfflineEncrypted HlsOfflineEncrypted `locationName:"offlineEncrypted" type:"string" enum:"true"`

	// For HLS, DASH, or Microsoft Smooth output groups doing DRM encryption with
	// a SPEKE-compliant key provider. For CMAF output groups, use the
	// SpekeKeyProviderCmaf settings instead.
	SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"`

	// Settings for encryption with a static key provider.
	StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"`

	// Whether the DRM encryption key is static or from a key provider following
	// the SPEKE standard. See
	// https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
	Type HlsKeyProviderType `locationName:"type" type:"string" enum:"true"`
}

// String returns the string representation
func (s HlsEncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HlsEncryptionSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "HlsEncryptionSettings"}
	// A constant IV must be the full 32 hex characters (128 bits).
	if s.ConstantInitializationVector != nil && len(*s.ConstantInitializationVector) < 32 {
		invalidParams.Add(aws.NewErrParamMinLen("ConstantInitializationVector", 32))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s HlsEncryptionSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.ConstantInitializationVector != nil {
		v := *s.ConstantInitializationVector
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "constantInitializationVector", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.EncryptionMethod) > 0 {
		v := s.EncryptionMethod
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "encryptionMethod", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.InitializationVectorInManifest) > 0 {
		v := s.InitializationVectorInManifest
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "initializationVectorInManifest", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.OfflineEncrypted) > 0 {
		v := s.OfflineEncrypted
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "offlineEncrypted", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	// Nested key-provider structures delegate to their own MarshalFields.
	if s.SpekeKeyProvider != nil {
		v := s.SpekeKeyProvider
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "spekeKeyProvider", v, metadata)
	}
	if s.StaticKeyProvider != nil {
		v := s.StaticKeyProvider
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "staticKeyProvider", v, metadata)
	}
	if len(s.Type) > 0 {
		v := s.Type
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "type", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to
// HLS_GROUP_SETTINGS.
type HlsGroupSettings struct {
	// Unexported marker field carrying the structure-level protocol tag.
	_ struct{} `type:"structure"`

	// Choose one or more ad marker types to decorate your Apple HLS manifest. This
	// setting does not determine whether SCTE-35 markers appear in the outputs
	// themselves.
	AdMarkers []HlsAdMarkers `locationName:"adMarkers" type:"list"`

	// By default, the service creates one top-level .m3u8 HLS manifest for each
	// HLS output group in your job. This default manifest references every output
	// in the output group. To create additional top-level manifests that reference
	// a subset of the outputs in the output group, specify a list of them here.
	AdditionalManifests []HlsAdditionalManifest `locationName:"additionalManifests" type:"list"`

	// A partial URI prefix that will be prepended to each output in the media .m3u8
	// file. Can be used if base manifest is delivered from a different URL than
	// the main .m3u8 file.
	BaseUrl *string `locationName:"baseUrl" type:"string"`

	// Language to be used on Caption outputs
	CaptionLanguageMappings []HlsCaptionLanguageMapping `locationName:"captionLanguageMappings" type:"list"`

	// Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS
	// lines in the manifest. Specify at least one language in the CC1 Language
	// Code field. One CLOSED-CAPTION line is added for each Language Code you specify.
	// Make sure to specify the languages in the order in which they appear in the
	// original source (if the source is embedded format) or the order of the caption
	// selectors (if the source is other than embedded). Otherwise, languages in
	// the manifest will not match up properly with the output captions. None: Include
	// CLOSED-CAPTIONS=NONE line in the manifest. Omit: Omit any CLOSED-CAPTIONS
	// line from the manifest.
	CaptionLanguageSetting HlsCaptionLanguageSetting `locationName:"captionLanguageSetting" type:"string" enum:"true"`

	// When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client
	// from saving media segments for later replay.
	ClientCache HlsClientCache `locationName:"clientCache" type:"string" enum:"true"`

	// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
	// generation.
	CodecSpecification HlsCodecSpecification `locationName:"codecSpecification" type:"string" enum:"true"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`

	// Indicates whether segments should be placed in subdirectories.
	DirectoryStructure HlsDirectoryStructure `locationName:"directoryStructure" type:"string" enum:"true"`

	// DRM settings.
	Encryption *HlsEncryptionSettings `locationName:"encryption" type:"structure"`

	// When set to GZIP, compresses HLS playlist.
	ManifestCompression HlsManifestCompression `locationName:"manifestCompression" type:"string" enum:"true"`

	// Indicates whether the output manifest should use floating point values for
	// segment duration.
	ManifestDurationFormat HlsManifestDurationFormat `locationName:"manifestDurationFormat" type:"string" enum:"true"`

	// Keep this setting at the default value of 0, unless you are troubleshooting
	// a problem with how devices play back the end of your video asset. If you
	// know that player devices are hanging on the final segment of your video because
	// the length of your final segment is too short, use this setting to specify
	// a minimum final segment length, in seconds. Choose a value that is greater
	// than or equal to 1 and less than your segment length. When you specify a
	// value for this setting, the encoder will combine any final segment that is
	// shorter than the length that you specify with the previous segment. For example,
	// your segment length is 3 seconds and your final segment is .5 seconds without
	// a minimum final segment length; when you set the minimum final segment length
	// to 1, your final segment is 3.5 seconds.
	MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"`

	// When set, Minimum Segment Size is enforced by looking ahead and back within
	// the specified range for a nearby avail and extending the segment size if
	// needed.
	MinSegmentLength *int64 `locationName:"minSegmentLength" type:"integer"`

	// Indicates whether the .m3u8 manifest file should be generated for this HLS
	// output group.
	OutputSelection HlsOutputSelection `locationName:"outputSelection" type:"string" enum:"true"`

	// Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files.
	// The value is calculated as follows: either the program date and time are
	// initialized using the input timecode source, or the time is initialized using
	// the input timecode source and the date is initialized using the timestamp_offset.
	ProgramDateTime HlsProgramDateTime `locationName:"programDateTime" type:"string" enum:"true"`

	// Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds.
	ProgramDateTimePeriod *int64 `locationName:"programDateTimePeriod" type:"integer"`

	// When set to SINGLE_FILE, emits program as a single media resource (.ts) file,
	// uses #EXT-X-BYTERANGE tags to index segment for playback.
	SegmentControl HlsSegmentControl `locationName:"segmentControl" type:"string" enum:"true"`

	// Length of MPEG-2 Transport Stream segments to create (in seconds). Note that
	// segments will end on the next keyframe after this number of seconds, so actual
	// segment length may be longer. Minimum of 1 is enforced by Validate (min tag).
	SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"`

	// Number of segments to write to a subdirectory before starting a new one.
	// directoryStructure must be SINGLE_DIRECTORY for this setting to have an effect.
	SegmentsPerSubdirectory *int64 `locationName:"segmentsPerSubdirectory" min:"1" type:"integer"`

	// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag
	// of variant manifest.
	StreamInfResolution HlsStreamInfResolution `locationName:"streamInfResolution" type:"string" enum:"true"`

	// Indicates ID3 frame that has the timecode.
	TimedMetadataId3Frame HlsTimedMetadataId3Frame `locationName:"timedMetadataId3Frame" type:"string" enum:"true"`

	// Timed Metadata interval in seconds.
	TimedMetadataId3Period *int64 `locationName:"timedMetadataId3Period" type:"integer"`

	// Provides an extra millisecond delta offset to fine tune the timestamps.
	TimestampDeltaMilliseconds *int64 `locationName:"timestampDeltaMilliseconds" type:"integer"`
}

// String returns the string representation
func (s HlsGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HlsGroupSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "HlsGroupSettings"} if s.SegmentLength != nil && *s.SegmentLength < 1 { invalidParams.Add(aws.NewErrParamMinValue("SegmentLength", 1)) } if s.SegmentsPerSubdirectory != nil && *s.SegmentsPerSubdirectory < 1 { invalidParams.Add(aws.NewErrParamMinValue("SegmentsPerSubdirectory", 1)) } if s.TimedMetadataId3Period != nil && *s.TimedMetadataId3Period < -2.147483648e+09 { invalidParams.Add(aws.NewErrParamMinValue("TimedMetadataId3Period", -2.147483648e+09)) } if s.TimestampDeltaMilliseconds != nil && *s.TimestampDeltaMilliseconds < -2.147483648e+09 { invalidParams.Add(aws.NewErrParamMinValue("TimestampDeltaMilliseconds", -2.147483648e+09)) } if s.AdditionalManifests != nil { for i, v := range s.AdditionalManifests { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(aws.ErrInvalidParams)) } } } if s.CaptionLanguageMappings != nil { for i, v := range s.CaptionLanguageMappings { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionLanguageMappings", i), err.(aws.ErrInvalidParams)) } } } if s.Encryption != nil { if err := s.Encryption.Validate(); err != nil { invalidParams.AddNested("Encryption", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s HlsGroupSettings) MarshalFields(e protocol.FieldEncoder) error { if s.AdMarkers != nil { v := s.AdMarkers metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "adMarkers", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) } ls0.End() } if s.AdditionalManifests != nil { v := s.AdditionalManifests metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "additionalManifests", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } if s.BaseUrl != nil { v := *s.BaseUrl metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "baseUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.CaptionLanguageMappings != nil { v := s.CaptionLanguageMappings metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "captionLanguageMappings", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } if len(s.CaptionLanguageSetting) > 0 { v := s.CaptionLanguageSetting metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "captionLanguageSetting", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.ClientCache) > 0 { v := s.ClientCache metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "clientCache", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.CodecSpecification) > 0 { v := s.CodecSpecification metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "codecSpecification", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.Destination != nil { v := *s.Destination metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "destination", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.DestinationSettings != nil { v := s.DestinationSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "destinationSettings", v, metadata) } if len(s.DirectoryStructure) > 0 { v := 
s.DirectoryStructure metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "directoryStructure", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.Encryption != nil { v := s.Encryption metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "encryption", v, metadata) } if len(s.ManifestCompression) > 0 { v := s.ManifestCompression metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "manifestCompression", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.ManifestDurationFormat) > 0 { v := s.ManifestDurationFormat metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "manifestDurationFormat", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.MinFinalSegmentLength != nil { v := *s.MinFinalSegmentLength metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "minFinalSegmentLength", protocol.Float64Value(v), metadata) } if s.MinSegmentLength != nil { v := *s.MinSegmentLength metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "minSegmentLength", protocol.Int64Value(v), metadata) } if len(s.OutputSelection) > 0 { v := s.OutputSelection metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "outputSelection", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.ProgramDateTime) > 0 { v := s.ProgramDateTime metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "programDateTime", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.ProgramDateTimePeriod != nil { v := *s.ProgramDateTimePeriod metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "programDateTimePeriod", protocol.Int64Value(v), metadata) } if len(s.SegmentControl) > 0 { v := s.SegmentControl metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "segmentControl", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.SegmentLength != nil { v := *s.SegmentLength metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "segmentLength", protocol.Int64Value(v), 
metadata) } if s.SegmentsPerSubdirectory != nil { v := *s.SegmentsPerSubdirectory metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "segmentsPerSubdirectory", protocol.Int64Value(v), metadata) } if len(s.StreamInfResolution) > 0 { v := s.StreamInfResolution metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "streamInfResolution", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if len(s.TimedMetadataId3Frame) > 0 { v := s.TimedMetadataId3Frame metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "timedMetadataId3Frame", protocol.QuotedValue{ValueMarshaler: v}, metadata) } if s.TimedMetadataId3Period != nil { v := *s.TimedMetadataId3Period metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "timedMetadataId3Period", protocol.Int64Value(v), metadata) } if s.TimestampDeltaMilliseconds != nil { v := *s.TimestampDeltaMilliseconds metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "timestampDeltaMilliseconds", protocol.Int64Value(v), metadata) } return nil } // Settings for HLS output groups type HlsSettings struct { _ struct{} `type:"structure"` // Specifies the group to which the audio Rendition belongs. AudioGroupId *string `locationName:"audioGroupId" type:"string"` // Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream // (M2TS) to create a file in an MPEG2-TS container. Keep the default value // Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless // of the value that you specify here, if this output has video, the service // will place the output into an MPEG2-TS container. AudioOnlyContainer HlsAudioOnlyContainer `locationName:"audioOnlyContainer" type:"string" enum:"true"` // List all the audio groups that are used with the video output stream. Input // all the audio GROUP-IDs that are associated to the video, separate by ','. 
AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"` // Four types of audio-only tracks are supported: Audio-Only Variant Stream // The client can play back this audio-only stream instead of video in low-bandwidth // scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate // Audio, Auto Select, Default Alternate rendition that the client should try // to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest // with DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default // Alternate rendition that the client may try to play back by default. Represented // as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate // Audio, not Auto Select Alternate rendition that the client will not try to // play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with // DEFAULT=NO, AUTOSELECT=NO AudioTrackType HlsAudioTrackType `locationName:"audioTrackType" type:"string" enum:"true"` // When set to INCLUDE, writes I-Frame Only Manifest in addition to the HLS // manifest IFrameOnlyManifest HlsIFrameOnlyManifest `locationName:"iFrameOnlyManifest" type:"string" enum:"true"` // Use this setting to add an identifying string to the filename of each segment. // The service adds this string between the name modifier and segment index // number. You can use format identifiers in the string. For more information, // see https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html SegmentModifier *string `locationName:"segmentModifier" type:"string"` } // String returns the string representation func (s HlsSettings) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s HlsSettings) MarshalFields(e protocol.FieldEncoder) error {
	// Members are emitted only when set: pointer members when non-nil, enum
	// members when non-empty.
	if s.AudioGroupId != nil {
		v := *s.AudioGroupId
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioGroupId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.AudioOnlyContainer) > 0 {
		v := s.AudioOnlyContainer
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioOnlyContainer", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.AudioRenditionSets != nil {
		v := *s.AudioRenditionSets
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioRenditionSets", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.AudioTrackType) > 0 {
		v := s.AudioTrackType
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioTrackType", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.IFrameOnlyManifest) > 0 {
		v := s.IFrameOnlyManifest
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "iFrameOnlyManifest", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SegmentModifier != nil {
		v := *s.SegmentModifier
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "segmentModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Optional. Configuration for a destination queue to which the job can hop
// once a customer-defined minimum wait time has passed.
type HopDestination struct {
	_ struct{} `type:"structure"`

	// Optional. When you set up a job to use queue hopping, you can specify a different
	// relative priority for the job in the destination queue. If you don't specify,
	// the relative priority will remain the same as in the previous queue.
	Priority *int64 `locationName:"priority" type:"integer"`

	// Optional unless the job is submitted on the default queue. When you set up
	// a job to use queue hopping, you can specify a destination queue. This queue
	// cannot be the original queue to which the job is submitted. If the original
	// queue isn't the default queue and you don't specify the destination queue,
	// the job will move to the default queue.
	Queue *string `locationName:"queue" type:"string"`

	// Required for setting up a job to use queue hopping. Minimum wait time in
	// minutes until the job can hop to the destination queue. Valid range is 1
	// to 1440 minutes, inclusive.
	WaitMinutes *int64 `locationName:"waitMinutes" type:"integer"`
}

// String returns the string representation
func (s HopDestination) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HopDestination) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "HopDestination"}
	// Priority, when set, must be at least -50.
	if s.Priority != nil && *s.Priority < -50 {
		invalidParams.Add(aws.NewErrParamMinValue("Priority", -50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s HopDestination) MarshalFields(e protocol.FieldEncoder) error {
	if s.Priority != nil {
		v := *s.Priority
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "priority", protocol.Int64Value(v), metadata)
	}
	if s.Queue != nil {
		v := *s.Queue
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "queue", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.WaitMinutes != nil {
		v := *s.WaitMinutes
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "waitMinutes", protocol.Int64Value(v), metadata)
	}
	return nil
}

// To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3)
// to specify the base 64 encoded string and use Timecode (TimeCode) to specify
// the time when the tag should be inserted. To insert multiple ID3 tags in
// your output, create multiple instances of ID3 insertion (Id3Insertion).
type Id3Insertion struct {
	_ struct{} `type:"structure"`

	// Use ID3 tag (Id3) to provide a tag value in base64-encode format.
	Id3 *string `locationName:"id3" type:"string"`

	// Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format.
	Timecode *string `locationName:"timecode" type:"string"`
}

// String returns the string representation
func (s Id3Insertion) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Both members are optional strings, emitted only when non-nil.
func (s Id3Insertion) MarshalFields(e protocol.FieldEncoder) error {
	if s.Id3 != nil {
		v := *s.Id3
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "id3", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Timecode != nil {
		v := *s.Timecode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timecode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Enable the image inserter feature to include a graphic overlay on your video.
// Enable or disable this feature for each input or output individually. This
// setting is disabled by default.
type ImageInserter struct {
	_ struct{} `type:"structure"`

	// Specify the images that you want to overlay on your video. The images must
	// be PNG or TGA files.
	InsertableImages []InsertableImage `locationName:"insertableImages" type:"list"`
}

// String returns the string representation
func (s ImageInserter) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ImageInserter) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "ImageInserter"}
	// Each overlay image validates recursively; failures are reported with
	// their list index, e.g. "InsertableImages[2]".
	if s.InsertableImages != nil {
		for i, v := range s.InsertableImages {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InsertableImages", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s ImageInserter) MarshalFields(e protocol.FieldEncoder) error {
	if s.InsertableImages != nil {
		v := s.InsertableImages
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "insertableImages", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	return nil
}

// Settings specific to IMSC caption outputs.
type ImscDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Keep this setting enabled to have MediaConvert use the font style and position
	// information from the captions source in the output. This option is available
	// only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting
	// for simplified output captions.
	StylePassthrough ImscStylePassthrough `locationName:"stylePassthrough" type:"string" enum:"true"`
}

// String returns the string representation
func (s ImscDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s ImscDestinationSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.StylePassthrough) > 0 {
		v := s.StylePassthrough
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "stylePassthrough", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Specifies media input
type Input struct {
	_ struct{} `type:"structure"`

	// Specifies set of audio selectors within an input to combine. An input may
	// have multiple audio selector groups. See "Audio Selector Group":#inputs-audio_selector_group
	// for more information.
	AudioSelectorGroups map[string]AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"`

	// Use Audio selectors (AudioSelectors) to specify a track or set of tracks
	// from the input that you will use in your outputs. You can use multiple Audio
	// selectors per input.
	AudioSelectors map[string]AudioSelector `locationName:"audioSelectors" type:"map"`

	// Use captions selectors to specify the captions data from your input that
	// you use in your outputs. You can use up to 20 captions selectors per input.
	CaptionSelectors map[string]CaptionSelector `locationName:"captionSelectors" type:"map"`

	// Use Cropping selection (crop) to specify the video area that the service
	// will include in the output video frame. If you specify a value here, it will
	// override any value that you specify in the output setting Cropping selection
	// (crop).
	Crop *Rectangle `locationName:"crop" type:"structure"`

	// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
	// Default is disabled. Only manually controllable for MPEG2 and uncompressed
	// video inputs.
	DeblockFilter InputDeblockFilter `locationName:"deblockFilter" type:"string" enum:"true"`

	// Settings for decrypting any input files that you encrypt before you upload
	// them to Amazon S3. MediaConvert can decrypt files only when you use AWS Key
	// Management Service (KMS) to encrypt the data key that you use to encrypt
	// your content.
	DecryptionSettings *InputDecryptionSettings `locationName:"decryptionSettings" type:"structure"`

	// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default
	// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video
	// inputs.
	DenoiseFilter InputDenoiseFilter `locationName:"denoiseFilter" type:"string" enum:"true"`

	// Specify the source file for your transcoding job. You can use multiple inputs
	// in a single job. The service concatenates these inputs, in the order that
	// you specify them in the job, to create the outputs. If your input format
	// is IMF, specify your input by providing the path to your CPL. For example,
	// "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to
	// use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs
	// that contain assets referenced by the CPL.
	FileInput *string `locationName:"fileInput" type:"string"`

	// Use Filter enable (InputFilterEnable) to specify how the transcoding service
	// applies the denoise and deblock filters. You must also enable the filters
	// separately, with Denoise (InputDenoiseFilter) and Deblock (InputDeblockFilter).
	// * Auto - The transcoding service determines whether to apply filtering, depending
	// on input type and quality. * Disable - The input is not filtered. This is
	// true even if you use the API to enable them in (InputDeblockFilter) and (InputDeblockFilter).
	// * Force - The in put is filtered regardless of input type.
	FilterEnable InputFilterEnable `locationName:"filterEnable" type:"string" enum:"true"`

	// Use Filter strength (FilterStrength) to adjust the magnitude the input filter
	// settings (Deblock and Denoise). The range is -5 to 5. Default is 0.
	FilterStrength *int64 `locationName:"filterStrength" type:"integer"`

	// Enable the image inserter feature to include a graphic overlay on your video.
	// Enable or disable this feature for each input individually. This setting
	// is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// (InputClippings) contains sets of start and end times that together specify
	// a portion of the input to be used in the outputs. If you provide only a start
	// time, the clip will be the entire input from that point to the end. If you
	// provide only an end time, it will be the entire input up to that point. When
	// you specify more than one input clip, the transcoding service creates the
	// job outputs by stringing the clips together in the order you specify them.
	InputClippings []InputClipping `locationName:"inputClippings" type:"list"`

	// Use Selection placement (position) to define the video area in your output
	// frame. The area outside of the rectangle that you specify here is black.
	// If you specify a value here, it will override any value that you specify
	// in the output setting Selection placement (position). If you specify a value
	// here, this will override any AFD values in your input, even if you set Respond
	// to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here,
	// this will ignore anything that you specify for the setting Scaling Behavior
	// (scalingBehavior).
	Position *Rectangle `locationName:"position" type:"structure"`

	// Use Program (programNumber) to select a specific program from within a multi-program
	// transport stream. Note that Quad 4K is not currently supported. Default is
	// the first program within the transport stream. If the program you specify
	// doesn't exist, the transcoding service will use this default.
	ProgramNumber *int64 `locationName:"programNumber" min:"1" type:"integer"`

	// Set PSI control (InputPsiControl) for transport stream inputs to specify
	// which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio
	// and video. * Use PSI - Scan only PSI data.
	PsiControl InputPsiControl `locationName:"psiControl" type:"string" enum:"true"`

	// Provide a list of any necessary supplemental IMPs. You need supplemental
	// IMPs if the CPL that you're using for your input is in an incomplete IMP.
	// Specify either the supplemental IMP directories with a trailing slash or
	// the ASSETMAP.xml files. For example ["s3://bucket/ov/", "s3://bucket/vf2/ASSETMAP.xml"].
	// You don't need to specify the IMP that contains your input CPL, because the
	// service automatically detects it.
	SupplementalImps []string `locationName:"supplementalImps" type:"list"`

	// Use this Timecode source setting, located under the input settings (InputTimecodeSource),
	// to specify how the service counts input video frames. This input frame count
	// affects only the behavior of features that apply to a single input at a time,
	// such as input clipping and synchronizing some captions formats. Choose Embedded
	// (EMBEDDED) to use the timecodes in your input video. Choose Start at zero
	// (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART)
	// to start the first frame at the timecode that you specify in the setting
	// Start timecode (timecodeStart). If you don't specify a value for Timecode
	// source, the service will use Embedded by default. For more information about
	// timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
	TimecodeSource InputTimecodeSource `locationName:"timecodeSource" type:"string" enum:"true"`

	// Specify the timecode that you want the service to use for this input's initial
	// frame. To use this setting, you must set the Timecode source setting, located
	// under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART).
	// For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
	TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"`

	// Selector for video.
	VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"`
}

// String returns the string representation
func (s Input) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Input) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "Input"} if s.FilterStrength != nil && *s.FilterStrength < -5 { invalidParams.Add(aws.NewErrParamMinValue("FilterStrength", -5)) } if s.ProgramNumber != nil && *s.ProgramNumber < 1 { invalidParams.Add(aws.NewErrParamMinValue("ProgramNumber", 1)) } if s.TimecodeStart != nil && len(*s.TimecodeStart) < 11 { invalidParams.Add(aws.NewErrParamMinLen("TimecodeStart", 11)) } if s.AudioSelectors != nil { for i, v := range s.AudioSelectors { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioSelectors", i), err.(aws.ErrInvalidParams)) } } } if s.CaptionSelectors != nil { for i, v := range s.CaptionSelectors { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionSelectors", i), err.(aws.ErrInvalidParams)) } } } if s.Crop != nil { if err := s.Crop.Validate(); err != nil { invalidParams.AddNested("Crop", err.(aws.ErrInvalidParams)) } } if s.DecryptionSettings != nil { if err := s.DecryptionSettings.Validate(); err != nil { invalidParams.AddNested("DecryptionSettings", err.(aws.ErrInvalidParams)) } } if s.ImageInserter != nil { if err := s.ImageInserter.Validate(); err != nil { invalidParams.AddNested("ImageInserter", err.(aws.ErrInvalidParams)) } } if s.Position != nil { if err := s.Position.Validate(); err != nil { invalidParams.AddNested("Position", err.(aws.ErrInvalidParams)) } } if s.VideoSelector != nil { if err := s.VideoSelector.Validate(); err != nil { invalidParams.AddNested("VideoSelector", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s Input) MarshalFields(e protocol.FieldEncoder) error {
	// Each optional field is emitted only when set. Maps of nested shapes
	// (selector groups and selectors) marshal as JSON objects keyed by the
	// selector name; enum values marshal as quoted strings.
	if s.AudioSelectorGroups != nil {
		v := s.AudioSelectorGroups
		metadata := protocol.Metadata{}
		ms0 := e.Map(protocol.BodyTarget, "audioSelectorGroups", metadata)
		ms0.Start()
		for k1, v1 := range v {
			ms0.MapSetFields(k1, v1)
		}
		ms0.End()
	}
	if s.AudioSelectors != nil {
		v := s.AudioSelectors
		metadata := protocol.Metadata{}
		ms0 := e.Map(protocol.BodyTarget, "audioSelectors", metadata)
		ms0.Start()
		for k1, v1 := range v {
			ms0.MapSetFields(k1, v1)
		}
		ms0.End()
	}
	if s.CaptionSelectors != nil {
		v := s.CaptionSelectors
		metadata := protocol.Metadata{}
		ms0 := e.Map(protocol.BodyTarget, "captionSelectors", metadata)
		ms0.Start()
		for k1, v1 := range v {
			ms0.MapSetFields(k1, v1)
		}
		ms0.End()
	}
	if s.Crop != nil {
		v := s.Crop
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "crop", v, metadata)
	}
	if len(s.DeblockFilter) > 0 {
		v := s.DeblockFilter
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "deblockFilter", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.DecryptionSettings != nil {
		v := s.DecryptionSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "decryptionSettings", v, metadata)
	}
	if len(s.DenoiseFilter) > 0 {
		v := s.DenoiseFilter
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "denoiseFilter", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FileInput != nil {
		v := *s.FileInput
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fileInput", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.FilterEnable) > 0 {
		v := s.FilterEnable
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "filterEnable", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FilterStrength != nil {
		v := *s.FilterStrength
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "filterStrength", protocol.Int64Value(v), metadata)
	}
	if s.ImageInserter != nil {
		v := s.ImageInserter
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "imageInserter", v, metadata)
	}
	if s.InputClippings != nil {
		v := s.InputClippings
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "inputClippings", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if s.Position != nil {
		v := s.Position
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "position", v, metadata)
	}
	if s.ProgramNumber != nil {
		v := *s.ProgramNumber
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "programNumber", protocol.Int64Value(v), metadata)
	}
	if len(s.PsiControl) > 0 {
		v := s.PsiControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "psiControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SupplementalImps != nil {
		v := s.SupplementalImps
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "supplementalImps", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	if len(s.TimecodeSource) > 0 {
		v := s.TimecodeSource
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timecodeSource", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.TimecodeStart != nil {
		v := *s.TimecodeStart
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timecodeStart", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.VideoSelector != nil {
		v := s.VideoSelector
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "videoSelector", v, metadata)
	}
	return nil
}

// To transcode only portions of your input (clips), include one Input clipping
// (one instance of InputClipping in the JSON job file) for each input clip.
// All input clips you specify will be included in every output of the job.
type InputClipping struct {
	_ struct{} `type:"structure"`

	// Set End timecode (EndTimecode) to the end of the portion of the input you
	// are clipping. The frame corresponding to the End timecode value is included
	// in the clip. Start timecode or End timecode may be left blank, but not both.
	// Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the
	// minute, SS is the second, and FF is the frame number. When choosing this
	// value, take into account your setting for timecode source under input settings
	// (InputTimecodeSource). For example, if you have embedded timecodes that start
	// at 01:00:00:00 and you want your clip to end six minutes into the video,
	// use 01:06:00:00.
	EndTimecode *string `locationName:"endTimecode" type:"string"`

	// Set Start timecode (StartTimecode) to the beginning of the portion of the
	// input you are clipping. The frame corresponding to the Start timecode value
	// is included in the clip. Start timecode or End timecode may be left blank,
	// but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the
	// hour, MM is the minute, SS is the second, and FF is the frame number. When
	// choosing this value, take into account your setting for Input timecode source.
	// For example, if you have embedded timecodes that start at 01:00:00:00 and
	// you want your clip to begin five minutes into the video, use 01:05:00:00.
	StartTimecode *string `locationName:"startTimecode" type:"string"`
}

// String returns the string representation
func (s InputClipping) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s InputClipping) MarshalFields(e protocol.FieldEncoder) error {
	// Both timecodes are optional strings; each is emitted only when set.
	if s.EndTimecode != nil {
		v := *s.EndTimecode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "endTimecode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.StartTimecode != nil {
		v := *s.StartTimecode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "startTimecode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Settings for decrypting any input files that you encrypt before you upload
// them to Amazon S3. MediaConvert can decrypt files only when you use AWS Key
// Management Service (KMS) to encrypt the data key that you use to encrypt
// your content.
type InputDecryptionSettings struct {
	_ struct{} `type:"structure"`

	// Specify the encryption mode that you used to encrypt your input files.
	DecryptionMode DecryptionMode `locationName:"decryptionMode" type:"string" enum:"true"`

	// Warning! Don't provide your encryption key in plaintext. Your job settings
	// could be intercepted, making your encrypted content vulnerable. Specify the
	// encrypted version of the data key that you used to encrypt your content.
	// The data key must be encrypted by AWS Key Management Service (KMS). The key
	// can be 128, 192, or 256 bits.
	EncryptedDecryptionKey *string `locationName:"encryptedDecryptionKey" min:"24" type:"string"`

	// Specify the initialization vector that you used when you encrypted your content
	// before uploading it to Amazon S3. You can use a 16-byte initialization vector
	// with any encryption mode. Or, you can use a 12-byte initialization vector
	// with GCM or CTR. MediaConvert accepts only initialization vectors that are
	// base64-encoded.
	InitializationVector *string `locationName:"initializationVector" min:"16" type:"string"`

	// Specify the AWS Region for AWS Key Management Service (KMS) that you used
	// to encrypt your data key, if that Region is different from the one you are
	// using for AWS Elemental MediaConvert.
	KmsKeyRegion *string `locationName:"kmsKeyRegion" min:"9" type:"string"`
}

// String returns the string representation
func (s InputDecryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InputDecryptionSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "InputDecryptionSettings"}

	// Minimum-length constraints from the API model; note these are lengths
	// of the base64-encoded string values, not of the decoded key/IV bytes.
	if s.EncryptedDecryptionKey != nil && len(*s.EncryptedDecryptionKey) < 24 {
		invalidParams.Add(aws.NewErrParamMinLen("EncryptedDecryptionKey", 24))
	}
	if s.InitializationVector != nil && len(*s.InitializationVector) < 16 {
		invalidParams.Add(aws.NewErrParamMinLen("InitializationVector", 16))
	}
	if s.KmsKeyRegion != nil && len(*s.KmsKeyRegion) < 9 {
		invalidParams.Add(aws.NewErrParamMinLen("KmsKeyRegion", 9))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s InputDecryptionSettings) MarshalFields(e protocol.FieldEncoder) error {
	// All four fields are optional; each is emitted only when set, as a
	// quoted JSON string.
	if len(s.DecryptionMode) > 0 {
		v := s.DecryptionMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "decryptionMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.EncryptedDecryptionKey != nil {
		v := *s.EncryptedDecryptionKey
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "encryptedDecryptionKey", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.InitializationVector != nil {
		v := *s.InitializationVector
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "initializationVector", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.KmsKeyRegion != nil {
		v := *s.KmsKeyRegion
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "kmsKeyRegion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Specified video input in a template.
type InputTemplate struct {
	_ struct{} `type:"structure"`

	// Specifies set of audio selectors within an input to combine. An input may
	// have multiple audio selector groups. See "Audio Selector Group":#inputs-audio_selector_group
	// for more information.
	AudioSelectorGroups map[string]AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"`

	// Use Audio selectors (AudioSelectors) to specify a track or set of tracks
	// from the input that you will use in your outputs. You can use multiple Audio
	// selectors per input.
	AudioSelectors map[string]AudioSelector `locationName:"audioSelectors" type:"map"`

	// Use captions selectors to specify the captions data from your input that
	// you use in your outputs. You can use up to 20 captions selectors per input.
	CaptionSelectors map[string]CaptionSelector `locationName:"captionSelectors" type:"map"`

	// Use Cropping selection (crop) to specify the video area that the service
	// will include in the output video frame. If you specify a value here, it will
	// override any value that you specify in the output setting Cropping selection
	// (crop).
	Crop *Rectangle `locationName:"crop" type:"structure"`

	// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
	// Default is disabled. Only manually controllable for MPEG2 and uncompressed
	// video inputs.
	DeblockFilter InputDeblockFilter `locationName:"deblockFilter" type:"string" enum:"true"`

	// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default
	// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video
	// inputs.
	DenoiseFilter InputDenoiseFilter `locationName:"denoiseFilter" type:"string" enum:"true"`

	// Use Filter enable (InputFilterEnable) to specify how the transcoding service
	// applies the denoise and deblock filters. You must also enable the filters
	// separately, with Denoise (InputDenoiseFilter) and Deblock (InputDeblockFilter).
	// * Auto - The transcoding service determines whether to apply filtering, depending
	// on input type and quality. * Disable - The input is not filtered. This is
	// true even if you use the API to enable them in (InputDeblockFilter) and (InputDenoiseFilter).
	// * Force - The input is filtered regardless of input type.
	FilterEnable InputFilterEnable `locationName:"filterEnable" type:"string" enum:"true"`

	// Use Filter strength (FilterStrength) to adjust the magnitude the input filter
	// settings (Deblock and Denoise). The range is -5 to 5. Default is 0.
	FilterStrength *int64 `locationName:"filterStrength" type:"integer"`

	// Enable the image inserter feature to include a graphic overlay on your video.
	// Enable or disable this feature for each input individually. This setting
	// is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// (InputClippings) contains sets of start and end times that together specify
	// a portion of the input to be used in the outputs. If you provide only a start
	// time, the clip will be the entire input from that point to the end. If you
	// provide only an end time, it will be the entire input up to that point. When
	// you specify more than one input clip, the transcoding service creates the
	// job outputs by stringing the clips together in the order you specify them.
	InputClippings []InputClipping `locationName:"inputClippings" type:"list"`

	// Use Selection placement (position) to define the video area in your output
	// frame. The area outside of the rectangle that you specify here is black.
	// If you specify a value here, it will override any value that you specify
	// in the output setting Selection placement (position). If you specify a value
	// here, this will override any AFD values in your input, even if you set Respond
	// to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here,
	// this will ignore anything that you specify for the setting Scaling Behavior
	// (scalingBehavior).
	Position *Rectangle `locationName:"position" type:"structure"`

	// Use Program (programNumber) to select a specific program from within a multi-program
	// transport stream. Note that Quad 4K is not currently supported. Default is
	// the first program within the transport stream. If the program you specify
	// doesn't exist, the transcoding service will use this default.
	ProgramNumber *int64 `locationName:"programNumber" min:"1" type:"integer"`

	// Set PSI control (InputPsiControl) for transport stream inputs to specify
	// which data the demux process scans. * Ignore PSI - Scan all PIDs for audio
	// and video. * Use PSI - Scan only PSI data.
	PsiControl InputPsiControl `locationName:"psiControl" type:"string" enum:"true"`

	// Use this Timecode source setting, located under the input settings (InputTimecodeSource),
	// to specify how the service counts input video frames. This input frame count
	// affects only the behavior of features that apply to a single input at a time,
	// such as input clipping and synchronizing some captions formats. Choose Embedded
	// (EMBEDDED) to use the timecodes in your input video. Choose Start at zero
	// (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART)
	// to start the first frame at the timecode that you specify in the setting
	// Start timecode (timecodeStart). If you don't specify a value for Timecode
	// source, the service will use Embedded by default. For more information about
	// timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
	TimecodeSource InputTimecodeSource `locationName:"timecodeSource" type:"string" enum:"true"`

	// Specify the timecode that you want the service to use for this input's initial
	// frame. To use this setting, you must set the Timecode source setting, located
	// under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART).
	// For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
	TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"`

	// Selector for video.
	VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"`
}

// String returns the string representation
func (s InputTemplate) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InputTemplate) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "InputTemplate"}

	// Scalar minimums from the API model: FilterStrength >= -5,
	// ProgramNumber >= 1, and TimecodeStart at least 11 characters
	// (HH:MM:SS:FF or HH:MM:SS;FF).
	if s.FilterStrength != nil && *s.FilterStrength < -5 {
		invalidParams.Add(aws.NewErrParamMinValue("FilterStrength", -5))
	}
	if s.ProgramNumber != nil && *s.ProgramNumber < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ProgramNumber", 1))
	}
	if s.TimecodeStart != nil && len(*s.TimecodeStart) < 11 {
		invalidParams.Add(aws.NewErrParamMinLen("TimecodeStart", 11))
	}

	// Recursively validate nested shapes, attaching the map key or field
	// name as context. Unlike Input, InputTemplate has no FileInput or
	// DecryptionSettings, so those checks are absent here.
	if s.AudioSelectors != nil {
		for i, v := range s.AudioSelectors {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioSelectors", i), err.(aws.ErrInvalidParams))
			}
		}
	}
	if s.CaptionSelectors != nil {
		for i, v := range s.CaptionSelectors {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionSelectors", i), err.(aws.ErrInvalidParams))
			}
		}
	}
	if s.Crop != nil {
		if err := s.Crop.Validate(); err != nil {
			invalidParams.AddNested("Crop", err.(aws.ErrInvalidParams))
		}
	}
	if s.ImageInserter != nil {
		if err := s.ImageInserter.Validate(); err != nil {
			invalidParams.AddNested("ImageInserter", err.(aws.ErrInvalidParams))
		}
	}
	if s.Position != nil {
		if err := s.Position.Validate(); err != nil {
			invalidParams.AddNested("Position", err.(aws.ErrInvalidParams))
		}
	}
	if s.VideoSelector != nil {
		if err := s.VideoSelector.Validate(); err != nil {
			invalidParams.AddNested("VideoSelector", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s InputTemplate) MarshalFields(e protocol.FieldEncoder) error {
	// Each optional field is emitted only when set; enum values marshal as
	// quoted strings. Relative to Input.MarshalFields, the template shape
	// omits fileInput, decryptionSettings, and supplementalImps.
	if s.AudioSelectorGroups != nil {
		v := s.AudioSelectorGroups
		metadata := protocol.Metadata{}
		ms0 := e.Map(protocol.BodyTarget, "audioSelectorGroups", metadata)
		ms0.Start()
		for k1, v1 := range v {
			ms0.MapSetFields(k1, v1)
		}
		ms0.End()
	}
	if s.AudioSelectors != nil {
		v := s.AudioSelectors
		metadata := protocol.Metadata{}
		ms0 := e.Map(protocol.BodyTarget, "audioSelectors", metadata)
		ms0.Start()
		for k1, v1 := range v {
			ms0.MapSetFields(k1, v1)
		}
		ms0.End()
	}
	if s.CaptionSelectors != nil {
		v := s.CaptionSelectors
		metadata := protocol.Metadata{}
		ms0 := e.Map(protocol.BodyTarget, "captionSelectors", metadata)
		ms0.Start()
		for k1, v1 := range v {
			ms0.MapSetFields(k1, v1)
		}
		ms0.End()
	}
	if s.Crop != nil {
		v := s.Crop
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "crop", v, metadata)
	}
	if len(s.DeblockFilter) > 0 {
		v := s.DeblockFilter
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "deblockFilter", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.DenoiseFilter) > 0 {
		v := s.DenoiseFilter
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "denoiseFilter", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FilterEnable) > 0 {
		v := s.FilterEnable
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "filterEnable", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FilterStrength != nil {
		v := *s.FilterStrength
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "filterStrength", protocol.Int64Value(v), metadata)
	}
	if s.ImageInserter != nil {
		v := s.ImageInserter
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "imageInserter", v, metadata)
	}
	if s.InputClippings != nil {
		v := s.InputClippings
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "inputClippings", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if s.Position != nil {
		v := s.Position
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "position", v, metadata)
	}
	if s.ProgramNumber != nil {
		v := *s.ProgramNumber
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "programNumber", protocol.Int64Value(v), metadata)
	}
	if len(s.PsiControl) > 0 {
		v := s.PsiControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "psiControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.TimecodeSource) > 0 {
		v := s.TimecodeSource
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timecodeSource", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.TimecodeStart != nil {
		v := *s.TimecodeStart
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timecodeStart", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.VideoSelector != nil {
		v := s.VideoSelector
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "videoSelector", v, metadata)
	}
	return nil
}

// Settings that specify how your still graphic overlay appears.
type InsertableImage struct {
	_ struct{} `type:"structure"`

	// Specify the time, in milliseconds, for the image to remain on the output
	// video. This duration includes fade-in time but not fade-out time.
	Duration *int64 `locationName:"duration" type:"integer"`

	// Specify the length of time, in milliseconds, between the Start time that
	// you specify for the image insertion and the time that the image appears at
	// full opacity. Full opacity is the level that you specify for the opacity
	// setting. If you don't specify a value for Fade-in, the image will appear
	// abruptly at the overlay start time.
	FadeIn *int64 `locationName:"fadeIn" type:"integer"`

	// Specify the length of time, in milliseconds, between the end of the time
	// that you have specified for the image overlay Duration and when the overlaid
	// image has faded to total transparency. If you don't specify a value for Fade-out,
	// the image will disappear abruptly at the end of the inserted image duration.
	FadeOut *int64 `locationName:"fadeOut" type:"integer"`

	// Specify the height of the inserted image in pixels. If you specify a value
	// that's larger than the video resolution height, the service will crop your
	// overlaid image to fit. To use the native height of the image, keep this setting
	// blank.
	Height *int64 `locationName:"height" type:"integer"`

	// Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want
	// to overlay on the video. Use a PNG or TGA file.
	ImageInserterInput *string `locationName:"imageInserterInput" min:"14" type:"string"`

	// Specify the distance, in pixels, between the inserted image and the left
	// edge of the video frame. Required for any image overlay that you specify.
	ImageX *int64 `locationName:"imageX" type:"integer"`

	// Specify the distance, in pixels, between the overlaid image and the top edge
	// of the video frame. Required for any image overlay that you specify.
	ImageY *int64 `locationName:"imageY" type:"integer"`

	// Specify how overlapping inserted images appear. Images with higher values
	// for Layer appear on top of images with lower values for Layer.
	Layer *int64 `locationName:"layer" type:"integer"`

	// Use Opacity (Opacity) to specify how much of the underlying video shows through
	// the inserted image. 0 is transparent and 100 is fully opaque. Default is
	// 50.
	Opacity *int64 `locationName:"opacity" type:"integer"`

	// Specify the timecode of the frame that you want the overlay to first appear
	// on. This must be in timecode (HH:MM:SS:FF or HH:MM:SS;FF) format. Remember
	// to take into account your timecode source settings.
	StartTime *string `locationName:"startTime" type:"string"`

	// Specify the width of the inserted image in pixels. If you specify a value
	// that's larger than the video resolution width, the service will crop your
	// overlaid image to fit. To use the native width of the image, keep this setting
	// blank.
	Width *int64 `locationName:"width" type:"integer"`
}

// String returns the string representation
func (s InsertableImage) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InsertableImage) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "InsertableImage"}

	// The only modeled constraint: the image location string must be at
	// least 14 characters.
	if s.ImageInserterInput != nil && len(*s.ImageInserterInput) < 14 {
		invalidParams.Add(aws.NewErrParamMinLen("ImageInserterInput", 14))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s InsertableImage) MarshalFields(e protocol.FieldEncoder) error {
	// Each optional field is emitted only when set; integers marshal bare,
	// strings marshal quoted.
	if s.Duration != nil {
		v := *s.Duration
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "duration", protocol.Int64Value(v), metadata)
	}
	if s.FadeIn != nil {
		v := *s.FadeIn
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fadeIn", protocol.Int64Value(v), metadata)
	}
	if s.FadeOut != nil {
		v := *s.FadeOut
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fadeOut", protocol.Int64Value(v), metadata)
	}
	if s.Height != nil {
		v := *s.Height
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "height", protocol.Int64Value(v), metadata)
	}
	if s.ImageInserterInput != nil {
		v := *s.ImageInserterInput
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "imageInserterInput", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.ImageX != nil {
		v := *s.ImageX
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "imageX", protocol.Int64Value(v), metadata)
	}
	if s.ImageY != nil {
		v := *s.ImageY
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "imageY", protocol.Int64Value(v), metadata)
	}
	if s.Layer != nil {
		v := *s.Layer
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "layer", protocol.Int64Value(v), metadata)
	}
	if s.Opacity != nil {
		v := *s.Opacity
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "opacity", protocol.Int64Value(v), metadata)
	}
	if s.StartTime != nil {
		v := *s.StartTime
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "startTime", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Width != nil {
		v := *s.Width
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "width", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Each job converts an input file into an output file or files. For more information,
// see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
type Job struct {
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// Describes whether the current job is running with accelerated transcoding.
	// For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus
	// is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode)
	// set to ENABLED or PREFERRED, AccelerationStatus is one of the other states.
	// AccelerationStatus is IN_PROGRESS initially, while the service determines
	// whether the input files and job settings are compatible with accelerated
	// transcoding. If they are, AccelerationStatus is ACCELERATED. If your input
	// files and job settings aren't compatible with accelerated transcoding, the
	// service either fails your job or runs it without accelerated transcoding,
	// depending on how you set Acceleration (AccelerationMode). When the service
	// runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED.
	AccelerationStatus AccelerationStatus `locationName:"accelerationStatus" type:"string" enum:"true"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// The tag type that AWS Billing and Cost Management will use to sort your AWS
	// Elemental MediaConvert costs on any billing report that you set up.
	BillingTagsSource BillingTagsSource `locationName:"billingTagsSource" type:"string" enum:"true"`

	// The time, in Unix epoch format in seconds, when the job got created.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A job's phase can be PROBING, TRANSCODING OR UPLOADING
	CurrentPhase JobPhase `locationName:"currentPhase" type:"string" enum:"true"`

	// Error code for the job
	ErrorCode *int64 `locationName:"errorCode" type:"integer"`

	// Error message of Job
	ErrorMessage *string `locationName:"errorMessage" type:"string"`

	// Optional list of hop destinations.
	HopDestinations []HopDestination `locationName:"hopDestinations" type:"list"`

	// A portion of the job's ARN, unique within your AWS Elemental MediaConvert
	// resources
	Id *string `locationName:"id" type:"string"`

	// An estimate of how far your job has progressed. This estimate is shown as
	// a percentage of the total time from when your job leaves its queue to when
	// your output files appear in your output Amazon S3 bucket. AWS Elemental MediaConvert
	// provides jobPercentComplete in CloudWatch STATUS_UPDATE events and in the
	// response to GetJob and ListJobs requests. The jobPercentComplete estimate
	// is reliable for the following input containers: Quicktime, Transport Stream,
	// MP4, and MXF. For some jobs, the service can't provide information about
	// job progress. In those cases, jobPercentComplete returns a null value.
	JobPercentComplete *int64 `locationName:"jobPercentComplete" type:"integer"`

	// The job template that the job is created from, if it is created from a job
	// template.
	JobTemplate *string `locationName:"jobTemplate" type:"string"`

	// Provides messages from the service about jobs that you have already successfully
	// submitted.
	Messages *JobMessages `locationName:"messages" type:"structure"`

	// List of output group details
	OutputGroupDetails []OutputGroupDetail `locationName:"outputGroupDetails" type:"list"`

	// Relative priority on the job.
	Priority *int64 `locationName:"priority" type:"integer"`

	// When you create a job, you can specify a queue to send it to. If you don't
	// specify, the job will go to the default queue. For more about queues, see
	// the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
	Queue *string `locationName:"queue" type:"string"`

	// The job's queue hopping history.
	QueueTransitions []QueueTransition `locationName:"queueTransitions" type:"list"`

	// The number of times that the service automatically attempted to process your
	// job after encountering an error.
	RetryCount *int64 `locationName:"retryCount" type:"integer"`

	// The IAM role you use for creating this job. For details about permissions,
	// see the User Guide topic at the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html
	//
	// Role is a required field
	Role *string `locationName:"role" type:"string" required:"true"`

	// JobSettings contains all the transcode settings for a job.
	//
	// Settings is a required field
	Settings *JobSettings `locationName:"settings" type:"structure" required:"true"`

	// Enable this setting when you run a test job to estimate how many reserved
	// transcoding slots (RTS) you need. When this is enabled, MediaConvert runs
	// your job from an on-demand queue with similar performance to what you will
	// see with one RTS in a reserved queue. This setting is disabled by default.
	SimulateReservedQueue SimulateReservedQueue `locationName:"simulateReservedQueue" type:"string" enum:"true"`

	// A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.
	Status JobStatus `locationName:"status" type:"string" enum:"true"`

	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
	// Events. Set the interval, in seconds, between status updates. MediaConvert
	// sends an update at this interval from the time the service begins processing
	// your job to the time it completes the transcode or encounters an error.
	StatusUpdateInterval StatusUpdateInterval `locationName:"statusUpdateInterval" type:"string" enum:"true"`

	// Information about when jobs are submitted, started, and finished is specified
	// in Unix epoch format in seconds.
	Timing *Timing `locationName:"timing" type:"structure"`

	// User-defined metadata that you want to associate with an MediaConvert job.
	// You specify metadata in key/value pairs.
	UserMetadata map[string]string `locationName:"userMetadata" type:"map"`
}

// String returns the string representation
func (s Job) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// NOTE(review): generated code (see file header); any manual change here is
// overwritten on the next regeneration. Fields are emitted in the fixed
// (alphabetical) order below; unset fields (nil pointers, zero-length enums)
// are omitted from the request body entirely.
func (s Job) MarshalFields(e protocol.FieldEncoder) error {
	if s.AccelerationSettings != nil {
		v := s.AccelerationSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "accelerationSettings", v, metadata)
	}
	// Enum-typed fields are string-backed, so presence is tested with len() > 0.
	if len(s.AccelerationStatus) > 0 {
		v := s.AccelerationStatus

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "accelerationStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Arn != nil {
		v := *s.Arn

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.BillingTagsSource) > 0 {
		v := s.BillingTagsSource

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "billingTagsSource", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	// Timestamps are serialized as quoted Unix-epoch values.
	if s.CreatedAt != nil {
		v := *s.CreatedAt

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "createdAt", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if len(s.CurrentPhase) > 0 {
		v := s.CurrentPhase

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "currentPhase", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ErrorCode != nil {
		v := *s.ErrorCode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "errorCode", protocol.Int64Value(v), metadata)
	}
	if s.ErrorMessage != nil {
		v := *s.ErrorMessage

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "errorMessage", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.HopDestinations != nil {
		v := s.HopDestinations

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "hopDestinations", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if s.Id != nil {
		v := *s.Id

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.JobPercentComplete != nil {
		v := *s.JobPercentComplete

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "jobPercentComplete", protocol.Int64Value(v), metadata)
	}
	if s.JobTemplate != nil {
		v := *s.JobTemplate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "jobTemplate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Messages != nil {
		v := s.Messages

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "messages", v, metadata)
	}
	if s.OutputGroupDetails != nil {
		v := s.OutputGroupDetails

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "outputGroupDetails", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if s.Priority != nil {
		v := *s.Priority

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "priority", protocol.Int64Value(v), metadata)
	}
	if s.Queue != nil {
		v := *s.Queue

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "queue", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.QueueTransitions != nil {
		v := s.QueueTransitions

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "queueTransitions", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if s.RetryCount != nil {
		v := *s.RetryCount

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "retryCount", protocol.Int64Value(v), metadata)
	}
	if s.Role != nil {
		v := *s.Role

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "role", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Settings != nil {
		v := s.Settings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "settings", v, metadata)
	}
	if len(s.SimulateReservedQueue) > 0 {
		v := s.SimulateReservedQueue

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "simulateReservedQueue", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Status) > 0 {
		v := s.Status

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.StatusUpdateInterval) > 0 {
		v := s.StatusUpdateInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "statusUpdateInterval", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Timing != nil {
		v := s.Timing

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "timing", v, metadata)
	}
	if s.UserMetadata != nil {
		v := s.UserMetadata

		metadata := protocol.Metadata{}
		ms0 := e.Map(protocol.BodyTarget, "userMetadata", metadata)
		ms0.Start()
		for k1, v1 := range v {
			ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ms0.End()
	}
	return nil
}

// Provides messages from the service about jobs that you have already successfully
// submitted.
type JobMessages struct {
	_ struct{} `type:"structure"`

	// List of messages that are informational only and don't indicate a problem
	// with your job.
	Info []string `locationName:"info" type:"list"`

	// List of messages that warn about conditions that might cause your job not
	// to run or to fail.
	Warning []string `locationName:"warning" type:"list"`
}

// String returns the string representation
func (s JobMessages) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Both lists are emitted only when non-nil; elements are quoted strings.
func (s JobMessages) MarshalFields(e protocol.FieldEncoder) error {
	if s.Info != nil {
		v := s.Info

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "info", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	if s.Warning != nil {
		v := s.Warning

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "warning", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	return nil
}

// JobSettings contains all the transcode settings for a job.
type JobSettings struct {
	_ struct{} `type:"structure"`

	// When specified, this offset (in milliseconds) is added to the input Ad Avail
	// PTS time.
	AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"`

	// Settings for ad avail blanking. Video can be blanked or overlaid with an
	// image, and audio muted during SCTE-35 triggered ad avails.
	AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"`

	// Settings for Event Signaling And Messaging (ESAM).
	Esam *EsamSettings `locationName:"esam" type:"structure"`

	// Use Inputs (inputs) to define source files used in the transcode job. There
	// can be multiple inputs added in a job. These inputs will be concatenated
	// together to create the output.
	Inputs []Input `locationName:"inputs" type:"list"`

	// Overlay motion graphics on top of your video. The motion graphics that you
	// specify here appear on all outputs in all output groups.
	MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"`

	// Settings for your Nielsen configuration. If you don't do Nielsen measurement
	// and analytics, ignore these settings. When you enable Nielsen configuration
	// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
	// in the job. To enable Nielsen configuration programmatically, include an
	// instance of nielsenConfiguration in your JSON job specification. Even if
	// you don't include any children of nielsenConfiguration, you still enable
	// the setting.
	NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"`

	// (OutputGroups) contains one group of settings for each set of outputs that
	// share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime,
	// MXF, and no container) are grouped in a single output group as well. Required
	// in (OutputGroups) is a group of settings that apply to the whole group. This
	// required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings).
	// Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings
	// * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings
	// * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS,
	// CmafGroupSettings
	OutputGroups []OutputGroup `locationName:"outputGroups" type:"list"`

	// Contains settings used to acquire and adjust timecode information from inputs.
	TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"`

	// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
	// in your job. To include timed metadata, you must enable it here, enable it
	// in each output container, and specify tags and timecodes in ID3 insertion
	// (Id3Insertion) objects.
	TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"`
}

// String returns the string representation
func (s JobSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *JobSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "JobSettings"} if s.AdAvailOffset != nil && *s.AdAvailOffset < -1000 { invalidParams.Add(aws.NewErrParamMinValue("AdAvailOffset", -1000)) } if s.AvailBlanking != nil { if err := s.AvailBlanking.Validate(); err != nil { invalidParams.AddNested("AvailBlanking", err.(aws.ErrInvalidParams)) } } if s.Inputs != nil { for i, v := range s.Inputs { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Inputs", i), err.(aws.ErrInvalidParams)) } } } if s.MotionImageInserter != nil { if err := s.MotionImageInserter.Validate(); err != nil { invalidParams.AddNested("MotionImageInserter", err.(aws.ErrInvalidParams)) } } if s.OutputGroups != nil { for i, v := range s.OutputGroups { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputGroups", i), err.(aws.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
// Only non-nil fields are written; nested structures delegate to their own
// MarshalFields via e.SetFields.
func (s JobSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.AdAvailOffset != nil {
		v := *s.AdAvailOffset

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "adAvailOffset", protocol.Int64Value(v), metadata)
	}
	if s.AvailBlanking != nil {
		v := s.AvailBlanking

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "availBlanking", v, metadata)
	}
	if s.Esam != nil {
		v := s.Esam

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "esam", v, metadata)
	}
	if s.Inputs != nil {
		v := s.Inputs

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "inputs", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if s.MotionImageInserter != nil {
		v := s.MotionImageInserter

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "motionImageInserter", v, metadata)
	}
	if s.NielsenConfiguration != nil {
		v := s.NielsenConfiguration

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "nielsenConfiguration", v, metadata)
	}
	if s.OutputGroups != nil {
		v := s.OutputGroups

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "outputGroups", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if s.TimecodeConfig != nil {
		v := s.TimecodeConfig

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "timecodeConfig", v, metadata)
	}
	if s.TimedMetadataInsertion != nil {
		v := s.TimedMetadataInsertion

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "timedMetadataInsertion", v, metadata)
	}
	return nil
}

// A job template is a pre-made set of encoding instructions that you can use
// to quickly create a job.
type JobTemplate struct {
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// An optional category you create to organize your job templates.
	Category *string `locationName:"category" type:"string"`

	// The timestamp in epoch seconds for Job template creation.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description you create for each job template.
	Description *string `locationName:"description" type:"string"`

	// Optional list of hop destinations.
	HopDestinations []HopDestination `locationName:"hopDestinations" type:"list"`

	// The timestamp in epoch seconds when the Job template was last updated.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name you create for each job template. Each name must be unique within
	// your account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Relative priority on the job.
	Priority *int64 `locationName:"priority" type:"integer"`

	// Optional. The queue that jobs created from this template are assigned to.
	// If you don't specify this, jobs will go to the default queue.
	Queue *string `locationName:"queue" type:"string"`

	// JobTemplateSettings contains all the transcode settings saved in the template
	// that will be applied to jobs created from it.
	//
	// Settings is a required field
	Settings *JobTemplateSettings `locationName:"settings" type:"structure" required:"true"`

	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
	// Events. Set the interval, in seconds, between status updates. MediaConvert
	// sends an update at this interval from the time the service begins processing
	// your job to the time it completes the transcode or encounters an error.
	StatusUpdateInterval StatusUpdateInterval `locationName:"statusUpdateInterval" type:"string" enum:"true"`

	// A job template can be of two types: system or custom. System or built-in
	// job templates can't be modified or deleted by the user.
	Type Type `locationName:"type" type:"string" enum:"true"`
}

// String returns the string representation
func (s JobTemplate) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s JobTemplate) MarshalFields(e protocol.FieldEncoder) error {
	if s.AccelerationSettings != nil {
		v := s.AccelerationSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "accelerationSettings", v, metadata)
	}
	if s.Arn != nil {
		v := *s.Arn

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Category != nil {
		v := *s.Category

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "category", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.CreatedAt != nil {
		v := *s.CreatedAt

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "createdAt", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.Description != nil {
		v := *s.Description

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.HopDestinations != nil {
		v := s.HopDestinations

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "hopDestinations", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if s.LastUpdated != nil {
		v := *s.LastUpdated

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "lastUpdated", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.Name != nil {
		v := *s.Name

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Priority != nil {
		v := *s.Priority

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "priority", protocol.Int64Value(v), metadata)
	}
	if s.Queue != nil {
		v := *s.Queue

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "queue", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Settings != nil {
		v := s.Settings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "settings", v, metadata)
	}
	if len(s.StatusUpdateInterval) > 0 {
		v := s.StatusUpdateInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "statusUpdateInterval", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Type) > 0 {
		v := s.Type

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "type", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// JobTemplateSettings contains all the transcode settings saved in the template
// that will be applied to jobs created from it.
type JobTemplateSettings struct {
	_ struct{} `type:"structure"`

	// When specified, this offset (in milliseconds) is added to the input Ad Avail
	// PTS time.
	AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"`

	// Settings for ad avail blanking. Video can be blanked or overlaid with an
	// image, and audio muted during SCTE-35 triggered ad avails.
	AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"`

	// Settings for Event Signaling And Messaging (ESAM).
	Esam *EsamSettings `locationName:"esam" type:"structure"`

	// Use Inputs (inputs) to define the source file used in the transcode job.
	// There can only be one input in a job template. Using the API, you can include
	// multiple inputs when referencing a job template.
	Inputs []InputTemplate `locationName:"inputs" type:"list"`

	// Overlay motion graphics on top of your video. The motion graphics that you
	// specify here appear on all outputs in all output groups.
	MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"`

	// Settings for your Nielsen configuration. If you don't do Nielsen measurement
	// and analytics, ignore these settings. When you enable Nielsen configuration
	// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
	// in the job. To enable Nielsen configuration programmatically, include an
	// instance of nielsenConfiguration in your JSON job specification. Even if
	// you don't include any children of nielsenConfiguration, you still enable
	// the setting.
	NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"`

	// (OutputGroups) contains one group of settings for each set of outputs that
	// share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime,
	// MXF, and no container) are grouped in a single output group as well. Required
	// in (OutputGroups) is a group of settings that apply to the whole group. This
	// required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings).
	// Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings
	// * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings
	// * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS,
	// CmafGroupSettings
	OutputGroups []OutputGroup `locationName:"outputGroups" type:"list"`

	// Contains settings used to acquire and adjust timecode information from inputs.
	TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"`

	// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
	// in your job. To include timed metadata, you must enable it here, enable it
	// in each output container, and specify tags and timecodes in ID3 insertion
	// (Id3Insertion) objects.
	TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"`
}

// String returns the string representation
func (s JobTemplateSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// All violations are aggregated into a single aws.ErrInvalidParams; nil is
// returned when everything is valid.
func (s *JobTemplateSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "JobTemplateSettings"}
	if s.AdAvailOffset != nil && *s.AdAvailOffset < -1000 {
		invalidParams.Add(aws.NewErrParamMinValue("AdAvailOffset", -1000))
	}
	if s.AvailBlanking != nil {
		if err := s.AvailBlanking.Validate(); err != nil {
			invalidParams.AddNested("AvailBlanking", err.(aws.ErrInvalidParams))
		}
	}
	if s.Inputs != nil {
		for i, v := range s.Inputs {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Inputs", i), err.(aws.ErrInvalidParams))
			}
		}
	}
	if s.MotionImageInserter != nil {
		if err := s.MotionImageInserter.Validate(); err != nil {
			invalidParams.AddNested("MotionImageInserter", err.(aws.ErrInvalidParams))
		}
	}
	if s.OutputGroups != nil {
		for i, v := range s.OutputGroups {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputGroups", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s JobTemplateSettings) MarshalFields(e protocol.FieldEncoder) error { if s.AdAvailOffset != nil { v := *s.AdAvailOffset metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "adAvailOffset", protocol.Int64Value(v), metadata) } if s.AvailBlanking != nil { v := s.AvailBlanking metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "availBlanking", v, metadata) } if s.Esam != nil { v := s.Esam metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "esam", v, metadata) } if s.Inputs != nil { v := s.Inputs metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "inputs", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } if s.MotionImageInserter != nil { v := s.MotionImageInserter metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "motionImageInserter", v, metadata) } if s.NielsenConfiguration != nil { v := s.NielsenConfiguration metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "nielsenConfiguration", v, metadata) } if s.OutputGroups != nil { v := s.OutputGroups metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "outputGroups", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } if s.TimecodeConfig != nil { v := s.TimecodeConfig metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "timecodeConfig", v, metadata) } if s.TimedMetadataInsertion != nil { v := s.TimedMetadataInsertion metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "timedMetadataInsertion", v, metadata) } return nil } // Settings for SCTE-35 signals from ESAM. Include this in your job settings // to put SCTE-35 markers in your HLS and transport stream outputs at the insertion // points that you specify in an ESAM XML document. Provide the document in // the setting SCC XML (sccXml). 
type M2tsScte35Esam struct {
	_ struct{} `type:"structure"`

	// Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated
	// by ESAM.
	Scte35EsamPid *int64 `locationName:"scte35EsamPid" min:"32" type:"integer"`
}

// String returns the string representation
func (s M2tsScte35Esam) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// The only constraint is the minimum PID value of 32 declared in the field tag.
func (s *M2tsScte35Esam) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "M2tsScte35Esam"}
	if s.Scte35EsamPid != nil && *s.Scte35EsamPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("Scte35EsamPid", 32))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s M2tsScte35Esam) MarshalFields(e protocol.FieldEncoder) error {
	if s.Scte35EsamPid != nil {
		v := *s.Scte35EsamPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "scte35EsamPid", protocol.Int64Value(v), metadata)
	}
	return nil
}

// MPEG-2 TS container settings. These apply to outputs in a File output group
// when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS).
// In these assets, data is organized by the program map table (PMT). Each transport
// stream program contains subsets of data, including audio, video, and metadata.
// Each of these subsets of data has a numerical label called a packet identifier
// (PID). Each transport stream program corresponds to one MediaConvert output.
// The PMT lists the types of data in a program along with their PID. Downstream
// systems and players use the program map table to look up the PID for each
// type of data it accesses and then uses the PIDs to locate specific data within
// the asset.
type M2tsSettings struct {
	_ struct{} `type:"structure"`

	// Selects between the DVB and ATSC buffer models for Dolby Digital audio.
	AudioBufferModel M2tsAudioBufferModel `locationName:"audioBufferModel" type:"string" enum:"true"`

	// The number of audio frames to insert for each PES packet.
	AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"`

	// Specify the packet identifiers (PIDs) for any elementary audio streams you
	// include in this output. Specify multiple PIDs as a JSON array. Default is
	// the range 482-492.
	AudioPids []int64 `locationName:"audioPids" type:"list"`

	// Specify the output bitrate of the transport stream in bits per second. Setting
	// to 0 lets the muxer automatically determine the appropriate bitrate. Other
	// common values are 3750000, 7500000, and 15000000.
	Bitrate *int64 `locationName:"bitrate" type:"integer"`

	// Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX,
	// use multiplex buffer model. If set to NONE, this can lead to lower latency,
	// but low-memory devices may not be able to play back the stream without interruptions.
	BufferModel M2tsBufferModel `locationName:"bufferModel" type:"string" enum:"true"`

	// Inserts DVB Network Information Table (NIT) at the specified table repetition
	// interval.
	DvbNitSettings *DvbNitSettings `locationName:"dvbNitSettings" type:"structure"`

	// Inserts DVB Service Description Table (SDT) at the specified table repetition
	// interval.
	DvbSdtSettings *DvbSdtSettings `locationName:"dvbSdtSettings" type:"structure"`

	// Specify the packet identifiers (PIDs) for DVB subtitle data included in this
	// output. Specify multiple PIDs as a JSON array. Default is the range 460-479.
	DvbSubPids []int64 `locationName:"dvbSubPids" type:"list"`

	// Inserts DVB Time and Date Table (TDT) at the specified table repetition interval.
	DvbTdtSettings *DvbTdtSettings `locationName:"dvbTdtSettings" type:"structure"`

	// Specify the packet identifier (PID) for DVB teletext data you include in
	// this output. Default is 499.
	DvbTeletextPid *int64 `locationName:"dvbTeletextPid" min:"32" type:"integer"`

	// When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to
	// partitions 3 and 4. The interval between these additional markers will be
	// fixed, and will be slightly shorter than the video EBP marker interval. When
	// set to VIDEO_INTERVAL, these additional markers will not be inserted. Only
	// applicable when EBP segmentation markers are selected (segmentationMarkers
	// is EBP or EBP_LEGACY).
	EbpAudioInterval M2tsEbpAudioInterval `locationName:"ebpAudioInterval" type:"string" enum:"true"`

	// Selects which PIDs to place EBP markers on. They can either be placed only
	// on the video PID, or on both the video PID and all audio PIDs. Only applicable
	// when EBP segmentation markers are selected (segmentationMarkers is EBP or
	// EBP_LEGACY).
	EbpPlacement M2tsEbpPlacement `locationName:"ebpPlacement" type:"string" enum:"true"`

	// Controls whether to include the ES Rate field in the PES header.
	EsRateInPes M2tsEsRateInPes `locationName:"esRateInPes" type:"string" enum:"true"`

	// Keep the default value (DEFAULT) unless you know that your audio EBP markers
	// are incorrectly appearing before your video EBP markers. To correct this
	// problem, set this value to Force (FORCE).
	ForceTsVideoEbpOrder M2tsForceTsVideoEbpOrder `locationName:"forceTsVideoEbpOrder" type:"string" enum:"true"`

	// The length, in seconds, of each fragment. Only used with EBP markers.
	FragmentTime *float64 `locationName:"fragmentTime" type:"double"`

	// Specify the maximum time, in milliseconds, between Program Clock References
	// (PCRs) inserted into the transport stream.
	MaxPcrInterval *int64 `locationName:"maxPcrInterval" type:"integer"`

	// When set, enforces that Encoder Boundary Points do not come within the specified
	// time interval of each other by looking ahead at input video. If another EBP
	// is going to come in within the specified time interval, the current EBP is
	// not emitted, and the segment is "stretched" to the next marker. The lookahead
	// value does not add latency to the system. The Live Event must be configured
	// elsewhere to create sufficient latency to make the lookahead accurate.
	MinEbpInterval *int64 `locationName:"minEbpInterval" type:"integer"`

	// If INSERT, Nielsen inaudible tones for media tracking will be detected in
	// the input audio and an equivalent ID3 tag will be inserted in the output.
	NielsenId3 M2tsNielsenId3 `locationName:"nielsenId3" type:"string" enum:"true"`

	// Value in bits per second of extra null packets to insert into the transport
	// stream. This can be used if a downstream encryption system requires periodic
	// null packets.
	NullPacketBitrate *float64 `locationName:"nullPacketBitrate" type:"double"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	PatInterval *int64 `locationName:"patInterval" type:"integer"`

	// When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted
	// for every Packetized Elementary Stream (PES) header. This is effective only
	// when the PCR PID is the same as the video or audio elementary stream.
	PcrControl M2tsPcrControl `locationName:"pcrControl" type:"string" enum:"true"`

	// Specify the packet identifier (PID) for the program clock reference (PCR)
	// in this output. If you do not specify a value, the service will use the value
	// for Video PID (VideoPid).
	PcrPid *int64 `locationName:"pcrPid" min:"32" type:"integer"`

	// Specify the number of milliseconds between instances of the program map table
	// (PMT) in the output transport stream.
	PmtInterval *int64 `locationName:"pmtInterval" type:"integer"`

	// Specify the packet identifier (PID) for the program map table (PMT) itself.
	// Default is 480.
PmtPid *int64 `locationName:"pmtPid" min:"32" type:"integer"` // Specify the packet identifier (PID) of the private metadata stream. Default // is 503. PrivateMetadataPid *int64 `locationName:"privateMetadataPid" min:"32" type:"integer"` // Use Program number (programNumber) to specify the program number used in // the program map table (PMT) for this output. Default is 1. Program numbers // and program map tables are parts of MPEG-2 transport stream containers, used // for organizing data. ProgramNumber *int64 `locationName:"programNumber" type:"integer"` // When set to CBR, inserts null packets into transport stream to fill specified // bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate, // but the output will not be padded up to that bitrate. RateMode M2tsRateMode `locationName:"rateMode" type:"string" enum:"true"` // Include this in your job settings to put SCTE-35 markers in your HLS and // transport stream outputs at the insertion points that you specify in an ESAM // XML document. Provide the document in the setting SCC XML (sccXml). Scte35Esam *M2tsScte35Esam `locationName:"scte35Esam" type:"structure"` // Specify the packet identifier (PID) of the SCTE-35 stream in the transport // stream. Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"` // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if // you want SCTE-35 markers that appear in your input to also appear in this // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. // For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also // provide the ESAM XML as a string in the setting Signal processing notification // XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam). Scte35Source M2tsScte35Source `locationName:"scte35Source" type:"string" enum:"true"` // Inserts segmentation markers at each segmentation_time period. 
rai_segstart // sets the Random Access Indicator bit in the adaptation field. rai_adapt sets // the RAI bit and adds the current timecode in the private data bytes. psi_segstart // inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary // Point information to the adaptation field as per OpenCable specification // OC-SP-EBP-I01-130118. ebp_legacy adds Encoder Boundary Point information // to the adaptation field using a legacy proprietary format. SegmentationMarkers M2tsSegmentationMarkers `locationName:"segmentationMarkers" type:"string" enum:"true"` // The segmentation style parameter controls how segmentation markers are inserted // into the transport stream. With avails, it is possible that segments may // be truncated, which can influence where future segmentation markers are inserted. // When a segmentation style of "reset_cadence" is selected and a segment is // truncated due to an avail, we will reset the segmentation cadence. This means // the subsequent segment will have a duration of of $segmentation_time seconds. // When a segmentation style of "maintain_cadence" is selected and a segment // is truncated due to an avail, we will not reset the segmentation cadence. // This means the subsequent segment will likely be truncated as well. However, // all segments after that will have a duration of $segmentation_time seconds. // Note that EBP lookahead is a slight exception to this rule. SegmentationStyle M2tsSegmentationStyle `locationName:"segmentationStyle" type:"string" enum:"true"` // Specify the length, in seconds, of each segment. Required unless markers // is set to _none_. SegmentationTime *float64 `locationName:"segmentationTime" type:"double"` // Specify the packet identifier (PID) for timed metadata in this output. Default // is 502. TimedMetadataPid *int64 `locationName:"timedMetadataPid" min:"32" type:"integer"` // Specify the ID for the transport stream itself in the program map table for // this output. 
// Transport stream IDs and program map tables are parts of MPEG-2
	// transport stream containers, used for organizing data.
	TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"`

	// Specify the packet identifier (PID) of the elementary video stream in the
	// transport stream.
	VideoPid *int64 `locationName:"videoPid" min:"32" type:"integer"`
}

// String returns the string representation
func (s M2tsSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Only client-side constraints are enforced here: PID fields must be >= 32
// (per their min:"32" tags) and nested DVB/SCTE-35 ESAM settings are validated
// recursively. Enum string values are not checked client-side.
func (s *M2tsSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "M2tsSettings"}
	if s.DvbTeletextPid != nil && *s.DvbTeletextPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("DvbTeletextPid", 32))
	}
	if s.PcrPid != nil && *s.PcrPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("PcrPid", 32))
	}
	if s.PmtPid != nil && *s.PmtPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("PmtPid", 32))
	}
	if s.PrivateMetadataPid != nil && *s.PrivateMetadataPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("PrivateMetadataPid", 32))
	}
	if s.Scte35Pid != nil && *s.Scte35Pid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("Scte35Pid", 32))
	}
	if s.TimedMetadataPid != nil && *s.TimedMetadataPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("TimedMetadataPid", 32))
	}
	if s.VideoPid != nil && *s.VideoPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("VideoPid", 32))
	}
	// Nested structures carry their own constraints; their failures are
	// surfaced under this shape's context via AddNested.
	if s.DvbNitSettings != nil {
		if err := s.DvbNitSettings.Validate(); err != nil {
			invalidParams.AddNested("DvbNitSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.DvbSdtSettings != nil {
		if err := s.DvbSdtSettings.Validate(); err != nil {
			invalidParams.AddNested("DvbSdtSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.DvbTdtSettings != nil {
		if err := s.DvbTdtSettings.Validate(); err != nil {
			invalidParams.AddNested("DvbTdtSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Scte35Esam != nil {
		if err := s.Scte35Esam.Validate(); err != nil {
			invalidParams.AddNested("Scte35Esam", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only fields that are set are emitted: pointer fields are written when
// non-nil, enum fields when non-empty, and list/struct fields when non-nil.
func (s M2tsSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.AudioBufferModel) > 0 {
		v := s.AudioBufferModel

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioBufferModel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.AudioFramesPerPes != nil {
		v := *s.AudioFramesPerPes

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioFramesPerPes", protocol.Int64Value(v), metadata)
	}
	if s.AudioPids != nil {
		v := s.AudioPids

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "audioPids", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.Int64Value(v1))
		}
		ls0.End()

	}
	if s.Bitrate != nil {
		v := *s.Bitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.BufferModel) > 0 {
		v := s.BufferModel

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bufferModel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.DvbNitSettings != nil {
		v := s.DvbNitSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "dvbNitSettings", v, metadata)
	}
	if s.DvbSdtSettings != nil {
		v := s.DvbSdtSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "dvbSdtSettings", v, metadata)
	}
	if s.DvbSubPids != nil {
		v := s.DvbSubPids

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "dvbSubPids", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.Int64Value(v1))
		}
		ls0.End()

	}
	if s.DvbTdtSettings != nil {
		v := s.DvbTdtSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "dvbTdtSettings", v, metadata)
	}
	if s.DvbTeletextPid != nil {
		v := *s.DvbTeletextPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dvbTeletextPid", protocol.Int64Value(v), metadata)
	}
	if len(s.EbpAudioInterval) > 0 {
		v := s.EbpAudioInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "ebpAudioInterval", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.EbpPlacement) > 0 {
		v := s.EbpPlacement

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "ebpPlacement", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.EsRateInPes) > 0 {
		v := s.EsRateInPes

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "esRateInPes", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.ForceTsVideoEbpOrder) > 0 {
		v := s.ForceTsVideoEbpOrder

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "forceTsVideoEbpOrder", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FragmentTime != nil {
		v := *s.FragmentTime

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fragmentTime", protocol.Float64Value(v), metadata)
	}
	if s.MaxPcrInterval != nil {
		v := *s.MaxPcrInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxPcrInterval", protocol.Int64Value(v), metadata)
	}
	if s.MinEbpInterval != nil {
		v := *s.MinEbpInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "minEbpInterval", protocol.Int64Value(v), metadata)
	}
	if len(s.NielsenId3) > 0 {
		v := s.NielsenId3

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "nielsenId3", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.NullPacketBitrate != nil {
		v := *s.NullPacketBitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "nullPacketBitrate", protocol.Float64Value(v), metadata)
	}
	if s.PatInterval != nil {
		v := *s.PatInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "patInterval", protocol.Int64Value(v), metadata)
	}
	if len(s.PcrControl) > 0 {
		v := s.PcrControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pcrControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.PcrPid != nil {
		v := *s.PcrPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pcrPid", protocol.Int64Value(v), metadata)
	}
	if s.PmtInterval != nil {
		v := *s.PmtInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pmtInterval", protocol.Int64Value(v), metadata)
	}
	if s.PmtPid != nil {
		v := *s.PmtPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pmtPid", protocol.Int64Value(v), metadata)
	}
	if s.PrivateMetadataPid != nil {
		v := *s.PrivateMetadataPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "privateMetadataPid", protocol.Int64Value(v), metadata)
	}
	if s.ProgramNumber != nil {
		v := *s.ProgramNumber

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "programNumber", protocol.Int64Value(v), metadata)
	}
	if len(s.RateMode) > 0 {
		v := s.RateMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Scte35Esam != nil {
		v := s.Scte35Esam

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "scte35Esam", v, metadata)
	}
	if s.Scte35Pid != nil {
		v := *s.Scte35Pid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "scte35Pid", protocol.Int64Value(v), metadata)
	}
	if len(s.Scte35Source) > 0 {
		v := s.Scte35Source

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "scte35Source", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SegmentationMarkers) > 0 {
		v := s.SegmentationMarkers

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "segmentationMarkers", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SegmentationStyle) > 0 {
		v := s.SegmentationStyle

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "segmentationStyle", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SegmentationTime != nil {
		v := *s.SegmentationTime

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "segmentationTime", protocol.Float64Value(v), metadata)
	}
	if s.TimedMetadataPid != nil {
		v := *s.TimedMetadataPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timedMetadataPid", protocol.Int64Value(v), metadata)
	}
	if s.TransportStreamId != nil {
		v := *s.TransportStreamId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "transportStreamId", protocol.Int64Value(v), metadata)
	}
	if s.VideoPid != nil {
		v := *s.VideoPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "videoPid", protocol.Int64Value(v), metadata)
	}

	return nil
}

// Settings for TS segments in HLS
type M3u8Settings struct {
	_ struct{} `type:"structure"`

	// The number of audio frames to insert for each PES packet.
	AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"`

	// Packet Identifier (PID) of the elementary audio stream(s) in the transport
	// stream. Multiple values are accepted, and can be entered in ranges and/or
	// by comma separation.
	AudioPids []int64 `locationName:"audioPids" type:"list"`

	// If INSERT, Nielsen inaudible tones for media tracking will be detected in
	// the input audio and an equivalent ID3 tag will be inserted in the output.
	NielsenId3 M3u8NielsenId3 `locationName:"nielsenId3" type:"string" enum:"true"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	PatInterval *int64 `locationName:"patInterval" type:"integer"`

	// When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted
	// for every Packetized Elementary Stream (PES) header. This parameter is effective
	// only when the PCR PID is the same as the video or audio elementary stream.
	PcrControl M3u8PcrControl `locationName:"pcrControl" type:"string" enum:"true"`

	// Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport
	// stream. When no value is given, the encoder will assign the same value as
	// the Video PID.
	PcrPid *int64 `locationName:"pcrPid" min:"32" type:"integer"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	PmtInterval *int64 `locationName:"pmtInterval" type:"integer"`

	// Packet Identifier (PID) for the Program Map Table (PMT) in the transport
	// stream.
	PmtPid *int64 `locationName:"pmtPid" min:"32" type:"integer"`

	// Packet Identifier (PID) of the private metadata stream in the transport stream.
	PrivateMetadataPid *int64 `locationName:"privateMetadataPid" min:"32" type:"integer"`

	// The value of the program number field in the Program Map Table.
	ProgramNumber *int64 `locationName:"programNumber" type:"integer"`

	// Packet Identifier (PID) of the SCTE-35 stream in the transport stream.
	Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"`

	// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if
	// you want SCTE-35 markers that appear in your input to also appear in this
	// output. Choose None (NONE) if you don't want SCTE-35 markers in this output.
	// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you
	// don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose
	// Ad markers (adMarkers) if you do want manifest conditioning. In both cases,
	// also provide the ESAM XML as a string in the setting Signal processing notification
	// XML (sccXml).
	Scte35Source M3u8Scte35Source `locationName:"scte35Source" type:"string" enum:"true"`

	// Applies only to HLS outputs. Use this setting to specify whether the service
	// inserts the ID3 timed metadata from the input in this output.
	TimedMetadata TimedMetadata `locationName:"timedMetadata" type:"string" enum:"true"`

	// Packet Identifier (PID) of the timed metadata stream in the transport stream.
	TimedMetadataPid *int64 `locationName:"timedMetadataPid" min:"32" type:"integer"`

	// The value of the transport stream ID field in the Program Map Table.
	TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"`

	// Packet Identifier (PID) of the elementary video stream in the transport stream.
	VideoPid *int64 `locationName:"videoPid" min:"32" type:"integer"`
}

// String returns the string representation
func (s M3u8Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the min:"32" PID constraints are enforced client-side.
func (s *M3u8Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "M3u8Settings"}
	if s.PcrPid != nil && *s.PcrPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("PcrPid", 32))
	}
	if s.PmtPid != nil && *s.PmtPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("PmtPid", 32))
	}
	if s.PrivateMetadataPid != nil && *s.PrivateMetadataPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("PrivateMetadataPid", 32))
	}
	if s.Scte35Pid != nil && *s.Scte35Pid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("Scte35Pid", 32))
	}
	if s.TimedMetadataPid != nil && *s.TimedMetadataPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("TimedMetadataPid", 32))
	}
	if s.VideoPid != nil && *s.VideoPid < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("VideoPid", 32))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only fields that are set (non-nil pointers, non-empty enums, non-nil lists)
// are written to the request body.
func (s M3u8Settings) MarshalFields(e protocol.FieldEncoder) error {
	if s.AudioFramesPerPes != nil {
		v := *s.AudioFramesPerPes

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioFramesPerPes", protocol.Int64Value(v), metadata)
	}
	if s.AudioPids != nil {
		v := s.AudioPids

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "audioPids", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.Int64Value(v1))
		}
		ls0.End()

	}
	if len(s.NielsenId3) > 0 {
		v := s.NielsenId3

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "nielsenId3", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.PatInterval != nil {
		v := *s.PatInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "patInterval", protocol.Int64Value(v), metadata)
	}
	if len(s.PcrControl) > 0 {
		v := s.PcrControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pcrControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.PcrPid != nil {
		v := *s.PcrPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pcrPid", protocol.Int64Value(v), metadata)
	}
	if s.PmtInterval != nil {
		v := *s.PmtInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pmtInterval", protocol.Int64Value(v), metadata)
	}
	if s.PmtPid != nil {
		v := *s.PmtPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pmtPid", protocol.Int64Value(v), metadata)
	}
	if s.PrivateMetadataPid != nil {
		v := *s.PrivateMetadataPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "privateMetadataPid", protocol.Int64Value(v), metadata)
	}
	if s.ProgramNumber != nil {
		v := *s.ProgramNumber

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "programNumber", protocol.Int64Value(v), metadata)
	}
	if s.Scte35Pid != nil {
		v := *s.Scte35Pid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "scte35Pid", protocol.Int64Value(v), metadata)
	}
	if len(s.Scte35Source) > 0 {
		v := s.Scte35Source

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "scte35Source", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.TimedMetadata) > 0 {
		v := s.TimedMetadata

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timedMetadata", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.TimedMetadataPid != nil {
		v := *s.TimedMetadataPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timedMetadataPid", protocol.Int64Value(v), metadata)
	}
	if s.TransportStreamId != nil {
		v := *s.TransportStreamId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "transportStreamId", protocol.Int64Value(v), metadata)
	}
	if s.VideoPid != nil {
		v := *s.VideoPid

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "videoPid", protocol.Int64Value(v), metadata)
	}

	return nil
}

// Overlay motion graphics on top of your video at the time that you specify.
type MotionImageInserter struct {
	_ struct{} `type:"structure"`

	// If your motion graphic asset is a .mov file, keep this setting unspecified.
	// If your motion graphic asset is a series of .png files, specify the frame
	// rate of the overlay in frames per second, as a fraction. For example, specify
	// 24 fps as 24/1. Make sure that the number of images in your series matches
	// the frame rate and your intended overlay duration. For example, if you want
	// a 30-second overlay at 30 fps, you should have 900 .png images. This overlay
	// frame rate doesn't need to match the frame rate of the underlying video.
	Framerate *MotionImageInsertionFramerate `locationName:"framerate" type:"structure"`

	// Specify the .mov file or series of .png files that you want to overlay on
	// your video. For .png files, provide the file name of the first file in the
	// series. Make sure that the names of the .png files end with sequential numbers
	// that specify the order that they are played in. For example, overlay_000.png,
	// overlay_001.png, overlay_002.png, and so on. The sequence must start at zero,
	// and each image file name must have the same number of digits. Pad your initial
	// file names with enough zeros to complete the sequence. For example, if the
	// first image is overlay_0.png, there can be only 10 images in the sequence,
	// with the last image being overlay_9.png. But if the first image is overlay_00.png,
	// there can be 100 images in the sequence.
	Input *string `locationName:"input" min:"14" type:"string"`

	// Choose the type of motion graphic asset that you are providing for your overlay.
	// You can choose either a .mov file or a series of .png files.
	InsertionMode MotionImageInsertionMode `locationName:"insertionMode" type:"string" enum:"true"`

	// Use Offset to specify the placement of your motion graphic overlay on the
	// video frame. Specify in pixels, from the upper-left corner of the frame.
	// If you don't specify an offset, the service scales your overlay to the full
	// size of the frame. Otherwise, the service inserts the overlay at its native
	// resolution and scales the size up or down with any video scaling.
	Offset *MotionImageInsertionOffset `locationName:"offset" type:"structure"`

	// Specify whether your motion graphic overlay repeats on a loop or plays only
	// once.
	Playback MotionImagePlayback `locationName:"playback" type:"string" enum:"true"`

	// Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF
	// or HH:MM:SS;FF). Make sure that the timecode you provide here takes into
	// account how you have set up your timecode configuration under both job settings
	// and input settings. The simplest way to do that is to set both to start at
	// 0. If you need to set up your job to follow timecodes embedded in your source
	// that don't start at zero, make sure that you specify a start time that is
	// after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html
	// Find job-wide and input timecode configuration settings in your JSON job
	// settings specification at settings>timecodeConfig>source and settings>inputs>timecodeSource.
	StartTime *string `locationName:"startTime" min:"11" type:"string"`
}

// String returns the string representation
func (s MotionImageInserter) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Enforces the min-length tags on Input (14) and StartTime (11) and recursively
// validates the nested Framerate settings.
func (s *MotionImageInserter) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "MotionImageInserter"}
	if s.Input != nil && len(*s.Input) < 14 {
		invalidParams.Add(aws.NewErrParamMinLen("Input", 14))
	}
	if s.StartTime != nil && len(*s.StartTime) < 11 {
		invalidParams.Add(aws.NewErrParamMinLen("StartTime", 11))
	}
	if s.Framerate != nil {
		if err := s.Framerate.Validate(); err != nil {
			invalidParams.AddNested("Framerate", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only fields that are set are written to the request body.
func (s MotionImageInserter) MarshalFields(e protocol.FieldEncoder) error {
	if s.Framerate != nil {
		v := s.Framerate

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "framerate", v, metadata)
	}
	if s.Input != nil {
		v := *s.Input

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "input", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.InsertionMode) > 0 {
		v := s.InsertionMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "insertionMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Offset != nil {
		v := s.Offset

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "offset", v, metadata)
	}
	if len(s.Playback) > 0 {
		v := s.Playback

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "playback", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.StartTime != nil {
		v := *s.StartTime

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "startTime", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}

	return nil
}

// For motion overlays that don't have a built-in frame rate, specify the frame
// rate of the overlay in frames per second, as a fraction. For example, specify
// 24 fps as 24/1. The overlay frame rate doesn't need to match the frame rate
// of the underlying video.
type MotionImageInsertionFramerate struct {
	_ struct{} `type:"structure"`

	// The bottom of the fraction that expresses your overlay frame rate. For example,
	// if your frame rate is 24 fps, set this value to 1.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// The top of the fraction that expresses your overlay frame rate. For example,
	// if your frame rate is 24 fps, set this value to 24.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`
}

// String returns the string representation
func (s MotionImageInsertionFramerate) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Both fraction terms must be at least 1 when set.
func (s *MotionImageInsertionFramerate) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "MotionImageInsertionFramerate"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s MotionImageInsertionFramerate) MarshalFields(e protocol.FieldEncoder) error {
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}

	return nil
}

// Specify the offset between the upper-left corner of the video frame and the
// top left corner of the overlay.
type MotionImageInsertionOffset struct {
	_ struct{} `type:"structure"`

	// Set the distance, in pixels, between the overlay and the left edge of the
	// video frame.
	ImageX *int64 `locationName:"imageX" type:"integer"`

	// Set the distance, in pixels, between the overlay and the top edge of the
	// video frame.
	ImageY *int64 `locationName:"imageY" type:"integer"`
}

// String returns the string representation
func (s MotionImageInsertionOffset) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s MotionImageInsertionOffset) MarshalFields(e protocol.FieldEncoder) error {
	if s.ImageX != nil {
		v := *s.ImageX

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "imageX", protocol.Int64Value(v), metadata)
	}
	if s.ImageY != nil {
		v := *s.ImageY

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "imageY", protocol.Int64Value(v), metadata)
	}

	return nil
}

// Settings for MOV Container.
type MovSettings struct {
	_ struct{} `type:"structure"`

	// When enabled, include 'clap' atom if appropriate for the video output settings.
	ClapAtom MovClapAtom `locationName:"clapAtom" type:"string" enum:"true"`

	// When enabled, file composition times will start at zero, composition times
	// in the 'ctts' (composition time to sample) box for B-frames will be negative,
	// and a 'cslg' (composition shift least greatest) box will be included per
	// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
	CslgAtom MovCslgAtom `locationName:"cslgAtom" type:"string" enum:"true"`

	// When set to XDCAM, writes MPEG2 video streams into the QuickTime file using
	// XDCAM fourcc codes. This increases compatibility with Apple editors and players,
	// but may decrease compatibility with other players. Only applicable when the
	// video codec is MPEG2.
	Mpeg2FourCCControl MovMpeg2FourCCControl `locationName:"mpeg2FourCCControl" type:"string" enum:"true"`

	// If set to OMNEON, inserts Omneon-compatible padding
	PaddingControl MovPaddingControl `locationName:"paddingControl" type:"string" enum:"true"`

	// Always keep the default value (SELF_CONTAINED) for this setting.
	Reference MovReference `locationName:"reference" type:"string" enum:"true"`
}

// String returns the string representation
func (s MovSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s MovSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.ClapAtom) > 0 {
		v := s.ClapAtom

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "clapAtom", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.CslgAtom) > 0 {
		v := s.CslgAtom

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "cslgAtom", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Mpeg2FourCCControl) > 0 {
		v := s.Mpeg2FourCCControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "mpeg2FourCCControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.PaddingControl) > 0 {
		v := s.PaddingControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "paddingControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Reference) > 0 {
		v := s.Reference

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "reference", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}

	return nil
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value MP2.
type Mp2Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second.
	Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"`

	// Set Channels to specify the number of channels in this output audio track.
	// Choosing Mono in the console will give you 1 output channel; choosing Stereo
	// will give you 2. In the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Sample rate in hz.
	SampleRate *int64 `locationName:"sampleRate" min:"32000" type:"integer"`
}

// String returns the string representation
func (s Mp2Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Enforces the min-value tags: Bitrate >= 32000, Channels >= 1, SampleRate >= 32000.
func (s *Mp2Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Mp2Settings"}
	if s.Bitrate != nil && *s.Bitrate < 32000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 32000))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 32000 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 32000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Mp2Settings) MarshalFields(e protocol.FieldEncoder) error {
	if s.Bitrate != nil {
		v := *s.Bitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if s.Channels != nil {
		v := *s.Channels

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "channels", protocol.Int64Value(v), metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}

	return nil
}

// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value MP3.
type Mp3Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second.
	Bitrate *int64 `locationName:"bitrate" min:"16000" type:"integer"`

	// Specify the number of channels in this output audio track. Choosing Mono
	// on the console gives you 1 output channel; choosing Stereo gives you 2. In
	// the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Specify whether the service encodes this MP3 audio output with a constant
	// bitrate (CBR) or a variable bitrate (VBR).
	RateControlMode Mp3RateControlMode `locationName:"rateControlMode" type:"string" enum:"true"`

	// Sample rate in hz.
	SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"`

	// Required when you set Bitrate control mode (rateControlMode) to VBR. Specify
	// the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest
	// quality).
	VbrQuality *int64 `locationName:"vbrQuality" type:"integer"`
}

// String returns the string representation
func (s Mp3Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Enforces the min-value tags: Bitrate >= 16000, Channels >= 1, SampleRate >= 22050.
// VbrQuality is not range-checked client-side.
func (s *Mp3Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Mp3Settings"}
	if s.Bitrate != nil && *s.Bitrate < 16000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 16000))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 22050 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 22050))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Mp3Settings) MarshalFields(e protocol.FieldEncoder) error {
	if s.Bitrate != nil {
		v := *s.Bitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if s.Channels != nil {
		v := *s.Channels

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "channels", protocol.Int64Value(v), metadata)
	}
	// Enum values are strings; emit only when non-empty.
	if len(s.RateControlMode) > 0 {
		v := s.RateControlMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateControlMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}
	if s.VbrQuality != nil {
		v := *s.VbrQuality

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "vbrQuality", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Settings for MP4 container. You can create audio-only AAC outputs with this
// container.
type Mp4Settings struct {
	_ struct{} `type:"structure"`

	// When enabled, file composition times will start at zero, composition times
	// in the 'ctts' (composition time to sample) box for B-frames will be negative,
	// and a 'cslg' (composition shift least greatest) box will be included per
	// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
	CslgAtom Mp4CslgAtom `locationName:"cslgAtom" type:"string" enum:"true"`

	// Ignore this setting unless compliance to the CTTS box version specification
	// matters in your workflow. Specify a value of 1 to set your CTTS box version
	// to 1 and make your output compliant with the specification. When you specify
	// a value of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE.
	// Keep the default value 0 to set your CTTS box version to 0. This can provide
	// backward compatibility for some players and packagers.
	CttsVersion *int64 `locationName:"cttsVersion" type:"integer"`

	// Inserts a free-space box immediately after the moov box.
	FreeSpaceBox Mp4FreeSpaceBox `locationName:"freeSpaceBox" type:"string" enum:"true"`

	// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
	// of the archive as required for progressive downloading. Otherwise it is placed
	// normally at the end.
	MoovPlacement Mp4MoovPlacement `locationName:"moovPlacement" type:"string" enum:"true"`

	// Overrides the "Major Brand" field in the output file. Usually not necessary
	// to specify.
	Mp4MajorBrand *string `locationName:"mp4MajorBrand" type:"string"`
}

// String returns the string representation
func (s Mp4Settings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Enum fields are emitted only when non-empty; pointer fields only when non-nil.
func (s Mp4Settings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.CslgAtom) > 0 {
		v := s.CslgAtom
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "cslgAtom", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.CttsVersion != nil {
		v := *s.CttsVersion

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "cttsVersion", protocol.Int64Value(v), metadata)
	}
	if len(s.FreeSpaceBox) > 0 {
		v := s.FreeSpaceBox
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "freeSpaceBox", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.MoovPlacement) > 0 {
		v := s.MoovPlacement
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "moovPlacement", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Mp4MajorBrand != nil {
		v := *s.Mp4MajorBrand

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "mp4MajorBrand", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Settings for MP4 segments in DASH
type MpdSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only in DASH output groups that include sidecar TTML or
	// IMSC captions. You specify sidecar captions in a separate output from your
	// audio and video. Choose Raw (RAW) for captions in a single XML file in a
	// raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in
	// XML format contained within fragmented MP4 files. This set of fragmented
	// MP4 files is separate from your video and audio fragmented MP4 files.
	CaptionContainerType MpdCaptionContainerType `locationName:"captionContainerType" type:"string" enum:"true"`

	// Use this setting only when you specify SCTE-35 markers from ESAM. Choose
	// INSERT to put SCTE-35 markers in this output at the insertion points that
	// you specify in an ESAM XML document. Provide the document in the setting
	// SCC XML (sccXml).
	Scte35Esam MpdScte35Esam `locationName:"scte35Esam" type:"string" enum:"true"`

	// Ignore this setting unless you have SCTE-35 markers in your input video file.
	// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
	// in your input to also appear in this output. Choose None (NONE) if you don't
	// want those SCTE-35 markers in this output.
	Scte35Source MpdScte35Source `locationName:"scte35Source" type:"string" enum:"true"`
}

// String returns the string representation
func (s MpdSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s MpdSettings) MarshalFields(e protocol.FieldEncoder) error {
	// All three fields are enums; emit only when non-empty.
	if len(s.CaptionContainerType) > 0 {
		v := s.CaptionContainerType
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "captionContainerType", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Scte35Esam) > 0 {
		v := s.Scte35Esam
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "scte35Esam", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Scte35Source) > 0 {
		v := s.Scte35Source
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "scte35Source", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value MPEG2.
type Mpeg2Settings struct {
	_ struct{} `type:"structure"`

	// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual
	// quality.
	AdaptiveQuantization Mpeg2AdaptiveQuantization `locationName:"adaptiveQuantization" type:"string" enum:"true"`

	// Specify the average bitrate in bits per second. Required for VBR and CBR.
	// For MS Smooth outputs, bitrates must be unique when rounded down to the nearest
	// multiple of 1000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output.
	CodecLevel Mpeg2CodecLevel `locationName:"codecLevel" type:"string" enum:"true"`

	// Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output.
	CodecProfile Mpeg2CodecProfile `locationName:"codecProfile" type:"string" enum:"true"`

	// Choose Adaptive to improve subjective video quality for high-motion content.
	// This will cause the service to use fewer B-frames (which infer information
	// based on other frames) for high-motion portions of the video and more B-frames
	// for low-motion portions. The maximum number of B-frames is limited by the
	// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
	DynamicSubGop Mpeg2DynamicSubGop `locationName:"dynamicSubGop" type:"string" enum:"true"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl Mpeg2FramerateControl `locationName:"framerateControl" type:"string" enum:"true"`

	// Optional. Specify how the transcoder performs framerate conversion. The default
	// behavior is to use duplicate drop conversion.
	FramerateConversionAlgorithm Mpeg2FramerateConversionAlgorithm `locationName:"framerateConversionAlgorithm" type:"string" enum:"true"`

	// Frame rate denominator.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976
	// fps.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// GOP Length (keyframe interval) in frames or seconds. Must be greater than
	// zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If
	// seconds the system will convert the GOP Size into a frame count at run time.
	GopSizeUnits Mpeg2GopSizeUnits `locationName:"gopSizeUnits" type:"string" enum:"true"`

	// Percentage of the buffer that should initially be filled (HRD buffer model).
	HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Use Interlace mode (InterlaceMode) to choose the scan line type for the output.
	// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce
	// interlaced output with the entire output having the same field polarity (top
	// or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default
	// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore,
	// behavior depends on the input scan type. - If the source is interlaced, the
	// output will be interlaced with the same polarity as the source (it will follow
	// the source). The output could therefore be a mix of "top field first" and
	// "bottom field first". - If the source is progressive, the output will be
	// interlaced with "top field first" or "bottom field first" polarity, depending
	// on which of the Follow options you chose.
	InterlaceMode Mpeg2InterlaceMode `locationName:"interlaceMode" type:"string" enum:"true"`

	// Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision
	// for intra-block DC coefficients. If you choose the value auto, the service
	// will automatically select the precision based on the per-frame compression
	// ratio.
	IntraDcPrecision Mpeg2IntraDcPrecision `locationName:"intraDcPrecision" type:"string" enum:"true"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Enforces separation between repeated (cadence) I-frames and I-frames inserted
	// by Scene Change Detection. If a scene change I-frame is within I-interval
	// frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene
	// change I-frame. GOP stretch requires enabling lookahead as well as setting
	// I-interval. The normal cadence resumes for the next GOP. This setting is
	// only used when Scene Change Detect is enabled. Note: Maximum GOP stretch
	// = GOP size + Min-I-interval - 1
	MinIInterval *int64 `locationName:"minIInterval" type:"integer"`

	// Number of B-frames between reference frames.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl Mpeg2ParControl `locationName:"parControl" type:"string" enum:"true"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel Mpeg2QualityTuningLevel `locationName:"qualityTuningLevel" type:"string" enum:"true"`

	// Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate
	// is variable (vbr) or constant (cbr).
	RateControlMode Mpeg2RateControlMode `locationName:"rateControlMode" type:"string" enum:"true"`

	// Enable this setting to insert I-frames at scene changes that the service
	// automatically detects. This improves video quality and is enabled by default.
	SceneChangeDetect Mpeg2SceneChangeDetect `locationName:"sceneChangeDetect" type:"string" enum:"true"`

	// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled
	// as 25fps, and audio is sped up correspondingly.
	SlowPal Mpeg2SlowPal `locationName:"slowPal" type:"string" enum:"true"`

	// Softness. Selects quantizer matrix, larger values reduce high-frequency content
	// in the encoded image.
	Softness *int64 `locationName:"softness" type:"integer"`

	// Adjust quantization within each frame based on spatial variation of content
	// complexity.
	SpatialAdaptiveQuantization Mpeg2SpatialAdaptiveQuantization `locationName:"spatialAdaptiveQuantization" type:"string" enum:"true"`

	// Produces a Type D-10 compatible bitstream (SMPTE 356M-2001).
	Syntax Mpeg2Syntax `locationName:"syntax" type:"string" enum:"true"`

	// Only use Telecine (Mpeg2Telecine) when you set Framerate (Framerate) to 29.970.
	// Set Telecine (Mpeg2Telecine) to Hard (hard) to produce a 29.97i output from
	// a 23.976 input. Set it to Soft (soft) to produce 23.976 output and leave
	// conversion to the player.
	Telecine Mpeg2Telecine `locationName:"telecine" type:"string" enum:"true"`

	// Adjust quantization within each frame based on temporal variation of content
	// complexity.
	TemporalAdaptiveQuantization Mpeg2TemporalAdaptiveQuantization `locationName:"temporalAdaptiveQuantization" type:"string" enum:"true"`
}

// String returns the string representation
func (s Mpeg2Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// The checks mirror the min:"..." constraints declared in the struct tags;
// nil (unset) fields are not validated.
func (s *Mpeg2Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Mpeg2Settings"}
	if s.Bitrate != nil && *s.Bitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 1000))
	}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 24))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParNumerator", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Enum fields are emitted only when non-empty; pointer fields only when non-nil.
// Fields are written in the fixed order below, under the JSON keys given by
// the locationName struct tags.
func (s Mpeg2Settings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.AdaptiveQuantization) > 0 {
		v := s.AdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "adaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Bitrate != nil {
		v := *s.Bitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.CodecLevel) > 0 {
		v := s.CodecLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecLevel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.CodecProfile) > 0 {
		v := s.CodecProfile
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecProfile", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.DynamicSubGop) > 0 {
		v := s.DynamicSubGop
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dynamicSubGop", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateControl) > 0 {
		v := s.FramerateControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateConversionAlgorithm) > 0 {
		v := s.FramerateConversionAlgorithm
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateConversionAlgorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}
	if s.GopClosedCadence != nil {
		v := *s.GopClosedCadence

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopClosedCadence", protocol.Int64Value(v), metadata)
	}
	// GopSize is the only float-valued field in this shape.
	if s.GopSize != nil {
		v := *s.GopSize

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSize", protocol.Float64Value(v), metadata)
	}
	if len(s.GopSizeUnits) > 0 {
		v := s.GopSizeUnits
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSizeUnits", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.HrdBufferInitialFillPercentage != nil {
		v := *s.HrdBufferInitialFillPercentage

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hrdBufferInitialFillPercentage", protocol.Int64Value(v), metadata)
	}
	if s.HrdBufferSize != nil {
		v := *s.HrdBufferSize

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hrdBufferSize", protocol.Int64Value(v), metadata)
	}
	if len(s.InterlaceMode) > 0 {
		v := s.InterlaceMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "interlaceMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.IntraDcPrecision) > 0 {
		v := s.IntraDcPrecision
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "intraDcPrecision", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.MaxBitrate != nil {
		v := *s.MaxBitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxBitrate", protocol.Int64Value(v), metadata)
	}
	if s.MinIInterval != nil {
		v := *s.MinIInterval

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "minIInterval", protocol.Int64Value(v), metadata)
	}
	if s.NumberBFramesBetweenReferenceFrames != nil {
		v := *s.NumberBFramesBetweenReferenceFrames

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "numberBFramesBetweenReferenceFrames", protocol.Int64Value(v), metadata)
	}
	if len(s.ParControl) > 0 {
		v := s.ParControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ParDenominator != nil {
		v := *s.ParDenominator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parDenominator", protocol.Int64Value(v), metadata)
	}
	if s.ParNumerator != nil {
		v := *s.ParNumerator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.QualityTuningLevel) > 0 {
		v := s.QualityTuningLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qualityTuningLevel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.RateControlMode) > 0 {
		v := s.RateControlMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateControlMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SceneChangeDetect) > 0 {
		v := s.SceneChangeDetect
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sceneChangeDetect", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.SlowPal) > 0 {
		v := s.SlowPal
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "slowPal", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Softness != nil {
		v := *s.Softness

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "softness", protocol.Int64Value(v), metadata)
	}
	if len(s.SpatialAdaptiveQuantization) > 0 {
		v := s.SpatialAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "spatialAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Syntax) > 0 {
		v := s.Syntax
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "syntax", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Telecine) > 0 {
		v := s.Telecine
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "telecine", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.TemporalAdaptiveQuantization) > 0 {
		v := s.TemporalAdaptiveQuantization
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "temporalAdaptiveQuantization", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Specify the details for each additional Microsoft Smooth Streaming manifest
// that you want the service to generate for this output group. Each manifest
// can reference a different subset of outputs in the group.
type MsSmoothAdditionalManifest struct {
	_ struct{} `type:"structure"`

	// Specify a name modifier that the service adds to the name of this manifest
	// to make it different from the file names of the other main manifests in the
	// output group. For example, say that the default main manifest for your Microsoft
	// Smooth group is film-name.ismv. If you enter "-no-premium" for this setting,
	// then the file name the service generates for this top-level manifest is film-name-no-premium.ismv.
	ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"`

	// Specify the outputs that you want this additional top-level manifest to reference.
SelectedOutputs []string `locationName:"selectedOutputs" type:"list"`
}

// String returns the string representation
func (s MsSmoothAdditionalManifest) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// The check mirrors the min:"1" length constraint on ManifestNameModifier.
func (s *MsSmoothAdditionalManifest) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "MsSmoothAdditionalManifest"}
	if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("ManifestNameModifier", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// SelectedOutputs is emitted as a JSON list of quoted strings.
func (s MsSmoothAdditionalManifest) MarshalFields(e protocol.FieldEncoder) error {
	if s.ManifestNameModifier != nil {
		v := *s.ManifestNameModifier

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "manifestNameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.SelectedOutputs != nil {
		v := s.SelectedOutputs

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "selectedOutputs", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	return nil
}

// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify
// the value SpekeKeyProvider.
type MsSmoothEncryptionSettings struct {
	_ struct{} `type:"structure"`

	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
	SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"`
}

// String returns the string representation
func (s MsSmoothEncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s MsSmoothEncryptionSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.SpekeKeyProvider != nil {
		v := s.SpekeKeyProvider

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "spekeKeyProvider", v, metadata)
	}
	return nil
}

// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to
// MS_SMOOTH_GROUP_SETTINGS.
type MsSmoothGroupSettings struct {
	_ struct{} `type:"structure"`

	// By default, the service creates one .ism Microsoft Smooth Streaming manifest
	// for each Microsoft Smooth Streaming output group in your job. This default
	// manifest references every output in the output group. To create additional
	// manifests that reference a subset of the outputs in the output group, specify
	// a list of them here.
	AdditionalManifests []MsSmoothAdditionalManifest `locationName:"additionalManifests" type:"list"`

	// COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across
	// a Microsoft Smooth output group into a single audio stream.
	AudioDeduplication MsSmoothAudioDeduplication `locationName:"audioDeduplication" type:"string" enum:"true"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`

	// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify
	// the value SpekeKeyProvider.
	Encryption *MsSmoothEncryptionSettings `locationName:"encryption" type:"structure"`

	// Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in
	// seconds. Fragment length must be compatible with GOP size and frame rate.
	FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"`

	// Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding
	// format for the server and client manifest. Valid options are utf8 and utf16.
	ManifestEncoding MsSmoothManifestEncoding `locationName:"manifestEncoding" type:"string" enum:"true"`
}

// String returns the string representation
func (s MsSmoothGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Besides the FragmentLength minimum, it recursively validates every entry
// in AdditionalManifests, nesting any errors under an indexed path.
func (s *MsSmoothGroupSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "MsSmoothGroupSettings"}
	if s.FragmentLength != nil && *s.FragmentLength < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FragmentLength", 1))
	}
	if s.AdditionalManifests != nil {
		for i, v := range s.AdditionalManifests {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Nested structures (additionalManifests entries, destinationSettings,
// encryption) are encoded via SetFields/ListAddFields; scalars and enums are
// emitted only when set.
func (s MsSmoothGroupSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.AdditionalManifests != nil {
		v := s.AdditionalManifests

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "additionalManifests", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	if len(s.AudioDeduplication) > 0 {
		v := s.AudioDeduplication
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "audioDeduplication", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Destination != nil {
		v := *s.Destination

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "destination", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.DestinationSettings != nil {
		v := s.DestinationSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "destinationSettings", v, metadata)
	}
	if s.Encryption != nil {
		v := s.Encryption

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "encryption", v, metadata)
	}
	if s.FragmentLength != nil {
		v := *s.FragmentLength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fragmentLength", protocol.Int64Value(v), metadata)
	}
	if len(s.ManifestEncoding) > 0 {
		v := s.ManifestEncoding
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "manifestEncoding", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// MXF settings
type MxfSettings struct {
	_ struct{} `type:"structure"`

	// Optional. When you have AFD signaling set up in your output video stream,
	// use this setting to choose whether to also include it in the MXF wrapper.
	// Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper.
	// Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from
	// the video stream for this output to the MXF wrapper. Regardless of which
	// option you choose, the AFD values remain in the video stream. Related settings:
	// To set up your output to include or exclude AFD values, see AfdSignaling,
	// under VideoDescription. On the console, find AFD signaling under the output's
	// video encoding settings.
	AfdSignaling MxfAfdSignaling `locationName:"afdSignaling" type:"string" enum:"true"`
}

// String returns the string representation
func (s MxfSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s MxfSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.AfdSignaling) > 0 {
		v := s.AfdSignaling
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "afdSignaling", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// For forensic video watermarking, MediaConvert supports Nagra NexGuard File
// Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2)
// and OTT Streaming workflows.
type NexGuardFileMarkerSettings struct {
	_ struct{} `type:"structure"`

	// Use the base64 license string that Nagra provides you. Enter it directly
	// in your JSON job specification or in the console. Required when you include
	// Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in
	// your job.
	License *string `locationName:"license" min:"1" type:"string"`

	// Specify the payload ID that you want associated with this output. Valid values
	// vary depending on your Nagra NexGuard forensic watermarking workflow. Required
	// when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings)
	// in your job. For PreRelease Content (NGPR/G2), specify an integer from 1
	// through 4,194,303. You must generate a unique ID for each asset you watermark,
	// and keep a record of which ID you have assigned to each asset. Neither Nagra
	// nor MediaConvert keep track of the relationship between output files and
	// your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for
	// each asset. Do this by setting up two output groups. For one output group,
	// set the value of Payload ID (payload) to 0 in every output. For the other
	// output group, set Payload ID (payload) to 1 in every output.
	Payload *int64 `locationName:"payload" type:"integer"`

	// Enter one of the watermarking preset strings that Nagra provides you. Required
	// when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings)
	// in your job.
	Preset *string `locationName:"preset" min:"1" type:"string"`

	// Optional. Ignore this setting unless Nagra support directs you to specify
	// a value. When you don't specify a value here, the Nagra NexGuard library
	// uses its default value.
	Strength WatermarkingStrength `locationName:"strength" type:"string" enum:"true"`
}

// String returns the string representation
func (s NexGuardFileMarkerSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// The checks mirror the min:"1" length constraints on License and Preset;
// nil (unset) fields are not validated.
func (s *NexGuardFileMarkerSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "NexGuardFileMarkerSettings"}
	if s.License != nil && len(*s.License) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("License", 1))
	}
	if s.Preset != nil && len(*s.Preset) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("Preset", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s NexGuardFileMarkerSettings) MarshalFields(e protocol.FieldEncoder) error { if s.License != nil { v := *s.License metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "license", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.Payload != nil { v := *s.Payload metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "payload", protocol.Int64Value(v), metadata) } if s.Preset != nil { v := *s.Preset metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "preset", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if len(s.Strength) > 0 { v := s.Strength metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "strength", protocol.QuotedValue{ValueMarshaler: v}, metadata) } return nil } // Settings for your Nielsen configuration. If you don't do Nielsen measurement // and analytics, ignore these settings. When you enable Nielsen configuration // (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs // in the job. To enable Nielsen configuration programmatically, include an // instance of nielsenConfiguration in your JSON job specification. Even if // you don't include any children of nielsenConfiguration, you still enable // the setting. type NielsenConfiguration struct { _ struct{} `type:"structure"` // Nielsen has discontinued the use of breakout code functionality. If you must // include this property, set the value to zero. BreakoutCode *int64 `locationName:"breakoutCode" type:"integer"` // Use Distributor ID (DistributorID) to specify the distributor ID that is // assigned to your organization by Neilsen. DistributorId *string `locationName:"distributorId" type:"string"` } // String returns the string representation func (s NielsenConfiguration) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s NielsenConfiguration) MarshalFields(e protocol.FieldEncoder) error {
	if s.BreakoutCode != nil {
		v := *s.BreakoutCode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "breakoutCode", protocol.Int64Value(v), metadata)
	}
	if s.DistributorId != nil {
		v := *s.DistributorId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "distributorId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}

	return nil
}

// Enable the Noise reducer (NoiseReducer) feature to remove noise from your
// video output if necessary. Enable or disable this feature for each output
// individually. This setting is disabled by default. When you enable Noise
// reducer (NoiseReducer), you must also select a value for Noise reducer filter
// (NoiseReducerFilter).
type NoiseReducer struct {
	_ struct{} `type:"structure"`

	// Use Noise reducer filter (NoiseReducerFilter) to select one of the following
	// spatial image filtering functions. To use this setting, you must also enable
	// Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing
	// noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution
	// filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain
	// filtering based on JND principles. * Temporal optimizes video quality for
	// complex motion.
	Filter NoiseReducerFilter `locationName:"filter" type:"string" enum:"true"`

	// Settings for a noise reducer filter
	FilterSettings *NoiseReducerFilterSettings `locationName:"filterSettings" type:"structure"`

	// Noise reducer filter settings for spatial filter.
	SpatialFilterSettings *NoiseReducerSpatialFilterSettings `locationName:"spatialFilterSettings" type:"structure"`

	// Noise reducer filter settings for temporal filter.
	TemporalFilterSettings *NoiseReducerTemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"`
}

// String returns the string representation
func (s NoiseReducer) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *NoiseReducer) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "NoiseReducer"}
	if s.SpatialFilterSettings != nil {
		if err := s.SpatialFilterSettings.Validate(); err != nil {
			invalidParams.AddNested("SpatialFilterSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.TemporalFilterSettings != nil {
		if err := s.TemporalFilterSettings.Validate(); err != nil {
			invalidParams.AddNested("TemporalFilterSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s NoiseReducer) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Filter) > 0 {
		v := s.Filter

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "filter", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FilterSettings != nil {
		v := s.FilterSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "filterSettings", v, metadata)
	}
	if s.SpatialFilterSettings != nil {
		v := s.SpatialFilterSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "spatialFilterSettings", v, metadata)
	}
	if s.TemporalFilterSettings != nil {
		v := s.TemporalFilterSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "temporalFilterSettings", v, metadata)
	}

	return nil
}

// Settings for a noise reducer filter
type NoiseReducerFilterSettings struct {
	_ struct{} `type:"structure"`

	// Relative strength of noise reducing filter. Higher values produce stronger
	// filtering.
	Strength *int64 `locationName:"strength" type:"integer"`
}

// String returns the string representation
func (s NoiseReducerFilterSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s NoiseReducerFilterSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.Strength != nil {
		v := *s.Strength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "strength", protocol.Int64Value(v), metadata)
	}

	return nil
}

// Noise reducer filter settings for spatial filter.
type NoiseReducerSpatialFilterSettings struct {
	_ struct{} `type:"structure"`

	// Specify strength of post noise reduction sharpening filter, with 0 disabling
	// the filter and 3 enabling it at maximum strength.
	PostFilterSharpenStrength *int64 `locationName:"postFilterSharpenStrength" type:"integer"`

	// The speed of the filter, from -2 (lower speed) to 3 (higher speed), with
	// 0 being the nominal value.
	Speed *int64 `locationName:"speed" type:"integer"`

	// Relative strength of noise reducing filter. Higher values produce stronger
	// filtering.
	Strength *int64 `locationName:"strength" type:"integer"`
}

// String returns the string representation
func (s NoiseReducerSpatialFilterSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *NoiseReducerSpatialFilterSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "NoiseReducerSpatialFilterSettings"}
	// Only the documented lower bound (-2) is enforced client-side.
	if s.Speed != nil && *s.Speed < -2 {
		invalidParams.Add(aws.NewErrParamMinValue("Speed", -2))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s NoiseReducerSpatialFilterSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.PostFilterSharpenStrength != nil {
		v := *s.PostFilterSharpenStrength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "postFilterSharpenStrength", protocol.Int64Value(v), metadata)
	}
	if s.Speed != nil {
		v := *s.Speed

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "speed", protocol.Int64Value(v), metadata)
	}
	if s.Strength != nil {
		v := *s.Strength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "strength", protocol.Int64Value(v), metadata)
	}

	return nil
}

// Noise reducer filter settings for temporal filter.
type NoiseReducerTemporalFilterSettings struct {
	_ struct{} `type:"structure"`

	// Use Aggressive mode for content that has complex motion. Higher values produce
	// stronger temporal filtering. This filters highly complex scenes more aggressively
	// and creates better VQ for low bitrate outputs.
	AggressiveMode *int64 `locationName:"aggressiveMode" type:"integer"`

	// Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL),
	// you can optionally use this setting to apply additional sharpening. The default
	// behavior, Auto (AUTO) allows the transcoder to determine whether to apply
	// filtering, depending on input type and quality.
	PostTemporalSharpening NoiseFilterPostTemporalSharpening `locationName:"postTemporalSharpening" type:"string" enum:"true"`

	// The speed of the filter (higher number is faster). Low setting reduces bit
	// rate at the cost of transcode time, high setting improves transcode time
	// at the cost of bit rate.
	Speed *int64 `locationName:"speed" type:"integer"`

	// Specify the strength of the noise reducing filter on this output. Higher
	// values produce stronger filtering. We recommend the following value ranges,
	// depending on the result that you want: * 0-2 for complexity reduction with
	// minimal sharpness loss * 2-8 for complexity reduction with image preservation
	// * 8-16 for a high level of complexity reduction
	Strength *int64 `locationName:"strength" type:"integer"`
}

// String returns the string representation
func (s NoiseReducerTemporalFilterSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *NoiseReducerTemporalFilterSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "NoiseReducerTemporalFilterSettings"}
	if s.Speed != nil && *s.Speed < -1 {
		invalidParams.Add(aws.NewErrParamMinValue("Speed", -1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s NoiseReducerTemporalFilterSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.AggressiveMode != nil {
		v := *s.AggressiveMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "aggressiveMode", protocol.Int64Value(v), metadata)
	}
	if len(s.PostTemporalSharpening) > 0 {
		v := s.PostTemporalSharpening

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "postTemporalSharpening", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Speed != nil {
		v := *s.Speed

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "speed", protocol.Int64Value(v), metadata)
	}
	if s.Strength != nil {
		v := *s.Strength

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "strength", protocol.Int64Value(v), metadata)
	}

	return nil
}

// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value OPUS.
type OpusSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify the average bitrate in bits per second. Valid values are
	// multiples of 8000, from 32000 through 192000. The default value is 96000,
	// which we recommend for quality and bandwidth.
	Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"`

	// Specify the number of channels in this output audio track. Choosing Mono
	// on the console gives you 1 output channel; choosing Stereo gives you 2. In
	// the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Optional. Sample rate in hz. Valid values are 16000, 24000, and 48000. The
	// default value is 48000.
	SampleRate *int64 `locationName:"sampleRate" min:"16000" type:"integer"`
}

// String returns the string representation
func (s OpusSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *OpusSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "OpusSettings"}
	if s.Bitrate != nil && *s.Bitrate < 32000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 32000))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 16000 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 16000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s OpusSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.Bitrate != nil {
		v := *s.Bitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if s.Channels != nil {
		v := *s.Channels

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "channels", protocol.Int64Value(v), metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}

	return nil
}

// An output object describes the settings for a single output file or stream
// in an output group.
type Output struct {
	_ struct{} `type:"structure"`

	// (AudioDescriptions) contains groups of audio encoding settings organized
	// by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions)
	// can contain multiple groups of encoding settings.
	AudioDescriptions []AudioDescription `locationName:"audioDescriptions" type:"list"`

	// (CaptionDescriptions) contains groups of captions settings. For each output
	// that has captions, include one instance of (CaptionDescriptions). (CaptionDescriptions)
	// can contain multiple groups of captions settings.
	CaptionDescriptions []CaptionDescription `locationName:"captionDescriptions" type:"list"`

	// Container specific settings.
	ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"`

	// Use Extension (Extension) to specify the file extension for outputs in File
	// output groups. If you do not specify a value, the service will use default
	// extensions by container type as follows * MPEG-2 transport stream, m2ts *
	// Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container,
	// webm * No Container, the service will use codec extensions (e.g. AAC, H265,
	// H265, AC3)
	Extension *string `locationName:"extension" type:"string"`

	// Use Name modifier (NameModifier) to have the service add a string to the
	// end of each output filename. You specify the base filename as part of your
	// destination URI. When you create multiple outputs in the same output group,
	// Name modifier (NameModifier) is required. Name modifier also accepts format
	// identifiers. For DASH ISO outputs, if you use the format identifiers $Number$
	// or $Time$ in one output, you must use them in the same way in all outputs
	// of the output group.
	NameModifier *string `locationName:"nameModifier" min:"1" type:"string"`

	// Specific settings for this type of output.
	OutputSettings *OutputSettings `locationName:"outputSettings" type:"structure"`

	// Use Preset (Preset) to specify a preset for your transcoding settings. Provide
	// the system or custom preset name. You can specify either Preset (Preset)
	// or Container settings (ContainerSettings), but not both.
	Preset *string `locationName:"preset" type:"string"`

	// (VideoDescription) contains a group of video encoding settings. The specific
	// video settings depend on the video codec that you choose when you specify
	// a value for Video codec (codec). Include one instance of (VideoDescription)
	// per output.
	VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"`
}

// String returns the string representation
func (s Output) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Output) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Output"}
	if s.NameModifier != nil && len(*s.NameModifier) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("NameModifier", 1))
	}
	if s.AudioDescriptions != nil {
		for i, v := range s.AudioDescriptions {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioDescriptions", i), err.(aws.ErrInvalidParams))
			}
		}
	}
	if s.CaptionDescriptions != nil {
		for i, v := range s.CaptionDescriptions {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionDescriptions", i), err.(aws.ErrInvalidParams))
			}
		}
	}
	if s.ContainerSettings != nil {
		if err := s.ContainerSettings.Validate(); err != nil {
			invalidParams.AddNested("ContainerSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.VideoDescription != nil {
		if err := s.VideoDescription.Validate(); err != nil {
			invalidParams.AddNested("VideoDescription", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Output) MarshalFields(e protocol.FieldEncoder) error { if s.AudioDescriptions != nil { v := s.AudioDescriptions metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "audioDescriptions", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } if s.CaptionDescriptions != nil { v := s.CaptionDescriptions metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "captionDescriptions", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } if s.ContainerSettings != nil { v := s.ContainerSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "containerSettings", v, metadata) } if s.Extension != nil { v := *s.Extension metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "extension", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.NameModifier != nil { v := *s.NameModifier metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "nameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.OutputSettings != nil { v := s.OutputSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "outputSettings", v, metadata) } if s.Preset != nil { v := *s.Preset metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "preset", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } if s.VideoDescription != nil { v := s.VideoDescription metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "videoDescription", v, metadata) } return nil } // OutputChannel mapping settings. type OutputChannelMapping struct { _ struct{} `type:"structure"` // List of input channels InputChannels []int64 `locationName:"inputChannels" type:"list"` } // String returns the string representation func (s OutputChannelMapping) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s OutputChannelMapping) MarshalFields(e protocol.FieldEncoder) error {
	if s.InputChannels != nil {
		v := s.InputChannels

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "inputChannels", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.Int64Value(v1))
		}
		ls0.End()
	}

	return nil
}

// Details regarding output
type OutputDetail struct {
	_ struct{} `type:"structure"`

	// Duration in milliseconds
	DurationInMs *int64 `locationName:"durationInMs" type:"integer"`

	// Contains details about the output's video stream
	VideoDetails *VideoDetail `locationName:"videoDetails" type:"structure"`
}

// String returns the string representation
func (s OutputDetail) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s OutputDetail) MarshalFields(e protocol.FieldEncoder) error {
	if s.DurationInMs != nil {
		v := *s.DurationInMs

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "durationInMs", protocol.Int64Value(v), metadata)
	}
	if s.VideoDetails != nil {
		v := s.VideoDetails

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "videoDetails", v, metadata)
	}

	return nil
}

// Group of outputs
type OutputGroup struct {
	_ struct{} `type:"structure"`

	// Use Custom Group Name (CustomName) to specify a name for the output group.
	// This value is displayed on the console and can make your job settings JSON
	// more human-readable. It does not affect your outputs. Use up to twelve characters
	// that are either letters, numbers, spaces, or underscores.
	CustomName *string `locationName:"customName" type:"string"`

	// Name of the output group
	Name *string `locationName:"name" type:"string"`

	// Output Group settings, including type
	OutputGroupSettings *OutputGroupSettings `locationName:"outputGroupSettings" type:"structure"`

	// This object holds groups of encoding settings, one group of settings per
	// output.
	Outputs []Output `locationName:"outputs" type:"list"`
}

// String returns the string representation
func (s OutputGroup) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *OutputGroup) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "OutputGroup"}
	if s.OutputGroupSettings != nil {
		if err := s.OutputGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("OutputGroupSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Outputs != nil {
		for i, v := range s.Outputs {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Outputs", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s OutputGroup) MarshalFields(e protocol.FieldEncoder) error {
	if s.CustomName != nil {
		v := *s.CustomName

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "customName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Name != nil {
		v := *s.Name

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.OutputGroupSettings != nil {
		v := s.OutputGroupSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "outputGroupSettings", v, metadata)
	}
	if s.Outputs != nil {
		v := s.Outputs

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "outputs", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}

	return nil
}

// Contains details about the output groups specified in the job settings.
type OutputGroupDetail struct {
	_ struct{} `type:"structure"`

	// Details about the output
	OutputDetails []OutputDetail `locationName:"outputDetails" type:"list"`
}

// String returns the string representation
func (s OutputGroupDetail) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s OutputGroupDetail) MarshalFields(e protocol.FieldEncoder) error {
	if s.OutputDetails != nil {
		v := s.OutputDetails

		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "outputDetails", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}

	return nil
}

// Output Group settings, including type
type OutputGroupSettings struct {
	_ struct{} `type:"structure"`

	// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to
	// CMAF_GROUP_SETTINGS. Each output in a CMAF Output Group may only contain
	// a single video, audio, or caption output.
	CmafGroupSettings *CmafGroupSettings `locationName:"cmafGroupSettings" type:"structure"`

	// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to
	// DASH_ISO_GROUP_SETTINGS.
	DashIsoGroupSettings *DashIsoGroupSettings `locationName:"dashIsoGroupSettings" type:"structure"`

	// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to
	// FILE_GROUP_SETTINGS.
	FileGroupSettings *FileGroupSettings `locationName:"fileGroupSettings" type:"structure"`

	// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to
	// HLS_GROUP_SETTINGS.
	HlsGroupSettings *HlsGroupSettings `locationName:"hlsGroupSettings" type:"structure"`

	// Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to
	// MS_SMOOTH_GROUP_SETTINGS.
	MsSmoothGroupSettings *MsSmoothGroupSettings `locationName:"msSmoothGroupSettings" type:"structure"`

	// Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming,
	// CMAF)
	Type OutputGroupType `locationName:"type" type:"string" enum:"true"`
}

// String returns the string representation
func (s OutputGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *OutputGroupSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "OutputGroupSettings"}
	if s.CmafGroupSettings != nil {
		if err := s.CmafGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("CmafGroupSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.DashIsoGroupSettings != nil {
		if err := s.DashIsoGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("DashIsoGroupSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.HlsGroupSettings != nil {
		if err := s.HlsGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("HlsGroupSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.MsSmoothGroupSettings != nil {
		if err := s.MsSmoothGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("MsSmoothGroupSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s OutputGroupSettings) MarshalFields(e protocol.FieldEncoder) error { if s.CmafGroupSettings != nil { v := s.CmafGroupSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "cmafGroupSettings", v, metadata) } if s.DashIsoGroupSettings != nil { v := s.DashIsoGroupSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "dashIsoGroupSettings", v, metadata) } if s.FileGroupSettings != nil { v := s.FileGroupSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "fileGroupSettings", v, metadata) } if s.HlsGroupSettings != nil { v := s.HlsGroupSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "hlsGroupSettings", v, metadata) } if s.MsSmoothGroupSettings != nil { v := s.MsSmoothGroupSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "msSmoothGroupSettings", v, metadata) } if len(s.Type) > 0 { v := s.Type metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "type", protocol.QuotedValue{ValueMarshaler: v}, metadata) } return nil } // Specific settings for this type of output. type OutputSettings struct { _ struct{} `type:"structure"` // Settings for HLS output groups HlsSettings *HlsSettings `locationName:"hlsSettings" type:"structure"` } // String returns the string representation func (s OutputSettings) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s OutputSettings) MarshalFields(e protocol.FieldEncoder) error { if s.HlsSettings != nil { v := s.HlsSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "hlsSettings", v, metadata) } return nil } // If you work with a third party video watermarking partner, use the group // of settings that correspond with your watermarking partner to include watermarks // in your output. 
type PartnerWatermarking struct {
	_ struct{} `type:"structure"`

	// For forensic video watermarking, MediaConvert supports Nagra NexGuard File
	// Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2)
	// and OTT Streaming workflows.
	NexguardFileMarkerSettings *NexGuardFileMarkerSettings `locationName:"nexguardFileMarkerSettings" type:"structure"`
}

// String returns the string representation
func (s PartnerWatermarking) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *PartnerWatermarking) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "PartnerWatermarking"}
	if s.NexguardFileMarkerSettings != nil {
		if err := s.NexguardFileMarkerSettings.Validate(); err != nil {
			invalidParams.AddNested("NexguardFileMarkerSettings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s PartnerWatermarking) MarshalFields(e protocol.FieldEncoder) error {
	if s.NexguardFileMarkerSettings != nil {
		v := s.NexguardFileMarkerSettings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "nexguardFileMarkerSettings", v, metadata)
	}

	return nil
}

// A preset is a collection of preconfigured media conversion settings that
// you want MediaConvert to apply to the output during the conversion process.
type Preset struct {
	_ struct{} `type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// An optional category you create to organize your presets.
	Category *string `locationName:"category" type:"string"`

	// The timestamp in epoch seconds for preset creation.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description you create for each preset.
	Description *string `locationName:"description" type:"string"`

	// The timestamp in epoch seconds when the preset was last updated.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name you create for each preset. Each name must be unique within your account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Settings for preset
	//
	// Settings is a required field
	Settings *PresetSettings `locationName:"settings" type:"structure" required:"true"`

	// A preset can be of two types: system or custom. System or built-in preset
	// can't be modified or deleted by the user.
	Type Type `locationName:"type" type:"string" enum:"true"`
}

// String returns the string representation
func (s Preset) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Preset) MarshalFields(e protocol.FieldEncoder) error {
	if s.Arn != nil {
		v := *s.Arn

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Category != nil {
		v := *s.Category

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "category", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.CreatedAt != nil {
		v := *s.CreatedAt

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "createdAt", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.Description != nil {
		v := *s.Description

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.LastUpdated != nil {
		v := *s.LastUpdated

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "lastUpdated", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.Name != nil {
		v := *s.Name

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Settings != nil {
		v := s.Settings

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "settings", v, metadata)
	}
	if len(s.Type) > 0 {
		v := s.Type

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "type", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}

	return nil
}

// Settings for preset
type PresetSettings struct {
	_ struct{} `type:"structure"`

	// (AudioDescriptions) contains groups of audio encoding settings organized
	// by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions)
	// can contain multiple groups of encoding settings.
	AudioDescriptions []AudioDescription `locationName:"audioDescriptions" type:"list"`

	// Caption settings for this preset. There can be multiple caption settings
	// in a single output.
	CaptionDescriptions []CaptionDescriptionPreset `locationName:"captionDescriptions" type:"list"`

	// Container specific settings.
	ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"`

	// (VideoDescription) contains a group of video encoding settings. The specific
	// video settings depend on the video codec that you choose when you specify
	// a value for Video codec (codec). Include one instance of (VideoDescription)
	// per output.
	VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"`
}

// String returns the string representation
func (s PresetSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *PresetSettings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "PresetSettings"} if s.AudioDescriptions != nil { for i, v := range s.AudioDescriptions { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioDescriptions", i), err.(aws.ErrInvalidParams)) } } } if s.CaptionDescriptions != nil { for i, v := range s.CaptionDescriptions { if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionDescriptions", i), err.(aws.ErrInvalidParams)) } } } if s.ContainerSettings != nil { if err := s.ContainerSettings.Validate(); err != nil { invalidParams.AddNested("ContainerSettings", err.(aws.ErrInvalidParams)) } } if s.VideoDescription != nil { if err := s.VideoDescription.Validate(); err != nil { invalidParams.AddNested("VideoDescription", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s PresetSettings) MarshalFields(e protocol.FieldEncoder) error { if s.AudioDescriptions != nil { v := s.AudioDescriptions metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "audioDescriptions", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } if s.CaptionDescriptions != nil { v := s.CaptionDescriptions metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "captionDescriptions", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } if s.ContainerSettings != nil { v := s.ContainerSettings metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "containerSettings", v, metadata) } if s.VideoDescription != nil { v := s.VideoDescription metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "videoDescription", v, metadata) } return nil } // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to // the value PRORES. 
type ProresSettings struct {
	_ struct{} `type:"structure"`

	// Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec
	// to use for this output.
	CodecProfile ProresCodecProfile `locationName:"codecProfile" type:"string" enum:"true"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl ProresFramerateControl `locationName:"framerateControl" type:"string" enum:"true"`

	// Optional. Specify how the transcoder performs framerate conversion. The default
	// behavior is to use duplicate drop conversion.
	FramerateConversionAlgorithm ProresFramerateConversionAlgorithm `locationName:"framerateConversionAlgorithm" type:"string" enum:"true"`

	// Frame rate denominator.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Use Interlace mode (InterlaceMode) to choose the scan line type for the output.
	// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce
	// interlaced output with the entire output having the same field polarity (top
	// or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default
	// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore,
	// behavior depends on the input scan type. - If the source is interlaced, the
	// output will be interlaced with the same polarity as the source (it will follow
	// the source). The output could therefore be a mix of "top field first" and
	// "bottom field first". - If the source is progressive, the output will be
	// interlaced with "top field first" or "bottom field first" polarity, depending
	// on which of the Follow options you chose.
	InterlaceMode ProresInterlaceMode `locationName:"interlaceMode" type:"string" enum:"true"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl ProresParControl `locationName:"parControl" type:"string" enum:"true"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled
	// as 25fps, and audio is sped up correspondingly.
	SlowPal ProresSlowPal `locationName:"slowPal" type:"string" enum:"true"`

	// Only use Telecine (ProresTelecine) when you set Framerate (Framerate) to
	// 29.970. Set Telecine (ProresTelecine) to Hard (hard) to produce a 29.97i
	// output from a 23.976 input. Set it to Soft (soft) to produce 23.976 output
	// and leave conversion to the player.
	Telecine ProresTelecine `locationName:"telecine" type:"string" enum:"true"`
}

// String returns the string representation
func (s ProresSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ProresSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "ProresSettings"}
	// Each fraction component (frame rate and PAR) must be at least 1 when set;
	// unset (nil) values are accepted and fall back to service defaults.
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParNumerator", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only set fields are written; enum values are emitted as quoted strings.
func (s ProresSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.CodecProfile) > 0 {
		v := s.CodecProfile

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codecProfile", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateControl) > 0 {
		v := s.FramerateControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateConversionAlgorithm) > 0 {
		v := s.FramerateConversionAlgorithm

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateConversionAlgorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.InterlaceMode) > 0 {
		v := s.InterlaceMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "interlaceMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.ParControl) > 0 {
		v := s.ParControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ParDenominator != nil {
		v := *s.ParDenominator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parDenominator", protocol.Int64Value(v), metadata)
	}
	if s.ParNumerator != nil {
		v := *s.ParNumerator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.SlowPal) > 0 {
		v := s.SlowPal

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "slowPal", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.Telecine) > 0 {
		v := s.Telecine

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "telecine", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// You can use queues to manage the resources that are available to your AWS
// account for running multiple transcoding jobs at the same time. If you don't
// specify a queue, the service sends all jobs through the default queue. For
// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
type Queue struct {
	_ struct{} `type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// The timestamp in epoch seconds for when you created the queue.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description that you create for each queue.
	Description *string `locationName:"description" type:"string"`

	// The timestamp in epoch seconds for when you most recently updated the queue.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name that you create for each queue. Each name must be unique within your
	// account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Specifies whether the pricing plan for the queue is on-demand or reserved.
	// For on-demand, you pay per minute, billed in increments of .01 minute. For
	// reserved, you pay for the transcoding capacity of the entire queue, regardless
	// of how much or how little you use it. Reserved pricing requires a 12-month
	// commitment.
	PricingPlan PricingPlan `locationName:"pricingPlan" type:"string" enum:"true"`

	// The estimated number of jobs with a PROGRESSING status.
	ProgressingJobsCount *int64 `locationName:"progressingJobsCount" type:"integer"`

	// Details about the pricing plan for your reserved queue. Required for reserved
	// queues and not applicable to on-demand queues.
	ReservationPlan *ReservationPlan `locationName:"reservationPlan" type:"structure"`

	// Queues can be ACTIVE or PAUSED. If you pause a queue, the service won't begin
	// processing jobs in that queue. Jobs that are running when you pause the queue
	// continue to run until they finish or result in an error.
	Status QueueStatus `locationName:"status" type:"string" enum:"true"`

	// The estimated number of jobs with a SUBMITTED status.
	SubmittedJobsCount *int64 `locationName:"submittedJobsCount" type:"integer"`

	// Specifies whether this on-demand queue is system or custom. System queues
	// are built in. You can't modify or delete system queues. You can create and
	// modify custom queues.
	Type Type `locationName:"type" type:"string" enum:"true"`
}

// String returns the string representation
func (s Queue) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Queue) MarshalFields(e protocol.FieldEncoder) error {
	if s.Arn != nil {
		v := *s.Arn

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.CreatedAt != nil {
		v := *s.CreatedAt

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "createdAt", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.Description != nil {
		v := *s.Description

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.LastUpdated != nil {
		v := *s.LastUpdated

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "lastUpdated", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.Name != nil {
		v := *s.Name

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.PricingPlan) > 0 {
		v := s.PricingPlan

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pricingPlan", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ProgressingJobsCount != nil {
		v := *s.ProgressingJobsCount

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "progressingJobsCount", protocol.Int64Value(v), metadata)
	}
	if s.ReservationPlan != nil {
		v := s.ReservationPlan

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "reservationPlan", v, metadata)
	}
	if len(s.Status) > 0 {
		v := s.Status

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SubmittedJobsCount != nil {
		v := *s.SubmittedJobsCount

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "submittedJobsCount", protocol.Int64Value(v), metadata)
	}
	if len(s.Type) > 0 {
		v := s.Type

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "type", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Description of the source and destination queues between which the job has
// moved, along with the timestamp of the move
type QueueTransition struct {
	_ struct{} `type:"structure"`

	// The queue that the job was on after the transition.
	DestinationQueue *string `locationName:"destinationQueue" type:"string"`

	// The queue that the job was on before the transition.
	SourceQueue *string `locationName:"sourceQueue" type:"string"`

	// The time, in Unix epoch format, that the job moved from the source queue
	// to the destination queue.
	Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"unixTimestamp"`
}

// String returns the string representation
func (s QueueTransition) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s QueueTransition) MarshalFields(e protocol.FieldEncoder) error {
	if s.DestinationQueue != nil {
		v := *s.DestinationQueue

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "destinationQueue", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.SourceQueue != nil {
		v := *s.SourceQueue

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sourceQueue", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Timestamp != nil {
		v := *s.Timestamp

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timestamp", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	return nil
}

// Use Rectangle to identify a specific area of the video frame.
type Rectangle struct {
	_ struct{} `type:"structure"`

	// Height of rectangle in pixels. Specify only even numbers.
	Height *int64 `locationName:"height" min:"2" type:"integer"`

	// Width of rectangle in pixels. Specify only even numbers.
	Width *int64 `locationName:"width" min:"2" type:"integer"`

	// The distance, in pixels, between the rectangle and the left edge of the video
	// frame. Specify only even numbers.
	X *int64 `locationName:"x" type:"integer"`

	// The distance, in pixels, between the rectangle and the top edge of the video
	// frame. Specify only even numbers.
	Y *int64 `locationName:"y" type:"integer"`
}

// String returns the string representation
func (s Rectangle) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Note that only Height and Width carry a minimum (2); X and Y have no
// client-side constraint, and the "even numbers" rule is not enforced here.
func (s *Rectangle) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Rectangle"}
	if s.Height != nil && *s.Height < 2 {
		invalidParams.Add(aws.NewErrParamMinValue("Height", 2))
	}
	if s.Width != nil && *s.Width < 2 {
		invalidParams.Add(aws.NewErrParamMinValue("Width", 2))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Rectangle) MarshalFields(e protocol.FieldEncoder) error {
	if s.Height != nil {
		v := *s.Height

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "height", protocol.Int64Value(v), metadata)
	}
	if s.Width != nil {
		v := *s.Width

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "width", protocol.Int64Value(v), metadata)
	}
	if s.X != nil {
		v := *s.X

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "x", protocol.Int64Value(v), metadata)
	}
	if s.Y != nil {
		v := *s.Y

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "y", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Use Manual audio remixing (RemixSettings) to adjust audio levels for each
// audio channel in each output of your job. With audio remixing, you can output
// more or fewer audio channels than your input audio source provides.
type RemixSettings struct {
	_ struct{} `type:"structure"`

	// Channel mapping (ChannelMapping) contains the group of fields that hold the
	// remixing value for each channel. Units are in dB. Acceptable values are within
	// the range from -60 (mute) through 6. A setting of 0 passes the input channel
	// unchanged to the output channel (no attenuation or amplification).
	ChannelMapping *ChannelMapping `locationName:"channelMapping" type:"structure"`

	// Specify the number of audio channels from your input that you want to use
	// in your output. With remixing, you might combine or split the data in these
	// channels, so the number of channels in your final output might be different.
	ChannelsIn *int64 `locationName:"channelsIn" min:"1" type:"integer"`

	// Specify the number of channels in this output after remixing. Valid values:
	// 1, 2, 4, 6, 8... 64. (1 and even numbers to 64.)
	ChannelsOut *int64 `locationName:"channelsOut" min:"1" type:"integer"`
}

// String returns the string representation
func (s RemixSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the min:"1" constraints on the channel counts are checked client-side;
// the even-number rule for ChannelsOut is left to the service.
func (s *RemixSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "RemixSettings"}
	if s.ChannelsIn != nil && *s.ChannelsIn < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ChannelsIn", 1))
	}
	if s.ChannelsOut != nil && *s.ChannelsOut < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ChannelsOut", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s RemixSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.ChannelMapping != nil {
		v := s.ChannelMapping

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "channelMapping", v, metadata)
	}
	if s.ChannelsIn != nil {
		v := *s.ChannelsIn

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "channelsIn", protocol.Int64Value(v), metadata)
	}
	if s.ChannelsOut != nil {
		v := *s.ChannelsOut

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "channelsOut", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Details about the pricing plan for your reserved queue. Required for reserved
// queues and not applicable to on-demand queues.
type ReservationPlan struct {
	_ struct{} `type:"structure"`

	// The length of the term of your reserved queue pricing plan commitment.
	Commitment Commitment `locationName:"commitment" type:"string" enum:"true"`

	// The timestamp in epoch seconds for when the current pricing plan term for
	// this reserved queue expires.
	ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The timestamp in epoch seconds for when you set up the current pricing plan
	// for this reserved queue.
	PurchasedAt *time.Time `locationName:"purchasedAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// Specifies whether the term of your reserved queue pricing plan is automatically
	// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term.
	RenewalType RenewalType `locationName:"renewalType" type:"string" enum:"true"`

	// Specifies the number of reserved transcode slots (RTS) for this queue. The
	// number of RTS determines how many jobs the queue can process in parallel;
	// each RTS can process one job at a time. When you increase this number, you
	// extend your existing commitment with a new 12-month commitment for a larger
	// number of RTS. The new commitment begins when you purchase the additional
	// capacity. You can't decrease the number of RTS in your reserved queue.
	ReservedSlots *int64 `locationName:"reservedSlots" type:"integer"`

	// Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED.
	Status ReservationPlanStatus `locationName:"status" type:"string" enum:"true"`
}

// String returns the string representation
func (s ReservationPlan) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s ReservationPlan) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Commitment) > 0 {
		v := s.Commitment

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "commitment", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ExpiresAt != nil {
		v := *s.ExpiresAt

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "expiresAt", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.PurchasedAt != nil {
		v := *s.PurchasedAt

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "purchasedAt", protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if len(s.RenewalType) > 0 {
		v := s.RenewalType

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "renewalType", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ReservedSlots != nil {
		v := *s.ReservedSlots

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "reservedSlots", protocol.Int64Value(v), metadata)
	}
	if len(s.Status) > 0 {
		v := s.Status

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Details about the pricing plan for your reserved queue. Required for reserved
// queues and not applicable to on-demand queues.
type ReservationPlanSettings struct {
	_ struct{} `type:"structure"`

	// The length of the term of your reserved queue pricing plan commitment.
	//
	// Commitment is a required field
	Commitment Commitment `locationName:"commitment" type:"string" required:"true" enum:"true"`

	// Specifies whether the term of your reserved queue pricing plan is automatically
	// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term. When your
	// term is auto renewed, you extend your commitment by 12 months from the auto
	// renew date. You can cancel this commitment.
	//
	// RenewalType is a required field
	RenewalType RenewalType `locationName:"renewalType" type:"string" required:"true" enum:"true"`

	// Specifies the number of reserved transcode slots (RTS) for this queue. The
	// number of RTS determines how many jobs the queue can process in parallel;
	// each RTS can process one job at a time. You can't decrease the number of
	// RTS in your reserved queue. You can increase the number of RTS by extending
	// your existing commitment with a new 12-month commitment for the larger number.
	// The new commitment begins when you purchase the additional capacity. You
	// can't cancel your commitment or revert to your original commitment after
	// you increase the capacity.
	//
	// ReservedSlots is a required field
	ReservedSlots *int64 `locationName:"reservedSlots" type:"integer" required:"true"`
}

// String returns the string representation
func (s ReservationPlanSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
// All three fields are required; enums count as unset when the string is empty.
func (s *ReservationPlanSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "ReservationPlanSettings"}
	if len(s.Commitment) == 0 {
		invalidParams.Add(aws.NewErrParamRequired("Commitment"))
	}
	if len(s.RenewalType) == 0 {
		invalidParams.Add(aws.NewErrParamRequired("RenewalType"))
	}
	if s.ReservedSlots == nil {
		invalidParams.Add(aws.NewErrParamRequired("ReservedSlots"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s ReservationPlanSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Commitment) > 0 {
		v := s.Commitment

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "commitment", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.RenewalType) > 0 {
		v := s.RenewalType

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "renewalType", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ReservedSlots != nil {
		v := *s.ReservedSlots

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "reservedSlots", protocol.Int64Value(v), metadata)
	}
	return nil
}

// The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert
// resource.
type ResourceTags struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource.
	Arn *string `locationName:"arn" type:"string"`

	// The tags for the resource.
	Tags map[string]string `locationName:"tags" type:"map"`
}

// String returns the string representation
func (s ResourceTags) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Tags are emitted as a JSON map of quoted string values.
func (s ResourceTags) MarshalFields(e protocol.FieldEncoder) error {
	if s.Arn != nil {
		v := *s.Arn

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Tags != nil {
		v := s.Tags

		metadata := protocol.Metadata{}
		ms0 := e.Map(protocol.BodyTarget, "tags", metadata)
		ms0.Start()
		for k1, v1 := range v {
			ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ms0.End()
	}
	return nil
}

// Optional. Have MediaConvert automatically apply Amazon S3 access control
// for the outputs in this output group. When you don't use this setting, S3
// automatically applies the default access control list PRIVATE.
type S3DestinationAccessControl struct {
	_ struct{} `type:"structure"`

	// Choose an Amazon S3 canned ACL for MediaConvert to apply to this output.
	CannedAcl S3ObjectCannedAcl `locationName:"cannedAcl" type:"string" enum:"true"`
}

// String returns the string representation
func (s S3DestinationAccessControl) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s S3DestinationAccessControl) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.CannedAcl) > 0 {
		v := s.CannedAcl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "cannedAcl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Settings associated with S3 destination
type S3DestinationSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Have MediaConvert automatically apply Amazon S3 access control
	// for the outputs in this output group. When you don't use this setting, S3
	// automatically applies the default access control list PRIVATE.
	AccessControl *S3DestinationAccessControl `locationName:"accessControl" type:"structure"`

	// Settings for how your job outputs are encrypted as they are uploaded to Amazon
	// S3.
	Encryption *S3EncryptionSettings `locationName:"encryption" type:"structure"`
}

// String returns the string representation
func (s S3DestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s S3DestinationSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.AccessControl != nil {
		v := s.AccessControl

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "accessControl", v, metadata)
	}
	if s.Encryption != nil {
		v := s.Encryption

		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "encryption", v, metadata)
	}
	return nil
}

// Settings for how your job outputs are encrypted as they are uploaded to Amazon
// S3.
type S3EncryptionSettings struct {
	_ struct{} `type:"structure"`

	// Specify how you want your data keys managed. AWS uses data keys to encrypt
	// your content. AWS also encrypts the data keys themselves, using a customer
	// master key (CMK), and then stores the encrypted data keys alongside your
	// encrypted content. Use this setting to specify which AWS service manages
	// the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3).
	// If you want your master key to be managed by AWS Key Management Service (KMS),
	// choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose
	// AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with
	// Amazon S3 to encrypt your data keys. You can optionally choose to specify
	// a different, customer managed CMK. Do so by specifying the Amazon Resource
	// Name (ARN) of the key for the setting KMS ARN (kmsKeyArn).
	EncryptionType S3ServerSideEncryptionType `locationName:"encryptionType" type:"string" enum:"true"`

	// Optionally, specify the customer master key (CMK) that you want to use to
	// encrypt the data key that AWS uses to encrypt your output content. Enter
	// the Amazon Resource Name (ARN) of the CMK. To use this setting, you must
	// also set Server-side encryption (S3ServerSideEncryptionType) to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS).
	// If you set Server-side encryption to AWS KMS but don't specify a CMK here,
	// AWS uses the AWS managed CMK associated with Amazon S3.
	KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"`
}

// String returns the string representation
func (s S3EncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s S3EncryptionSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.EncryptionType) > 0 {
		v := s.EncryptionType

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "encryptionType", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.KmsKeyArn != nil {
		v := *s.KmsKeyArn

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "kmsKeyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Settings for SCC caption output.
type SccDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Set Framerate (SccDestinationFramerate) to make sure that the captions and
	// the video are synchronized in the output. Specify a frame rate that matches
	// the frame rate of the associated video. If the video frame rate is 29.97,
	// choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has
	// video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97
	// non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).
	Framerate SccDestinationFramerate `locationName:"framerate" type:"string" enum:"true"`
}

// String returns the string representation
func (s SccDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s SccDestinationSettings) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.Framerate) > 0 {
		v := s.Framerate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerate", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
// when doing DRM encryption with a SPEKE-compliant key provider. If your output
// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
type SpekeKeyProvider struct {
	_ struct{} `type:"structure"`

	// If you want your key provider to encrypt the content keys that it provides
	// to MediaConvert, set up a certificate with a master key using AWS Certificate
	// Manager. Specify the certificate's Amazon Resource Name (ARN) here.
	CertificateArn *string `locationName:"certificateArn" type:"string"`

	// Specify the resource ID that your SPEKE-compliant key provider uses to identify
	// this content.
	ResourceId *string `locationName:"resourceId" type:"string"`

	// Relates to SPEKE implementation. DRM system identifiers. DASH output groups
	// support a max of two system ids. Other group types support one system id.
	// See https://dashif.org/identifiers/content_protection/ for more details.
	SystemIds []string `locationName:"systemIds" type:"list"`

	// Specify the URL to the key server that your SPEKE-compliant DRM key provider
	// uses to provide keys for encrypting your content.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation
func (s SpekeKeyProvider) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s SpekeKeyProvider) MarshalFields(e protocol.FieldEncoder) error {
	// nil pointer and nil slice fields are omitted from the encoded body.
	if s.CertificateArn != nil {
		v := *s.CertificateArn
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "certificateArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.ResourceId != nil {
		v := *s.ResourceId
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "resourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.SystemIds != nil {
		v := s.SystemIds
		metadata := protocol.Metadata{}
		// Lists are encoded element by element between Start/End markers.
		ls0 := e.List(protocol.BodyTarget, "systemIds", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	if s.Url != nil {
		v := *s.Url
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "url", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// If your output group type is CMAF, use these settings when doing DRM encryption
// with a SPEKE-compliant key provider. If your output group type is HLS, DASH,
// or Microsoft Smooth, use the SpekeKeyProvider settings instead.
type SpekeKeyProviderCmaf struct {
	_ struct{} `type:"structure"`

	// If you want your key provider to encrypt the content keys that it provides
	// to MediaConvert, set up a certificate with a master key using AWS Certificate
	// Manager. Specify the certificate's Amazon Resource Name (ARN) here.
	CertificateArn *string `locationName:"certificateArn" type:"string"`

	// Specify the DRM system IDs that you want signaled in the DASH manifest that
	// MediaConvert creates as part of this CMAF package. The DASH manifest can
	// currently signal up to three system IDs. For more information, see https://dashif.org/identifiers/content_protection/.
	DashSignaledSystemIds []string `locationName:"dashSignaledSystemIds" type:"list"`

	// Specify the DRM system ID that you want signaled in the HLS manifest that
	// MediaConvert creates as part of this CMAF package. The HLS manifest can currently
	// signal only one system ID. For more information, see https://dashif.org/identifiers/content_protection/.
	HlsSignaledSystemIds []string `locationName:"hlsSignaledSystemIds" type:"list"`

	// Specify the resource ID that your SPEKE-compliant key provider uses to identify
	// this content.
	ResourceId *string `locationName:"resourceId" type:"string"`

	// Specify the URL to the key server that your SPEKE-compliant DRM key provider
	// uses to provide keys for encrypting your content.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation
func (s SpekeKeyProviderCmaf) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s SpekeKeyProviderCmaf) MarshalFields(e protocol.FieldEncoder) error {
	// nil pointer and nil slice fields are omitted from the encoded body.
	if s.CertificateArn != nil {
		v := *s.CertificateArn
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "certificateArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.DashSignaledSystemIds != nil {
		v := s.DashSignaledSystemIds
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "dashSignaledSystemIds", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	if s.HlsSignaledSystemIds != nil {
		v := s.HlsSignaledSystemIds
		metadata := protocol.Metadata{}
		ls0 := e.List(protocol.BodyTarget, "hlsSignaledSystemIds", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	if s.ResourceId != nil {
		v := *s.ResourceId
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "resourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Url != nil {
		v := *s.Url
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "url", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Use these settings to set up encryption with a static key provider.
type StaticKeyProvider struct {
	_ struct{} `type:"structure"`

	// Relates to DRM implementation. Sets the value of the KEYFORMAT attribute.
	// Must be 'identity' or a reverse DNS string. May be omitted to indicate an
	// implicit value of 'identity'.
	KeyFormat *string `locationName:"keyFormat" type:"string"`

	// Relates to DRM implementation. Either a single positive integer version value
	// or a slash delimited list of version values (1/2/3).
	KeyFormatVersions *string `locationName:"keyFormatVersions" type:"string"`

	// Relates to DRM implementation. Use a 32-character hexadecimal string to specify
	// Key Value (StaticKeyValue).
	StaticKeyValue *string `locationName:"staticKeyValue" type:"string"`

	// Relates to DRM implementation. The location of the license server used for
	// protecting content.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation
func (s StaticKeyProvider) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s StaticKeyProvider) MarshalFields(e protocol.FieldEncoder) error {
	// nil pointer fields are omitted from the encoded body.
	if s.KeyFormat != nil {
		v := *s.KeyFormat
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "keyFormat", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.KeyFormatVersions != nil {
		v := *s.KeyFormatVersions
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "keyFormatVersions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.StaticKeyValue != nil {
		v := *s.StaticKeyValue
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "staticKeyValue", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.Url != nil {
		v := *s.Url
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "url", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Settings for Teletext caption output
type TeletextDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Set pageNumber to the Teletext page number for the destination captions for
	// this output. This value must be a three-digit hexadecimal string; strings
	// ending in -FF are invalid. If you are passing through the entire set of Teletext
	// data, do not use this field.
	PageNumber *string `locationName:"pageNumber" min:"3" type:"string"`

	// Specify the page types for this Teletext page. If you don't specify a value
	// here, the service sets the page type to the default value Subtitle (PAGE_TYPE_SUBTITLE).
	// If you pass through the entire set of Teletext data, don't use this field.
	// When you pass through a set of Teletext pages, your output has the same page
	// types as your input.
	PageTypes []TeletextPageType `locationName:"pageTypes" type:"list"`
}

// String returns the string representation
func (s TeletextDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TeletextDestinationSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "TeletextDestinationSettings"}
	// Enforce the min:"3" constraint from the struct tag (three-digit page number).
	if s.PageNumber != nil && len(*s.PageNumber) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("PageNumber", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s TeletextDestinationSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.PageNumber != nil {
		v := *s.PageNumber
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pageNumber", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.PageTypes != nil {
		v := s.PageTypes
		metadata := protocol.Metadata{}
		// Enum list elements marshal as quoted strings.
		ls0 := e.List(protocol.BodyTarget, "pageTypes", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})
		}
		ls0.End()
	}
	return nil
}

// Settings specific to Teletext caption sources, including Page number.
type TeletextSourceSettings struct {
	_ struct{} `type:"structure"`

	// Use Page Number (PageNumber) to specify the three-digit hexadecimal page
	// number that will be used for Teletext captions. Do not use this setting if
	// you are passing through teletext from the input source to output.
	PageNumber *string `locationName:"pageNumber" min:"3" type:"string"`
}

// String returns the string representation
func (s TeletextSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TeletextSourceSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "TeletextSourceSettings"}
	// Enforce the min:"3" constraint from the struct tag (three-digit page number).
	if s.PageNumber != nil && len(*s.PageNumber) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("PageNumber", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s TeletextSourceSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.PageNumber != nil {
		v := *s.PageNumber
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pageNumber", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Timecode burn-in (TimecodeBurnIn)--Burns the output timecode and specified
// prefix into the output.
type TimecodeBurnin struct {
	_ struct{} `type:"structure"`

	// Use Font Size (FontSize) to set the font size of any burned-in timecode.
	// Valid values are 10, 16, 32, 48.
	FontSize *int64 `locationName:"fontSize" min:"10" type:"integer"`

	// Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to
	// specify the location the burned-in timecode on output video.
	Position TimecodeBurninPosition `locationName:"position" type:"string" enum:"true"`

	// Use Prefix (Prefix) to place ASCII characters before any burned-in timecode.
	// For example, a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00".
	// Provide either the characters themselves or the ASCII code equivalents. The
	// supported range of characters is 0x20 through 0x7e. This includes letters,
	// numbers, and all special characters represented on a standard English keyboard.
	Prefix *string `locationName:"prefix" type:"string"`
}

// String returns the string representation
func (s TimecodeBurnin) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TimecodeBurnin) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "TimecodeBurnin"}
	// Enforce the min:"10" constraint from the struct tag.
	if s.FontSize != nil && *s.FontSize < 10 {
		invalidParams.Add(aws.NewErrParamMinValue("FontSize", 10))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s TimecodeBurnin) MarshalFields(e protocol.FieldEncoder) error {
	// Integers marshal unquoted; enums and strings marshal as quoted values.
	if s.FontSize != nil {
		v := *s.FontSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fontSize", protocol.Int64Value(v), metadata)
	}
	if len(s.Position) > 0 {
		v := s.Position
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "position", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Prefix != nil {
		v := *s.Prefix
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "prefix", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// These settings control how the service handles timecodes throughout the job.
// These settings don't affect input clipping.
type TimecodeConfig struct {
	_ struct{} `type:"structure"`

	// If you use an editing platform that relies on an anchor timecode, use Anchor
	// Timecode (Anchor) to specify a timecode that will match the input video frame
	// to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF)
	// or (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior
	// for Anchor Timecode varies depending on your setting for Source (TimecodeSource).
	// * If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART),
	// the first input frame is the specified value in Start Timecode (Start). Anchor
	// Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode.
	// * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame
	// is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED),
	// the first frame is the timecode value on the first input frame of the input.
	Anchor *string `locationName:"anchor" type:"string"`

	// Use Source (TimecodeSource) to set how timecodes are handled within this
	// job. To make sure that your video, audio, captions, and markers are synchronized
	// and that time-based features, such as image inserter, work correctly, choose
	// the Timecode source option that matches your assets. All timecodes are in
	// a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) -
	// Use the timecode that is in the input video. If no embedded timecode is in
	// the source, the service will use Start at 0 (ZEROBASED) instead. * Start
	// at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00.
	// * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame
	// to a value other than zero. You use Start timecode (Start) to provide this
	// value.
	Source TimecodeSource `locationName:"source" type:"string" enum:"true"`

	// Only use when you set Source (TimecodeSource) to Specified start (SPECIFIEDSTART).
	// Use Start timecode (Start) to specify the timecode for the initial frame.
	// Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF).
	Start *string `locationName:"start" type:"string"`

	// Only applies to outputs that support program-date-time stamp. Use Timestamp
	// offset (TimestampOffset) to overwrite the timecode date without affecting
	// the time and frame number. Provide the new date as a string in the format
	// "yyyy-mm-dd". To use Time stamp offset, you must also enable Insert program-date-time
	// (InsertProgramDateTime) in the output settings. For example, if the date
	// part of your timecodes is 2002-1-25 and you want to change it to one year
	// later, set Timestamp offset (TimestampOffset) to 2003-1-25.
	TimestampOffset *string `locationName:"timestampOffset" type:"string"`
}

// String returns the string representation
func (s TimecodeConfig) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s TimecodeConfig) MarshalFields(e protocol.FieldEncoder) error {
	// Unset fields (nil pointer, empty enum) are omitted from the encoded body.
	if s.Anchor != nil {
		v := *s.Anchor
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "anchor", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if len(s.Source) > 0 {
		v := s.Source
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "source", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Start != nil {
		v := *s.Start
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "start", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	if s.TimestampOffset != nil {
		v := *s.TimestampOffset
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timestampOffset", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}

// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
// in your job. To include timed metadata, you must enable it here, enable it
// in each output container, and specify tags and timecodes in ID3 insertion
// (Id3Insertion) objects.
type TimedMetadataInsertion struct {
	_ struct{} `type:"structure"`

	// Id3Insertions contains the array of Id3Insertion instances.
	Id3Insertions []Id3Insertion `locationName:"id3Insertions" type:"list"`
}

// String returns the string representation
func (s TimedMetadataInsertion) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s TimedMetadataInsertion) MarshalFields(e protocol.FieldEncoder) error {
	if s.Id3Insertions != nil {
		v := s.Id3Insertions
		metadata := protocol.Metadata{}
		// Each list element is a nested structure, marshaled via ListAddFields.
		ls0 := e.List(protocol.BodyTarget, "id3Insertions", metadata)
		ls0.Start()
		for _, v1 := range v {
			ls0.ListAddFields(v1)
		}
		ls0.End()
	}
	return nil
}

// Information about when jobs are submitted, started, and finished is specified
// in Unix epoch format in seconds.
type Timing struct {
	_ struct{} `type:"structure"`

	// The time, in Unix epoch format, that the transcoding job finished.
	FinishTime *time.Time `locationName:"finishTime" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The time, in Unix epoch format, that transcoding for the job began.
	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The time, in Unix epoch format, that you submitted the job.
	SubmitTime *time.Time `locationName:"submitTime" type:"timestamp" timestampFormat:"unixTimestamp"`
}

// String returns the string representation
func (s Timing) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Timing) MarshalFields(e protocol.FieldEncoder) error {
	// Timestamps are encoded in the unixTimestamp format declared in the struct tags.
	if s.FinishTime != nil {
		v := *s.FinishTime
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "finishTime",
			protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.StartTime != nil {
		v := *s.StartTime
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "startTime",
			protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	if s.SubmitTime != nil {
		v := *s.SubmitTime
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "submitTime",
			protocol.TimeValue{V: v, Format: "unixTimestamp", QuotedFormatTime: true}, metadata)
	}
	return nil
}

// Settings specific to caption sources that are specified by track number.
// Currently, this is only IMSC captions in an IMF package. If your caption
// source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead
// of TrackSourceSettings.
type TrackSourceSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting to select a single captions track from a source. Track numbers
	// correspond to the order in the captions source file. For IMF sources, track
	// numbering is based on the order that the captions appear in the CPL. For
	// example, use 1 to select the captions asset that is listed first in the CPL.
	// To include more than one captions track in your job outputs, create multiple
	// input captions selectors. Specify one track per selector.
	TrackNumber *int64 `locationName:"trackNumber" min:"1" type:"integer"`
}

// String returns the string representation
func (s TrackSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TrackSourceSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "TrackSourceSettings"}
	// Enforce the min:"1" constraint from the struct tag (track numbers are 1-based).
	if s.TrackNumber != nil && *s.TrackNumber < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("TrackNumber", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s TrackSourceSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.TrackNumber != nil {
		v := *s.TrackNumber
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "trackNumber", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Settings specific to TTML caption outputs, including Pass style information
// (TtmlStylePassthrough).
type TtmlDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Pass through style and position information from a TTML-like input source
	// (TTML, SMPTE-TT) to the TTML output.
	StylePassthrough TtmlStylePassthrough `locationName:"stylePassthrough" type:"string" enum:"true"`
}

// String returns the string representation
func (s TtmlDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s TtmlDestinationSettings) MarshalFields(e protocol.FieldEncoder) error {
	// The enum marshals as a quoted JSON string; empty means "not set" and is omitted.
	if len(s.StylePassthrough) > 0 {
		v := s.StylePassthrough
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "stylePassthrough", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Video codec settings, (CodecSettings) under (VideoDescription), contains
// the group of settings related to video encoding. The settings in this group
// vary depending on the value that you choose for Video codec (Codec). For
// each codec enum that you choose, define the corresponding settings object.
// The following lists the codec enum, settings object pairs. * FRAME_CAPTURE,
// FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings
// * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VP8, Vp8Settings * VP9,
// Vp9Settings
type VideoCodecSettings struct {
	_ struct{} `type:"structure"`

	// Required when you set Codec, under VideoDescription>CodecSettings to the
	// value AV1.
	Av1Settings *Av1Settings `locationName:"av1Settings" type:"structure"`

	// Specifies the video codec. This must be equal to one of the enum values defined
	// by the object VideoCodec.
	Codec VideoCodec `locationName:"codec" type:"string" enum:"true"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value FRAME_CAPTURE.
	FrameCaptureSettings *FrameCaptureSettings `locationName:"frameCaptureSettings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value H_264.
	H264Settings *H264Settings `locationName:"h264Settings" type:"structure"`

	// Settings for H265 codec
	H265Settings *H265Settings `locationName:"h265Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value MPEG2.
	Mpeg2Settings *Mpeg2Settings `locationName:"mpeg2Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value PRORES.
	ProresSettings *ProresSettings `locationName:"proresSettings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value VP8.
	Vp8Settings *Vp8Settings `locationName:"vp8Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value VP9.
	Vp9Settings *Vp9Settings `locationName:"vp9Settings" type:"structure"`
}

// String returns the string representation
func (s VideoCodecSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VideoCodecSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "VideoCodecSettings"}
	// Validation errors from nested codec settings are reported under this context.
	if s.Av1Settings != nil {
		if err := s.Av1Settings.Validate(); err != nil {
			invalidParams.AddNested("Av1Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.FrameCaptureSettings != nil {
		if err := s.FrameCaptureSettings.Validate(); err != nil {
			invalidParams.AddNested("FrameCaptureSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.H264Settings != nil {
		if err := s.H264Settings.Validate(); err != nil {
			invalidParams.AddNested("H264Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.H265Settings != nil {
		if err := s.H265Settings.Validate(); err != nil {
			invalidParams.AddNested("H265Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Mpeg2Settings != nil {
		if err := s.Mpeg2Settings.Validate(); err != nil {
			invalidParams.AddNested("Mpeg2Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.ProresSettings != nil {
		if err := s.ProresSettings.Validate(); err != nil {
			invalidParams.AddNested("ProresSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Vp8Settings != nil {
		if err := s.Vp8Settings.Validate(); err != nil {
			invalidParams.AddNested("Vp8Settings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Vp9Settings != nil {
		if err := s.Vp9Settings.Validate(); err != nil {
			invalidParams.AddNested("Vp9Settings", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s VideoCodecSettings) MarshalFields(e protocol.FieldEncoder) error {
	// Nested structures are marshaled with SetFields; the Codec enum with SetValue.
	if s.Av1Settings != nil {
		v := s.Av1Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "av1Settings", v, metadata)
	}
	if len(s.Codec) > 0 {
		v := s.Codec
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "codec", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FrameCaptureSettings != nil {
		v := s.FrameCaptureSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "frameCaptureSettings", v, metadata)
	}
	if s.H264Settings != nil {
		v := s.H264Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "h264Settings", v, metadata)
	}
	if s.H265Settings != nil {
		v := s.H265Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "h265Settings", v, metadata)
	}
	if s.Mpeg2Settings != nil {
		v := s.Mpeg2Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "mpeg2Settings", v, metadata)
	}
	if s.ProresSettings != nil {
		v := s.ProresSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "proresSettings", v, metadata)
	}
	if s.Vp8Settings != nil {
		v := s.Vp8Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "vp8Settings", v, metadata)
	}
	if s.Vp9Settings != nil {
		v := s.Vp9Settings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "vp9Settings", v, metadata)
	}
	return nil
}

// Settings for video outputs
type VideoDescription struct {
	_ struct{} `type:"structure"`

	// This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert
	// AFD signaling (AfdSignaling) to specify whether the service includes AFD
	// values in the output video data and what those values are. * Choose None
	// to remove all AFD values from this output. * Choose Fixed to ignore input
	// AFD values and instead encode the value specified in the job. * Choose Auto
	// to calculate output AFD values based on the input AFD scaler data.
	AfdSignaling AfdSignaling `locationName:"afdSignaling" type:"string" enum:"true"`

	// The anti-alias filter is automatically applied to all outputs. The service
	// no longer accepts the value DISABLED for AntiAlias. If you specify that in
	// your job, the service will ignore the setting.
	AntiAlias AntiAlias `locationName:"antiAlias" type:"string" enum:"true"`

	// Video codec settings, (CodecSettings) under (VideoDescription), contains
	// the group of settings related to video encoding. The settings in this group
	// vary depending on the value that you choose for Video codec (Codec). For
	// each codec enum that you choose, define the corresponding settings object.
	// The following lists the codec enum, settings object pairs. * FRAME_CAPTURE,
	// FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings
	// * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VP8, Vp8Settings * VP9,
	// Vp9Settings
	CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"`

	// Choose Insert (INSERT) for this setting to include color metadata in this
	// output. Choose Ignore (IGNORE) to exclude color metadata from this output.
	// If you don't specify a value, the service sets this to Insert by default.
	ColorMetadata ColorMetadata `locationName:"colorMetadata" type:"string" enum:"true"`

	// Use Cropping selection (crop) to specify the video area that the service
	// will include in the output video frame.
	Crop *Rectangle `locationName:"crop" type:"structure"`

	// Applies only to 29.97 fps outputs. When this feature is enabled, the service
	// will use drop-frame timecode on outputs. If it is not possible to use drop-frame
	// timecode, the system will fall back to non-drop-frame. This setting is enabled
	// by default when Timecode insertion (TimecodeInsertion) is enabled.
	DropFrameTimecode DropFrameTimecode `locationName:"dropFrameTimecode" type:"string" enum:"true"`

	// Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use
	// Fixed (FixedAfd) to specify a four-bit AFD value which the service will write
	// on all frames of this video output.
	FixedAfd *int64 `locationName:"fixedAfd" type:"integer"`

	// Use the Height (Height) setting to define the video resolution height for
	// this output. Specify in pixels. If you don't provide a value here, the service
	// will use the input height.
	Height *int64 `locationName:"height" min:"32" type:"integer"`

	// Use Selection placement (position) to define the video area in your output
	// frame. The area outside of the rectangle that you specify here is black.
	Position *Rectangle `locationName:"position" type:"structure"`

	// Use Respond to AFD (RespondToAfd) to specify how the service changes the
	// video itself in response to AFD values in the input. * Choose Respond to
	// clip the input video frame according to the AFD value, input display aspect
	// ratio, and output display aspect ratio. * Choose Passthrough to include the
	// input AFD values. Do not choose this when AfdSignaling is set to (NONE).
	// A preferred implementation of this workflow is to set RespondToAfd to (NONE)
	// and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values
	// from this output.
	RespondToAfd RespondToAfd `locationName:"respondToAfd" type:"string" enum:"true"`

	// Specify how the service handles outputs that have a different aspect ratio
	// from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT)
	// to have the service stretch your video image to fit. Keep the setting Default
	// (DEFAULT) to have the service letterbox your video instead. This setting
	// overrides any value that you specify for the setting Selection placement
	// (position) in this output.
	ScalingBehavior ScalingBehavior `locationName:"scalingBehavior" type:"string" enum:"true"`

	// Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing.
	// This setting changes the width of the anti-alias filter kernel used for scaling.
	// Sharpness only applies if your output resolution is different from your input
	// resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended
	// for most content.
	Sharpness *int64 `locationName:"sharpness" type:"integer"`

	// Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode
	// insertion when the input frame rate is identical to the output frame rate.
	// To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion)
	// to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED.
	// When the service inserts timecodes in an output, by default, it uses any
	// embedded timecodes from the input. If none are present, the service will
	// set the timecode for the first output frame to zero. To change this default
	// behavior, adjust the settings under Timecode configuration (TimecodeConfig).
	// In the console, these settings are located under Job > Job settings > Timecode
	// configuration. Note - Timecode source under input settings (InputTimecodeSource)
	// does not affect the timecodes that are inserted in the output. Source under
	// Job settings > Timecode configuration (TimecodeSource) does.
	TimecodeInsertion VideoTimecodeInsertion `locationName:"timecodeInsertion" type:"string" enum:"true"`

	// Find additional transcoding features under Preprocessors (VideoPreprocessors).
	// Enable the features at each output individually. These features are disabled
	// by default.
	VideoPreprocessors *VideoPreprocessor `locationName:"videoPreprocessors" type:"structure"`

	// Use Width (Width) to define the video resolution width, in pixels, for this
	// output. If you don't provide a value here, the service will use the input
	// width.
	Width *int64 `locationName:"width" min:"32" type:"integer"`
}

// String returns the string representation
func (s VideoDescription) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VideoDescription) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "VideoDescription"}
	// Enforce the min:"32" constraints from the struct tags.
	if s.Height != nil && *s.Height < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("Height", 32))
	}
	if s.Width != nil && *s.Width < 32 {
		invalidParams.Add(aws.NewErrParamMinValue("Width", 32))
	}
	// Validation errors from nested structures are reported under this context.
	if s.CodecSettings != nil {
		if err := s.CodecSettings.Validate(); err != nil {
			invalidParams.AddNested("CodecSettings", err.(aws.ErrInvalidParams))
		}
	}
	if s.Crop != nil {
		if err := s.Crop.Validate(); err != nil {
			invalidParams.AddNested("Crop", err.(aws.ErrInvalidParams))
		}
	}
	if s.Position != nil {
		if err := s.Position.Validate(); err != nil {
			invalidParams.AddNested("Position", err.(aws.ErrInvalidParams))
		}
	}
	if s.VideoPreprocessors != nil {
		if err := s.VideoPreprocessors.Validate(); err != nil {
			invalidParams.AddNested("VideoPreprocessors", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s VideoDescription) MarshalFields(e protocol.FieldEncoder) error {
	// Unset optionals are omitted from the request body: enum (string-typed)
	// fields are emitted only when non-empty, pointer fields only when non-nil.
	// The JSON keys below must match the locationName struct tags exactly.
	if len(s.AfdSignaling) > 0 {
		v := s.AfdSignaling
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "afdSignaling", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.AntiAlias) > 0 {
		v := s.AntiAlias
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "antiAlias", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.CodecSettings != nil {
		v := s.CodecSettings
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "codecSettings", v, metadata)
	}
	if len(s.ColorMetadata) > 0 {
		v := s.ColorMetadata
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "colorMetadata", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Crop != nil {
		v := s.Crop
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "crop", v, metadata)
	}
	if len(s.DropFrameTimecode) > 0 {
		v := s.DropFrameTimecode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "dropFrameTimecode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FixedAfd != nil {
		v := *s.FixedAfd
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "fixedAfd", protocol.Int64Value(v), metadata)
	}
	if s.Height != nil {
		v := *s.Height
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "height", protocol.Int64Value(v), metadata)
	}
	if s.Position != nil {
		v := s.Position
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "position", v, metadata)
	}
	if len(s.RespondToAfd) > 0 {
		v := s.RespondToAfd
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "respondToAfd", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.ScalingBehavior) > 0 {
		v := s.ScalingBehavior
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "scalingBehavior", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Sharpness != nil {
		v := *s.Sharpness
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sharpness", protocol.Int64Value(v), metadata)
	}
	if len(s.TimecodeInsertion) > 0 {
		v := s.TimecodeInsertion
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "timecodeInsertion", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.VideoPreprocessors != nil {
		v := s.VideoPreprocessors
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "videoPreprocessors", v, metadata)
	}
	if s.Width != nil {
		v := *s.Width
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "width", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Contains details about the output's video stream
type VideoDetail struct {
	_ struct{} `type:"structure"`

	// Height in pixels for the output
	HeightInPx *int64 `locationName:"heightInPx" type:"integer"`

	// Width in pixels for the output
	WidthInPx *int64 `locationName:"widthInPx" type:"integer"`
}

// String returns the string representation
func (s VideoDetail) String() string {
	return awsutil.Prettify(s)
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s VideoDetail) MarshalFields(e protocol.FieldEncoder) error {
	if s.HeightInPx != nil {
		v := *s.HeightInPx
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "heightInPx", protocol.Int64Value(v), metadata)
	}
	if s.WidthInPx != nil {
		v := *s.WidthInPx
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "widthInPx", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Find additional transcoding features under Preprocessors (VideoPreprocessors).
// Enable the features at each output individually. These features are disabled
// by default.
type VideoPreprocessor struct {
	_ struct{} `type:"structure"`

	// Enable the Color corrector (ColorCorrector) feature if necessary. Enable
	// or disable this feature for each output individually. This setting is disabled
	// by default.
	ColorCorrector *ColorCorrector `locationName:"colorCorrector" type:"structure"`

	// Use Deinterlacer (Deinterlacer) to produce smoother motion and a clearer
	// picture.
	Deinterlacer *Deinterlacer `locationName:"deinterlacer" type:"structure"`

	// Enable Dolby Vision feature to produce Dolby Vision compatible video output.
	DolbyVision *DolbyVision `locationName:"dolbyVision" type:"structure"`

	// Enable the Image inserter (ImageInserter) feature to include a graphic overlay
	// on your video. Enable or disable this feature for each output individually.
	// This setting is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// Enable the Noise reducer (NoiseReducer) feature to remove noise from your
	// video output if necessary. Enable or disable this feature for each output
	// individually. This setting is disabled by default.
	NoiseReducer *NoiseReducer `locationName:"noiseReducer" type:"structure"`

	// If you work with a third party video watermarking partner, use the group
	// of settings that correspond with your watermarking partner to include watermarks
	// in your output.
	PartnerWatermarking *PartnerWatermarking `locationName:"partnerWatermarking" type:"structure"`

	// Timecode burn-in (TimecodeBurnIn)--Burns the output timecode and specified
	// prefix into the output.
	TimecodeBurnin *TimecodeBurnin `locationName:"timecodeBurnin" type:"structure"`
}

// String returns the string representation
func (s VideoPreprocessor) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VideoPreprocessor) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "VideoPreprocessor"} if s.ColorCorrector != nil { if err := s.ColorCorrector.Validate(); err != nil { invalidParams.AddNested("ColorCorrector", err.(aws.ErrInvalidParams)) } } if s.ImageInserter != nil { if err := s.ImageInserter.Validate(); err != nil { invalidParams.AddNested("ImageInserter", err.(aws.ErrInvalidParams)) } } if s.NoiseReducer != nil { if err := s.NoiseReducer.Validate(); err != nil { invalidParams.AddNested("NoiseReducer", err.(aws.ErrInvalidParams)) } } if s.PartnerWatermarking != nil { if err := s.PartnerWatermarking.Validate(); err != nil { invalidParams.AddNested("PartnerWatermarking", err.(aws.ErrInvalidParams)) } } if s.TimecodeBurnin != nil { if err := s.TimecodeBurnin.Validate(); err != nil { invalidParams.AddNested("TimecodeBurnin", err.(aws.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s VideoPreprocessor) MarshalFields(e protocol.FieldEncoder) error {
	// Every preprocessor is an optional sub-structure; only non-nil ones are
	// serialized, each under its locationName key.
	if s.ColorCorrector != nil {
		v := s.ColorCorrector
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "colorCorrector", v, metadata)
	}
	if s.Deinterlacer != nil {
		v := s.Deinterlacer
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "deinterlacer", v, metadata)
	}
	if s.DolbyVision != nil {
		v := s.DolbyVision
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "dolbyVision", v, metadata)
	}
	if s.ImageInserter != nil {
		v := s.ImageInserter
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "imageInserter", v, metadata)
	}
	if s.NoiseReducer != nil {
		v := s.NoiseReducer
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "noiseReducer", v, metadata)
	}
	if s.PartnerWatermarking != nil {
		v := s.PartnerWatermarking
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "partnerWatermarking", v, metadata)
	}
	if s.TimecodeBurnin != nil {
		v := s.TimecodeBurnin
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "timecodeBurnin", v, metadata)
	}
	return nil
}

// Selector for video.
type VideoSelector struct {
	_ struct{} `type:"structure"`

	// Ignore this setting unless this input is a QuickTime animation with an alpha
	// channel. Use this setting to create separate Key and Fill outputs. In each
	// output, specify which part of the input MediaConvert uses. Leave this setting
	// at the default value DISCARD to delete the alpha channel and preserve the
	// video. Set it to REMAP_TO_LUMA to delete the video and map the alpha channel
	// to the luma channel of your outputs.
	AlphaBehavior AlphaBehavior `locationName:"alphaBehavior" type:"string" enum:"true"`

	// If your input video has accurate color space metadata, or if you don't know
	// about color space, leave this set to the default value Follow (FOLLOW). The
	// service will automatically detect your input color space. If your input video
	// has metadata indicating the wrong color space, specify the accurate color
	// space here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering
	// Display Color Volume static metadata isn't present in your video stream,
	// or if that metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10)
	// here and specify correct values in the input HDR 10 metadata (Hdr10Metadata)
	// settings. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
	ColorSpace ColorSpace `locationName:"colorSpace" type:"string" enum:"true"`

	// There are two sources for color metadata, the input file and the job input
	// settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata).
	// The Color space usage setting determines which takes precedence. Choose Force
	// (FORCE) to use color metadata from the input job settings. If you don't specify
	// values for those settings, the service defaults to using metadata from your
	// input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the
	// source when it is present. If there's no color metadata in your input file,
	// the service defaults to using values you specify in the input settings.
	ColorSpaceUsage ColorSpaceUsage `locationName:"colorSpaceUsage" type:"string" enum:"true"`

	// Use these settings to provide HDR 10 metadata that is missing or inaccurate
	// in your input video. Appropriate values vary depending on the input video
	// and must be provided by a color grader. The color grader generates these
	// values during the HDR 10 mastering process. The valid range for each of these
	// settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color
	// coordinate. Related settings - When you specify these values, you must also
	// set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the
	// values you specify here take precedence over the values in the metadata of
	// your input file, set Color space usage (ColorSpaceUsage). To specify whether
	// color metadata is included in an output, set Color metadata (ColorMetadata).
	// For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
	Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"`

	// Use PID (Pid) to select specific video data from an input file. Specify this
	// value as an integer; the system automatically converts it to the hexadecimal
	// value. For example, 257 selects PID 0x101. A PID, or packet identifier, is
	// an identifier for a set of data in an MPEG-2 transport stream container.
	Pid *int64 `locationName:"pid" min:"1" type:"integer"`

	// Selects a specific program from within a multi-program transport stream.
	// Note that Quad 4K is not currently supported.
	ProgramNumber *int64 `locationName:"programNumber" type:"integer"`

	// Use Rotate (InputRotate) to specify how the service rotates your video. You
	// can choose automatic rotation or specify a rotation. You can specify a clockwise
	// rotation of 0, 90, 180, or 270 degrees. If your input video container is
	// .mov or .mp4 and your input has rotation metadata, you can choose Automatic
	// to have the service rotate your video according to the rotation specified
	// in the metadata. The rotation must be within one degree of 90, 180, or 270
	// degrees. If the rotation metadata specifies any other rotation, the service
	// will default to no rotation. By default, the service does no rotation, even
	// if your input video has rotation metadata. The service doesn't pass through
	// rotation metadata.
	Rotate InputRotate `locationName:"rotate" type:"string" enum:"true"`
}

// String returns the string representation
func (s VideoSelector) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VideoSelector) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "VideoSelector"}
	if s.Pid != nil && *s.Pid < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Pid", 1))
	}
	// -2.147483648e+09 is an untyped constant equal to math.MinInt32: the
	// service constrains ProgramNumber to the signed 32-bit range even though
	// the field is carried as an *int64.
	if s.ProgramNumber != nil && *s.ProgramNumber < -2.147483648e+09 {
		invalidParams.Add(aws.NewErrParamMinValue("ProgramNumber", -2.147483648e+09))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s VideoSelector) MarshalFields(e protocol.FieldEncoder) error {
	if len(s.AlphaBehavior) > 0 {
		v := s.AlphaBehavior
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "alphaBehavior", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.ColorSpace) > 0 {
		v := s.ColorSpace
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "colorSpace", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.ColorSpaceUsage) > 0 {
		v := s.ColorSpaceUsage
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "colorSpaceUsage", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.Hdr10Metadata != nil {
		v := s.Hdr10Metadata
		metadata := protocol.Metadata{}
		e.SetFields(protocol.BodyTarget, "hdr10Metadata", v, metadata)
	}
	if s.Pid != nil {
		v := *s.Pid
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "pid", protocol.Int64Value(v), metadata)
	}
	if s.ProgramNumber != nil {
		v := *s.ProgramNumber
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "programNumber", protocol.Int64Value(v), metadata)
	}
	if len(s.Rotate) > 0 {
		v := s.Rotate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rotate", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value Vorbis.
type VorbisSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify the number of channels in this output audio track. Choosing
	// Mono on the console gives you 1 output channel; choosing Stereo gives you
	// 2. In the API, valid values are 1 and 2. The default value is 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Optional. Specify the audio sample rate in Hz. Valid values are 22050, 32000,
	// 44100, and 48000. The default value is 48000.
	SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"`

	// Optional. Specify the variable audio quality of this Vorbis output from -1
	// (lowest quality, ~45 kbit/s) to 10 (highest quality, ~500 kbit/s). The default
	// value is 4 (~128 kbit/s). Values 5 and 6 are approximately 160 and 192 kbit/s,
	// respectively.
	VbrQuality *int64 `locationName:"vbrQuality" type:"integer"`
}

// String returns the string representation
func (s VorbisSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VorbisSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "VorbisSettings"}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 22050 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 22050))
	}
	// VBR quality legitimately goes down to -1 (see the field comment), so
	// only values below -1 are rejected.
	if s.VbrQuality != nil && *s.VbrQuality < -1 {
		invalidParams.Add(aws.NewErrParamMinValue("VbrQuality", -1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s VorbisSettings) MarshalFields(e protocol.FieldEncoder) error {
	// Only explicitly set (non-nil) fields are written to the request body.
	if s.Channels != nil {
		v := *s.Channels
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "channels", protocol.Int64Value(v), metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}
	if s.VbrQuality != nil {
		v := *s.VbrQuality
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "vbrQuality", protocol.Int64Value(v), metadata)
	}
	return nil
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value VP8.
type Vp8Settings struct {
	_ struct{} `type:"structure"`

	// Target bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl Vp8FramerateControl `locationName:"framerateControl" type:"string" enum:"true"`

	// Optional. Specify how the transcoder performs framerate conversion. The default
	// behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. When you choose
	// Interpolate (INTERPOLATE) instead, the conversion produces smoother motion.
	FramerateConversionAlgorithm Vp8FramerateConversionAlgorithm `locationName:"framerateConversionAlgorithm" type:"string" enum:"true"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// GOP Length (keyframe interval) in frames. Must be greater than zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Optional. Size of buffer (HRD buffer model) in bits. For example, enter five
	// megabits as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional.
	// Specify the maximum bitrate in bits/second. For example, enter five megabits
	// per second as 5000000. The default behavior uses twice the target bitrate
	// as the maximum bitrate.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl Vp8ParControl `locationName:"parControl" type:"string" enum:"true"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, multi-pass encoding.
	QualityTuningLevel Vp8QualityTuningLevel `locationName:"qualityTuningLevel" type:"string" enum:"true"`

	// With the VP8 codec, you can use only the variable bitrate (VBR) rate control
	// mode.
	RateControlMode Vp8RateControlMode `locationName:"rateControlMode" type:"string" enum:"true"`
}

// String returns the string representation
func (s Vp8Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
//
// The minimum-value checks mirror the min:"..." constraints in the struct tags.
func (s *Vp8Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Vp8Settings"}
	if s.Bitrate != nil && *s.Bitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 1000))
	}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParNumerator", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s Vp8Settings) MarshalFields(e protocol.FieldEncoder) error {
	// Enum (string-typed) fields are emitted only when non-empty; pointer fields
	// only when non-nil. The JSON keys match the locationName struct tags.
	if s.Bitrate != nil {
		v := *s.Bitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.FramerateControl) > 0 {
		v := s.FramerateControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateConversionAlgorithm) > 0 {
		v := s.FramerateConversionAlgorithm
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateConversionAlgorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}
	if s.GopSize != nil {
		v := *s.GopSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSize", protocol.Float64Value(v), metadata)
	}
	if s.HrdBufferSize != nil {
		v := *s.HrdBufferSize
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hrdBufferSize", protocol.Int64Value(v), metadata)
	}
	if s.MaxBitrate != nil {
		v := *s.MaxBitrate
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxBitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.ParControl) > 0 {
		v := s.ParControl
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ParDenominator != nil {
		v := *s.ParDenominator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parDenominator", protocol.Int64Value(v), metadata)
	}
	if s.ParNumerator != nil {
		v := *s.ParNumerator
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.QualityTuningLevel) > 0 {
		v := s.QualityTuningLevel
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qualityTuningLevel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.RateControlMode) > 0 {
		v := s.RateControlMode
		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateControlMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value VP9.
type Vp9Settings struct {
	_ struct{} `type:"structure"`

	// Target bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl Vp9FramerateControl `locationName:"framerateControl" type:"string" enum:"true"`

	// Optional. Specify how the transcoder performs framerate conversion. The default
	// behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. When you choose
	// Interpolate (INTERPOLATE) instead, the conversion produces smoother motion.
	FramerateConversionAlgorithm Vp9FramerateConversionAlgorithm `locationName:"framerateConversionAlgorithm" type:"string" enum:"true"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// GOP Length (keyframe interval) in frames. Must be greater than zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional.
	// Specify the maximum bitrate in bits/second. For example, enter five megabits
	// per second as 5000000. The default behavior uses twice the target bitrate
	// as the maximum bitrate.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio for this
	// output. The default behavior is to use the same pixel aspect ratio as your
	// input video.
	ParControl Vp9ParControl `locationName:"parControl" type:"string" enum:"true"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, multi-pass encoding.
	QualityTuningLevel Vp9QualityTuningLevel `locationName:"qualityTuningLevel" type:"string" enum:"true"`

	// With the VP9 codec, you can use only the variable bitrate (VBR) rate control
	// mode.
	RateControlMode Vp9RateControlMode `locationName:"rateControlMode" type:"string" enum:"true"`
}

// String returns the string representation
func (s Vp9Settings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Vp9Settings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "Vp9Settings"}
	// Each check mirrors the min constraint declared in the field's struct tag;
	// nil means "not set" and is always accepted (the service applies defaults).
	if s.Bitrate != nil && *s.Bitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("Bitrate", 1000))
	}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(aws.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("ParNumerator", 1))
	}

	// All violations are accumulated so the caller sees every invalid
	// parameter at once rather than only the first.
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only fields that are set (non-nil pointers, non-empty enum strings) are
// written, so absent values are omitted from the request body entirely.
func (s Vp9Settings) MarshalFields(e protocol.FieldEncoder) error {
	if s.Bitrate != nil {
		v := *s.Bitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.FramerateControl) > 0 {
		v := s.FramerateControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.FramerateConversionAlgorithm) > 0 {
		v := s.FramerateConversionAlgorithm

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateConversionAlgorithm", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.FramerateDenominator != nil {
		v := *s.FramerateDenominator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateDenominator", protocol.Int64Value(v), metadata)
	}
	if s.FramerateNumerator != nil {
		v := *s.FramerateNumerator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "framerateNumerator", protocol.Int64Value(v), metadata)
	}
	if s.GopSize != nil {
		v := *s.GopSize

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "gopSize", protocol.Float64Value(v), metadata)
	}
	if s.HrdBufferSize != nil {
		v := *s.HrdBufferSize

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "hrdBufferSize", protocol.Int64Value(v), metadata)
	}
	if s.MaxBitrate != nil {
		v := *s.MaxBitrate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "maxBitrate", protocol.Int64Value(v), metadata)
	}
	if len(s.ParControl) > 0 {
		v := s.ParControl

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parControl", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.ParDenominator != nil {
		v := *s.ParDenominator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parDenominator", protocol.Int64Value(v), metadata)
	}
	if s.ParNumerator != nil {
		v := *s.ParNumerator

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "parNumerator", protocol.Int64Value(v), metadata)
	}
	if len(s.QualityTuningLevel) > 0 {
		v := s.QualityTuningLevel

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "qualityTuningLevel", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if len(s.RateControlMode) > 0 {
		v := s.RateControlMode

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "rateControlMode", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	return nil
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value WAV.
type WavSettings struct {
	_ struct{} `type:"structure"`

	// Specify Bit depth (BitDepth), in bits per sample, to choose the encoding
	// quality for this audio track.
	BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"`

	// Specify the number of channels in this output audio track. Valid values
	// are 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up
	// to 64.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// The service defaults to using RIFF for WAV outputs. If your output audio
	// is likely to exceed 4 GB in file size, or if you otherwise need the extended
	// support of the RF64 format, set your output WAV file format to RF64.
	Format WavFormat `locationName:"format" type:"string" enum:"true"`

	// Sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`
}

// String returns the string representation
func (s WavSettings) String() string {
	return awsutil.Prettify(s)
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *WavSettings) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "WavSettings"}
	// Each check mirrors the min constraint declared in the field's struct tag;
	// unset (nil) fields are skipped.
	if s.BitDepth != nil && *s.BitDepth < 16 {
		invalidParams.Add(aws.NewErrParamMinValue("BitDepth", 16))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 8000 {
		invalidParams.Add(aws.NewErrParamMinValue("SampleRate", 8000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
// Only set fields are emitted; unset fields are omitted from the request body.
func (s WavSettings) MarshalFields(e protocol.FieldEncoder) error {
	if s.BitDepth != nil {
		v := *s.BitDepth

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "bitDepth", protocol.Int64Value(v), metadata)
	}
	if s.Channels != nil {
		v := *s.Channels

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "channels", protocol.Int64Value(v), metadata)
	}
	if len(s.Format) > 0 {
		v := s.Format

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "format", protocol.QuotedValue{ValueMarshaler: v}, metadata)
	}
	if s.SampleRate != nil {
		v := *s.SampleRate

		metadata := protocol.Metadata{}
		e.SetValue(protocol.BodyTarget, "sampleRate", protocol.Int64Value(v), metadata)
	}
	return nil
}